Handlers refactoring #2
.gitignore (vendored, 6 lines changed)
@@ -1,4 +1,4 @@
 *.sqlite
 *.db
 assets/*
+build/*

@@ -1,12 +1,12 @@
 filename: "mock_{{.InterfaceName}}.go"
 dir: "mocks/{{.PackagePath}}"
 outpkg: "{{.PackageName}}"
 with-expecter: true
 packages:
   git.optclblast.xyz/draincloud/draincloud-core/internal/storage:
     interfaces:
       Database:
       AuthAuditLogStorage:
       AuthStorage:
       BlobStorage:
+      MetaStorage:

.vscode/launch.json (vendored, 30 lines changed)
@@ -1,16 +1,16 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Launch Package",
            "type": "go",
            "request": "launch",
            "mode": "auto",
            "program": "${workspaceFolder}/cmd/main.go"
        }
    ]
}

README.md (14 lines changed)
@@ -1,7 +1,7 @@
# DrainCloud Core
DrainCloud Core is an all-in-one lightweight DrainCloud distribution designed to work in resource-constrained environments.
The node can work in three modes: #TBD
1. All-in-one mode, the recommended one.
2. Auth-node. Only auth api will be operational.
3. Storage-node. Only filestorage api will be operational.

@@ -1,12 +1,12 @@
version: 3

tasks:
  run:
    cmds:
      - go run cmd/main.go
  deploy-local:
    cmds:
      - sudo docker stack deploy draincloud_core -c ./compose.rw.yaml
  migrate-local-status:
    cmds:
      - goose postgres "postgres://draincloud:draincloud@localhost:5432/draincloud" status -dir migrations

cmd/main.go (66 lines changed)
@@ -1,33 +1,33 @@
 package main

 import (
     "context"
     "os"
     "os/signal"

     "git.optclblast.xyz/draincloud/draincloud-core/internal/app"
     filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
     "git.optclblast.xyz/draincloud/draincloud-core/internal/plugin"
     "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/postgres"
 )

 func main() {
     ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
     defer cancel()

     plugin.MustNewPluginLoader(ctx, 8081, plugin.NewPluginStore()).
         Run(ctx)

     pg := postgres.New(ctx, "postgres://draincloud:mysuperstrongpassword@127.0.0.1:5432/draincloud?sslmode=disable")

     // TODO move cron on a separate job (k8s cronjob / docker cron)
     // cleanupSessionsCron := cleanupsessions.New(pg)
     // cleanupSessionsCron.Run(ctx)

     engine := filesengine.NewFilesEngine(nil, nil)

-    go app.New(pg, engine).
+    go app.New(ctx, pg, engine).
         Run(ctx)

     <-ctx.Done()
 }

@@ -1,50 +1,50 @@
services:
  rw_1:
    image: postgres:17
    container_name: draincloud-db-rw-1
    ports:
      - 5432:5432
    environment:
      - POSTGRES_USER=draincloud
      - POSTGRES_PASSWORD=mysuperstrongpassword
      - POSTGRES_DB=draincloud
    volumes:
      - draincloud-rw-1:/var/lib/postgresql/data
    networks:
      - draincloud-pg

  # rw_2:
  #   image: postgres:17
  #   container_name: draincloud-db-rw-2
  #   ports:
  #     - 5433:5432
  #   environment:
  #     - POSTGRES_USER=draincloud
  #     - POSTGRES_PASSWORD=mysuperstrongpassword
  #     - POSTGRES_DB=draincloud
  #   volumes:
  #     - draincloud-rw-2:/var/lib/postgresql/data
  #   networks:
  #     - draincloud-pg

  # rw_3:
  #   image: postgres:17
  #   container_name: draincloud-db-rw-3
  #   ports:
  #     - 5434:5432
  #   environment:
  #     - POSTGRES_USER=draincloud
  #     - POSTGRES_PASSWORD=mysuperstrongpassword
  #     - POSTGRES_DB=draincloud
  #   volumes:
  #     - draincloud-rw-3:/var/lib/postgresql/data
  #   networks:
  #     - draincloud-pg

volumes:
  draincloud-rw-1: {}
  # draincloud-rw-2: {}
  # draincloud-rw-3: {}

networks:
  draincloud-pg: {}

go.mod (7 lines changed)
@@ -3,7 +3,6 @@ module git.optclblast.xyz/draincloud/draincloud-core
go 1.23.0

require (
    github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
    github.com/fatih/color v1.17.0
    github.com/fsnotify/fsnotify v1.7.0
    github.com/gin-gonic/gin v1.10.0
@@ -12,9 +11,9 @@ require (
    github.com/jmoiron/sqlx v1.4.0
    github.com/nats-io/nats.go v1.37.0
    github.com/spf13/viper v1.19.0
    github.com/stretchr/testify v1.9.0
    golang.org/x/crypto v0.28.0
    golang.org/x/sync v0.8.0
    google.golang.org/grpc v1.62.1
)

require (
@@ -28,6 +27,7 @@ require (
    github.com/go-playground/universal-translator v0.18.1 // indirect
    github.com/go-playground/validator/v10 v10.20.0 // indirect
    github.com/goccy/go-json v0.10.2 // indirect
    github.com/golang/protobuf v1.5.3 // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/jackc/pgpassfile v1.0.0 // indirect
    github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
@@ -44,7 +44,6 @@ require (
    github.com/nats-io/nkeys v0.4.7 // indirect
    github.com/nats-io/nuid v1.0.1 // indirect
    github.com/pelletier/go-toml/v2 v2.2.2 // indirect
    github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
    github.com/rogpeppe/go-internal v1.13.1 // indirect
    github.com/sagikazarmark/locafero v0.4.0 // indirect
    github.com/sagikazarmark/slog-shim v0.1.0 // indirect
@@ -52,7 +51,6 @@ require (
    github.com/spf13/afero v1.11.0 // indirect
    github.com/spf13/cast v1.6.0 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    github.com/stretchr/objx v0.5.2 // indirect
    github.com/subosito/gotenv v1.6.0 // indirect
    github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
    github.com/ugorji/go/codec v1.2.12 // indirect
@@ -63,6 +61,7 @@ require (
    golang.org/x/net v0.25.0 // indirect
    golang.org/x/sys v0.26.0 // indirect
    golang.org/x/text v0.19.0 // indirect
    google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
    google.golang.org/protobuf v1.34.1 // indirect
    gopkg.in/ini.v1 v1.67.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum (16 lines changed)
@@ -36,8 +36,12 @@ github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpv
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -115,7 +119,6 @@ github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -153,6 +156,13 @@ golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

@@ -1,81 +1,82 @@
 package app

 import (
     "context"
-    "errors"
-    "net/http"

     "git.optclblast.xyz/draincloud/draincloud-core/internal/app/handlers"
-    "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
     filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
     "git.optclblast.xyz/draincloud/draincloud-core/internal/processor"
     resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
+    "git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/auth"
     "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
     "github.com/gin-gonic/gin"
 )

 type DrainCloud struct {
     mux         *gin.Engine
     database    storage.Database
     filesEngine *filesengine.FilesEngine

     ginProcessor processor.Processor[gin.HandlerFunc]
 }

 func New(
+    ctx context.Context,
     database storage.Database,
     filesEngine *filesengine.FilesEngine,
 ) *DrainCloud {
     mux := gin.Default()

     dispatcher := resolvedispatcher.New()
+    dispatcher.RegisterResolver(
+        ctx,
+        auth.AuthResolverV1Name,
+        auth.NewAuthResolver(database),
+    )

     d := &DrainCloud{
         database:     database,
         filesEngine:  filesEngine,
         ginProcessor: processor.NewGinProcessor(database, dispatcher),
     }

+    // TODO. Maybe overkill
+    internalGroup := mux.Group("/_internal")
+    {
+        regGroup := internalGroup.Group("/register")
+        {
+            regGroup.POST("/resolver", d.ginProcessor.Process(
+                handlers.NewInternalRegisterResolverHandler(dispatcher),
+            ))
+            regGroup.POST("/plugin", func(ctx *gin.Context) {})
+        }
+    }
+
     // Built-in auth component of DrainCloud-Core
     authGroup := mux.Group("/auth")
     {
-        // authGroup.POST("/register", d.Register)
         authGroup.POST("/register", d.ginProcessor.Process(
             handlers.NewRegisterHandler(database),
         ))
-        authGroup.POST("/logon", d.Login)
+        authGroup.POST("/logon", d.ginProcessor.Process(
+            handlers.NewLogonHandler(database),
+        ))
     }

     filesGroup := mux.Group("/files")
     {
-        filesGroup.POST("/upload", d.UploadFile)
+        filesGroup.POST("/upload", d.ginProcessor.Process(
+            handlers.NewUploadFileHandler(filesEngine),
+        ))
     }

     d.mux = mux

     return d
 }

 func (d *DrainCloud) Run(ctx context.Context) error {
     return d.mux.Run()
 }
-
-func writeError(ctx *gin.Context, err error) {
-    switch {
-    case errors.Is(err, ErrorAccessDenied):
-        ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
-            Code:    http.StatusForbidden,
-            Message: err.Error(),
-        })
-    case errors.Is(err, ErrorSessionExpired):
-        ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
-            Code:    http.StatusForbidden,
-            Message: err.Error(),
-        })
-    default:
-        ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
-            Code:    http.StatusInternalServerError,
-            Message: "Internal Error",
-        })
-    }
-}

@@ -1,63 +0,0 @@ (deleted file)
package app

import (
    "crypto/rand"
    "encoding/base64"
    "errors"
    "fmt"
    "log/slog"
    "net/http"

    "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
    "github.com/gin-gonic/gin"
)

const (
    csrfTokenCookie    = "__Csrf_token"
    sessionTokenCookie = "__Session_token"
)

var (
    ErrorUnauthorized = errors.New("unauthorized")
)

func (d *DrainCloud) authorize(ctx *gin.Context) (*models.Session, error) {
    session, err := d.getSession(ctx)
    if err != nil && !errors.Is(err, http.ErrNoCookie) {
        return nil, ErrorUnauthorized
    }

    if session == nil {
        return nil, ErrorUnauthorized
    }

    if err := validateSession(ctx, session); err != nil {
        // TODO add audit log entry
        return nil, ErrorUnauthorized
    }

    logger.Debug(ctx, "[authorize] user authorized", slog.String("session_id", session.ID.String()))

    return session, nil
}

func validateLoginAndPassword(login, password string) error {
    if len(login) < 4 {
        return fmt.Errorf("login must be longer than 8 chars")
    }

    if len(password) < 6 {
        return fmt.Errorf("password must be longer than 8 chars")
    }

    return nil
}

func generateSessionToken(length int) (string, error) {
    bytes := make([]byte, length)
    if _, err := rand.Read(bytes); err != nil {
        return "", fmt.Errorf("failed to generate token: %w", err)
    }
    return base64.URLEncoding.EncodeToString(bytes), nil
}

@@ -1,37 +1,37 @@
package handlers

import (
    "crypto/rand"
    "encoding/base64"
    "errors"
    "fmt"
)

const (
    csrfTokenCookie    = "__Csrf_token"
    sessionTokenCookie = "__Session_token"
)

var (
    ErrorUnauthorized = errors.New("unauthorized")
)

func validateLoginAndPassword(login, password string) error {
    if len(login) < 4 {
        return fmt.Errorf("login must be longer than 8 chars")
    }

    if len(password) < 6 {
        return fmt.Errorf("password must be longer than 8 chars")
    }

    return nil
}

func generateSessionToken(length int) (string, error) {
    bytes := make([]byte, length)
    if _, err := rand.Read(bytes); err != nil {
        return "", fmt.Errorf("failed to generate token: %w", err)
    }
    return base64.URLEncoding.EncodeToString(bytes), nil
}

internal/app/handlers/logon.go (new file, 179 lines)
@@ -0,0 +1,179 @@
package handlers

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "log/slog"
    "net/http"
    "time"

    "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
    "github.com/google/uuid"
    "golang.org/x/crypto/bcrypt"
)

type LogonHandler struct {
    *handler.BaseHandler
    authStorage storage.AuthStorage
}

func NewLogonHandler(
    authStorage storage.AuthStorage,
) *LogonHandler {
    h := &LogonHandler{
        authStorage: authStorage,
        BaseHandler: handler.New().
            WithName("logonv1").
            WithRequiredResolveParams(),
    }
    h.WithProcessFunc(h.process)
    return h
}

func (h *LogonHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error {
    logger.Debug(ctx, "[Logon] new request")

    body := new(domain.LogonRequest)
    err := json.Unmarshal(req.Body, body)
    if err != nil {
        logger.Error(ctx, "[Logon] failed to bind request", logger.Err(err))
        w.Write(ctx, map[string]string{
            "error": "bad request",
        }, handler.WithCode(http.StatusBadRequest))
        return nil
    }

    session, err := h.getSession(ctx, req)
    if err != nil && !errors.Is(err, http.ErrNoCookie) {
        return err
    }

    if session != nil {
        if err := validateSession(req, session); err != nil {
            // TODO add audit log entry
            return err
        }
        logger.Debug(ctx, "[login] user is already logged in", slog.String("session_id", session.ID.String()))
        w.Write(ctx, &domain.LogonResponse{
            Ok: true,
        })
        return nil
    }
    logger.Debug(ctx, "[login] session not founh. trying to authorize")

    resp, err := h.login(ctx, body, session, w)
    if err != nil {
        logger.Error(ctx, "[Logon] failed to login user", logger.Err(err))
        return err
    }

    w.Write(ctx, resp)
    return nil
}

func (h *LogonHandler) login(ctx context.Context, req *domain.LogonRequest, session *auth.Session, w handler.Writer) (*domain.LogonResponse, error) {
    passwordHash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10)
    if err != nil {
        logger.Error(ctx, "[login] failed to generate password hash", logger.Err(err))
        return nil, fmt.Errorf("failed to generate password hash: %w", err)
    }

    user, err := h.authStorage.GetUserByLogin(ctx, req.Login)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch user by login: %w", err)
    }

    if bytes.Equal(passwordHash, user.PasswordHash) {
        logger.Warn(ctx, "[login] failed to login user. passwords hashes not equal")
        return nil, errs.ErrorAccessDenied
    }

    sessionCreatedAt := time.Now()
    sessionExpiredAt := sessionCreatedAt.Add(time.Hour * 24 * 7)

    sessionToken, err := generateSessionToken(100)
    if err != nil {
        return nil, fmt.Errorf("failed to generate a session token: %w", err)
    }
    w.SetCookie(sessionTokenCookie, sessionToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, true)

    csrfToken, err := generateSessionToken(100)
    if err != nil {
        return nil, fmt.Errorf("failed to generate a csrf token: %w", err)
    }
    w.SetCookie(csrfTokenCookie, csrfToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, false)

    sessionID, err := uuid.NewV7()
    if err != nil {
        return nil, fmt.Errorf("failed to generate session id: %w", err)
    }

    if _, err = h.authStorage.AddSession(ctx, &auth.Session{
        ID:           sessionID,
        SessionToken: sessionToken,
        CsrfToken:    csrfToken,
        UserID:       user.ID,
        CreatedAt:    sessionCreatedAt,
        ExpiredAt:    sessionExpiredAt,
    }); err != nil {
        return nil, fmt.Errorf("failed to save session: %w", err)
    }

    // TODO add audit log entry

    return &domain.LogonResponse{
        Ok: true,
    }, nil
}

func (h *LogonHandler) getSession(ctx context.Context, req *common.Request) (*auth.Session, error) {
    token, err := common.GetValue[string](req.Metadata, sessionTokenCookie)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err)
    }
    csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
    }

    if len(csrfToken) == 0 || len(token) == 0 {
        return nil, fmt.Errorf("session token or csrf token is empty")
    }

    session, err := h.authStorage.GetSession(ctx, token)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch session from repo: %w", err)
    }

    return session, nil
}

func validateSession(req *common.Request, session *auth.Session) error {
    if session == nil {
        return errs.ErrorAccessDenied
    }

    csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie)
    if err != nil {
        return fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
    }

    if session.CsrfToken != csrfToken {
        return errs.ErrorAccessDenied
    }

    if session.ExpiredAt.Before(time.Now()) {
        return errs.ErrorSessionExpired
    }

    return nil
}

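For reference, bcrypt hashes produced by GenerateFromPassword embed a random salt, so two hashes of the same password are never byte-equal; verification is normally done with CompareHashAndPassword. A minimal sketch, not part of this change set, assuming the stored hash comes from registration:

// Sketch only: conventional bcrypt verification with golang.org/x/crypto/bcrypt.
// storedHash is the hash persisted by the register handler.
func checkPassword(storedHash []byte, password string) error {
    // Returns nil on match, bcrypt.ErrMismatchedHashAndPassword otherwise.
    return bcrypt.CompareHashAndPassword(storedHash, []byte(password))
}
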
@@ -1,121 +1,121 @@
Relative to the previous revision, the handler now imports storage/models/auth and builds auth.User / auth.Session values instead of models.User / models.Session; the rest is unchanged.

package handlers

import (
    "context"
    "encoding/json"
    "fmt"
    "time"

    "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
    "github.com/google/uuid"
    "golang.org/x/crypto/bcrypt"
)

type RegisterHandler struct {
    *handler.BaseHandler
    authStorage storage.AuthStorage
}

func NewRegisterHandler(
    authStorage storage.AuthStorage,
) *RegisterHandler {
    h := &RegisterHandler{
        authStorage: authStorage,
        BaseHandler: handler.New().
            WithName("registerv1").
            WithRequiredResolveParams(),
    }
    h.WithProcessFunc(h.process)
    return h
}

func (h *RegisterHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error {
    regReq := new(domain.RegisterRequest)

    if err := json.Unmarshal(req.Body, regReq); err != nil {
        return err
    }

    resp, err := h.register(ctx, regReq, w)
    if err != nil {
        return fmt.Errorf("failed to register user: %w", err)
    }

    w.Write(ctx, resp)

    return nil
}

func (d *RegisterHandler) register(
    ctx context.Context,
    req *domain.RegisterRequest,
    w handler.Writer,
) (*domain.RegisterResponse, error) {
    if err := validateLoginAndPassword(req.Login, req.Password); err != nil {
        return nil, fmt.Errorf("invalid creds: %w", err)
    }

    passwordHash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10)
    if err != nil {
        logger.Error(ctx, "[register] failed to generate password hash", logger.Err(err))
        return nil, fmt.Errorf("failed to generate password hash: %w", err)
    }

    userID, err := uuid.NewV7()
    if err != nil {
        return nil, fmt.Errorf("failed to generate user id: %w", err)
    }

    user := &auth.User{
        ID:           userID,
        Username:     req.Login,
        Login:        req.Login,
        PasswordHash: passwordHash,
    }

    err = d.authStorage.AddUser(ctx, userID, user.Login, user.Username, user.PasswordHash)
    if err != nil {
        return nil, fmt.Errorf("failed to add new user: %w", err)
    }

    sessionCreatedAt := time.Now()
    sessionExpiredAt := sessionCreatedAt.Add(time.Hour * 24 * 7)

    sessionToken, err := generateSessionToken(100)
    if err != nil {
        return nil, fmt.Errorf("failed to generate a session token: %w", err)
    }
    w.SetCookie(sessionTokenCookie, sessionToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, true)

    csrfToken, err := generateSessionToken(100)
    if err != nil {
        return nil, fmt.Errorf("failed to generate a csrf token: %w", err)
    }
    w.SetCookie(csrfTokenCookie, csrfToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, false)

    sessionID, err := uuid.NewV7()
    if err != nil {
        return nil, fmt.Errorf("failed to generate session id: %w", err)
    }

    if _, err = d.authStorage.AddSession(ctx, &auth.Session{
        ID:           sessionID,
        SessionToken: sessionToken,
        CsrfToken:    csrfToken,
        UserID:       user.ID,
        CreatedAt:    sessionCreatedAt,
        ExpiredAt:    sessionExpiredAt,
    }); err != nil {
        return nil, fmt.Errorf("failed to save session: %w", err)
    }

    return &domain.RegisterResponse{
        Ok: true,
    }, nil
}

internal/app/handlers/register_resolver.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package handlers

import (
    "context"
    "fmt"

    "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
    resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
)

// TODO. Maybe remove
type InternalRegisterResolverHandler struct {
    *handler.BaseHandler
    resolveDispatcher *resolvedispatcher.ResolveDispatcher
}

func NewInternalRegisterResolverHandler(
    resolveDispatcher *resolvedispatcher.ResolveDispatcher,
) *InternalRegisterResolverHandler {
    h := &InternalRegisterResolverHandler{
        resolveDispatcher: resolveDispatcher,
    }
    h.BaseHandler = handler.New().
        WithName("internal_registerresolver").
        WithProcessFunc(h.process)

    return h
}

func (h *InternalRegisterResolverHandler) process(
    ctx context.Context,
    req *common.Request,
    w handler.Writer,
) error {
    //_, ok := h.resolveDispatcher.GetResolver()
    return fmt.Errorf("uniplemented")
}

internal/app/handlers/upload_file.go (new file, 95 lines)
@@ -0,0 +1,95 @@
package handlers

import (
    "context"

    "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
    filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/auth"
)

const (
    maxFileSize = 10 << 30
)

type UploadFileHandler struct {
    *handler.BaseHandler
    filesEngine *filesengine.FilesEngine
}

func NewUploadFileHandler(
    filesEngine *filesengine.FilesEngine,
) *UploadFileHandler {
    h := &UploadFileHandler{
        filesEngine: filesEngine,
        BaseHandler: handler.New().
            WithName("uploadfilev1").
            WithRequiredResolveParams(
                auth.AuthResolverV1Name,
                // TODO with MultipartReaderResolverV1Name
                // or
                // MultipartDataResolverV1Name
            ),
    }
    h.WithProcessFunc(h.process)
    return h
}

func (d *UploadFileHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error {
    // TODO fetch (interface{ ParseMultipartForm(size int) error }) from req.GetValue[ParseMultipartFormer](req.ResolveValues)

    // if err := req.RawReq.ParseMultipartForm(maxFileSize); err != nil {
    //     logger.Error(ctx, "uploadFile handler error", logger.Err(err))
    //     return err
    // }

    // if err := d.uploadFile(ctx, userID); err != nil {
    //     logger.Error(ctx, "uploadFile handle", logger.Err(err))
    //     writeError(ctx, err)
    //     return
    // }
    return nil
}

// func (d *UploadFileHandler) uploadFile(ctx context.Context, req *common.Request) error {
//     title := ctx.PostForm("file")
//     logger.Info(ctx, "uploadFile", slog.Any("postForm data", spew.Sdump(title)))

//     file, header, err := req.RawReq.FormFile("file")
//     if err != nil {
//         return err
//     }
//     logger.Info(ctx, "uploadFile", slog.Any("header", spew.Sdump(header)))

//     data, err := io.ReadAll(file)
//     if err != nil {
//         return err
//     }

//     ext := parseExtension(header.Filename)

//     id, err := d.filesEngine.SaveFile(ctx, filesengine.File{
//         Name:   header.Filename,
//         UserID: userID,
//         Data:   data,
//         Ext:    ext,
//         Size:   int64(len(data)),
//         Type:   "", // what is Type supposed to hold?
//     })
//     if err != nil {
//         return fmt.Errorf("failed to save file: %w", err)
//     }
//     logger.Debug(ctx, "new file id", "id", id)

//     return nil
// }

// func parseExtension(filename string) string {
//     parts := strings.Split(filename, ".")
//     if len(parts) == 0 {
//         return ""
//     }

//     return parts[len(parts)-1]
// }

@@ -1,163 +0,0 @@ (deleted file)
package app

import (
    "bytes"
    "errors"
    "fmt"
    "log/slog"
    "net/http"
    "time"

    "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
    "github.com/gin-gonic/gin"
    "github.com/google/uuid"
    "golang.org/x/crypto/bcrypt"
)

var (
    ErrorAccessDenied   = errors.New("access denied")
    ErrorSessionExpired = errors.New("session expired")
)

func (d *DrainCloud) Login(ctx *gin.Context) {
    logger.Debug(ctx, "[Login] new request")

    req := new(domain.LoginRequest)
    err := ctx.BindJSON(req)
    if err != nil {
        logger.Error(ctx, "[Login] failed to bind request", logger.Err(err))
        ctx.JSON(http.StatusBadRequest, map[string]string{
            "error": "bad request",
        })
        return
    }

    resp, err := d.login(ctx, req)
    if err != nil {
        logger.Error(ctx, "[Login] failed to login user", logger.Err(err))
        ctx.JSON(http.StatusInternalServerError, map[string]string{
            "error": err.Error(),
        })
        return
    }

    ctx.JSON(http.StatusOK, resp)
}

func (d *DrainCloud) login(ctx *gin.Context, req *domain.LoginRequest) (*domain.LoginResponse, error) {
    session, err := d.getSession(ctx)
    if err != nil && !errors.Is(err, http.ErrNoCookie) {
        return nil, err
    }

    if session != nil {
        if err := validateSession(ctx, session); err != nil {
            // TODO add audit log entry
            return nil, err
        }

        logger.Debug(ctx, "[login] user is already logged in", slog.String("session_id", session.ID.String()))
        return &domain.LoginResponse{
            Ok: true,
        }, nil
    }
    logger.Debug(ctx, "[login] session not found. trying to authorize")

    passwordHash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10)
    if err != nil {
        logger.Error(ctx, "[login] failed to generate password hash", logger.Err(err))
        return nil, fmt.Errorf("failed to generate password hash: %w", err)
    }

    user, err := d.database.GetUserByLogin(ctx, req.Login)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch user by login: %w", err)
    }

    if bytes.Equal(passwordHash, user.PasswordHash) {
        logger.Warn(ctx, "[login] failed to login user. passwords hashes not equal")
        return nil, ErrorAccessDenied
    }

    sessionCreatedAt := time.Now()
    sessionExpiredAt := sessionCreatedAt.Add(time.Hour * 24 * 7)

    sessionToken, err := generateSessionToken(100)
    if err != nil {
        return nil, fmt.Errorf("failed to generate a session token: %w", err)
    }
    ctx.SetCookie(sessionTokenCookie, sessionToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, true)

    csrfToken, err := generateSessionToken(100)
    if err != nil {
        return nil, fmt.Errorf("failed to generate a csrf token: %w", err)
    }
    ctx.SetCookie(csrfTokenCookie, csrfToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, false)

    sessionID, err := uuid.NewV7()
    if err != nil {
        return nil, fmt.Errorf("failed to generate session id: %w", err)
    }

    if _, err = d.database.AddSession(ctx, &models.Session{
        ID:           sessionID,
        SessionToken: sessionToken,
        CsrfToken:    csrfToken,
        UserID:       user.ID,
        CreatedAt:    sessionCreatedAt,
        ExpiredAt:    sessionExpiredAt,
    }); err != nil {
        return nil, fmt.Errorf("failed to save session: %w", err)
    }

    // TODO add audit log entry

    return &domain.LoginResponse{
        Ok: true,
    }, nil
}

func (d *DrainCloud) getSession(ctx *gin.Context) (*models.Session, error) {
    token, err := ctx.Cookie(sessionTokenCookie)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err)
    }
    csrfToken, err := ctx.Cookie(csrfTokenCookie)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
    }

    if len(csrfToken) == 0 || len(token) == 0 {
        return nil, fmt.Errorf("session token or csrf token is empty")
    }

    session, err := d.database.GetSession(ctx, token)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch session from repo: %w", err)
    }

    return session, nil
}

func validateSession(ctx *gin.Context, session *models.Session) error {
    if session == nil {
        return ErrorAccessDenied
    }

    csrfToken, err := ctx.Cookie(csrfTokenCookie)
    if err != nil {
        return fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
    }

    if session.CsrfToken != csrfToken {
        return ErrorAccessDenied
    }

    if session.ExpiredAt.Before(time.Now()) {
        return ErrorSessionExpired
    }

    return nil
}

@@ -1,28 +0,0 @@ (deleted file)
package app

import (
    "git.optclblast.xyz/draincloud/draincloud-core/internal/reqcontext"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
    "github.com/gin-gonic/gin"
)

type authorizer interface {
    authorize(ctx *gin.Context) (*models.Session, error)
}

func WithAuth(handler gin.HandlerFunc, auth authorizer) gin.HandlerFunc {
    return func(ctx *gin.Context) {
        sess, err := auth.authorize(ctx)
        if err != nil {
            writeError(ctx, err)
            ctx.Abort()
            return
        }

        authCtx := reqcontext.WithSession(ctx.Request.Context(), sess)
        authCtx = reqcontext.WithUserID(authCtx, sess.UserID)
        ctx.Request = ctx.Request.WithContext(authCtx)

        handler(ctx)
    }
}

@@ -1,81 +0,0 @@ (deleted file)
package app

import (
    "fmt"
    "io"
    "log/slog"
    "strings"

    filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
    "git.optclblast.xyz/draincloud/draincloud-core/internal/reqcontext"
    "github.com/davecgh/go-spew/spew"
    "github.com/gin-gonic/gin"
    "github.com/google/uuid"
)

const (
    maxFileSize = 10 << 30
)

func (d *DrainCloud) UploadFile(ctx *gin.Context) {
    if err := ctx.Request.ParseMultipartForm(maxFileSize); err != nil {
        logger.Error(ctx, "uploadFile handler error", logger.Err(err))
        writeError(ctx, err)
        return
    }

    userID, err := reqcontext.GetUserID(ctx)
    if err != nil {
        writeError(ctx, ErrorAccessDenied)
        return
    }

    if err := d.uploadFile(ctx, userID); err != nil {
        logger.Error(ctx, "uploadFile handle", logger.Err(err))
        writeError(ctx, err)
        return
    }
}

func (d *DrainCloud) uploadFile(ctx *gin.Context, userID uuid.UUID) error {
    title := ctx.PostForm("file")
    logger.Info(ctx, "uploadFile", slog.Any("postForm data", spew.Sdump(title)))

    file, header, err := ctx.Request.FormFile("file")
    if err != nil {
        return err
    }
    logger.Info(ctx, "uploadFile", slog.Any("header", spew.Sdump(header)))

    data, err := io.ReadAll(file)
    if err != nil {
        return err
    }

    ext := parseExtension(header.Filename)

    id, err := d.filesEngine.SaveFile(ctx, filesengine.File{
        Name:   header.Filename,
        UserID: userID,
        Data:   data,
        Ext:    ext,
        Size:   int64(len(data)),
        Type:   "", // what is Type supposed to hold?
    })
    if err != nil {
        return fmt.Errorf("failed to save file: %w", err)
    }
    logger.Debug(ctx, "new file id", "id", id)

    return nil
}

func parseExtension(filename string) string {
    parts := strings.Split(filename, ".")
    if len(parts) == 0 {
        return ""
    }

    return parts[len(parts)-1]
}

@@ -1,40 +1,50 @@
 package closer

 import (
     "context"
     "errors"
+    "fmt"
+    "sync/atomic"

     "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
 )

 var globalCloser *Closer = &Closer{
     closeFns: make([]func() error, 0),
 }

 type Closer struct {
+    _lock    atomic.Bool
     closeFns []func() error
 }

 func (c *Closer) Add(fn func() error) {
+    if c._lock.Load() {
+        return
+    }
     c.closeFns = append(c.closeFns, fn)
 }

 func (c *Closer) Close() error {
+    if !c._lock.CompareAndSwap(false, true) {
+        return fmt.Errorf("already closed")
+    }
+
     var commonErr error
     for _, fn := range c.closeFns {
         if err := fn(); err != nil {
             logger.Error(context.Background(), "[closer][Close] error at close func call", logger.Err(err))
             commonErr = errors.Join(commonErr, err)
         }
     }

     return commonErr
 }

 func Add(fn func() error) {
     globalCloser.Add(fn)
 }

 func Close() error {
     return globalCloser.Close()
 }

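The closer now refuses new registrations once Close has run. A minimal usage sketch under assumptions (the import path and the shutdown wiring below are not part of this diff):

package main

import (
    "context"
    "os/signal"
    "syscall"

    // Import path assumed from the module layout shown elsewhere in this PR.
    "git.optclblast.xyz/draincloud/draincloud-core/internal/closer"
)

func main() {
    ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer cancel()

    // Register cleanup callbacks as resources are created.
    closer.Add(func() error {
        // e.g. close the database pool, stop listeners, flush logs
        return nil
    })

    <-ctx.Done()

    // Runs every registered callback once; a second Close returns "already closed".
    _ = closer.Close()
}
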
@ -1,105 +1,103 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type RequestPool struct {
|
||||
sp sync.Pool
|
||||
}
|
||||
|
||||
func (p *RequestPool) Get() *Request {
|
||||
r, _ := p.sp.Get().(*Request)
|
||||
return r
|
||||
}
|
||||
|
||||
func (p *RequestPool) Put(r *Request) {
|
||||
r.ID = ""
|
||||
r.Metadata = &sync.Map{}
|
||||
r.ResolveValues = &sync.Map{}
|
||||
r.Session = nil
|
||||
r.User = nil
|
||||
r.Body = nil
|
||||
p.sp.Put(r)
|
||||
}
|
||||
|
||||
func NewRequestPool() *RequestPool {
|
||||
return &RequestPool{
|
||||
sp: sync.Pool{
|
||||
New: func() any {
|
||||
return &Request{
|
||||
ResolveValues: &sync.Map{},
|
||||
Metadata: &sync.Map{},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type Request struct {
|
||||
ID string
|
||||
Session *models.Session
|
||||
User *models.User
|
||||
// ResolveValues - data required to process request.
|
||||
ResolveValues *sync.Map
|
||||
// Metadata - an additional data, usually added with preprocessing.
|
||||
Metadata *sync.Map
|
||||
// Request body
|
||||
Body []byte
|
||||
RawReq *http.Request
|
||||
}
|
||||
|
||||
// NewRequestFromHttp builds a new *Request struct from raw http Request. No auth data validated.
|
||||
func NewRequestFromHttp(pool *RequestPool, req *http.Request) *Request {
|
||||
out := pool.sp.Get().(*Request)
|
||||
|
||||
cookies := req.Cookies()
|
||||
headers := req.Header
|
||||
|
||||
out.Metadata = &sync.Map{}
|
||||
out.RawReq = req
|
||||
|
||||
for _, cookie := range cookies {
|
||||
out.Metadata.Store(cookie.Name, cookie.Value)
|
||||
}
|
||||
|
||||
for hname, hval := range headers {
|
||||
out.Metadata.Store(hname, hval)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
logger.Error(context.TODO(), "failed to read request body", logger.Err(err))
|
||||
}
|
||||
out.Body = body
|
||||
|
||||
reqID := uuid.NewString()
|
||||
out.ID = reqID
|
||||
return out
|
||||
}
|
||||
|
||||
func GetValue[T any](vals *sync.Map, key string) (T, error) {
|
||||
var out T
|
||||
if vals == nil {
|
||||
return out, fmt.Errorf("nil vals map")
|
||||
}
|
||||
rawVal, ok := vals.Load(key)
|
||||
if !ok {
|
||||
return out, fmt.Errorf("value not found in resolve values set")
|
||||
}
|
||||
|
||||
out, ok = rawVal.(T)
|
||||
if !ok {
|
||||
return out, fmt.Errorf("type of a value is unexpected")
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type RequestPool struct {
|
||||
sp sync.Pool
|
||||
}
|
||||
|
||||
func (p *RequestPool) Get() *Request {
|
||||
r, _ := p.sp.Get().(*Request)
|
||||
return r
|
||||
}
|
||||
|
||||
func (p *RequestPool) Put(r *Request) {
|
||||
r.ID = ""
|
||||
r.Metadata = &sync.Map{}
|
||||
r.ResolveValues = &sync.Map{}
|
||||
r.Session = nil
|
||||
r.User = nil
|
||||
r.Body = nil
|
||||
p.sp.Put(r)
|
||||
}
|
||||
|
||||
func NewRequestPool() *RequestPool {
|
||||
return &RequestPool{
|
||||
sp: sync.Pool{
|
||||
New: func() any {
|
||||
return &Request{
|
||||
ResolveValues: &sync.Map{},
|
||||
Metadata: &sync.Map{},
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
type Request struct {
|
||||
ID string
|
||||
Session *auth.Session
|
||||
User *auth.User
|
||||
// ResolveValues - data required to process request.
|
||||
ResolveValues *sync.Map
|
||||
// Metadata - an additional data, usually added with preprocessing.
|
||||
Metadata *sync.Map
|
||||
// Request body
|
||||
Body []byte
|
||||
}
|
||||
|
||||
// NewRequestFromHttp builds a new *Request struct from raw http Request. No auth data validated.
|
||||
func NewRequestFromHttp(pool *RequestPool, req *http.Request) *Request {
|
||||
out := pool.sp.Get().(*Request)
|
||||
|
||||
cookies := req.Cookies()
|
||||
headers := req.Header
|
||||
|
||||
out.Metadata = &sync.Map{}
|
||||
|
||||
for _, cookie := range cookies {
|
||||
out.Metadata.Store(cookie.Name, cookie.Value)
|
||||
}
|
||||
|
||||
for hname, hval := range headers {
|
||||
out.Metadata.Store(hname, hval)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
logger.Error(context.TODO(), "failed to read request body", logger.Err(err))
|
||||
}
|
||||
out.Body = body
|
||||
|
||||
reqID := uuid.NewString()
|
||||
out.ID = reqID
|
||||
return out
|
||||
}
|
||||
|
||||
func GetValue[T any](vals *sync.Map, key string) (T, error) {
|
||||
var out T
|
||||
if vals == nil {
|
||||
return out, fmt.Errorf("nil vals map")
|
||||
}
|
||||
rawVal, ok := vals.Load(key)
|
||||
if !ok {
|
||||
return out, fmt.Errorf("value not found in resolve values set")
|
||||
}
|
||||
|
||||
out, ok = rawVal.(T)
|
||||
if !ok {
|
||||
return out, fmt.Errorf("type of a value is unexpected")
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
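A small sketch of how RequestPool and NewRequestFromHttp might be combined inside a plain net/http handler; the route, port and response are placeholders, and the deferred Put resets the request and returns it to the pool.

package main

import (
	"net/http"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
)

func main() {
	pool := common.NewRequestPool()

	http.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) {
		req := common.NewRequestFromHttp(pool, r)
		defer pool.Put(req) // reset fields and hand the request back to the pool

		// ... pass req down the handler pipeline here ...
		_ = req.ID

		w.WriteHeader(http.StatusOK)
	})

	_ = http.ListenAndServe(":8080", nil)
}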
|
||||
|
@ -1,261 +1,261 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetValue_string(t *testing.T) {
|
||||
t.Parallel()
|
||||
type args struct {
|
||||
vals map[string]any
|
||||
key string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "1",
|
||||
},
|
||||
want: "123",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "value not presented",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "3",
|
||||
},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil map",
|
||||
args: args{
|
||||
vals: nil,
|
||||
key: "1",
|
||||
},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid type",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": 234,
|
||||
},
|
||||
key: "2",
|
||||
},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := GetValue[string](_mapToSyncMap(tt.args.vals), tt.args.key)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetValue() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetValue_struct(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type val struct {
|
||||
a int
|
||||
b string
|
||||
c bool
|
||||
}
|
||||
type args struct {
|
||||
vals map[string]any
|
||||
key string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want val
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
"2": "234",
|
||||
},
|
||||
key: "1",
|
||||
},
|
||||
want: val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "value not presented",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "3",
|
||||
},
|
||||
want: val{},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil map",
|
||||
args: args{
|
||||
vals: nil,
|
||||
key: "1",
|
||||
},
|
||||
want: val{},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid type",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": 234,
|
||||
},
|
||||
key: "2",
|
||||
},
|
||||
want: val{},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := GetValue[val](_mapToSyncMap(tt.args.vals), tt.args.key)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetValue() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetValue_structptr(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type val struct {
|
||||
a int
|
||||
b string
|
||||
c bool
|
||||
}
|
||||
type args struct {
|
||||
vals map[string]any
|
||||
key string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *val
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": &val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
"2": "234",
|
||||
},
|
||||
key: "1",
|
||||
},
|
||||
want: &val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "value not presented",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "3",
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil map",
|
||||
args: args{
|
||||
vals: nil,
|
||||
key: "1",
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid type",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": 234,
|
||||
},
|
||||
key: "2",
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := GetValue[*val](_mapToSyncMap(tt.args.vals), tt.args.key)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetValue() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func _mapToSyncMap(m map[string]any) *sync.Map {
|
||||
out := &sync.Map{}
|
||||
|
||||
for k, v := range m {
|
||||
out.Store(k, v)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
package common
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetValue_string(t *testing.T) {
|
||||
t.Parallel()
|
||||
type args struct {
|
||||
vals map[string]any
|
||||
key string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "1",
|
||||
},
|
||||
want: "123",
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "value not presented",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "3",
|
||||
},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil map",
|
||||
args: args{
|
||||
vals: nil,
|
||||
key: "1",
|
||||
},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid type",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": 234,
|
||||
},
|
||||
key: "2",
|
||||
},
|
||||
want: "",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := GetValue[string](_mapToSyncMap(tt.args.vals), tt.args.key)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetValue() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetValue_struct(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type val struct {
|
||||
a int
|
||||
b string
|
||||
c bool
|
||||
}
|
||||
type args struct {
|
||||
vals map[string]any
|
||||
key string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want val
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
"2": "234",
|
||||
},
|
||||
key: "1",
|
||||
},
|
||||
want: val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "value not presented",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "3",
|
||||
},
|
||||
want: val{},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil map",
|
||||
args: args{
|
||||
vals: nil,
|
||||
key: "1",
|
||||
},
|
||||
want: val{},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid type",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": 234,
|
||||
},
|
||||
key: "2",
|
||||
},
|
||||
want: val{},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := GetValue[val](_mapToSyncMap(tt.args.vals), tt.args.key)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetValue() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetValue_structptr(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
type val struct {
|
||||
a int
|
||||
b string
|
||||
c bool
|
||||
}
|
||||
type args struct {
|
||||
vals map[string]any
|
||||
key string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want *val
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "ok",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": &val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
"2": "234",
|
||||
},
|
||||
key: "1",
|
||||
},
|
||||
want: &val{
|
||||
a: 1,
|
||||
b: "2",
|
||||
c: true,
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "value not presented",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": "234",
|
||||
},
|
||||
key: "3",
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nil map",
|
||||
args: args{
|
||||
vals: nil,
|
||||
key: "1",
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "invalid type",
|
||||
args: args{
|
||||
vals: map[string]any{
|
||||
"1": "123",
|
||||
"2": 234,
|
||||
},
|
||||
key: "2",
|
||||
},
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := GetValue[*val](_mapToSyncMap(tt.args.vals), tt.args.key)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetValue() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func _mapToSyncMap(m map[string]any) *sync.Map {
|
||||
out := &sync.Map{}
|
||||
|
||||
for k, v := range m {
|
||||
out.Store(k, v)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
@ -1,70 +1,70 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Provider interface {
|
||||
GetValue(ctx context.Context, key Key) Value
|
||||
}
|
||||
|
||||
type Key string
|
||||
|
||||
type Value interface {
|
||||
Int() int
|
||||
String() string
|
||||
Float() float32
|
||||
Duration() time.Duration
|
||||
}
|
||||
|
||||
type DurationValue time.Duration
|
||||
|
||||
type FloatValue struct {
|
||||
EmptyValue
|
||||
Val float32
|
||||
}
|
||||
|
||||
func (v FloatValue) Float() float32 {
|
||||
return v.Val
|
||||
}
|
||||
|
||||
type StringValue struct {
|
||||
EmptyValue
|
||||
Val string
|
||||
}
|
||||
|
||||
func (v StringValue) String() string {
|
||||
return v.Val
|
||||
}
|
||||
|
||||
type IntValue struct {
|
||||
EmptyValue
|
||||
Val int
|
||||
}
|
||||
|
||||
func (v IntValue) Int() int {
|
||||
return v.Val
|
||||
}
|
||||
|
||||
func (v IntValue) Float() float32 {
|
||||
return float32(v.Val)
|
||||
}
|
||||
|
||||
type EmptyValue struct{}
|
||||
|
||||
func (v EmptyValue) Int() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (v EmptyValue) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (v EmptyValue) Float() float32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (v EmptyValue) Duration() time.Duration {
|
||||
return 0
|
||||
}
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Provider interface {
|
||||
GetValue(ctx context.Context, key Key) Value
|
||||
}
|
||||
|
||||
type Key string
|
||||
|
||||
type Value interface {
|
||||
Int() int
|
||||
String() string
|
||||
Float() float64
|
||||
Duration() time.Duration
|
||||
}
|
||||
|
||||
type DurationValue time.Duration
|
||||
|
||||
type FloatValue struct {
|
||||
EmptyValue
|
||||
Val float64
|
||||
}
|
||||
|
||||
func (v FloatValue) Float() float64 {
|
||||
return v.Val
|
||||
}
|
||||
|
||||
type StringValue struct {
|
||||
EmptyValue
|
||||
Val string
|
||||
}
|
||||
|
||||
func (v StringValue) String() string {
|
||||
return v.Val
|
||||
}
|
||||
|
||||
type IntValue struct {
|
||||
EmptyValue
|
||||
Val int
|
||||
}
|
||||
|
||||
func (v IntValue) Int() int {
|
||||
return v.Val
|
||||
}
|
||||
|
||||
func (v IntValue) Float() float64 {
|
||||
return float64(v.Val)
|
||||
}
|
||||
|
||||
type EmptyValue struct{}
|
||||
|
||||
func (v EmptyValue) Int() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (v EmptyValue) String() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (v EmptyValue) Float() float64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (v EmptyValue) Duration() time.Duration {
|
||||
return 0
|
||||
}
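A short sketch of reading a value through the Provider interface; the key name and the fallback are hypothetical. Unknown keys come back as EmptyValue, so the zero value acts as the default.

package example

import (
	"context"
	"fmt"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/config"
)

func printUploadLimit(ctx context.Context, provider config.Provider) {
	// EmptyValue returns 0 for missing keys, so we can fall back explicitly.
	limit := provider.GetValue(ctx, config.Key("files.max_upload_mb")).Int()
	if limit == 0 {
		limit = 64 // hypothetical default
	}
	fmt.Println("upload limit (MB):", limit)
}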
|
||||
|
@ -1 +1 @@
package externalprovider
@ -1,30 +1,30 @@
|
||||
package natskv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"github.com/nats-io/nats.go/jetstream"
|
||||
)
|
||||
|
||||
type Provider struct {
|
||||
cc jetstream.KeyValue
|
||||
}
|
||||
|
||||
func New(
|
||||
ctx context.Context,
|
||||
js jetstream.JetStream,
|
||||
) *Provider {
|
||||
kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
|
||||
Bucket: "rtc",
|
||||
Description: "Real Time Config",
|
||||
Storage: jetstream.FileStorage,
|
||||
Replicas: 2,
|
||||
Compression: true,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "[natskv][New] failed to initialize rtc", logger.Err(err))
|
||||
}
|
||||
|
||||
return &Provider{cc: kv}
|
||||
}
|
||||
package natskv
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"github.com/nats-io/nats.go/jetstream"
|
||||
)
|
||||
|
||||
type Provider struct {
|
||||
cc jetstream.KeyValue
|
||||
}
|
||||
|
||||
func New(
|
||||
ctx context.Context,
|
||||
js jetstream.JetStream,
|
||||
) *Provider {
|
||||
kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
|
||||
Bucket: "rtc",
|
||||
Description: "Real Time Config",
|
||||
Storage: jetstream.FileStorage,
|
||||
Replicas: 2,
|
||||
Compression: true,
|
||||
})
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "[natskv][New] failed to initialize rtc", logger.Err(err))
|
||||
}
|
||||
|
||||
return &Provider{cc: kv}
|
||||
}
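A hedged wiring sketch for the JetStream-backed provider. The NATS URL and the natskv import path are assumptions (the diff does not show the file path); only the New constructor from the diff is used.

package example

import (
	"context"

	natskv "git.optclblast.xyz/draincloud/draincloud-core/internal/config/natskv" // assumed path
	"github.com/nats-io/nats.go"
	"github.com/nats-io/nats.go/jetstream"
)

func newRTCProvider(ctx context.Context) (*natskv.Provider, error) {
	nc, err := nats.Connect("nats://127.0.0.1:4222") // placeholder URL
	if err != nil {
		return nil, err
	}

	js, err := jetstream.New(nc)
	if err != nil {
		return nil, err
	}

	// New creates (or reuses) the "rtc" key-value bucket and wraps it.
	return natskv.New(ctx, js), nil
}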
|
||||
|
@ -1,123 +1,123 @@
|
||||
package staticprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/config"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var _ config.Provider = new(staticProvider)
|
||||
|
||||
type StaticProvider interface {
|
||||
config.Provider
|
||||
}
|
||||
|
||||
type staticProvider struct {
|
||||
m sync.RWMutex
|
||||
rawValues map[string]any
|
||||
}
|
||||
|
||||
func (p *staticProvider) GetValue(ctx context.Context, key config.Key) config.Value {
|
||||
p.m.RLock()
|
||||
defer p.m.RUnlock()
|
||||
rawVal, ok := p.rawValues[string(key)]
|
||||
if !ok {
|
||||
return config.EmptyValue{}
|
||||
}
|
||||
|
||||
switch val := rawVal.(type) {
|
||||
case int:
|
||||
return config.IntValue{
|
||||
Val: val,
|
||||
}
|
||||
case string:
|
||||
return config.StringValue{
|
||||
Val: val,
|
||||
}
|
||||
case float32:
|
||||
return config.FloatValue{
|
||||
Val: val,
|
||||
}
|
||||
default:
|
||||
return config.EmptyValue{}
|
||||
}
|
||||
}
|
||||
|
||||
type newStaticProviderOptions struct {
|
||||
configName string
|
||||
configDirPath string
|
||||
configFileType string
|
||||
}
|
||||
|
||||
func mustDefaultNewStaticProviderOptions(ctx context.Context) *newStaticProviderOptions {
|
||||
ex, err := os.Executable()
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "failed to get executable location", logger.Err(err))
|
||||
}
|
||||
|
||||
exPath := filepath.Dir(ex)
|
||||
|
||||
return &newStaticProviderOptions{
|
||||
configName: "config",
|
||||
configDirPath: exPath,
|
||||
configFileType: "yaml",
|
||||
}
|
||||
}
|
||||
|
||||
type NewStaticProviderOption func(o *newStaticProviderOptions)
|
||||
|
||||
func WithConfigDir(path string) NewStaticProviderOption {
|
||||
return func(o *newStaticProviderOptions) {
|
||||
o.configDirPath = path
|
||||
}
|
||||
}
|
||||
|
||||
func WithConfigType(t string) NewStaticProviderOption {
|
||||
return func(o *newStaticProviderOptions) {
|
||||
o.configFileType = t
|
||||
}
|
||||
}
|
||||
|
||||
func WithConfigName(name string) NewStaticProviderOption {
|
||||
return func(o *newStaticProviderOptions) {
|
||||
o.configName = name
|
||||
}
|
||||
}
|
||||
|
||||
func NewStaticProvider(
|
||||
ctx context.Context,
|
||||
opts ...NewStaticProviderOption,
|
||||
) (*staticProvider, error) {
|
||||
o := mustDefaultNewStaticProviderOptions(ctx)
|
||||
for _, opt := range opts {
|
||||
opt(o)
|
||||
}
|
||||
|
||||
// TODO: check if the config file exists
|
||||
|
||||
provider := &staticProvider{
|
||||
rawValues: make(map[string]any),
|
||||
}
|
||||
|
||||
viper.SetConfigName(o.configName)
|
||||
viper.SetConfigType(o.configFileType)
|
||||
viper.AddConfigPath(o.configDirPath)
|
||||
|
||||
viper.WatchConfig()
|
||||
|
||||
viper.OnConfigChange(func(_ fsnotify.Event) {
|
||||
provider.m.Lock()
|
||||
defer provider.m.Unlock()
|
||||
provider.rawValues = viper.AllSettings()
|
||||
})
|
||||
|
||||
provider.rawValues = viper.AllSettings()
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
package staticprovider
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/config"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var _ config.Provider = new(staticProvider)
|
||||
|
||||
type StaticProvider interface {
|
||||
config.Provider
|
||||
}
|
||||
|
||||
type staticProvider struct {
|
||||
m sync.RWMutex
|
||||
rawValues map[string]any
|
||||
}
|
||||
|
||||
func (p *staticProvider) GetValue(ctx context.Context, key config.Key) config.Value {
|
||||
p.m.RLock()
|
||||
defer p.m.RUnlock()
|
||||
rawVal, ok := p.rawValues[string(key)]
|
||||
if !ok {
|
||||
return config.EmptyValue{}
|
||||
}
|
||||
|
||||
switch val := rawVal.(type) {
|
||||
case int:
|
||||
return config.IntValue{
|
||||
Val: val,
|
||||
}
|
||||
case string:
|
||||
return config.StringValue{
|
||||
Val: val,
|
||||
}
|
||||
case float64:
|
||||
return config.FloatValue{
|
||||
Val: val,
|
||||
}
|
||||
default:
|
||||
return config.EmptyValue{}
|
||||
}
|
||||
}
|
||||
|
||||
type newStaticProviderOptions struct {
|
||||
configName string
|
||||
configDirPath string
|
||||
configFileType string
|
||||
}
|
||||
|
||||
func mustDefaultNewStaticProviderOptions(ctx context.Context) *newStaticProviderOptions {
|
||||
ex, err := os.Executable()
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "failed to get executable location", logger.Err(err))
|
||||
}
|
||||
|
||||
exPath := filepath.Dir(ex)
|
||||
|
||||
return &newStaticProviderOptions{
|
||||
configName: "config",
|
||||
configDirPath: exPath,
|
||||
configFileType: "yaml",
|
||||
}
|
||||
}
|
||||
|
||||
type NewStaticProviderOption func(o *newStaticProviderOptions)
|
||||
|
||||
func WithConfigDir(path string) NewStaticProviderOption {
|
||||
return func(o *newStaticProviderOptions) {
|
||||
o.configDirPath = path
|
||||
}
|
||||
}
|
||||
|
||||
func WithConfigType(t string) NewStaticProviderOption {
|
||||
return func(o *newStaticProviderOptions) {
|
||||
o.configFileType = t
|
||||
}
|
||||
}
|
||||
|
||||
func WithConfigName(name string) NewStaticProviderOption {
|
||||
return func(o *newStaticProviderOptions) {
|
||||
o.configName = name
|
||||
}
|
||||
}
|
||||
|
||||
func NewStaticProvider(
|
||||
ctx context.Context,
|
||||
opts ...NewStaticProviderOption,
|
||||
) (*staticProvider, error) {
|
||||
o := mustDefaultNewStaticProviderOptions(ctx)
|
||||
for _, opt := range opts {
|
||||
opt(o)
|
||||
}
|
||||
|
||||
// TODO: check if the config file exists
|
||||
|
||||
provider := &staticProvider{
|
||||
rawValues: make(map[string]any),
|
||||
}
|
||||
|
||||
viper.SetConfigName(o.configName)
|
||||
viper.SetConfigType(o.configFileType)
|
||||
viper.AddConfigPath(o.configDirPath)
|
||||
|
||||
viper.WatchConfig()
|
||||
|
||||
viper.OnConfigChange(func(_ fsnotify.Event) {
|
||||
provider.m.Lock()
|
||||
defer provider.m.Unlock()
|
||||
provider.rawValues = viper.AllSettings()
|
||||
})
|
||||
|
||||
provider.rawValues = viper.AllSettings()
|
||||
|
||||
return provider, nil
|
||||
}
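A sketch of constructing the static provider with its functional options; the import path and the config directory are assumptions, while the option names come straight from the diff.

package example

import (
	"context"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/config"
	staticprovider "git.optclblast.xyz/draincloud/draincloud-core/internal/config/staticprovider" // assumed path
)

func loadStaticConfig(ctx context.Context) (config.Provider, error) {
	// Defaults are the executable directory, "config" and "yaml"; the options override them.
	return staticprovider.NewStaticProvider(ctx,
		staticprovider.WithConfigDir("./configs"), // hypothetical directory
		staticprovider.WithConfigName("config"),
		staticprovider.WithConfigType("yaml"),
	)
}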
|
||||
|
@ -1,46 +1,46 @@
|
||||
package cleanupsessions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
)
|
||||
|
||||
// TODO set with config
|
||||
const cronInterval = time.Minute * 10
|
||||
|
||||
type ExpiredSessionsRemover interface {
|
||||
RemoveExpiredSessions(ctx context.Context) error
|
||||
}
|
||||
|
||||
type CleanupSessionCron struct {
|
||||
db ExpiredSessionsRemover
|
||||
}
|
||||
|
||||
func New(db ExpiredSessionsRemover) *CleanupSessionCron {
|
||||
return &CleanupSessionCron{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CleanupSessionCron) Run(ctx context.Context) {
|
||||
logger.Info(ctx, "[CleanupSessionCron] running cron")
|
||||
go func() {
|
||||
t := time.NewTicker(cronInterval)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Warn(ctx, "[CleanupSessionCron] context cancelled")
|
||||
return
|
||||
case <-t.C:
|
||||
logger.Notice(ctx, "[CleanupSessionCron] cleanup started")
|
||||
t.Reset(cronInterval)
|
||||
if err := c.db.RemoveExpiredSessions(ctx); err != nil {
|
||||
logger.Error(ctx, "[CleanupSessionCron] failed to remove expired sessions", logger.Err(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
package cleanupsessions
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
)
|
||||
|
||||
// TODO set with config
|
||||
const cronInterval = time.Minute * 10
|
||||
|
||||
type ExpiredSessionsRemover interface {
|
||||
RemoveExpiredSessions(ctx context.Context) error
|
||||
}
|
||||
|
||||
type CleanupSessionCron struct {
|
||||
db ExpiredSessionsRemover
|
||||
}
|
||||
|
||||
func New(db ExpiredSessionsRemover) *CleanupSessionCron {
|
||||
return &CleanupSessionCron{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *CleanupSessionCron) Run(ctx context.Context) {
|
||||
logger.Info(ctx, "[CleanupSessionCron] running cron")
|
||||
go func() {
|
||||
t := time.NewTicker(cronInterval)
|
||||
defer t.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Warn(ctx, "[CleanupSessionCron] context cancelled")
|
||||
return
|
||||
case <-t.C:
|
||||
logger.Notice(ctx, "[CleanupSessionCron] cleanup started")
|
||||
t.Reset(cronInterval)
|
||||
if err := c.db.RemoveExpiredSessions(ctx); err != nil {
|
||||
logger.Error(ctx, "[CleanupSessionCron] failed to remove expired sessions", logger.Err(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
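A minimal wiring sketch for the cleanup cron; the import path is an assumption, and the call returns immediately because Run spawns its own goroutine driven by the ticker.

package example

import (
	"context"

	cleanupsessions "git.optclblast.xyz/draincloud/draincloud-core/internal/cron/cleanupsessions" // assumed path
)

// startCrons wires the session-cleanup cron against any storage that can
// remove expired sessions.
func startCrons(ctx context.Context, db cleanupsessions.ExpiredSessionsRemover) {
	cleanupsessions.New(db).Run(ctx)
}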
|
||||
|
@ -1,7 +1,7 @@
package cron

import "context"

type Cron interface {
	Run(ctx context.Context)
}
@ -1 +1 @@
package domain
24
internal/domain/fs_link.go
Normal file
@ -0,0 +1,24 @@
|
||||
package domain
|
||||
|
||||
import "fmt"
|
||||
|
||||
type StorageType int
|
||||
|
||||
const (
|
||||
StorageTypeFS StorageType = iota
|
||||
StorageTypeS3
|
||||
)
|
||||
|
||||
const (
|
||||
fslinkTemplate = "fs:///%s"
|
||||
)
|
||||
|
||||
func GetFSConverter(storageType StorageType) func(fslink string) string {
|
||||
switch storageType {
|
||||
default:
|
||||
// TODO s3 converter
|
||||
return func(fslink string) string {
|
||||
return fmt.Sprintf(fslinkTemplate, fslink)
|
||||
}
|
||||
}
|
||||
}
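A short usage sketch for GetFSConverter under the filesystem storage type; the file ID is a placeholder.

package example

import "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"

func fileLink(fileID string) string {
	convert := domain.GetFSConverter(domain.StorageTypeFS)
	// e.g. "3f2a" becomes "fs:///3f2a"
	return convert(fileID)
}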
|
7
internal/domain/internal.go
Normal file
@ -0,0 +1,7 @@
|
||||
package domain
|
||||
|
||||
type RegisterResolverRequest struct {
|
||||
ResolverName string `json:"resolver_name"`
|
||||
ResolverEndpoint string `json:"resolver_endpoint"`
|
||||
RequiredResolveParams []string `json:"required_resolve_params"`
|
||||
}
|
@ -1,29 +1,29 @@
|
||||
package domain
|
||||
|
||||
type RegisterRequest struct {
|
||||
Login string `json:"login"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
type RegisterResponse struct {
|
||||
Ok bool `json:"ok"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type LoginRequest struct {
|
||||
Login string `json:"login"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
type LoginResponse struct {
|
||||
Ok bool `json:"ok"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type LogoutRequest struct {
|
||||
}
|
||||
|
||||
type ErrorJson struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
package domain
|
||||
|
||||
type RegisterRequest struct {
|
||||
Login string `json:"login"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
type RegisterResponse struct {
|
||||
Ok bool `json:"ok"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type LogonRequest struct {
|
||||
Login string `json:"login"`
|
||||
Password string `json:"password"`
|
||||
}
|
||||
|
||||
type LogonResponse struct {
|
||||
Ok bool `json:"ok"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type LogoutRequest struct {
|
||||
}
|
||||
|
||||
type ErrorJson struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
@ -1,9 +1,9 @@
|
||||
package errs
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
ErrorUnauthorized = errors.New("unauthorized")
|
||||
ErrorAccessDenied = errors.New("access denied")
|
||||
ErrorSessionExpired = errors.New("session expired")
|
||||
)
|
||||
package errs
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
ErrorUnauthorized = errors.New("unauthorized")
|
||||
ErrorAccessDenied = errors.New("access denied")
|
||||
ErrorSessionExpired = errors.New("session expired")
|
||||
)
|
||||
|
@ -1,51 +1,58 @@
|
||||
package filesengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type FilesEngine struct {
|
||||
blobStorage storage.BlobStorage
|
||||
metaStorage storage.MetaStorage
|
||||
}
|
||||
|
||||
func NewFilesEngine(
|
||||
blobStorage storage.BlobStorage,
|
||||
metaStorage storage.MetaStorage,
|
||||
) *FilesEngine {
|
||||
return &FilesEngine{
|
||||
blobStorage: blobStorage,
|
||||
metaStorage: metaStorage,
|
||||
}
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Name string
|
||||
UserID uuid.UUID
|
||||
Ext string
|
||||
Type string
|
||||
Size int64
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// TODO save file
|
||||
func (e *FilesEngine) SaveFile(
|
||||
ctx context.Context,
|
||||
file File,
|
||||
) (uuid.UUID, error) {
|
||||
fileID, err := e.metaStorage.SaveMetadata(ctx, files.FileMetadata{})
|
||||
if err != nil {
|
||||
return uuid.Nil, fmt.Errorf("failed to create new file metadata: %w", err)
|
||||
}
|
||||
|
||||
if err = e.blobStorage.SaveBlob(ctx, fileID, file.Data); err != nil {
|
||||
return uuid.Nil, fmt.Errorf("failed to save file data: %w", err)
|
||||
}
|
||||
|
||||
return fileID, nil
|
||||
}
|
||||
package filesengine
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
|
||||
// "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type FilesEngine struct {
|
||||
blobStorage storage.BlobStorage
|
||||
metaStorage storage.MetaStorage
|
||||
}
|
||||
|
||||
func NewFilesEngine(
|
||||
blobStorage storage.BlobStorage,
|
||||
metaStorage storage.MetaStorage,
|
||||
) *FilesEngine {
|
||||
return &FilesEngine{
|
||||
blobStorage: blobStorage,
|
||||
metaStorage: metaStorage,
|
||||
}
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Name string
|
||||
UserID int64
|
||||
Ext string
|
||||
Type string
|
||||
Size int64
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// TODO save file
|
||||
func (e *FilesEngine) SaveFile(
|
||||
ctx context.Context,
|
||||
file File,
|
||||
) (uuid.UUID, error) {
|
||||
fileID, err := e.metaStorage.SaveMetadata(ctx, files.FileMetadata{
|
||||
Name: file.Name,
|
||||
UserID: file.UserID,
|
||||
Ext: file.Ext,
|
||||
Type: file.Type,
|
||||
// FSLink: f,
|
||||
})
|
||||
if err != nil {
|
||||
return uuid.Nil, fmt.Errorf("failed to create new file metadata: %w", err)
|
||||
}
|
||||
|
||||
if err = e.blobStorage.SaveBlob(ctx, fileID, file.Data); err != nil {
|
||||
return uuid.Nil, fmt.Errorf("failed to save file data: %w", err)
|
||||
}
|
||||
|
||||
return fileID, nil
|
||||
}
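A hedged sketch of calling FilesEngine.SaveFile with the new int64 UserID field; the file name, extension and content type are placeholders.

package example

import (
	"context"

	filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
	"github.com/google/uuid"
)

func saveReport(ctx context.Context, engine *filesengine.FilesEngine, userID int64, data []byte) (uuid.UUID, error) {
	return engine.SaveFile(ctx, filesengine.File{
		Name:   "report", // hypothetical file name
		UserID: userID,
		Ext:    "pdf",
		Type:   "application/pdf",
		Size:   int64(len(data)),
		Data:   data,
	})
}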
|
||||
|
9
internal/handler/call_handler.go
Normal file
@ -0,0 +1,9 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
)
|
||||
|
||||
type CallHandler func(ctx context.Context, req *common.Request) ([]byte, error)
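A hypothetical CallHandler matching the new type, echoing the request ID back as JSON.

package example

import (
	"context"
	"encoding/json"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
)

// pingHandler is a hypothetical CallHandler used only for illustration.
var pingHandler handler.CallHandler = func(ctx context.Context, req *common.Request) ([]byte, error) {
	return json.Marshal(map[string]string{"request_id": req.ID})
}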
|
@ -1,66 +1,78 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
)
|
||||
|
||||
type Writer interface {
|
||||
Write(ctx context.Context, resp any)
|
||||
SetCookie(name string, value string, maxAge int, path string, domain string, secure bool, httpOnly bool)
|
||||
}
|
||||
|
||||
type Handler interface {
|
||||
GetName() string
|
||||
GetRequiredResolveParams() []string
|
||||
GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error
|
||||
GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error
|
||||
}
|
||||
|
||||
type BaseHandler struct {
|
||||
Name string
|
||||
RequiredResolveParams []string
|
||||
ProcessFn func(ctx context.Context, req *common.Request, w Writer) error
|
||||
PreprocessFn func(ctx context.Context, req *common.Request, w Writer) error
|
||||
}
|
||||
|
||||
func New() *BaseHandler {
|
||||
return new(BaseHandler)
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithName(name string) *BaseHandler {
|
||||
h.Name = name
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithRequiredResolveParams(params ...string) *BaseHandler {
|
||||
h.RequiredResolveParams = params
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithProcessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler {
|
||||
h.ProcessFn = fn
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithPreprocessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler {
|
||||
h.PreprocessFn = fn
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetName() string {
|
||||
return h.Name
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetRequiredResolveParams() []string {
|
||||
return h.RequiredResolveParams
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error {
|
||||
return h.ProcessFn
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error {
|
||||
return h.PreprocessFn
|
||||
}
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
)
|
||||
|
||||
type WriteOptions struct {
|
||||
Code int
|
||||
}
|
||||
|
||||
type WriteOption func(opts *WriteOptions)
|
||||
|
||||
func WithCode(code int) WriteOption {
|
||||
return func(opts *WriteOptions) {
|
||||
opts.Code = code
|
||||
}
|
||||
}
|
||||
|
||||
type Writer interface {
|
||||
Write(ctx context.Context, resp any, opts ...WriteOption)
|
||||
SetCookie(name string, value string, maxAge int, path string, domain string, secure bool, httpOnly bool)
|
||||
}
|
||||
|
||||
type Handler interface {
|
||||
GetName() string
|
||||
GetRequiredResolveParams() []string
|
||||
GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error
|
||||
GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error
|
||||
}
|
||||
|
||||
type BaseHandler struct {
|
||||
Name string
|
||||
RequiredResolveParams []string
|
||||
ProcessFn func(ctx context.Context, req *common.Request, w Writer) error
|
||||
PreprocessFn func(ctx context.Context, req *common.Request, w Writer) error
|
||||
}
|
||||
|
||||
func New() *BaseHandler {
|
||||
return new(BaseHandler)
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithName(name string) *BaseHandler {
|
||||
h.Name = name
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithRequiredResolveParams(params ...string) *BaseHandler {
|
||||
h.RequiredResolveParams = params
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithProcessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler {
|
||||
h.ProcessFn = fn
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) WithPreprocessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler {
|
||||
h.PreprocessFn = fn
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetName() string {
|
||||
return h.Name
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetRequiredResolveParams() []string {
|
||||
return h.RequiredResolveParams
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error {
|
||||
return h.ProcessFn
|
||||
}
|
||||
|
||||
func (h *BaseHandler) GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error {
|
||||
return h.PreprocessFn
|
||||
}
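A sketch of assembling a handler with the builder methods and the updated Writer signature; the handler name, required resolve param and response body are hypothetical.

package example

import (
	"context"
	"net/http"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
)

// newPingHandler chains the With* builders; no preprocess step is set in this sketch.
func newPingHandler() handler.Handler {
	return handler.New().
		WithName("ping").
		WithRequiredResolveParams("session").
		WithProcessFunc(func(ctx context.Context, req *common.Request, w handler.Writer) error {
			w.Write(ctx, map[string]string{"status": "ok"}, handler.WithCode(http.StatusOK))
			return nil
		})
}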
|
||||
|
@ -1,154 +1,154 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type _key string
|
||||
|
||||
//nolint:gochecknoglobals // ...
|
||||
var loggerKey _key = "_core_logger"
|
||||
|
||||
type LoggerOpt func(p *loggerParams)
|
||||
|
||||
func NewLoggerContext(ctx context.Context, opts ...LoggerOpt) context.Context {
|
||||
p := new(loggerParams)
|
||||
|
||||
for _, o := range opts {
|
||||
o(p)
|
||||
}
|
||||
|
||||
log := p.build()
|
||||
|
||||
return context.WithValue(ctx, loggerKey, log)
|
||||
}
|
||||
|
||||
type loggerParams struct {
|
||||
local bool
|
||||
addSource bool
|
||||
lvl slog.Level
|
||||
writers []io.Writer
|
||||
}
|
||||
|
||||
func WithWriter(w io.Writer) LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.writers = append(p.writers, w)
|
||||
}
|
||||
}
|
||||
|
||||
func WithLevel(l slog.Level) LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.lvl = l
|
||||
}
|
||||
}
|
||||
|
||||
func Local() LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.local = true
|
||||
}
|
||||
}
|
||||
|
||||
func WithSource() LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.addSource = true
|
||||
}
|
||||
}
|
||||
|
||||
func Err(err error) slog.Attr {
|
||||
return slog.Attr{
|
||||
Key: "error",
|
||||
Value: slog.StringValue(err.Error()),
|
||||
}
|
||||
}
|
||||
|
||||
func MapLevel(lvl string) slog.Level {
|
||||
switch strings.ToLower(lvl) {
|
||||
case "debug":
|
||||
return LevelDebug
|
||||
case "info":
|
||||
return LevelInfo
|
||||
case "notice":
|
||||
return LevelNotice
|
||||
case "warn":
|
||||
return LevelWarn
|
||||
case "error":
|
||||
return LevelError
|
||||
case "critical":
|
||||
return LevelCritial
|
||||
case "alert":
|
||||
return LevelAlert
|
||||
case "emergency":
|
||||
return LevelEmergency
|
||||
default:
|
||||
return LevelInfo
|
||||
}
|
||||
}
|
||||
|
||||
func (b *loggerParams) build() *slog.Logger {
|
||||
if len(b.writers) == 0 {
|
||||
b.writers = append(b.writers, os.Stdout)
|
||||
}
|
||||
|
||||
w := io.MultiWriter(b.writers...)
|
||||
|
||||
if b.local {
|
||||
opts := prettyHandlerOptions{
|
||||
SlogOpts: &slog.HandlerOptions{
|
||||
Level: b.lvl,
|
||||
AddSource: b.addSource,
|
||||
},
|
||||
}
|
||||
|
||||
handler := opts.newPrettyHandler(w)
|
||||
|
||||
return slog.New(handler)
|
||||
}
|
||||
|
||||
return newLogger(b.lvl, w)
|
||||
}
|
||||
|
||||
func newLogger(lvl slog.Level, w io.Writer) *slog.Logger {
|
||||
return slog.New(
|
||||
slog.NewJSONHandler(w, &slog.HandlerOptions{
|
||||
Level: lvl,
|
||||
ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
|
||||
if a.Key == slog.LevelKey {
|
||||
level := a.Value.Any().(slog.Level)
|
||||
|
||||
switch {
|
||||
case level < LevelInfo:
|
||||
a.Value = slog.StringValue("DEBUG")
|
||||
case level < LevelNotice:
|
||||
a.Value = slog.StringValue("INFO")
|
||||
case level < LevelWarn:
|
||||
a.Value = slog.StringValue("NOTICE")
|
||||
case level < LevelError:
|
||||
a.Value = slog.StringValue("WARNING")
|
||||
case level < LevelCritial:
|
||||
a.Value = slog.StringValue("ERROR")
|
||||
case level < LevelAlert:
|
||||
a.Value = slog.StringValue("CRITICAL")
|
||||
case level < LevelEmergency:
|
||||
a.Value = slog.StringValue("ALERT")
|
||||
default:
|
||||
a.Value = slog.StringValue("EMERGENCY")
|
||||
}
|
||||
}
|
||||
|
||||
return a
|
||||
},
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
func loggerFromCtx(ctx context.Context) *slog.Logger {
|
||||
if l, ok := ctx.Value(loggerKey).(*slog.Logger); ok {
|
||||
return l
|
||||
}
|
||||
|
||||
return globalLogger
|
||||
}
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type _key string
|
||||
|
||||
//nolint:gochecknoglobals // ...
|
||||
var loggerKey _key = "_core_logger"
|
||||
|
||||
type LoggerOpt func(p *loggerParams)
|
||||
|
||||
func NewLoggerContext(ctx context.Context, opts ...LoggerOpt) context.Context {
|
||||
p := new(loggerParams)
|
||||
|
||||
for _, o := range opts {
|
||||
o(p)
|
||||
}
|
||||
|
||||
log := p.build()
|
||||
|
||||
return context.WithValue(ctx, loggerKey, log)
|
||||
}
|
||||
|
||||
type loggerParams struct {
|
||||
local bool
|
||||
addSource bool
|
||||
lvl slog.Level
|
||||
writers []io.Writer
|
||||
}
|
||||
|
||||
func WithWriter(w io.Writer) LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.writers = append(p.writers, w)
|
||||
}
|
||||
}
|
||||
|
||||
func WithLevel(l slog.Level) LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.lvl = l
|
||||
}
|
||||
}
|
||||
|
||||
func Local() LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.local = true
|
||||
}
|
||||
}
|
||||
|
||||
func WithSource() LoggerOpt {
|
||||
return func(p *loggerParams) {
|
||||
p.addSource = true
|
||||
}
|
||||
}
|
||||
|
||||
func Err(err error) slog.Attr {
|
||||
return slog.Attr{
|
||||
Key: "error",
|
||||
Value: slog.StringValue(err.Error()),
|
||||
}
|
||||
}
|
||||
|
||||
func MapLevel(lvl string) slog.Level {
|
||||
switch strings.ToLower(lvl) {
|
||||
case "debug":
|
||||
return LevelDebug
|
||||
case "info":
|
||||
return LevelInfo
|
||||
case "notice":
|
||||
return LevelNotice
|
||||
case "warn":
|
||||
return LevelWarn
|
||||
case "error":
|
||||
return LevelError
|
||||
case "critical":
|
||||
return LevelCritial
|
||||
case "alert":
|
||||
return LevelAlert
|
||||
case "emergency":
|
||||
return LevelEmergency
|
||||
default:
|
||||
return LevelInfo
|
||||
}
|
||||
}
|
||||
|
||||
func (b *loggerParams) build() *slog.Logger {
|
||||
if len(b.writers) == 0 {
|
||||
b.writers = append(b.writers, os.Stdout)
|
||||
}
|
||||
|
||||
w := io.MultiWriter(b.writers...)
|
||||
|
||||
if b.local {
|
||||
opts := prettyHandlerOptions{
|
||||
SlogOpts: &slog.HandlerOptions{
|
||||
Level: b.lvl,
|
||||
AddSource: b.addSource,
|
||||
},
|
||||
}
|
||||
|
||||
handler := opts.newPrettyHandler(w)
|
||||
|
||||
return slog.New(handler)
|
||||
}
|
||||
|
||||
return newLogger(b.lvl, w)
|
||||
}
|
||||
|
||||
func newLogger(lvl slog.Level, w io.Writer) *slog.Logger {
|
||||
return slog.New(
|
||||
slog.NewJSONHandler(w, &slog.HandlerOptions{
|
||||
Level: lvl,
|
||||
ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
|
||||
if a.Key == slog.LevelKey {
|
||||
level := a.Value.Any().(slog.Level)
|
||||
|
||||
switch {
|
||||
case level < LevelInfo:
|
||||
a.Value = slog.StringValue("DEBUG")
|
||||
case level < LevelNotice:
|
||||
a.Value = slog.StringValue("INFO")
|
||||
case level < LevelWarn:
|
||||
a.Value = slog.StringValue("NOTICE")
|
||||
case level < LevelError:
|
||||
a.Value = slog.StringValue("WARNING")
|
||||
case level < LevelCritial:
|
||||
a.Value = slog.StringValue("ERROR")
|
||||
case level < LevelAlert:
|
||||
a.Value = slog.StringValue("CRITICAL")
|
||||
case level < LevelEmergency:
|
||||
a.Value = slog.StringValue("ALERT")
|
||||
default:
|
||||
a.Value = slog.StringValue("EMERGENCY")
|
||||
}
|
||||
}
|
||||
|
||||
return a
|
||||
},
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
func loggerFromCtx(ctx context.Context) *slog.Logger {
|
||||
if l, ok := ctx.Value(loggerKey).(*slog.Logger); ok {
|
||||
return l
|
||||
}
|
||||
|
||||
return globalLogger
|
||||
}
|
||||
|
@ -1,35 +1,35 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
//nolint:unused //...
|
||||
func newDiscardLogger() *slog.Logger {
|
||||
return slog.New(newDiscardHandler())
|
||||
}
|
||||
|
||||
//nolint:unused //...
|
||||
type DiscardHandler struct{}
|
||||
|
||||
//nolint:unused //...
|
||||
func newDiscardHandler() *DiscardHandler {
|
||||
return &DiscardHandler{}
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) Handle(_ context.Context, _ slog.Record) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) WithAttrs(_ []slog.Attr) slog.Handler {
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) WithGroup(_ string) slog.Handler {
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) Enabled(_ context.Context, _ slog.Level) bool {
|
||||
return false
|
||||
}
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
//nolint:unused //...
|
||||
func newDiscardLogger() *slog.Logger {
|
||||
return slog.New(newDiscardHandler())
|
||||
}
|
||||
|
||||
//nolint:unused //...
|
||||
type DiscardHandler struct{}
|
||||
|
||||
//nolint:unused //...
|
||||
func newDiscardHandler() *DiscardHandler {
|
||||
return &DiscardHandler{}
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) Handle(_ context.Context, _ slog.Record) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) WithAttrs(_ []slog.Attr) slog.Handler {
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) WithGroup(_ string) slog.Handler {
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *DiscardHandler) Enabled(_ context.Context, _ slog.Level) bool {
|
||||
return false
|
||||
}
|
||||
|
@ -1,81 +1,81 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"os"
|
||||
)
|
||||
|
||||
//nolint:gochecknoglobals // ...
|
||||
var globalLogger *slog.Logger = newLogger(LevelDebug, os.Stdout)
|
||||
|
||||
func SetLevel(l slog.Level) {
|
||||
globalLogger = newLogger(l, os.Stdout)
|
||||
}
|
||||
|
||||
const (
|
||||
LevelEmergency = slog.Level(10000)
|
||||
LevelAlert = slog.Level(1000)
|
||||
LevelCritial = slog.Level(100)
|
||||
LevelError = slog.LevelError
|
||||
LevelWarn = slog.LevelWarn
|
||||
LevelNotice = slog.Level(2)
|
||||
LevelInfo = slog.LevelInfo
|
||||
LevelDebug = slog.LevelDebug
|
||||
)
|
||||
|
||||
func Fatal(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelEmergency, message, attrs...)
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Emergency(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelEmergency, message, attrs...)
|
||||
}
|
||||
|
||||
func Alert(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelAlert, message, attrs...)
|
||||
}
|
||||
|
||||
func Critial(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelCritial, message, attrs...)
|
||||
}
|
||||
|
||||
func Error(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.ErrorContext(ctx, message, attrs...)
|
||||
}
|
||||
|
||||
func Warn(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.WarnContext(ctx, message, attrs...)
|
||||
}
|
||||
|
||||
func Notice(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelNotice, message, attrs...)
|
||||
}
|
||||
|
||||
func Info(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.InfoContext(ctx, message, attrs...)
|
||||
}
|
||||
|
||||
func Debug(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.DebugContext(ctx, message, attrs...)
|
||||
}
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"os"
|
||||
)
|
||||
|
||||
//nolint:gochecknoglobals // ...
|
||||
var globalLogger *slog.Logger = newLogger(LevelDebug, os.Stdout)
|
||||
|
||||
func SetLevel(l slog.Level) {
|
||||
globalLogger = newLogger(l, os.Stdout)
|
||||
}
|
||||
|
||||
const (
|
||||
LevelEmergency = slog.Level(10000)
|
||||
LevelAlert = slog.Level(1000)
|
||||
LevelCritial = slog.Level(100)
|
||||
LevelError = slog.LevelError
|
||||
LevelWarn = slog.LevelWarn
|
||||
LevelNotice = slog.Level(2)
|
||||
LevelInfo = slog.LevelInfo
|
||||
LevelDebug = slog.LevelDebug
|
||||
)
|
||||
|
||||
func Fatal(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelEmergency, message, attrs...)
|
||||
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func Emergency(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelEmergency, message, attrs...)
|
||||
}
|
||||
|
||||
func Alert(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelAlert, message, attrs...)
|
||||
}
|
||||
|
||||
func Critial(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelCritial, message, attrs...)
|
||||
}
|
||||
|
||||
func Error(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.ErrorContext(ctx, message, attrs...)
|
||||
}
|
||||
|
||||
func Warn(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.WarnContext(ctx, message, attrs...)
|
||||
}
|
||||
|
||||
func Notice(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.Log(ctx, LevelNotice, message, attrs...)
|
||||
}
|
||||
|
||||
func Info(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.InfoContext(ctx, message, attrs...)
|
||||
}
|
||||
|
||||
func Debug(ctx context.Context, message string, attrs ...any) {
|
||||
l := loggerFromCtx(ctx)
|
||||
|
||||
l.DebugContext(ctx, message, attrs...)
|
||||
}
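A short sketch of attaching a logger to a context with the available options and then logging through the package-level helpers; the attribute value is a placeholder.

package example

import (
	"context"
	"log/slog"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
)

func run(ctx context.Context) {
	// Attach a pretty, debug-level logger to the context; later calls pick it up from there.
	ctx = logger.NewLoggerContext(ctx,
		logger.Local(),
		logger.WithLevel(logger.MapLevel("debug")),
		logger.WithSource(),
	)

	logger.Info(ctx, "service started", slog.String("mode", "all-in-one"))
}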
|
||||
|
@ -1,97 +1,97 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
stdlog "log"
|
||||
"log/slog"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
type prettyHandlerOptions struct {
|
||||
SlogOpts *slog.HandlerOptions
|
||||
}
|
||||
|
||||
type prettyHandler struct {
|
||||
opts prettyHandlerOptions
|
||||
slog.Handler
|
||||
l *stdlog.Logger
|
||||
attrs []slog.Attr
|
||||
}
|
||||
|
||||
func (opts prettyHandlerOptions) newPrettyHandler(
|
||||
out io.Writer,
|
||||
) *prettyHandler {
|
||||
h := &prettyHandler{
|
||||
Handler: slog.NewJSONHandler(out, opts.SlogOpts),
|
||||
l: stdlog.New(out, "", 0),
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *prettyHandler) Handle(_ context.Context, r slog.Record) error {
|
||||
level := r.Level.String() + ":"
|
||||
|
||||
switch r.Level {
|
||||
case slog.LevelDebug:
|
||||
level = color.MagentaString(level)
|
||||
case slog.LevelInfo:
|
||||
level = color.BlueString(level)
|
||||
case slog.LevelWarn:
|
||||
level = color.YellowString(level)
|
||||
case slog.LevelError:
|
||||
level = color.RedString(level)
|
||||
}
|
||||
|
||||
fields := make(map[string]interface{}, r.NumAttrs())
|
||||
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
fields[a.Key] = a.Value.Any()
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
for _, a := range h.attrs {
|
||||
fields[a.Key] = a.Value.Any()
|
||||
}
|
||||
|
||||
var b []byte
|
||||
var err error
|
||||
|
||||
if len(fields) > 0 {
|
||||
b, err = json.MarshalIndent(fields, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
timeStr := r.Time.Format("[15:04:05.000]")
|
||||
msg := color.CyanString(r.Message)
|
||||
|
||||
h.l.Println(
|
||||
timeStr,
|
||||
level,
|
||||
msg,
|
||||
color.WhiteString(string(b)),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *prettyHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
return &prettyHandler{
|
||||
Handler: h.Handler,
|
||||
l: h.l,
|
||||
attrs: attrs,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *prettyHandler) WithGroup(name string) slog.Handler {
|
||||
return &prettyHandler{
|
||||
Handler: h.Handler.WithGroup(name),
|
||||
l: h.l,
|
||||
}
|
||||
}
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
stdlog "log"
|
||||
"log/slog"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
type prettyHandlerOptions struct {
|
||||
SlogOpts *slog.HandlerOptions
|
||||
}
|
||||
|
||||
type prettyHandler struct {
|
||||
opts prettyHandlerOptions
|
||||
slog.Handler
|
||||
l *stdlog.Logger
|
||||
attrs []slog.Attr
|
||||
}
|
||||
|
||||
func (opts prettyHandlerOptions) newPrettyHandler(
|
||||
out io.Writer,
|
||||
) *prettyHandler {
|
||||
h := &prettyHandler{
|
||||
Handler: slog.NewJSONHandler(out, opts.SlogOpts),
|
||||
l: stdlog.New(out, "", 0),
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *prettyHandler) Handle(_ context.Context, r slog.Record) error {
|
||||
level := r.Level.String() + ":"
|
||||
|
||||
switch r.Level {
|
||||
case slog.LevelDebug:
|
||||
level = color.MagentaString(level)
|
||||
case slog.LevelInfo:
|
||||
level = color.BlueString(level)
|
||||
case slog.LevelWarn:
|
||||
level = color.YellowString(level)
|
||||
case slog.LevelError:
|
||||
level = color.RedString(level)
|
||||
}
|
||||
|
||||
fields := make(map[string]interface{}, r.NumAttrs())
|
||||
|
||||
r.Attrs(func(a slog.Attr) bool {
|
||||
fields[a.Key] = a.Value.Any()
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
for _, a := range h.attrs {
|
||||
fields[a.Key] = a.Value.Any()
|
||||
}
|
||||
|
||||
var b []byte
|
||||
var err error
|
||||
|
||||
if len(fields) > 0 {
|
||||
b, err = json.MarshalIndent(fields, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
timeStr := r.Time.Format("[15:04:05.000]")
|
||||
msg := color.CyanString(r.Message)
|
||||
|
||||
h.l.Println(
|
||||
timeStr,
|
||||
level,
|
||||
msg,
|
||||
color.WhiteString(string(b)),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *prettyHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
return &prettyHandler{
|
||||
Handler: h.Handler,
|
||||
l: h.l,
|
||||
attrs: attrs,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *prettyHandler) WithGroup(name string) slog.Handler {
|
||||
return &prettyHandler{
|
||||
Handler: h.Handler.WithGroup(name),
|
||||
l: h.l,
|
||||
}
|
||||
}
|
||||
|
@ -1,42 +1,42 @@
|
||||
package domain
|
||||
|
||||
type InitPluginRequest struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
}
|
||||
|
||||
type PluginPage struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
type PluginAction struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
RequiredResolveParams []string `json:"required_resolve_params"`
|
||||
OptionalResolveParams []string `json:"optional_resolve_params"`
|
||||
WithActions bool `json:"with_actions"`
|
||||
Async bool `json:"async"`
|
||||
}
|
||||
|
||||
type PluginComponent struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
RequiredResolveParams []string `json:"required_resolve_params"`
|
||||
OptionalResolveParams []string `json:"optional_resolve_params"`
|
||||
WithActions bool `json:"with_actions"`
|
||||
Async bool `json:"async"`
|
||||
}
|
||||
|
||||
type Ping struct {
|
||||
Payload any `json:"payload"`
|
||||
}
|
||||
|
||||
type Pong struct {
|
||||
Payload any `json:"payload"`
|
||||
}
|
||||
package domain
|
||||
|
||||
type InitPluginRequest struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
}
|
||||
|
||||
type PluginPage struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
type PluginAction struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
RequiredResolveParams []string `json:"required_resolve_params"`
|
||||
OptionalResolveParams []string `json:"optional_resolve_params"`
|
||||
WithActions bool `json:"with_actions"`
|
||||
Async bool `json:"async"`
|
||||
}
|
||||
|
||||
type PluginComponent struct {
|
||||
Name string `json:"name"`
|
||||
Version int `json:"version"`
|
||||
Namespace string `json:"namespace"`
|
||||
RequiredResolveParams []string `json:"required_resolve_params"`
|
||||
OptionalResolveParams []string `json:"optional_resolve_params"`
|
||||
WithActions bool `json:"with_actions"`
|
||||
Async bool `json:"async"`
|
||||
}
|
||||
|
||||
type Ping struct {
|
||||
Payload any `json:"payload"`
|
||||
}
|
||||
|
||||
type Pong struct {
|
||||
Payload any `json:"payload"`
|
||||
}
|
||||
|
@ -1,114 +1,114 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/closer"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain"
|
||||
)
|
||||
|
||||
type PluginLoader struct {
|
||||
l net.Listener
|
||||
store *PluginStore
|
||||
}
|
||||
|
||||
func MustNewPluginLoader(ctx context.Context, listenPort uint16, ps *PluginStore) *PluginLoader {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:"+strconv.FormatInt(int64(listenPort), 10))
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "[MustNewPluginLoader] error build listener", logger.Err(err))
|
||||
}
|
||||
|
||||
return &PluginLoader{
|
||||
l: l,
|
||||
store: ps,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PluginLoader) Run(ctx context.Context) {
|
||||
go p.run(ctx)
|
||||
}
|
||||
|
||||
func (p *PluginLoader) run(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Info(ctx, "[plugin_loader][loop] closing")
|
||||
if err := p.l.Close(); err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][loop] failed to close listener", logger.Err(err))
|
||||
}
|
||||
default:
|
||||
conn, err := p.l.Accept()
|
||||
if err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][loop] failed to accet new connection", logger.Err(err))
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Debug(ctx, "[plugin_loader][loop] accepting connection")
|
||||
|
||||
go p.accept(ctx, conn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PluginLoader) accept(ctx context.Context, conn net.Conn) {
|
||||
data := make([]byte, 0)
|
||||
// TODO make read loop
|
||||
n, err := conn.Read(data)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] read error", logger.Err(err))
|
||||
return
|
||||
}
|
||||
logger.Debug(ctx, "[plugin_loader][accept] bytes read", slog.Int("n", n))
|
||||
|
||||
init := new(domain.InitPluginRequest)
|
||||
|
||||
if err = json.Unmarshal(data, init); err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] unmarshal request error", logger.Err(err))
|
||||
return
|
||||
}
|
||||
|
||||
if init.Namespace == "" {
|
||||
logger.Error(ctx, "[plugin_loader][accept] empty namespace")
|
||||
err = errors.Join(err, errors.New("init request must contain namespace"))
|
||||
}
|
||||
|
||||
if init.Name == "" {
logger.Error(ctx, "[plugin_loader][accept] empty name")
err = errors.Join(err, errors.New("init request must contain name"))
}

if init.Version == 0 {
logger.Error(ctx, "[plugin_loader][accept] empty version")
err = errors.Join(err, errors.New("init request must contain version"))
}
|
||||
|
||||
if err != nil {
|
||||
if _, werr := conn.Write([]byte(err.Error())); werr != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] failed to write init error", logger.Err(werr))
|
||||
}
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] failed to close conn", logger.Err(cerr))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug(ctx,
|
||||
"[plugin_loader][accept] new plugin initialized",
|
||||
"plugin", PluginStoreKey(init.Namespace, init.Name, init.Version),
|
||||
)
|
||||
|
||||
plugin := &Plugin{
|
||||
conn: conn,
|
||||
md: *init,
|
||||
}
|
||||
|
||||
closer.Add(plugin.Close)
|
||||
|
||||
p.store.Add(plugin)
|
||||
}
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/closer"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain"
|
||||
)
|
||||
|
||||
type PluginLoader struct {
|
||||
l net.Listener
|
||||
store *PluginStore
|
||||
}
|
||||
|
||||
func MustNewPluginLoader(ctx context.Context, listenPort uint16, ps *PluginStore) *PluginLoader {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:"+strconv.FormatInt(int64(listenPort), 10))
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "[MustNewPluginLoader] error build listener", logger.Err(err))
|
||||
}
|
||||
|
||||
return &PluginLoader{
|
||||
l: l,
|
||||
store: ps,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PluginLoader) Run(ctx context.Context) {
|
||||
go p.run(ctx)
|
||||
}
|
||||
|
||||
func (p *PluginLoader) run(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Info(ctx, "[plugin_loader][loop] closing")
|
||||
if err := p.l.Close(); err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][loop] failed to close listener", logger.Err(err))
|
||||
}
|
||||
default:
|
||||
conn, err := p.l.Accept()
|
||||
if err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][loop] failed to accet new connection", logger.Err(err))
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Debug(ctx, "[plugin_loader][loop] accepting connection")
|
||||
|
||||
go p.accept(ctx, conn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PluginLoader) accept(ctx context.Context, conn net.Conn) {
|
||||
// TODO make read loop
data := make([]byte, 4096)
n, err := conn.Read(data)
|
||||
if err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] read error", logger.Err(err))
|
||||
return
|
||||
}
|
||||
logger.Debug(ctx, "[plugin_loader][accept] bytes read", slog.Int("n", n))
|
||||
|
||||
init := new(domain.InitPluginRequest)
|
||||
|
||||
if err = json.Unmarshal(data[:n], init); err != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] unmarshal request error", logger.Err(err))
|
||||
return
|
||||
}
|
||||
|
||||
if init.Namespace == "" {
|
||||
logger.Error(ctx, "[plugin_loader][accept] empty namespace")
|
||||
err = errors.Join(err, errors.New("init request must contain namespace"))
|
||||
}
|
||||
|
||||
if init.Name == "" {
logger.Error(ctx, "[plugin_loader][accept] empty name")
err = errors.Join(err, errors.New("init request must contain name"))
}

if init.Version == 0 {
logger.Error(ctx, "[plugin_loader][accept] empty version")
err = errors.Join(err, errors.New("init request must contain version"))
}
|
||||
|
||||
if err != nil {
|
||||
if _, werr := conn.Write([]byte(err.Error())); werr != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] failed to write init error", logger.Err(werr))
|
||||
}
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
logger.Error(ctx, "[plugin_loader][accept] failed to close conn", logger.Err(cerr))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug(ctx,
|
||||
"[plugin_loader][accept] new plugin initialized",
|
||||
"plugin", PluginStoreKey(init.Namespace, init.Name, init.Version),
|
||||
)
|
||||
|
||||
plugin := &Plugin{
|
||||
conn: conn,
|
||||
md: *init,
|
||||
}
|
||||
|
||||
closer.Add(plugin.Close)
|
||||
|
||||
p.store.Add(plugin)
|
||||
}
|
||||
|
@ -1,45 +1,45 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain"
|
||||
)
|
||||
|
||||
type Plugin struct {
|
||||
conn net.Conn
|
||||
md domain.InitPluginRequest
|
||||
}
|
||||
|
||||
func (p *Plugin) Init(initPayload any) error {
|
||||
r := &domain.Ping{
|
||||
Payload: initPayload,
|
||||
}
|
||||
|
||||
pingData, err := json.Marshal(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = p.conn.Write(pingData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pongData := make([]byte, 0)
|
||||
if _, err := p.conn.Read(pongData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(pongData, pingData) {
|
||||
return fmt.Errorf("ping-pong payload assertion error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Plugin) Close() error {
|
||||
return p.conn.Close()
|
||||
}
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain"
|
||||
)
|
||||
|
||||
type Plugin struct {
|
||||
conn net.Conn
|
||||
md domain.InitPluginRequest
|
||||
}
|
||||
|
||||
func (p *Plugin) Init(initPayload any) error {
|
||||
r := &domain.Ping{
|
||||
Payload: initPayload,
|
||||
}
|
||||
|
||||
pingData, err := json.Marshal(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = p.conn.Write(pingData); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pongData := make([]byte, len(pingData))
n, err := p.conn.Read(pongData)
if err != nil {
return err
}

if !bytes.Equal(pongData[:n], pingData) {
|
||||
return fmt.Errorf("ping-pong payload assertion error")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Plugin) Close() error {
|
||||
return p.conn.Close()
|
||||
}
|
||||
|
24
internal/plugin/processor.go
Normal file
@ -0,0 +1,24 @@
package plugin

import (
	"context"
	"fmt"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
)

type PluginHandler struct {
	*handler.BaseHandler
	store *PluginStore
}

func (_ *PluginHandler) GetName() string {
	return "pluginv1"
}

func (p *PluginHandler) GetProcessFn() func(ctx context.Context, req *common.Request, w handler.Writer) error {
	return func(ctx context.Context, req *common.Request, w handler.Writer) error {
		return fmt.Errorf("unimplemented")
	}
}
@ -1,28 +1,37 @@
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PluginStore struct {
|
||||
m sync.RWMutex
|
||||
plugins map[string]*Plugin
|
||||
}
|
||||
|
||||
func NewPluginStore() *PluginStore {
|
||||
return &PluginStore{
|
||||
plugins: make(map[string]*Plugin),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *PluginStore) Add(plugin *Plugin) {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
|
||||
s.plugins[PluginStoreKey(plugin.md.Namespace, plugin.md.Name, plugin.md.Version)] = plugin
|
||||
}
|
||||
|
||||
func PluginStoreKey(ns, name string, v int) string {
|
||||
return fmt.Sprintf("%s.%s.%v", ns, name, v)
|
||||
}
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type PluginStore struct {
|
||||
m sync.RWMutex
|
||||
plugins map[string]*Plugin
|
||||
}
|
||||
|
||||
func NewPluginStore() *PluginStore {
|
||||
return &PluginStore{
|
||||
plugins: make(map[string]*Plugin),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *PluginStore) Add(plugin *Plugin) {
|
||||
s.m.Lock()
|
||||
defer s.m.Unlock()
|
||||
|
||||
s.plugins[PluginStoreKey(plugin.md.Namespace, plugin.md.Name, plugin.md.Version)] = plugin
|
||||
}
|
||||
|
||||
func (s *PluginStore) Get(plugin string) *Plugin {
|
||||
s.m.RLock()
|
||||
defer s.m.RUnlock()
|
||||
if p, ok := s.plugins[plugin]; ok {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func PluginStoreKey(ns, name string, v int) string {
|
||||
return fmt.Sprintf("%s.%s.%v", ns, name, v)
|
||||
}
|
||||
|
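A quick illustration of how Add and the new Get compose through PluginStoreKey. This is a test-style sketch; constructing a Plugin directly from metadata is a simplification, since the loader normally builds it from a live connection.

```go
package plugin

import (
	"testing"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain"
)

// Illustrative round trip: Add registers under PluginStoreKey(namespace, name, version),
// and Get looks the plugin up by the same composite key.
func TestPluginStoreRoundTrip(t *testing.T) {
	store := NewPluginStore()
	store.Add(&Plugin{md: domain.InitPluginRequest{Namespace: "demo", Name: "example", Version: 1}})

	if store.Get(PluginStoreKey("demo", "example", 1)) == nil {
		t.Fatal("expected plugin to be registered under its store key")
	}
}
```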
@ -1,107 +1,112 @@
|
||||
package processor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
|
||||
"github.com/gin-gonic/gin"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type GinProcessor struct {
|
||||
rp *common.RequestPool
|
||||
authStorage storage.AuthStorage
|
||||
resolveDispatcher *resolvedispatcher.ResolveDispatcher
|
||||
}
|
||||
|
||||
func NewGinProcessor(
|
||||
authStorage storage.AuthStorage,
|
||||
resolveDispatcher *resolvedispatcher.ResolveDispatcher,
|
||||
) *GinProcessor {
|
||||
return &GinProcessor{
|
||||
rp: common.NewRequestPool(),
|
||||
authStorage: authStorage,
|
||||
resolveDispatcher: resolveDispatcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *GinProcessor) Process(handler handler.Handler) gin.HandlerFunc {
|
||||
return func(ctx *gin.Context) {
|
||||
req := common.NewRequestFromHttp(p.rp, ctx.Request)
|
||||
ctx.Request = ctx.Request.WithContext(context.WithValue(ctx.Request.Context(), "__request_id", req.ID))
|
||||
|
||||
// 1. Resolve the resolvers, collect all data required
|
||||
// 2. Try to process optional resolvers
|
||||
err := p.resolve(ctx, handler, req)
|
||||
if err != nil {
|
||||
p.writeError(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
// 3. Call preprocessing fn's, middlewares etc.
|
||||
if err = handler.GetPreprocessFn()(ctx, req, wrapGin(ctx)); err != nil {
|
||||
p.writeError(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
// 4. Call handler.ProcessFn
|
||||
if err = handler.GetProcessFn()(ctx, req, wrapGin(ctx)); err != nil {
|
||||
p.writeError(ctx, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *GinProcessor) resolve(ctx context.Context, h handler.Handler, req *common.Request) error {
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
for _, r := range h.GetRequiredResolveParams() {
|
||||
resolver, err := p.resolveDispatcher.GetResolver(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve '%s' param: no resolver provided: %w", r, err)
|
||||
}
|
||||
|
||||
resolveValueName := r
|
||||
eg.Go(func() error {
|
||||
if resolveErr := resolver.Resolve(ctx, req); resolveErr != nil {
|
||||
return fmt.Errorf("failed to resolve '%s' value: %w", resolveValueName, resolveErr)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *GinProcessor) writeError(ctx *gin.Context, err error) {
|
||||
logger.Error(ctx, "error process request", logger.Err(err))
|
||||
switch {
|
||||
case errors.Is(err, errs.ErrorAccessDenied):
|
||||
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
|
||||
Code: http.StatusForbidden,
|
||||
Message: err.Error(),
|
||||
})
|
||||
case errors.Is(err, errs.ErrorSessionExpired):
|
||||
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
|
||||
Code: http.StatusForbidden,
|
||||
Message: err.Error(),
|
||||
})
|
||||
default:
|
||||
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
|
||||
Code: http.StatusInternalServerError,
|
||||
Message: "Internal Error",
|
||||
})
|
||||
}
|
||||
}
|
||||
package processor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
|
||||
"github.com/gin-gonic/gin"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type GinProcessor struct {
|
||||
rp *common.RequestPool
|
||||
authStorage storage.AuthStorage
|
||||
resolveDispatcher *resolvedispatcher.ResolveDispatcher
|
||||
}
|
||||
|
||||
func NewGinProcessor(
|
||||
authStorage storage.AuthStorage,
|
||||
resolveDispatcher *resolvedispatcher.ResolveDispatcher,
|
||||
) *GinProcessor {
|
||||
return &GinProcessor{
|
||||
rp: common.NewRequestPool(),
|
||||
authStorage: authStorage,
|
||||
resolveDispatcher: resolveDispatcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *GinProcessor) Process(handler handler.Handler) gin.HandlerFunc {
|
||||
return func(ctx *gin.Context) {
|
||||
req := common.NewRequestFromHttp(p.rp, ctx.Request)
|
||||
ctx.Request = ctx.Request.WithContext(context.WithValue(ctx.Request.Context(), "__request_id", req.ID))
|
||||
|
||||
// 1. Resolve the resolvers, collect all data required
|
||||
// 2. Try to process optional resolvers
|
||||
err := p.resolve(ctx, handler, req)
|
||||
if err != nil {
|
||||
p.writeError(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
// 3. Call preprocessing fn's, middlewares etc.
|
||||
if preprocessFn := handler.GetPreprocessFn(); preprocessFn != nil {
|
||||
if err = preprocessFn(ctx, req, wrapGin(ctx)); err != nil {
|
||||
p.writeError(ctx, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Call handler.ProcessFn
|
||||
if err = handler.GetProcessFn()(ctx, req, wrapGin(ctx)); err != nil {
|
||||
p.writeError(ctx, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *GinProcessor) resolve(ctx *gin.Context, h handler.Handler, req *common.Request) error {
|
||||
eg, c := errgroup.WithContext(ctx)
|
||||
for _, r := range h.GetRequiredResolveParams() {
|
||||
resolver, err := p.resolveDispatcher.GetResolver(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to resolve '%s' param: no resolver provided: %w", r, err)
|
||||
}
|
||||
|
||||
resolveValueName := r
|
||||
eg.Go(func() error {
|
||||
if resolveErr := resolver.Resolve(c, req, ctx); resolveErr != nil {
|
||||
return fmt.Errorf("failed to resolve '%s' value: %w", resolveValueName, resolveErr)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *GinProcessor) writeError(ctx *gin.Context, err error) {
|
||||
logger.Error(ctx, "error process request", logger.Err(err))
|
||||
|
||||
// TODO do a custom error handling for resolvers / handlers / processors etc
|
||||
|
||||
switch {
|
||||
case errors.Is(err, errs.ErrorAccessDenied):
|
||||
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
|
||||
Code: http.StatusForbidden,
|
||||
Message: err.Error(),
|
||||
})
|
||||
case errors.Is(err, errs.ErrorSessionExpired):
|
||||
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
|
||||
Code: http.StatusForbidden,
|
||||
Message: err.Error(),
|
||||
})
|
||||
default:
|
||||
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
|
||||
Code: http.StatusInternalServerError,
|
||||
Message: "Internal Error",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
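A sketch of how this processor is meant to be wired into a gin router. The import paths for the processor and auth resolver packages, the bootstrap package name, and the route path are assumptions inferred from the file layout in this diff; only Process, NewGinProcessor, the dispatcher, and NewAuthResolver come from the code above.

```go
package bootstrap

import (
	"context"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/processor"
	resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/auth"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
	"github.com/gin-gonic/gin"
)

// newRouter mounts a handler through GinProcessor.Process; the /api/:plugin_name
// route and the h argument are placeholders.
func newRouter(ctx context.Context, authStorage storage.AuthStorage, h handler.Handler) *gin.Engine {
	disp := resolvedispatcher.New()
	disp.RegisterResolver(ctx, auth.AuthResolverV1Name, auth.NewAuthResolver(authStorage))

	proc := processor.NewGinProcessor(authStorage, disp)

	r := gin.Default()
	r.POST("/api/:plugin_name", proc.Process(h))
	return r
}
```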
@ -1,26 +1,34 @@
|
||||
package processor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type ginWriter struct {
|
||||
ctx *gin.Context
|
||||
}
|
||||
|
||||
func wrapGin(ctx *gin.Context) ginWriter {
|
||||
return ginWriter{
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func (w ginWriter) Write(ctx context.Context, resp any) {
|
||||
w.ctx.JSON(http.StatusOK, resp)
|
||||
}
|
||||
|
||||
func (w ginWriter) SetCookie(name string, value string, maxAge int, path string, domain string, secure bool, httpOnly bool) {
|
||||
w.ctx.SetCookie(name, value, maxAge, path, domain, secure, httpOnly)
|
||||
}
|
||||
package processor
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type ginWriter struct {
|
||||
ctx *gin.Context
|
||||
}
|
||||
|
||||
func wrapGin(ctx *gin.Context) ginWriter {
|
||||
return ginWriter{
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func (w ginWriter) Write(ctx context.Context, resp any, opts ...handler.WriteOption) {
|
||||
params := &handler.WriteOptions{
|
||||
Code: http.StatusOK,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(params)
|
||||
}
|
||||
|
||||
w.ctx.JSON(params.Code, resp)
|
||||
}
|
||||
|
||||
func (w ginWriter) SetCookie(name string, value string, maxAge int, path string, domain string, secure bool, httpOnly bool) {
|
||||
w.ctx.SetCookie(name, value, maxAge, path, domain, secure, httpOnly)
|
||||
}
|
||||
|
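The new Write signature threads handler.WriteOption values through to the response code. The option type itself is not shown in this diff; the sketch below is one plausible shape for it, inferred purely from how ginWriter consumes WriteOptions.Code, and WithCode is hypothetical.

```go
package handler

// WriteOptions.Code is the only field visible in this diff; the option type and
// the WithCode helper below are assumptions about how it could be shaped.
type WriteOptions struct {
	Code int
}

type WriteOption func(*WriteOptions)

// WithCode overrides the default 200 response code, e.g.
// w.Write(ctx, resp, handler.WithCode(http.StatusCreated)).
func WithCode(code int) WriteOption {
	return func(o *WriteOptions) { o.Code = code }
}
```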
@ -1,7 +1,7 @@
|
||||
package processor
|
||||
|
||||
import "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
|
||||
|
||||
type Processor[H any] interface {
|
||||
Process(handler.Handler) H
|
||||
}
|
||||
package processor
|
||||
|
||||
import "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
|
||||
|
||||
type Processor[H any] interface {
|
||||
Process(handler.Handler) H
|
||||
}
|
||||
|
@ -1,38 +1,38 @@
|
||||
package reqcontext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type CtxKey string
|
||||
|
||||
const (
|
||||
UserIDCtxKey CtxKey = "_ctx_user_id"
|
||||
SessionCtxKey CtxKey = "_ctx_session"
|
||||
)
|
||||
|
||||
func WithUserID(parent context.Context, userID uuid.UUID) context.Context {
|
||||
return context.WithValue(parent, UserIDCtxKey, userID)
|
||||
}
|
||||
|
||||
func GetUserID(ctx context.Context) (uuid.UUID, error) {
|
||||
if id, ok := ctx.Value(UserIDCtxKey).(uuid.UUID); ok {
|
||||
return id, nil
|
||||
}
|
||||
return uuid.Nil, fmt.Errorf("userID not passed with context")
|
||||
}
|
||||
|
||||
func WithSession(parent context.Context, session *models.Session) context.Context {
|
||||
return context.WithValue(parent, SessionCtxKey, session)
|
||||
}
|
||||
|
||||
func GetSession(ctx context.Context) (*models.Session, error) {
|
||||
if ses, ok := ctx.Value(SessionCtxKey).(*models.Session); ok {
|
||||
return ses, nil
|
||||
}
|
||||
return nil, fmt.Errorf("session not passed with context")
|
||||
}
|
||||
package reqcontext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type CtxKey string
|
||||
|
||||
const (
|
||||
UserIDCtxKey CtxKey = "_ctx_user_id"
|
||||
SessionCtxKey CtxKey = "_ctx_session"
|
||||
)
|
||||
|
||||
func WithUserID(parent context.Context, userID uuid.UUID) context.Context {
|
||||
return context.WithValue(parent, UserIDCtxKey, userID)
|
||||
}
|
||||
|
||||
func GetUserID(ctx context.Context) (uuid.UUID, error) {
|
||||
if id, ok := ctx.Value(UserIDCtxKey).(uuid.UUID); ok {
|
||||
return id, nil
|
||||
}
|
||||
return uuid.Nil, fmt.Errorf("userID not passed with context")
|
||||
}
|
||||
|
||||
func WithSession(parent context.Context, session *auth.Session) context.Context {
|
||||
return context.WithValue(parent, SessionCtxKey, session)
|
||||
}
|
||||
|
||||
func GetSession(ctx context.Context) (*auth.Session, error) {
|
||||
if ses, ok := ctx.Value(SessionCtxKey).(*auth.Session); ok {
|
||||
return ses, nil
|
||||
}
|
||||
return nil, fmt.Errorf("session not passed with context")
|
||||
}
|
||||
|
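A round-trip sketch for the context helpers above; the reqcontext import path is inferred from the package name and may differ in the repository.

```go
package main

import (
	"context"
	"fmt"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/reqcontext"
	"github.com/google/uuid"
)

func main() {
	// Attach a user ID to the context, then read it back the way handlers would.
	ctx := reqcontext.WithUserID(context.Background(), uuid.New())

	id, err := reqcontext.GetUserID(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("user id from context:", id)
}
```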
@ -1,48 +1,48 @@
|
||||
package resolvedispatcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers"
|
||||
)
|
||||
|
||||
type ResolveDispatcher struct {
|
||||
m sync.RWMutex
|
||||
router map[string]resolvers.Resolver
|
||||
}
|
||||
|
||||
func New() *ResolveDispatcher {
|
||||
return &ResolveDispatcher{
|
||||
router: map[string]resolvers.Resolver{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *ResolveDispatcher) RegisterResolver(
|
||||
ctx context.Context,
|
||||
resolverName string,
|
||||
resolver resolvers.Resolver,
|
||||
) {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
if _, ok := d.router[resolverName]; ok {
|
||||
logger.Fatal(ctx, fmt.Sprintf("resolver '%s' is already registered in router", resolverName))
|
||||
}
|
||||
|
||||
d.router[resolverName] = resolver
|
||||
}
|
||||
|
||||
func (d *ResolveDispatcher) GetResolver(name string) (resolvers.Resolver, error) {
|
||||
d.m.RLock()
|
||||
defer d.m.RUnlock()
|
||||
|
||||
res, ok := d.router[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("resolver '%s' not found", name)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
package resolvedispatcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers"
|
||||
)
|
||||
|
||||
type ResolveDispatcher struct {
|
||||
m sync.RWMutex
|
||||
router map[string]resolvers.Resolver
|
||||
}
|
||||
|
||||
func New() *ResolveDispatcher {
|
||||
return &ResolveDispatcher{
|
||||
router: map[string]resolvers.Resolver{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *ResolveDispatcher) RegisterResolver(
|
||||
ctx context.Context,
|
||||
resolverName string,
|
||||
resolver resolvers.Resolver,
|
||||
) {
|
||||
d.m.Lock()
|
||||
defer d.m.Unlock()
|
||||
|
||||
if _, ok := d.router[resolverName]; ok {
|
||||
logger.Fatal(ctx, fmt.Sprintf("resolver '%s' is already registered in router", resolverName))
|
||||
}
|
||||
|
||||
d.router[resolverName] = resolver
|
||||
}
|
||||
|
||||
func (d *ResolveDispatcher) GetResolver(name string) (resolvers.Resolver, error) {
|
||||
d.m.RLock()
|
||||
defer d.m.RUnlock()
|
||||
|
||||
res, ok := d.router[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("resolver '%s' not found", name)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
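Registration is a startup-time operation: registering the same name twice is fatal by design. A minimal sketch, assuming the plugin_name import path added later in this diff and a hypothetical setup package:

```go
package setup

import (
	"context"

	resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
	pluginname "git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/plugin_name"
)

// registerResolvers wires up the resolvers a node needs at bootstrap. Each name may
// be registered exactly once; a duplicate name calls logger.Fatal inside
// RegisterResolver, so this should only run during startup.
func registerResolvers(ctx context.Context) *resolvedispatcher.ResolveDispatcher {
	disp := resolvedispatcher.New()
	disp.RegisterResolver(ctx, pluginname.PluginNameResolverName, &pluginname.PluginNameResolver{})
	return disp
}
```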
@ -1,95 +1,109 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
|
||||
)
|
||||
|
||||
const (
|
||||
csrfTokenCookie = "__Csrf_token"
|
||||
sessionTokenCookie = "__Session_token"
|
||||
)
|
||||
|
||||
type AuthResolver struct {
|
||||
authStorage storage.AuthStorage
|
||||
}
|
||||
|
||||
func (r *AuthResolver) Resolve(ctx context.Context, req *common.Request) error {
|
||||
return r.authorize(ctx, req)
|
||||
}
|
||||
|
||||
func (p *AuthResolver) authorize(ctx context.Context, req *common.Request) error {
|
||||
session, err := p.getSession(ctx, req)
|
||||
if err != nil && !errors.Is(err, http.ErrNoCookie) {
|
||||
return errs.ErrorUnauthorized
|
||||
}
|
||||
|
||||
if session == nil {
|
||||
return errs.ErrorUnauthorized
|
||||
}
|
||||
|
||||
if err := validateSession(ctx, req, session); err != nil {
|
||||
// TODO add audit log entry
|
||||
return errs.ErrorUnauthorized
|
||||
}
|
||||
|
||||
user, err := p.authStorage.GetUserByID(ctx, session.UserID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch user by id: %w", err)
|
||||
}
|
||||
logger.Debug(ctx, "[authorize] user authorized", slog.String("session_id", session.ID.String()))
|
||||
|
||||
req.User = user
|
||||
req.Session = session
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *AuthResolver) getSession(ctx context.Context, req *common.Request) (*models.Session, error) {
|
||||
token, err := common.GetValue[string](req.Metadata, sessionTokenCookie)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err)
|
||||
}
|
||||
|
||||
if len(token) == 0 {
|
||||
return nil, fmt.Errorf("session token or csrf token is empty")
|
||||
}
|
||||
|
||||
session, err := d.authStorage.GetSession(ctx, token)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch session from repo: %w", err)
|
||||
}
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
func validateSession(_ context.Context, req *common.Request, session *models.Session) error {
|
||||
if session == nil {
|
||||
return errs.ErrorAccessDenied
|
||||
}
|
||||
|
||||
csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
|
||||
}
|
||||
|
||||
if session.CsrfToken != csrfToken {
|
||||
return errs.ErrorAccessDenied
|
||||
}
|
||||
|
||||
if session.ExpiredAt.Before(time.Now()) {
|
||||
return errs.ErrorSessionExpired
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
|
||||
models "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
|
||||
)
|
||||
|
||||
const (
|
||||
AuthResolverV1Name = "auth.v1"
|
||||
)
|
||||
|
||||
const (
|
||||
csrfTokenCookie = "__Csrf_token"
|
||||
sessionTokenCookie = "__Session_token"
|
||||
)
|
||||
|
||||
type AuthResolver struct {
|
||||
authStorage storage.AuthStorage
|
||||
}
|
||||
|
||||
func NewAuthResolver(authStorage storage.AuthStorage) *AuthResolver {
|
||||
return &AuthResolver{
|
||||
authStorage: authStorage,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *AuthResolver) Resolve(ctx context.Context, req *common.Request, _ any) error {
|
||||
return r.authorize(ctx, req)
|
||||
}
|
||||
|
||||
func (r *AuthResolver) GetRequiredResolveParams() []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *AuthResolver) authorize(ctx context.Context, req *common.Request) error {
|
||||
session, err := p.getSession(ctx, req)
|
||||
if err != nil && !errors.Is(err, http.ErrNoCookie) {
|
||||
return errs.ErrorUnauthorized
|
||||
}
|
||||
|
||||
if session == nil {
|
||||
return errs.ErrorUnauthorized
|
||||
}
|
||||
|
||||
if err := validateSession(ctx, req, session); err != nil {
|
||||
// TODO add audit log entry
|
||||
return errs.ErrorUnauthorized
|
||||
}
|
||||
|
||||
user, err := p.authStorage.GetUserByID(ctx, session.UserID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch user by id: %w", err)
|
||||
}
|
||||
logger.Debug(ctx, "[authorize] user authorized", slog.String("session_id", session.ID.String()))
|
||||
|
||||
req.User = user
|
||||
req.Session = session
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *AuthResolver) getSession(ctx context.Context, req *common.Request) (*models.Session, error) {
|
||||
token, err := common.GetValue[string](req.Metadata, sessionTokenCookie)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err)
|
||||
}
|
||||
|
||||
if len(token) == 0 {
|
||||
return nil, fmt.Errorf("session token or csrf token is empty")
|
||||
}
|
||||
|
||||
session, err := d.authStorage.GetSession(ctx, token)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch session from repo: %w", err)
|
||||
}
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
func validateSession(_ context.Context, req *common.Request, session *models.Session) error {
|
||||
if session == nil {
|
||||
return errs.ErrorAccessDenied
|
||||
}
|
||||
|
||||
csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
|
||||
}
|
||||
|
||||
if session.CsrfToken != csrfToken {
|
||||
return errs.ErrorAccessDenied
|
||||
}
|
||||
|
||||
if session.ExpiredAt.Before(time.Now()) {
|
||||
return errs.ErrorSessionExpired
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
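For context, the sketch below shows the fields a session needs in order to pass validateSession above: the CSRF token must match the "__Csrf_token" metadata value and ExpiredAt must lie in the future. The package name and token values are placeholders.

```go
package authexample

import (
	"time"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
	"github.com/google/uuid"
)

// newSession builds a session that validateSession accepts as long as csrfToken
// matches the client's "__Csrf_token" cookie and ttl is positive.
func newSession(userID uuid.UUID, csrfToken string, ttl time.Duration) *auth.Session {
	now := time.Now()
	return &auth.Session{
		ID:           uuid.New(),
		SessionToken: uuid.NewString(), // opaque token persisted via AuthStorage.AddSession
		CsrfToken:    csrfToken,
		UserID:       userID,
		CreatedAt:    now,
		ExpiredAt:    now.Add(ttl),
	}
}
```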
33
internal/resolvers/plugin_name/plugin_name.go
Normal file
@ -0,0 +1,33 @@
package pluginname

import (
	"context"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
	"github.com/gin-gonic/gin"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const (
	PluginNameResolverName = "plugin_name"
)

type PluginNameResolver struct{}

func (p *PluginNameResolver) Resolve(ctx context.Context, req *common.Request, rawReq any) error {
	ginCtx, ok := rawReq.(*gin.Context)
	if !ok {
		return status.Errorf(codes.Internal, "invalid request type")
	}
	pluginName := ginCtx.Param("plugin_name")
	if pluginName == "" {
		return status.Error(codes.InvalidArgument, "plugin name is empty")
	}
	req.ResolveValues.Store(PluginNameResolverName, pluginName)
	return nil
}

func (p *PluginNameResolver) GetRequiredResolveParams() []string {
	return nil
}
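Reading the value back inside a handler could look like the sketch below. The resolver above only shows ResolveValues.Store, so a matching sync.Map-style Load and the handlers package name are assumptions.

```go
package handlers

import (
	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
	pluginname "git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/plugin_name"
)

// pluginNameFrom pulls the resolved plugin name out of the request, returning
// false if the resolver never ran or stored a non-string value.
func pluginNameFrom(req *common.Request) (string, bool) {
	v, ok := req.ResolveValues.Load(pluginname.PluginNameResolverName)
	if !ok {
		return "", false
	}
	name, ok := v.(string)
	return name, ok
}
```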
@ -1,11 +1,12 @@
|
||||
package resolvers
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
|
||||
)
|
||||
|
||||
type Resolver interface {
|
||||
Resolve(ctx context.Context, req *common.Request) error
|
||||
}
|
||||
package resolvers

import (
	"context"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
)

type Resolver interface {
	Resolve(ctx context.Context, req *common.Request, rawReq any) error
	GetRequiredResolveParams() []string
}
6
internal/resolvers/seal/acl.go
Normal file
@ -0,0 +1,6 @@
package seal

// TODO
type SealResolver struct {
	wardenClient any
}
@ -1,19 +1,19 @@
|
||||
package audit
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
|
||||
)
|
||||
|
||||
type Repository struct {
|
||||
db *pgx.Conn
|
||||
}
|
||||
|
||||
func (r *Repository) AddEntry(ctx context.Context, entry audit.AuditLogEntry) error {
|
||||
logger.Warn(ctx, "[Repository][AddEntry] not implemented yet!")
|
||||
return nil
|
||||
}
|
||||
package audit
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
|
||||
)
|
||||
|
||||
type Repository struct {
|
||||
db *pgx.Conn
|
||||
}
|
||||
|
||||
func (r *Repository) AddEntry(ctx context.Context, entry audit.AuditLogEntry) error {
|
||||
logger.Warn(ctx, "[Repository][AddEntry] not implemented yet!")
|
||||
return nil
|
||||
}
|
||||
|
@ -1,86 +1,86 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
)
|
||||
|
||||
type Storage struct {
|
||||
lm *sync.Map
|
||||
dir string
|
||||
}
|
||||
|
||||
func NewFSStorage(dir string) *Storage {
|
||||
return &Storage{
|
||||
lm: &sync.Map{},
|
||||
dir: dir,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) GetFile(ctx context.Context, id int64) (*os.File, error) {
|
||||
tx := lockFile(s.lm, id)
|
||||
defer unlockFile(s.lm, id, tx)
|
||||
|
||||
file, err := os.Open(getFilePath(s.dir, id))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err = file.Close(); err != nil {
|
||||
logger.Error(ctx, "[getFile] close error", logger.Err(err))
|
||||
}
|
||||
}()
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (s *Storage) SaveBlob(ctx context.Context, id int64, data []byte) error {
|
||||
tx := lockFile(s.lm, id)
|
||||
defer unlockFile(s.lm, id, tx)
|
||||
|
||||
file, err := os.Open(getFilePath(s.dir, id))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err = file.Close(); err != nil {
|
||||
logger.Error(ctx, "[saveFile] close error", logger.Err(err))
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err = file.Write(data); err != nil {
|
||||
return fmt.Errorf("failed to write data to file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) DeleteFile(ctx context.Context, id int64) error {
|
||||
tx := lockFile(s.lm, id)
|
||||
defer unlockFile(s.lm, id, tx)
|
||||
|
||||
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFilePath(dir string, id int64) string {
|
||||
return fmt.Sprintf("%s/%v", dir, id)
|
||||
}
|
||||
|
||||
func lockFile(lm *sync.Map, id int64) sync.Locker {
|
||||
_m := &sync.Mutex{}
|
||||
many, _ := lm.LoadOrStore(id, _m)
|
||||
_m, _ = many.(*sync.Mutex)
|
||||
_m.Lock()
|
||||
return _m
|
||||
}
|
||||
|
||||
func unlockFile(lm *sync.Map, id int64, tx sync.Locker) {
|
||||
tx.Unlock()
|
||||
lm.Delete(id)
|
||||
}
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
)
|
||||
|
||||
type Storage struct {
|
||||
lm *sync.Map
|
||||
dir string
|
||||
// If the file does not belong to the current node's FS, redirect to the corresponding node
// cluster DrainCloudCluster
|
||||
}
|
||||
|
||||
func NewFSStorage(dir string) *Storage {
|
||||
return &Storage{
|
||||
lm: &sync.Map{},
|
||||
dir: dir,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Storage) GetFile(ctx context.Context, id int64) (*os.File, error) {
|
||||
tx := lockFile(s.lm, id)
|
||||
defer unlockFile(s.lm, id, tx)
|
||||
|
||||
file, err := os.Open(getFilePath(s.dir, id))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err = file.Close(); err != nil {
|
||||
logger.Error(ctx, "[getFile] close error", logger.Err(err))
|
||||
}
|
||||
}()
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (s *Storage) SaveBlob(ctx context.Context, id int64, data []byte) error {
|
||||
tx := lockFile(s.lm, id)
|
||||
defer unlockFile(s.lm, id, tx)
|
||||
|
||||
file, err := os.OpenFile(getFilePath(s.dir, id), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err = file.Close(); err != nil {
|
||||
logger.Error(ctx, "[saveFile] close error", logger.Err(err))
|
||||
}
|
||||
}()
|
||||
|
||||
if _, err = file.Write(data); err != nil {
|
||||
return fmt.Errorf("failed to write data to file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Storage) DeleteFile(ctx context.Context, id int64) error {
|
||||
tx := lockFile(s.lm, id)
|
||||
defer unlockFile(s.lm, id, tx)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFilePath(dir string, id int64) string {
|
||||
return fmt.Sprintf("%s/%v", dir, id)
|
||||
}
|
||||
|
||||
func lockFile(lm *sync.Map, id int64) sync.Locker {
|
||||
_m := &sync.Mutex{}
|
||||
many, _ := lm.LoadOrStore(id, _m)
|
||||
_m, _ = many.(*sync.Mutex)
|
||||
_m.Lock()
|
||||
return _m
|
||||
}
|
||||
|
||||
func unlockFile(lm *sync.Map, id int64, tx sync.Locker) {
|
||||
tx.Unlock()
|
||||
lm.Delete(id)
|
||||
}
|
||||
|
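The lockFile/unlockFile helpers above implement a lazily created, per-file-ID mutex. A standalone sketch of the same pattern, reduced to a single helper:

```go
package main

import "sync"

// withFileLock mirrors the lockFile/unlockFile pattern above: one mutex per file id,
// created lazily via LoadOrStore and deleted from the map again after unlock.
func withFileLock(lm *sync.Map, id int64, fn func()) {
	m := &sync.Mutex{}
	actual, _ := lm.LoadOrStore(id, m)
	mu := actual.(*sync.Mutex)
	mu.Lock()
	defer func() {
		mu.Unlock()
		lm.Delete(id)
	}()
	fn()
}

func main() {
	var lm sync.Map
	withFileLock(&lm, 42, func() {
		// critical section for file 42
	})
}
```

One design note: because the entry is deleted on unlock, a goroutine that loaded the mutex just before the delete can end up serialized on a different mutex than a later caller for the same ID; the helpers in the diff share that property.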
@ -1,39 +1,40 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
|
||||
auditmodels "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type Database interface {
|
||||
AuthStorage
|
||||
}
|
||||
|
||||
type AuthStorage interface {
|
||||
AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error
|
||||
GetUserByLogin(ctx context.Context, login string) (*models.User, error)
|
||||
GetUserByID(ctx context.Context, id uuid.UUID) (*models.User, error)
|
||||
|
||||
AddSession(ctx context.Context, ses *models.Session) (uuid.UUID, error)
|
||||
GetSession(ctx context.Context, sessionToken string) (*models.Session, error)
|
||||
RemoveSession(ctx context.Context, id uuid.UUID) error
|
||||
}
|
||||
|
||||
type AuthAuditLogStorage interface {
|
||||
AddEntry(ctx context.Context, entry auditmodels.AuditLogEntry) error
|
||||
}
|
||||
|
||||
type MetaStorage interface {
|
||||
SaveMetadata(ctx context.Context, meta files.FileMetadata) (uuid.UUID, error)
|
||||
}
|
||||
|
||||
type BlobStorage interface {
|
||||
GetFile(ctx context.Context, id uuid.UUID) (*os.File, error)
|
||||
SaveBlob(ctx context.Context, id uuid.UUID, data []byte) error
|
||||
DeleteFile(ctx context.Context, id uuid.UUID) error
|
||||
}
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
auditmodels "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type Database interface {
|
||||
AuthStorage
|
||||
}
|
||||
|
||||
type AuthStorage interface {
|
||||
AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error
|
||||
GetUserByLogin(ctx context.Context, login string) (*auth.User, error)
|
||||
GetUserByID(ctx context.Context, id uuid.UUID) (*auth.User, error)
|
||||
|
||||
AddSession(ctx context.Context, ses *auth.Session) (uuid.UUID, error)
|
||||
GetSession(ctx context.Context, sessionToken string) (*auth.Session, error)
|
||||
RemoveSession(ctx context.Context, id uuid.UUID) error
|
||||
}
|
||||
|
||||
type AuthAuditLogStorage interface {
|
||||
AddEntry(ctx context.Context, entry auditmodels.AuditLogEntry) error
|
||||
}
|
||||
|
||||
type MetaStorage interface {
|
||||
SaveMetadata(ctx context.Context, meta files.FileMetadata) (uuid.UUID, error)
|
||||
}
|
||||
|
||||
type BlobStorage interface {
|
||||
GetFile(ctx context.Context, id uuid.UUID) (*os.File, error)
|
||||
SaveBlob(ctx context.Context, id uuid.UUID, data []byte) error
|
||||
DeleteFile(ctx context.Context, id uuid.UUID) error
|
||||
GetFSLink(ctx context.Context, fileID uuid.UUID) (string, error)
|
||||
}
|
||||
|
@ -1,49 +1,49 @@
|
||||
package audit
|
||||
|
||||
import "time"
|
||||
|
||||
type EventType int
|
||||
|
||||
const (
|
||||
EventUnspecified EventType = iota
|
||||
EventSuccessfullLogin
|
||||
EventFailedLogin
|
||||
EventSuccessfullRegister
|
||||
EventFailedRegister
|
||||
EventSuccessfullAuth
|
||||
EventFailedAuth
|
||||
EventUserUpdated
|
||||
)
|
||||
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
SeverityAlert = 0
|
||||
SeverityWarning = 10
|
||||
SeverityInfo = 100
|
||||
SeverityNotice = 200
|
||||
)
|
||||
|
||||
type Actor struct {
|
||||
ActorSysName string
|
||||
RemoteIP string
|
||||
ID int64
|
||||
}
|
||||
|
||||
const (
|
||||
ActorDrainCloudCore = "_actor_draincloud_core"
|
||||
ActorUser = "user"
|
||||
)
|
||||
|
||||
type AuditLogEntry struct {
|
||||
EventType EventType
|
||||
// Who caused changes
|
||||
Actor Actor
|
||||
Severity Severity
|
||||
SessionID int64
|
||||
CreatedAt time.Time
|
||||
// What changed
|
||||
Object string
|
||||
// How it was changed
|
||||
Action string
|
||||
}
|
||||
package audit
|
||||
|
||||
import "time"
|
||||
|
||||
type EventType int
|
||||
|
||||
const (
|
||||
EventUnspecified EventType = iota
|
||||
EventSuccessfullLogin
|
||||
EventFailedLogin
|
||||
EventSuccessfullRegister
|
||||
EventFailedRegister
|
||||
EventSuccessfullAuth
|
||||
EventFailedAuth
|
||||
EventUserUpdated
|
||||
)
|
||||
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
SeverityAlert = 0
|
||||
SeverityWarning = 10
|
||||
SeverityInfo = 100
|
||||
SeverityNotice = 200
|
||||
)
|
||||
|
||||
type Actor struct {
|
||||
ActorSysName string
|
||||
RemoteIP string
|
||||
ID int64
|
||||
}
|
||||
|
||||
const (
|
||||
ActorDrainCloudCore = "_actor_draincloud_core"
|
||||
ActorUser = "user"
|
||||
)
|
||||
|
||||
type AuditLogEntry struct {
|
||||
EventType EventType
|
||||
// Who caused changes
|
||||
Actor Actor
|
||||
Severity Severity
|
||||
SessionID int64
|
||||
CreatedAt time.Time
|
||||
// What changed
|
||||
Object string
|
||||
// How it was changed
|
||||
Action string
|
||||
}
|
||||
|
@ -1,25 +1,25 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type Session struct {
|
||||
ID uuid.UUID
|
||||
SessionToken string
|
||||
CsrfToken string
|
||||
UserID uuid.UUID
|
||||
CreatedAt time.Time
|
||||
ExpiredAt time.Time
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID uuid.UUID
|
||||
Username string
|
||||
Login string
|
||||
PasswordHash []byte
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
}
|
||||
package auth
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type Session struct {
|
||||
ID uuid.UUID
|
||||
SessionToken string
|
||||
CsrfToken string
|
||||
UserID uuid.UUID
|
||||
CreatedAt time.Time
|
||||
ExpiredAt time.Time
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID uuid.UUID
|
||||
Username string
|
||||
Login string
|
||||
PasswordHash []byte
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
}
|
@ -1,13 +1,13 @@
|
||||
package files
|
||||
|
||||
import "github.com/google/uuid"
|
||||
|
||||
type FileMetadata struct {
|
||||
Id uuid.UUID
|
||||
Name string
|
||||
UserID int64
|
||||
Ext string
|
||||
Type string
|
||||
FSLink string
|
||||
Size int64
|
||||
}
|
||||
package files
|
||||
|
||||
import "github.com/google/uuid"
|
||||
|
||||
type FileMetadata struct {
|
||||
Id uuid.UUID
|
||||
Name string
|
||||
UserID int64
|
||||
Ext string
|
||||
Type string
|
||||
FSLink string
|
||||
Size int64
|
||||
}
|
||||
|
@ -1,147 +1,147 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/closer"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
|
||||
"github.com/google/uuid"
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
type Database struct {
|
||||
db *pgx.Conn
|
||||
cluster *ShardCluster
|
||||
}
|
||||
|
||||
func New(ctx context.Context, dsn string) *Database {
|
||||
db, err := pgx.Connect(ctx, dsn)
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "failed to connect to postgres", logger.Err(err))
|
||||
}
|
||||
|
||||
closer.Add(func() error {
|
||||
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer cancel()
|
||||
return db.Close(ctx)
|
||||
})
|
||||
|
||||
return &Database{db: db}
|
||||
}
|
||||
|
||||
type dbtx interface {
|
||||
Exec(ctx context.Context, stmt string, args ...any) (pgconn.CommandTag, error)
|
||||
QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
|
||||
Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error)
|
||||
}
|
||||
|
||||
func (d *Database) AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error {
|
||||
return addUser(ctx, d.db, id, login, username, passwordHash)
|
||||
}
|
||||
|
||||
func (d *Database) GetUserByID(ctx context.Context, id uuid.UUID) (*models.User, error) {
|
||||
return getUserByID(ctx, d.db, id)
|
||||
}
|
||||
|
||||
func (d *Database) GetUserByLogin(ctx context.Context, login string) (*models.User, error) {
|
||||
return getUserByLogin(ctx, d.db, login)
|
||||
}
|
||||
|
||||
func (d *Database) AddSession(ctx context.Context, ses *models.Session) (uuid.UUID, error) {
|
||||
return addSession(ctx, d.db, ses)
|
||||
}
|
||||
|
||||
func (d *Database) GetSession(ctx context.Context, sessionToken string) (*models.Session, error) {
|
||||
const stmt = `SELECT
|
||||
s.id, s.session_token, s.csrf_token, s.user_id, s.created_at, s.expired_at
|
||||
FROM sessions as s
|
||||
WHERE s.session_token = $1;`
|
||||
|
||||
row := d.db.QueryRow(ctx, stmt, sessionToken)
|
||||
|
||||
var (
|
||||
id uuid.UUID
|
||||
sesToken, csrfToken string
|
||||
userID uuid.UUID
|
||||
createdAt sql.NullTime
|
||||
expiredAt sql.NullTime
|
||||
)
|
||||
|
||||
if err := row.Scan(&id, &sesToken, &csrfToken, &userID, &createdAt, &expiredAt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &models.Session{
|
||||
ID: id,
|
||||
SessionToken: sesToken,
|
||||
CsrfToken: csrfToken,
|
||||
UserID: userID,
|
||||
CreatedAt: createdAt.Time,
|
||||
ExpiredAt: expiredAt.Time,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Database) RemoveSession(ctx context.Context, id uuid.UUID) error {
|
||||
const stmt = `DELETE FROM sessions WHERE id = $1;`
|
||||
_, err := d.db.Exec(ctx, stmt, id)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Database) RemoveExpiredSessions(ctx context.Context) error {
|
||||
const stmt = `DELETE FROM sessions WHERE expired_at < $1;`
|
||||
res, err := d.db.Exec(ctx, stmt, time.Now())
|
||||
logger.Notice(ctx, "[Database][RemoveExpiredSessions] sessions cleanup", slog.Int64("removed", res.RowsAffected()))
|
||||
return err
|
||||
}
|
||||
|
||||
func addUser(ctx context.Context, conn dbtx, id uuid.UUID, login string, username string, passwordHash []byte) error {
|
||||
const stmt = `INSERT INTO users (id,login,username,password)
|
||||
VALUES ($1,$2,$3,$4);`
|
||||
|
||||
_, err := conn.Exec(ctx, stmt, id, login, username, passwordHash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert user data into users table: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getUserByID(ctx context.Context, conn dbtx, id uuid.UUID) (*models.User, error) {
|
||||
const stmt = `SELECT * FROM users WHERE id = $1 LIMIT 1`
|
||||
u := new(models.User)
|
||||
row := conn.QueryRow(ctx, stmt, id)
|
||||
if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch user by id: %w", err)
|
||||
}
|
||||
|
||||
return u, nil
|
||||
}
|
||||
|
||||
func getUserByLogin(ctx context.Context, conn dbtx, login string) (*models.User, error) {
|
||||
const stmt = `SELECT * FROM users WHERE login = $1 LIMIT 1`
|
||||
u := new(models.User)
|
||||
row := conn.QueryRow(ctx, stmt, login)
|
||||
if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch user by login: %w", err)
|
||||
}
|
||||
|
||||
return u, nil
|
||||
}
|
||||
|
||||
func addSession(ctx context.Context, conn dbtx, session *models.Session) (uuid.UUID, error) {
|
||||
const stmt = `INSERT INTO sessions (id,session_token, csrf_token, user_id,
|
||||
created_at, expired_at) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id;`
|
||||
var id uuid.UUID
|
||||
row := conn.QueryRow(ctx, stmt, session.ID, session.SessionToken, session.CsrfToken, session.UserID, session.CreatedAt, session.ExpiredAt)
|
||||
if err := row.Scan(&id); err != nil {
|
||||
return uuid.Nil, fmt.Errorf("failed to insert new session: %w", err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/closer"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
|
||||
"github.com/google/uuid"
|
||||
"github.com/jackc/pgx/v5"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
)
|
||||
|
||||
type Database struct {
|
||||
db *pgx.Conn
|
||||
cluster *ShardCluster
|
||||
}
|
||||
|
||||
func New(ctx context.Context, dsn string) *Database {
|
||||
db, err := pgx.Connect(ctx, dsn)
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "failed to connect to postgres", logger.Err(err))
|
||||
}
|
||||
|
||||
closer.Add(func() error {
|
||||
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer cancel()
|
||||
return db.Close(ctx)
|
||||
})
|
||||
|
||||
return &Database{db: db}
|
||||
}
|
||||
|
||||
type dbtx interface {
|
||||
Exec(ctx context.Context, stmt string, args ...any) (pgconn.CommandTag, error)
|
||||
QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
|
||||
Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error)
|
||||
}
|
||||
|
||||
func (d *Database) AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error {
|
||||
return addUser(ctx, d.db, id, login, username, passwordHash)
|
||||
}
|
||||
|
||||
func (d *Database) GetUserByID(ctx context.Context, id uuid.UUID) (*auth.User, error) {
|
||||
return getUserByID(ctx, d.db, id)
|
||||
}
|
||||
|
||||
func (d *Database) GetUserByLogin(ctx context.Context, login string) (*auth.User, error) {
|
||||
return getUserByLogin(ctx, d.db, login)
|
||||
}
|
||||
|
||||
func (d *Database) AddSession(ctx context.Context, ses *auth.Session) (uuid.UUID, error) {
|
||||
return addSession(ctx, d.db, ses)
|
||||
}
|
||||
|
||||
func (d *Database) GetSession(ctx context.Context, sessionToken string) (*auth.Session, error) {
|
||||
const stmt = `SELECT
|
||||
s.id, s.session_token, s.csrf_token, s.user_id, s.created_at, s.expired_at
|
||||
FROM sessions as s
|
||||
WHERE s.session_token = $1;`
|
||||
|
||||
row := d.db.QueryRow(ctx, stmt, sessionToken)
|
||||
|
||||
var (
|
||||
id uuid.UUID
|
||||
sesToken, csrfToken string
|
||||
userID uuid.UUID
|
||||
createdAt sql.NullTime
|
||||
expiredAt sql.NullTime
|
||||
)
|
||||
|
||||
if err := row.Scan(&id, &sesToken, &csrfToken, &userID, &createdAt, &expiredAt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &auth.Session{
|
||||
ID: id,
|
||||
SessionToken: sesToken,
|
||||
CsrfToken: csrfToken,
|
||||
UserID: userID,
|
||||
CreatedAt: createdAt.Time,
|
||||
ExpiredAt: expiredAt.Time,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Database) RemoveSession(ctx context.Context, id uuid.UUID) error {
|
||||
const stmt = `DELETE FROM sessions WHERE id = $1;`
|
||||
_, err := d.db.Exec(ctx, stmt, id)
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Database) RemoveExpiredSessions(ctx context.Context) error {
|
||||
const stmt = `DELETE FROM sessions WHERE expired_at < $1;`
|
||||
res, err := d.db.Exec(ctx, stmt, time.Now())
|
||||
logger.Notice(ctx, "[Database][RemoveExpiredSessions] sessions cleanup", slog.Int64("removed", res.RowsAffected()))
|
||||
return err
|
||||
}
|
||||
|
||||
func addUser(ctx context.Context, conn dbtx, id uuid.UUID, login string, username string, passwordHash []byte) error {
|
||||
const stmt = `INSERT INTO users (id,login,username,password)
|
||||
VALUES ($1,$2,$3,$4);`
|
||||
|
||||
_, err := conn.Exec(ctx, stmt, id, login, username, passwordHash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert user data into users table: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getUserByID(ctx context.Context, conn dbtx, id uuid.UUID) (*auth.User, error) {
|
||||
const stmt = `SELECT * FROM users WHERE id = $1 LIMIT 1`
|
||||
u := new(auth.User)
|
||||
row := conn.QueryRow(ctx, stmt, id)
|
||||
if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch user by id: %w", err)
|
||||
}
|
||||
|
||||
return u, nil
|
||||
}
|
||||
|
||||
func getUserByLogin(ctx context.Context, conn dbtx, login string) (*auth.User, error) {
|
||||
const stmt = `SELECT * FROM users WHERE login = $1 LIMIT 1`
|
||||
u := new(auth.User)
|
||||
row := conn.QueryRow(ctx, stmt, login)
|
||||
if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch user by login: %w", err)
|
||||
}
|
||||
|
||||
return u, nil
|
||||
}
|
||||
|
||||
func addSession(ctx context.Context, conn dbtx, session *auth.Session) (uuid.UUID, error) {
|
||||
const stmt = `INSERT INTO sessions (id,session_token, csrf_token, user_id,
|
||||
created_at, expired_at) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id;`
|
||||
var id uuid.UUID
|
||||
row := conn.QueryRow(ctx, stmt, session.ID, session.SessionToken, session.CsrfToken, session.UserID, session.CreatedAt, session.ExpiredAt)
|
||||
if err := row.Scan(&id); err != nil {
|
||||
return uuid.Nil, fmt.Errorf("failed to insert new session: %w", err)
|
||||
}
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
@ -1,41 +1,41 @@
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"hash/crc32"
|
||||
"log/slog"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"github.com/google/uuid"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
type ShardMap = map[uint32]*pgx.ConnConfig
|
||||
|
||||
type ShardCluster struct {
|
||||
m sync.Mutex
|
||||
shards []*pgx.Conn
|
||||
}
|
||||
|
||||
func NewShardCluster(ctx context.Context, shardMap ShardMap) *ShardCluster {
|
||||
shards := make([]*pgx.Conn, len(shardMap))
|
||||
for n, cfg := range shardMap {
|
||||
conn, err := pgx.ConnectConfig(ctx, cfg)
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "failed to connect to shard", slog.Uint64("num", uint64(n)), logger.Err(err))
|
||||
}
|
||||
shards[n] = conn
|
||||
}
|
||||
return &ShardCluster{shards: shards}
|
||||
}
|
||||
|
||||
func (c *ShardCluster) PickShard(n uint32) *pgx.Conn {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
return c.shards[n]
|
||||
}
|
||||
|
||||
func UUIDShardFn(id uuid.UUID, numShards uint32) uint32 {
|
||||
return crc32.ChecksumIEEE(id[:]) % numShards
|
||||
}
|
||||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"hash/crc32"
|
||||
"log/slog"
|
||||
"sync"
|
||||
|
||||
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
|
||||
"github.com/google/uuid"
|
||||
"github.com/jackc/pgx/v5"
|
||||
)
|
||||
|
||||
type ShardMap = map[uint32]*pgx.ConnConfig
|
||||
|
||||
type ShardCluster struct {
|
||||
m sync.Mutex
|
||||
shards []*pgx.Conn
|
||||
}
|
||||
|
||||
func NewShardCluster(ctx context.Context, shardMap ShardMap) *ShardCluster {
|
||||
shards := make([]*pgx.Conn, len(shardMap))
|
||||
for n, cfg := range shardMap {
|
||||
conn, err := pgx.ConnectConfig(ctx, cfg)
|
||||
if err != nil {
|
||||
logger.Fatal(ctx, "failed to connect to shard", slog.Uint64("num", uint64(n)), logger.Err(err))
|
||||
}
|
||||
shards[n] = conn
|
||||
}
|
||||
return &ShardCluster{shards: shards}
|
||||
}
|
||||
|
||||
func (c *ShardCluster) PickShard(n uint32) *pgx.Conn {
|
||||
c.m.Lock()
|
||||
defer c.m.Unlock()
|
||||
return c.shards[n]
|
||||
}
|
||||
|
||||
func UUIDShardFn(id uuid.UUID, numShards uint32) uint32 {
|
||||
return crc32.ChecksumIEEE(id[:]) % numShards
|
||||
}
|
||||
|
@ -1,66 +1,66 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
type txKey struct{}
|
||||
|
||||
var ctxKey txKey = txKey{}
|
||||
|
||||
type DBTX interface {
|
||||
sqlx.Ext
|
||||
sqlx.ExtContext
|
||||
}
|
||||
|
||||
func Transaction(ctx context.Context, db *sqlx.DB, fn func(context.Context) error) (err error) {
|
||||
tx := txFromContext(ctx)
|
||||
if tx == nil {
|
||||
tx, err = db.BeginTxx(ctx, &sql.TxOptions{
|
||||
Isolation: sql.LevelRepeatableRead,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin tx: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
}
|
||||
if err != nil {
|
||||
if rbErr := tx.Rollback(); rbErr != nil {
|
||||
err = errors.Join(err, rbErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ctx = txContext(ctx, tx)
|
||||
}
|
||||
|
||||
return fn(ctx)
|
||||
}
|
||||
|
||||
func Conn(ctx context.Context, db DBTX) DBTX {
|
||||
if tx := txFromContext(ctx); tx != nil {
|
||||
return tx
|
||||
}
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
func txFromContext(ctx context.Context) *sqlx.Tx {
|
||||
if tx, ok := ctx.Value(ctxKey).(*sqlx.Tx); ok {
|
||||
return tx
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func txContext(parent context.Context, tx *sqlx.Tx) context.Context {
|
||||
return context.WithValue(parent, tx, ctxKey)
|
||||
}
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
type txKey struct{}
|
||||
|
||||
var ctxKey txKey = txKey{}
|
||||
|
||||
type DBTX interface {
|
||||
sqlx.Ext
|
||||
sqlx.ExtContext
|
||||
}
|
||||
|
||||
func Transaction(ctx context.Context, db *sqlx.DB, fn func(context.Context) error) (err error) {
|
||||
tx := txFromContext(ctx)
|
||||
if tx == nil {
|
||||
tx, err = db.BeginTxx(ctx, &sql.TxOptions{
|
||||
Isolation: sql.LevelRepeatableRead,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to begin tx: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = tx.Commit()
|
||||
}
|
||||
if err != nil {
|
||||
if rbErr := tx.Rollback(); rbErr != nil {
|
||||
err = errors.Join(err, rbErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
ctx = txContext(ctx, tx)
|
||||
}
|
||||
|
||||
return fn(ctx)
|
||||
}
|
||||
|
||||
func Conn(ctx context.Context, db DBTX) DBTX {
|
||||
if tx := txFromContext(ctx); tx != nil {
|
||||
return tx
|
||||
}
|
||||
|
||||
return db
|
||||
}
|
||||
|
||||
func txFromContext(ctx context.Context) *sqlx.Tx {
|
||||
if tx, ok := ctx.Value(ctxKey).(*sqlx.Tx); ok {
|
||||
return tx
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func txContext(parent context.Context, tx *sqlx.Tx) context.Context {
|
||||
return context.WithValue(parent, tx, ctxKey)
|
||||
}
|
||||
|
@ -1,87 +1,88 @@
|
||||
package pool
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultMaxConns = 20
|
||||
defaultStrategy = &RoundrobinStrategy{
|
||||
lastSelected: initialRoundrobinAtomic(),
|
||||
}
|
||||
)
|
||||
|
||||
func initialRoundrobinAtomic() atomic.Int64 {
|
||||
a := atomic.Int64{}
|
||||
a.Store(-1)
|
||||
return a
|
||||
}
|
||||
|
||||
type ConnSelectionStrategy interface {
|
||||
Select() int
|
||||
}
|
||||
|
||||
type RoundrobinStrategy struct {
|
||||
lastSelected atomic.Int64
|
||||
}
|
||||
|
||||
func (r *RoundrobinStrategy) Select() int {
|
||||
return int(r.lastSelected.Add(1))
|
||||
}
|
||||
|
||||
type ConnPool struct {
|
||||
m sync.RWMutex
|
||||
strategy ConnSelectionStrategy
|
||||
conns []net.Conn
|
||||
}
|
||||
|
||||
type newConnPoolOpts struct {
|
||||
strategy ConnSelectionStrategy
|
||||
maxConns int
|
||||
}
|
||||
|
||||
func newNewConnPoolOpts() newConnPoolOpts {
|
||||
return newConnPoolOpts{
|
||||
strategy: defaultStrategy,
|
||||
maxConns: defaultMaxConns,
|
||||
}
|
||||
}
|
||||
|
||||
type NewConnPoolOpt func(p *newConnPoolOpts)
|
||||
|
||||
func WithStrategy(s ConnSelectionStrategy) NewConnPoolOpt {
|
||||
return func(p *newConnPoolOpts) {
|
||||
p.strategy = s
|
||||
}
|
||||
}
|
||||
|
||||
func WithMaxConns(mc int) NewConnPoolOpt {
|
||||
return func(p *newConnPoolOpts) {
|
||||
p.maxConns = mc
|
||||
}
|
||||
}
|
||||
|
||||
func NewConnPool(opts ...NewConnPoolOpt) *ConnPool {
|
||||
o := newNewConnPoolOpts()
|
||||
for _, opt := range opts {
|
||||
opt(&o)
|
||||
}
|
||||
return &ConnPool{
|
||||
conns: make([]net.Conn, 0),
|
||||
strategy: o.strategy,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ConnPool) SelectConn() net.Conn {
|
||||
p.m.RLock()
|
||||
defer p.m.RUnlock()
|
||||
return p.conns[p.strategy.Select()]
|
||||
}
|
||||
|
||||
func (p *ConnPool) AddConn(conn net.Conn) {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
p.conns = append(p.conns, conn)
|
||||
}
|
||||
// TODO wtf?
|
||||
package pool
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultMaxConns = 20
|
||||
defaultStrategy = &RoundrobinStrategy{
|
||||
lastSelected: initialRoundrobinAtomic(),
|
||||
}
|
||||
)
|
||||
|
||||
func initialRoundrobinAtomic() atomic.Int64 {
|
||||
a := atomic.Int64{}
|
||||
a.Store(-1)
|
||||
return a
|
||||
}
|
||||
|
||||
type ConnSelectionStrategy interface {
|
||||
Select() int
|
||||
}
|
||||
|
||||
type RoundrobinStrategy struct {
|
||||
lastSelected atomic.Int64
|
||||
}
|
||||
|
||||
func (r *RoundrobinStrategy) Select() int {
|
||||
return int(r.lastSelected.Add(1))
|
||||
}
|
||||
|
||||
type ConnPool struct {
|
||||
m sync.RWMutex
|
||||
strategy ConnSelectionStrategy
|
||||
conns []net.Conn
|
||||
}
|
||||
|
||||
type newConnPoolOpts struct {
|
||||
strategy ConnSelectionStrategy
|
||||
maxConns int
|
||||
}
|
||||
|
||||
func newNewConnPoolOpts() newConnPoolOpts {
|
||||
return newConnPoolOpts{
|
||||
strategy: defaultStrategy,
|
||||
maxConns: defaultMaxConns,
|
||||
}
|
||||
}
|
||||
|
||||
type NewConnPoolOpt func(p *newConnPoolOpts)
|
||||
|
||||
func WithStrategy(s ConnSelectionStrategy) NewConnPoolOpt {
|
||||
return func(p *newConnPoolOpts) {
|
||||
p.strategy = s
|
||||
}
|
||||
}
|
||||
|
||||
func WithMaxConns(mc int) NewConnPoolOpt {
|
||||
return func(p *newConnPoolOpts) {
|
||||
p.maxConns = mc
|
||||
}
|
||||
}
|
||||
|
||||
func NewConnPool(opts ...NewConnPoolOpt) *ConnPool {
|
||||
o := newNewConnPoolOpts()
|
||||
for _, opt := range opts {
|
||||
opt(&o)
|
||||
}
|
||||
return &ConnPool{
|
||||
conns: make([]net.Conn, 0),
|
||||
strategy: o.strategy,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ConnPool) SelectConn() net.Conn {
|
||||
p.m.RLock()
|
||||
defer p.m.RUnlock()
|
||||
return p.conns[p.strategy.Select()]
|
||||
}
|
||||
|
||||
func (p *ConnPool) AddConn(conn net.Conn) {
|
||||
p.m.Lock()
|
||||
defer p.m.Unlock()
|
||||
p.conns = append(p.conns, conn)
|
||||
}
|
||||
|
@ -1,71 +1,71 @@
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
SELECT 'up SQL query';
|
||||
|
||||
-- Users as auth data
|
||||
create table if not exists users (
|
||||
id uuid primary key,
|
||||
username text default null,
|
||||
login text not null unique,
|
||||
password bytea not null,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default current_timestamp
|
||||
);
|
||||
|
||||
create index idx_users_login on users (login);
|
||||
|
||||
create index idx_users_username on users (username);
|
||||
|
||||
-- Sessions and auth data
|
||||
create table sessions (
|
||||
id uuid primary key,
|
||||
session_token varchar(200) not null unique,
|
||||
csrf_token varchar(200) not null unique,
|
||||
user_id uuid references users(id),
|
||||
created_at timestamp default current_timestamp,
|
||||
expired_at timestamp not null
|
||||
);
|
||||
|
||||
create index if not exists idx_sessions_session_token_csrf_token on sessions (session_token, csrf_token);
|
||||
|
||||
-- Files
|
||||
create table files_metadata (
|
||||
id uuid primary key,
|
||||
name text not null,
|
||||
fslink text not null,
|
||||
size bigint not null,
|
||||
ext text not null,
|
||||
owner_id uuid not null,
|
||||
parent_dir uuid not null,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default null,
|
||||
deleted_at timestamptz default null
|
||||
);
|
||||
|
||||
create index idx_fm_owner_id on files_metadata(owner_id);
|
||||
create index idx_fm_owner_id_parent_dir on files_metadata(owner_id, parent_dir);
|
||||
|
||||
create table directories (
|
||||
id uuid primary key,
|
||||
name text not null,
|
||||
owner_id uuid not null,
|
||||
parent_dir uuid not null,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default null,
|
||||
deleted_at timestamptz default null
|
||||
);
|
||||
|
||||
create index idx_directories_owner_id_parent_dir on directories(owner_id, parent_dir);
|
||||
|
||||
create table directory_users_access (
|
||||
id uuid primary key,
|
||||
dir_id uuid not null,
|
||||
user_id uuid not null,
|
||||
assess_flag integer,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default null
|
||||
);
|
||||
|
||||
create index idx_dua_owner_id_parent_dir on directories(owner_id, parent_dir);
|
||||
|
||||
|
||||
-- +goose Up
|
||||
-- +goose StatementBegin
|
||||
SELECT 'up SQL query';
|
||||
|
||||
-- Users as auth data
|
||||
create table if not exists users (
|
||||
id uuid primary key,
|
||||
username text default null,
|
||||
login text not null unique,
|
||||
password bytea not null,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default current_timestamp
|
||||
);
|
||||
|
||||
create index idx_users_login on users (login);
|
||||
|
||||
create index idx_users_username on users (username);
|
||||
|
||||
-- Sessions and auth data
|
||||
create table sessions (
|
||||
id uuid primary key,
|
||||
session_token varchar(200) not null unique,
|
||||
csrf_token varchar(200) not null unique,
|
||||
user_id uuid references users(id),
|
||||
created_at timestamp default current_timestamp,
|
||||
expired_at timestamp not null
|
||||
);
|
||||
|
||||
create index if not exists idx_sessions_session_token_csrf_token on sessions (session_token, csrf_token);
|
||||
|
||||
-- Files
|
||||
create table files_metadata (
|
||||
id uuid primary key,
|
||||
name text not null,
|
||||
fslink text not null,
|
||||
size bigint not null,
|
||||
ext text not null,
|
||||
owner_id uuid not null,
|
||||
parent_dir uuid not null,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default null,
|
||||
deleted_at timestamptz default null
|
||||
);
|
||||
|
||||
create index idx_fm_owner_id on files_metadata(owner_id);
|
||||
create index idx_fm_owner_id_parent_dir on files_metadata(owner_id, parent_dir);
|
||||
|
||||
create table directories (
|
||||
id uuid primary key,
|
||||
name text not null,
|
||||
owner_id uuid not null,
|
||||
parent_dir uuid not null,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default null,
|
||||
deleted_at timestamptz default null
|
||||
);
|
||||
|
||||
create index idx_directories_owner_id_parent_dir on directories(owner_id, parent_dir);
|
||||
|
||||
create table directory_users_access (
|
||||
id uuid primary key,
|
||||
dir_id uuid not null,
|
||||
user_id uuid not null,
|
||||
assess_flag integer,
|
||||
created_at timestamptz default current_timestamp,
|
||||
updated_at timestamptz default null
|
||||
);
|
||||
|
||||
create index idx_dua_owner_id_parent_dir on directories(owner_id, parent_dir);
|
||||
|
||||
|
||||
|
@ -1,85 +0,0 @@
|
||||
// Code generated by mockery v2.48.0. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
audit "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockAuthAuditLogStorage is an autogenerated mock type for the AuthAuditLogStorage type
|
||||
type MockAuthAuditLogStorage struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockAuthAuditLogStorage_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockAuthAuditLogStorage) EXPECT() *MockAuthAuditLogStorage_Expecter {
|
||||
return &MockAuthAuditLogStorage_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// AddEntry provides a mock function with given fields: ctx, entry
|
||||
func (_m *MockAuthAuditLogStorage) AddEntry(ctx context.Context, entry audit.AuditLogEntry) error {
|
||||
ret := _m.Called(ctx, entry)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AddEntry")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, audit.AuditLogEntry) error); ok {
|
||||
r0 = rf(ctx, entry)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockAuthAuditLogStorage_AddEntry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddEntry'
|
||||
type MockAuthAuditLogStorage_AddEntry_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// AddEntry is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - entry audit.AuditLogEntry
|
||||
func (_e *MockAuthAuditLogStorage_Expecter) AddEntry(ctx interface{}, entry interface{}) *MockAuthAuditLogStorage_AddEntry_Call {
|
||||
return &MockAuthAuditLogStorage_AddEntry_Call{Call: _e.mock.On("AddEntry", ctx, entry)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthAuditLogStorage_AddEntry_Call) Run(run func(ctx context.Context, entry audit.AuditLogEntry)) *MockAuthAuditLogStorage_AddEntry_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(audit.AuditLogEntry))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthAuditLogStorage_AddEntry_Call) Return(_a0 error) *MockAuthAuditLogStorage_AddEntry_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthAuditLogStorage_AddEntry_Call) RunAndReturn(run func(context.Context, audit.AuditLogEntry) error) *MockAuthAuditLogStorage_AddEntry_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockAuthAuditLogStorage creates a new instance of MockAuthAuditLogStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockAuthAuditLogStorage(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockAuthAuditLogStorage {
|
||||
mock := &MockAuthAuditLogStorage{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
@ -1,377 +0,0 @@
|
||||
// Code generated by mockery v2.48.0. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
models "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockAuthStorage is an autogenerated mock type for the AuthStorage type
|
||||
type MockAuthStorage struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockAuthStorage_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockAuthStorage) EXPECT() *MockAuthStorage_Expecter {
|
||||
return &MockAuthStorage_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// AddSession provides a mock function with given fields: ctx, ses
|
||||
func (_m *MockAuthStorage) AddSession(ctx context.Context, ses *models.Session) (int64, error) {
|
||||
ret := _m.Called(ctx, ses)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AddSession")
|
||||
}
|
||||
|
||||
var r0 int64
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *models.Session) (int64, error)); ok {
|
||||
return rf(ctx, ses)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *models.Session) int64); ok {
|
||||
r0 = rf(ctx, ses)
|
||||
} else {
|
||||
r0 = ret.Get(0).(int64)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *models.Session) error); ok {
|
||||
r1 = rf(ctx, ses)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockAuthStorage_AddSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddSession'
|
||||
type MockAuthStorage_AddSession_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// AddSession is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - ses *models.Session
|
||||
func (_e *MockAuthStorage_Expecter) AddSession(ctx interface{}, ses interface{}) *MockAuthStorage_AddSession_Call {
|
||||
return &MockAuthStorage_AddSession_Call{Call: _e.mock.On("AddSession", ctx, ses)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_AddSession_Call) Run(run func(ctx context.Context, ses *models.Session)) *MockAuthStorage_AddSession_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*models.Session))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_AddSession_Call) Return(_a0 int64, _a1 error) *MockAuthStorage_AddSession_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_AddSession_Call) RunAndReturn(run func(context.Context, *models.Session) (int64, error)) *MockAuthStorage_AddSession_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddUser provides a mock function with given fields: ctx, login, username, passwordHash
|
||||
func (_m *MockAuthStorage) AddUser(ctx context.Context, login string, username string, passwordHash []byte) (int64, error) {
|
||||
ret := _m.Called(ctx, login, username, passwordHash)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AddUser")
|
||||
}
|
||||
|
||||
var r0 int64
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, []byte) (int64, error)); ok {
|
||||
return rf(ctx, login, username, passwordHash)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, []byte) int64); ok {
|
||||
r0 = rf(ctx, login, username, passwordHash)
|
||||
} else {
|
||||
r0 = ret.Get(0).(int64)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string, string, []byte) error); ok {
|
||||
r1 = rf(ctx, login, username, passwordHash)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockAuthStorage_AddUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddUser'
|
||||
type MockAuthStorage_AddUser_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// AddUser is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - login string
|
||||
// - username string
|
||||
// - passwordHash []byte
|
||||
func (_e *MockAuthStorage_Expecter) AddUser(ctx interface{}, login interface{}, username interface{}, passwordHash interface{}) *MockAuthStorage_AddUser_Call {
|
||||
return &MockAuthStorage_AddUser_Call{Call: _e.mock.On("AddUser", ctx, login, username, passwordHash)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_AddUser_Call) Run(run func(ctx context.Context, login string, username string, passwordHash []byte)) *MockAuthStorage_AddUser_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].([]byte))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_AddUser_Call) Return(_a0 int64, _a1 error) *MockAuthStorage_AddUser_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_AddUser_Call) RunAndReturn(run func(context.Context, string, string, []byte) (int64, error)) *MockAuthStorage_AddUser_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetSession provides a mock function with given fields: ctx, sessionToken
|
||||
func (_m *MockAuthStorage) GetSession(ctx context.Context, sessionToken string) (*models.Session, error) {
|
||||
ret := _m.Called(ctx, sessionToken)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetSession")
|
||||
}
|
||||
|
||||
var r0 *models.Session
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) (*models.Session, error)); ok {
|
||||
return rf(ctx, sessionToken)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *models.Session); ok {
|
||||
r0 = rf(ctx, sessionToken)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*models.Session)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, sessionToken)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockAuthStorage_GetSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSession'
|
||||
type MockAuthStorage_GetSession_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetSession is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - sessionToken string
|
||||
func (_e *MockAuthStorage_Expecter) GetSession(ctx interface{}, sessionToken interface{}) *MockAuthStorage_GetSession_Call {
|
||||
return &MockAuthStorage_GetSession_Call{Call: _e.mock.On("GetSession", ctx, sessionToken)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetSession_Call) Run(run func(ctx context.Context, sessionToken string)) *MockAuthStorage_GetSession_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetSession_Call) Return(_a0 *models.Session, _a1 error) *MockAuthStorage_GetSession_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetSession_Call) RunAndReturn(run func(context.Context, string) (*models.Session, error)) *MockAuthStorage_GetSession_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetUserByID provides a mock function with given fields: ctx, id
|
||||
func (_m *MockAuthStorage) GetUserByID(ctx context.Context, id uint64) (*models.User, error) {
|
||||
ret := _m.Called(ctx, id)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetUserByID")
|
||||
}
|
||||
|
||||
var r0 *models.User
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, uint64) (*models.User, error)); ok {
|
||||
return rf(ctx, id)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, uint64) *models.User); ok {
|
||||
r0 = rf(ctx, id)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*models.User)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok {
|
||||
r1 = rf(ctx, id)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockAuthStorage_GetUserByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserByID'
|
||||
type MockAuthStorage_GetUserByID_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetUserByID is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - id uint64
|
||||
func (_e *MockAuthStorage_Expecter) GetUserByID(ctx interface{}, id interface{}) *MockAuthStorage_GetUserByID_Call {
|
||||
return &MockAuthStorage_GetUserByID_Call{Call: _e.mock.On("GetUserByID", ctx, id)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetUserByID_Call) Run(run func(ctx context.Context, id uint64)) *MockAuthStorage_GetUserByID_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(uint64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetUserByID_Call) Return(_a0 *models.User, _a1 error) *MockAuthStorage_GetUserByID_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetUserByID_Call) RunAndReturn(run func(context.Context, uint64) (*models.User, error)) *MockAuthStorage_GetUserByID_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetUserByLogin provides a mock function with given fields: ctx, login
|
||||
func (_m *MockAuthStorage) GetUserByLogin(ctx context.Context, login string) (*models.User, error) {
|
||||
ret := _m.Called(ctx, login)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetUserByLogin")
|
||||
}
|
||||
|
||||
var r0 *models.User
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) (*models.User, error)); ok {
|
||||
return rf(ctx, login)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *models.User); ok {
|
||||
r0 = rf(ctx, login)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*models.User)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, login)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockAuthStorage_GetUserByLogin_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserByLogin'
|
||||
type MockAuthStorage_GetUserByLogin_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetUserByLogin is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - login string
|
||||
func (_e *MockAuthStorage_Expecter) GetUserByLogin(ctx interface{}, login interface{}) *MockAuthStorage_GetUserByLogin_Call {
|
||||
return &MockAuthStorage_GetUserByLogin_Call{Call: _e.mock.On("GetUserByLogin", ctx, login)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetUserByLogin_Call) Run(run func(ctx context.Context, login string)) *MockAuthStorage_GetUserByLogin_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetUserByLogin_Call) Return(_a0 *models.User, _a1 error) *MockAuthStorage_GetUserByLogin_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_GetUserByLogin_Call) RunAndReturn(run func(context.Context, string) (*models.User, error)) *MockAuthStorage_GetUserByLogin_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// RemoveSession provides a mock function with given fields: ctx, id
|
||||
func (_m *MockAuthStorage) RemoveSession(ctx context.Context, id int64) error {
|
||||
ret := _m.Called(ctx, id)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RemoveSession")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
|
||||
r0 = rf(ctx, id)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockAuthStorage_RemoveSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveSession'
|
||||
type MockAuthStorage_RemoveSession_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// RemoveSession is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - id int64
|
||||
func (_e *MockAuthStorage_Expecter) RemoveSession(ctx interface{}, id interface{}) *MockAuthStorage_RemoveSession_Call {
|
||||
return &MockAuthStorage_RemoveSession_Call{Call: _e.mock.On("RemoveSession", ctx, id)}
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_RemoveSession_Call) Run(run func(ctx context.Context, id int64)) *MockAuthStorage_RemoveSession_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(int64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_RemoveSession_Call) Return(_a0 error) *MockAuthStorage_RemoveSession_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockAuthStorage_RemoveSession_Call) RunAndReturn(run func(context.Context, int64) error) *MockAuthStorage_RemoveSession_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockAuthStorage creates a new instance of MockAuthStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockAuthStorage(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockAuthStorage {
|
||||
mock := &MockAuthStorage{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
@ -1,191 +0,0 @@
|
||||
// Code generated by mockery v2.48.0. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
context "context"
|
||||
os "os"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockBlobStorage is an autogenerated mock type for the BlobStorage type
|
||||
type MockBlobStorage struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockBlobStorage_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockBlobStorage) EXPECT() *MockBlobStorage_Expecter {
|
||||
return &MockBlobStorage_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// DeleteFile provides a mock function with given fields: ctx, id
|
||||
func (_m *MockBlobStorage) DeleteFile(ctx context.Context, id int64) error {
|
||||
ret := _m.Called(ctx, id)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for DeleteFile")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
|
||||
r0 = rf(ctx, id)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockBlobStorage_DeleteFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteFile'
|
||||
type MockBlobStorage_DeleteFile_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// DeleteFile is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - id int64
|
||||
func (_e *MockBlobStorage_Expecter) DeleteFile(ctx interface{}, id interface{}) *MockBlobStorage_DeleteFile_Call {
|
||||
return &MockBlobStorage_DeleteFile_Call{Call: _e.mock.On("DeleteFile", ctx, id)}
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_DeleteFile_Call) Run(run func(ctx context.Context, id int64)) *MockBlobStorage_DeleteFile_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(int64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_DeleteFile_Call) Return(_a0 error) *MockBlobStorage_DeleteFile_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_DeleteFile_Call) RunAndReturn(run func(context.Context, int64) error) *MockBlobStorage_DeleteFile_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetFile provides a mock function with given fields: ctx, id
|
||||
func (_m *MockBlobStorage) GetFile(ctx context.Context, id int64) (*os.File, error) {
|
||||
ret := _m.Called(ctx, id)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetFile")
|
||||
}
|
||||
|
||||
var r0 *os.File
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64) (*os.File, error)); ok {
|
||||
return rf(ctx, id)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64) *os.File); ok {
|
||||
r0 = rf(ctx, id)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*os.File)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, int64) error); ok {
|
||||
r1 = rf(ctx, id)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockBlobStorage_GetFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFile'
|
||||
type MockBlobStorage_GetFile_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetFile is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - id int64
|
||||
func (_e *MockBlobStorage_Expecter) GetFile(ctx interface{}, id interface{}) *MockBlobStorage_GetFile_Call {
|
||||
return &MockBlobStorage_GetFile_Call{Call: _e.mock.On("GetFile", ctx, id)}
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_GetFile_Call) Run(run func(ctx context.Context, id int64)) *MockBlobStorage_GetFile_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(int64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_GetFile_Call) Return(_a0 *os.File, _a1 error) *MockBlobStorage_GetFile_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_GetFile_Call) RunAndReturn(run func(context.Context, int64) (*os.File, error)) *MockBlobStorage_GetFile_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// SaveBlob provides a mock function with given fields: ctx, id, data
|
||||
func (_m *MockBlobStorage) SaveBlob(ctx context.Context, id int64, data []byte) error {
|
||||
ret := _m.Called(ctx, id, data)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for SaveBlob")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64, []byte) error); ok {
|
||||
r0 = rf(ctx, id, data)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockBlobStorage_SaveBlob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveBlob'
|
||||
type MockBlobStorage_SaveBlob_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SaveBlob is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - id int64
|
||||
// - data []byte
|
||||
func (_e *MockBlobStorage_Expecter) SaveBlob(ctx interface{}, id interface{}, data interface{}) *MockBlobStorage_SaveBlob_Call {
|
||||
return &MockBlobStorage_SaveBlob_Call{Call: _e.mock.On("SaveBlob", ctx, id, data)}
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_SaveBlob_Call) Run(run func(ctx context.Context, id int64, data []byte)) *MockBlobStorage_SaveBlob_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(int64), args[2].([]byte))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_SaveBlob_Call) Return(_a0 error) *MockBlobStorage_SaveBlob_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBlobStorage_SaveBlob_Call) RunAndReturn(run func(context.Context, int64, []byte) error) *MockBlobStorage_SaveBlob_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockBlobStorage creates a new instance of MockBlobStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockBlobStorage(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockBlobStorage {
|
||||
mock := &MockBlobStorage{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
@ -1,377 +0,0 @@
|
||||
// Code generated by mockery v2.48.0. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
models "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockDatabase is an autogenerated mock type for the Database type
|
||||
type MockDatabase struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockDatabase_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockDatabase) EXPECT() *MockDatabase_Expecter {
|
||||
return &MockDatabase_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// AddSession provides a mock function with given fields: ctx, ses
|
||||
func (_m *MockDatabase) AddSession(ctx context.Context, ses *models.Session) (int64, error) {
|
||||
ret := _m.Called(ctx, ses)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AddSession")
|
||||
}
|
||||
|
||||
var r0 int64
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *models.Session) (int64, error)); ok {
|
||||
return rf(ctx, ses)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *models.Session) int64); ok {
|
||||
r0 = rf(ctx, ses)
|
||||
} else {
|
||||
r0 = ret.Get(0).(int64)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *models.Session) error); ok {
|
||||
r1 = rf(ctx, ses)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockDatabase_AddSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddSession'
|
||||
type MockDatabase_AddSession_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// AddSession is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - ses *models.Session
|
||||
func (_e *MockDatabase_Expecter) AddSession(ctx interface{}, ses interface{}) *MockDatabase_AddSession_Call {
|
||||
return &MockDatabase_AddSession_Call{Call: _e.mock.On("AddSession", ctx, ses)}
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_AddSession_Call) Run(run func(ctx context.Context, ses *models.Session)) *MockDatabase_AddSession_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(*models.Session))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_AddSession_Call) Return(_a0 int64, _a1 error) *MockDatabase_AddSession_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_AddSession_Call) RunAndReturn(run func(context.Context, *models.Session) (int64, error)) *MockDatabase_AddSession_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// AddUser provides a mock function with given fields: ctx, login, username, passwordHash
|
||||
func (_m *MockDatabase) AddUser(ctx context.Context, login string, username string, passwordHash []byte) (int64, error) {
|
||||
ret := _m.Called(ctx, login, username, passwordHash)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for AddUser")
|
||||
}
|
||||
|
||||
var r0 int64
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, []byte) (int64, error)); ok {
|
||||
return rf(ctx, login, username, passwordHash)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, string, []byte) int64); ok {
|
||||
r0 = rf(ctx, login, username, passwordHash)
|
||||
} else {
|
||||
r0 = ret.Get(0).(int64)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string, string, []byte) error); ok {
|
||||
r1 = rf(ctx, login, username, passwordHash)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockDatabase_AddUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddUser'
|
||||
type MockDatabase_AddUser_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// AddUser is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - login string
|
||||
// - username string
|
||||
// - passwordHash []byte
|
||||
func (_e *MockDatabase_Expecter) AddUser(ctx interface{}, login interface{}, username interface{}, passwordHash interface{}) *MockDatabase_AddUser_Call {
|
||||
return &MockDatabase_AddUser_Call{Call: _e.mock.On("AddUser", ctx, login, username, passwordHash)}
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_AddUser_Call) Run(run func(ctx context.Context, login string, username string, passwordHash []byte)) *MockDatabase_AddUser_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].([]byte))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_AddUser_Call) Return(_a0 int64, _a1 error) *MockDatabase_AddUser_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_AddUser_Call) RunAndReturn(run func(context.Context, string, string, []byte) (int64, error)) *MockDatabase_AddUser_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetSession provides a mock function with given fields: ctx, sessionToken
|
||||
func (_m *MockDatabase) GetSession(ctx context.Context, sessionToken string) (*models.Session, error) {
|
||||
ret := _m.Called(ctx, sessionToken)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetSession")
|
||||
}
|
||||
|
||||
var r0 *models.Session
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) (*models.Session, error)); ok {
|
||||
return rf(ctx, sessionToken)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *models.Session); ok {
|
||||
r0 = rf(ctx, sessionToken)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*models.Session)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, sessionToken)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockDatabase_GetSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSession'
|
||||
type MockDatabase_GetSession_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetSession is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - sessionToken string
|
||||
func (_e *MockDatabase_Expecter) GetSession(ctx interface{}, sessionToken interface{}) *MockDatabase_GetSession_Call {
|
||||
return &MockDatabase_GetSession_Call{Call: _e.mock.On("GetSession", ctx, sessionToken)}
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetSession_Call) Run(run func(ctx context.Context, sessionToken string)) *MockDatabase_GetSession_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetSession_Call) Return(_a0 *models.Session, _a1 error) *MockDatabase_GetSession_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetSession_Call) RunAndReturn(run func(context.Context, string) (*models.Session, error)) *MockDatabase_GetSession_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetUserByID provides a mock function with given fields: ctx, id
|
||||
func (_m *MockDatabase) GetUserByID(ctx context.Context, id uint64) (*models.User, error) {
|
||||
ret := _m.Called(ctx, id)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetUserByID")
|
||||
}
|
||||
|
||||
var r0 *models.User
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, uint64) (*models.User, error)); ok {
|
||||
return rf(ctx, id)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, uint64) *models.User); ok {
|
||||
r0 = rf(ctx, id)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*models.User)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok {
|
||||
r1 = rf(ctx, id)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockDatabase_GetUserByID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserByID'
|
||||
type MockDatabase_GetUserByID_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetUserByID is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - id uint64
|
||||
func (_e *MockDatabase_Expecter) GetUserByID(ctx interface{}, id interface{}) *MockDatabase_GetUserByID_Call {
|
||||
return &MockDatabase_GetUserByID_Call{Call: _e.mock.On("GetUserByID", ctx, id)}
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetUserByID_Call) Run(run func(ctx context.Context, id uint64)) *MockDatabase_GetUserByID_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(uint64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetUserByID_Call) Return(_a0 *models.User, _a1 error) *MockDatabase_GetUserByID_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetUserByID_Call) RunAndReturn(run func(context.Context, uint64) (*models.User, error)) *MockDatabase_GetUserByID_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// GetUserByLogin provides a mock function with given fields: ctx, login
|
||||
func (_m *MockDatabase) GetUserByLogin(ctx context.Context, login string) (*models.User, error) {
|
||||
ret := _m.Called(ctx, login)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for GetUserByLogin")
|
||||
}
|
||||
|
||||
var r0 *models.User
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) (*models.User, error)); ok {
|
||||
return rf(ctx, login)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *models.User); ok {
|
||||
r0 = rf(ctx, login)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*models.User)
|
||||
}
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, login)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockDatabase_GetUserByLogin_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUserByLogin'
|
||||
type MockDatabase_GetUserByLogin_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// GetUserByLogin is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - login string
|
||||
func (_e *MockDatabase_Expecter) GetUserByLogin(ctx interface{}, login interface{}) *MockDatabase_GetUserByLogin_Call {
|
||||
return &MockDatabase_GetUserByLogin_Call{Call: _e.mock.On("GetUserByLogin", ctx, login)}
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetUserByLogin_Call) Run(run func(ctx context.Context, login string)) *MockDatabase_GetUserByLogin_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetUserByLogin_Call) Return(_a0 *models.User, _a1 error) *MockDatabase_GetUserByLogin_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_GetUserByLogin_Call) RunAndReturn(run func(context.Context, string) (*models.User, error)) *MockDatabase_GetUserByLogin_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// RemoveSession provides a mock function with given fields: ctx, id
|
||||
func (_m *MockDatabase) RemoveSession(ctx context.Context, id int64) error {
|
||||
ret := _m.Called(ctx, id)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for RemoveSession")
|
||||
}
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, int64) error); ok {
|
||||
r0 = rf(ctx, id)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockDatabase_RemoveSession_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveSession'
|
||||
type MockDatabase_RemoveSession_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// RemoveSession is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - id int64
|
||||
func (_e *MockDatabase_Expecter) RemoveSession(ctx interface{}, id interface{}) *MockDatabase_RemoveSession_Call {
|
||||
return &MockDatabase_RemoveSession_Call{Call: _e.mock.On("RemoveSession", ctx, id)}
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_RemoveSession_Call) Run(run func(ctx context.Context, id int64)) *MockDatabase_RemoveSession_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(int64))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_RemoveSession_Call) Return(_a0 error) *MockDatabase_RemoveSession_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockDatabase_RemoveSession_Call) RunAndReturn(run func(context.Context, int64) error) *MockDatabase_RemoveSession_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockDatabase creates a new instance of MockDatabase. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockDatabase(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockDatabase {
|
||||
mock := &MockDatabase{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
// Code generated by mockery v2.48.0. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import mock "github.com/stretchr/testify/mock"
|
||||
|
||||
// MockFileStorage is an autogenerated mock type for the FileStorage type
|
||||
type MockFileStorage struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockFileStorage_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockFileStorage) EXPECT() *MockFileStorage_Expecter {
|
||||
return &MockFileStorage_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// NewMockFileStorage creates a new instance of MockFileStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockFileStorage(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockFileStorage {
|
||||
mock := &MockFileStorage{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
@ -1,95 +0,0 @@
|
||||
// Code generated by mockery v2.48.0. DO NOT EDIT.
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
// MockMetaStorage is an autogenerated mock type for the MetaStorage type
|
||||
type MockMetaStorage struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockMetaStorage_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockMetaStorage) EXPECT() *MockMetaStorage_Expecter {
|
||||
return &MockMetaStorage_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// SaveMetadata provides a mock function with given fields: ctx, fileType, size, ext
|
||||
func (_m *MockMetaStorage) SaveMetadata(ctx context.Context, fileType string, size int64, ext string) (int64, error) {
|
||||
ret := _m.Called(ctx, fileType, size, ext)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for SaveMetadata")
|
||||
}
|
||||
|
||||
var r0 int64
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, int64, string) (int64, error)); ok {
|
||||
return rf(ctx, fileType, size, ext)
|
||||
}
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string, int64, string) int64); ok {
|
||||
r0 = rf(ctx, fileType, size, ext)
|
||||
} else {
|
||||
r0 = ret.Get(0).(int64)
|
||||
}
|
||||
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string, int64, string) error); ok {
|
||||
r1 = rf(ctx, fileType, size, ext)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// MockMetaStorage_SaveMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveMetadata'
|
||||
type MockMetaStorage_SaveMetadata_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// SaveMetadata is a helper method to define mock.On call
|
||||
// - ctx context.Context
|
||||
// - fileType string
|
||||
// - size int64
|
||||
// - ext string
|
||||
func (_e *MockMetaStorage_Expecter) SaveMetadata(ctx interface{}, fileType interface{}, size interface{}, ext interface{}) *MockMetaStorage_SaveMetadata_Call {
|
||||
return &MockMetaStorage_SaveMetadata_Call{Call: _e.mock.On("SaveMetadata", ctx, fileType, size, ext)}
|
||||
}
|
||||
|
||||
func (_c *MockMetaStorage_SaveMetadata_Call) Run(run func(ctx context.Context, fileType string, size int64, ext string)) *MockMetaStorage_SaveMetadata_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(context.Context), args[1].(string), args[2].(int64), args[3].(string))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockMetaStorage_SaveMetadata_Call) Return(_a0 int64, _a1 error) *MockMetaStorage_SaveMetadata_Call {
|
||||
_c.Call.Return(_a0, _a1)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockMetaStorage_SaveMetadata_Call) RunAndReturn(run func(context.Context, string, int64, string) (int64, error)) *MockMetaStorage_SaveMetadata_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockMetaStorage creates a new instance of MockMetaStorage. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockMetaStorage(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockMetaStorage {
|
||||
mock := &MockMetaStorage{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
|
Loading…
Reference in New Issue
Block a user