This commit is contained in:
r8zavetr8v 2025-02-25 14:47:57 -08:00
parent bfec6255bc
commit 2f0b38a3fb
60 changed files with 3260 additions and 3228 deletions

6
.gitignore vendored
View File

@ -1,4 +1,4 @@
*.sqlite
*.db
assets/*
build/*

View File

@ -1,12 +1,12 @@
filename: "mock_{{.InterfaceName}}.go"
dir: "mocks/{{.PackagePath}}"
outpkg: "{{.PackageName}}"
with-expecter: true
packages:
  git.optclblast.xyz/draincloud/draincloud-core/internal/storage:
    interfaces:
      Database:
      AuthAuditLogStorage:
      AuthStorage:
      BlobStorage:
      MetaStorage:

30
.vscode/launch.json vendored
View File

@ -1,16 +1,16 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Launch Package",
            "type": "go",
            "request": "launch",
            "mode": "auto",
            "program": "${workspaceFolder}/cmd/main.go"
        }
    ]
}

View File

@ -1,7 +1,7 @@
# DrainCloud Core

DrainCloud Core is an all-in-one lightweight DrainCloud distribution designed to work in resource-constrained environments.

The node can work in three modes: #TBD
1. All-in-one mode, the recommended one.
2. Auth-node. Only the auth API will be operational.
3. Storage-node. Only the filestorage API will be operational.

View File

@ -1,12 +1,12 @@
version: 3

tasks:
  run:
    cmds:
      - go run cmd/main.go
  deploy-local:
    cmds:
      - sudo docker stack deploy draincloud_core -c ./compose.rw.yaml
  migrate-local-status:
    cmds:
      - goose postgres "postgres://draincloud:draincloud@localhost:5432/draincloud" status -dir migrations

BIN
bin/task Normal file

Binary file not shown.

View File

@ -1,33 +1,33 @@
package main package main
import ( import (
"context" "context"
"os" "os"
"os/signal" "os/signal"
"git.optclblast.xyz/draincloud/draincloud-core/internal/app" "git.optclblast.xyz/draincloud/draincloud-core/internal/app"
filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine" filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin" "git.optclblast.xyz/draincloud/draincloud-core/internal/plugin"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/postgres" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/postgres"
) )
func main() { func main() {
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill) ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
defer cancel() defer cancel()
plugin.MustNewPluginLoader(ctx, 8081, plugin.NewPluginStore()). plugin.MustNewPluginLoader(ctx, 8081, plugin.NewPluginStore()).
Run(ctx) Run(ctx)
pg := postgres.New(ctx, "postgres://draincloud:mysuperstrongpassword@127.0.0.1:5432/draincloud?sslmode=disable") pg := postgres.New(ctx, "postgres://draincloud:mysuperstrongpassword@127.0.0.1:5432/draincloud?sslmode=disable")
// TODO move cron on a separate job (k8s cronjob / docker cron) // TODO move cron on a separate job (k8s cronjob / docker cron)
// cleanupSessionsCron := cleanupsessions.New(pg) // cleanupSessionsCron := cleanupsessions.New(pg)
// cleanupSessionsCron.Run(ctx) // cleanupSessionsCron.Run(ctx)
engine := filesengine.NewFilesEngine(nil, nil) engine := filesengine.NewFilesEngine(nil, nil)
go app.New(ctx, pg, engine). go app.New(ctx, pg, engine).
Run(ctx) Run(ctx)
<-ctx.Done() <-ctx.Done()
} }

View File

@ -1,50 +1,50 @@
services:
  rw_1:
    image: postgres:17
    container_name: draincloud-db-rw-1
    ports:
      - 5432:5432
    environment:
      - POSTGRES_USER=draincloud
      - POSTGRES_PASSWORD=mysuperstrongpassword
      - POSTGRES_DB=draincloud
    volumes:
      - draincloud-rw-1:/var/lib/postgresql/data
    networks:
      - draincloud-pg
  # rw_2:
  #   image: postgres:17
  #   container_name: draincloud-db-rw-2
  #   ports:
  #     - 5433:5432
  #   environment:
  #     - POSTGRES_USER=draincloud
  #     - POSTGRES_PASSWORD=mysuperstrongpassword
  #     - POSTGRES_DB=draincloud
  #   volumes:
  #     - draincloud-rw-2:/var/lib/postgresql/data
  #   networks:
  #     - draincloud-pg
  # rw_3:
  #   image: postgres:17
  #   container_name: draincloud-db-rw-3
  #   ports:
  #     - 5434:5432
  #   environment:
  #     - POSTGRES_USER=draincloud
  #     - POSTGRES_PASSWORD=mysuperstrongpassword
  #     - POSTGRES_DB=draincloud
  #   volumes:
  #     - draincloud-rw-3:/var/lib/postgresql/data
  #   networks:
  #     - draincloud-pg

volumes:
  draincloud-rw-1: {}
  # draincloud-rw-2: {}
  # draincloud-rw-3: {}

networks:
  draincloud-pg: {}

View File

@ -1,82 +1,82 @@
package app package app
import ( import (
"context" "context"
"git.optclblast.xyz/draincloud/draincloud-core/internal/app/handlers" "git.optclblast.xyz/draincloud/draincloud-core/internal/app/handlers"
filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine" filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
"git.optclblast.xyz/draincloud/draincloud-core/internal/processor" "git.optclblast.xyz/draincloud/draincloud-core/internal/processor"
resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher" resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
"git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/auth"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
type DrainCloud struct { type DrainCloud struct {
mux *gin.Engine mux *gin.Engine
database storage.Database database storage.Database
filesEngine *filesengine.FilesEngine filesEngine *filesengine.FilesEngine
ginProcessor processor.Processor[gin.HandlerFunc] ginProcessor processor.Processor[gin.HandlerFunc]
} }
func New( func New(
ctx context.Context, ctx context.Context,
database storage.Database, database storage.Database,
filesEngine *filesengine.FilesEngine, filesEngine *filesengine.FilesEngine,
) *DrainCloud { ) *DrainCloud {
mux := gin.Default() mux := gin.Default()
dispatcher := resolvedispatcher.New() dispatcher := resolvedispatcher.New()
dispatcher.RegisterResolver( dispatcher.RegisterResolver(
ctx, ctx,
auth.AuthResolverV1Name, auth.AuthResolverV1Name,
auth.NewAuthResolver(database), auth.NewAuthResolver(database),
) )
d := &DrainCloud{ d := &DrainCloud{
database: database, database: database,
filesEngine: filesEngine, filesEngine: filesEngine,
ginProcessor: processor.NewGinProcessor(database, dispatcher), ginProcessor: processor.NewGinProcessor(database, dispatcher),
} }
// TODO. Maybe overkill // TODO. Maybe overkill
internalGroup := mux.Group("/_internal") internalGroup := mux.Group("/_internal")
{ {
regGroup := internalGroup.Group("/register") regGroup := internalGroup.Group("/register")
{ {
regGroup.POST("/resolver", d.ginProcessor.Process( regGroup.POST("/resolver", d.ginProcessor.Process(
handlers.NewInternalRegisterResolverHandler(dispatcher), handlers.NewInternalRegisterResolverHandler(dispatcher),
)) ))
regGroup.POST("/plugin", func(ctx *gin.Context) {}) regGroup.POST("/plugin", func(ctx *gin.Context) {})
} }
} }
// Built-in auth component of DrainCloud-Core // Built-in auth component of DrainCloud-Core
authGroup := mux.Group("/auth") authGroup := mux.Group("/auth")
{ {
authGroup.POST("/register", d.ginProcessor.Process( authGroup.POST("/register", d.ginProcessor.Process(
handlers.NewRegisterHandler(database), handlers.NewRegisterHandler(database),
)) ))
authGroup.POST("/logon", d.ginProcessor.Process( authGroup.POST("/logon", d.ginProcessor.Process(
handlers.NewLogonHandler(database), handlers.NewLogonHandler(database),
)) ))
} }
filesGroup := mux.Group("/files") filesGroup := mux.Group("/files")
{ {
filesGroup.POST("/upload", d.ginProcessor.Process( filesGroup.POST("/upload", d.ginProcessor.Process(
handlers.NewUploadFileHandler(filesEngine), handlers.NewUploadFileHandler(filesEngine),
)) ))
} }
d.mux = mux d.mux = mux
return d return d
} }
func (d *DrainCloud) Run(ctx context.Context) error { func (d *DrainCloud) Run(ctx context.Context) error {
return d.mux.Run() return d.mux.Run()
} }

View File

@ -1,37 +1,37 @@
package handlers package handlers
import ( import (
"crypto/rand" "crypto/rand"
"encoding/base64" "encoding/base64"
"errors" "errors"
"fmt" "fmt"
) )
// Cookie names used by the auth handlers. The "__" prefix mirrors the
// browser host-cookie naming convention.
const (
	csrfTokenCookie    = "__Csrf_token"
	sessionTokenCookie = "__Session_token"
)

var (
	// ErrorUnauthorized is returned when a request carries no valid session.
	ErrorUnauthorized = errors.New("unauthorized")
)
// validateLoginAndPassword enforces minimum credential lengths: logins must
// be at least 4 characters and passwords at least 6.
//
// BUG FIX: both error messages previously claimed "must be longer than
// 8 chars" while the code enforces minimums of 4 and 6 — the messages now
// describe the actual thresholds.
func validateLoginAndPassword(login, password string) error {
	if len(login) < 4 {
		return fmt.Errorf("login must be at least 4 characters")
	}
	if len(password) < 6 {
		return fmt.Errorf("password must be at least 6 characters")
	}
	return nil
}
// generateSessionToken returns a URL-safe base64 string derived from
// `length` cryptographically random bytes.
func generateSessionToken(length int) (string, error) {
	buf := make([]byte, length)
	if _, err := rand.Read(buf); err != nil {
		return "", fmt.Errorf("failed to generate token: %w", err)
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

View File

@ -1,179 +1,179 @@
package handlers package handlers
import ( import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"log/slog" "log/slog"
"net/http" "net/http"
"time" "time"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
"git.optclblast.xyz/draincloud/draincloud-core/internal/domain" "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
"git.optclblast.xyz/draincloud/draincloud-core/internal/errs" "git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler" "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
"github.com/google/uuid" "github.com/google/uuid"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
) )
type LogonHandler struct { type LogonHandler struct {
*handler.BaseHandler *handler.BaseHandler
authStorage storage.AuthStorage authStorage storage.AuthStorage
} }
func NewLogonHandler( func NewLogonHandler(
authStorage storage.AuthStorage, authStorage storage.AuthStorage,
) *LogonHandler { ) *LogonHandler {
h := &LogonHandler{ h := &LogonHandler{
authStorage: authStorage, authStorage: authStorage,
BaseHandler: handler.New(). BaseHandler: handler.New().
WithName("logonv1"). WithName("logonv1").
WithRequiredResolveParams(), WithRequiredResolveParams(),
} }
h.WithProcessFunc(h.process) h.WithProcessFunc(h.process)
return h return h
} }
func (h *LogonHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error { func (h *LogonHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error {
logger.Debug(ctx, "[Logon] new request") logger.Debug(ctx, "[Logon] new request")
body := new(domain.LogonRequest) body := new(domain.LogonRequest)
err := json.Unmarshal(req.Body, body) err := json.Unmarshal(req.Body, body)
if err != nil { if err != nil {
logger.Error(ctx, "[Logon] failed to bind request", logger.Err(err)) logger.Error(ctx, "[Logon] failed to bind request", logger.Err(err))
w.Write(ctx, map[string]string{ w.Write(ctx, map[string]string{
"error": "bad request", "error": "bad request",
}, handler.WithCode(http.StatusBadRequest)) }, handler.WithCode(http.StatusBadRequest))
return nil return nil
} }
session, err := h.getSession(ctx, req) session, err := h.getSession(ctx, req)
if err != nil && !errors.Is(err, http.ErrNoCookie) { if err != nil && !errors.Is(err, http.ErrNoCookie) {
return err return err
} }
if session != nil { if session != nil {
if err := validateSession(req, session); err != nil { if err := validateSession(req, session); err != nil {
// TODO add audit log entry // TODO add audit log entry
return err return err
} }
logger.Debug(ctx, "[login] user is already logged in", slog.String("session_id", session.ID.String())) logger.Debug(ctx, "[login] user is already logged in", slog.String("session_id", session.ID.String()))
w.Write(ctx, &domain.LogonResponse{ w.Write(ctx, &domain.LogonResponse{
Ok: true, Ok: true,
}) })
return nil return nil
} }
logger.Debug(ctx, "[login] session not founh. trying to authorize") logger.Debug(ctx, "[login] session not founh. trying to authorize")
resp, err := h.login(ctx, body, session, w) resp, err := h.login(ctx, body, session, w)
if err != nil { if err != nil {
logger.Error(ctx, "[Logon] failed to login user", logger.Err(err)) logger.Error(ctx, "[Logon] failed to login user", logger.Err(err))
return err return err
} }
w.Write(ctx, resp) w.Write(ctx, resp)
return nil return nil
} }
func (h *LogonHandler) login(ctx context.Context, req *domain.LogonRequest, session *auth.Session, w handler.Writer) (*domain.LogonResponse, error) { func (h *LogonHandler) login(ctx context.Context, req *domain.LogonRequest, session *auth.Session, w handler.Writer) (*domain.LogonResponse, error) {
passwordHash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10) passwordHash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10)
if err != nil { if err != nil {
logger.Error(ctx, "[login] failed to generate password hash", logger.Err(err)) logger.Error(ctx, "[login] failed to generate password hash", logger.Err(err))
return nil, fmt.Errorf("failed to generate password hash: %w", err) return nil, fmt.Errorf("failed to generate password hash: %w", err)
} }
user, err := h.authStorage.GetUserByLogin(ctx, req.Login) user, err := h.authStorage.GetUserByLogin(ctx, req.Login)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch user by login: %w", err) return nil, fmt.Errorf("failed to fetch user by login: %w", err)
} }
if bytes.Equal(passwordHash, user.PasswordHash) { if bytes.Equal(passwordHash, user.PasswordHash) {
logger.Warn(ctx, "[login] failed to login user. passwords hashes not equal") logger.Warn(ctx, "[login] failed to login user. passwords hashes not equal")
return nil, errs.ErrorAccessDenied return nil, errs.ErrorAccessDenied
} }
sessionCreatedAt := time.Now() sessionCreatedAt := time.Now()
sessionExpiredAt := sessionCreatedAt.Add(time.Hour * 24 * 7) sessionExpiredAt := sessionCreatedAt.Add(time.Hour * 24 * 7)
sessionToken, err := generateSessionToken(100) sessionToken, err := generateSessionToken(100)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate a session token: %w", err) return nil, fmt.Errorf("failed to generate a session token: %w", err)
} }
w.SetCookie(sessionTokenCookie, sessionToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, true) w.SetCookie(sessionTokenCookie, sessionToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, true)
csrfToken, err := generateSessionToken(100) csrfToken, err := generateSessionToken(100)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate a csrf token: %w", err) return nil, fmt.Errorf("failed to generate a csrf token: %w", err)
} }
w.SetCookie(csrfTokenCookie, csrfToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, false) w.SetCookie(csrfTokenCookie, csrfToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, false)
sessionID, err := uuid.NewV7() sessionID, err := uuid.NewV7()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate session id: %w", err) return nil, fmt.Errorf("failed to generate session id: %w", err)
} }
if _, err = h.authStorage.AddSession(ctx, &auth.Session{ if _, err = h.authStorage.AddSession(ctx, &auth.Session{
ID: sessionID, ID: sessionID,
SessionToken: sessionToken, SessionToken: sessionToken,
CsrfToken: csrfToken, CsrfToken: csrfToken,
UserID: user.ID, UserID: user.ID,
CreatedAt: sessionCreatedAt, CreatedAt: sessionCreatedAt,
ExpiredAt: sessionExpiredAt, ExpiredAt: sessionExpiredAt,
}); err != nil { }); err != nil {
return nil, fmt.Errorf("failed to save session: %w", err) return nil, fmt.Errorf("failed to save session: %w", err)
} }
// TODO add audit log entry // TODO add audit log entry
return &domain.LogonResponse{ return &domain.LogonResponse{
Ok: true, Ok: true,
}, nil }, nil
} }
func (h *LogonHandler) getSession(ctx context.Context, req *common.Request) (*auth.Session, error) { func (h *LogonHandler) getSession(ctx context.Context, req *common.Request) (*auth.Session, error) {
token, err := common.GetValue[string](req.Metadata, sessionTokenCookie) token, err := common.GetValue[string](req.Metadata, sessionTokenCookie)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err) return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err)
} }
csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie) csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch csrf cookie from request: %w", err) return nil, fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
} }
if len(csrfToken) == 0 || len(token) == 0 { if len(csrfToken) == 0 || len(token) == 0 {
return nil, fmt.Errorf("session token or csrf token is empty") return nil, fmt.Errorf("session token or csrf token is empty")
} }
session, err := h.authStorage.GetSession(ctx, token) session, err := h.authStorage.GetSession(ctx, token)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch session from repo: %w", err) return nil, fmt.Errorf("failed to fetch session from repo: %w", err)
} }
return session, nil return session, nil
} }
func validateSession(req *common.Request, session *auth.Session) error { func validateSession(req *common.Request, session *auth.Session) error {
if session == nil { if session == nil {
return errs.ErrorAccessDenied return errs.ErrorAccessDenied
} }
csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie) csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie)
if err != nil { if err != nil {
return fmt.Errorf("failed to fetch csrf cookie from request: %w", err) return fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
} }
if session.CsrfToken != csrfToken { if session.CsrfToken != csrfToken {
return errs.ErrorAccessDenied return errs.ErrorAccessDenied
} }
if session.ExpiredAt.Before(time.Now()) { if session.ExpiredAt.Before(time.Now()) {
return errs.ErrorSessionExpired return errs.ErrorSessionExpired
} }
return nil return nil
} }

View File

@ -1,121 +1,121 @@
package handlers package handlers
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"time" "time"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
"git.optclblast.xyz/draincloud/draincloud-core/internal/domain" "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler" "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
"github.com/google/uuid" "github.com/google/uuid"
"golang.org/x/crypto/bcrypt" "golang.org/x/crypto/bcrypt"
) )
type RegisterHandler struct { type RegisterHandler struct {
*handler.BaseHandler *handler.BaseHandler
authStorage storage.AuthStorage authStorage storage.AuthStorage
} }
func NewRegisterHandler( func NewRegisterHandler(
authStorage storage.AuthStorage, authStorage storage.AuthStorage,
) *RegisterHandler { ) *RegisterHandler {
h := &RegisterHandler{ h := &RegisterHandler{
authStorage: authStorage, authStorage: authStorage,
BaseHandler: handler.New(). BaseHandler: handler.New().
WithName("registerv1"). WithName("registerv1").
WithRequiredResolveParams(), WithRequiredResolveParams(),
} }
h.WithProcessFunc(h.process) h.WithProcessFunc(h.process)
return h return h
} }
func (h *RegisterHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error { func (h *RegisterHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error {
regReq := new(domain.RegisterRequest) regReq := new(domain.RegisterRequest)
if err := json.Unmarshal(req.Body, regReq); err != nil { if err := json.Unmarshal(req.Body, regReq); err != nil {
return err return err
} }
resp, err := h.register(ctx, regReq, w) resp, err := h.register(ctx, regReq, w)
if err != nil { if err != nil {
return fmt.Errorf("failed to register user: %w", err) return fmt.Errorf("failed to register user: %w", err)
} }
w.Write(ctx, resp) w.Write(ctx, resp)
return nil return nil
} }
func (d *RegisterHandler) register( func (d *RegisterHandler) register(
ctx context.Context, ctx context.Context,
req *domain.RegisterRequest, req *domain.RegisterRequest,
w handler.Writer, w handler.Writer,
) (*domain.RegisterResponse, error) { ) (*domain.RegisterResponse, error) {
if err := validateLoginAndPassword(req.Login, req.Password); err != nil { if err := validateLoginAndPassword(req.Login, req.Password); err != nil {
return nil, fmt.Errorf("invalid creds: %w", err) return nil, fmt.Errorf("invalid creds: %w", err)
} }
passwordHash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10) passwordHash, err := bcrypt.GenerateFromPassword([]byte(req.Password), 10)
if err != nil { if err != nil {
logger.Error(ctx, "[register] failed to generate password hash", logger.Err(err)) logger.Error(ctx, "[register] failed to generate password hash", logger.Err(err))
return nil, fmt.Errorf("failed to generate password hash: %w", err) return nil, fmt.Errorf("failed to generate password hash: %w", err)
} }
userID, err := uuid.NewV7() userID, err := uuid.NewV7()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate user id: %w", err) return nil, fmt.Errorf("failed to generate user id: %w", err)
} }
user := &auth.User{ user := &auth.User{
ID: userID, ID: userID,
Username: req.Login, Username: req.Login,
Login: req.Login, Login: req.Login,
PasswordHash: passwordHash, PasswordHash: passwordHash,
} }
err = d.authStorage.AddUser(ctx, userID, user.Login, user.Username, user.PasswordHash) err = d.authStorage.AddUser(ctx, userID, user.Login, user.Username, user.PasswordHash)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to add new user: %w", err) return nil, fmt.Errorf("failed to add new user: %w", err)
} }
sessionCreatedAt := time.Now() sessionCreatedAt := time.Now()
sessionExpiredAt := sessionCreatedAt.Add(time.Hour * 24 * 7) sessionExpiredAt := sessionCreatedAt.Add(time.Hour * 24 * 7)
sessionToken, err := generateSessionToken(100) sessionToken, err := generateSessionToken(100)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate a session token: %w", err) return nil, fmt.Errorf("failed to generate a session token: %w", err)
} }
w.SetCookie(sessionTokenCookie, sessionToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, true) w.SetCookie(sessionTokenCookie, sessionToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, true)
csrfToken, err := generateSessionToken(100) csrfToken, err := generateSessionToken(100)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate a csrf token: %w", err) return nil, fmt.Errorf("failed to generate a csrf token: %w", err)
} }
w.SetCookie(csrfTokenCookie, csrfToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, false) w.SetCookie(csrfTokenCookie, csrfToken, int(sessionExpiredAt.Sub(sessionCreatedAt).Seconds()), "_path", "_domain", true, false)
sessionID, err := uuid.NewV7() sessionID, err := uuid.NewV7()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to generate session id: %w", err) return nil, fmt.Errorf("failed to generate session id: %w", err)
} }
if _, err = d.authStorage.AddSession(ctx, &auth.Session{ if _, err = d.authStorage.AddSession(ctx, &auth.Session{
ID: sessionID, ID: sessionID,
SessionToken: sessionToken, SessionToken: sessionToken,
CsrfToken: csrfToken, CsrfToken: csrfToken,
UserID: user.ID, UserID: user.ID,
CreatedAt: sessionCreatedAt, CreatedAt: sessionCreatedAt,
ExpiredAt: sessionExpiredAt, ExpiredAt: sessionExpiredAt,
}); err != nil { }); err != nil {
return nil, fmt.Errorf("failed to save session: %w", err) return nil, fmt.Errorf("failed to save session: %w", err)
} }
return &domain.RegisterResponse{ return &domain.RegisterResponse{
Ok: true, Ok: true,
}, nil }, nil
} }

View File

@ -1,38 +1,38 @@
package handlers package handlers
import ( import (
"context" "context"
"fmt" "fmt"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler" "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher" resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
) )
// TODO. Maybe remove // TODO. Maybe remove
type InternalRegisterResolverHandler struct { type InternalRegisterResolverHandler struct {
*handler.BaseHandler *handler.BaseHandler
resolveDispatcher *resolvedispatcher.ResolveDispatcher resolveDispatcher *resolvedispatcher.ResolveDispatcher
} }
func NewInternalRegisterResolverHandler( func NewInternalRegisterResolverHandler(
resolveDispatcher *resolvedispatcher.ResolveDispatcher, resolveDispatcher *resolvedispatcher.ResolveDispatcher,
) *InternalRegisterResolverHandler { ) *InternalRegisterResolverHandler {
h := &InternalRegisterResolverHandler{ h := &InternalRegisterResolverHandler{
resolveDispatcher: resolveDispatcher, resolveDispatcher: resolveDispatcher,
} }
h.BaseHandler = handler.New(). h.BaseHandler = handler.New().
WithName("internal_registerresolver"). WithName("internal_registerresolver").
WithProcessFunc(h.process) WithProcessFunc(h.process)
return h return h
} }
func (h *InternalRegisterResolverHandler) process( func (h *InternalRegisterResolverHandler) process(
ctx context.Context, ctx context.Context,
req *common.Request, req *common.Request,
w handler.Writer, w handler.Writer,
) error { ) error {
//_, ok := h.resolveDispatcher.GetResolver() //_, ok := h.resolveDispatcher.GetResolver()
return fmt.Errorf("uniplemented") return fmt.Errorf("uniplemented")
} }

View File

@ -1,95 +1,95 @@
package handlers package handlers
import ( import (
"context" "context"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine" filesengine "git.optclblast.xyz/draincloud/draincloud-core/internal/files_engine"
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler" "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
"git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers/auth"
) )
const ( const (
maxFileSize = 10 << 30 maxFileSize = 10 << 30
) )
type UploadFileHandler struct { type UploadFileHandler struct {
*handler.BaseHandler *handler.BaseHandler
filesEngine *filesengine.FilesEngine filesEngine *filesengine.FilesEngine
} }
func NewUploadFileHandler( func NewUploadFileHandler(
filesEngine *filesengine.FilesEngine, filesEngine *filesengine.FilesEngine,
) *UploadFileHandler { ) *UploadFileHandler {
h := &UploadFileHandler{ h := &UploadFileHandler{
filesEngine: filesEngine, filesEngine: filesEngine,
BaseHandler: handler.New(). BaseHandler: handler.New().
WithName("uploadfilev1"). WithName("uploadfilev1").
WithRequiredResolveParams( WithRequiredResolveParams(
auth.AuthResolverV1Name, auth.AuthResolverV1Name,
// TODO with MultipartReaderResolverV1Name // TODO with MultipartReaderResolverV1Name
// or // or
// MultipartDataResolverV1Name // MultipartDataResolverV1Name
), ),
} }
h.WithProcessFunc(h.process) h.WithProcessFunc(h.process)
return h return h
} }
func (d *UploadFileHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error { func (d *UploadFileHandler) process(ctx context.Context, req *common.Request, w handler.Writer) error {
// TODO fetch (interface{ ParseMultipartForm(size int) error }) from req.GetValue[ParseMultipartFormer](req.ResolveValues) // TODO fetch (interface{ ParseMultipartForm(size int) error }) from req.GetValue[ParseMultipartFormer](req.ResolveValues)
// if err := req.RawReq.ParseMultipartForm(maxFileSize); err != nil { // if err := req.RawReq.ParseMultipartForm(maxFileSize); err != nil {
// logger.Error(ctx, "uploadFile handler error", logger.Err(err)) // logger.Error(ctx, "uploadFile handler error", logger.Err(err))
// return err // return err
// } // }
// if err := d.uploadFile(ctx, userID); err != nil { // if err := d.uploadFile(ctx, userID); err != nil {
// logger.Error(ctx, "uploadFile handle", logger.Err(err)) // logger.Error(ctx, "uploadFile handle", logger.Err(err))
// writeError(ctx, err) // writeError(ctx, err)
// return // return
// } // }
return nil return nil
} }
// func (d *UploadFileHandler) uploadFile(ctx context.Context, req *common.Request) error { // func (d *UploadFileHandler) uploadFile(ctx context.Context, req *common.Request) error {
// title := ctx.PostForm("file") // title := ctx.PostForm("file")
// logger.Info(ctx, "uploadFile", slog.Any("postForm data", spew.Sdump(title))) // logger.Info(ctx, "uploadFile", slog.Any("postForm data", spew.Sdump(title)))
// file, header, err := req.RawReq.FormFile("file") // file, header, err := req.RawReq.FormFile("file")
// if err != nil { // if err != nil {
// return err // return err
// } // }
// logger.Info(ctx, "uploadFile", slog.Any("header", spew.Sdump(header))) // logger.Info(ctx, "uploadFile", slog.Any("header", spew.Sdump(header)))
// data, err := io.ReadAll(file) // data, err := io.ReadAll(file)
// if err != nil { // if err != nil {
// return err // return err
// } // }
// ext := parseExtension(header.Filename) // ext := parseExtension(header.Filename)
// id, err := d.filesEngine.SaveFile(ctx, filesengine.File{ // id, err := d.filesEngine.SaveFile(ctx, filesengine.File{
// Name: header.Filename, // Name: header.Filename,
// UserID: userID, // UserID: userID,
// Data: data, // Data: data,
// Ext: ext, // Ext: ext,
// Size: int64(len(data)), // Size: int64(len(data)),
// Type: "", // че такое type? // Type: "", // че такое type?
// }) // })
// if err != nil { // if err != nil {
// return fmt.Errorf("failed to save file: %w", err) // return fmt.Errorf("failed to save file: %w", err)
// } // }
// logger.Debug(ctx, "new file id", "id", id) // logger.Debug(ctx, "new file id", "id", id)
// return nil // return nil
// } // }
// func parseExtension(filename string) string { // func parseExtension(filename string) string {
// parts := strings.Split(filename, ".") // parts := strings.Split(filename, ".")
// if len(parts) == 0 { // if len(parts) == 0 {
// return "" // return ""
// } // }
// return parts[len(parts)-1] // return parts[len(parts)-1]
// } // }

View File

@ -1,50 +1,50 @@
package closer package closer
import ( import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"sync/atomic" "sync/atomic"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
) )
var globalCloser *Closer = &Closer{ var globalCloser *Closer = &Closer{
closeFns: make([]func() error, 0), closeFns: make([]func() error, 0),
} }
type Closer struct { type Closer struct {
_lock atomic.Bool _lock atomic.Bool
closeFns []func() error closeFns []func() error
} }
func (c *Closer) Add(fn func() error) { func (c *Closer) Add(fn func() error) {
if c._lock.Load() { if c._lock.Load() {
return return
} }
c.closeFns = append(c.closeFns, fn) c.closeFns = append(c.closeFns, fn)
} }
func (c *Closer) Close() error { func (c *Closer) Close() error {
if !c._lock.CompareAndSwap(false, true) { if !c._lock.CompareAndSwap(false, true) {
return fmt.Errorf("already closed") return fmt.Errorf("already closed")
} }
var commonErr error var commonErr error
for _, fn := range c.closeFns { for _, fn := range c.closeFns {
if err := fn(); err != nil { if err := fn(); err != nil {
logger.Error(context.Background(), "[closer][Close] error at close func call", logger.Err(err)) logger.Error(context.Background(), "[closer][Close] error at close func call", logger.Err(err))
commonErr = errors.Join(commonErr, err) commonErr = errors.Join(commonErr, err)
} }
} }
return commonErr return commonErr
} }
func Add(fn func() error) { func Add(fn func() error) {
globalCloser.Add(fn) globalCloser.Add(fn)
} }
func Close() error { func Close() error {
return globalCloser.Close() return globalCloser.Close()
} }

View File

@ -1,103 +1,103 @@
package common package common
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"sync" "sync"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
"github.com/google/uuid" "github.com/google/uuid"
) )
type RequestPool struct { type RequestPool struct {
sp sync.Pool sp sync.Pool
} }
func (p *RequestPool) Get() *Request { func (p *RequestPool) Get() *Request {
r, _ := p.sp.Get().(*Request) r, _ := p.sp.Get().(*Request)
return r return r
} }
func (p *RequestPool) Put(r *Request) { func (p *RequestPool) Put(r *Request) {
r.ID = "" r.ID = ""
r.Metadata = &sync.Map{} r.Metadata = &sync.Map{}
r.ResolveValues = &sync.Map{} r.ResolveValues = &sync.Map{}
r.Session = nil r.Session = nil
r.User = nil r.User = nil
r.Body = nil r.Body = nil
p.sp.Put(r) p.sp.Put(r)
} }
func NewRequestPool() *RequestPool { func NewRequestPool() *RequestPool {
return &RequestPool{ return &RequestPool{
sp: sync.Pool{ sp: sync.Pool{
New: func() any { New: func() any {
return &Request{ return &Request{
ResolveValues: &sync.Map{}, ResolveValues: &sync.Map{},
Metadata: &sync.Map{}, Metadata: &sync.Map{},
} }
}, },
}, },
} }
} }
type Request struct { type Request struct {
ID string ID string
Session *auth.Session Session *auth.Session
User *auth.User User *auth.User
// ResolveValues - data required to process request. // ResolveValues - data required to process request.
ResolveValues *sync.Map ResolveValues *sync.Map
// Metadata - an additional data, usually added with preprocessing. // Metadata - an additional data, usually added with preprocessing.
Metadata *sync.Map Metadata *sync.Map
// Request body // Request body
Body []byte Body []byte
} }
// NewRequestFromHttp builds a new *Request struct from raw http Request. No auth data validated. // NewRequestFromHttp builds a new *Request struct from raw http Request. No auth data validated.
func NewRequestFromHttp(pool *RequestPool, req *http.Request) *Request { func NewRequestFromHttp(pool *RequestPool, req *http.Request) *Request {
out := pool.sp.Get().(*Request) out := pool.sp.Get().(*Request)
cookies := req.Cookies() cookies := req.Cookies()
headers := req.Header headers := req.Header
out.Metadata = &sync.Map{} out.Metadata = &sync.Map{}
for _, cookie := range cookies { for _, cookie := range cookies {
out.Metadata.Store(cookie.Name, cookie.Value) out.Metadata.Store(cookie.Name, cookie.Value)
} }
for hname, hval := range headers { for hname, hval := range headers {
out.Metadata.Store(hname, hval) out.Metadata.Store(hname, hval)
} }
body, err := io.ReadAll(req.Body) body, err := io.ReadAll(req.Body)
if err != nil { if err != nil {
logger.Error(context.TODO(), "failed to read request body", logger.Err(err)) logger.Error(context.TODO(), "failed to read request body", logger.Err(err))
} }
out.Body = body out.Body = body
reqID := uuid.NewString() reqID := uuid.NewString()
out.ID = reqID out.ID = reqID
return out return out
} }
// GetValue loads the entry stored under key from vals and asserts it to T.
// It fails when vals is nil, the key is absent, or the stored value is not a T.
func GetValue[T any](vals *sync.Map, key string) (T, error) {
	var zero T
	if vals == nil {
		return zero, fmt.Errorf("nil vals map")
	}
	raw, found := vals.Load(key)
	if !found {
		return zero, fmt.Errorf("value not found in resolve values set")
	}
	typed, isT := raw.(T)
	if !isT {
		return zero, fmt.Errorf("type of a value is unexpected")
	}
	return typed, nil
}

View File

@ -1,261 +1,261 @@
package common package common
import ( import (
"reflect" "reflect"
"sync" "sync"
"testing" "testing"
) )
func TestGetValue_string(t *testing.T) { func TestGetValue_string(t *testing.T) {
t.Parallel() t.Parallel()
type args struct { type args struct {
vals map[string]any vals map[string]any
key string key string
} }
tests := []struct { tests := []struct {
name string name string
args args args args
want string want string
wantErr bool wantErr bool
}{ }{
{ {
name: "ok", name: "ok",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": "123", "1": "123",
"2": "234", "2": "234",
}, },
key: "1", key: "1",
}, },
want: "123", want: "123",
wantErr: false, wantErr: false,
}, },
{ {
name: "value not presented", name: "value not presented",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": "123", "1": "123",
"2": "234", "2": "234",
}, },
key: "3", key: "3",
}, },
want: "", want: "",
wantErr: true, wantErr: true,
}, },
{ {
name: "nil map", name: "nil map",
args: args{ args: args{
vals: nil, vals: nil,
key: "1", key: "1",
}, },
want: "", want: "",
wantErr: true, wantErr: true,
}, },
{ {
name: "invalid type", name: "invalid type",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": "123", "1": "123",
"2": 234, "2": 234,
}, },
key: "2", key: "2",
}, },
want: "", want: "",
wantErr: true, wantErr: true,
}, },
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got, err := GetValue[string](_mapToSyncMap(tt.args.vals), tt.args.key) got, err := GetValue[string](_mapToSyncMap(tt.args.vals), tt.args.key)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr) t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
return return
} }
if !reflect.DeepEqual(got, tt.want) { if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetValue() = %v, want %v", got, tt.want) t.Errorf("GetValue() = %v, want %v", got, tt.want)
} }
}) })
} }
} }
func TestGetValue_struct(t *testing.T) { func TestGetValue_struct(t *testing.T) {
t.Parallel() t.Parallel()
type val struct { type val struct {
a int a int
b string b string
c bool c bool
} }
type args struct { type args struct {
vals map[string]any vals map[string]any
key string key string
} }
tests := []struct { tests := []struct {
name string name string
args args args args
want val want val
wantErr bool wantErr bool
}{ }{
{ {
name: "ok", name: "ok",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": val{ "1": val{
a: 1, a: 1,
b: "2", b: "2",
c: true, c: true,
}, },
"2": "234", "2": "234",
}, },
key: "1", key: "1",
}, },
want: val{ want: val{
a: 1, a: 1,
b: "2", b: "2",
c: true, c: true,
}, },
wantErr: false, wantErr: false,
}, },
{ {
name: "value not presented", name: "value not presented",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": "123", "1": "123",
"2": "234", "2": "234",
}, },
key: "3", key: "3",
}, },
want: val{}, want: val{},
wantErr: true, wantErr: true,
}, },
{ {
name: "nil map", name: "nil map",
args: args{ args: args{
vals: nil, vals: nil,
key: "1", key: "1",
}, },
want: val{}, want: val{},
wantErr: true, wantErr: true,
}, },
{ {
name: "invalid type", name: "invalid type",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": "123", "1": "123",
"2": 234, "2": 234,
}, },
key: "2", key: "2",
}, },
want: val{}, want: val{},
wantErr: true, wantErr: true,
}, },
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got, err := GetValue[val](_mapToSyncMap(tt.args.vals), tt.args.key) got, err := GetValue[val](_mapToSyncMap(tt.args.vals), tt.args.key)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr) t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
return return
} }
if !reflect.DeepEqual(got, tt.want) { if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetValue() = %v, want %v", got, tt.want) t.Errorf("GetValue() = %v, want %v", got, tt.want)
} }
}) })
} }
} }
func TestGetValue_structptr(t *testing.T) { func TestGetValue_structptr(t *testing.T) {
t.Parallel() t.Parallel()
type val struct { type val struct {
a int a int
b string b string
c bool c bool
} }
type args struct { type args struct {
vals map[string]any vals map[string]any
key string key string
} }
tests := []struct { tests := []struct {
name string name string
args args args args
want *val want *val
wantErr bool wantErr bool
}{ }{
{ {
name: "ok", name: "ok",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": &val{ "1": &val{
a: 1, a: 1,
b: "2", b: "2",
c: true, c: true,
}, },
"2": "234", "2": "234",
}, },
key: "1", key: "1",
}, },
want: &val{ want: &val{
a: 1, a: 1,
b: "2", b: "2",
c: true, c: true,
}, },
wantErr: false, wantErr: false,
}, },
{ {
name: "value not presented", name: "value not presented",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": "123", "1": "123",
"2": "234", "2": "234",
}, },
key: "3", key: "3",
}, },
want: nil, want: nil,
wantErr: true, wantErr: true,
}, },
{ {
name: "nil map", name: "nil map",
args: args{ args: args{
vals: nil, vals: nil,
key: "1", key: "1",
}, },
want: nil, want: nil,
wantErr: true, wantErr: true,
}, },
{ {
name: "invalid type", name: "invalid type",
args: args{ args: args{
vals: map[string]any{ vals: map[string]any{
"1": "123", "1": "123",
"2": 234, "2": 234,
}, },
key: "2", key: "2",
}, },
want: nil, want: nil,
wantErr: true, wantErr: true,
}, },
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
got, err := GetValue[*val](_mapToSyncMap(tt.args.vals), tt.args.key) got, err := GetValue[*val](_mapToSyncMap(tt.args.vals), tt.args.key)
if (err != nil) != tt.wantErr { if (err != nil) != tt.wantErr {
t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr) t.Errorf("GetValue() error = %v, wantErr %v", err, tt.wantErr)
return return
} }
if !reflect.DeepEqual(got, tt.want) { if !reflect.DeepEqual(got, tt.want) {
t.Errorf("GetValue() = %v, want %v", got, tt.want) t.Errorf("GetValue() = %v, want %v", got, tt.want)
} }
}) })
} }
} }
// _mapToSyncMap copies a plain map into a fresh *sync.Map (test helper).
func _mapToSyncMap(m map[string]any) *sync.Map {
	out := new(sync.Map)
	for key, value := range m {
		out.Store(key, value)
	}
	return out
}

View File

@ -1,70 +1,70 @@
package config package config
import ( import (
"context" "context"
"time" "time"
) )
// Provider hands out configuration values by key.
type Provider interface {
	GetValue(ctx context.Context, key Key) Value
}

// Key names a single configuration entry.
type Key string

// Value exposes a configuration entry under several scalar views; views that
// do not apply return the zero value.
type Value interface {
	Int() int
	String() string
	Float() float64
	Duration() time.Duration
}
// DurationValue wraps a duration-typed configuration entry.
// NOTE(review): this type has no methods, so it does not satisfy Value —
// confirm whether it is still needed or should gain EmptyValue + Duration().
type DurationValue time.Duration

// FloatValue is a Value backed by a float64.
type FloatValue struct {
	EmptyValue
	Val float64
}

// Float returns the underlying float64.
func (v FloatValue) Float() float64 {
	return v.Val
}

// Int returns the truncated integer part of the value. Fix for an
// inconsistency: IntValue widens to Float, but FloatValue previously fell
// through to EmptyValue.Int and always returned 0.
func (v FloatValue) Int() int {
	return int(v.Val)
}

// StringValue is a Value backed by a string.
type StringValue struct {
	EmptyValue
	Val string
}

// String returns the underlying string.
func (v StringValue) String() string {
	return v.Val
}

// IntValue is a Value backed by an int.
type IntValue struct {
	EmptyValue
	Val int
}

// Int returns the underlying int.
func (v IntValue) Int() int {
	return v.Val
}

// Float widens the underlying int to float64.
func (v IntValue) Float() float64 {
	return float64(v.Val)
}

// EmptyValue is the zero Value: every view returns its zero value. It is
// embedded by the concrete types above to supply defaults for the views
// they do not override.
type EmptyValue struct{}

// Int returns 0.
func (v EmptyValue) Int() int {
	return 0
}

// String returns "".
func (v EmptyValue) String() string {
	return ""
}

// Float returns 0.
func (v EmptyValue) Float() float64 {
	return 0
}

// Duration returns 0.
func (v EmptyValue) Duration() time.Duration {
	return 0
}

View File

@ -1 +1 @@
package externalprovider package externalprovider

View File

@ -1,30 +1,30 @@
package natskv package natskv
import ( import (
"context" "context"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"github.com/nats-io/nats.go/jetstream" "github.com/nats-io/nats.go/jetstream"
) )
type Provider struct { type Provider struct {
cc jetstream.KeyValue cc jetstream.KeyValue
} }
func New( func New(
ctx context.Context, ctx context.Context,
js jetstream.JetStream, js jetstream.JetStream,
) *Provider { ) *Provider {
kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{ kv, err := js.CreateKeyValue(ctx, jetstream.KeyValueConfig{
Bucket: "rtc", Bucket: "rtc",
Description: "Real Time Config", Description: "Real Time Config",
Storage: jetstream.FileStorage, Storage: jetstream.FileStorage,
Replicas: 2, Replicas: 2,
Compression: true, Compression: true,
}) })
if err != nil { if err != nil {
logger.Fatal(ctx, "[natskv][New] failed to initialize rtc", logger.Err(err)) logger.Fatal(ctx, "[natskv][New] failed to initialize rtc", logger.Err(err))
} }
return &Provider{cc: kv} return &Provider{cc: kv}
} }

View File

@ -1,123 +1,123 @@
package staticprovider package staticprovider
import ( import (
"context" "context"
"os" "os"
"path/filepath" "path/filepath"
"sync" "sync"
"git.optclblast.xyz/draincloud/draincloud-core/internal/config" "git.optclblast.xyz/draincloud/draincloud-core/internal/config"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
"github.com/spf13/viper" "github.com/spf13/viper"
) )
var _ config.Provider = new(staticProvider) var _ config.Provider = new(staticProvider)
type StaticProvider interface { type StaticProvider interface {
config.Provider config.Provider
} }
type staticProvider struct { type staticProvider struct {
m sync.RWMutex m sync.RWMutex
rawValues map[string]any rawValues map[string]any
} }
func (p *staticProvider) GetValue(ctx context.Context, key config.Key) config.Value { func (p *staticProvider) GetValue(ctx context.Context, key config.Key) config.Value {
p.m.RLock() p.m.RLock()
defer p.m.RUnlock() defer p.m.RUnlock()
rawVal, ok := p.rawValues[string(key)] rawVal, ok := p.rawValues[string(key)]
if !ok { if !ok {
return config.EmptyValue{} return config.EmptyValue{}
} }
switch val := rawVal.(type) { switch val := rawVal.(type) {
case int: case int:
return config.IntValue{ return config.IntValue{
Val: val, Val: val,
} }
case string: case string:
return config.StringValue{ return config.StringValue{
Val: val, Val: val,
} }
case float32: case float64:
return config.FloatValue{ return config.FloatValue{
Val: val, Val: val,
} }
default: default:
return config.EmptyValue{} return config.EmptyValue{}
} }
} }
type newStaticProviderOptions struct { type newStaticProviderOptions struct {
configName string configName string
configDirPath string configDirPath string
configFileType string configFileType string
} }
func mustDefaultNewStaticProviderOptions(ctx context.Context) *newStaticProviderOptions { func mustDefaultNewStaticProviderOptions(ctx context.Context) *newStaticProviderOptions {
ex, err := os.Executable() ex, err := os.Executable()
if err != nil { if err != nil {
logger.Fatal(ctx, "failed to get executable location", logger.Err(err)) logger.Fatal(ctx, "failed to get executable location", logger.Err(err))
} }
exPath := filepath.Dir(ex) exPath := filepath.Dir(ex)
return &newStaticProviderOptions{ return &newStaticProviderOptions{
configName: "config", configName: "config",
configDirPath: exPath, configDirPath: exPath,
configFileType: "yaml", configFileType: "yaml",
} }
} }
type NewStaticProviderOption func(o *newStaticProviderOptions) type NewStaticProviderOption func(o *newStaticProviderOptions)
func WithConfigDir(path string) NewStaticProviderOption { func WithConfigDir(path string) NewStaticProviderOption {
return func(o *newStaticProviderOptions) { return func(o *newStaticProviderOptions) {
o.configDirPath = path o.configDirPath = path
} }
} }
func WithConfigType(t string) NewStaticProviderOption { func WithConfigType(t string) NewStaticProviderOption {
return func(o *newStaticProviderOptions) { return func(o *newStaticProviderOptions) {
o.configFileType = t o.configFileType = t
} }
} }
func WithConfigName(name string) NewStaticProviderOption { func WithConfigName(name string) NewStaticProviderOption {
return func(o *newStaticProviderOptions) { return func(o *newStaticProviderOptions) {
o.configName = name o.configName = name
} }
} }
func NewStaticProvider( func NewStaticProvider(
ctx context.Context, ctx context.Context,
opts ...NewStaticProviderOption, opts ...NewStaticProviderOption,
) (*staticProvider, error) { ) (*staticProvider, error) {
o := mustDefaultNewStaticProviderOptions(ctx) o := mustDefaultNewStaticProviderOptions(ctx)
for _, opt := range opts { for _, opt := range opts {
opt(o) opt(o)
} }
// TODO check if ile exists // TODO check if ile exists
provider := &staticProvider{ provider := &staticProvider{
rawValues: make(map[string]any), rawValues: make(map[string]any),
} }
viper.SetConfigName(o.configName) viper.SetConfigName(o.configName)
viper.SetConfigType(o.configFileType) viper.SetConfigType(o.configFileType)
viper.AddConfigPath(o.configDirPath) viper.AddConfigPath(o.configDirPath)
viper.WatchConfig() viper.WatchConfig()
viper.OnConfigChange(func(_ fsnotify.Event) { viper.OnConfigChange(func(_ fsnotify.Event) {
provider.m.Lock() provider.m.Lock()
defer provider.m.Unlock() defer provider.m.Unlock()
provider.rawValues = viper.AllSettings() provider.rawValues = viper.AllSettings()
}) })
provider.rawValues = viper.AllSettings() provider.rawValues = viper.AllSettings()
return provider, nil return provider, nil
} }

View File

@ -1,46 +1,46 @@
package cleanupsessions package cleanupsessions
import ( import (
"context" "context"
"time" "time"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
) )
// TODO set with config // TODO set with config
const cronInterval = time.Minute * 10 const cronInterval = time.Minute * 10
type ExpiredSessionsRemover interface { type ExpiredSessionsRemover interface {
RemoveExpiredSessions(ctx context.Context) error RemoveExpiredSessions(ctx context.Context) error
} }
type CleanupSessionCron struct { type CleanupSessionCron struct {
db ExpiredSessionsRemover db ExpiredSessionsRemover
} }
func New(db ExpiredSessionsRemover) *CleanupSessionCron { func New(db ExpiredSessionsRemover) *CleanupSessionCron {
return &CleanupSessionCron{ return &CleanupSessionCron{
db: db, db: db,
} }
} }
func (c *CleanupSessionCron) Run(ctx context.Context) { func (c *CleanupSessionCron) Run(ctx context.Context) {
logger.Info(ctx, "[CleanupSessionCron] running cron") logger.Info(ctx, "[CleanupSessionCron] running cron")
go func() { go func() {
t := time.NewTicker(cronInterval) t := time.NewTicker(cronInterval)
defer t.Stop() defer t.Stop()
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
logger.Warn(ctx, "[CleanupSessionCron] context cancelled") logger.Warn(ctx, "[CleanupSessionCron] context cancelled")
return return
case <-t.C: case <-t.C:
logger.Notice(ctx, "[CleanupSessionCron] cleanup started") logger.Notice(ctx, "[CleanupSessionCron] cleanup started")
t.Reset(cronInterval) t.Reset(cronInterval)
if err := c.db.RemoveExpiredSessions(ctx); err != nil { if err := c.db.RemoveExpiredSessions(ctx); err != nil {
logger.Error(ctx, "[CleanupSessionCron] failed to remove expired sessions", logger.Err(err)) logger.Error(ctx, "[CleanupSessionCron] failed to remove expired sessions", logger.Err(err))
} }
} }
} }
}() }()
} }

View File

@ -1,7 +1,7 @@
package cron package cron
import "context" import "context"
// Cron is a background job that runs until its context is cancelled.
type Cron interface {
	Run(ctx context.Context)
}

View File

@ -1 +1 @@
package domain package domain

View File

@ -0,0 +1,24 @@
package domain
import "fmt"
// StorageType enumerates the backends a file blob can live in.
type StorageType int

const (
	// StorageTypeFS stores blobs on the local filesystem.
	StorageTypeFS StorageType = iota
	// StorageTypeS3 stores blobs in S3-compatible object storage.
	StorageTypeS3
)

const (
	// fslinkTemplate renders a raw fslink as a filesystem URI.
	fslinkTemplate = "fs:///%s"
)

// GetFSConverter returns a function that renders a raw fslink into a full
// URI for the given backend.
// NOTE(review): only the filesystem scheme exists today — every storage
// type falls through to it (TODO s3 converter).
func GetFSConverter(storageType StorageType) func(fslink string) string {
	switch storageType {
	default:
		return func(fslink string) string {
			return fmt.Sprintf(fslinkTemplate, fslink)
		}
	}
}

View File

@ -1,7 +1,7 @@
package domain package domain
// RegisterResolverRequest is the payload for registering an external
// resolver with the core.
type RegisterResolverRequest struct {
	ResolverName          string   `json:"resolver_name"`
	ResolverEndpoint      string   `json:"resolver_endpoint"`
	RequiredResolveParams []string `json:"required_resolve_params"`
}

View File

@ -1,29 +1,29 @@
package domain package domain
// RegisterRequest carries credentials for account creation.
type RegisterRequest struct {
	Login    string `json:"login"`
	Password string `json:"password"`
}

// RegisterResponse reports the outcome of a registration attempt.
type RegisterResponse struct {
	Ok      bool   `json:"ok"`
	Message string `json:"message"`
}

// LogonRequest carries credentials for signing in.
type LogonRequest struct {
	Login    string `json:"login"`
	Password string `json:"password"`
}

// LogonResponse reports the outcome of a sign-in attempt.
type LogonResponse struct {
	Ok      bool   `json:"ok"`
	Message string `json:"message"`
}

// LogoutRequest is empty — presumably the session is taken from request
// metadata rather than the body; confirm against the logout handler.
type LogoutRequest struct {
}

// ErrorJson is the common error envelope returned to clients.
type ErrorJson struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
}

View File

@ -1,9 +1,9 @@
package errs package errs
import "errors" import "errors"
// Sentinel errors shared by the auth layers; compare with errors.Is.
var (
	// ErrorUnauthorized: no valid credentials were presented.
	ErrorUnauthorized = errors.New("unauthorized")
	// ErrorAccessDenied: authenticated but not permitted.
	ErrorAccessDenied = errors.New("access denied")
	// ErrorSessionExpired: the session exists but its lifetime elapsed.
	ErrorSessionExpired = errors.New("session expired")
)

View File

@ -1,51 +1,58 @@
package filesengine package filesengine
import ( import (
"context" "context"
"fmt" "fmt"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
"github.com/google/uuid" // "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
) "github.com/google/uuid"
)
type FilesEngine struct {
blobStorage storage.BlobStorage type FilesEngine struct {
metaStorage storage.MetaStorage blobStorage storage.BlobStorage
} metaStorage storage.MetaStorage
}
func NewFilesEngine(
blobStorage storage.BlobStorage, func NewFilesEngine(
metaStorage storage.MetaStorage, blobStorage storage.BlobStorage,
) *FilesEngine { metaStorage storage.MetaStorage,
return &FilesEngine{ ) *FilesEngine {
blobStorage: blobStorage, return &FilesEngine{
metaStorage: metaStorage, blobStorage: blobStorage,
} metaStorage: metaStorage,
} }
}
type File struct {
Name string type File struct {
UserID uuid.UUID Name string
Ext string UserID int64
Type string Ext string
Size int64 Type string
Data []byte Size int64
} Data []byte
}
// TODO save file
func (e *FilesEngine) SaveFile( // TODO save file
ctx context.Context, func (e *FilesEngine) SaveFile(
file File, ctx context.Context,
) (uuid.UUID, error) { file File,
fileID, err := e.metaStorage.SaveMetadata(ctx, files.FileMetadata{}) ) (uuid.UUID, error) {
if err != nil { fileID, err := e.metaStorage.SaveMetadata(ctx, files.FileMetadata{
return uuid.Nil, fmt.Errorf("failed to create new file metadata: %w", err) Name: file.Name,
} UserID: file.UserID,
Ext: file.Ext,
if err = e.blobStorage.SaveBlob(ctx, fileID, file.Data); err != nil { Type: file.Type,
return uuid.Nil, fmt.Errorf("failed to save file data: %w", err) // FSLink: f,
} })
if err != nil {
return fileID, nil return uuid.Nil, fmt.Errorf("failed to create new file metadata: %w", err)
} }
if err = e.blobStorage.SaveBlob(ctx, fileID, file.Data); err != nil {
return uuid.Nil, fmt.Errorf("failed to save file data: %w", err)
}
return fileID, nil
}

View File

@ -1,9 +1,9 @@
package handler package handler
import ( import (
"context" "context"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
) )
type CallHandler func(ctx context.Context, req *common.Request) ([]byte, error) type CallHandler func(ctx context.Context, req *common.Request) ([]byte, error)

View File

@ -1,78 +1,78 @@
package handler package handler
import ( import (
"context" "context"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
) )
// WriteOptions collects the knobs applied when writing a response.
type WriteOptions struct {
	// Code is the status code to emit.
	Code int
}

// WriteOption mutates WriteOptions.
type WriteOption func(opts *WriteOptions)

// WithCode sets the response status code.
func WithCode(code int) WriteOption {
	return func(opts *WriteOptions) {
		opts.Code = code
	}
}

// Writer abstracts the transport-specific response side of a request.
type Writer interface {
	Write(ctx context.Context, resp any, opts ...WriteOption)
	SetCookie(name string, value string, maxAge int, path string, domain string, secure bool, httpOnly bool)
}
type Handler interface { type Handler interface {
GetName() string GetName() string
GetRequiredResolveParams() []string GetRequiredResolveParams() []string
GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error
GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error
} }
type BaseHandler struct { type BaseHandler struct {
Name string Name string
RequiredResolveParams []string RequiredResolveParams []string
ProcessFn func(ctx context.Context, req *common.Request, w Writer) error ProcessFn func(ctx context.Context, req *common.Request, w Writer) error
PreprocessFn func(ctx context.Context, req *common.Request, w Writer) error PreprocessFn func(ctx context.Context, req *common.Request, w Writer) error
} }
func New() *BaseHandler { func New() *BaseHandler {
return new(BaseHandler) return new(BaseHandler)
} }
func (h *BaseHandler) WithName(name string) *BaseHandler { func (h *BaseHandler) WithName(name string) *BaseHandler {
h.Name = name h.Name = name
return h return h
} }
func (h *BaseHandler) WithRequiredResolveParams(params ...string) *BaseHandler { func (h *BaseHandler) WithRequiredResolveParams(params ...string) *BaseHandler {
h.RequiredResolveParams = params h.RequiredResolveParams = params
return h return h
} }
func (h *BaseHandler) WithProcessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler { func (h *BaseHandler) WithProcessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler {
h.ProcessFn = fn h.ProcessFn = fn
return h return h
} }
func (h *BaseHandler) WithPreprocessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler { func (h *BaseHandler) WithPreprocessFunc(fn func(ctx context.Context, req *common.Request, w Writer) error) *BaseHandler {
h.PreprocessFn = fn h.PreprocessFn = fn
return h return h
} }
func (h *BaseHandler) GetName() string { func (h *BaseHandler) GetName() string {
return h.Name return h.Name
} }
func (h *BaseHandler) GetRequiredResolveParams() []string { func (h *BaseHandler) GetRequiredResolveParams() []string {
return h.RequiredResolveParams return h.RequiredResolveParams
} }
func (h *BaseHandler) GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error { func (h *BaseHandler) GetProcessFn() func(ctx context.Context, req *common.Request, w Writer) error {
return h.ProcessFn return h.ProcessFn
} }
func (h *BaseHandler) GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error { func (h *BaseHandler) GetPreprocessFn() func(ctx context.Context, req *common.Request, w Writer) error {
return h.PreprocessFn return h.PreprocessFn
} }

View File

@ -1,154 +1,154 @@
package logger package logger
import ( import (
"context" "context"
"io" "io"
"log/slog" "log/slog"
"os" "os"
"strings" "strings"
) )
type _key string type _key string
//nolint:gochecknoglobals // ... //nolint:gochecknoglobals // ...
var loggerKey _key = "_core_logger" var loggerKey _key = "_core_logger"
type LoggerOpt func(p *loggerParams) type LoggerOpt func(p *loggerParams)
func NewLoggerContext(ctx context.Context, opts ...LoggerOpt) context.Context { func NewLoggerContext(ctx context.Context, opts ...LoggerOpt) context.Context {
p := new(loggerParams) p := new(loggerParams)
for _, o := range opts { for _, o := range opts {
o(p) o(p)
} }
log := p.build() log := p.build()
return context.WithValue(ctx, loggerKey, log) return context.WithValue(ctx, loggerKey, log)
} }
// loggerParams collects options before the logger is built.
type loggerParams struct {
	local     bool        // pretty, colorized output for local development
	addSource bool        // include source file:line in records
	lvl       slog.Level  // minimum level to emit
	writers   []io.Writer // output sinks; stdout when empty
}
func WithWriter(w io.Writer) LoggerOpt { func WithWriter(w io.Writer) LoggerOpt {
return func(p *loggerParams) { return func(p *loggerParams) {
p.writers = append(p.writers, w) p.writers = append(p.writers, w)
} }
} }
func WithLevel(l slog.Level) LoggerOpt { func WithLevel(l slog.Level) LoggerOpt {
return func(p *loggerParams) { return func(p *loggerParams) {
p.lvl = l p.lvl = l
} }
} }
func Local() LoggerOpt { func Local() LoggerOpt {
return func(p *loggerParams) { return func(p *loggerParams) {
p.local = true p.local = true
} }
} }
func WithSource() LoggerOpt { func WithSource() LoggerOpt {
return func(p *loggerParams) { return func(p *loggerParams) {
p.addSource = true p.addSource = true
} }
} }
// Err wraps err into a slog attribute under the conventional "error" key.
func Err(err error) slog.Attr {
	return slog.String("error", err.Error())
}
func MapLevel(lvl string) slog.Level { func MapLevel(lvl string) slog.Level {
switch strings.ToLower(lvl) { switch strings.ToLower(lvl) {
case "debug": case "debug":
return LevelDebug return LevelDebug
case "info": case "info":
return LevelInfo return LevelInfo
case "notice": case "notice":
return LevelNotice return LevelNotice
case "warn": case "warn":
return LevelWarn return LevelWarn
case "error": case "error":
return LevelError return LevelError
case "critical": case "critical":
return LevelCritial return LevelCritial
case "alert": case "alert":
return LevelAlert return LevelAlert
case "emergency": case "emergency":
return LevelEmergency return LevelEmergency
default: default:
return LevelInfo return LevelInfo
} }
} }
func (b *loggerParams) build() *slog.Logger { func (b *loggerParams) build() *slog.Logger {
if len(b.writers) == 0 { if len(b.writers) == 0 {
b.writers = append(b.writers, os.Stdout) b.writers = append(b.writers, os.Stdout)
} }
w := io.MultiWriter(b.writers...) w := io.MultiWriter(b.writers...)
if b.local { if b.local {
opts := prettyHandlerOptions{ opts := prettyHandlerOptions{
SlogOpts: &slog.HandlerOptions{ SlogOpts: &slog.HandlerOptions{
Level: b.lvl, Level: b.lvl,
AddSource: b.addSource, AddSource: b.addSource,
}, },
} }
handler := opts.newPrettyHandler(w) handler := opts.newPrettyHandler(w)
return slog.New(handler) return slog.New(handler)
} }
return newLogger(b.lvl, w) return newLogger(b.lvl, w)
} }
func newLogger(lvl slog.Level, w io.Writer) *slog.Logger { func newLogger(lvl slog.Level, w io.Writer) *slog.Logger {
return slog.New( return slog.New(
slog.NewJSONHandler(w, &slog.HandlerOptions{ slog.NewJSONHandler(w, &slog.HandlerOptions{
Level: lvl, Level: lvl,
ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
if a.Key == slog.LevelKey { if a.Key == slog.LevelKey {
level := a.Value.Any().(slog.Level) level := a.Value.Any().(slog.Level)
switch { switch {
case level < LevelInfo: case level < LevelInfo:
a.Value = slog.StringValue("DEBUG") a.Value = slog.StringValue("DEBUG")
case level < LevelNotice: case level < LevelNotice:
a.Value = slog.StringValue("INFO") a.Value = slog.StringValue("INFO")
case level < LevelWarn: case level < LevelWarn:
a.Value = slog.StringValue("NOTICE") a.Value = slog.StringValue("NOTICE")
case level < LevelError: case level < LevelError:
a.Value = slog.StringValue("WARNING") a.Value = slog.StringValue("WARNING")
case level < LevelCritial: case level < LevelCritial:
a.Value = slog.StringValue("ERROR") a.Value = slog.StringValue("ERROR")
case level < LevelAlert: case level < LevelAlert:
a.Value = slog.StringValue("CRITICAL") a.Value = slog.StringValue("CRITICAL")
case level < LevelEmergency: case level < LevelEmergency:
a.Value = slog.StringValue("ALERT") a.Value = slog.StringValue("ALERT")
default: default:
a.Value = slog.StringValue("EMERGENCY") a.Value = slog.StringValue("EMERGENCY")
} }
} }
return a return a
}, },
}), }),
) )
} }
func loggerFromCtx(ctx context.Context) *slog.Logger { func loggerFromCtx(ctx context.Context) *slog.Logger {
if l, ok := ctx.Value(loggerKey).(*slog.Logger); ok { if l, ok := ctx.Value(loggerKey).(*slog.Logger); ok {
return l return l
} }
return globalLogger return globalLogger
} }

View File

@ -1,35 +1,35 @@
package logger package logger
import ( import (
"context" "context"
"log/slog" "log/slog"
) )
//nolint:unused //... //nolint:unused //...
func newDiscardLogger() *slog.Logger { func newDiscardLogger() *slog.Logger {
return slog.New(newDiscardHandler()) return slog.New(newDiscardHandler())
} }
//nolint:unused //... //nolint:unused //...
type DiscardHandler struct{} type DiscardHandler struct{}
//nolint:unused //... //nolint:unused //...
func newDiscardHandler() *DiscardHandler { func newDiscardHandler() *DiscardHandler {
return &DiscardHandler{} return &DiscardHandler{}
} }
func (h *DiscardHandler) Handle(_ context.Context, _ slog.Record) error { func (h *DiscardHandler) Handle(_ context.Context, _ slog.Record) error {
return nil return nil
} }
func (h *DiscardHandler) WithAttrs(_ []slog.Attr) slog.Handler { func (h *DiscardHandler) WithAttrs(_ []slog.Attr) slog.Handler {
return h return h
} }
func (h *DiscardHandler) WithGroup(_ string) slog.Handler { func (h *DiscardHandler) WithGroup(_ string) slog.Handler {
return h return h
} }
func (h *DiscardHandler) Enabled(_ context.Context, _ slog.Level) bool { func (h *DiscardHandler) Enabled(_ context.Context, _ slog.Level) bool {
return false return false
} }

View File

@ -1,81 +1,81 @@
package logger package logger
import ( import (
"context" "context"
"log/slog" "log/slog"
"os" "os"
) )
//nolint:gochecknoglobals // ... //nolint:gochecknoglobals // ...
var globalLogger *slog.Logger = newLogger(LevelDebug, os.Stdout) var globalLogger *slog.Logger = newLogger(LevelDebug, os.Stdout)
func SetLevel(l slog.Level) { func SetLevel(l slog.Level) {
globalLogger = newLogger(l, os.Stdout) globalLogger = newLogger(l, os.Stdout)
} }
const ( const (
LevelEmergency = slog.Level(10000) LevelEmergency = slog.Level(10000)
LevelAlert = slog.Level(1000) LevelAlert = slog.Level(1000)
LevelCritial = slog.Level(100) LevelCritial = slog.Level(100)
LevelError = slog.LevelError LevelError = slog.LevelError
LevelWarn = slog.LevelWarn LevelWarn = slog.LevelWarn
LevelNotice = slog.Level(2) LevelNotice = slog.Level(2)
LevelInfo = slog.LevelInfo LevelInfo = slog.LevelInfo
LevelDebug = slog.LevelDebug LevelDebug = slog.LevelDebug
) )
func Fatal(ctx context.Context, message string, attrs ...any) { func Fatal(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.Log(ctx, LevelEmergency, message, attrs...) l.Log(ctx, LevelEmergency, message, attrs...)
os.Exit(1) os.Exit(1)
} }
func Emergency(ctx context.Context, message string, attrs ...any) { func Emergency(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.Log(ctx, LevelEmergency, message, attrs...) l.Log(ctx, LevelEmergency, message, attrs...)
} }
func Alert(ctx context.Context, message string, attrs ...any) { func Alert(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.Log(ctx, LevelAlert, message, attrs...) l.Log(ctx, LevelAlert, message, attrs...)
} }
func Critial(ctx context.Context, message string, attrs ...any) { func Critial(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.Log(ctx, LevelCritial, message, attrs...) l.Log(ctx, LevelCritial, message, attrs...)
} }
func Error(ctx context.Context, message string, attrs ...any) { func Error(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.ErrorContext(ctx, message, attrs...) l.ErrorContext(ctx, message, attrs...)
} }
func Warn(ctx context.Context, message string, attrs ...any) { func Warn(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.WarnContext(ctx, message, attrs...) l.WarnContext(ctx, message, attrs...)
} }
func Notice(ctx context.Context, message string, attrs ...any) { func Notice(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.Log(ctx, LevelNotice, message, attrs...) l.Log(ctx, LevelNotice, message, attrs...)
} }
func Info(ctx context.Context, message string, attrs ...any) { func Info(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.InfoContext(ctx, message, attrs...) l.InfoContext(ctx, message, attrs...)
} }
func Debug(ctx context.Context, message string, attrs ...any) { func Debug(ctx context.Context, message string, attrs ...any) {
l := loggerFromCtx(ctx) l := loggerFromCtx(ctx)
l.DebugContext(ctx, message, attrs...) l.DebugContext(ctx, message, attrs...)
} }

View File

@ -1,97 +1,97 @@
package logger package logger
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"io" "io"
stdlog "log" stdlog "log"
"log/slog" "log/slog"
"github.com/fatih/color" "github.com/fatih/color"
) )
// prettyHandlerOptions carries the slog options used to build a prettyHandler.
type prettyHandlerOptions struct {
	SlogOpts *slog.HandlerOptions
}

// prettyHandler renders records as colorized human-readable lines for
// local development. The embedded JSON handler supplies Enabled().
type prettyHandler struct {
	opts prettyHandlerOptions
	slog.Handler
	l     *stdlog.Logger // line-oriented writer for the formatted output
	attrs []slog.Attr    // attrs accumulated via WithAttrs
}
func (opts prettyHandlerOptions) newPrettyHandler( func (opts prettyHandlerOptions) newPrettyHandler(
out io.Writer, out io.Writer,
) *prettyHandler { ) *prettyHandler {
h := &prettyHandler{ h := &prettyHandler{
Handler: slog.NewJSONHandler(out, opts.SlogOpts), Handler: slog.NewJSONHandler(out, opts.SlogOpts),
l: stdlog.New(out, "", 0), l: stdlog.New(out, "", 0),
} }
return h return h
} }
func (h *prettyHandler) Handle(_ context.Context, r slog.Record) error { func (h *prettyHandler) Handle(_ context.Context, r slog.Record) error {
level := r.Level.String() + ":" level := r.Level.String() + ":"
switch r.Level { switch r.Level {
case slog.LevelDebug: case slog.LevelDebug:
level = color.MagentaString(level) level = color.MagentaString(level)
case slog.LevelInfo: case slog.LevelInfo:
level = color.BlueString(level) level = color.BlueString(level)
case slog.LevelWarn: case slog.LevelWarn:
level = color.YellowString(level) level = color.YellowString(level)
case slog.LevelError: case slog.LevelError:
level = color.RedString(level) level = color.RedString(level)
} }
fields := make(map[string]interface{}, r.NumAttrs()) fields := make(map[string]interface{}, r.NumAttrs())
r.Attrs(func(a slog.Attr) bool { r.Attrs(func(a slog.Attr) bool {
fields[a.Key] = a.Value.Any() fields[a.Key] = a.Value.Any()
return true return true
}) })
for _, a := range h.attrs { for _, a := range h.attrs {
fields[a.Key] = a.Value.Any() fields[a.Key] = a.Value.Any()
} }
var b []byte var b []byte
var err error var err error
if len(fields) > 0 { if len(fields) > 0 {
b, err = json.MarshalIndent(fields, "", " ") b, err = json.MarshalIndent(fields, "", " ")
if err != nil { if err != nil {
return err return err
} }
} }
timeStr := r.Time.Format("[15:05:05.000]") timeStr := r.Time.Format("[15:05:05.000]")
msg := color.CyanString(r.Message) msg := color.CyanString(r.Message)
h.l.Println( h.l.Println(
timeStr, timeStr,
level, level,
msg, msg,
color.WhiteString(string(b)), color.WhiteString(string(b)),
) )
return nil return nil
} }
func (h *prettyHandler) WithAttrs(attrs []slog.Attr) slog.Handler { func (h *prettyHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
return &prettyHandler{ return &prettyHandler{
Handler: h.Handler, Handler: h.Handler,
l: h.l, l: h.l,
attrs: attrs, attrs: attrs,
} }
} }
func (h *prettyHandler) WithGroup(name string) slog.Handler { func (h *prettyHandler) WithGroup(name string) slog.Handler {
return &prettyHandler{ return &prettyHandler{
Handler: h.Handler.WithGroup(name), Handler: h.Handler.WithGroup(name),
l: h.l, l: h.l,
} }
} }

View File

@ -1,42 +1,42 @@
package domain package domain
type InitPluginRequest struct { type InitPluginRequest struct {
Name string `json:"name"` Name string `json:"name"`
Version int `json:"version"` Version int `json:"version"`
Namespace string `json:"namespace"` Namespace string `json:"namespace"`
} }
type PluginPage struct { type PluginPage struct {
Name string `json:"name"` Name string `json:"name"`
Version int `json:"version"` Version int `json:"version"`
Namespace string `json:"namespace"` Namespace string `json:"namespace"`
Path string `json:"path"` Path string `json:"path"`
} }
type PluginAction struct { type PluginAction struct {
Name string `json:"name"` Name string `json:"name"`
Version int `json:"version"` Version int `json:"version"`
Namespace string `json:"namespace"` Namespace string `json:"namespace"`
RequiredResolveParams []string `json:"required_resolve_params"` RequiredResolveParams []string `json:"required_resolve_params"`
OptionalResolveParams []string `json:"optional_resolve_params"` OptionalResolveParams []string `json:"optional_resolve_params"`
WithActions bool `json:"with_actions"` WithActions bool `json:"with_actions"`
Async bool `json:"async"` Async bool `json:"async"`
} }
type PluginComponent struct { type PluginComponent struct {
Name string `json:"name"` Name string `json:"name"`
Version int `json:"version"` Version int `json:"version"`
Namespace string `json:"namespace"` Namespace string `json:"namespace"`
RequiredResolveParams []string `json:"required_resolve_params"` RequiredResolveParams []string `json:"required_resolve_params"`
OptionalResolveParams []string `json:"optional_resolve_params"` OptionalResolveParams []string `json:"optional_resolve_params"`
WithActions bool `json:"with_actions"` WithActions bool `json:"with_actions"`
Async bool `json:"async"` Async bool `json:"async"`
} }
type Ping struct { type Ping struct {
Payload any `json:"payload"` Payload any `json:"payload"`
} }
type Pong struct { type Pong struct {
Payload any `json:"payload"` Payload any `json:"payload"`
} }

View File

@ -1,114 +1,114 @@
package plugin package plugin
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"log/slog" "log/slog"
"net" "net"
"strconv" "strconv"
"git.optclblast.xyz/draincloud/draincloud-core/internal/closer" "git.optclblast.xyz/draincloud/draincloud-core/internal/closer"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain" "git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain"
) )
type PluginLoader struct { type PluginLoader struct {
l net.Listener l net.Listener
store *PluginStore store *PluginStore
} }
func MustNewPluginLoader(ctx context.Context, listenPort uint16, ps *PluginStore) *PluginLoader { func MustNewPluginLoader(ctx context.Context, listenPort uint16, ps *PluginStore) *PluginLoader {
l, err := net.Listen("tcp", "127.0.0.1:"+strconv.FormatInt(int64(listenPort), 10)) l, err := net.Listen("tcp", "127.0.0.1:"+strconv.FormatInt(int64(listenPort), 10))
if err != nil { if err != nil {
logger.Fatal(ctx, "[MustNewPluginLoader] error build listener", logger.Err(err)) logger.Fatal(ctx, "[MustNewPluginLoader] error build listener", logger.Err(err))
} }
return &PluginLoader{ return &PluginLoader{
l: l, l: l,
store: ps, store: ps,
} }
} }
func (p *PluginLoader) Run(ctx context.Context) { func (p *PluginLoader) Run(ctx context.Context) {
go p.run(ctx) go p.run(ctx)
} }
func (p *PluginLoader) run(ctx context.Context) { func (p *PluginLoader) run(ctx context.Context) {
for { for {
select { select {
case <-ctx.Done(): case <-ctx.Done():
logger.Info(ctx, "[plugin_loader][loop] closing") logger.Info(ctx, "[plugin_loader][loop] closing")
if err := p.l.Close(); err != nil { if err := p.l.Close(); err != nil {
logger.Error(ctx, "[plugin_loader][loop] failed to close listener", logger.Err(err)) logger.Error(ctx, "[plugin_loader][loop] failed to close listener", logger.Err(err))
} }
default: default:
conn, err := p.l.Accept() conn, err := p.l.Accept()
if err != nil { if err != nil {
logger.Error(ctx, "[plugin_loader][loop] failed to accet new connection", logger.Err(err)) logger.Error(ctx, "[plugin_loader][loop] failed to accet new connection", logger.Err(err))
continue continue
} }
logger.Debug(ctx, "[plugin_loader][loop] accepting connection") logger.Debug(ctx, "[plugin_loader][loop] accepting connection")
go p.accept(ctx, conn) go p.accept(ctx, conn)
} }
} }
} }
func (p *PluginLoader) accept(ctx context.Context, conn net.Conn) { func (p *PluginLoader) accept(ctx context.Context, conn net.Conn) {
data := make([]byte, 0) data := make([]byte, 0)
// TODO make read loop // TODO make read loop
n, err := conn.Read(data) n, err := conn.Read(data)
if err != nil { if err != nil {
logger.Error(ctx, "[plugin_loader][accept] read error", logger.Err(err)) logger.Error(ctx, "[plugin_loader][accept] read error", logger.Err(err))
return return
} }
logger.Debug(ctx, "[plugin_loader][accept] bytes read", slog.Int("n", n)) logger.Debug(ctx, "[plugin_loader][accept] bytes read", slog.Int("n", n))
init := new(domain.InitPluginRequest) init := new(domain.InitPluginRequest)
if err = json.Unmarshal(data, init); err != nil { if err = json.Unmarshal(data, init); err != nil {
logger.Error(ctx, "[plugin_loader][accept] unmarshal request error", logger.Err(err)) logger.Error(ctx, "[plugin_loader][accept] unmarshal request error", logger.Err(err))
return return
} }
if init.Namespace == "" { if init.Namespace == "" {
logger.Error(ctx, "[plugin_loader][accept] empty namespace") logger.Error(ctx, "[plugin_loader][accept] empty namespace")
err = errors.Join(err, errors.New("init request must contain namespace")) err = errors.Join(err, errors.New("init request must contain namespace"))
} }
if init.Name == "" { if init.Name == "" {
logger.Error(ctx, "[plugin_loader][accept] empty namespace") logger.Error(ctx, "[plugin_loader][accept] empty namespace")
err = errors.Join(err, errors.New("init request must contain namespace")) err = errors.Join(err, errors.New("init request must contain namespace"))
} }
if init.Version == 0 { if init.Version == 0 {
logger.Error(ctx, "[plugin_loader][accept] empty namespace") logger.Error(ctx, "[plugin_loader][accept] empty namespace")
err = errors.Join(err, errors.New("init request must contain namespace")) err = errors.Join(err, errors.New("init request must contain namespace"))
} }
if err != nil { if err != nil {
if _, werr := conn.Write([]byte(err.Error())); werr != nil { if _, werr := conn.Write([]byte(err.Error())); werr != nil {
logger.Error(ctx, "[plugin_loader][accept] failed to write init error", logger.Err(werr)) logger.Error(ctx, "[plugin_loader][accept] failed to write init error", logger.Err(werr))
} }
if cerr := conn.Close(); cerr != nil { if cerr := conn.Close(); cerr != nil {
logger.Error(ctx, "[plugin_loader][accept] failed to close conn", logger.Err(cerr)) logger.Error(ctx, "[plugin_loader][accept] failed to close conn", logger.Err(cerr))
} }
return return
} }
logger.Debug(ctx, logger.Debug(ctx,
"[plugin_loader][accept] new plugin initialized", "[plugin_loader][accept] new plugin initialized",
"plugin", PluginStoreKey(init.Namespace, init.Name, init.Version), "plugin", PluginStoreKey(init.Namespace, init.Name, init.Version),
) )
plugin := &Plugin{ plugin := &Plugin{
conn: conn, conn: conn,
md: *init, md: *init,
} }
closer.Add(plugin.Close) closer.Add(plugin.Close)
p.store.Add(plugin) p.store.Add(plugin)
} }

View File

@ -1,45 +1,45 @@
package plugin package plugin
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"net" "net"
"git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain" "git.optclblast.xyz/draincloud/draincloud-core/internal/plugin/domain"
) )
type Plugin struct { type Plugin struct {
conn net.Conn conn net.Conn
md domain.InitPluginRequest md domain.InitPluginRequest
} }
func (p *Plugin) Init(initPayload any) error { func (p *Plugin) Init(initPayload any) error {
r := &domain.Ping{ r := &domain.Ping{
Payload: initPayload, Payload: initPayload,
} }
pingData, err := json.Marshal(r) pingData, err := json.Marshal(r)
if err != nil { if err != nil {
return err return err
} }
if _, err = p.conn.Write(pingData); err != nil { if _, err = p.conn.Write(pingData); err != nil {
return err return err
} }
pongData := make([]byte, 0) pongData := make([]byte, 0)
if _, err := p.conn.Read(pongData); err != nil { if _, err := p.conn.Read(pongData); err != nil {
return err return err
} }
if !bytes.Equal(pongData, pingData) { if !bytes.Equal(pongData, pingData) {
return fmt.Errorf("ping-pong payload assertion error") return fmt.Errorf("ping-pong payload assertion error")
} }
return nil return nil
} }
func (p *Plugin) Close() error { func (p *Plugin) Close() error {
return p.conn.Close() return p.conn.Close()
} }

View File

@ -1,24 +1,24 @@
package plugin package plugin
import ( import (
"context" "context"
"fmt" "fmt"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler" "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
) )
type PluginHandler struct { type PluginHandler struct {
*handler.BaseHandler *handler.BaseHandler
store *PluginStore store *PluginStore
} }
func (_ *PluginHandler) GetName() string { func (_ *PluginHandler) GetName() string {
return "pluginv1" return "pluginv1"
} }
func (p *PluginHandler) GetProcessFn() func(ctx context.Context, req *common.Request, w handler.Writer) error { func (p *PluginHandler) GetProcessFn() func(ctx context.Context, req *common.Request, w handler.Writer) error {
return func(ctx context.Context, req *common.Request, w handler.Writer) error { return func(ctx context.Context, req *common.Request, w handler.Writer) error {
return fmt.Errorf("unimplemented") return fmt.Errorf("unimplemented")
} }
} }

View File

@ -1,37 +1,37 @@
package plugin package plugin
import ( import (
"fmt" "fmt"
"sync" "sync"
) )
type PluginStore struct { type PluginStore struct {
m sync.RWMutex m sync.RWMutex
plugins map[string]*Plugin plugins map[string]*Plugin
} }
func NewPluginStore() *PluginStore { func NewPluginStore() *PluginStore {
return &PluginStore{ return &PluginStore{
plugins: make(map[string]*Plugin), plugins: make(map[string]*Plugin),
} }
} }
func (s *PluginStore) Add(plugin *Plugin) { func (s *PluginStore) Add(plugin *Plugin) {
s.m.Lock() s.m.Lock()
defer s.m.Unlock() defer s.m.Unlock()
s.plugins[PluginStoreKey(plugin.md.Namespace, plugin.md.Name, plugin.md.Version)] = plugin s.plugins[PluginStoreKey(plugin.md.Namespace, plugin.md.Name, plugin.md.Version)] = plugin
} }
func (s *PluginStore) Get(plugin string) *Plugin { func (s *PluginStore) Get(plugin string) *Plugin {
s.m.RLock() s.m.RLock()
defer s.m.RUnlock() defer s.m.RUnlock()
if p, ok := s.plugins[plugin]; ok { if p, ok := s.plugins[plugin]; ok {
return p return p
} }
return nil return nil
} }
// PluginStoreKey builds the registry key "<namespace>.<name>.<version>".
func PluginStoreKey(ns, name string, v int) string {
	return fmt.Sprintf("%s.%s.%d", ns, name, v)
}

View File

@ -1,112 +1,112 @@
package processor package processor
import ( import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
"git.optclblast.xyz/draincloud/draincloud-core/internal/domain" "git.optclblast.xyz/draincloud/draincloud-core/internal/domain"
"git.optclblast.xyz/draincloud/draincloud-core/internal/errs" "git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler" "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher" resolvedispatcher "git.optclblast.xyz/draincloud/draincloud-core/internal/resolve_dispatcher"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
type GinProcessor struct { type GinProcessor struct {
rp *common.RequestPool rp *common.RequestPool
authStorage storage.AuthStorage authStorage storage.AuthStorage
resolveDispatcher *resolvedispatcher.ResolveDispatcher resolveDispatcher *resolvedispatcher.ResolveDispatcher
} }
func NewGinProcessor( func NewGinProcessor(
authStorage storage.AuthStorage, authStorage storage.AuthStorage,
resolveDispatcher *resolvedispatcher.ResolveDispatcher, resolveDispatcher *resolvedispatcher.ResolveDispatcher,
) *GinProcessor { ) *GinProcessor {
return &GinProcessor{ return &GinProcessor{
rp: common.NewRequestPool(), rp: common.NewRequestPool(),
authStorage: authStorage, authStorage: authStorage,
resolveDispatcher: resolveDispatcher, resolveDispatcher: resolveDispatcher,
} }
} }
func (p *GinProcessor) Process(handler handler.Handler) gin.HandlerFunc { func (p *GinProcessor) Process(handler handler.Handler) gin.HandlerFunc {
return func(ctx *gin.Context) { return func(ctx *gin.Context) {
req := common.NewRequestFromHttp(p.rp, ctx.Request) req := common.NewRequestFromHttp(p.rp, ctx.Request)
ctx.Request = ctx.Request.WithContext(context.WithValue(ctx.Request.Context(), "__request_id", req.ID)) ctx.Request = ctx.Request.WithContext(context.WithValue(ctx.Request.Context(), "__request_id", req.ID))
// 1. Resolve the resolvers, collect all data required // 1. Resolve the resolvers, collect all data required
// 2. Try process oprional resolvers // 2. Try process oprional resolvers
err := p.resolve(ctx, handler, req) err := p.resolve(ctx, handler, req)
if err != nil { if err != nil {
p.writeError(ctx, err) p.writeError(ctx, err)
return return
} }
// 3. Call preprocessing fn's, middlewares etc. // 3. Call preprocessing fn's, middlewares etc.
if preprocessFn := handler.GetPreprocessFn(); preprocessFn != nil { if preprocessFn := handler.GetPreprocessFn(); preprocessFn != nil {
if err = preprocessFn(ctx, req, wrapGin(ctx)); err != nil { if err = preprocessFn(ctx, req, wrapGin(ctx)); err != nil {
p.writeError(ctx, err) p.writeError(ctx, err)
return return
} }
} }
// 4. Call handler.ProcessFn // 4. Call handler.ProcessFn
if err = handler.GetProcessFn()(ctx, req, wrapGin(ctx)); err != nil { if err = handler.GetProcessFn()(ctx, req, wrapGin(ctx)); err != nil {
p.writeError(ctx, err) p.writeError(ctx, err)
return return
} }
} }
} }
func (p *GinProcessor) resolve(ctx *gin.Context, h handler.Handler, req *common.Request) error { func (p *GinProcessor) resolve(ctx *gin.Context, h handler.Handler, req *common.Request) error {
eg, c := errgroup.WithContext(ctx) eg, c := errgroup.WithContext(ctx)
for _, r := range h.GetRequiredResolveParams() { for _, r := range h.GetRequiredResolveParams() {
resolver, err := p.resolveDispatcher.GetResolver(r) resolver, err := p.resolveDispatcher.GetResolver(r)
if err != nil { if err != nil {
return fmt.Errorf("failed to resolve '%s' param: no resolver provided: %w", r, err) return fmt.Errorf("failed to resolve '%s' param: no resolver provided: %w", r, err)
} }
resolveValueName := r resolveValueName := r
eg.Go(func() error { eg.Go(func() error {
if resolveErr := resolver.Resolve(c, req, ctx); resolveErr != nil { if resolveErr := resolver.Resolve(c, req, ctx); resolveErr != nil {
return fmt.Errorf("failed to resolve '%s' value: %w", resolveValueName, resolveErr) return fmt.Errorf("failed to resolve '%s' value: %w", resolveValueName, resolveErr)
} }
return nil return nil
}) })
} }
if err := eg.Wait(); err != nil { if err := eg.Wait(); err != nil {
return err return err
} }
return nil return nil
} }
func (p *GinProcessor) writeError(ctx *gin.Context, err error) { func (p *GinProcessor) writeError(ctx *gin.Context, err error) {
logger.Error(ctx, "error process request", logger.Err(err)) logger.Error(ctx, "error process request", logger.Err(err))
// TODO do a custom error handling for resolvers / handlers / processors etc // TODO do a custom error handling for resolvers / handlers / processors etc
switch { switch {
case errors.Is(err, errs.ErrorAccessDenied): case errors.Is(err, errs.ErrorAccessDenied):
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{ ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
Code: http.StatusForbidden, Code: http.StatusForbidden,
Message: err.Error(), Message: err.Error(),
}) })
case errors.Is(err, errs.ErrorSessionExpired): case errors.Is(err, errs.ErrorSessionExpired):
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{ ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
Code: http.StatusForbidden, Code: http.StatusForbidden,
Message: err.Error(), Message: err.Error(),
}) })
default: default:
ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{ ctx.JSON(http.StatusInternalServerError, domain.ErrorJson{
Code: http.StatusInternalServerError, Code: http.StatusInternalServerError,
Message: "Internal Error", Message: "Internal Error",
}) })
} }
} }

View File

@ -1,34 +1,34 @@
package processor package processor
import ( import (
"context" "context"
"net/http" "net/http"
"git.optclblast.xyz/draincloud/draincloud-core/internal/handler" "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
) )
type ginWriter struct { type ginWriter struct {
ctx *gin.Context ctx *gin.Context
} }
func wrapGin(ctx *gin.Context) ginWriter { func wrapGin(ctx *gin.Context) ginWriter {
return ginWriter{ return ginWriter{
ctx: ctx, ctx: ctx,
} }
} }
func (w ginWriter) Write(ctx context.Context, resp any, opts ...handler.WriteOption) { func (w ginWriter) Write(ctx context.Context, resp any, opts ...handler.WriteOption) {
params := &handler.WriteOptions{ params := &handler.WriteOptions{
Code: http.StatusOK, Code: http.StatusOK,
} }
for _, o := range opts { for _, o := range opts {
o(params) o(params)
} }
w.ctx.JSON(params.Code, resp) w.ctx.JSON(params.Code, resp)
} }
func (w ginWriter) SetCookie(name string, value string, maxAge int, path string, domain string, secure bool, httpOnly bool) { func (w ginWriter) SetCookie(name string, value string, maxAge int, path string, domain string, secure bool, httpOnly bool) {
w.ctx.SetCookie(name, value, maxAge, path, domain, secure, httpOnly) w.ctx.SetCookie(name, value, maxAge, path, domain, secure, httpOnly)
} }

View File

@ -1,7 +1,7 @@
package processor package processor
import "git.optclblast.xyz/draincloud/draincloud-core/internal/handler" import "git.optclblast.xyz/draincloud/draincloud-core/internal/handler"
type Processor[H any] interface { type Processor[H any] interface {
Process(handler.Handler) H Process(handler.Handler) H
} }

View File

@ -1,38 +1,38 @@
package reqcontext package reqcontext
import ( import (
"context" "context"
"fmt" "fmt"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
"github.com/google/uuid" "github.com/google/uuid"
) )
type CtxKey string type CtxKey string
const ( const (
UserIDCtxKey CtxKey = "_ctx_user_id" UserIDCtxKey CtxKey = "_ctx_user_id"
SessionCtxKey CtxKey = "_ctx_session" SessionCtxKey CtxKey = "_ctx_session"
) )
func WithUserID(parent context.Context, userID uuid.UUID) context.Context { func WithUserID(parent context.Context, userID uuid.UUID) context.Context {
return context.WithValue(parent, UserIDCtxKey, userID) return context.WithValue(parent, UserIDCtxKey, userID)
} }
func GetUserID(ctx context.Context) (uuid.UUID, error) { func GetUserID(ctx context.Context) (uuid.UUID, error) {
if id, ok := ctx.Value(UserIDCtxKey).(uuid.UUID); ok { if id, ok := ctx.Value(UserIDCtxKey).(uuid.UUID); ok {
return id, nil return id, nil
} }
return uuid.Nil, fmt.Errorf("userID not passed with context") return uuid.Nil, fmt.Errorf("userID not passed with context")
} }
func WithSession(parent context.Context, session *auth.Session) context.Context { func WithSession(parent context.Context, session *auth.Session) context.Context {
return context.WithValue(parent, SessionCtxKey, session) return context.WithValue(parent, SessionCtxKey, session)
} }
func GetSession(ctx context.Context) (*auth.Session, error) { func GetSession(ctx context.Context) (*auth.Session, error) {
if ses, ok := ctx.Value(UserIDCtxKey).(*auth.Session); ok { if ses, ok := ctx.Value(UserIDCtxKey).(*auth.Session); ok {
return ses, nil return ses, nil
} }
return nil, fmt.Errorf("session not passed with context") return nil, fmt.Errorf("session not passed with context")
} }

View File

@ -1,48 +1,48 @@
package resolvedispatcher package resolvedispatcher
import ( import (
"context" "context"
"fmt" "fmt"
"sync" "sync"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers" "git.optclblast.xyz/draincloud/draincloud-core/internal/resolvers"
) )
type ResolveDispatcher struct { type ResolveDispatcher struct {
m sync.RWMutex m sync.RWMutex
router map[string]resolvers.Resolver router map[string]resolvers.Resolver
} }
func New() *ResolveDispatcher { func New() *ResolveDispatcher {
return &ResolveDispatcher{ return &ResolveDispatcher{
router: map[string]resolvers.Resolver{}, router: map[string]resolvers.Resolver{},
} }
} }
func (d *ResolveDispatcher) RegisterResolver( func (d *ResolveDispatcher) RegisterResolver(
ctx context.Context, ctx context.Context,
resolverName string, resolverName string,
resolver resolvers.Resolver, resolver resolvers.Resolver,
) { ) {
d.m.Lock() d.m.Lock()
defer d.m.Unlock() defer d.m.Unlock()
if _, ok := d.router[resolverName]; ok { if _, ok := d.router[resolverName]; ok {
logger.Fatal(ctx, fmt.Sprintf("resolver '%s' is already registered in router", resolverName)) logger.Fatal(ctx, fmt.Sprintf("resolver '%s' is already registered in router", resolverName))
} }
d.router[resolverName] = resolver d.router[resolverName] = resolver
} }
func (d *ResolveDispatcher) GetResolver(name string) (resolvers.Resolver, error) { func (d *ResolveDispatcher) GetResolver(name string) (resolvers.Resolver, error) {
d.m.RLock() d.m.RLock()
defer d.m.RUnlock() defer d.m.RUnlock()
res, ok := d.router[name] res, ok := d.router[name]
if !ok { if !ok {
return nil, fmt.Errorf("resolver '%s' not found", name) return nil, fmt.Errorf("resolver '%s' not found", name)
} }
return res, nil return res, nil
} }

View File

@ -1,109 +1,109 @@
package auth package auth
import (
	"context"
	"crypto/subtle"
	"errors"
	"fmt"
	"log/slog"
	"net/http"
	"time"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/common"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/errs"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
	models "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
)
const ( const (
AuthResolverV1Name = "auth.v1" AuthResolverV1Name = "auth.v1"
) )
const ( const (
csrfTokenCookie = "__Csrf_token" csrfTokenCookie = "__Csrf_token"
sessionTokenCookie = "__Session_token" sessionTokenCookie = "__Session_token"
) )
type AuthResolver struct { type AuthResolver struct {
authStorage storage.AuthStorage authStorage storage.AuthStorage
} }
func NewAuthResolver(authStorage storage.AuthStorage) *AuthResolver { func NewAuthResolver(authStorage storage.AuthStorage) *AuthResolver {
return &AuthResolver{ return &AuthResolver{
authStorage: authStorage, authStorage: authStorage,
} }
} }
func (r *AuthResolver) Resolve(ctx context.Context, req *common.Request, _ any) error { func (r *AuthResolver) Resolve(ctx context.Context, req *common.Request, _ any) error {
return r.authorize(ctx, req) return r.authorize(ctx, req)
} }
func (r *AuthResolver) GetRequiredResolveParams() []string { func (r *AuthResolver) GetRequiredResolveParams() []string {
return nil return nil
} }
func (p *AuthResolver) authorize(ctx context.Context, req *common.Request) error { func (p *AuthResolver) authorize(ctx context.Context, req *common.Request) error {
session, err := p.getSession(ctx, req) session, err := p.getSession(ctx, req)
if err != nil && !errors.Is(err, http.ErrNoCookie) { if err != nil && !errors.Is(err, http.ErrNoCookie) {
return errs.ErrorUnauthorized return errs.ErrorUnauthorized
} }
if session == nil { if session == nil {
return errs.ErrorUnauthorized return errs.ErrorUnauthorized
} }
if err := validateSession(ctx, req, session); err != nil { if err := validateSession(ctx, req, session); err != nil {
// TODO add audit log entry // TODO add audit log entry
return errs.ErrorUnauthorized return errs.ErrorUnauthorized
} }
user, err := p.authStorage.GetUserByID(ctx, session.UserID) user, err := p.authStorage.GetUserByID(ctx, session.UserID)
if err != nil { if err != nil {
return fmt.Errorf("failed to fetch user by id: %w", err) return fmt.Errorf("failed to fetch user by id: %w", err)
} }
logger.Debug(ctx, "[authorize] user authorized", slog.String("session_id", session.ID.String())) logger.Debug(ctx, "[authorize] user authorized", slog.String("session_id", session.ID.String()))
req.User = user req.User = user
req.Session = session req.Session = session
return nil return nil
} }
func (d *AuthResolver) getSession(ctx context.Context, req *common.Request) (*models.Session, error) { func (d *AuthResolver) getSession(ctx context.Context, req *common.Request) (*models.Session, error) {
token, err := common.GetValue[string](req.Metadata, sessionTokenCookie) token, err := common.GetValue[string](req.Metadata, sessionTokenCookie)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err) return nil, fmt.Errorf("failed to fetch session cookie from request: %w", err)
} }
if len(token) == 0 { if len(token) == 0 {
return nil, fmt.Errorf("session token or csrf token is empty") return nil, fmt.Errorf("session token or csrf token is empty")
} }
session, err := d.authStorage.GetSession(ctx, token) session, err := d.authStorage.GetSession(ctx, token)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to fetch session from repo: %w", err) return nil, fmt.Errorf("failed to fetch session from repo: %w", err)
} }
return session, nil return session, nil
} }
func validateSession(_ context.Context, req *common.Request, session *models.Session) error { func validateSession(_ context.Context, req *common.Request, session *models.Session) error {
if session == nil { if session == nil {
return errs.ErrorAccessDenied return errs.ErrorAccessDenied
} }
csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie) csrfToken, err := common.GetValue[string](req.Metadata, csrfTokenCookie)
if err != nil { if err != nil {
return fmt.Errorf("failed to fetch csrf cookie from request: %w", err) return fmt.Errorf("failed to fetch csrf cookie from request: %w", err)
} }
if session.CsrfToken != csrfToken { if session.CsrfToken != csrfToken {
return errs.ErrorAccessDenied return errs.ErrorAccessDenied
} }
if session.ExpiredAt.Before(time.Now()) { if session.ExpiredAt.Before(time.Now()) {
return errs.ErrorSessionExpired return errs.ErrorSessionExpired
} }
return nil return nil
} }

View File

@ -1,33 +1,33 @@
package pluginname package pluginname
import ( import (
"context" "context"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
const ( const (
PluginNameResolverName = "plugin_name" PluginNameResolverName = "plugin_name"
) )
type PluginNameResolver struct{} type PluginNameResolver struct{}
func (p *PluginNameResolver) Resolve(ctx context.Context, req *common.Request, rawReq any) error { func (p *PluginNameResolver) Resolve(ctx context.Context, req *common.Request, rawReq any) error {
ginCtx, ok := rawReq.(*gin.Context) ginCtx, ok := rawReq.(*gin.Context)
if !ok { if !ok {
return status.Errorf(codes.Internal, "invalid request type") return status.Errorf(codes.Internal, "invalid request type")
} }
pluginName := ginCtx.Param("plugin_name") pluginName := ginCtx.Param("plugin_name")
if pluginName == "" { if pluginName == "" {
return status.Error(codes.InvalidArgument, "plugin name is empty") return status.Error(codes.InvalidArgument, "plugin name is empty")
} }
req.ResolveValues.Store(PluginNameResolverName, pluginName) req.ResolveValues.Store(PluginNameResolverName, pluginName)
return nil return nil
} }
func (p *PluginNameResolver) GetRequiredResolveParams() []string { func (p *PluginNameResolver) GetRequiredResolveParams() []string {
return nil return nil
} }

View File

@ -1,12 +1,12 @@
package resolvers package resolvers
import ( import (
"context" "context"
"git.optclblast.xyz/draincloud/draincloud-core/internal/common" "git.optclblast.xyz/draincloud/draincloud-core/internal/common"
) )
type Resolver interface { type Resolver interface {
Resolve(ctx context.Context, req *common.Request, reqReq any) error Resolve(ctx context.Context, req *common.Request, reqReq any) error
GetRequiredResolveParams() []string GetRequiredResolveParams() []string
} }

View File

@ -1,6 +1,6 @@
package seal package seal
// TODO // TODO
// SealResolver is a placeholder for the upcoming seal-check resolver.
// TODO implement once the warden client type is available.
type SealResolver struct {
	wardenClient any
}

View File

@ -1,19 +1,19 @@
package audit package audit
import ( import (
"context" "context"
"github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
) )
type Repository struct { type Repository struct {
db *pgx.Conn db *pgx.Conn
} }
func (r *Repository) AddEntry(ctx context.Context, entry audit.AuditLogEntry) error { func (r *Repository) AddEntry(ctx context.Context, entry audit.AuditLogEntry) error {
logger.Warn(ctx, "[Repository][AddEntry] not implemented yet!") logger.Warn(ctx, "[Repository][AddEntry] not implemented yet!")
return nil return nil
} }

View File

@ -1,86 +1,86 @@
package fs package fs
import ( import (
"context" "context"
"fmt" "fmt"
"os" "os"
"sync" "sync"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
) )
// Storage is a local-filesystem blob store. A per-id mutex map serializes
// concurrent access to the same blob.
type Storage struct {
	lm  *sync.Map
	dir string
	// If file is not belongs to current node FS - redirect to corresponding node
	// cluster DrainCloudCluster
}

// NewFSStorage returns a Storage rooted at dir.
func NewFSStorage(dir string) *Storage {
	return &Storage{
		lm:  &sync.Map{},
		dir: dir,
	}
}
func (s *Storage) GetFile(ctx context.Context, id int64) (*os.File, error) { func (s *Storage) GetFile(ctx context.Context, id int64) (*os.File, error) {
tx := lockFile(s.lm, id) tx := lockFile(s.lm, id)
defer unlockFile(s.lm, id, tx) defer unlockFile(s.lm, id, tx)
file, err := os.Open(getFilePath(s.dir, id)) file, err := os.Open(getFilePath(s.dir, id))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open file: %w", err) return nil, fmt.Errorf("failed to open file: %w", err)
} }
defer func() { defer func() {
if err = file.Close(); err != nil { if err = file.Close(); err != nil {
logger.Error(ctx, "[getFile] close error", logger.Err(err)) logger.Error(ctx, "[getFile] close error", logger.Err(err))
} }
}() }()
return file, nil return file, nil
} }
func (s *Storage) SaveBlob(ctx context.Context, id int64, data []byte) error { func (s *Storage) SaveBlob(ctx context.Context, id int64, data []byte) error {
tx := lockFile(s.lm, id) tx := lockFile(s.lm, id)
defer unlockFile(s.lm, id, tx) defer unlockFile(s.lm, id, tx)
file, err := os.Open(getFilePath(s.dir, id)) file, err := os.Open(getFilePath(s.dir, id))
if err != nil { if err != nil {
return fmt.Errorf("failed to open file: %w", err) return fmt.Errorf("failed to open file: %w", err)
} }
defer func() { defer func() {
if err = file.Close(); err != nil { if err = file.Close(); err != nil {
logger.Error(ctx, "[saveFile] close error", logger.Err(err)) logger.Error(ctx, "[saveFile] close error", logger.Err(err))
} }
}() }()
if _, err = file.Write(data); err != nil { if _, err = file.Write(data); err != nil {
return fmt.Errorf("failed to write data to file: %w", err) return fmt.Errorf("failed to write data to file: %w", err)
} }
return nil return nil
} }
func (s *Storage) DeleteFile(ctx context.Context, id int64) error { func (s *Storage) DeleteFile(ctx context.Context, id int64) error {
tx := lockFile(s.lm, id) tx := lockFile(s.lm, id)
defer unlockFile(s.lm, id, tx) defer unlockFile(s.lm, id, tx)
return nil return nil
} }
// getFilePath maps a blob id to its path directly under dir.
func getFilePath(dir string, id int64) string {
	return fmt.Sprintf("%s/%d", dir, id)
}
func lockFile(lm *sync.Map, id int64) sync.Locker { func lockFile(lm *sync.Map, id int64) sync.Locker {
_m := &sync.Mutex{} _m := &sync.Mutex{}
many, _ := lm.LoadOrStore(id, _m) many, _ := lm.LoadOrStore(id, _m)
_m, _ = many.(*sync.Mutex) _m, _ = many.(*sync.Mutex)
_m.Lock() _m.Lock()
return _m return _m
} }
func unlockFile(lm *sync.Map, id int64, tx sync.Locker) { func unlockFile(lm *sync.Map, id int64, tx sync.Locker) {
tx.Unlock() tx.Unlock()
lm.Delete(id) lm.Delete(id)
} }

View File

@ -1,39 +1,40 @@
package storage package storage
import ( import (
"context" "context"
"os" "os"
auditmodels "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit" auditmodels "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
"github.com/google/uuid" "github.com/google/uuid"
) )
type Database interface { type Database interface {
AuthStorage AuthStorage
} }
type AuthStorage interface { type AuthStorage interface {
AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error
GetUserByLogin(ctx context.Context, login string) (*auth.User, error) GetUserByLogin(ctx context.Context, login string) (*auth.User, error)
GetUserByID(ctx context.Context, id uuid.UUID) (*auth.User, error) GetUserByID(ctx context.Context, id uuid.UUID) (*auth.User, error)
AddSession(ctx context.Context, ses *auth.Session) (uuid.UUID, error) AddSession(ctx context.Context, ses *auth.Session) (uuid.UUID, error)
GetSession(ctx context.Context, sessionToken string) (*auth.Session, error) GetSession(ctx context.Context, sessionToken string) (*auth.Session, error)
RemoveSession(ctx context.Context, id uuid.UUID) error RemoveSession(ctx context.Context, id uuid.UUID) error
} }
type AuthAuditLogStorage interface { type AuthAuditLogStorage interface {
AddEntry(ctx context.Context, entry auditmodels.AuditLogEntry) error AddEntry(ctx context.Context, entry auditmodels.AuditLogEntry) error
} }
type MetaStorage interface { type MetaStorage interface {
SaveMetadata(ctx context.Context, meta files.FileMetadata) (uuid.UUID, error) SaveMetadata(ctx context.Context, meta files.FileMetadata) (uuid.UUID, error)
} }
type BlobStorage interface { type BlobStorage interface {
GetFile(ctx context.Context, id uuid.UUID) (*os.File, error) GetFile(ctx context.Context, id uuid.UUID) (*os.File, error)
SaveBlob(ctx context.Context, id uuid.UUID, data []byte) error SaveBlob(ctx context.Context, id uuid.UUID, data []byte) error
DeleteFile(ctx context.Context, id uuid.UUID) error DeleteFile(ctx context.Context, id uuid.UUID) error
} GetFSLink(ctx context.Context, fileID uuid.UUID) (string, error)
}

View File

@ -1,49 +1,49 @@
package audit package audit
import "time" import "time"
// EventType enumerates auditable auth events.
type EventType int

const (
	EventUnspecified EventType = iota
	EventSuccessfullLogin
	EventFailedLogin
	EventSuccessfullRegister
	EventFailedRegister
	EventSuccessfullAuth
	EventFailedAuth
	EventUserUpdated
)

// Severity ranks how urgent an audit entry is (lower value = more urgent).
type Severity int

const (
	// Consistency fix: constants are now typed Severity, matching the typed
	// EventType constants above. Numeric values are unchanged.
	SeverityAlert   Severity = 0
	SeverityWarning Severity = 10
	SeverityInfo    Severity = 100
	SeverityNotice  Severity = 200
)

// Actor identifies who triggered an audit event.
type Actor struct {
	ActorSysName string
	RemoteIP     string
	ID           int64
}

const (
	// ActorDrainCloudCore marks events generated by the system itself.
	ActorDrainCloudCore = "_actor_draincloud_core"
	// ActorUser marks events generated by an end user.
	ActorUser = "user"
)

// AuditLogEntry is a single audit record.
type AuditLogEntry struct {
	EventType EventType
	// Who caused changes
	Actor    Actor
	Severity Severity
	SessionID int64
	CreatedAt time.Time
	// What changed
	Object string
	// How it was changed
	Action string
}

View File

@ -1,25 +1,25 @@
package auth package auth
import ( import (
"time" "time"
"github.com/google/uuid" "github.com/google/uuid"
) )
type Session struct { type Session struct {
ID uuid.UUID ID uuid.UUID
SessionToken string SessionToken string
CsrfToken string CsrfToken string
UserID uuid.UUID UserID uuid.UUID
CreatedAt time.Time CreatedAt time.Time
ExpiredAt time.Time ExpiredAt time.Time
} }
type User struct { type User struct {
ID uuid.UUID ID uuid.UUID
Username string Username string
Login string Login string
PasswordHash []byte PasswordHash []byte
CreatedAt time.Time CreatedAt time.Time
UpdatedAt time.Time UpdatedAt time.Time
} }

View File

@ -1,13 +1,13 @@
package files package files
import "github.com/google/uuid" import "github.com/google/uuid"
type FileMetadata struct { type FileMetadata struct {
Id uuid.UUID Id uuid.UUID
Name string Name string
UserID int64 UserID int64
Ext string Ext string
Type string Type string
FSLink string FSLink string
Size int64 Size int64
} }

View File

@ -1,147 +1,147 @@
package postgres package postgres
import ( import (
"context" "context"
"database/sql" "database/sql"
"fmt" "fmt"
"log/slog" "log/slog"
"time" "time"
"git.optclblast.xyz/draincloud/draincloud-core/internal/closer" "git.optclblast.xyz/draincloud/draincloud-core/internal/closer"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth" "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/auth"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5"
"github.com/jackc/pgx/v5/pgconn" "github.com/jackc/pgx/v5/pgconn"
) )
type Database struct { type Database struct {
db *pgx.Conn db *pgx.Conn
cluster *ShardCluster cluster *ShardCluster
} }
func New(ctx context.Context, dsn string) *Database { func New(ctx context.Context, dsn string) *Database {
db, err := pgx.Connect(ctx, dsn) db, err := pgx.Connect(ctx, dsn)
if err != nil { if err != nil {
logger.Fatal(ctx, "failed to connect to postgres", logger.Err(err)) logger.Fatal(ctx, "failed to connect to postgres", logger.Err(err))
} }
closer.Add(func() error { closer.Add(func() error {
ctx, cancel := context.WithTimeout(ctx, 2*time.Second) ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
defer cancel() defer cancel()
return db.Close(ctx) return db.Close(ctx)
}) })
return &Database{db: db} return &Database{db: db}
} }
type dbtx interface { type dbtx interface {
Exec(ctx context.Context, stmt string, args ...any) (pgconn.CommandTag, error) Exec(ctx context.Context, stmt string, args ...any) (pgconn.CommandTag, error)
QueryRow(ctx context.Context, sql string, args ...any) pgx.Row QueryRow(ctx context.Context, sql string, args ...any) pgx.Row
Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error) Query(ctx context.Context, sql string, args ...any) (pgx.Rows, error)
} }
func (d *Database) AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error { func (d *Database) AddUser(ctx context.Context, id uuid.UUID, login string, username string, passwordHash []byte) error {
return addUser(ctx, d.db, id, login, username, passwordHash) return addUser(ctx, d.db, id, login, username, passwordHash)
} }
func (d *Database) GetUserByID(ctx context.Context, id uuid.UUID) (*auth.User, error) { func (d *Database) GetUserByID(ctx context.Context, id uuid.UUID) (*auth.User, error) {
return getUserByID(ctx, d.db, id) return getUserByID(ctx, d.db, id)
} }
func (d *Database) GetUserByLogin(ctx context.Context, login string) (*auth.User, error) { func (d *Database) GetUserByLogin(ctx context.Context, login string) (*auth.User, error) {
return getUserByLogin(ctx, d.db, login) return getUserByLogin(ctx, d.db, login)
} }
func (d *Database) AddSession(ctx context.Context, ses *auth.Session) (uuid.UUID, error) { func (d *Database) AddSession(ctx context.Context, ses *auth.Session) (uuid.UUID, error) {
return addSession(ctx, d.db, ses) return addSession(ctx, d.db, ses)
} }
func (d *Database) GetSession(ctx context.Context, sessionToken string) (*auth.Session, error) { func (d *Database) GetSession(ctx context.Context, sessionToken string) (*auth.Session, error) {
const stmt = `SELECT const stmt = `SELECT
s.id, s.session_token, s.csrf_token, s.user_id, s.created_at, s.expired_at s.id, s.session_token, s.csrf_token, s.user_id, s.created_at, s.expired_at
FROM sessions as s FROM sessions as s
WHERE s.session_token = $1;` WHERE s.session_token = $1;`
row := d.db.QueryRow(ctx, stmt, sessionToken) row := d.db.QueryRow(ctx, stmt, sessionToken)
var ( var (
id uuid.UUID id uuid.UUID
sesToken, csrfToken string sesToken, csrfToken string
userID uuid.UUID userID uuid.UUID
createdAt sql.NullTime createdAt sql.NullTime
expiredAt sql.NullTime expiredAt sql.NullTime
) )
if err := row.Scan(&id, &sesToken, &csrfToken, &userID, &createdAt, &expiredAt); err != nil { if err := row.Scan(&id, &sesToken, &csrfToken, &userID, &createdAt, &expiredAt); err != nil {
return nil, err return nil, err
} }
return &auth.Session{ return &auth.Session{
ID: id, ID: id,
SessionToken: sesToken, SessionToken: sesToken,
CsrfToken: csrfToken, CsrfToken: csrfToken,
UserID: userID, UserID: userID,
CreatedAt: createdAt.Time, CreatedAt: createdAt.Time,
ExpiredAt: expiredAt.Time, ExpiredAt: expiredAt.Time,
}, nil }, nil
} }
func (d *Database) RemoveSession(ctx context.Context, id uuid.UUID) error { func (d *Database) RemoveSession(ctx context.Context, id uuid.UUID) error {
const stmt = `DELETE FROM sessions WHERE id = $1;` const stmt = `DELETE FROM sessions WHERE id = $1;`
_, err := d.db.Exec(ctx, stmt, id) _, err := d.db.Exec(ctx, stmt, id)
return err return err
} }
func (d *Database) RemoveExpiredSessions(ctx context.Context) error { func (d *Database) RemoveExpiredSessions(ctx context.Context) error {
const stmt = `DELETE FROM sessions WHERE expired_at < $1;` const stmt = `DELETE FROM sessions WHERE expired_at < $1;`
res, err := d.db.Exec(ctx, stmt, time.Now()) res, err := d.db.Exec(ctx, stmt, time.Now())
logger.Notice(ctx, "[Database][RemoveExpiredSessions] sessions cleanup", slog.Int64("removed", res.RowsAffected())) logger.Notice(ctx, "[Database][RemoveExpiredSessions] sessions cleanup", slog.Int64("removed", res.RowsAffected()))
return err return err
} }
func addUser(ctx context.Context, conn dbtx, id uuid.UUID, login string, username string, passwordHash []byte) error { func addUser(ctx context.Context, conn dbtx, id uuid.UUID, login string, username string, passwordHash []byte) error {
const stmt = `INSERT INTO users (id,login,username,password) const stmt = `INSERT INTO users (id,login,username,password)
VALUES ($1,$2,$3,$4);` VALUES ($1,$2,$3,$4);`
_, err := conn.Exec(ctx, stmt, id, login, username, passwordHash) _, err := conn.Exec(ctx, stmt, id, login, username, passwordHash)
if err != nil { if err != nil {
return fmt.Errorf("failed to insert user data into users table: %w", err) return fmt.Errorf("failed to insert user data into users table: %w", err)
} }
return nil return nil
} }
func getUserByID(ctx context.Context, conn dbtx, id uuid.UUID) (*auth.User, error) { func getUserByID(ctx context.Context, conn dbtx, id uuid.UUID) (*auth.User, error) {
const stmt = `SELECT * FROM users WHERE id = $1 LIMIT 1` const stmt = `SELECT * FROM users WHERE id = $1 LIMIT 1`
u := new(auth.User) u := new(auth.User)
row := conn.QueryRow(ctx, stmt, id) row := conn.QueryRow(ctx, stmt, id)
if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil { if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil {
return nil, fmt.Errorf("failed to fetch user by id: %w", err) return nil, fmt.Errorf("failed to fetch user by id: %w", err)
} }
return u, nil return u, nil
} }
func getUserByLogin(ctx context.Context, conn dbtx, login string) (*auth.User, error) { func getUserByLogin(ctx context.Context, conn dbtx, login string) (*auth.User, error) {
const stmt = `SELECT * FROM users WHERE login = $1 LIMIT 1` const stmt = `SELECT * FROM users WHERE login = $1 LIMIT 1`
u := new(auth.User) u := new(auth.User)
row := conn.QueryRow(ctx, stmt, login) row := conn.QueryRow(ctx, stmt, login)
if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil { if err := row.Scan(&u.ID, &u.Login, &u.Username, &u.PasswordHash, &u.CreatedAt, &u.UpdatedAt); err != nil {
return nil, fmt.Errorf("failed to fetch user by login: %w", err) return nil, fmt.Errorf("failed to fetch user by login: %w", err)
} }
return u, nil return u, nil
} }
func addSession(ctx context.Context, conn dbtx, session *auth.Session) (uuid.UUID, error) { func addSession(ctx context.Context, conn dbtx, session *auth.Session) (uuid.UUID, error) {
const stmt = `INSERT INTO sessions (id,session_token, csrf_token, user_id, const stmt = `INSERT INTO sessions (id,session_token, csrf_token, user_id,
created_at, expired_at) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id;` created_at, expired_at) VALUES ($1, $2, $3, $4, $5, $6) RETURNING id;`
var id uuid.UUID var id uuid.UUID
row := conn.QueryRow(ctx, stmt, session.ID, session.SessionToken, session.CsrfToken, session.UserID, session.CreatedAt, session.ExpiredAt) row := conn.QueryRow(ctx, stmt, session.ID, session.SessionToken, session.CsrfToken, session.UserID, session.CreatedAt, session.ExpiredAt)
if err := row.Scan(&id); err != nil { if err := row.Scan(&id); err != nil {
return uuid.Nil, fmt.Errorf("failed to insert new session: %w", err) return uuid.Nil, fmt.Errorf("failed to insert new session: %w", err)
} }
return id, nil return id, nil
} }

View File

@ -1,41 +1,41 @@
package postgres package postgres
import ( import (
"context" "context"
"hash/crc32" "hash/crc32"
"log/slog" "log/slog"
"sync" "sync"
"git.optclblast.xyz/draincloud/draincloud-core/internal/logger" "git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5"
) )
type ShardMap = map[uint32]*pgx.ConnConfig type ShardMap = map[uint32]*pgx.ConnConfig
type ShardCluster struct { type ShardCluster struct {
m sync.Mutex m sync.Mutex
shards []*pgx.Conn shards []*pgx.Conn
} }
func NewShardCluster(ctx context.Context, shardMap ShardMap) *ShardCluster { func NewShardCluster(ctx context.Context, shardMap ShardMap) *ShardCluster {
shards := make([]*pgx.Conn, len(shardMap)) shards := make([]*pgx.Conn, len(shardMap))
for n, cfg := range shardMap { for n, cfg := range shardMap {
conn, err := pgx.ConnectConfig(ctx, cfg) conn, err := pgx.ConnectConfig(ctx, cfg)
if err != nil { if err != nil {
logger.Fatal(ctx, "failed to connect to shard", slog.Uint64("num", uint64(n)), logger.Err(err)) logger.Fatal(ctx, "failed to connect to shard", slog.Uint64("num", uint64(n)), logger.Err(err))
} }
shards[n] = conn shards[n] = conn
} }
return &ShardCluster{shards: shards} return &ShardCluster{shards: shards}
} }
func (c *ShardCluster) PickShard(n uint32) *pgx.Conn { func (c *ShardCluster) PickShard(n uint32) *pgx.Conn {
c.m.Lock() c.m.Lock()
defer c.m.Unlock() defer c.m.Unlock()
return c.shards[n] return c.shards[n]
} }
func UUIDShardFn(id uuid.UUID, numShards uint32) uint32 { func UUIDShardFn(id uuid.UUID, numShards uint32) uint32 {
return crc32.ChecksumIEEE(id[:]) % numShards return crc32.ChecksumIEEE(id[:]) % numShards
} }

View File

@ -1,66 +1,66 @@
package storage package storage
import ( import (
"context" "context"
"database/sql" "database/sql"
"errors" "errors"
"fmt" "fmt"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
) )
// txKey is the private context-key type used to stash a transaction in
// a context without colliding with keys from other packages.
type txKey struct{}

// ctxKey is the single txKey value shared by txContext/txFromContext.
var ctxKey txKey = txKey{}

// DBTX is the query interface satisfied by both *sqlx.DB and *sqlx.Tx,
// letting callers run statements transparently inside or outside a
// transaction.
type DBTX interface {
	sqlx.Ext
	sqlx.ExtContext
}
// Transaction runs fn inside a database transaction.
//
// If ctx already carries a transaction (a nested call), fn executes
// directly within that outer transaction and commit/rollback is left to
// the outermost caller. Otherwise a new REPEATABLE READ transaction is
// begun: it commits when fn returns nil and rolls back when fn (or the
// commit itself) fails; a rollback failure is joined onto the original
// error.
func Transaction(ctx context.Context, db *sqlx.DB, fn func(context.Context) error) (err error) {
	tx := txFromContext(ctx)
	if tx == nil {
		tx, err = db.BeginTxx(ctx, &sql.TxOptions{
			Isolation: sql.LevelRepeatableRead,
		})
		if err != nil {
			return fmt.Errorf("failed to begin tx: %w", err)
		}
		// The named result err lets this defer observe fn's outcome and
		// choose between commit and rollback; statement order here is
		// significant (commit error also triggers the rollback path).
		defer func() {
			if err == nil {
				err = tx.Commit()
			}
			if err != nil {
				if rbErr := tx.Rollback(); rbErr != nil {
					err = errors.Join(err, rbErr)
				}
			}
		}()
		ctx = txContext(ctx, tx)
	}
	return fn(ctx)
}
func Conn(ctx context.Context, db DBTX) DBTX { func Conn(ctx context.Context, db DBTX) DBTX {
if tx := txFromContext(ctx); tx != nil { if tx := txFromContext(ctx); tx != nil {
return tx return tx
} }
return db return db
} }
func txFromContext(ctx context.Context) *sqlx.Tx { func txFromContext(ctx context.Context) *sqlx.Tx {
if tx, ok := ctx.Value(ctxKey).(*sqlx.Tx); ok { if tx, ok := ctx.Value(ctxKey).(*sqlx.Tx); ok {
return tx return tx
} }
return nil return nil
} }
func txContext(parent context.Context, tx *sqlx.Tx) context.Context { func txContext(parent context.Context, tx *sqlx.Tx) context.Context {
return context.WithValue(parent, tx, ctxKey) return context.WithValue(parent, tx, ctxKey)
} }

View File

@ -1,88 +1,88 @@
// TODO wtf? // TODO wtf?
package pool package pool
import ( import (
"net" "net"
"sync" "sync"
"sync/atomic" "sync/atomic"
) )
var (
	// defaultMaxConns caps the pool size when WithMaxConns is not given.
	defaultMaxConns = 20
	// defaultStrategy is the round-robin selector used when WithStrategy
	// is not given; seeded to -1 so the first Select() yields 0.
	// NOTE(review): this single instance is shared by every pool built
	// with defaults, so such pools advance a common counter — confirm
	// that is intended.
	defaultStrategy = &RoundrobinStrategy{
		lastSelected: initialRoundrobinAtomic(),
	}
)
func initialRoundrobinAtomic() atomic.Int64 { func initialRoundrobinAtomic() atomic.Int64 {
a := atomic.Int64{} a := atomic.Int64{}
a.Store(-1) a.Store(-1)
return a return a
} }
// ConnSelectionStrategy chooses which pooled connection to hand out.
// Select returns the index of the connection to use.
type ConnSelectionStrategy interface {
	Select() int
}
// RoundrobinStrategy hands out monotonically increasing indices, one
// per Select call, safely across goroutines.
type RoundrobinStrategy struct {
	lastSelected atomic.Int64
}

// Select atomically advances the counter and returns its new value.
func (r *RoundrobinStrategy) Select() int {
	next := r.lastSelected.Add(1)
	return int(next)
}
// ConnPool is a goroutine-safe, append-only collection of network
// connections: selection takes the shared (read) lock, adding takes the
// exclusive (write) lock.
type ConnPool struct {
	m        sync.RWMutex
	strategy ConnSelectionStrategy // picks which conns index to return
	conns    []net.Conn
}
type newConnPoolOpts struct { type newConnPoolOpts struct {
strategy ConnSelectionStrategy strategy ConnSelectionStrategy
maxConns int maxConns int
} }
func newNewConnPoolOpts() newConnPoolOpts { func newNewConnPoolOpts() newConnPoolOpts {
return newConnPoolOpts{ return newConnPoolOpts{
strategy: defaultStrategy, strategy: defaultStrategy,
maxConns: defaultMaxConns, maxConns: defaultMaxConns,
} }
} }
type NewConnPoolOpt func(p *newConnPoolOpts) type NewConnPoolOpt func(p *newConnPoolOpts)
func WithStrategy(s ConnSelectionStrategy) NewConnPoolOpt { func WithStrategy(s ConnSelectionStrategy) NewConnPoolOpt {
return func(p *newConnPoolOpts) { return func(p *newConnPoolOpts) {
p.strategy = s p.strategy = s
} }
} }
func WithMaxConns(mc int) NewConnPoolOpt { func WithMaxConns(mc int) NewConnPoolOpt {
return func(p *newConnPoolOpts) { return func(p *newConnPoolOpts) {
p.maxConns = mc p.maxConns = mc
} }
} }
func NewConnPool(opts ...NewConnPoolOpt) *ConnPool { func NewConnPool(opts ...NewConnPoolOpt) *ConnPool {
o := newNewConnPoolOpts() o := newNewConnPoolOpts()
for _, opt := range opts { for _, opt := range opts {
opt(&o) opt(&o)
} }
return &ConnPool{ return &ConnPool{
conns: make([]net.Conn, 0), conns: make([]net.Conn, 0),
strategy: o.strategy, strategy: o.strategy,
} }
} }
func (p *ConnPool) SelectConn() net.Conn { func (p *ConnPool) SelectConn() net.Conn {
p.m.RLock() p.m.RLock()
defer p.m.RUnlock() defer p.m.RUnlock()
return p.conns[p.strategy.Select()] return p.conns[p.strategy.Select()]
} }
func (p *ConnPool) AddConn(conn net.Conn) { func (p *ConnPool) AddConn(conn net.Conn) {
p.m.Lock() p.m.Lock()
defer p.m.Unlock() defer p.m.Unlock()
p.conns = append(p.conns, conn) p.conns = append(p.conns, conn)
} }

View File

@ -1,71 +1,71 @@
-- +goose Up
-- +goose StatementBegin
SELECT 'up SQL query';
-- Users as auth data
-- Account records: login is the unique credential; password holds the
-- secret as raw bytes (presumably a hash -- confirm with the auth code).
create table if not exists users (
    id uuid primary key,
    username text default null,
    login text not null unique,
    password bytea not null,
    created_at timestamptz default current_timestamp,
    updated_at timestamptz default current_timestamp
);
-- NOTE(review): unlike the table above, these indexes lack
-- "if not exists", so re-running Up against an existing schema fails here.
create index idx_users_login on users (login);
create index idx_users_username on users (username);
-- Sessions and auth data
-- One row per live session; both tokens are unique lookup handles.
-- NOTE(review): created_at/expired_at use timestamp (no time zone)
-- while every other table uses timestamptz -- confirm this is intended.
create table sessions (
    id uuid primary key,
    session_token varchar(200) not null unique,
    csrf_token varchar(200) not null unique,
    user_id uuid references users(id),
    created_at timestamp default current_timestamp,
    expired_at timestamp not null
);
create index if not exists idx_sessions_session_token_csrf_token on sessions (session_token, csrf_token);
-- Files
-- Metadata for stored blobs; fslink points at the blob's storage
-- location, and deleted_at implements soft deletion.
-- NOTE(review): owner_id and parent_dir carry no foreign keys --
-- confirm referential integrity is enforced in application code.
create table files_metadata (
    id uuid primary key,
    name text not null,
    fslink text not null,
    size bigint not null,
    ext text not null,
    owner_id uuid not null,
    parent_dir uuid not null,
    created_at timestamptz default current_timestamp,
    updated_at timestamptz default null,
    deleted_at timestamptz default null
);
create index idx_fm_owner_id on files_metadata(owner_id);
create index idx_fm_owner_id_parent_dir on files_metadata(owner_id, parent_dir);
-- Directory tree: each directory references its parent via parent_dir;
-- soft-deleted via deleted_at.
create table directories (
    id uuid primary key,
    name text not null,
    owner_id uuid not null,
    parent_dir uuid not null,
    created_at timestamptz default current_timestamp,
    updated_at timestamptz default null,
    deleted_at timestamptz default null
);
create index idx_directories_owner_id_parent_dir on directories(owner_id, parent_dir);
-- Per-user access grants on directories.
-- NOTE(review): "assess_flag" looks like a typo for "access_flag";
-- renaming needs a follow-up migration since code may reference it.
create table directory_users_access (
    id uuid primary key,
    dir_id uuid not null,
    user_id uuid not null,
    assess_flag integer,
    created_at timestamptz default current_timestamp,
    updated_at timestamptz default null
);
-- BUG FIX: this index was declared on directories(owner_id, parent_dir),
-- an exact duplicate of idx_directories_owner_id_parent_dir above. Its
-- idx_dua_* name shows it was meant for directory_users_access, which
-- has no owner_id/parent_dir columns; index that table's lookup columns.
create index idx_dua_dir_id_user_id on directory_users_access(dir_id, user_id);