commit f74a4779aa
parent cc36524ac0

    tmp
@@ -1,3 +1,7 @@
 # DrainCloud Core
 
 DrainCloud Core is an all-in-one lightweight DrainCloud distribution designed to work in resource-constrained environments.
+
+It requires **# TODO put requirements here**
+
+The node can work in three modes: #TBD
+1. All-in-one mode, the recommended one.
+2. Auth-node. Only the auth API will be operational.
+3. Storage-node. Only the filestorage API will be operational.
Taskfile.yaml (new file)
@@ -0,0 +1,10 @@
+version: 3
+
+tasks:
+  prepare-env:
+    cmds:
+      - sudo docker swarm init
+      - sudo docker stack deploy draincloud_core -c ./compose.rw.yaml
+  migrate-local-status:
+    cmds:
+      - goose postgres "postgres://draincloud:draincloud@localhost:5432/draincloud" status -dir migrations
compose.rw.yaml (new file)
@@ -0,0 +1,41 @@
+services:
+  rw_1:
+    image: postgres:17
+    container_name: draincloud-db-rw-1
+    ports:
+      - 5432:5432
+    environment:
+      - POSTGRES_USER=draincloud
+      - POSTGRES_PASSWORD=draincloud
+      - POSTGRES_DB=draincloud
+    volumes:
+      - draincloud-rw-1:/var/lib/postgresql/data
+
+  rw_2:
+    image: postgres:17
+    container_name: draincloud-db-rw-2
+    ports:
+      - 5433:5432
+    environment:
+      - POSTGRES_USER=draincloud
+      - POSTGRES_PASSWORD=draincloud
+      - POSTGRES_DB=draincloud
+    volumes:
+      - draincloud-rw-2:/var/lib/postgresql/data
+
+  rw_3:
+    image: postgres:17
+    container_name: draincloud-db-rw-3
+    ports:
+      - 5434:5432
+    environment:
+      - POSTGRES_USER=draincloud
+      - POSTGRES_PASSWORD=draincloud
+      - POSTGRES_DB=draincloud
+    volumes:
+      - draincloud-rw-3:/var/lib/postgresql/data
+
+volumes:
+  draincloud-rw-1: {}
+  draincloud-rw-2: {}
+  draincloud-rw-3: {}
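A quick way to check that the stack brought up by task prepare-env is reachable is to ping one of the instances with pgx, which the project already depends on. A minimal sketch (the DSN mirrors the rw_1 service above; everything else is illustrative):

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	// rw_1 from compose.rw.yaml is published on host port 5432
	// (rw_2 and rw_3 are on 5433 and 5434 respectively).
	conn, err := pgx.Connect(ctx, "postgres://draincloud:draincloud@localhost:5432/draincloud")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer conn.Close(ctx)

	// Ping confirms the container is up and accepting queries.
	if err := conn.Ping(ctx); err != nil {
		log.Fatalf("ping: %v", err)
	}
	log.Println("rw_1 is up")
}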
@@ -1,15 +0,0 @@
-services:
-  database:
-    image: postgres:17
-    container_name: draincloud-db
-    ports:
-      - 5432:5432
-    environment:
-      - POSTGRES_USER=draincloud
-      - POSTGRES_PASSWORD=draincloud
-      - POSTGRES_DB=draincloud
-    volumes:
-      - draincloud-db-data:/var/lib/postgresql/data
-
-volumes:
-  draincloud-db-data: {}
@@ -70,11 +70,11 @@ func (d *DrainCloud) uploadFile(ctx *gin.Context, userID int64) error {
 	return nil
 }
 
-func parseExtension(filename string) filesengine.FileExtension {
+func parseExtension(filename string) string {
 	parts := strings.Split(filename, ".")
 	if len(parts) == 0 {
-		return filesengine.FileExtensionUnspecified
+		return ""
 	}
 
-	return filesengine.FileExtension(parts[len(parts)-1])
+	return parts[len(parts)-1]
 }
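Worth noting about the new parseExtension: strings.Split never returns an empty slice, so the len(parts) == 0 guard is effectively dead, and a name without a dot comes back whole. A table-driven test sketch pinning that behaviour down (hypothetical, not part of the commit; the package name is assumed):

package draincloudcore // assumed: placed alongside parseExtension

import "testing"

// TestParseExtension documents the observable behaviour of parseExtension.
func TestParseExtension(t *testing.T) {
	cases := map[string]string{
		"report.pdf":     "pdf",
		"archive.tar.gz": "gz",
		"Makefile":       "Makefile", // no dot: the whole name is returned
		"":               "",         // strings.Split("", ".") yields [""], so the guard never triggers
	}
	for in, want := range cases {
		if got := parseExtension(in); got != want {
			t.Errorf("parseExtension(%q) = %q, want %q", in, got, want)
		}
	}
}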
@@ -2,8 +2,10 @@ package filesengine
 
 import (
 	"context"
+	"fmt"
 
 	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage"
+	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
 )
 
 type FilesEngine struct {
@@ -24,17 +26,25 @@ func NewFilesEngine(
 type File struct {
 	Name   string
 	UserID int64
-	Ext    FileExtension
+	Ext    string
 	Type   string
 	Size   int64
 	Data   []byte
 }
 
 // TODO save file
 func (e *FilesEngine) SaveFile(
 	ctx context.Context,
 	file File,
 ) (int64, error) {
-	e.metaStorage.SaveMetadata(ctx, file.Type, )
+	fileID, err := e.metaStorage.SaveMetadata(ctx, files.FileMetadata{})
+	if err != nil {
+		return -1, fmt.Errorf("failed to create new file metadata: %w", err)
+	}
 
-	return -1, nil
+	if err = e.blobStorage.SaveBlob(ctx, fileID, file.Data); err != nil {
+		return -1, fmt.Errorf("failed to save file data: %w", err)
+	}
+
+	return fileID, nil
 }
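Note that SaveFile still hands an empty files.FileMetadata{} to SaveMetadata, so the name, owner, extension, type and size carried by File are not persisted yet. A minimal sketch of the mapping one might expect here, hypothetical and not part of this commit (field names follow the FileMetadata struct added below):

package filesengine // sketch: lives next to the File type above

import (
	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
)

// metadataFromFile is a hypothetical helper showing how File fields could map
// onto the FileMetadata record passed to SaveMetadata. Id and FSLink are left
// for the storage layer to fill in.
func metadataFromFile(file File) files.FileMetadata {
	return files.FileMetadata{
		Name:   file.Name,
		UserID: file.UserID,
		Ext:    file.Ext,
		Type:   file.Type,
		Size:   file.Size,
	}
}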
@@ -1,7 +0,0 @@
-package filesengine
-
-type FileExtension string
-
-const (
-	FileExtensionUnspecified FileExtension = "unspecified"
-)
@@ -6,6 +6,7 @@ import (
 
 	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models"
 	auditmodels "git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/audit"
+	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
 )
 
 type Database interface {
@@ -27,7 +28,7 @@ type AuthAuditLogStorage interface {
 }
 
 type MetaStorage interface {
-	SaveMetadata(ctx context.Context, fileType string, size int64, ext string) (int64, error)
+	SaveMetadata(ctx context.Context, meta files.FileMetadata) (int64, error)
 }
 
 type BlobStorage interface {
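With SaveMetadata now taking a files.FileMetadata value, an in-memory stand-in for tests stays small. A sketch under the assumption that callers only need the generated id back (hypothetical, not in the commit):

package storage_test // hypothetical test helper

import (
	"context"
	"sync"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/models/files"
)

// memMetaStorage satisfies the new MetaStorage shape:
// SaveMetadata(ctx context.Context, meta files.FileMetadata) (int64, error).
type memMetaStorage struct {
	mu     sync.Mutex
	nextID int64
	byID   map[int64]files.FileMetadata
}

func newMemMetaStorage() *memMetaStorage {
	return &memMetaStorage{byID: map[int64]files.FileMetadata{}}
}

func (s *memMetaStorage) SaveMetadata(_ context.Context, meta files.FileMetadata) (int64, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.nextID++
	meta.Id = s.nextID
	s.byID[meta.Id] = meta
	return meta.Id, nil
}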
internal/storage/models/files/files.go (new file)
@@ -0,0 +1,11 @@
+package files
+
+type FileMetadata struct {
+	Id     int64
+	Name   string
+	UserID int64
+	Ext    string
+	Type   string
+	FSLink string
+	Size   int64
+}
internal/storage/postgres/connection_pool.go (new file)
@@ -0,0 +1,35 @@
+package postgres
+
+import (
+	"context"
+	"log/slog"
+	"sync"
+
+	"git.optclblast.xyz/draincloud/draincloud-core/internal/logger"
+	"github.com/jackc/pgx/v5"
+)
+
+type ShardMap = map[uint16]*pgx.ConnConfig
+
+type ShardCluster struct {
+	m      sync.Mutex
+	shards map[uint16]*pgx.Conn
+}
+
+func NewShardCluster(ctx context.Context, shardMap ShardMap) *ShardCluster {
+	shards := make(map[uint16]*pgx.Conn, len(shardMap))
+	for n, cfg := range shardMap {
+		conn, err := pgx.ConnectConfig(ctx, cfg)
+		if err != nil {
+			logger.Fatal(ctx, "failed to connect to shard", slog.Uint64("num", uint64(n)), logger.Err(err))
+		}
+		shards[n] = conn
+	}
+	return &ShardCluster{shards: shards}
+}
+
+func (c *ShardCluster) SelectShard(n uint16) *pgx.Conn {
+	c.m.Lock()
+	defer c.m.Unlock()
+	return c.shards[n]
+}
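How the new ShardCluster might be wired against the three compose.rw.yaml instances, as a sketch only (the DSNs come from the compose file; the shard numbering and the trivial query are illustrative):

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"

	"git.optclblast.xyz/draincloud/draincloud-core/internal/storage/postgres"
)

func main() {
	ctx := context.Background()

	// One entry per rw_* service in compose.rw.yaml; shard numbers are illustrative.
	dsns := map[uint16]string{
		0: "postgres://draincloud:draincloud@localhost:5432/draincloud",
		1: "postgres://draincloud:draincloud@localhost:5433/draincloud",
		2: "postgres://draincloud:draincloud@localhost:5434/draincloud",
	}
	shardMap := postgres.ShardMap{}
	for n, dsn := range dsns {
		cfg, err := pgx.ParseConfig(dsn)
		if err != nil {
			log.Fatalf("parse config for shard %d: %v", n, err)
		}
		shardMap[n] = cfg
	}

	cluster := postgres.NewShardCluster(ctx, shardMap)

	// Pick shard 1 and run a trivial query against it.
	var one int
	if err := cluster.SelectShard(1).QueryRow(ctx, "select 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
	log.Println("shard 1 answered:", one)
}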
@@ -1,3 +1,7 @@
 -- +goose Up
 -- +goose StatementBegin
 SELECT 'up SQL query';
+
+-- Users as auth data
+create table if not exists users (
+	id bigserial primary key,
@@ -64,9 +68,8 @@ create table directory_users_access (
 
 create index idx_dua_owner_id_parent_dir on directories(owner_id, parent_dir);
 
-
 -- +goose Down
 -- +goose StatementBegin
 SELECT 'down SQL query';
 -- +goose StatementEnd