Commit 853722f9 authored by unteem

initial commit

Pipeline #756 failed with stages in 17 seconds
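# Compose file: two side-by-side Nextcloud 19 stacks, one backed by MinIO
# object storage (services without suffix) and one by the local filesystem
# (the *_filesystem services), so both layouts can be tested.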
version: '3'
services:
  nextcloud-web:
    image: libresh/nextcloud:19.0.9-web
    ports:
      - "80:80"
    env_file:
      - ./env
    networks:
      - back
  nextcloud:
    image: libresh/nextcloud:19.0.9
    command: php-fpm
    env_file:
      - ./env
    networks:
      - back
  postgres:
    image: postgres:12-alpine
    ports:
      - "5432:5432"
    env_file:
      - ./env
    volumes:
      - postgres:/data/postgres
    networks:
      - back
  redis:
    image: redis
    env_file:
      - ./env
    networks:
      - back
  minio:
    image: minio/minio
    volumes:
      - minio:/data
    environment:
      - MINIO_ACCESS_KEY=minio
      - MINIO_SECRET_KEY=miniosecret
    ports:
      - "9000:9000"
      - "9001:9001"
    command: server --address :9000 --console-address :9001 /data
    networks:
      - back
  redis_filesystem:
    image: redis
    env_file:
      - ./env
    networks:
      - back
  nextcloud-web_filesystem:
    image: libresh/nextcloud:19.0.9-web
    ports:
      - "81:80"
    env_file:
      - ./env_filesystem
    networks:
      - back
  nextcloud_filesystem:
    image: libresh/nextcloud:19.0.9
    command: php-fpm
    volumes:
      - ../data:/usr/src/nextcloud/data
    env_file:
      - ./env_filesystem
    networks:
      - back
  postgres_filesystem:
    image: postgres:12-alpine
    ports:
      - "5433:5432"
    env_file:
      - ./env_filesystem
    volumes:
      - ../postgres_filesystem:/data/postgres
    networks:
      - back
networks:
  back:
    driver: bridge
volumes:
  postgres:
  minio:
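# env: configuration for the object-storage (MinIO) stack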
#INSTALLED=true
REDIS_HOST=redis
VERSION=19.0.9.1
APPS_STORE_ENABLE=false
CONFIG_READONLY=true
DATA_DIRECTORY=/usr/src/nextcloud/data
OVERWRITE_PROTOCOL=http
BACKEND_HOST=nextcloud
POSTGRES_USER=nextcloud
POSTGRES_PASSWORD=password
PGDATA=/data/postgres
DB_NAME=nextcloud
DB_PORT=5432
DB_TYPE=pgsql
DB_USER=nextcloud
DB_PASSWORD=password
NEXTCLOUD_ADMIN_USER=admin
OBJECTSTORE_S3_HOST=minio
OBJECTSTORE_S3_PORT=9000
OBJECTSTORE_S3_REGION=default
OBJECTSTORE_S3_AUTOCREATE=false
OBJECTSTORE_S3_USEPATH_STYLE=true
OBJECTSTORE_S3_SSL=false
OBJECTSTORE_S3_SECRET=miniosecret
OBJECTSTORE_S3_KEY=minio
#REDIS_HOST=redis
#REDIS_HOST_PORT=6379
#REDIS_PORT=6379
UPDATE_CHECKER=false
UPDATE_DISABLE_WEB=true
DISABLE_APPS=firstrunwizard
ENABLE_APPS=user_saml,apporder,calendar,external,groupfolders,admin_audit,theming_customcss
DB_HOST=postgres
OBJECTSTORE_S3_BUCKET=test
OVERWRITE_CLI_URL=http://localhost
NEXTCLOUD_TRUSTED_DOMAINS=localhost
INSTANCE_ID=jdsjflj
SECRET=ldkdkdkezopnvooojcfslk
PASSWORD_SALT=passwordsalt
NEXTCLOUD_ADMIN_PASSWORD=password
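# env_filesystem: configuration for the filesystem-backed stack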
INSTALLED=true
REDIS_HOST=redis_filesystem
VERSION=19.0.9.1
APPS_STORE_ENABLE=false
CONFIG_READONLY=true
DATA_DIRECTORY=/usr/src/nextcloud/data
OVERWRITE_PROTOCOL=http
BACKEND_HOST=nextcloud_filesystem
POSTGRES_USER=nextcloud
POSTGRES_PASSWORD=password
PGDATA=/data/postgres
DB_NAME=nextcloud
DB_PORT=5432
DB_TYPE=pgsql
DB_USER=nextcloud
DB_PASSWORD=password
NEXTCLOUD_ADMIN_USER=admin
#REDIS_HOST=redis
#REDIS_HOST_PORT=6379
#REDIS_PORT=6379
UPDATE_CHECKER=false
UPDATE_DISABLE_WEB=true
DISABLE_APPS=firstrunwizard
ENABLE_APPS=user_saml,apporder,calendar,external,groupfolders,admin_audit,theming_customcss
DB_HOST=postgres_filesystem
OVERWRITE_CLI_URL=http://localhost
NEXTCLOUD_TRUSTED_DOMAINS=localhost
INSTANCE_ID=jdsjflj
SECRET=ldkdkdkezopnvooojcfslk
PASSWORD_SALT=passwordsalt
NEXTCLOUD_ADMIN_PASSWORD=password
module libre.sh/librecleaner

go 1.16

require (
	github.com/RocketChat/filestore-migrator v0.0.0-20210317211720-a39f02e57f07
	github.com/jackc/pgx/v4 v4.13.0
	github.com/minio/minio-go/v7 v7.0.12
	github.com/uptrace/bun v1.0.1
	github.com/uptrace/bun/dialect/pgdialect v1.0.1
	github.com/uptrace/bun/driver/pgdriver v1.0.1
	golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
	golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect
	golang.org/x/sys v0.0.0-20210902050250-f475640dd07b // indirect
)
// replace libre.sh/librecleaner => ./
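// Package main implements one-off helpers for cleaning up and migrating a
// Nextcloud database: moving files from local ("home::") storage to S3
// object storage, purging filecache rows of deleted users, and re-homing
// group folders onto a regular user's storage.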
package main

import (
	"context"
	"crypto/md5"
	"database/sql"
	"fmt"
	"os"
	"strings"

	"github.com/jackc/pgx/v4"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/dialect/pgdialect"
	"github.com/uptrace/bun/driver/pgdriver"

	"libre.sh/librecleaner/store"
)

func init() {
}
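// pg resolves the on-disk path of every file that lives on a local
// ("home::") storage by joining filecache to storages, as the first step of
// an upload-to-S3 migration (the upload itself and the storages rewrite in
// step 2 are still TODO).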
func pg(conn *pgx.Conn) {
	/* alternative: fileids only
	rows, err := conn.Query(context.Background(), `select fileid from filecache
	join storages
	on storage = numeric_id
	where id like 'home::%'
	order by id;`)
	*/
	rows, err := conn.Query(context.Background(), `select concat('data/',
		substring(id from 7), '/', path), fileid
		from filecache
		join storages
		on storage = numeric_id
		where id like 'home::%'
		order by id;`)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Query failed: %v\n", err)
		os.Exit(1)
	}
	// rows.Close is called by rows.Next when all rows are read
	// or an error occurs in Next or Scan, so it may optionally be
	// omitted if nothing in the rows.Next loop can panic. It is
	// safe to close rows multiple times.
	defer rows.Close()
	// Iterate through the result set.
	for rows.Next() {
		file := new(store.FileCache)
		err = rows.Scan(&file.Path, &file.Fileid)
		if err != nil {
			panic(err)
		}
		// Step 1: upload. Check whether it is a file or a directory.
		fileInfo, err := os.Stat(file.Path)
		if err != nil {
			fmt.Fprintf(os.Stderr, "stat %s: %v\n", file.Path, err)
			continue
		}
		if !fileInfo.IsDir() {
			fmt.Println(file)
			// TODO: upload to S3
			// function for urn name
		}
		// Step 2: change the DB entry.
		/*
			query := `update storages
			set id = concat('object::user:', substring(id from 7))
			where id like 'home::%';
			update storages
			set id = 'object::store:amazon::my-nextcloud-bucket'
			where id like 'local::%';`
		*/
	}
	// Any errors encountered by rows.Next or rows.Scan surface here.
	if rows.Err() != nil {
		fmt.Fprintf(os.Stderr, "row iteration failed: %v\n", rows.Err())
		os.Exit(1)
	}
}
type Storage struct {
	NumericID int64
	ID        string
}

type User struct {
	UID     string
	Storage int64
}

type Users struct {
	UID         string
	Displayname string
	Password    string
	UIDLower    string
	// Storage int64
}
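// foo walks all storages and, for every per-user object storage whose user
// no longer exists in the users table, deletes the orphaned filecache rows.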
func foo(conn *pgx.Conn) error {
	rows, err := conn.Query(context.Background(), `select numeric_id, id from storages`)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Query failed: %v\n", err)
		os.Exit(1)
	}
	// rows.Close is called by rows.Next when all rows are read
	// or an error occurs in Next or Scan, so it may optionally be
	// omitted if nothing in the rows.Next loop can panic. It is
	// safe to close rows multiple times.
	defer rows.Close()
	storages := []*Storage{}
	// Iterate through the result set.
	for rows.Next() {
		storage := new(Storage)
		err = rows.Scan(&storage.NumericID, &storage.ID)
		if err != nil {
			panic(err)
		}
		storages = append(storages, storage)
	}
	// Any errors encountered by rows.Next or rows.Scan are returned here.
	if rows.Err() != nil {
		return rows.Err()
	}
	userToDelete := []*User{}
	for _, storage := range storages {
		var userid string
		if storage.ID != "object::store:amazon::test" {
			username := strings.Replace(storage.ID, "object::user:", "", -1)
			row := conn.QueryRow(context.TODO(), "select uid from users where uid = $1", username)
			err := row.Scan(&userid)
			if err != nil {
				if err == pgx.ErrNoRows {
					fmt.Println("no row for user: ", username)
					userToDelete = append(userToDelete, &User{UID: username, Storage: storage.NumericID})
					continue
				}
				return err
			}
			fmt.Println("user id: ", userid)
		}
	}
	for _, u := range userToDelete {
		cmdTag, err := conn.Exec(context.TODO(), "delete from filecache where storage = $1", u.Storage)
		if err != nil {
			return err
		}
		fmt.Println(cmdTag)
	}
	return nil
}
type GroupFolder struct {
	FolderID   int64
	MountPoint string
	Quota      int64
	ACL        int64
}
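// deleteFileRecursive removes a filecache entry together with its direct
// children. Despite the name it only descends one level; a full recursion
// would need to re-run on the deleted children's ids level by level.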
func deleteFileRecursive(ctx context.Context, db *bun.DB, fileid string) error {
	qry, err := db.NewDelete().
		Model((*store.FileCache)(nil)).
		ModelTableExpr("filecache as file_cache").
		Where("? = ? OR parent = ?", bun.Ident("fileid"), fileid, fileid).
		// Returning("parent").
		Exec(ctx)
	if err != nil {
		return err
	}
	fmt.Println(qry)
	return nil
}
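// cleanPreviews lists the generated previews under appdata_jdsjflj/preview
// (jdsjflj is the INSTANCE_ID from the env file). Previews can simply be
// dropped instead of migrated, since Nextcloud regenerates them on demand.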
func cleanPreviews(ctx context.Context, db *bun.DB) error {
	rows, err := db.NewSelect().
		Model((*store.FileCache)(nil)).
		ModelTableExpr("filecache as file_cache").
		Where("path LIKE 'appdata_jdsjflj/preview/%'").
		Rows(ctx)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		file := new(store.FileCache)
		if err := db.ScanRow(ctx, rows, file); err != nil {
			return err
		}
		fmt.Println(file)
	}
	return rows.Err()
}
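// cleanFileStorage is the bun-based counterpart of foo: it deletes filecache
// rows belonging to per-user object storages whose user has been removed.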
func cleanFileStorage(ctx context.Context, db *bun.DB) error {
	rows, err := db.NewSelect().
		Model((*Storage)(nil)).
		Rows(ctx)
	if err != nil {
		return err
	}
	defer rows.Close()
	userToDelete := []*User{}
	for rows.Next() {
		storage := new(Storage)
		if err := db.ScanRow(ctx, rows, storage); err != nil {
			return err
		}
		// TOFIX: don't hardcode the bucket storage id
		if storage.ID != "object::store:amazon::test" {
			user := new(Users)
			username := strings.Replace(storage.ID, "object::user:", "", -1)
			err = db.NewSelect().
				Model(user).
				Where("? = ?", bun.Ident("uid"), username).
				Limit(1).
				Scan(ctx)
			if err != nil {
				if err == sql.ErrNoRows {
					fmt.Println("no row for user: ", username)
					userToDelete = append(userToDelete, &User{UID: username, Storage: storage.NumericID})
					continue
				}
				return err
			}
		}
	}
	fmt.Println(userToDelete)
	for _, u := range userToDelete {
		qry, err := db.NewDelete().
			Model((*store.FileCache)(nil)).
			ModelTableExpr("filecache as file_cache").
			Where("? = ?", bun.Ident("storage"), u.Storage).
			Exec(ctx)
		if err != nil {
			return err
		}
		fmt.Println(qry)
	}
	return nil
}
func cleanObjects(ctx context.Context, db *bun.DB) {
}
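// migrateGroupFolder moves a group folder's filecache entries out of the
// virtual __groupfolders/<id> tree and onto a regular user's storage: paths
// are rewritten to files/<user>/<folder>, path_hash is recomputed (Nextcloud
// stores it as md5 of the path), and the folder root is re-parented under
// that user's "files" directory.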
func migrateGroupFolder(ctx context.Context, db *bun.DB) {
	// Find the group folder id for the group folder to move.
	groupFolder := new(GroupFolder)
	groupFolderName := "My Group Folder"
	err := db.NewSelect().
		Model(groupFolder).
		Where("? = ?", bun.Ident("mount_point"), groupFolderName).
		Limit(1).
		Scan(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(groupFolder)
	// Find the storage ID for the target user.
	storage := new(Storage)
	groupUserName := "mygroup"
	folderName := "Group Folder"
	storageID := fmt.Sprintf("object::user:%s", groupUserName)
	err = db.NewSelect().
		Model(storage).
		Where("? = ?", bun.Ident("id"), storageID).
		Limit(1).
		Scan(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(storage)
	// The user's "files" directory becomes the new parent of the folder root.
	parent := new(store.FileCache)
	err = db.NewSelect().
		Model(parent).
		ModelTableExpr("filecache as file_cache").
		Where("storage = ? AND path = 'files'", storage.NumericID).
		Scan(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(parent.Fileid)
	groupFolderPath := fmt.Sprintf("__groupfolders/%v", groupFolder.FolderID)
	groupPath := fmt.Sprintf("files/%s/%s", groupUserName, folderName)
	fmt.Println(storage.NumericID)
	rows, err := db.NewSelect().
		Model((*store.FileCache)(nil)).
		ModelTableExpr("filecache as file_cache").
		Where("path LIKE ?", groupFolderPath+"%").
		Rows(ctx)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		file := new(store.FileCache)
		if err := db.ScanRow(ctx, rows, file); err != nil {
			panic(err)
		}
		file.Path = strings.Replace(file.Path, groupFolderPath, groupPath, -1)
		file.PathHash = fmt.Sprintf("%x", md5.Sum([]byte(file.Path)))
		file.Storage = storage.NumericID
		if file.Path == groupPath {
			file.Name = folderName
			file.Parent = parent.Fileid
		}
		// name and parent must be in the column list, or the folder root's
		// re-parenting above would be silently dropped; for all other rows
		// they are rewritten with the values just scanned from the DB.
		qry, err := db.NewUpdate().
			Model(file).
			ModelTableExpr("filecache as file_cache").
			Where("fileid = ?", file.Fileid).
			Column("path", "path_hash", "storage", "name", "parent").
			Exec(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(qry)
	}
	if err := rows.Err(); err != nil {
		panic(err)
	}
	/* rootFolder := new(store.FileCache)
	rootFolder.Parent = parent.Fileid
	rootFolder.Name = folderName
	qry, err := db.NewUpdate().
		Model(rootFolder).