- commit: 654e15b
- parent: 954b9b8
- author: Eric Bower
- date: 2025-01-25 14:11:52 -0500 EST
refactor(pgs): decouple from rest of codebase
+1, -1
1@@ -87,7 +87,7 @@ successfully added pico+ user
2 ---
3
4 [TestUser - 1]
5-[{"id":"1","user_id":"user-1","name":"my-key","key":"nice-pubkey","created_at":"0001-01-01T00:00:00Z"}]
6+[{"id":"1","user_id":"user-1","name":"my-key","public_key":"nice-pubkey","created_at":"0001-01-01T00:00:00Z"}]
7 ---
8
9 [TestAuthApi/rss - 1]
+23, -2
1@@ -1,7 +1,28 @@
2 package main
3
4-import "github.com/picosh/pico/pgs"
5+import (
6+ "github.com/picosh/pico/pgs"
7+ pgsdb "github.com/picosh/pico/pgs/db"
8+ "github.com/picosh/pico/shared"
9+ "github.com/picosh/pico/shared/storage"
10+ "github.com/picosh/utils"
11+)
12
13 func main() {
14- pgs.StartSshServer()
15+ minioURL := utils.GetEnv("MINIO_URL", "")
16+ minioUser := utils.GetEnv("MINIO_ROOT_USER", "")
17+ minioPass := utils.GetEnv("MINIO_ROOT_PASSWORD", "")
18+ dbURL := utils.GetEnv("DATABASE_URL", "")
19+ logger := shared.CreateLogger("pgs")
20+ dbpool, err := pgsdb.NewDB(dbURL, logger)
21+ if err != nil {
22+ panic(err)
23+ }
24+ st, err := storage.NewStorageMinio(logger, minioURL, minioUser, minioPass)
25+ if err != nil {
26+ panic(err)
27+ }
28+ cfg := pgs.NewPgsConfig(logger, dbpool, st)
29+ killCh := make(chan error)
30+ pgs.StartSshServer(cfg, killCh)
31 }
+22, -2
1@@ -1,7 +1,27 @@
2 package main
3
4-import "github.com/picosh/pico/pgs"
5+import (
6+ "github.com/picosh/pico/pgs"
7+ pgsdb "github.com/picosh/pico/pgs/db"
8+ "github.com/picosh/pico/shared"
9+ "github.com/picosh/pico/shared/storage"
10+ "github.com/picosh/utils"
11+)
12
13 func main() {
14- pgs.StartApiServer()
15+ minioURL := utils.GetEnv("MINIO_URL", "")
16+ minioUser := utils.GetEnv("MINIO_ROOT_USER", "")
17+ minioPass := utils.GetEnv("MINIO_ROOT_PASSWORD", "")
18+ dbURL := utils.GetEnv("DATABASE_URL", "")
19+ logger := shared.CreateLogger("pgs")
20+ dbpool, err := pgsdb.NewDB(dbURL, logger)
21+ if err != nil {
22+ panic(err)
23+ }
24+ st, err := storage.NewStorageMinio(logger, minioURL, minioUser, minioPass)
25+ if err != nil {
26+ panic(err)
27+ }
28+ cfg := pgs.NewPgsConfig(logger, dbpool, st)
29+ pgs.StartApiServer(cfg)
30 }
1@@ -6,8 +6,8 @@ import (
2 "strings"
3
4 "github.com/picosh/pico/db"
5- "github.com/picosh/pico/db/postgres"
6 "github.com/picosh/pico/pgs"
7+ pgsdb "github.com/picosh/pico/pgs/db"
8 "github.com/picosh/pico/shared"
9 "github.com/picosh/pico/shared/storage"
10 "github.com/picosh/utils"
11@@ -41,10 +41,10 @@ func main() {
12 picoCfg.MinioURL = os.Getenv("MINIO_URL")
13 picoCfg.MinioUser = os.Getenv("MINIO_ROOT_USER")
14 picoCfg.MinioPass = os.Getenv("MINIO_ROOT_PASSWORD")
15- picoDb := postgres.NewDB(picoCfg.DbURL, picoCfg.Logger)
16+ picoDb, err := pgsdb.NewDB(picoCfg.DbURL, picoCfg.Logger)
17+ bail(err)
18
19 var st storage.StorageServe
20- var err error
21 st, err = storage.NewStorageMinio(logger, picoCfg.MinioURL, picoCfg.MinioUser, picoCfg.MinioPass)
22 bail(err)
23
M db/db.go +31, -39
1@@ -15,18 +15,18 @@ var ErrNameInvalid = errors.New("username has invalid characters in it")
2 var ErrPublicKeyTaken = errors.New("public key is already associated with another user")
3
4 type PublicKey struct {
5- ID string `json:"id"`
6- UserID string `json:"user_id"`
7- Name string `json:"name"`
8- Key string `json:"key"`
9- CreatedAt *time.Time `json:"created_at"`
10+ ID string `json:"id" db:"id"`
11+ UserID string `json:"user_id" db:"user_id"`
12+ Name string `json:"name" db:"name"`
13+ Key string `json:"public_key" db:"public_key"`
14+ CreatedAt *time.Time `json:"created_at" db:"created_at"`
15 }
16
17 type User struct {
18- ID string `json:"id"`
19- Name string `json:"name"`
20- PublicKey *PublicKey `json:"public_key,omitempty"`
21- CreatedAt *time.Time `json:"created_at"`
22+ ID string `json:"id" db:"id"`
23+ Name string `json:"name" db:"name"`
24+ PublicKey *PublicKey `json:"public_key,omitempty" db:"public_key,omitempty"`
25+ CreatedAt *time.Time `json:"created_at" db:"created_at"`
26 }
27
28 type PostData struct {
29@@ -53,20 +53,20 @@ func (p *PostData) Scan(value interface{}) error {
30 }
31
32 type Project struct {
33- ID string `json:"id"`
34- UserID string `json:"user_id"`
35- Name string `json:"name"`
36- ProjectDir string `json:"project_dir"`
37- Username string `json:"username"`
38- Acl ProjectAcl `json:"acl"`
39- Blocked string `json:"blocked"`
40- CreatedAt *time.Time `json:"created_at"`
41- UpdatedAt *time.Time `json:"updated_at"`
42+ ID string `json:"id" db:"id"`
43+ UserID string `json:"user_id" db:"user_id"`
44+ Name string `json:"name" db:"name"`
45+ ProjectDir string `json:"project_dir" db:"project_dir"`
46+ Username string `json:"username" db:"username"`
47+ Acl ProjectAcl `json:"acl" db:"acl"`
48+ Blocked string `json:"blocked" db:"blocked"`
49+ CreatedAt *time.Time `json:"created_at" db:"created_at"`
50+ UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
51 }
52
53 type ProjectAcl struct {
54- Type string `json:"type"`
55- Data []string `json:"data"`
56+ Type string `json:"type" db:"type"`
57+ Data []string `json:"data" db:"data"`
58 }
59
60 // Make the Attrs struct implement the driver.Valuer interface. This method
61@@ -218,13 +218,13 @@ type Token struct {
62 }
63
64 type FeatureFlag struct {
65- ID string `json:"id"`
66- UserID string `json:"user_id"`
67- PaymentHistoryID string `json:"payment_history_id"`
68- Name string `json:"name"`
69- CreatedAt *time.Time `json:"created_at"`
70- ExpiresAt *time.Time `json:"expires_at"`
71- Data FeatureFlagData `json:"data"`
72+ ID string `json:"id" db:"id"`
73+ UserID string `json:"user_id" db:"user_id"`
74+ PaymentHistoryID string `json:"payment_history_id" db:"payment_history_id"`
75+ Name string `json:"name" db:"name"`
76+ CreatedAt *time.Time `json:"created_at" db:"created_at"`
77+ ExpiresAt *time.Time `json:"expires_at" db:"expires_at"`
78+ Data FeatureFlagData `json:"data" db:"data"`
79 }
80
81 func NewFeatureFlag(userID, name string, storageMax uint64, fileMax int64, specialFileMax int64) *FeatureFlag {
82@@ -268,9 +268,9 @@ func (ff *FeatureFlag) IsValid() bool {
83 }
84
85 type FeatureFlagData struct {
86- StorageMax uint64 `json:"storage_max"`
87- FileMax int64 `json:"file_max"`
88- SpecialFileMax int64 `json:"special_file_max"`
89+ StorageMax uint64 `json:"storage_max" db:"storage_max"`
90+ FileMax int64 `json:"file_max" db:"file_max"`
91+ SpecialFileMax int64 `json:"special_file_max" db:"special_file_max"`
92 }
93
94 // Make the Attrs struct implement the driver.Valuer interface. This method
95@@ -354,6 +354,7 @@ type DB interface {
96 FindUserForName(name string) (*User, error)
97 FindUserForNameAndKey(name string, pubkey string) (*User, error)
98 FindUserForKey(name string, pubkey string) (*User, error)
99+ FindUserByPubkey(pubkey string) (*User, error)
100 FindUser(userID string) (*User, error)
101 ValidateName(name string) (bool, error)
102 SetUserName(userID string, name string) error
103@@ -405,16 +406,7 @@ type DB interface {
104 FindFeedItemsByPostID(postID string) ([]*FeedItem, error)
105
106 UpsertProject(userID, name, projectDir string) (*Project, error)
107- InsertProject(userID, name, projectDir string) (string, error)
108- UpdateProject(userID, name string) error
109- UpdateProjectAcl(userID, name string, acl ProjectAcl) error
110- LinkToProject(userID, projectID, projectDir string, commit bool) error
111- RemoveProject(projectID string) error
112 FindProjectByName(userID, name string) (*Project, error)
113- FindProjectLinks(userID, name string) ([]*Project, error)
114- FindProjectsByUser(userID string) ([]*Project, error)
115- FindProjectsByPrefix(userID, name string) ([]*Project, error)
116- FindAllProjects(page *Pager, by string) (*Paginate[*Project], error)
117
118 Close() error
119 }
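The `db:"..."` struct tags added above are what sqlx (pulled into go.mod later in this commit) uses to map query columns onto struct fields. A minimal sketch of that mapping, assuming a reachable Postgres with the existing app_users table; the DSN is a placeholder:

    package main

    import (
        "log"

        "github.com/jmoiron/sqlx"
        _ "github.com/lib/pq"
        "github.com/picosh/pico/db"
    )

    func main() {
        // placeholder DSN; any reachable postgres URL works here
        conn, err := sqlx.Connect("postgres", "postgres://localhost:5432/pico?sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // Get scans a single row into the struct by matching columns to the `db` tags
        user := db.User{}
        err = conn.Get(&user, "SELECT id, name, created_at FROM app_users WHERE name=$1", "testusr")
        if err != nil {
            log.Fatal(err)
        }
        log.Println(user.ID, user.Name, user.CreatedAt)
    }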
+16, -201
1@@ -256,7 +256,6 @@ const (
2
3 sqlInsertProject = `INSERT INTO projects (user_id, name, project_dir) VALUES ($1, $2, $3) RETURNING id;`
4 sqlUpdateProject = `UPDATE projects SET updated_at = $3 WHERE user_id = $1 AND name = $2;`
5- sqlUpdateProjectAcl = `UPDATE projects SET acl = $3, updated_at = $4 WHERE user_id = $1 AND name = $2;`
6 sqlFindProjectByName = `SELECT id, user_id, name, project_dir, acl, blocked, created_at, updated_at FROM projects WHERE user_id = $1 AND name = $2;`
7 sqlSelectProjectCount = `SELECT count(id) FROM projects`
8 sqlFindProjectsByUser = `SELECT id, user_id, name, project_dir, acl, blocked, created_at, updated_at FROM projects WHERE user_id = $1 ORDER BY name ASC, updated_at DESC;`
9@@ -624,6 +623,22 @@ func (me *PsqlDB) FindUserForKey(username string, key string) (*db.User, error)
10 return nil, err
11 }
12
13+func (me *PsqlDB) FindUserByPubkey(key string) (*db.User, error) {
14+ me.Logger.Info("attempting to find user with only public key", "key", key)
15+ pk, err := me.FindPublicKeyForKey(key)
16+ if err != nil {
17+ return nil, err
18+ }
19+
20+ me.Logger.Info("found pubkey, looking for user", "key", key, "userId", pk.UserID)
21+ user, err := me.FindUser(pk.UserID)
22+ if err != nil {
23+ return nil, err
24+ }
25+ user.PublicKey = pk
26+ return user, nil
27+}
28+
29 func (me *PsqlDB) FindUser(userID string) (*db.User, error) {
30 user := &db.User{}
31 var un sql.NullString
32@@ -1639,60 +1654,6 @@ func (me *PsqlDB) UpdateProject(userID, name string) error {
33 return err
34 }
35
36-func (me *PsqlDB) UpdateProjectAcl(userID, name string, acl db.ProjectAcl) error {
37- _, err := me.Db.Exec(sqlUpdateProjectAcl, userID, name, acl, time.Now())
38- return err
39-}
40-
41-func (me *PsqlDB) LinkToProject(userID, projectID, projectDir string, commit bool) error {
42- linkToProject, err := me.FindProjectByName(userID, projectDir)
43- if err != nil {
44- return err
45- }
46- isAlreadyLinked := linkToProject.Name != linkToProject.ProjectDir
47- sameProject := linkToProject.ID == projectID
48-
49- /*
50- A project linked to another project which is also linked to a
51- project is forbidden. CI/CD Example:
52- - ProjectProd links to ProjectStaging
53- - ProjectStaging links to ProjectMain
54- - We merge `main` and trigger a deploy which uploads to ProjectMain
55- - All three get updated immediately
56- This scenario was not the intent of our CI/CD. What we actually
57- wanted was to create a snapshot of ProjectMain and have ProjectStaging
58- link to the snapshot, but that's not the intended design of pgs.
59-
60- So we want to close that gap here.
61-
62- We ensure that `project.Name` and `project.ProjectDir` are identical
63- when there is no aliasing.
64- */
65- if !sameProject && isAlreadyLinked {
66- return fmt.Errorf(
67- "cannot link (%s) to (%s) because it is also a link to (%s)",
68- projectID,
69- projectDir,
70- linkToProject.ProjectDir,
71- )
72- }
73-
74- if commit {
75- _, err = me.Db.Exec(
76- sqlLinkToProject,
77- projectDir,
78- time.Now(),
79- projectID,
80- )
81- }
82- return err
83-}
84-
85-func (me *PsqlDB) RemoveProject(projectID string) error {
86- _, err := me.Db.Exec(sqlRemoveProject, projectID)
87- return err
88-}
89-
90 func (me *PsqlDB) FindProjectByName(userID, name string) (*db.Project, error) {
91 project := &db.Project{}
92 r := me.Db.QueryRow(sqlFindProjectByName, userID, name)
93@@ -1713,152 +1674,6 @@ func (me *PsqlDB) FindProjectByName(userID, name string) (*db.Project, error) {
94 return project, nil
95 }
96
97-func (me *PsqlDB) FindProjectLinks(userID, name string) ([]*db.Project, error) {
98- var projects []*db.Project
99- rs, err := me.Db.Query(sqlFindProjectLinks, userID, name)
100- if err != nil {
101- return nil, err
102- }
103- for rs.Next() {
104- project := &db.Project{}
105- err := rs.Scan(
106- &project.ID,
107- &project.UserID,
108- &project.Name,
109- &project.ProjectDir,
110- &project.Acl,
111- &project.Blocked,
112- &project.CreatedAt,
113- &project.UpdatedAt,
114- )
115- if err != nil {
116- return nil, err
117- }
118-
119- projects = append(projects, project)
120- }
121-
122- if rs.Err() != nil {
123- return nil, rs.Err()
124- }
125-
126- return projects, nil
127-}
128-
129-func (me *PsqlDB) FindProjectsByPrefix(userID, prefix string) ([]*db.Project, error) {
130- var projects []*db.Project
131- rs, err := me.Db.Query(sqlFindProjectsByPrefix, userID, prefix+"%")
132- if err != nil {
133- return nil, err
134- }
135- for rs.Next() {
136- project := &db.Project{}
137- err := rs.Scan(
138- &project.ID,
139- &project.UserID,
140- &project.Name,
141- &project.ProjectDir,
142- &project.Acl,
143- &project.Blocked,
144- &project.CreatedAt,
145- &project.UpdatedAt,
146- )
147- if err != nil {
148- return nil, err
149- }
150-
151- projects = append(projects, project)
152- }
153-
154- if rs.Err() != nil {
155- return nil, rs.Err()
156- }
157-
158- return projects, nil
159-}
160-
161-func (me *PsqlDB) FindProjectsByUser(userID string) ([]*db.Project, error) {
162- var projects []*db.Project
163- rs, err := me.Db.Query(sqlFindProjectsByUser, userID)
164- if err != nil {
165- return nil, err
166- }
167- for rs.Next() {
168- project := &db.Project{}
169- err := rs.Scan(
170- &project.ID,
171- &project.UserID,
172- &project.Name,
173- &project.ProjectDir,
174- &project.Acl,
175- &project.Blocked,
176- &project.CreatedAt,
177- &project.UpdatedAt,
178- )
179- if err != nil {
180- return nil, err
181- }
182-
183- projects = append(projects, project)
184- }
185-
186- if rs.Err() != nil {
187- return nil, rs.Err()
188- }
189-
190- return projects, nil
191-}
192-
193-func (me *PsqlDB) FindAllProjects(page *db.Pager, by string) (*db.Paginate[*db.Project], error) {
194- var projects []*db.Project
195- sqlFindAllProjects := fmt.Sprintf(`
196- SELECT projects.id, user_id, app_users.name as username, projects.name, project_dir, projects.acl, projects.blocked, projects.created_at, projects.updated_at
197- FROM projects
198- LEFT JOIN app_users ON app_users.id = projects.user_id
199- ORDER BY %s DESC
200- LIMIT $1 OFFSET $2`, by)
201- rs, err := me.Db.Query(sqlFindAllProjects, page.Num, page.Num*page.Page)
202- if err != nil {
203- return nil, err
204- }
205- for rs.Next() {
206- project := &db.Project{}
207- err := rs.Scan(
208- &project.ID,
209- &project.UserID,
210- &project.Username,
211- &project.Name,
212- &project.ProjectDir,
213- &project.Acl,
214- &project.Blocked,
215- &project.CreatedAt,
216- &project.UpdatedAt,
217- )
218- if err != nil {
219- return nil, err
220- }
221-
222- projects = append(projects, project)
223- }
224-
225- if rs.Err() != nil {
226- return nil, rs.Err()
227- }
228-
229- var count int
230- err = me.Db.QueryRow(sqlSelectProjectCount).Scan(&count)
231- if err != nil {
232- return nil, err
233- }
234-
235- pager := &db.Paginate[*db.Project]{
236- Data: projects,
237- Total: int(math.Ceil(float64(count) / float64(page.Num))),
238- }
239-
240- return pager, nil
241-}
242-
243 func (me *PsqlDB) InsertToken(userID, name string) (string, error) {
244 var token string
245 err := me.Db.QueryRow(sqlInsertToken, userID, name).Scan(&token)
+4, -28
1@@ -69,6 +69,10 @@ func (me *StubDB) FindUserForKey(username string, key string) (*db.User, error)
2 return nil, notImpl
3 }
4
5+func (me *StubDB) FindUserByPubkey(key string) (*db.User, error) {
6+ return nil, notImpl
7+}
8+
9 func (me *StubDB) FindUser(userID string) (*db.User, error) {
10 return nil, notImpl
11 }
12@@ -225,38 +229,10 @@ func (me *StubDB) UpdateProject(userID, name string) error {
13 return notImpl
14 }
15
16-func (me *StubDB) UpdateProjectAcl(userID, name string, acl db.ProjectAcl) error {
17- return notImpl
18-}
19-
20-func (me *StubDB) LinkToProject(userID, projectID, projectDir string, commit bool) error {
21- return notImpl
22-}
23-
24-func (me *StubDB) RemoveProject(projectID string) error {
25- return notImpl
26-}
27-
28 func (me *StubDB) FindProjectByName(userID, name string) (*db.Project, error) {
29 return &db.Project{}, notImpl
30 }
31
32-func (me *StubDB) FindProjectLinks(userID, name string) ([]*db.Project, error) {
33- return []*db.Project{}, notImpl
34-}
35-
36-func (me *StubDB) FindProjectsByPrefix(userID, prefix string) ([]*db.Project, error) {
37- return []*db.Project{}, notImpl
38-}
39-
40-func (me *StubDB) FindProjectsByUser(userID string) ([]*db.Project, error) {
41- return []*db.Project{}, notImpl
42-}
43-
44-func (me *StubDB) FindAllProjects(page *db.Pager, by string) (*db.Paginate[*db.Project], error) {
45- return &db.Paginate[*db.Project]{}, notImpl
46-}
47-
48 func (me *StubDB) InsertToken(userID, name string) (string, error) {
49 return "", notImpl
50 }
+1, -1
1@@ -69,7 +69,7 @@ func StartSshServer() {
2 }
3 handler := filehandlers.NewFileHandlerRouter(cfg, dbh, fileMap)
4
5- sshAuth := shared.NewSshAuthHandler(dbh, logger, cfg)
6+ sshAuth := shared.NewSshAuthHandler(dbh, logger)
7 s, err := wish.NewServer(
8 wish.WithAddress(fmt.Sprintf("%s:%s", host, port)),
9 wish.WithHostKeyPath("ssh_data/term_info_ed25519"),
M go.mod +4, -3
1@@ -42,8 +42,10 @@ require (
2 github.com/google/go-cmp v0.6.0
3 github.com/google/uuid v1.6.0
4 github.com/gorilla/feeds v1.2.0
5+ github.com/jmoiron/sqlx v1.4.0
6 github.com/lib/pq v1.10.9
7 github.com/microcosm-cc/bluemonday v1.0.27
8+ github.com/minio/minio-go/v7 v7.0.80
9 github.com/mmcdole/gofeed v1.3.0
10 github.com/muesli/reflow v0.3.0
11 github.com/muesli/termenv v0.15.3-0.20240912151726-82936c5ea257
12@@ -53,6 +55,7 @@ require (
13 github.com/picosh/send v0.0.0-20250121195737-daab6db117d5
14 github.com/picosh/tunkit v0.0.0-20240905223921-532404cef9d9
15 github.com/picosh/utils v0.0.0-20241120033529-8ca070c09bf4
16+ github.com/pkg/sftp v1.13.7
17 github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
18 github.com/sendgrid/sendgrid-go v3.16.0+incompatible
19 github.com/simplesurance/go-ip-anonymizer v0.0.0-20200429124537-35a880f8e87d
20@@ -173,7 +176,7 @@ require (
21 github.com/go-logfmt/logfmt v0.6.0 // indirect
22 github.com/go-ole/go-ole v1.3.0 // indirect
23 github.com/go-redis/redis/v8 v8.11.5 // indirect
24- github.com/go-sql-driver/mysql v1.7.1 // indirect
25+ github.com/go-sql-driver/mysql v1.8.1 // indirect
26 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
27 github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
28 github.com/goccy/go-json v0.10.3 // indirect
29@@ -234,7 +237,6 @@ require (
30 github.com/miekg/dns v1.1.62 // indirect
31 github.com/minio/madmin-go/v3 v3.0.77 // indirect
32 github.com/minio/md5-simd v1.1.2 // indirect
33- github.com/minio/minio-go/v7 v7.0.80 // indirect
34 github.com/mitchellh/copystructure v1.2.0 // indirect
35 github.com/mitchellh/go-ps v1.0.0 // indirect
36 github.com/mitchellh/reflectwalk v1.0.2 // indirect
37@@ -256,7 +258,6 @@ require (
38 github.com/picosh/go-rsync-receiver v0.0.0-20250121150813-93b4f1b7aa4b // indirect
39 github.com/pierrec/lz4/v4 v4.1.21 // indirect
40 github.com/pkg/errors v0.9.1 // indirect
41- github.com/pkg/sftp v1.13.7 // indirect
42 github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
43 github.com/pquerna/cachecontrol v0.2.0 // indirect
44 github.com/prometheus/client_golang v1.20.5 // indirect
M go.sum +6, -2
1@@ -355,8 +355,8 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
2 github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
3 github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
4 github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
5-github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
6-github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
7+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
8+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
9 github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
10 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
11 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
12@@ -544,6 +544,8 @@ github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0f
13 github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
14 github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
15 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
16+github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
17+github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
18 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
19 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
20 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
21@@ -613,6 +615,8 @@ github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6T
22 github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
23 github.com/mattn/go-sixel v0.0.5 h1:55w2FR5ncuhKhXrM5ly1eiqMQfZsnAHIpYNGZX03Cv8=
24 github.com/mattn/go-sixel v0.0.5/go.mod h1:h2Sss+DiUEHy0pUqcIB6PFXo5Cy8sTQEFr3a9/5ZLNw=
25+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
26+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
27 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
28 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
29 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+1, -1
1@@ -67,7 +67,7 @@ func StartSshServer() {
2 "fallback": filehandlers.NewScpPostHandler(dbh, cfg, hooks),
3 }
4 handler := filehandlers.NewFileHandlerRouter(cfg, dbh, fileMap)
5- sshAuth := shared.NewSshAuthHandler(dbh, logger, cfg)
6+ sshAuth := shared.NewSshAuthHandler(dbh, logger)
7 s, err := wish.NewServer(
8 wish.WithAddress(fmt.Sprintf("%s:%s", host, port)),
9 wish.WithHostKeyPath("ssh_data/term_info_ed25519"),
+13, -4
1@@ -7,10 +7,12 @@ import (
2 "log/slog"
3 "path/filepath"
4 "strings"
5+ "time"
6
7 "github.com/charmbracelet/lipgloss"
8 "github.com/charmbracelet/lipgloss/table"
9 "github.com/picosh/pico/db"
10+ pgsdb "github.com/picosh/pico/pgs/db"
11 "github.com/picosh/pico/shared"
12 "github.com/picosh/pico/tui/common"
13 sst "github.com/picosh/pobj/storage"
14@@ -124,12 +126,12 @@ type Cmd struct {
15 Session utils.CmdSession
16 Log *slog.Logger
17 Store sst.ObjectStorage
18- Dbpool db.DB
19+ Dbpool pgsdb.PgsDB
20 Write bool
21 Styles common.Styles
22 Width int
23 Height int
24- Cfg *shared.ConfigSite
25+ Cfg *PgsConfig
26 }
27
28 func (c *Cmd) output(out string) {
29@@ -205,7 +207,7 @@ func (c *Cmd) help() {
30 }
31
32 func (c *Cmd) stats(cfgMaxSize uint64) error {
33- ff, err := c.Dbpool.FindFeatureForUser(c.User.ID, "plus")
34+ ff, err := c.Dbpool.FindFeature(c.User.ID, "plus")
35 if err != nil {
36 ff = db.NewFeatureFlag(c.User.ID, "plus", cfgMaxSize, 0, 0)
37 }
38@@ -539,7 +541,14 @@ func (c *Cmd) cache(projectName string) error {
39 }
40
41 func (c *Cmd) cacheAll() error {
42- isAdmin := c.Dbpool.HasFeatureForUser(c.User.ID, "admin")
43+ isAdmin := false
44+ ff, _ := c.Dbpool.FindFeature(c.User.ID, "admin")
45+ if ff != nil {
46+ if ff.ExpiresAt.Before(time.Now()) {
47+ isAdmin = true
48+ }
49+ }
50+
51 if !isAdmin {
52 return fmt.Errorf("must be admin to use this command")
53 }
R pgs/wish.go => pgs/cli_wish.go +5, -4
1@@ -11,19 +11,20 @@ import (
2 bm "github.com/charmbracelet/wish/bubbletea"
3 "github.com/muesli/termenv"
4 "github.com/picosh/pico/db"
5+ pgsdb "github.com/picosh/pico/pgs/db"
6 "github.com/picosh/pico/tui/common"
7 sendutils "github.com/picosh/send/utils"
8 "github.com/picosh/utils"
9 )
10
11-func getUser(s ssh.Session, dbpool db.DB) (*db.User, error) {
12+func getUser(s ssh.Session, dbpool pgsdb.PgsDB) (*db.User, error) {
13 if s.PublicKey() == nil {
14 return nil, fmt.Errorf("key not found")
15 }
16
17 key := utils.KeyForKeyText(s.PublicKey())
18
19- user, err := dbpool.FindUserForKey(s.User(), key)
20+ user, err := dbpool.FindUserByPubkey(key)
21 if err != nil {
22 return nil, err
23 }
24@@ -64,10 +65,10 @@ func flagCheck(cmd *flag.FlagSet, posArg string, cmdArgs []string) bool {
25 }
26
27 func WishMiddleware(handler *UploadAssetHandler) wish.Middleware {
28- dbpool := handler.DBPool
29+ dbpool := handler.Cfg.DB
30 log := handler.Cfg.Logger
31 cfg := handler.Cfg
32- store := handler.Storage
33+ store := handler.Cfg.Storage
34
35 return func(next ssh.Handler) ssh.Handler {
36 return func(sesh ssh.Session) {
+73, -19
1@@ -2,19 +2,72 @@ package pgs
2
3 import (
4 "fmt"
5+ "log/slog"
6+ "path/filepath"
7 "time"
8
9- "github.com/picosh/pico/shared"
10+ pgsdb "github.com/picosh/pico/pgs/db"
11+ "github.com/picosh/pico/shared/storage"
12 "github.com/picosh/utils"
13 )
14
15+type PgsConfig struct {
16+ CacheControl string
17+ CacheTTL time.Duration
18+ Domain string
19+ MaxAssetSize int64
20+ MaxSize uint64
21+ MaxSpecialFileSize int64
22+ SshHost string
23+ SshPort string
24+ StorageDir string
25+ TxtPrefix string
26+ WebPort string
27+ WebProtocol string
28+
29+ // This channel will receive the surrogate key for a project (e.g. static site)
30+ // which will inform the caching layer to clear the cache for that site.
31+ CacheClearingQueue chan string
32+ // Database layer; it's just an interface that could be implemented
33+ // with anything.
34+ DB pgsdb.PgsDB
35+ Logger *slog.Logger
36+ // Where we store the static assets uploaded to our service.
37+ Storage storage.StorageServe
38+}
39+
40+func (c *PgsConfig) AssetURL(username, projectName, fpath string) string {
41+ if username == projectName {
42+ return fmt.Sprintf(
43+ "%s://%s.%s/%s",
44+ c.WebProtocol,
45+ username,
46+ c.Domain,
47+ fpath,
48+ )
49+ }
50+
51+ return fmt.Sprintf(
52+ "%s://%s-%s.%s/%s",
53+ c.WebProtocol,
54+ username,
55+ projectName,
56+ c.Domain,
57+ fpath,
58+ )
59+}
60+
61+func (c *PgsConfig) StaticPath(fname string) string {
62+ return filepath.Join("pgs", fname)
63+}
64+
65 var maxSize = uint64(25 * utils.MB)
66 var maxAssetSize = int64(10 * utils.MB)
67
68 // Needs to be small for caching files like _headers and _redirects.
69 var maxSpecialFileSize = int64(5 * utils.KB)
70
71-func NewConfigSite() *shared.ConfigSite {
72+func NewPgsConfig(logger *slog.Logger, dbpool pgsdb.PgsDB, st storage.StorageServe) *PgsConfig {
73 domain := utils.GetEnv("PGS_DOMAIN", "pgs.sh")
74 port := utils.GetEnv("PGS_WEB_PORT", "3000")
75 protocol := utils.GetEnv("PGS_PROTOCOL", "https")
76@@ -26,27 +79,28 @@ func NewConfigSite() *shared.ConfigSite {
77 cacheControl := utils.GetEnv(
78 "PGS_CACHE_CONTROL",
79 fmt.Sprintf("max-age=%d", int(cacheTTL.Seconds())))
80- minioURL := utils.GetEnv("MINIO_URL", "")
81- minioUser := utils.GetEnv("MINIO_ROOT_USER", "")
82- minioPass := utils.GetEnv("MINIO_ROOT_PASSWORD", "")
83- dbURL := utils.GetEnv("DATABASE_URL", "")
84
85- cfg := shared.ConfigSite{
86- Domain: domain,
87- Port: port,
88- Protocol: protocol,
89- DbURL: dbURL,
90- StorageDir: storageDir,
91- CacheTTL: cacheTTL,
92+ sshHost := utils.GetEnv("PGS_SSH_HOST", "0.0.0.0")
93+ sshPort := utils.GetEnv("PGS_SSH_PORT", "2222")
94+
95+ cfg := PgsConfig{
96 CacheControl: cacheControl,
97- MinioURL: minioURL,
98- MinioUser: minioUser,
99- MinioPass: minioPass,
100- Space: "pgs",
101- MaxSize: maxSize,
102+ CacheTTL: cacheTTL,
103+ Domain: domain,
104 MaxAssetSize: maxAssetSize,
105+ MaxSize: maxSize,
106 MaxSpecialFileSize: maxSpecialFileSize,
107- Logger: shared.CreateLogger("pgs"),
108+ SshHost: sshHost,
109+ SshPort: sshPort,
110+ StorageDir: storageDir,
111+ TxtPrefix: "pgs",
112+ WebPort: port,
113+ WebProtocol: protocol,
114+
115+ CacheClearingQueue: make(chan string, 100),
116+ DB: dbpool,
117+ Logger: logger,
118+ Storage: st,
119 }
120
121 return &cfg
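For reference, with the default PGS_PROTOCOL and PGS_DOMAIN ("https", "pgs.sh") the new AssetURL helper above produces the two URL shapes below; "alice" and "blog" are hypothetical names used only for illustration:

    cfg.AssetURL("alice", "alice", "index.html") // https://alice.pgs.sh/index.html
    cfg.AssetURL("alice", "blog", "index.html")  // https://alice-blog.pgs.sh/index.html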
+26, -0
1@@ -0,0 +1,26 @@
2+package pgsdb
3+
4+import "github.com/picosh/pico/db"
5+
6+type PgsDB interface {
7+ FindUser(userID string) (*db.User, error)
8+ FindUserByName(name string) (*db.User, error)
9+ FindUserByPubkey(pubkey string) (*db.User, error)
10+ FindUsers() ([]*db.User, error)
11+
12+ FindFeature(userID string, name string) (*db.FeatureFlag, error)
13+
14+ InsertProject(userID, name, projectDir string) (string, error)
15+ UpdateProject(userID, name string) error
16+ UpdateProjectAcl(userID, name string, acl db.ProjectAcl) error
17+ UpsertProject(userID, projectName, projectDir string) (*db.Project, error)
18+ RemoveProject(projectID string) error
19+ LinkToProject(userID, projectID, projectDir string, commit bool) error
20+ FindProjectByName(userID, name string) (*db.Project, error)
21+ FindProjectLinks(userID, name string) ([]*db.Project, error)
22+ FindProjectsByUser(userID string) ([]*db.Project, error)
23+ FindProjectsByPrefix(userID, name string) ([]*db.Project, error)
24+ FindProjects(by string) ([]*db.Project, error)
25+
26+ Close() error
27+}
+171, -0
1@@ -0,0 +1,171 @@
2+package pgsdb
3+
4+import (
5+ "fmt"
6+ "log/slog"
7+ "time"
8+
9+ "github.com/google/uuid"
10+ "github.com/picosh/pico/db"
11+ "github.com/picosh/utils"
12+)
13+
14+type MemoryDB struct {
15+ Logger *slog.Logger
16+ Users []*db.User
17+ Projects []*db.Project
18+ Pubkeys []*db.PublicKey
19+ Feature *db.FeatureFlag
20+}
21+
22+var _ PgsDB = (*MemoryDB)(nil)
23+
24+func NewDBMemory(logger *slog.Logger) *MemoryDB {
25+ d := &MemoryDB{
26+ Logger: logger,
27+ }
28+ d.Logger.Info("connecting to our in-memory database. All data created during runtime will be lost on exit.")
29+ return d
30+}
31+
32+func (me *MemoryDB) SetupTestData() {
33+ user := &db.User{
34+ ID: uuid.NewString(),
35+ Name: "testusr",
36+ }
37+ me.Users = append(me.Users, user)
38+ feature := db.NewFeatureFlag(
39+ user.ID,
40+ "plus",
41+ uint64(25*utils.MB),
42+ int64(10*utils.MB),
43+ int64(5*utils.KB),
44+ )
45+ expiresAt := time.Now().Add(time.Hour * 24)
46+ feature.ExpiresAt = &expiresAt
47+ me.Feature = feature
48+}
49+
50+var notImpl = fmt.Errorf("not implemented")
51+
52+func (me *MemoryDB) FindUsers() ([]*db.User, error) {
53+ users := []*db.User{}
54+ return users, notImpl
55+}
56+
57+func (me *MemoryDB) FindUserByPubkey(key string) (*db.User, error) {
58+ for _, pk := range me.Pubkeys {
59+ if pk.Key == key {
60+ return me.FindUser(pk.UserID)
61+ }
62+ }
63+ return nil, fmt.Errorf("user not found")
64+}
65+
66+func (me *MemoryDB) FindUser(userID string) (*db.User, error) {
67+ for _, user := range me.Users {
68+ if user.ID == userID {
69+ return user, nil
70+ }
71+ }
72+ return nil, fmt.Errorf("user not found")
73+}
74+
75+func (me *MemoryDB) FindUserByName(name string) (*db.User, error) {
76+ for _, user := range me.Users {
77+ if user.Name == name {
78+ return user, nil
79+ }
80+ }
81+ return nil, fmt.Errorf("user not found")
82+}
83+
84+func (me *MemoryDB) FindFeature(userID, name string) (*db.FeatureFlag, error) {
85+ return me.Feature, nil
86+}
87+
88+func (me *MemoryDB) Close() error {
89+ return nil
90+}
91+
92+func (me *MemoryDB) FindTotalSizeForUser(userID string) (int, error) {
93+ return 0, notImpl
94+}
95+
96+func (me *MemoryDB) InsertProject(userID, name, projectDir string) (string, error) {
97+ id := uuid.NewString()
98+ me.Projects = append(me.Projects, &db.Project{
99+ ID: id,
100+ UserID: userID,
101+ Name: name,
102+ ProjectDir: projectDir,
103+ })
104+ return id, nil
105+}
106+
107+func (me *MemoryDB) UpdateProject(userID, name string) error {
108+ return notImpl
109+}
110+
111+func (me *MemoryDB) UpsertProject(userID, projectName, projectDir string) (*db.Project, error) {
112+ project, err := me.FindProjectByName(userID, projectName)
113+ if err == nil {
114+ // this just updates the `createdAt` timestamp, useful for book-keeping
115+ err = me.UpdateProject(userID, projectName)
116+ if err != nil {
117+ me.Logger.Error("could not update project", "err", err)
118+ return nil, err
119+ }
120+ return project, nil
121+ }
122+
123+ _, err = me.InsertProject(userID, projectName, projectName)
124+ if err != nil {
125+ me.Logger.Error("could not create project", "err", err)
126+ return nil, err
127+ }
128+ return me.FindProjectByName(userID, projectName)
129+}
130+
131+func (me *MemoryDB) LinkToProject(userID, projectID, projectDir string, commit bool) error {
132+ return notImpl
133+}
134+
135+func (me *MemoryDB) RemoveProject(projectID string) error {
136+ return notImpl
137+}
138+
139+func (me *MemoryDB) FindProjectByName(userID, name string) (*db.Project, error) {
140+ for _, project := range me.Projects {
141+ if project.UserID != userID {
142+ continue
143+ }
144+
145+ if project.Name != name {
146+ continue
147+ }
148+
149+ return project, nil
150+ }
151+ return nil, fmt.Errorf("project not found by name %s", name)
152+}
153+
154+func (me *MemoryDB) FindProjectLinks(userID, name string) ([]*db.Project, error) {
155+ return []*db.Project{}, notImpl
156+}
157+
158+func (me *MemoryDB) FindProjectsByPrefix(userID, prefix string) ([]*db.Project, error) {
159+ return []*db.Project{}, notImpl
160+}
161+
162+func (me *MemoryDB) FindProjectsByUser(userID string) ([]*db.Project, error) {
163+ return []*db.Project{}, notImpl
164+}
165+
166+func (me *MemoryDB) FindProjects(userID string) ([]*db.Project, error) {
167+ return []*db.Project{}, notImpl
168+}
169+
170+func (me *MemoryDB) UpdateProjectAcl(userID, name string, acl db.ProjectAcl) error {
171+ return notImpl
172+}
+232, -0
1@@ -0,0 +1,232 @@
2+package pgsdb
3+
4+import (
5+ "fmt"
6+ "log/slog"
7+ "time"
8+
9+ "github.com/jmoiron/sqlx"
10+ _ "github.com/lib/pq"
11+ "github.com/picosh/pico/db"
12+ "github.com/picosh/utils"
13+)
14+
15+type PgsPsqlDB struct {
16+ Logger *slog.Logger
17+ Db *sqlx.DB
18+}
19+
20+var _ PgsDB = (*PgsPsqlDB)(nil)
21+
22+func NewDB(databaseUrl string, logger *slog.Logger) (*PgsPsqlDB, error) {
23+ var err error
24+ d := &PgsPsqlDB{
25+ Logger: logger,
26+ }
27+ d.Logger.Info("connecting to postgres", "databaseUrl", databaseUrl)
28+
29+ db, err := sqlx.Connect("postgres", databaseUrl)
30+ if err != nil {
31+ return nil, err
32+ }
33+
34+ d.Db = db
35+ return d, nil
36+}
37+
38+func (me *PgsPsqlDB) Close() error {
39+ return me.Db.Close()
40+}
41+
42+func (me *PgsPsqlDB) FindUsers() ([]*db.User, error) {
43+ users := []*db.User{}
44+ err := me.Db.Select(&users, "SELECT * FROM app_users")
45+ return users, err
46+}
47+
48+func (me *PgsPsqlDB) FindUserByPubkey(key string) (*db.User, error) {
49+ pk := []db.PublicKey{}
50+ err := me.Db.Select(&pk, "SELECT * FROM public_keys WHERE public_key=$1", key)
51+ if err != nil {
52+ return nil, err
53+ }
54+ if len(pk) == 0 {
55+ return nil, fmt.Errorf("pubkey not found in our database: [%s]", key)
56+ }
57+ // When we run PublicKeyForKey and there are multiple public keys returned from the database
58+ // that should mean that we don't have the correct username for this public key.
59+ // When that happens we need to reject the authentication and ask the user to provide the correct
60+ // username when using ssh. So instead of `ssh <domain>` it should be `ssh user@<domain>`
61+ if len(pk) > 1 {
62+ return nil, &db.ErrMultiplePublicKeys{}
63+ }
64+
65+ return me.FindUser(pk[0].UserID)
66+}
67+
68+func (me *PgsPsqlDB) FindUser(userID string) (*db.User, error) {
69+ user := db.User{}
70+ err := me.Db.Get(&user, "SELECT * FROM app_users WHERE id=$1", userID)
71+ return &user, err
72+}
73+
74+func (me *PgsPsqlDB) FindUserByName(name string) (*db.User, error) {
75+ user := db.User{}
76+ err := me.Db.Get(&user, "SELECT * FROM app_users WHERE name=$1", name)
77+ return &user, err
78+}
79+
80+func (me *PgsPsqlDB) FindFeature(userID, name string) (*db.FeatureFlag, error) {
81+ ff := db.FeatureFlag{}
82+ err := me.Db.Get(&ff, "SELECT * FROM feature_flags WHERE user_id=$1 AND name=$2 ORDER BY expires_at DESC LIMIT 1", userID, name)
83+ return &ff, err
84+}
85+
86+func (me *PgsPsqlDB) InsertProject(userID, name, projectDir string) (string, error) {
87+ if !utils.IsValidSubdomain(name) {
88+ return "", fmt.Errorf("'%s' is not a valid project name, must match /^[a-z0-9-]+$/", name)
89+ }
90+
91+ var projectID string
92+ row := me.Db.QueryRow(
93+ "INSERT INTO projects (user_id, name, project_dir) VALUES ($1, $2, $3) RETURNING id",
94+ userID,
95+ name,
96+ projectDir,
97+ )
98+ err := row.Scan(&projectID)
99+ return projectID, err
100+}
101+
102+func (me *PgsPsqlDB) UpdateProject(userID, name string) error {
103+ _, err := me.Db.Exec("UPDATE projects SET updated_at=$1 WHERE user_id=$2 AND name=$3", time.Now(), userID, name)
104+ return err
105+}
106+
107+func (me *PgsPsqlDB) UpsertProject(userID, projectName, projectDir string) (*db.Project, error) {
108+ project, err := me.FindProjectByName(userID, projectName)
109+ if err == nil {
110+ // this just updates the `createdAt` timestamp, useful for book-keeping
111+ err = me.UpdateProject(userID, projectName)
112+ if err != nil {
113+ me.Logger.Error("could not update project", "err", err)
114+ return nil, err
115+ }
116+ return project, nil
117+ }
118+
119+ _, err = me.InsertProject(userID, projectName, projectName)
120+ if err != nil {
121+ me.Logger.Error("could not create project", "err", err)
122+ return nil, err
123+ }
124+ return me.FindProjectByName(userID, projectName)
125+}
126+
127+func (me *PgsPsqlDB) LinkToProject(userID, projectID, projectDir string, commit bool) error {
128+ linkToProject, err := me.FindProjectByName(userID, projectDir)
129+ if err != nil {
130+ return err
131+ }
132+ isAlreadyLinked := linkToProject.Name != linkToProject.ProjectDir
133+ sameProject := linkToProject.ID == projectID
134+
135+ /*
136+ A project linked to another project which is also linked to a
137+ project is forbidden. CI/CD Example:
138+ - ProjectProd links to ProjectStaging
139+ - ProjectStaging links to ProjectMain
140+ - We merge `main` and trigger a deploy which uploads to ProjectMain
141+ - All three get updated immediately
142+ This scenario was not the intent of our CI/CD. What we actually
143+ wanted was to create a snapshot of ProjectMain and have ProjectStaging
144+ link to the snapshot, but that's not the intended design of pgs.
145+
146+ So we want to close that gap here.
147+
148+ We ensure that `project.Name` and `project.ProjectDir` are identical
149+ when there is no aliasing.
150+ */
151+ if !sameProject && isAlreadyLinked {
152+ return fmt.Errorf(
153+ "cannot link (%s) to (%s) because it is also a link to (%s)",
154+ projectID,
155+ projectDir,
156+ linkToProject.ProjectDir,
157+ )
158+ }
159+
160+ if commit {
161+ _, err = me.Db.Exec(
162+ "UPDATE projects SET project_dir=$1, updated_at=$2 WHERE id=$3",
163+ projectDir,
164+ time.Now(),
165+ projectID,
166+ )
167+ }
168+ return err
169+}
170+
171+func (me *PgsPsqlDB) RemoveProject(projectID string) error {
172+ _, err := me.Db.Exec("DELETE FROM projects WHERE id=$1", projectID)
173+ return err
174+}
175+
176+func (me *PgsPsqlDB) FindProjectByName(userID, name string) (*db.Project, error) {
177+ project := db.Project{}
178+ err := me.Db.Get(&project, "SELECT * FROM projects WHERE user_id=$1 AND name=$2", userID, name)
179+ return &project, err
180+}
181+
182+func (me *PgsPsqlDB) FindProjectLinks(userID, name string) ([]*db.Project, error) {
183+ projects := []*db.Project{}
184+ err := me.Db.Select(
185+ &projects,
186+ "SELECT * FROM projects WHERE user_id=$1 AND name != project_dir AND project_dir=$2 ORDER BY name ASC",
187+ userID,
188+ name,
189+ )
190+ return projects, err
191+}
192+
193+func (me *PgsPsqlDB) FindProjectsByPrefix(userID, prefix string) ([]*db.Project, error) {
194+ projects := []*db.Project{}
195+ err := me.Db.Select(
196+ &projects,
197+ "SELECT * FROM projects WHERE user_id=$1 AND name=project_dir AND name ILIKE $2 ORDER BY updated_at ASC, name ASC",
198+ userID,
199+ prefix+"%",
200+ )
201+ return projects, err
202+}
203+
204+func (me *PgsPsqlDB) FindProjectsByUser(userID string) ([]*db.Project, error) {
205+ projects := []*db.Project{}
206+ err := me.Db.Select(
207+ &projects,
208+ "SELECT * FROM projects WHERE user_id=$1 ORDER BY name ASC",
209+ userID,
210+ )
211+ return projects, err
212+}
213+
214+func (me *PgsPsqlDB) FindProjects(by string) ([]*db.Project, error) {
215+ projects := []*db.Project{}
216+ err := me.Db.Select(
217+ &projects,
218+ `SELECT p.id, p.user_id, u.name as username, p.name, p.project_dir, p.acl, p.blocked, p.created_at, p.updated_at
219+ FROM projects AS p
220+ LEFT JOIN app_users AS u ON u.id = p.user_id
221+ ORDER BY $1 DESC`,
222+ by,
223+ )
224+ return projects, err
225+}
226+
227+func (me *PgsPsqlDB) UpdateProjectAcl(userID, name string, acl db.ProjectAcl) error {
228+ _, err := me.Db.Exec(
229+ "UPDATE projects SET acl=$3, updated_at=$4 WHERE user_id=$1 AND name=$2",
230+ userID, name, acl, time.Now(),
231+ )
232+ return err
233+}
+17, -36
1@@ -11,9 +11,7 @@ import (
2 "github.com/charmbracelet/promwish"
3 "github.com/charmbracelet/ssh"
4 "github.com/charmbracelet/wish"
5- "github.com/picosh/pico/db/postgres"
6 "github.com/picosh/pico/shared"
7- "github.com/picosh/pico/shared/storage"
8 wsh "github.com/picosh/pico/wish"
9 "github.com/picosh/send/auth"
10 "github.com/picosh/send/list"
11@@ -52,49 +50,28 @@ func withProxy(handler *UploadAssetHandler, otherMiddleware ...wish.Middleware)
12 }
13 }
14
15-func StartSshServer() {
16+func StartSshServer(cfg *PgsConfig, killCh chan error) {
17 host := utils.GetEnv("PGS_HOST", "0.0.0.0")
18 port := utils.GetEnv("PGS_SSH_PORT", "2222")
19 promPort := utils.GetEnv("PGS_PROM_PORT", "9222")
20- cfg := NewConfigSite()
21 logger := cfg.Logger
22- dbpool := postgres.NewDB(cfg.DbURL, cfg.Logger)
23- defer dbpool.Close()
24-
25- var st storage.StorageServe
26- var err error
27- if cfg.MinioURL == "" {
28- st, err = storage.NewStorageFS(cfg.Logger, cfg.StorageDir)
29- } else {
30- st, err = storage.NewStorageMinio(cfg.Logger, cfg.MinioURL, cfg.MinioUser, cfg.MinioPass)
31- }
32-
33- if err != nil {
34- logger.Error(err.Error())
35- return
36- }
37
38 ctx := context.Background()
39 defer ctx.Done()
40+
41+ cacheClearingQueue := make(chan string, 100)
42 handler := NewUploadAssetHandler(
43- dbpool,
44 cfg,
45- st,
46+ cacheClearingQueue,
47 ctx,
48 )
49
50- apiConfig := &shared.ApiConfig{
51- Cfg: cfg,
52- Dbpool: dbpool,
53- Storage: st,
54- }
55-
56 webTunnel := &tunkit.WebTunnelHandler{
57 Logger: logger,
58- HttpHandler: createHttpHandler(apiConfig),
59+ HttpHandler: createHttpHandler(cfg),
60 }
61
62- sshAuth := shared.NewSshAuthHandler(dbpool, logger, cfg)
63+ sshAuth := shared.NewSshAuthHandler(cfg.DB, logger)
64 s, err := wish.NewServer(
65 wish.WithAddress(fmt.Sprintf("%s:%s", host, port)),
66 wish.WithHostKeyPath("ssh_data/term_info_ed25519"),
67@@ -120,12 +97,16 @@ func StartSshServer() {
68 }
69 }()
70
71- <-done
72- logger.Info("stopping SSH server")
73- ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
74- defer func() { cancel() }()
75- if err := s.Shutdown(ctx); err != nil {
76- logger.Error("shutdown", "err", err.Error())
77- os.Exit(1)
78+ select {
79+ case <-done:
80+ logger.Info("stopping ssh server")
81+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
82+ defer func() { cancel() }()
83+ if err := s.Shutdown(ctx); err != nil {
84+ logger.Error("shutdown", "err", err.Error())
85+ os.Exit(1)
86+ }
87+ case <-killCh:
88+ logger.Info("stopping ssh server")
89 }
90 }
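With the config and kill channel threaded through, StartSshServer can now be wired up against any PgsDB/StorageServe implementation and stopped programmatically. A minimal standalone sketch using the in-memory backends this commit also adds (the sleep is only to keep the example short; a real caller would block on its own shutdown condition):

    package main

    import (
        "log/slog"
        "time"

        "github.com/picosh/pico/pgs"
        pgsdb "github.com/picosh/pico/pgs/db"
        "github.com/picosh/pico/shared/storage"
    )

    func main() {
        logger := slog.Default()
        // in-memory database and object storage, so no postgres or minio is required
        dbpool := pgsdb.NewDBMemory(logger)
        st, err := storage.NewStorageMemory(map[string]map[string]string{})
        if err != nil {
            panic(err)
        }
        cfg := pgs.NewPgsConfig(logger, dbpool, st)

        killCh := make(chan error)
        go pgs.StartSshServer(cfg, killCh)

        time.Sleep(5 * time.Second)
        killCh <- nil // unblocks the select above and shuts the server down
    }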
+188, -0
1@@ -0,0 +1,188 @@
2+package pgs
3+
4+import (
5+ "crypto/ed25519"
6+ "crypto/rand"
7+ "io"
8+ "log/slog"
9+ "os"
10+ "strings"
11+ "testing"
12+ "time"
13+
14+ "github.com/picosh/pico/db"
15+ pgsdb "github.com/picosh/pico/pgs/db"
16+ "github.com/picosh/pico/shared/storage"
17+ "github.com/picosh/utils"
18+ "github.com/pkg/sftp"
19+ "golang.org/x/crypto/ssh"
20+)
21+
22+func TestSshServer(t *testing.T) {
23+ logger := slog.Default()
24+ dbpool := pgsdb.NewDBMemory(logger)
25+ // setup test data
26+ dbpool.SetupTestData()
27+ st, err := storage.NewStorageMemory(map[string]map[string]string{})
28+ if err != nil {
29+ panic(err)
30+ }
31+ cfg := NewPgsConfig(logger, dbpool, st)
32+ done := make(chan error)
33+ go StartSshServer(cfg, done)
34+ // Hack to wait for startup
35+ time.Sleep(time.Millisecond * 100)
36+
37+ user := GenerateUser()
38+ // add user's pubkey to the default test account
39+ dbpool.Pubkeys = append(dbpool.Pubkeys, &db.PublicKey{
40+ ID: "nice-pubkey",
41+ UserID: dbpool.Users[0].ID,
42+ Key: utils.KeyForKeyText(user.signer.PublicKey()),
43+ })
44+
45+ client, err := user.NewClient()
46+ if err != nil {
47+ t.Error(err)
48+ return
49+ }
50+ defer client.Close()
51+
52+ _, err = WriteFileWithSftp(cfg, client)
53+ if err != nil {
54+ t.Error(err)
55+ return
56+ }
57+
58+ done <- nil
59+}
60+
61+type UserSSH struct {
62+ username string
63+ signer ssh.Signer
64+}
65+
66+func NewUserSSH(username string, signer ssh.Signer) *UserSSH {
67+ return &UserSSH{
68+ username: username,
69+ signer: signer,
70+ }
71+}
72+
73+func (s UserSSH) Public() string {
74+ pubkey := s.signer.PublicKey()
75+ return string(ssh.MarshalAuthorizedKey(pubkey))
76+}
77+
78+func (s UserSSH) MustCmd(client *ssh.Client, patch []byte, cmd string) string {
79+ res, err := s.Cmd(client, patch, cmd)
80+ if err != nil {
81+ panic(err)
82+ }
83+ return res
84+}
85+
86+func (s UserSSH) NewClient() (*ssh.Client, error) {
87+ host := "localhost:2222"
88+
89+ config := &ssh.ClientConfig{
90+ User: s.username,
91+ Auth: []ssh.AuthMethod{
92+ ssh.PublicKeys(s.signer),
93+ },
94+ HostKeyCallback: ssh.InsecureIgnoreHostKey(),
95+ }
96+
97+ client, err := ssh.Dial("tcp", host, config)
98+ return client, err
99+}
100+
101+func (s UserSSH) Cmd(client *ssh.Client, patch []byte, cmd string) (string, error) {
102+ session, err := client.NewSession()
103+ if err != nil {
104+ return "", err
105+ }
106+ defer session.Close()
107+
108+ stdinPipe, err := session.StdinPipe()
109+ if err != nil {
110+ return "", err
111+ }
112+
113+ stdoutPipe, err := session.StdoutPipe()
114+ if err != nil {
115+ return "", err
116+ }
117+
118+ if err := session.Start(cmd); err != nil {
119+ return "", err
120+ }
121+
122+ if patch != nil {
123+ _, err = stdinPipe.Write(patch)
124+ if err != nil {
125+ return "", err
126+ }
127+ }
128+
129+ stdinPipe.Close()
130+
131+ if err := session.Wait(); err != nil {
132+ return "", err
133+ }
134+
135+ buf := new(strings.Builder)
136+ _, err = io.Copy(buf, stdoutPipe)
137+ if err != nil {
138+ return "", err
139+ }
140+
141+ return buf.String(), nil
142+}
143+
144+func GenerateUser() UserSSH {
145+ _, userKey, err := ed25519.GenerateKey(rand.Reader)
146+ if err != nil {
147+ panic(err)
148+ }
149+
150+ userSigner, err := ssh.NewSignerFromKey(userKey)
151+ if err != nil {
152+ panic(err)
153+ }
154+
155+ return UserSSH{
156+ username: "testuser",
157+ signer: userSigner,
158+ }
159+}
160+
161+func WriteFileWithSftp(cfg *PgsConfig, conn *ssh.Client) (*os.FileInfo, error) {
162+ // open an SFTP session over an existing ssh connection.
163+ client, err := sftp.NewClient(conn)
164+ if err != nil {
165+ cfg.Logger.Error("could not create sftp client", "err", err)
166+ return nil, err
167+ }
168+ defer client.Close()
169+
170+ f, err := client.Create("test/hello.txt")
171+ if err != nil {
172+ cfg.Logger.Error("could not create file", "err", err)
173+ return nil, err
174+ }
175+ if _, err := f.Write([]byte("Hello world!")); err != nil {
176+ cfg.Logger.Error("could not write to file", "err", err)
177+ return nil, err
178+ }
179+ f.Close()
180+
181+ // check it's there
182+ fi, err := client.Lstat("test/hello.txt")
183+ if err != nil {
184+ cfg.Logger.Error("could not get stat for file", "err", err)
185+ return nil, err
186+ }
187+
188+ return &fi, nil
189+}
+16, -14
1@@ -4,6 +4,7 @@ import (
2 "context"
3 "net/http"
4 "strings"
5+ "time"
6
7 "github.com/charmbracelet/ssh"
8 "github.com/picosh/pico/db"
9@@ -43,10 +44,9 @@ func getInfoFromUser(user string) (string, string) {
10 return "", user
11 }
12
13-func createHttpHandler(apiConfig *shared.ApiConfig) CtxHttpBridge {
14+func createHttpHandler(cfg *PgsConfig) CtxHttpBridge {
15 return func(ctx ssh.Context) http.Handler {
16- dbh := apiConfig.Dbpool
17- logger := apiConfig.Cfg.Logger
18+ logger := cfg.Logger
19 asUser, subdomain := getInfoFromUser(ctx.User())
20 log := logger.With(
21 "subdomain", subdomain,
22@@ -69,7 +69,7 @@ func createHttpHandler(apiConfig *shared.ApiConfig) CtxHttpBridge {
23 return http.HandlerFunc(shared.UnauthorizedHandler)
24 }
25
26- owner, err := dbh.FindUserForName(props.Username)
27+ owner, err := cfg.DB.FindUserByName(props.Username)
28 if err != nil {
29 log.Error(
30 "could not find user from name",
31@@ -82,13 +82,13 @@ func createHttpHandler(apiConfig *shared.ApiConfig) CtxHttpBridge {
32 "owner", owner.Name,
33 )
34
35- project, err := dbh.FindProjectByName(owner.ID, props.ProjectName)
36+ project, err := cfg.DB.FindProjectByName(owner.ID, props.ProjectName)
37 if err != nil {
38 log.Error("could not get project by name", "project", props.ProjectName, "err", err.Error())
39 return http.HandlerFunc(shared.UnauthorizedHandler)
40 }
41
42- requester, _ := dbh.FindUserForKey("", pubkey)
43+ requester, _ := cfg.DB.FindUserByPubkey(pubkey)
44 if requester != nil {
45 log = log.With(
46 "requester", requester.Name,
47@@ -97,12 +97,19 @@ func createHttpHandler(apiConfig *shared.ApiConfig) CtxHttpBridge {
48
49 // impersonation logic
50 if asUser != "" {
51- isAdmin := dbh.HasFeatureForUser(requester.ID, "admin")
52+ isAdmin := false
53+ ff, _ := cfg.DB.FindFeature(requester.ID, "admin")
54+ if ff != nil {
55+ if ff.ExpiresAt.Before(time.Now()) {
56+ isAdmin = true
57+ }
58+ }
59+
60 if !isAdmin {
61 log.Error("impersonation attempt failed")
62 return http.HandlerFunc(shared.UnauthorizedHandler)
63 }
64- requester, _ = dbh.FindUserForName(asUser)
65+ requester, _ = cfg.DB.FindUserByName(asUser)
66 }
67
68 ctx.Permissions().Extensions["user_id"] = requester.ID
69@@ -118,12 +125,7 @@ func createHttpHandler(apiConfig *shared.ApiConfig) CtxHttpBridge {
70
71 log.Info("user has access to site")
72
73- routes := NewWebRouter(
74- apiConfig.Cfg,
75- logger,
76- apiConfig.Dbpool,
77- apiConfig.Storage,
78- )
79+ routes := NewWebRouter(cfg)
80 tunnelRouter := TunnelWebRouter{routes, subdomain}
81 tunnelRouter.initRouters()
82 return &tunnelRouter
+49, -35
1@@ -18,6 +18,7 @@ import (
2 "github.com/charmbracelet/ssh"
3 "github.com/charmbracelet/wish"
4 "github.com/picosh/pico/db"
5+ pgsdb "github.com/picosh/pico/pgs/db"
6 "github.com/picosh/pico/shared"
7 "github.com/picosh/pobj"
8 sst "github.com/picosh/pobj/storage"
9@@ -99,21 +100,14 @@ type FileData struct {
10 }
11
12 type UploadAssetHandler struct {
13- DBPool db.DB
14- Cfg *shared.ConfigSite
15- Storage sst.ObjectStorage
16+ Cfg *PgsConfig
17 CacheClearingQueue chan string
18 }
19
20-func NewUploadAssetHandler(dbpool db.DB, cfg *shared.ConfigSite, storage sst.ObjectStorage, ctx context.Context) *UploadAssetHandler {
21- // Enable buffering so we don't slow down uploads.
22- ch := make(chan string, 100)
23- go runCacheQueue(cfg, ctx, ch)
24- // publish all file uploads to a pipe topic
25+func NewUploadAssetHandler(cfg *PgsConfig, ch chan string, ctx context.Context) *UploadAssetHandler {
26+ go runCacheQueue(cfg, ctx)
27 return &UploadAssetHandler{
28- DBPool: dbpool,
29 Cfg: cfg,
30- Storage: storage,
31 CacheClearingQueue: ch,
32 }
33 }
34@@ -123,7 +117,7 @@ func (h *UploadAssetHandler) GetLogger() *slog.Logger {
35 }
36
37 func (h *UploadAssetHandler) Read(s ssh.Session, entry *sendutils.FileEntry) (os.FileInfo, sendutils.ReaderAtCloser, error) {
38- user, err := h.DBPool.FindUser(s.Permissions().Extensions["user_id"])
39+ user, err := h.Cfg.DB.FindUser(s.Permissions().Extensions["user_id"])
40 if err != nil {
41 return nil, nil, err
42 }
43@@ -135,13 +129,13 @@ func (h *UploadAssetHandler) Read(s ssh.Session, entry *sendutils.FileEntry) (os
44 FModTime: time.Unix(entry.Mtime, 0),
45 }
46
47- bucket, err := h.Storage.GetBucket(shared.GetAssetBucketName(user.ID))
48+ bucket, err := h.Cfg.Storage.GetBucket(shared.GetAssetBucketName(user.ID))
49 if err != nil {
50 return nil, nil, err
51 }
52
53 fname := shared.GetAssetFileName(entry)
54- contents, info, err := h.Storage.GetObject(bucket, fname)
55+ contents, info, err := h.Cfg.Storage.GetObject(bucket, fname)
56 if err != nil {
57 return nil, nil, err
58 }
59@@ -157,7 +151,7 @@ func (h *UploadAssetHandler) Read(s ssh.Session, entry *sendutils.FileEntry) (os
60 func (h *UploadAssetHandler) List(s ssh.Session, fpath string, isDir bool, recursive bool) ([]os.FileInfo, error) {
61 var fileList []os.FileInfo
62
63- user, err := h.DBPool.FindUser(s.Permissions().Extensions["user_id"])
64+ user, err := h.Cfg.DB.FindUser(s.Permissions().Extensions["user_id"])
65 if err != nil {
66 return fileList, err
67 }
68@@ -165,7 +159,7 @@ func (h *UploadAssetHandler) List(s ssh.Session, fpath string, isDir bool, recur
69 cleanFilename := fpath
70
71 bucketName := shared.GetAssetBucketName(user.ID)
72- bucket, err := h.Storage.GetBucket(bucketName)
73+ bucket, err := h.Cfg.Storage.GetBucket(bucketName)
74 if err != nil {
75 return fileList, err
76 }
77@@ -187,7 +181,7 @@ func (h *UploadAssetHandler) List(s ssh.Session, fpath string, isDir bool, recur
78 cleanFilename += "/"
79 }
80
81- foundList, err := h.Storage.ListObjects(bucket, cleanFilename, recursive)
82+ foundList, err := h.Cfg.Storage.ListObjects(bucket, cleanFilename, recursive)
83 if err != nil {
84 return fileList, err
85 }
86@@ -199,19 +193,19 @@ func (h *UploadAssetHandler) List(s ssh.Session, fpath string, isDir bool, recur
87 }
88
89 func (h *UploadAssetHandler) Validate(s ssh.Session) error {
90- user, err := h.DBPool.FindUser(s.Permissions().Extensions["user_id"])
91+ user, err := h.Cfg.DB.FindUser(s.Permissions().Extensions["user_id"])
92 if err != nil {
93 return err
94 }
95
96 assetBucket := shared.GetAssetBucketName(user.ID)
97- bucket, err := h.Storage.UpsertBucket(assetBucket)
98+ bucket, err := h.Cfg.Storage.UpsertBucket(assetBucket)
99 if err != nil {
100 return err
101 }
102 s.Context().SetValue(ctxBucketKey{}, bucket)
103
104- totalStorageSize, err := h.Storage.GetBucketQuota(bucket)
105+ totalStorageSize, err := h.Cfg.Storage.GetBucketQuota(bucket)
106 if err != nil {
107 return err
108 }
109@@ -225,14 +219,14 @@ func (h *UploadAssetHandler) Validate(s ssh.Session) error {
110 h.Cfg.Logger.Info(
111 "attempting to upload files",
112 "user", user.Name,
113- "space", h.Cfg.Space,
114+ "txtPrefix", h.Cfg.TxtPrefix,
115 )
116
117 return nil
118 }
119
120 func (h *UploadAssetHandler) findDenylist(bucket sst.Bucket, project *db.Project, logger *slog.Logger) (string, error) {
121- fp, _, err := h.Storage.GetObject(bucket, filepath.Join(project.ProjectDir, "_pgs_ignore"))
122+ fp, _, err := h.Cfg.Storage.GetObject(bucket, filepath.Join(project.ProjectDir, "_pgs_ignore"))
123 if err != nil {
124 return "", fmt.Errorf("_pgs_ignore not found")
125 }
126@@ -249,8 +243,28 @@ func (h *UploadAssetHandler) findDenylist(bucket sst.Bucket, project *db.Project
127 return str, nil
128 }
129
130+func findPlusFF(dbpool pgsdb.PgsDB, cfg *PgsConfig, userID string) *db.FeatureFlag {
131+ ff, _ := dbpool.FindFeature(userID, "plus")
132+ // we have free tiers so users might not have a feature flag
133+ // in which case we set sane defaults
134+ if ff == nil {
135+ ff = db.NewFeatureFlag(
136+ userID,
137+ "plus",
138+ cfg.MaxSize,
139+ cfg.MaxAssetSize,
140+ cfg.MaxSpecialFileSize,
141+ )
142+ }
143+ // this is jank
144+ ff.Data.StorageMax = ff.FindStorageMax(cfg.MaxSize)
145+ ff.Data.FileMax = ff.FindFileMax(cfg.MaxAssetSize)
146+ ff.Data.SpecialFileMax = ff.FindSpecialFileMax(cfg.MaxSpecialFileSize)
147+ return ff
148+}
149+
150 func (h *UploadAssetHandler) Write(s ssh.Session, entry *sendutils.FileEntry) (string, error) {
151- user, err := h.DBPool.FindUser(s.Permissions().Extensions["user_id"])
152+ user, err := h.Cfg.DB.FindUser(s.Permissions().Extensions["user_id"])
153 if user == nil || err != nil {
154 h.Cfg.Logger.Error("user not found in ctx", "err", err.Error())
155 return "", err
156@@ -279,7 +293,7 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *sendutils.FileEntry) (s
157
158 // find, create, or update project if we haven't already done it
159 if project == nil {
160- project, err = h.DBPool.UpsertProject(user.ID, projectName, projectName)
161+ project, err = h.Cfg.DB.UpsertProject(user.ID, projectName, projectName)
162 if err != nil {
163 logger.Error("upsert project", "err", err.Error())
164 return "", err
165@@ -293,7 +307,7 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *sendutils.FileEntry) (s
166 }
167
168 if entry.Mode.IsDir() {
169- _, _, err := h.Storage.PutObject(
170+ _, _, err := h.Cfg.Storage.PutObject(
171 bucket,
172 path.Join(shared.GetAssetFileName(entry), "._pico_keep_dir"),
173 bytes.NewReader([]byte{}),
174@@ -302,11 +316,11 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *sendutils.FileEntry) (s
175 return "", err
176 }
177
178- featureFlag := shared.FindPlusFF(h.DBPool, h.Cfg, user.ID)
179+ featureFlag := findPlusFF(h.Cfg.DB, h.Cfg, user.ID)
180 	// calculate the filesize difference between the same file already
181 // stored and the updated file being uploaded
182 assetFilename := shared.GetAssetFileName(entry)
183- obj, info, _ := h.Storage.GetObject(bucket, assetFilename)
184+ obj, info, _ := h.Cfg.Storage.GetObject(bucket, assetFilename)
185 var curFileSize int64
186 if info != nil {
187 curFileSize = info.Size
188@@ -400,7 +414,7 @@ func (h *UploadAssetHandler) Write(s ssh.Session, entry *sendutils.FileEntry) (s
189 )
190
191 surrogate := getSurrogateKey(user.Name, projectName)
192- h.CacheClearingQueue <- surrogate
193+ h.Cfg.CacheClearingQueue <- surrogate
194
195 return str, err
196 }
197@@ -411,7 +425,7 @@ func isSpecialFile(entry *sendutils.FileEntry) bool {
198 }
199
200 func (h *UploadAssetHandler) Delete(s ssh.Session, entry *sendutils.FileEntry) error {
201- user, err := h.DBPool.FindUser(s.Permissions().Extensions["user_id"])
202+ user, err := h.Cfg.DB.FindUser(s.Permissions().Extensions["user_id"])
203 if err != nil {
204 h.Cfg.Logger.Error("user not found in ctx", "err", err.Error())
205 return err
206@@ -447,7 +461,7 @@ func (h *UploadAssetHandler) Delete(s ssh.Session, entry *sendutils.FileEntry) e
207 pathDir := filepath.Dir(assetFilepath)
208 fileName := filepath.Base(assetFilepath)
209
210- sibs, err := h.Storage.ListObjects(bucket, pathDir+"/", false)
211+ sibs, err := h.Cfg.Storage.ListObjects(bucket, pathDir+"/", false)
212 if err != nil {
213 return err
214 }
215@@ -457,7 +471,7 @@ func (h *UploadAssetHandler) Delete(s ssh.Session, entry *sendutils.FileEntry) e
216 })
217
218 if len(sibs) == 0 {
219- _, _, err := h.Storage.PutObject(
220+ _, _, err := h.Cfg.Storage.PutObject(
221 bucket,
222 filepath.Join(pathDir, "._pico_keep_dir"),
223 bytes.NewReader([]byte{}),
224@@ -467,10 +481,10 @@ func (h *UploadAssetHandler) Delete(s ssh.Session, entry *sendutils.FileEntry) e
225 return err
226 }
227 }
228- err = h.Storage.DeleteObject(bucket, assetFilepath)
229+ err = h.Cfg.Storage.DeleteObject(bucket, assetFilepath)
230
231 surrogate := getSurrogateKey(user.Name, projectName)
232- h.CacheClearingQueue <- surrogate
233+ h.Cfg.CacheClearingQueue <- surrogate
234
235 if err != nil {
236 return err
237@@ -514,7 +528,7 @@ func (h *UploadAssetHandler) writeAsset(reader io.Reader, data *FileData) (int64
238 "filename", assetFilepath,
239 )
240
241- _, fsize, err := h.Storage.PutObject(
242+ _, fsize, err := h.Cfg.Storage.PutObject(
243 data.Bucket,
244 assetFilepath,
245 reader,
246@@ -527,13 +541,13 @@ func (h *UploadAssetHandler) writeAsset(reader io.Reader, data *FileData) (int64
247 // One message arrives per file that is written/deleted during uploads.
248 // Repeated messages for the same site are grouped so that we only flush once
249 // per site per 5 seconds.
250-func runCacheQueue(cfg *shared.ConfigSite, ctx context.Context, ch chan string) {
251+func runCacheQueue(cfg *PgsConfig, ctx context.Context) {
252 send := createPubCacheDrain(ctx, cfg.Logger)
253 var pendingFlushes sync.Map
254 tick := time.Tick(5 * time.Second)
255 for {
256 select {
257- case host := <-ch:
258+ case host := <-cfg.CacheClearingQueue:
259 pendingFlushes.Store(host, host)
260 case <-tick:
261 go func() {
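
The cache-clearing queue now lives on PgsConfig, so the upload handlers push a surrogate key into cfg.CacheClearingQueue and runCacheQueue drains it on a fixed tick. Below is a standalone sketch of that debounce pattern (collapse duplicate keys, flush at most once per tick); the names runDebounce, queue, and flush are illustrative and are not part of this commit.

package main

import (
	"fmt"
	"sync"
	"time"
)

// runDebounce mirrors the runCacheQueue loop: keys arrive on a channel,
// duplicates are collapsed in a sync.Map, and the pending set is flushed
// once per tick.
func runDebounce(queue <-chan string, flush func(key string)) {
	var pending sync.Map
	tick := time.Tick(5 * time.Second)
	for {
		select {
		case key := <-queue:
			// repeated keys overwrite each other, so each site flushes once
			pending.Store(key, key)
		case <-tick:
			pending.Range(func(k, _ any) bool {
				flush(k.(string))
				pending.Delete(k)
				return true
			})
		}
	}
}

func main() {
	queue := make(chan string, 16)
	go runDebounce(queue, func(key string) { fmt.Println("purge", key) })
	queue <- "user-project"
	queue <- "user-project" // collapsed with the send above
	time.Sleep(6 * time.Second)
}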
+99,
-101
1@@ -4,6 +4,7 @@ import (
2 "bufio"
3 "context"
4 "fmt"
5+ "html/template"
6 "log/slog"
7 "net/http"
8 "net/url"
9@@ -20,7 +21,6 @@ import (
10 "github.com/darkweak/storages/core"
11 "github.com/gorilla/feeds"
12 "github.com/picosh/pico/db"
13- "github.com/picosh/pico/db/postgres"
14 "github.com/picosh/pico/shared"
15 "github.com/picosh/pico/shared/storage"
16 sst "github.com/picosh/pobj/storage"
17@@ -39,26 +39,9 @@ func (c *CachedHttp) ServeHTTP(writer http.ResponseWriter, req *http.Request) {
18 })
19 }
20
21-func StartApiServer() {
22+func StartApiServer(cfg *PgsConfig) {
23 ctx := context.Background()
24- cfg := NewConfigSite()
25- logger := cfg.Logger
26
27- dbpool := postgres.NewDB(cfg.DbURL, cfg.Logger)
28- defer dbpool.Close()
29-
30- var st storage.StorageServe
31- var err error
32- if cfg.MinioURL == "" {
33- st, err = storage.NewStorageFS(cfg.Logger, cfg.StorageDir)
34- } else {
35- st, err = storage.NewStorageMinio(cfg.Logger, cfg.MinioURL, cfg.MinioUser, cfg.MinioPass)
36- }
37-
38- if err != nil {
39- logger.Error("could not connect to object storage", "err", err.Error())
40- return
41- }
42 ttl := configurationtypes.Duration{Duration: cfg.CacheTTL}
43 stale := configurationtypes.Duration{Duration: cfg.CacheTTL * 2}
44 c := &middleware.BaseConfiguration{
45@@ -81,10 +64,10 @@ func StartApiServer() {
46 DefaultCacheControl: cfg.CacheControl,
47 },
48 }
49- c.SetLogger(&CompatLogger{logger})
50+ c.SetLogger(&CompatLogger{cfg.Logger})
51 storages.InitFromConfiguration(c)
52 httpCache := middleware.NewHTTPCacheHandler(c)
53- routes := NewWebRouter(cfg, logger, dbpool, st)
54+ routes := NewWebRouter(cfg)
55 cacher := &CachedHttp{
56 handler: httpCache,
57 routes: routes,
58@@ -92,14 +75,14 @@ func StartApiServer() {
59
60 go routes.cacheMgmt(ctx, httpCache)
61
62- portStr := fmt.Sprintf(":%s", cfg.Port)
63- logger.Info(
64+ portStr := fmt.Sprintf(":%s", cfg.WebPort)
65+ cfg.Logger.Info(
66 "starting server on port",
67- "port", cfg.Port,
68+ "port", cfg.WebPort,
69 "domain", cfg.Domain,
70 )
71- err = http.ListenAndServe(portStr, cacher)
72- logger.Error(
73+ err := http.ListenAndServe(portStr, cacher)
74+ cfg.Logger.Error(
75 "listen and serve",
76 "err", err.Error(),
77 )
78@@ -108,20 +91,14 @@ func StartApiServer() {
79 type HasPerm = func(proj *db.Project) bool
80
81 type WebRouter struct {
82- Cfg *shared.ConfigSite
83- Logger *slog.Logger
84- Dbpool db.DB
85- Storage storage.StorageServe
86+ Cfg *PgsConfig
87 RootRouter *http.ServeMux
88 UserRouter *http.ServeMux
89 }
90
91-func NewWebRouter(cfg *shared.ConfigSite, logger *slog.Logger, dbpool db.DB, st storage.StorageServe) *WebRouter {
92+func NewWebRouter(cfg *PgsConfig) *WebRouter {
93 router := &WebRouter{
94- Cfg: cfg,
95- Logger: logger,
96- Dbpool: dbpool,
97- Storage: st,
98+ Cfg: cfg,
99 }
100 router.initRouters()
101 return router
102@@ -154,7 +131,7 @@ func (web *WebRouter) initRouters() {
103
104 func (web *WebRouter) serveFile(file string, contentType string) http.HandlerFunc {
105 return func(w http.ResponseWriter, r *http.Request) {
106- logger := web.Logger
107+ logger := web.Cfg.Logger
108 cfg := web.Cfg
109
110 contents, err := os.ReadFile(cfg.StaticPath(fmt.Sprintf("public/%s", file)))
111@@ -180,11 +157,28 @@ func (web *WebRouter) serveFile(file string, contentType string) http.HandlerFun
112 }
113 }
114
115+func renderTemplate(cfg *PgsConfig, templates []string) (*template.Template, error) {
116+ files := make([]string, len(templates))
117+ copy(files, templates)
118+ files = append(
119+ files,
120+ cfg.StaticPath("html/footer.partial.tmpl"),
121+ cfg.StaticPath("html/marketing-footer.partial.tmpl"),
122+ cfg.StaticPath("html/base.layout.tmpl"),
123+ )
124+
125+ ts, err := template.New("base").ParseFiles(files...)
126+ if err != nil {
127+ return nil, err
128+ }
129+ return ts, nil
130+}
131+
132 func (web *WebRouter) createPageHandler(fname string) http.HandlerFunc {
133 return func(w http.ResponseWriter, r *http.Request) {
134- logger := web.Logger
135+ logger := web.Cfg.Logger
136 cfg := web.Cfg
137- ts, err := shared.RenderTemplate(cfg, []string{cfg.StaticPath(fname)})
138+ ts, err := renderTemplate(cfg, []string{cfg.StaticPath(fname)})
139
140 if err != nil {
141 logger.Error(
142@@ -197,7 +191,7 @@ func (web *WebRouter) createPageHandler(fname string) http.HandlerFunc {
143 }
144
145 data := shared.PageData{
146- Site: *cfg.GetSiteData(),
147+ Site: shared.SitePageData{Domain: template.URL(cfg.Domain), HomeURL: "/"},
148 }
149 err = ts.Execute(w, data)
150 if err != nil {
151@@ -212,54 +206,52 @@ func (web *WebRouter) createPageHandler(fname string) http.HandlerFunc {
152 }
153
154 func (web *WebRouter) checkHandler(w http.ResponseWriter, r *http.Request) {
155- dbpool := web.Dbpool
156+ dbpool := web.Cfg.DB
157 cfg := web.Cfg
158- logger := web.Logger
159-
160- if cfg.IsCustomdomains() {
161- hostDomain := r.URL.Query().Get("domain")
162- appDomain := strings.Split(cfg.Domain, ":")[0]
163-
164- if !strings.Contains(hostDomain, appDomain) {
165- subdomain := shared.GetCustomDomain(hostDomain, cfg.Space)
166- props, err := shared.GetProjectFromSubdomain(subdomain)
167- if err != nil {
168- logger.Error(
169- "could not get project from subdomain",
170- "subdomain", subdomain,
171- "err", err.Error(),
172- )
173- w.WriteHeader(http.StatusNotFound)
174- return
175- }
176+ logger := web.Cfg.Logger
177
178- u, err := dbpool.FindUserForName(props.Username)
179- if err != nil {
180- logger.Error("could not find user", "err", err.Error())
181- w.WriteHeader(http.StatusNotFound)
182- return
183- }
184+ hostDomain := r.URL.Query().Get("domain")
185+ appDomain := strings.Split(cfg.Domain, ":")[0]
186+
187+ if !strings.Contains(hostDomain, appDomain) {
188+ subdomain := shared.GetCustomDomain(hostDomain, cfg.TxtPrefix)
189+ props, err := shared.GetProjectFromSubdomain(subdomain)
190+ if err != nil {
191+ logger.Error(
192+ "could not get project from subdomain",
193+ "subdomain", subdomain,
194+ "err", err.Error(),
195+ )
196+ w.WriteHeader(http.StatusNotFound)
197+ return
198+ }
199+
200+ u, err := dbpool.FindUserByName(props.Username)
201+ if err != nil {
202+ logger.Error("could not find user", "err", err.Error())
203+ w.WriteHeader(http.StatusNotFound)
204+ return
205+ }
206
207- logger = logger.With(
208+ logger = logger.With(
209+ "user", u.Name,
210+ "project", props.ProjectName,
211+ )
212+ p, err := dbpool.FindProjectByName(u.ID, props.ProjectName)
213+ if err != nil {
214+ logger.Error(
215+ "could not find project for user",
216 "user", u.Name,
217 "project", props.ProjectName,
218+ "err", err.Error(),
219 )
220- p, err := dbpool.FindProjectByName(u.ID, props.ProjectName)
221- if err != nil {
222- logger.Error(
223- "could not find project for user",
224- "user", u.Name,
225- "project", props.ProjectName,
226- "err", err.Error(),
227- )
228- w.WriteHeader(http.StatusNotFound)
229- return
230- }
231+ w.WriteHeader(http.StatusNotFound)
232+ return
233+ }
234
235- if u != nil && p != nil {
236- w.WriteHeader(http.StatusOK)
237- return
238- }
239+ if u != nil && p != nil {
240+ w.WriteHeader(http.StatusOK)
241+ return
242 }
243 }
244
245@@ -268,21 +260,21 @@ func (web *WebRouter) checkHandler(w http.ResponseWriter, r *http.Request) {
246
247 func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.SouinBaseHandler) {
248 storer := httpCache.Storers[0]
249- drain := createSubCacheDrain(ctx, web.Logger)
250+ drain := createSubCacheDrain(ctx, web.Cfg.Logger)
251
252 for {
253 scanner := bufio.NewScanner(drain)
254 for scanner.Scan() {
255 surrogateKey := strings.TrimSpace(scanner.Text())
256- web.Logger.Info("received cache-drain item", "surrogateKey", surrogateKey)
257+ web.Cfg.Logger.Info("received cache-drain item", "surrogateKey", surrogateKey)
258
259 if surrogateKey == "*" {
260 storer.DeleteMany(".+")
261 err := httpCache.SurrogateKeyStorer.Destruct()
262 if err != nil {
263- web.Logger.Error("could not clear cache and surrogate key store", "err", err)
264+ web.Cfg.Logger.Error("could not clear cache and surrogate key store", "err", err)
265 } else {
266- web.Logger.Info("successfully cleared cache and surrogate keys store")
267+ web.Cfg.Logger.Info("successfully cleared cache and surrogate keys store")
268 }
269 continue
270 }
271@@ -298,7 +290,7 @@ func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.Souin
272 if e := proto.Unmarshal(b, &mapping); e == nil {
273 for k := range mapping.GetMapping() {
274 qkey, _ := url.QueryUnescape(k)
275- web.Logger.Info(
276+ web.Cfg.Logger.Info(
277 "deleting key from surrogate cache",
278 "surrogateKey", surrogateKey,
279 "key", qkey,
280@@ -309,7 +301,7 @@ func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.Souin
281 }
282
283 qkey, _ := url.QueryUnescape(key)
284- web.Logger.Info(
285+ web.Cfg.Logger.Info(
286 "deleting from cache",
287 "surrogateKey", surrogateKey,
288 "key", core.MappingKeyPrefix+qkey,
289@@ -322,11 +314,11 @@ func (web *WebRouter) cacheMgmt(ctx context.Context, httpCache *middleware.Souin
290
291 func (web *WebRouter) createRssHandler(by string) http.HandlerFunc {
292 return func(w http.ResponseWriter, r *http.Request) {
293- dbpool := web.Dbpool
294- logger := web.Logger
295+ dbpool := web.Cfg.DB
296+ logger := web.Cfg.Logger
297 cfg := web.Cfg
298
299- pager, err := dbpool.FindAllProjects(&db.Pager{Num: 100, Page: 0}, by)
300+ projects, err := dbpool.FindProjects(by)
301 if err != nil {
302 logger.Error("could not find projects", "err", err.Error())
303 http.Error(w, err.Error(), http.StatusInternalServerError)
304@@ -335,14 +327,14 @@ func (web *WebRouter) createRssHandler(by string) http.HandlerFunc {
305
306 feed := &feeds.Feed{
307 Title: fmt.Sprintf("%s discovery feed %s", cfg.Domain, by),
308- Link: &feeds.Link{Href: cfg.ReadURL()},
309+ Link: &feeds.Link{Href: "https://pgs.sh"},
310 Description: fmt.Sprintf("%s projects %s", cfg.Domain, by),
311 Author: &feeds.Author{Name: cfg.Domain},
312 Created: time.Now(),
313 }
314
315 var feedItems []*feeds.Item
316- for _, project := range pager.Data {
317+ for _, project := range projects {
318 realUrl := strings.TrimSuffix(
319 cfg.AssetURL(project.Username, project.Name, ""),
320 "/",
321@@ -385,7 +377,7 @@ func (web *WebRouter) createRssHandler(by string) http.HandlerFunc {
322 }
323
324 func (web *WebRouter) Perm(proj *db.Project) bool {
325- return proj.Acl.Type == "public"
326+ return proj.Acl.Type == "public" || proj.Acl.Type == ""
327 }
328
329 var imgRegex = regexp.MustCompile("(.+.(?:jpg|jpeg|png|gif|webp|svg))(/.+)")
330@@ -414,7 +406,7 @@ func (web *WebRouter) ImageRequest(w http.ResponseWriter, r *http.Request) {
331 opts, err := storage.UriToImgProcessOpts(imgOpts)
332 if err != nil {
333 errMsg := fmt.Sprintf("error processing img options: %s", err.Error())
334- web.Logger.Error("error processing img options", "err", errMsg)
335+ web.Cfg.Logger.Error("error processing img options", "err", errMsg)
336 http.Error(w, errMsg, http.StatusUnprocessableEntity)
337 return
338 }
339@@ -425,7 +417,7 @@ func (web *WebRouter) ImageRequest(w http.ResponseWriter, r *http.Request) {
340 func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fromImgs bool, hasPerm HasPerm, w http.ResponseWriter, r *http.Request) {
341 subdomain := shared.GetSubdomain(r)
342
343- logger := web.Logger.With(
344+ logger := web.Cfg.Logger.With(
345 "subdomain", subdomain,
346 "filename", fname,
347 "url", fmt.Sprintf("%s%s", r.Host, r.URL.Path),
348@@ -447,7 +439,7 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fro
349 "user", props.Username,
350 )
351
352- user, err := web.Dbpool.FindUserForName(props.Username)
353+ user, err := web.Cfg.DB.FindUserByName(props.Username)
354 if err != nil {
355 logger.Info("user not found")
356 http.Error(w, "user not found", http.StatusNotFound)
357@@ -465,10 +457,10 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fro
358 var bucket sst.Bucket
359 // imgs has a different bucket directory
360 if fromImgs {
361- bucket, err = web.Storage.GetBucket(shared.GetImgsBucketName(user.ID))
362+ bucket, err = web.Cfg.Storage.GetBucket(shared.GetImgsBucketName(user.ID))
363 } else {
364- bucket, err = web.Storage.GetBucket(shared.GetAssetBucketName(user.ID))
365- project, perr := web.Dbpool.FindProjectByName(user.ID, props.ProjectName)
366+ bucket, err = web.Cfg.Storage.GetBucket(shared.GetAssetBucketName(user.ID))
367+ project, perr := web.Cfg.DB.FindProjectByName(user.ID, props.ProjectName)
368 if perr != nil {
369 logger.Info("project not found")
370 http.Error(w, "project not found", http.StatusNotFound)
371@@ -500,7 +492,13 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fro
372 return
373 }
374
375- hasPicoPlus := web.Dbpool.HasFeatureForUser(user.ID, "plus")
376+ hasPicoPlus := false
377+ ff, _ := web.Cfg.DB.FindFeature(user.ID, "plus")
378+ if ff != nil {
379+		if ff.ExpiresAt.After(time.Now()) {
380+ hasPicoPlus = true
381+ }
382+ }
383
384 asset := &ApiAssetHandler{
385 WebRouter: web,
386@@ -521,9 +519,9 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, fro
387 }
388
389 func (web *WebRouter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
390- subdomain := shared.GetSubdomainFromRequest(r, web.Cfg.Domain, web.Cfg.Space)
391+ subdomain := shared.GetSubdomainFromRequest(r, web.Cfg.Domain, web.Cfg.TxtPrefix)
392 if web.RootRouter == nil || web.UserRouter == nil {
393- web.Logger.Error("routers not initialized")
394+ web.Cfg.Logger.Error("routers not initialized")
395 http.Error(w, "routers not initialized", http.StatusInternalServerError)
396 return
397 }
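
Because NewWebRouter now needs only a *PgsConfig, the web server can be stood up against the in-memory database and storage that this commit introduces. The following is a sketch built from the constructors visible in this diff (pgsdb.NewDBMemory, storage.NewStorageMemory, NewPgsConfig, NewWebRouter); the listen address and the seeded content are made up for illustration.

package main

import (
	"log/slog"
	"net/http"

	"github.com/picosh/pico/pgs"
	pgsdb "github.com/picosh/pico/pgs/db"
	"github.com/picosh/pico/shared"
	"github.com/picosh/pico/shared/storage"
)

func main() {
	logger := slog.Default()

	// in-memory DB seeded with a test user plus one project named "test"
	dbpool := pgsdb.NewDBMemory(logger)
	dbpool.SetupTestData()
	if _, err := dbpool.InsertProject(dbpool.Users[0].ID, "test", "test"); err != nil {
		panic(err)
	}

	// in-memory object storage holding a single asset for that project
	bucket := shared.GetAssetBucketName(dbpool.Users[0].ID)
	st, err := storage.NewStorageMemory(map[string]map[string]string{
		bucket: {"test/index.html": "hello world!"},
	})
	if err != nil {
		panic(err)
	}

	cfg := pgs.NewPgsConfig(logger, dbpool, st)
	cfg.Domain = "pgs.test"

	// WebRouter implements http.Handler, so it can be served directly
	router := pgs.NewWebRouter(cfg)
	logger.Info("listening", "addr", ":8080")
	panic(http.ListenAndServe(":8080", router))
}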
+4,
-4
1@@ -41,7 +41,7 @@ func hasProtocol(url string) bool {
2 func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
3 logger := h.Logger
4 var redirects []*RedirectRule
5- redirectFp, redirectInfo, err := h.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
6+ redirectFp, redirectInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
7 if err == nil {
8 defer redirectFp.Close()
9 if redirectInfo != nil && redirectInfo.Size > h.Cfg.MaxSpecialFileSize {
10@@ -85,7 +85,7 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
11 // before redirecting, this saves a hop that will just end up a 404
12 if !hasProtocol(fp.Filepath) && strings.HasSuffix(fp.Filepath, "/") {
13 next := filepath.Join(h.ProjectDir, fp.Filepath, "index.html")
14- obj, _, err := h.Storage.GetObject(h.Bucket, next)
15+ obj, _, err := h.Cfg.Storage.GetObject(h.Bucket, next)
16 if err != nil {
17 continue
18 }
19@@ -137,7 +137,7 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
20 attempts = append(attempts, fpath)
21 logger = logger.With("object", fpath)
22 logger.Info("serving object")
23- c, info, err = h.Storage.ServeObject(
24+ c, info, err = h.Cfg.Storage.ServeObject(
25 h.Bucket,
26 fpath,
27 h.ImgProcessOpts,
28@@ -164,7 +164,7 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
29 defer contents.Close()
30
31 var headers []*HeaderRule
32- headersFp, headersInfo, err := h.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
33+ headersFp, headersInfo, err := h.Cfg.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
34 if err == nil {
35 defer headersFp.Close()
36 if headersInfo != nil && headersInfo.Size > h.Cfg.MaxSpecialFileSize {
+2,
-2
1@@ -48,13 +48,13 @@ func createSubCacheDrain(ctx context.Context, logger *slog.Logger) *pipe.Reconne
2 // cached assets for a given subdomain are grouped under a single key (which is
3 // separate from the "GET-https-example.com-/path" key used for serving files
4 // from the cache).
5-func purgeCache(cfg *shared.ConfigSite, send *pipe.ReconnectReadWriteCloser, surrogate string) error {
6+func purgeCache(cfg *PgsConfig, send *pipe.ReconnectReadWriteCloser, surrogate string) error {
7 cfg.Logger.Info("purging cache", "surrogate", surrogate)
8 time.Sleep(1 * time.Second)
9 _, err := send.Write([]byte(surrogate + "\n"))
10 return err
11 }
12
13-func purgeAllCache(cfg *shared.ConfigSite, send *pipe.ReconnectReadWriteCloser) error {
14+func purgeAllCache(cfg *PgsConfig, send *pipe.ReconnectReadWriteCloser) error {
15 return purgeCache(cfg, send, "*")
16 }
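
With purgeCache and purgeAllCache now taking *PgsConfig, callers inside the pgs package can build the pipe drain from the config alone. A minimal sketch, assuming it sits in package pgs next to the helpers above; purgeProjectCache and purgeEverything are hypothetical names, while createPubCacheDrain and getSurrogateKey are the unexported helpers used elsewhere in this diff.

package pgs

import "context"

// purgeProjectCache drops every cached asset for one user/project pair; the
// surrogate key groups all of that project's cache entries under one name.
func purgeProjectCache(ctx context.Context, cfg *PgsConfig, username, project string) error {
	send := createPubCacheDrain(ctx, cfg.Logger)
	return purgeCache(cfg, send, getSurrogateKey(username, project))
}

// purgeEverything clears the whole cache by sending the "*" surrogate.
func purgeEverything(ctx context.Context, cfg *PgsConfig) error {
	send := createPubCacheDrain(ctx, cfg.Logger)
	return purgeAllCache(cfg, send)
}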
+30,
-72
1@@ -10,16 +10,12 @@ import (
2 "testing"
3 "time"
4
5- "github.com/picosh/pico/db"
6- "github.com/picosh/pico/db/stub"
7+ pgsdb "github.com/picosh/pico/pgs/db"
8 "github.com/picosh/pico/shared"
9 "github.com/picosh/pico/shared/storage"
10 sst "github.com/picosh/pobj/storage"
11 )
12
13-var testUserID = "user-1"
14-var testUsername = "user"
15-
16 type ApiExample struct {
17 name string
18 path string
19@@ -29,63 +25,34 @@ type ApiExample struct {
20 status int
21 contentType string
22
23- dbpool db.DB
24 storage map[string]map[string]string
25 }
26
27 type PgsDb struct {
28- *stub.StubDB
29+ *pgsdb.MemoryDB
30 }
31
32 func NewPgsDb(logger *slog.Logger) *PgsDb {
33- sb := stub.NewStubDB(logger)
34- return &PgsDb{
35- StubDB: sb,
36+ sb := pgsdb.NewDBMemory(logger)
37+ sb.SetupTestData()
38+ _, err := sb.InsertProject(sb.Users[0].ID, "test", "test")
39+ if err != nil {
40+ panic(err)
41 }
42-}
43-
44-func (p *PgsDb) FindUserForName(name string) (*db.User, error) {
45- return &db.User{
46- ID: testUserID,
47- Name: testUsername,
48- }, nil
49-}
50-
51-func (p *PgsDb) FindProjectByName(userID, name string) (*db.Project, error) {
52- return &db.Project{
53- ID: "project-1",
54- UserID: userID,
55- Name: name,
56- ProjectDir: name,
57- Username: testUsername,
58- Acl: db.ProjectAcl{
59- Type: "public",
60- },
61- }, nil
62-}
63-
64-type PgsAnalyticsDb struct {
65- *PgsDb
66-}
67-
68-func NewPgsAnalticsDb(logger *slog.Logger) *PgsAnalyticsDb {
69- return &PgsAnalyticsDb{
70- PgsDb: NewPgsDb(logger),
71+ return &PgsDb{
72+ MemoryDB: sb,
73 }
74 }
75
76-func (p *PgsAnalyticsDb) HasFeatureForUser(userID, feature string) bool {
77- return true
78-}
79-
80-func mkpath(path string) string {
81- return fmt.Sprintf("https://%s-test.pgs.test%s", testUsername, path)
82+func (p *PgsDb) mkpath(path string) string {
83+ return fmt.Sprintf("https://%s-test.pgs.test%s", p.Users[0].Name, path)
84 }
85
86 func TestApiBasic(t *testing.T) {
87- bucketName := shared.GetAssetBucketName(testUserID)
88- cfg := NewConfigSite()
89- cfg.Domain = "pgs.test"
90+ logger := slog.Default()
91+ dbpool := NewPgsDb(logger)
92+ bucketName := shared.GetAssetBucketName(dbpool.Users[0].ID)
93+
94 tt := []*ApiExample{
95 {
96 name: "basic",
97@@ -94,7 +61,6 @@ func TestApiBasic(t *testing.T) {
98 status: http.StatusOK,
99 contentType: "text/html",
100
101- dbpool: NewPgsDb(cfg.Logger),
102 storage: map[string]map[string]string{
103 bucketName: {
104 "test/index.html": "hello world!",
105@@ -108,7 +74,6 @@ func TestApiBasic(t *testing.T) {
106 status: http.StatusOK,
107 contentType: "text/html",
108
109- dbpool: NewPgsDb(cfg.Logger),
110 storage: map[string]map[string]string{
111 bucketName: {
112 "test/test.html": "hello world!",
113@@ -122,7 +87,6 @@ func TestApiBasic(t *testing.T) {
114 status: http.StatusMovedPermanently,
115 contentType: "text/html; charset=utf-8",
116
117- dbpool: NewPgsDb(cfg.Logger),
118 storage: map[string]map[string]string{
119 bucketName: {
120 "test/subdir/index.html": "hello world!",
121@@ -136,7 +100,6 @@ func TestApiBasic(t *testing.T) {
122 status: http.StatusMovedPermanently,
123 contentType: "text/html; charset=utf-8",
124
125- dbpool: NewPgsDb(cfg.Logger),
126 storage: map[string]map[string]string{
127 bucketName: {
128 "test/_redirects": "/anything /about.html 301",
129@@ -151,7 +114,6 @@ func TestApiBasic(t *testing.T) {
130 status: http.StatusOK,
131 contentType: "text/html",
132
133- dbpool: NewPgsDb(cfg.Logger),
134 storage: map[string]map[string]string{
135 bucketName: {
136 "test/subdir/index.html": "hello world!",
137@@ -165,7 +127,6 @@ func TestApiBasic(t *testing.T) {
138 status: http.StatusOK,
139 contentType: "text/html",
140
141- dbpool: NewPgsDb(cfg.Logger),
142 storage: map[string]map[string]string{
143 bucketName: {
144 "test/_redirects": "/* /index.html 200",
145@@ -180,7 +141,6 @@ func TestApiBasic(t *testing.T) {
146 status: http.StatusNotFound,
147 contentType: "text/plain; charset=utf-8",
148
149- dbpool: NewPgsDb(cfg.Logger),
150 storage: map[string]map[string]string{
151 bucketName: {},
152 },
153@@ -192,7 +152,6 @@ func TestApiBasic(t *testing.T) {
154 status: http.StatusNotFound,
155 contentType: "text/html",
156
157- dbpool: NewPgsDb(cfg.Logger),
158 storage: map[string]map[string]string{
159 bucketName: {
160 "test/404.html": "boom!",
161@@ -206,7 +165,6 @@ func TestApiBasic(t *testing.T) {
162 status: http.StatusOK,
163 contentType: "image/jpeg",
164
165- dbpool: NewPgsDb(cfg.Logger),
166 storage: map[string]map[string]string{
167 bucketName: {
168 "test/profile.jpg": "image",
169@@ -221,7 +179,6 @@ func TestApiBasic(t *testing.T) {
170 status: http.StatusMovedPermanently,
171 contentType: "text/html; charset=utf-8",
172
173- dbpool: NewPgsDb(cfg.Logger),
174 storage: map[string]map[string]string{
175 bucketName: {
176 "test/_redirects": "/anything /about.html 301",
177@@ -239,7 +196,6 @@ func TestApiBasic(t *testing.T) {
178 status: http.StatusNotModified,
179 contentType: "",
180
181- dbpool: NewPgsDb(cfg.Logger),
182 storage: map[string]map[string]string{
183 bucketName: {
184 "test/test.html": "hello world!",
185@@ -256,7 +212,6 @@ func TestApiBasic(t *testing.T) {
186 status: http.StatusOK,
187 contentType: "text/html",
188
189- dbpool: NewPgsDb(cfg.Logger),
190 storage: map[string]map[string]string{
191 bucketName: {
192 "test/test.html": "hello world!",
193@@ -273,7 +228,6 @@ func TestApiBasic(t *testing.T) {
194 status: http.StatusNotModified,
195 contentType: "",
196
197- dbpool: NewPgsDb(cfg.Logger),
198 storage: map[string]map[string]string{
199 bucketName: {
200 "test/test.html": "hello world!",
201@@ -290,7 +244,6 @@ func TestApiBasic(t *testing.T) {
202 status: http.StatusOK,
203 contentType: "text/html",
204
205- dbpool: NewPgsDb(cfg.Logger),
206 storage: map[string]map[string]string{
207 bucketName: {
208 "test/test.html": "hello world!",
209@@ -309,7 +262,6 @@ func TestApiBasic(t *testing.T) {
210 status: http.StatusNotModified,
211 contentType: "",
212
213- dbpool: NewPgsDb(cfg.Logger),
214 storage: map[string]map[string]string{
215 bucketName: {
216 "test/test.html": "hello world!",
217@@ -320,14 +272,16 @@ func TestApiBasic(t *testing.T) {
218
219 for _, tc := range tt {
220 t.Run(tc.name, func(t *testing.T) {
221- request := httptest.NewRequest("GET", mkpath(tc.path), strings.NewReader(""))
222+ request := httptest.NewRequest("GET", dbpool.mkpath(tc.path), strings.NewReader(""))
223 for key, val := range tc.reqHeaders {
224 request.Header.Set(key, val)
225 }
226 responseRecorder := httptest.NewRecorder()
227
228 st, _ := storage.NewStorageMemory(tc.storage)
229- router := NewWebRouter(cfg, cfg.Logger, tc.dbpool, st)
230+ cfg := NewPgsConfig(logger, dbpool, st)
231+ cfg.Domain = "pgs.test"
232+ router := NewWebRouter(cfg)
233 router.ServeHTTP(responseRecorder, request)
234
235 if responseRecorder.Code != tc.status {
236@@ -349,6 +303,10 @@ func TestApiBasic(t *testing.T) {
237 if err != nil {
238 t.Errorf("err: %s", err.Error())
239 }
240+ if location == nil {
241+ t.Error("no location header in response")
242+ return
243+ }
244 if tc.wantUrl != location.String() {
245 t.Errorf("Want '%s', got '%s'", tc.wantUrl, location.String())
246 }
247@@ -374,9 +332,9 @@ func (s *ImageStorageMemory) ServeObject(bucket sst.Bucket, fpath string, opts *
248 }
249
250 func TestImageManipulation(t *testing.T) {
251- bucketName := shared.GetAssetBucketName(testUserID)
252- cfg := NewConfigSite()
253- cfg.Domain = "pgs.test"
254+ logger := slog.Default()
255+ dbpool := NewPgsDb(logger)
256+ bucketName := shared.GetAssetBucketName(dbpool.Users[0].ID)
257
258 tt := []ApiExample{
259 {
260@@ -386,7 +344,6 @@ func TestImageManipulation(t *testing.T) {
261 status: http.StatusOK,
262 contentType: "image/jpeg",
263
264- dbpool: NewPgsDb(cfg.Logger),
265 storage: map[string]map[string]string{
266 bucketName: {
267 "test/app.jpg": "hello world!",
268@@ -400,7 +357,6 @@ func TestImageManipulation(t *testing.T) {
269 status: http.StatusOK,
270 contentType: "image/jpeg",
271
272- dbpool: NewPgsDb(cfg.Logger),
273 storage: map[string]map[string]string{
274 bucketName: {
275 "test/subdir/app.jpg": "hello world!",
276@@ -411,7 +367,7 @@ func TestImageManipulation(t *testing.T) {
277
278 for _, tc := range tt {
279 t.Run(tc.name, func(t *testing.T) {
280- request := httptest.NewRequest("GET", mkpath(tc.path), strings.NewReader(""))
281+ request := httptest.NewRequest("GET", dbpool.mkpath(tc.path), strings.NewReader(""))
282 responseRecorder := httptest.NewRecorder()
283
284 memst, _ := storage.NewStorageMemory(tc.storage)
285@@ -421,7 +377,9 @@ func TestImageManipulation(t *testing.T) {
286 Ratio: &storage.Ratio{},
287 },
288 }
289- router := NewWebRouter(cfg, cfg.Logger, tc.dbpool, st)
290+ cfg := NewPgsConfig(logger, dbpool, st)
291+ cfg.Domain = "pgs.test"
292+ router := NewWebRouter(cfg)
293 router.ServeHTTP(responseRecorder, request)
294
295 if responseRecorder.Code != tc.status {
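
The Perm change earlier in this diff (an empty Acl type now counts as public) can be covered with the same in-memory harness. A hedged sketch of such a test follows; it would also need the github.com/picosh/pico/db import that this commit removed from web_test.go, and the test name is hypothetical.

package pgs

import (
	"log/slog"
	"testing"

	"github.com/picosh/pico/db"
	"github.com/picosh/pico/shared/storage"
)

func TestPermDefaultsToPublic(t *testing.T) {
	logger := slog.Default()
	dbpool := NewPgsDb(logger)
	st, _ := storage.NewStorageMemory(map[string]map[string]string{})
	cfg := NewPgsConfig(logger, dbpool, st)
	router := NewWebRouter(cfg)

	// projects created before ACLs existed can have an empty type; after this
	// change they are still served as public
	if !router.Perm(&db.Project{Acl: db.ProjectAcl{Type: ""}}) {
		t.Error("expected empty acl type to be treated as public")
	}
	if !router.Perm(&db.Project{Acl: db.ProjectAcl{Type: "public"}}) {
		t.Error("expected public acl type to be allowed")
	}
}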
+1,
-1
1@@ -71,7 +71,7 @@ func StartSshServer() {
2 DBPool: dbpool,
3 }
4
5- sshAuth := shared.NewSshAuthHandler(dbpool, logger, cfg)
6+ sshAuth := shared.NewSshAuthHandler(dbpool, logger)
7 s, err := wish.NewServer(
8 wish.WithAddress(fmt.Sprintf("%s:%s", host, port)),
9 wish.WithHostKeyPath("ssh_data/term_info_ed25519"),
+1,
-1
1@@ -42,7 +42,7 @@ func StartSshServer() {
2 Access: syncmap.New[string, []string](),
3 }
4
5- sshAuth := shared.NewSshAuthHandler(dbh, logger, cfg)
6+ sshAuth := shared.NewSshAuthHandler(dbh, logger)
7 s, err := wish.NewServer(
8 wish.WithAddress(fmt.Sprintf("%s:%s", host, port)),
9 wish.WithHostKeyPath("ssh_data/term_info_ed25519"),
+1,
-1
1@@ -86,7 +86,7 @@ func StartSshServer() {
2 }
3 handler := filehandlers.NewFileHandlerRouter(cfg, dbh, fileMap)
4
5- sshAuth := shared.NewSshAuthHandler(dbh, logger, cfg)
6+ sshAuth := shared.NewSshAuthHandler(dbh, logger)
7 s, err := wish.NewServer(
8 wish.WithAddress(fmt.Sprintf("%s:%s", host, port)),
9 wish.WithHostKeyPath("ssh_data/term_info_ed25519"),
1@@ -9,42 +9,24 @@ import (
2 )
3
4 type SshAuthHandler struct {
5- DBPool db.DB
6+ DB AuthFindUser
7 Logger *slog.Logger
8- Cfg *ConfigSite
9 }
10
11-func NewSshAuthHandler(dbpool db.DB, logger *slog.Logger, cfg *ConfigSite) *SshAuthHandler {
12- return &SshAuthHandler{
13- DBPool: dbpool,
14- Logger: logger,
15- Cfg: cfg,
16- }
17+type AuthFindUser interface {
18+ FindUserByPubkey(key string) (*db.User, error)
19 }
20
21-func FindPlusFF(dbpool db.DB, cfg *ConfigSite, userID string) *db.FeatureFlag {
22- ff, _ := dbpool.FindFeatureForUser(userID, "plus")
23- // we have free tiers so users might not have a feature flag
24- // in which case we set sane defaults
25- if ff == nil {
26- ff = db.NewFeatureFlag(
27- userID,
28- "plus",
29- cfg.MaxSize,
30- cfg.MaxAssetSize,
31- cfg.MaxSpecialFileSize,
32- )
33+func NewSshAuthHandler(dbh AuthFindUser, logger *slog.Logger) *SshAuthHandler {
34+ return &SshAuthHandler{
35+ DB: dbh,
36+ Logger: logger,
37 }
38- // this is jank
39- ff.Data.StorageMax = ff.FindStorageMax(cfg.MaxSize)
40- ff.Data.FileMax = ff.FindFileMax(cfg.MaxAssetSize)
41- ff.Data.SpecialFileMax = ff.FindSpecialFileMax(cfg.MaxSpecialFileSize)
42- return ff
43 }
44
45 func (r *SshAuthHandler) PubkeyAuthHandler(ctx ssh.Context, key ssh.PublicKey) bool {
46 pubkey := utils.KeyForKeyText(key)
47- user, err := r.DBPool.FindUserForKey(ctx.User(), pubkey)
48+ user, err := r.DB.FindUserByPubkey(pubkey)
49 if err != nil {
50 r.Logger.Error(
51 "could not find user for key",
52@@ -66,3 +48,23 @@ func (r *SshAuthHandler) PubkeyAuthHandler(ctx ssh.Context, key ssh.PublicKey) b
53 ctx.Permissions().Extensions["pubkey"] = pubkey
54 return true
55 }
56+
57+func FindPlusFF(dbpool db.DB, cfg *ConfigSite, userID string) *db.FeatureFlag {
58+ ff, _ := dbpool.FindFeatureForUser(userID, "plus")
59+ // we have free tiers so users might not have a feature flag
60+ // in which case we set sane defaults
61+ if ff == nil {
62+ ff = db.NewFeatureFlag(
63+ userID,
64+ "plus",
65+ cfg.MaxSize,
66+ cfg.MaxAssetSize,
67+ cfg.MaxSpecialFileSize,
68+ )
69+ }
70+ // this is jank
71+ ff.Data.StorageMax = ff.FindStorageMax(cfg.MaxSize)
72+ ff.Data.FileMax = ff.FindFileMax(cfg.MaxAssetSize)
73+ ff.Data.SpecialFileMax = ff.FindSpecialFileMax(cfg.MaxSpecialFileSize)
74+ return ff
75+}
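
NewSshAuthHandler now depends only on the narrow AuthFindUser interface, so any type with FindUserByPubkey can back it. A sketch of a static, in-memory implementation; staticAuth and its contents are hypothetical and only illustrate the interface boundary.

package main

import (
	"fmt"
	"log/slog"

	"github.com/picosh/pico/db"
	"github.com/picosh/pico/shared"
)

// staticAuth satisfies shared.AuthFindUser with a fixed pubkey -> user map.
type staticAuth struct {
	users map[string]*db.User
}

func (s *staticAuth) FindUserByPubkey(key string) (*db.User, error) {
	if u, ok := s.users[key]; ok {
		return u, nil
	}
	return nil, fmt.Errorf("user not found for key")
}

func main() {
	auth := &staticAuth{users: map[string]*db.User{
		"nice-pubkey": {ID: "user-1", Name: "user"},
	}}
	handler := shared.NewSshAuthHandler(auth, slog.Default())
	// pass handler.PubkeyAuthHandler as the wish server's public-key auth
	// callback, as the ssh entrypoints above do
	_ = handler
}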
+5,
-1
1@@ -158,8 +158,12 @@ func (m Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
2
3 case tea.KeyMsg:
4 switch msg.String() {
5- case "q", "esc":
6+ case "esc":
7 return m, pages.Navigate(pages.MenuPage)
8+ case "q":
9+ if !m.input.Focused() {
10+ return m, pages.Navigate(pages.MenuPage)
11+ }
12 case "tab":
13 if m.input.Focused() {
14 m.input.Blur()
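
The keybinding change above stops "q" from navigating away while the user is typing into the input. A standalone Bubble Tea sketch of the same guard, assuming current charmbracelet/bubbletea and bubbles/textinput APIs (Program.Run, textinput.New); it is not the pico TUI code.

package main

import (
	"github.com/charmbracelet/bubbles/textinput"
	tea "github.com/charmbracelet/bubbletea"
)

type model struct {
	input textinput.Model
}

func (m model) Init() tea.Cmd { return nil }

func (m model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	switch msg := msg.(type) {
	case tea.KeyMsg:
		switch msg.String() {
		case "esc":
			return m, tea.Quit
		case "q":
			// only treat "q" as a command when the input is not capturing keys
			if !m.input.Focused() {
				return m, tea.Quit
			}
		}
	}
	var cmd tea.Cmd
	m.input, cmd = m.input.Update(msg)
	return m, cmd
}

func (m model) View() string {
	return m.input.View() + "\n(esc always exits; q only exits when the input is blurred)\n"
}

func main() {
	ti := textinput.New()
	ti.Focus()
	if _, err := tea.NewProgram(model{input: ti}).Run(); err != nil {
		panic(err)
	}
}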