- commit
- b229935
- parent
- abd75f3
- author
- Eric Bower
- date
- 2026-03-28 20:53:04 -0400 EDT
refactor: merge pboj and shared/storage
19 files changed,
+557,
-651
+2,
-2
1@@ -13,8 +13,8 @@ import (
2
3 pgsdb "github.com/picosh/pico/pkg/apps/pgs/db"
4 "github.com/picosh/pico/pkg/db"
5- sst "github.com/picosh/pico/pkg/pobj/storage"
6 "github.com/picosh/pico/pkg/shared"
7+ "github.com/picosh/pico/pkg/shared/storage"
8 )
9
10 func NewTabWriter(out io.Writer) *tabwriter.Writer {
11@@ -48,7 +48,7 @@ type Cmd struct {
12 User *db.User
13 Session shared.CmdSession
14 Log *slog.Logger
15- Store sst.ObjectStorage
16+ Store storage.ObjectStorage
17 Dbpool pgsdb.PgsDB
18 Write bool
19 Width int
+2,
-2
1@@ -9,7 +9,7 @@ import (
2 "sort"
3 "strings"
4
5- sst "github.com/picosh/pico/pkg/pobj/storage"
6+ "github.com/picosh/pico/pkg/shared/storage"
7 )
8
9 //go:embed html/*
10@@ -92,7 +92,7 @@ func toDisplayEntries(entries []os.FileInfo) []dirEntryDisplay {
11 return displayEntries
12 }
13
14-func shouldGenerateListing(st sst.ObjectStorage, bucket sst.Bucket, projectDir string, path string) bool {
15+func shouldGenerateListing(st storage.ObjectStorage, bucket storage.Bucket, projectDir string, path string) bool {
16 dirPath := projectDir + path
17 if path == "/" {
18 dirPath = projectDir + "/"
+3,
-3
1@@ -6,8 +6,8 @@ import (
2 "testing"
3 "time"
4
5- sst "github.com/picosh/pico/pkg/pobj/storage"
6 "github.com/picosh/pico/pkg/send/utils"
7+ "github.com/picosh/pico/pkg/shared/storage"
8 )
9
10 func TestGenerateDirectoryHTML(t *testing.T) {
11@@ -208,8 +208,8 @@ func TestShouldGenerateListing(t *testing.T) {
12
13 for _, fixture := range fixtures {
14 t.Run(fixture.Name, func(t *testing.T) {
15- st, _ := sst.NewStorageMemory(fixture.Storage)
16- bucket := sst.Bucket{Name: "testbucket", Path: "testbucket"}
17+ st, _ := storage.NewStorageMemory(fixture.Storage)
18+ bucket := storage.Bucket{Name: "testbucket", Path: "testbucket"}
19
20 result := shouldGenerateListing(st, bucket, "project", fixture.Path)
21
+6,
-7
1@@ -17,11 +17,10 @@ import (
2
3 pgsdb "github.com/picosh/pico/pkg/apps/pgs/db"
4 "github.com/picosh/pico/pkg/db"
5- "github.com/picosh/pico/pkg/pobj"
6- sst "github.com/picosh/pico/pkg/pobj/storage"
7 "github.com/picosh/pico/pkg/pssh"
8 sendutils "github.com/picosh/pico/pkg/send/utils"
9 "github.com/picosh/pico/pkg/shared"
10+ "github.com/picosh/pico/pkg/shared/storage"
11 ignore "github.com/sabhiram/go-gitignore"
12 )
13
14@@ -60,8 +59,8 @@ func setProject(s *pssh.SSHServerConnSession, project *db.Project) {
15 s.SetValue(ctxProjectKey{}, project)
16 }
17
18-func getBucket(s *pssh.SSHServerConnSession) (sst.Bucket, error) {
19- bucket := s.Context().Value(ctxBucketKey{}).(sst.Bucket)
20+func getBucket(s *pssh.SSHServerConnSession) (storage.Bucket, error) {
21+ bucket := s.Context().Value(ctxBucketKey{}).(storage.Bucket)
22 if bucket.Name == "" {
23 return bucket, fmt.Errorf("bucket not set on `ssh.Context()` for connection")
24 }
25@@ -92,7 +91,7 @@ func shouldIgnoreFile(fp, ignoreStr string) bool {
26 type FileData struct {
27 *sendutils.FileEntry
28 User *db.User
29- Bucket sst.Bucket
30+ Bucket storage.Bucket
31 Project *db.Project
32 DenyList string
33 }
34@@ -145,7 +144,7 @@ func (h *UploadAssetHandler) Read(s *pssh.SSHServerConnSession, entry *sendutils
35 fileInfo.FSize = info.Size
36 fileInfo.FModTime = info.LastModified
37
38- reader := pobj.NewAllReaderAt(contents)
39+ reader := storage.NewAllReaderAt(contents)
40
41 return fileInfo, reader, nil
42 }
43@@ -238,7 +237,7 @@ func (h *UploadAssetHandler) Validate(s *pssh.SSHServerConnSession) error {
44 return nil
45 }
46
47-func (h *UploadAssetHandler) findDenylist(bucket sst.Bucket, project *db.Project, logger *slog.Logger) (string, error) {
48+func (h *UploadAssetHandler) findDenylist(bucket storage.Bucket, project *db.Project, logger *slog.Logger) (string, error) {
49 fp, _, err := h.Cfg.Storage.GetObject(bucket, filepath.Join(project.ProjectDir, "_pgs_ignore"))
50 if err != nil {
51 return "", fmt.Errorf("_pgs_ignore not found")
+1,
-2
1@@ -24,7 +24,6 @@ import (
2 "github.com/gorilla/feeds"
3 "github.com/hashicorp/golang-lru/v2/expirable"
4 "github.com/picosh/pico/pkg/db"
5- sst "github.com/picosh/pico/pkg/pobj/storage"
6 "github.com/picosh/pico/pkg/shared"
7 "github.com/picosh/pico/pkg/shared/router"
8 "github.com/picosh/pico/pkg/shared/storage"
9@@ -486,7 +485,7 @@ func (web *WebRouter) ServeAsset(fname string, opts *storage.ImgProcessOpts, has
10 "userId", user.ID,
11 )
12
13- var bucket sst.Bucket
14+ var bucket storage.Bucket
15 bucket, err = web.Cfg.Storage.GetBucket(shared.GetAssetBucketName(user.ID))
16 project, perr := web.Cfg.DB.FindProjectByName(user.ID, props.ProjectName)
17 if perr != nil {
+2,
-3
1@@ -14,7 +14,6 @@ import (
2 "net/http/httputil"
3 _ "net/http/pprof"
4
5- sst "github.com/picosh/pico/pkg/pobj/storage"
6 "github.com/picosh/pico/pkg/shared/storage"
7 )
8
9@@ -27,7 +26,7 @@ type ApiAssetHandler struct {
10 Subdomain string
11 ProjectDir string
12 Filepath string
13- Bucket sst.Bucket
14+ Bucket storage.Bucket
15 ImgProcessOpts *storage.ImgProcessOpts
16 ProjectID string
17 HasPicoPlus bool
18@@ -87,7 +86,7 @@ func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
19
20 var contents io.ReadCloser
21 assetFilepath := ""
22- var info *sst.ObjectInfo
23+ var info *storage.ObjectInfo
24 status := http.StatusOK
25 attempts := []string{}
26 for _, fp := range routes {
+2,
-3
1@@ -11,7 +11,6 @@ import (
2 "time"
3
4 pgsdb "github.com/picosh/pico/pkg/apps/pgs/db"
5- sst "github.com/picosh/pico/pkg/pobj/storage"
6 "github.com/picosh/pico/pkg/shared"
7 "github.com/picosh/pico/pkg/shared/storage"
8 )
9@@ -481,10 +480,10 @@ type ImageStorageMemory struct {
10 Fpath string
11 }
12
13-func (s *ImageStorageMemory) ServeObject(r *http.Request, bucket sst.Bucket, fpath string, opts *storage.ImgProcessOpts) (io.ReadCloser, *sst.ObjectInfo, error) {
14+func (s *ImageStorageMemory) ServeObject(r *http.Request, bucket storage.Bucket, fpath string, opts *storage.ImgProcessOpts) (io.ReadCloser, *storage.ObjectInfo, error) {
15 s.Opts = opts
16 s.Fpath = fpath
17- info := sst.ObjectInfo{
18+ info := storage.ObjectInfo{
19 Metadata: make(http.Header),
20 }
21 info.Metadata.Set("content-type", "image/jpeg")
+2,
-4
1@@ -13,8 +13,6 @@ import (
2
3 exifremove "github.com/neurosnap/go-exif-remove"
4 "github.com/picosh/pico/pkg/db"
5- "github.com/picosh/pico/pkg/pobj"
6- sst "github.com/picosh/pico/pkg/pobj/storage"
7 "github.com/picosh/pico/pkg/pssh"
8 sendutils "github.com/picosh/pico/pkg/send/utils"
9 "github.com/picosh/pico/pkg/shared"
10@@ -30,7 +28,7 @@ type PostMetaData struct {
11 Filename string
12 User *db.User
13 FeatureFlag *db.FeatureFlag
14- Bucket sst.Bucket
15+ Bucket storage.Bucket
16 }
17
18 type UploadImgHandler struct {
19@@ -125,7 +123,7 @@ func (h *UploadImgHandler) Read(s *pssh.SSHServerConnSession, entry *sendutils.F
20 if err != nil {
21 return nil, nil, err
22 }
23- reader := pobj.NewAllReaderAt(contents)
24+ reader := storage.NewAllReaderAt(contents)
25
26 fileInfo := &sendutils.VirtualFile{
27 FName: cleanFilename,
+0,
-322
1@@ -1,322 +0,0 @@
2-package storage
3-
4-import (
5- "crypto/md5"
6- "encoding/hex"
7- "fmt"
8- "io"
9- "io/fs"
10- "log/slog"
11- "net/http"
12- "os"
13- "path"
14- "path/filepath"
15- "strings"
16- "time"
17-
18- "github.com/google/renameio/v2"
19- "github.com/picosh/pico/pkg/send/utils"
20- "github.com/picosh/pico/pkg/shared/mime"
21-)
22-
23-var KB = 1000
24-var MB = KB * 1000
25-
26-// https://stackoverflow.com/a/32482941
27-func dirSize(path string) (int64, error) {
28- var size int64
29- err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
30- if err != nil {
31- return err
32- }
33- if !info.IsDir() {
34- size += info.Size()
35- }
36- return err
37- })
38-
39- return size, err
40-}
41-
42-type StorageFS struct {
43- Dir string
44- Logger *slog.Logger
45-}
46-
47-var _ ObjectStorage = &StorageFS{}
48-var _ ObjectStorage = (*StorageFS)(nil)
49-
50-func NewStorageFS(logger *slog.Logger, dir string) (*StorageFS, error) {
51- return &StorageFS{Logger: logger, Dir: dir}, nil
52-}
53-
54-func (s *StorageFS) GetBucket(name string) (Bucket, error) {
55- dirPath := filepath.Join(s.Dir, name)
56- bucket := Bucket{
57- Name: name,
58- Path: dirPath,
59- }
60- // s.Logger.Info("get bucket", "dir", dirPath)
61-
62- info, err := os.Stat(dirPath)
63- if os.IsNotExist(err) {
64- return bucket, fmt.Errorf("directory does not exist: %v %w", dirPath, err)
65- }
66-
67- if err != nil {
68- return bucket, fmt.Errorf("directory error: %v %w", dirPath, err)
69-
70- }
71-
72- if !info.IsDir() {
73- return bucket, fmt.Errorf("directory is a file, not a directory: %#v", dirPath)
74- }
75-
76- return bucket, nil
77-}
78-
79-func (s *StorageFS) UpsertBucket(name string) (Bucket, error) {
80- s.Logger.Info("upsert bucket", "name", name)
81- bucket, err := s.GetBucket(name)
82- if err == nil {
83- return bucket, nil
84- }
85-
86- dir := filepath.Join(s.Dir, name)
87- s.Logger.Info("bucket not found, creating", "dir", dir, "err", err)
88- err = os.MkdirAll(dir, os.ModePerm)
89- if err != nil {
90- return bucket, err
91- }
92-
93- return bucket, nil
94-}
95-
96-func (s *StorageFS) GetBucketQuota(bucket Bucket) (uint64, error) {
97- dsize, err := dirSize(bucket.Path)
98- return uint64(dsize), err
99-}
100-
101-// DeleteBucket will delete all contents regardless if files exist inside of it.
102-func (s *StorageFS) DeleteBucket(bucket Bucket) error {
103- return os.RemoveAll(bucket.Path)
104-}
105-
106-func (s *StorageFS) GetObject(bucket Bucket, fpath string) (utils.ReadAndReaderAtCloser, *ObjectInfo, error) {
107- objInfo := &ObjectInfo{
108- Size: 0,
109- LastModified: time.Time{},
110- Metadata: make(http.Header),
111- ETag: "",
112- }
113-
114- dat, err := os.Open(filepath.Join(bucket.Path, fpath))
115- if err != nil {
116- return nil, objInfo, err
117- }
118-
119- info, err := dat.Stat()
120- if err != nil {
121- _ = dat.Close()
122- return nil, objInfo, err
123- }
124-
125- etag := ""
126- // only generate etag if file is less than 10MB
127- if info.Size() <= int64(10*MB) {
128- // calculate etag
129- h := md5.New()
130- if _, err := io.Copy(h, dat); err != nil {
131- _ = dat.Close()
132- return nil, objInfo, err
133- }
134- md5Sum := h.Sum(nil)
135- etag = hex.EncodeToString(md5Sum)
136-
137- // reset os.File reader
138- _, err = dat.Seek(0, io.SeekStart)
139- if err != nil {
140- _ = dat.Close()
141- return nil, objInfo, err
142- }
143- }
144-
145- objInfo.ETag = etag
146- objInfo.Size = info.Size()
147- objInfo.LastModified = info.ModTime()
148- objInfo.Metadata.Set("content-type", mime.GetMimeType(fpath))
149- return dat, objInfo, nil
150-}
151-
152-func (s *StorageFS) PutObject(bucket Bucket, fpath string, contents io.Reader, entry *utils.FileEntry) (string, int64, error) {
153- loc := filepath.Join(bucket.Path, fpath)
154- err := os.MkdirAll(filepath.Dir(loc), os.ModePerm)
155- if err != nil {
156- return "", 0, err
157- }
158- out, err := renameio.NewPendingFile(loc, renameio.WithPermissions(os.ModePerm))
159- if err != nil {
160- return "", 0, err
161- }
162-
163- size, err := io.Copy(out, contents)
164- if err != nil {
165- return "", 0, err
166- }
167-
168- if err := out.CloseAtomicallyReplace(); err != nil {
169- return "", 0, err
170- }
171-
172- if entry.Mtime > 0 {
173- uTime := time.Unix(entry.Mtime, 0)
174- _ = os.Chtimes(loc, uTime, uTime)
175- }
176-
177- return loc, size, nil
178-}
179-
180-func (s *StorageFS) DeleteObject(bucket Bucket, fpath string) error {
181- loc := filepath.Join(bucket.Path, fpath)
182- err := os.Remove(loc)
183- if err != nil {
184- if os.IsNotExist(err) {
185- return nil
186- }
187- return err
188- }
189-
190- // traverse up the folder tree and remove all empty folders
191- dir := filepath.Dir(loc)
192- for dir != "" {
193- f, err := os.Open(dir)
194- if err != nil {
195- s.Logger.Info("open dir", "dir", dir, "err", err)
196- break
197- }
198- defer func() {
199- _ = f.Close()
200- }()
201-
202- // https://stackoverflow.com/a/30708914
203- contents, err := f.Readdirnames(-1)
204- if err != nil {
205- s.Logger.Info("read dir", "dir", dir, "err", err)
206- break
207- }
208- if len(contents) > 0 {
209- break
210- }
211-
212- err = os.Remove(dir)
213- if err != nil {
214- s.Logger.Info("remove dir", "dir", dir, "err", err)
215- break
216- }
217- fp := strings.Split(dir, "/")
218- prefix := ""
219- if strings.HasPrefix(loc, "/") {
220- prefix = "/"
221- }
222- dir = prefix + filepath.Join(fp[:len(fp)-1]...)
223- }
224-
225- return nil
226-}
227-
228-func (s *StorageFS) ListBuckets() ([]string, error) {
229- entries, err := os.ReadDir(s.Dir)
230- if err != nil {
231- return []string{}, err
232- }
233-
234- buckets := []string{}
235- for _, e := range entries {
236- if !e.IsDir() {
237- continue
238- }
239- buckets = append(buckets, e.Name())
240- }
241- return buckets, nil
242-}
243-
244-func (s *StorageFS) ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error) {
245- fileList := []os.FileInfo{}
246-
247- fpath := path.Join(bucket.Path, dir)
248-
249- info, err := os.Stat(fpath)
250- if err != nil {
251- if os.IsNotExist(err) {
252- return fileList, nil
253- }
254- return fileList, err
255- }
256-
257- if info.IsDir() && !strings.HasSuffix(dir, "/") {
258- fileList = append(fileList, &utils.VirtualFile{
259- FName: "",
260- FIsDir: info.IsDir(),
261- FSize: info.Size(),
262- FModTime: info.ModTime(),
263- })
264-
265- return fileList, err
266- }
267-
268- var files []utils.VirtualFile
269-
270- if recursive {
271- err = filepath.WalkDir(fpath, func(s string, d fs.DirEntry, err error) error {
272- if err != nil {
273- return err
274- }
275- info, err := d.Info()
276- if err != nil {
277- return nil
278- }
279- fname := strings.TrimPrefix(s, fpath)
280- if fname == "" {
281- return nil
282- }
283- // rsync does not expect prefixed `/` so without this `rsync --delete` is borked
284- fname = strings.TrimPrefix(fname, "/")
285- files = append(files, utils.VirtualFile{
286- FName: fname,
287- FIsDir: info.IsDir(),
288- FSize: info.Size(),
289- FModTime: info.ModTime(),
290- })
291- return nil
292- })
293- if err != nil {
294- fileList = append(fileList, info)
295- return fileList, nil
296- }
297- } else {
298- fls, err := os.ReadDir(fpath)
299- if err != nil {
300- fileList = append(fileList, info)
301- return fileList, nil
302- }
303- for _, d := range fls {
304- info, err := d.Info()
305- if err != nil {
306- continue
307- }
308- fp := info.Name()
309- files = append(files, utils.VirtualFile{
310- FName: fp,
311- FIsDir: info.IsDir(),
312- FSize: info.Size(),
313- FModTime: info.ModTime(),
314- })
315- }
316- }
317-
318- for _, f := range files {
319- fileList = append(fileList, &f)
320- }
321-
322- return fileList, err
323-}
+0,
-207
1@@ -1,207 +0,0 @@
2-package storage
3-
4-import (
5- "fmt"
6- "io"
7- "os"
8- "path/filepath"
9- "strings"
10- "sync"
11- "time"
12-
13- "github.com/picosh/pico/pkg/send/utils"
14-)
15-
16-type StorageMemory struct {
17- storage map[string]map[string]string
18- mu sync.RWMutex
19-}
20-
21-var _ ObjectStorage = &StorageMemory{}
22-var _ ObjectStorage = (*StorageMemory)(nil)
23-
24-func NewStorageMemory(st map[string]map[string]string) (*StorageMemory, error) {
25- return &StorageMemory{
26- storage: st,
27- }, nil
28-}
29-
30-func (s *StorageMemory) GetBucket(name string) (Bucket, error) {
31- s.mu.RLock()
32- defer s.mu.RUnlock()
33-
34- bucket := Bucket{
35- Name: name,
36- Path: name,
37- }
38-
39- _, ok := s.storage[name]
40- if !ok {
41- return bucket, fmt.Errorf("bucket does not exist")
42- }
43-
44- return bucket, nil
45-}
46-
47-func (s *StorageMemory) UpsertBucket(name string) (Bucket, error) {
48- bucket, err := s.GetBucket(name)
49- if err == nil {
50- return bucket, nil
51- }
52-
53- s.mu.Lock()
54- defer s.mu.Unlock()
55-
56- s.storage[name] = map[string]string{}
57- return bucket, nil
58-}
59-
60-func (s *StorageMemory) GetBucketQuota(bucket Bucket) (uint64, error) {
61- s.mu.RLock()
62- defer s.mu.RUnlock()
63-
64- objects := s.storage[bucket.Path]
65- size := 0
66- for _, val := range objects {
67- size += len([]byte(val))
68- }
69- return uint64(size), nil
70-}
71-
72-func (s *StorageMemory) DeleteBucket(bucket Bucket) error {
73- s.mu.Lock()
74- defer s.mu.Unlock()
75-
76- delete(s.storage, bucket.Path)
77- return nil
78-}
79-
80-func (s *StorageMemory) GetObject(bucket Bucket, fpath string) (utils.ReadAndReaderAtCloser, *ObjectInfo, error) {
81- s.mu.RLock()
82- defer s.mu.RUnlock()
83-
84- if !strings.HasPrefix(fpath, "/") {
85- fpath = "/" + fpath
86- }
87-
88- objInfo := &ObjectInfo{
89- LastModified: time.Time{},
90- Metadata: nil,
91- }
92-
93- dat, ok := s.storage[bucket.Path][fpath]
94- if !ok {
95- return nil, objInfo, fmt.Errorf("object does not exist: %s", fpath)
96- }
97-
98- objInfo.Size = int64(len([]byte(dat)))
99- reader := utils.NopReadAndReaderAtCloser(strings.NewReader(dat))
100- return reader, objInfo, nil
101-}
102-
103-func (s *StorageMemory) PutObject(bucket Bucket, fpath string, contents io.Reader, entry *utils.FileEntry) (string, int64, error) {
104- s.mu.Lock()
105- defer s.mu.Unlock()
106-
107- d, err := io.ReadAll(contents)
108- if err != nil {
109- return "", 0, err
110- }
111-
112- s.storage[bucket.Path][fpath] = string(d)
113- return fmt.Sprintf("%s%s", bucket.Path, fpath), int64(len(d)), nil
114-}
115-
116-func (s *StorageMemory) DeleteObject(bucket Bucket, fpath string) error {
117- s.mu.Lock()
118- defer s.mu.Unlock()
119-
120- delete(s.storage[bucket.Path], fpath)
121- return nil
122-}
123-
124-func (s *StorageMemory) ListBuckets() ([]string, error) {
125- s.mu.RLock()
126- defer s.mu.RUnlock()
127-
128- buckets := []string{}
129- for key := range s.storage {
130- buckets = append(buckets, key)
131- }
132- return buckets, nil
133-}
134-
135-func (s *StorageMemory) ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error) {
136- s.mu.RLock()
137- defer s.mu.RUnlock()
138-
139- var fileList []os.FileInfo
140-
141- resolved := dir
142-
143- if !strings.HasPrefix(resolved, "/") {
144- resolved = "/" + resolved
145- }
146-
147- objects := s.storage[bucket.Path]
148- // dir is actually an object
149- oval, ok := objects[resolved]
150- if ok {
151- fileList = append(fileList, &utils.VirtualFile{
152- FName: filepath.Base(resolved),
153- FIsDir: false,
154- FSize: int64(len([]byte(oval))),
155- FModTime: time.Time{},
156- })
157- return fileList, nil
158- }
159-
160- for key, val := range objects {
161- if !strings.HasPrefix(key, resolved) {
162- continue
163- }
164-
165- rep := strings.Replace(key, resolved, "", 1)
166- fdir := filepath.Dir(rep)
167- fname := filepath.Base(rep)
168- paths := strings.Split(fdir, "/")
169-
170- if fdir == "/" {
171- ffname := filepath.Base(resolved)
172- fileList = append(fileList, &utils.VirtualFile{
173- FName: ffname,
174- FIsDir: true,
175- })
176- }
177-
178- for _, p := range paths {
179- if p == "" || p == "/" || p == "." {
180- continue
181- }
182- fileList = append(fileList, &utils.VirtualFile{
183- FName: p,
184- FIsDir: true,
185- })
186- }
187-
188- trimRes := strings.TrimSuffix(resolved, "/")
189- dirKey := filepath.Dir(key)
190- if recursive {
191- fileList = append(fileList, &utils.VirtualFile{
192- FName: fname,
193- FIsDir: false,
194- FSize: int64(len([]byte(val))),
195- FModTime: time.Time{},
196- })
197- } else if resolved == dirKey || trimRes == dirKey {
198- fileList = append(fileList, &utils.VirtualFile{
199- FName: fname,
200- FIsDir: false,
201- FSize: int64(len([]byte(val))),
202- FModTime: time.Time{},
203- })
204- }
205- }
206-
207- return fileList, nil
208-}
+0,
-36
1@@ -1,36 +0,0 @@
2-package storage
3-
4-import (
5- "io"
6- "net/http"
7- "os"
8- "time"
9-
10- "github.com/picosh/pico/pkg/send/utils"
11-)
12-
13-type Bucket struct {
14- Name string
15- Path string
16- Root string
17-}
18-
19-type ObjectStorage interface {
20- GetBucket(name string) (Bucket, error)
21- GetBucketQuota(bucket Bucket) (uint64, error)
22- UpsertBucket(name string) (Bucket, error)
23- ListBuckets() ([]string, error)
24- DeleteBucket(bucket Bucket) error
25-
26- GetObject(bucket Bucket, fpath string) (utils.ReadAndReaderAtCloser, *ObjectInfo, error)
27- PutObject(bucket Bucket, fpath string, contents io.Reader, entry *utils.FileEntry) (string, int64, error)
28- DeleteObject(bucket Bucket, fpath string) error
29- ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error)
30-}
31-
32-type ObjectInfo struct {
33- Size int64
34- LastModified time.Time
35- ETag string
36- Metadata http.Header
37-}
+0,
-30
1@@ -1,30 +0,0 @@
2-package pobj
3-
4-import (
5- "log/slog"
6- "os"
7-
8- "github.com/picosh/pico/pkg/pobj/storage"
9-)
10-
11-func GetEnv(key string, defaultVal string) string {
12- if value, exists := os.LookupEnv(key); exists {
13- return value
14- }
15- return defaultVal
16-}
17-
18-func EnvDriverDetector(logger *slog.Logger) (storage.ObjectStorage, error) {
19- driver := GetEnv("OBJECT_DRIVER", "fs")
20- logger.Info("driver detected", "driver", driver)
21-
22- switch driver {
23- case "memory":
24- return storage.NewStorageMemory(map[string]map[string]string{})
25- }
26-
27- // implied driver == "fs"
28- storageDir := GetEnv("OBJECT_URL", "./.storage")
29- logger.Info("object config detected", "dir", storageDir)
30- return storage.NewStorageFS(logger, storageDir)
31-}
1@@ -1,4 +1,4 @@
2-package pobj
3+package storage
4
5 import (
6 "fmt"
1@@ -1,34 +1,329 @@
2 package storage
3
4 import (
5+ "crypto/md5"
6+ "encoding/hex"
7 "fmt"
8 "io"
9+ "io/fs"
10 "log/slog"
11 "net/http"
12 "os"
13+ "path"
14 "path/filepath"
15 "strings"
16+ "time"
17
18- sst "github.com/picosh/pico/pkg/pobj/storage"
19+ "github.com/google/renameio/v2"
20+ "github.com/picosh/pico/pkg/send/utils"
21 "github.com/picosh/pico/pkg/shared/mime"
22 )
23
24+var KB = 1000
25+var MB = KB * 1000
26+
27+// https://stackoverflow.com/a/32482941
28+func dirSize(path string) (int64, error) {
29+ var size int64
30+ err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
31+ if err != nil {
32+ return err
33+ }
34+ if !info.IsDir() {
35+ size += info.Size()
36+ }
37+ return err
38+ })
39+
40+ return size, err
41+}
42+
43 type StorageFS struct {
44- *sst.StorageFS
45+ Dir string
46 Logger *slog.Logger
47 }
48
49+var _ ObjectStorage = &StorageFS{}
50+var _ ObjectStorage = (*StorageFS)(nil)
51+
52 func NewStorageFS(logger *slog.Logger, dir string) (*StorageFS, error) {
53- st, err := sst.NewStorageFS(logger, dir)
54+ return &StorageFS{Logger: logger, Dir: dir}, nil
55+}
56+
57+func (s *StorageFS) GetBucket(name string) (Bucket, error) {
58+ dirPath := filepath.Join(s.Dir, name)
59+ bucket := Bucket{
60+ Name: name,
61+ Path: dirPath,
62+ }
63+ // s.Logger.Info("get bucket", "dir", dirPath)
64+
65+ info, err := os.Stat(dirPath)
66+ if os.IsNotExist(err) {
67+ return bucket, fmt.Errorf("directory does not exist: %v %w", dirPath, err)
68+ }
69+
70+ if err != nil {
71+ return bucket, fmt.Errorf("directory error: %v %w", dirPath, err)
72+
73+ }
74+
75+ if !info.IsDir() {
76+ return bucket, fmt.Errorf("directory is a file, not a directory: %#v", dirPath)
77+ }
78+
79+ return bucket, nil
80+}
81+
82+func (s *StorageFS) UpsertBucket(name string) (Bucket, error) {
83+ s.Logger.Info("upsert bucket", "name", name)
84+ bucket, err := s.GetBucket(name)
85+ if err == nil {
86+ return bucket, nil
87+ }
88+
89+ dir := filepath.Join(s.Dir, name)
90+ s.Logger.Info("bucket not found, creating", "dir", dir, "err", err)
91+ err = os.MkdirAll(dir, os.ModePerm)
92+ if err != nil {
93+ return bucket, err
94+ }
95+
96+ return bucket, nil
97+}
98+
99+func (s *StorageFS) GetBucketQuota(bucket Bucket) (uint64, error) {
100+ dsize, err := dirSize(bucket.Path)
101+ return uint64(dsize), err
102+}
103+
104+// DeleteBucket will delete all contents regardless if files exist inside of it.
105+func (s *StorageFS) DeleteBucket(bucket Bucket) error {
106+ return os.RemoveAll(bucket.Path)
107+}
108+
109+func (s *StorageFS) GetObject(bucket Bucket, fpath string) (utils.ReadAndReaderAtCloser, *ObjectInfo, error) {
110+ objInfo := &ObjectInfo{
111+ Size: 0,
112+ LastModified: time.Time{},
113+ Metadata: make(http.Header),
114+ ETag: "",
115+ }
116+
117+ dat, err := os.Open(filepath.Join(bucket.Path, fpath))
118+ if err != nil {
119+ return nil, objInfo, err
120+ }
121+
122+ info, err := dat.Stat()
123 if err != nil {
124- return nil, err
125+ _ = dat.Close()
126+ return nil, objInfo, err
127 }
128- return &StorageFS{st, logger}, nil
129+
130+ etag := ""
131+ // only generate etag if file is less than 10MB
132+ if info.Size() <= int64(10*MB) {
133+ // calculate etag
134+ h := md5.New()
135+ if _, err := io.Copy(h, dat); err != nil {
136+ _ = dat.Close()
137+ return nil, objInfo, err
138+ }
139+ md5Sum := h.Sum(nil)
140+ etag = hex.EncodeToString(md5Sum)
141+
142+ // reset os.File reader
143+ _, err = dat.Seek(0, io.SeekStart)
144+ if err != nil {
145+ _ = dat.Close()
146+ return nil, objInfo, err
147+ }
148+ }
149+
150+ objInfo.ETag = etag
151+ objInfo.Size = info.Size()
152+ objInfo.LastModified = info.ModTime()
153+ objInfo.Metadata.Set("content-type", mime.GetMimeType(fpath))
154+ return dat, objInfo, nil
155+}
156+
157+func (s *StorageFS) PutObject(bucket Bucket, fpath string, contents io.Reader, entry *utils.FileEntry) (string, int64, error) {
158+ loc := filepath.Join(bucket.Path, fpath)
159+ err := os.MkdirAll(filepath.Dir(loc), os.ModePerm)
160+ if err != nil {
161+ return "", 0, err
162+ }
163+ out, err := renameio.NewPendingFile(loc, renameio.WithPermissions(os.ModePerm))
164+ if err != nil {
165+ return "", 0, err
166+ }
167+
168+ size, err := io.Copy(out, contents)
169+ if err != nil {
170+ return "", 0, err
171+ }
172+
173+ if err := out.CloseAtomicallyReplace(); err != nil {
174+ return "", 0, err
175+ }
176+
177+ if entry.Mtime > 0 {
178+ uTime := time.Unix(entry.Mtime, 0)
179+ _ = os.Chtimes(loc, uTime, uTime)
180+ }
181+
182+ return loc, size, nil
183+}
184+
185+func (s *StorageFS) DeleteObject(bucket Bucket, fpath string) error {
186+ loc := filepath.Join(bucket.Path, fpath)
187+ err := os.Remove(loc)
188+ if err != nil {
189+ if os.IsNotExist(err) {
190+ return nil
191+ }
192+ return err
193+ }
194+
195+ // traverse up the folder tree and remove all empty folders
196+ dir := filepath.Dir(loc)
197+ for dir != "" {
198+ f, err := os.Open(dir)
199+ if err != nil {
200+ s.Logger.Info("open dir", "dir", dir, "err", err)
201+ break
202+ }
203+ defer func() {
204+ _ = f.Close()
205+ }()
206+
207+ // https://stackoverflow.com/a/30708914
208+ contents, err := f.Readdirnames(-1)
209+ if err != nil {
210+ s.Logger.Info("read dir", "dir", dir, "err", err)
211+ break
212+ }
213+ if len(contents) > 0 {
214+ break
215+ }
216+
217+ err = os.Remove(dir)
218+ if err != nil {
219+ s.Logger.Info("remove dir", "dir", dir, "err", err)
220+ break
221+ }
222+ fp := strings.Split(dir, "/")
223+ prefix := ""
224+ if strings.HasPrefix(loc, "/") {
225+ prefix = "/"
226+ }
227+ dir = prefix + filepath.Join(fp[:len(fp)-1]...)
228+ }
229+
230+ return nil
231+}
232+
233+func (s *StorageFS) ListBuckets() ([]string, error) {
234+ entries, err := os.ReadDir(s.Dir)
235+ if err != nil {
236+ return []string{}, err
237+ }
238+
239+ buckets := []string{}
240+ for _, e := range entries {
241+ if !e.IsDir() {
242+ continue
243+ }
244+ buckets = append(buckets, e.Name())
245+ }
246+ return buckets, nil
247+}
248+
249+func (s *StorageFS) ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error) {
250+ fileList := []os.FileInfo{}
251+
252+ fpath := path.Join(bucket.Path, dir)
253+
254+ info, err := os.Stat(fpath)
255+ if err != nil {
256+ if os.IsNotExist(err) {
257+ return fileList, nil
258+ }
259+ return fileList, err
260+ }
261+
262+ if info.IsDir() && !strings.HasSuffix(dir, "/") {
263+ fileList = append(fileList, &utils.VirtualFile{
264+ FName: "",
265+ FIsDir: info.IsDir(),
266+ FSize: info.Size(),
267+ FModTime: info.ModTime(),
268+ })
269+
270+ return fileList, err
271+ }
272+
273+ var files []utils.VirtualFile
274+
275+ if recursive {
276+ err = filepath.WalkDir(fpath, func(s string, d fs.DirEntry, err error) error {
277+ if err != nil {
278+ return err
279+ }
280+ info, err := d.Info()
281+ if err != nil {
282+ return nil
283+ }
284+ fname := strings.TrimPrefix(s, fpath)
285+ if fname == "" {
286+ return nil
287+ }
288+ // rsync does not expect prefixed `/` so without this `rsync --delete` is borked
289+ fname = strings.TrimPrefix(fname, "/")
290+ files = append(files, utils.VirtualFile{
291+ FName: fname,
292+ FIsDir: info.IsDir(),
293+ FSize: info.Size(),
294+ FModTime: info.ModTime(),
295+ })
296+ return nil
297+ })
298+ if err != nil {
299+ fileList = append(fileList, info)
300+ return fileList, nil
301+ }
302+ } else {
303+ fls, err := os.ReadDir(fpath)
304+ if err != nil {
305+ fileList = append(fileList, info)
306+ return fileList, nil
307+ }
308+ for _, d := range fls {
309+ info, err := d.Info()
310+ if err != nil {
311+ continue
312+ }
313+ fp := info.Name()
314+ files = append(files, utils.VirtualFile{
315+ FName: fp,
316+ FIsDir: info.IsDir(),
317+ FSize: info.Size(),
318+ FModTime: info.ModTime(),
319+ })
320+ }
321+ }
322+
323+ for _, f := range files {
324+ fileList = append(fileList, &f)
325+ }
326+
327+ return fileList, err
328 }
329
330-func (s *StorageFS) ServeObject(r *http.Request, bucket sst.Bucket, fpath string, opts *ImgProcessOpts) (io.ReadCloser, *sst.ObjectInfo, error) {
331+func (s *StorageFS) ServeObject(r *http.Request, bucket Bucket, fpath string, opts *ImgProcessOpts) (io.ReadCloser, *ObjectInfo, error) {
332 var rc io.ReadCloser
333- info := &sst.ObjectInfo{}
334+ info := &ObjectInfo{}
335 var err error
336 mimeType := mime.GetMimeType(fpath)
337 if !strings.HasPrefix(mimeType, "image/") || opts == nil || os.Getenv("IMGPROXY_URL") == "" {
1@@ -1,4 +1,4 @@
2-package pobj
3+package storage
4
5 import (
6 "bytes"
7@@ -10,15 +10,14 @@ import (
8 "path/filepath"
9 "time"
10
11- "github.com/picosh/pico/pkg/pobj/storage"
12 "github.com/picosh/pico/pkg/pssh"
13 "github.com/picosh/pico/pkg/send/utils"
14 )
15
16 type ctxBucketKey struct{}
17
18-func getBucket(ctx *pssh.SSHServerConnSession) (storage.Bucket, error) {
19- bucket, ok := ctx.Value(ctxBucketKey{}).(storage.Bucket)
20+func getBucket(ctx *pssh.SSHServerConnSession) (Bucket, error) {
21+ bucket, ok := ctx.Value(ctxBucketKey{}).(Bucket)
22 if !ok {
23 return bucket, fmt.Errorf("bucket not set on `ssh.Context()` for connection")
24 }
25@@ -27,7 +26,7 @@ func getBucket(ctx *pssh.SSHServerConnSession) (storage.Bucket, error) {
26 }
27 return bucket, nil
28 }
29-func setBucket(ctx *pssh.SSHServerConnSession, bucket storage.Bucket) {
30+func setBucket(ctx *pssh.SSHServerConnSession, bucket Bucket) {
31 ctx.SetValue(ctxBucketKey{}, bucket)
32 }
33
34@@ -35,12 +34,12 @@ type FileData struct {
35 *utils.FileEntry
36 Text []byte
37 User string
38- Bucket storage.Bucket
39+ Bucket Bucket
40 }
41
42 type Config struct {
43 Logger *slog.Logger
44- Storage storage.ObjectStorage
45+ Storage ObjectStorage
46 AssetNames AssetNames
47 }
48
1@@ -1,27 +1,214 @@
2 package storage
3
4 import (
5+ "fmt"
6 "io"
7 "net/http"
8+ "os"
9+ "path/filepath"
10+ "strings"
11+ "sync"
12 "time"
13
14- sst "github.com/picosh/pico/pkg/pobj/storage"
15+ "github.com/picosh/pico/pkg/send/utils"
16 "github.com/picosh/pico/pkg/shared/mime"
17 )
18
19 type StorageMemory struct {
20- *sst.StorageMemory
21+ storage map[string]map[string]string
22+ mu sync.RWMutex
23 }
24
25-func NewStorageMemory(sto map[string]map[string]string) (*StorageMemory, error) {
26- st, err := sst.NewStorageMemory(sto)
27+var _ ObjectStorage = &StorageMemory{}
28+var _ ObjectStorage = (*StorageMemory)(nil)
29+
30+func NewStorageMemory(st map[string]map[string]string) (*StorageMemory, error) {
31+ return &StorageMemory{
32+ storage: st,
33+ }, nil
34+}
35+
36+func (s *StorageMemory) GetBucket(name string) (Bucket, error) {
37+ s.mu.RLock()
38+ defer s.mu.RUnlock()
39+
40+ bucket := Bucket{
41+ Name: name,
42+ Path: name,
43+ }
44+
45+ _, ok := s.storage[name]
46+ if !ok {
47+ return bucket, fmt.Errorf("bucket does not exist")
48+ }
49+
50+ return bucket, nil
51+}
52+
53+func (s *StorageMemory) UpsertBucket(name string) (Bucket, error) {
54+ bucket, err := s.GetBucket(name)
55+ if err == nil {
56+ return bucket, nil
57+ }
58+
59+ s.mu.Lock()
60+ defer s.mu.Unlock()
61+
62+ s.storage[name] = map[string]string{}
63+ return bucket, nil
64+}
65+
66+func (s *StorageMemory) GetBucketQuota(bucket Bucket) (uint64, error) {
67+ s.mu.RLock()
68+ defer s.mu.RUnlock()
69+
70+ objects := s.storage[bucket.Path]
71+ size := 0
72+ for _, val := range objects {
73+ size += len(val)
74+ }
75+ return uint64(size), nil
76+}
77+
78+func (s *StorageMemory) DeleteBucket(bucket Bucket) error {
79+ s.mu.Lock()
80+ defer s.mu.Unlock()
81+
82+ delete(s.storage, bucket.Path)
83+ return nil
84+}
85+
86+func (s *StorageMemory) GetObject(bucket Bucket, fpath string) (utils.ReadAndReaderAtCloser, *ObjectInfo, error) {
87+ s.mu.RLock()
88+ defer s.mu.RUnlock()
89+
90+ if !strings.HasPrefix(fpath, "/") {
91+ fpath = "/" + fpath
92+ }
93+
94+ objInfo := &ObjectInfo{
95+ LastModified: time.Time{},
96+ Metadata: nil,
97+ }
98+
99+ dat, ok := s.storage[bucket.Path][fpath]
100+ if !ok {
101+ return nil, objInfo, fmt.Errorf("object does not exist: %s", fpath)
102+ }
103+
104+ objInfo.Size = int64(len(dat))
105+ reader := utils.NopReadAndReaderAtCloser(strings.NewReader(dat))
106+ return reader, objInfo, nil
107+}
108+
109+func (s *StorageMemory) PutObject(bucket Bucket, fpath string, contents io.Reader, entry *utils.FileEntry) (string, int64, error) {
110+ s.mu.Lock()
111+ defer s.mu.Unlock()
112+
113+ d, err := io.ReadAll(contents)
114 if err != nil {
115- return nil, err
116+ return "", 0, err
117+ }
118+
119+ s.storage[bucket.Path][fpath] = string(d)
120+ return fmt.Sprintf("%s%s", bucket.Path, fpath), int64(len(d)), nil
121+}
122+
123+func (s *StorageMemory) DeleteObject(bucket Bucket, fpath string) error {
124+ s.mu.Lock()
125+ defer s.mu.Unlock()
126+
127+ delete(s.storage[bucket.Path], fpath)
128+ return nil
129+}
130+
131+func (s *StorageMemory) ListBuckets() ([]string, error) {
132+ s.mu.RLock()
133+ defer s.mu.RUnlock()
134+
135+ buckets := []string{}
136+ for key := range s.storage {
137+ buckets = append(buckets, key)
138 }
139- return &StorageMemory{st}, nil
140+ return buckets, nil
141+}
142+
143+func (s *StorageMemory) ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error) {
144+ s.mu.RLock()
145+ defer s.mu.RUnlock()
146+
147+ var fileList []os.FileInfo
148+
149+ resolved := dir
150+
151+ if !strings.HasPrefix(resolved, "/") {
152+ resolved = "/" + resolved
153+ }
154+
155+ objects := s.storage[bucket.Path]
156+ // dir is actually an object
157+ oval, ok := objects[resolved]
158+ if ok {
159+ fileList = append(fileList, &utils.VirtualFile{
160+ FName: filepath.Base(resolved),
161+ FIsDir: false,
162+ FSize: int64(len(oval)),
163+ FModTime: time.Time{},
164+ })
165+ return fileList, nil
166+ }
167+
168+ for key, val := range objects {
169+ if !strings.HasPrefix(key, resolved) {
170+ continue
171+ }
172+
173+ rep := strings.Replace(key, resolved, "", 1)
174+ fdir := filepath.Dir(rep)
175+ fname := filepath.Base(rep)
176+ paths := strings.Split(fdir, "/")
177+
178+ if fdir == "/" {
179+ ffname := filepath.Base(resolved)
180+ fileList = append(fileList, &utils.VirtualFile{
181+ FName: ffname,
182+ FIsDir: true,
183+ })
184+ }
185+
186+ for _, p := range paths {
187+ if p == "" || p == "/" || p == "." {
188+ continue
189+ }
190+ fileList = append(fileList, &utils.VirtualFile{
191+ FName: p,
192+ FIsDir: true,
193+ })
194+ }
195+
196+ trimRes := strings.TrimSuffix(resolved, "/")
197+ dirKey := filepath.Dir(key)
198+ if recursive {
199+ fileList = append(fileList, &utils.VirtualFile{
200+ FName: fname,
201+ FIsDir: false,
202+ FSize: int64(len(val)),
203+ FModTime: time.Time{},
204+ })
205+ } else if resolved == dirKey || trimRes == dirKey {
206+ fileList = append(fileList, &utils.VirtualFile{
207+ FName: fname,
208+ FIsDir: false,
209+ FSize: int64(len(val)),
210+ FModTime: time.Time{},
211+ })
212+ }
213+ }
214+
215+ return fileList, nil
216 }
217
218-func (s *StorageMemory) ServeObject(r *http.Request, bucket sst.Bucket, fpath string, opts *ImgProcessOpts) (io.ReadCloser, *sst.ObjectInfo, error) {
219+func (s *StorageMemory) ServeObject(r *http.Request, bucket Bucket, fpath string, opts *ImgProcessOpts) (io.ReadCloser, *ObjectInfo, error) {
220 obj, info, err := s.GetObject(bucket, fpath)
221 if info.Metadata == nil {
222 info.Metadata = make(http.Header)
1@@ -13,8 +13,6 @@ import (
2 "strconv"
3 "strings"
4 "time"
5-
6- "github.com/picosh/pico/pkg/pobj/storage"
7 )
8
9 func UriToImgProcessOpts(uri string) (*ImgProcessOpts, error) {
10@@ -133,7 +131,7 @@ func (img *ImgProcessOpts) String() string {
11 return processOpts
12 }
13
14-func HandleProxy(r *http.Request, logger *slog.Logger, dataURL string, opts *ImgProcessOpts) (io.ReadCloser, *storage.ObjectInfo, error) {
15+func HandleProxy(r *http.Request, logger *slog.Logger, dataURL string, opts *ImgProcessOpts) (io.ReadCloser, *ObjectInfo, error) {
16 imgProxyURL := os.Getenv("IMGPROXY_URL")
17 imgProxySalt := os.Getenv("IMGPROXY_SALT")
18 imgProxyKey := os.Getenv("IMGPROXY_KEY")
19@@ -182,7 +180,7 @@ func HandleProxy(r *http.Request, logger *slog.Logger, dataURL string, opts *Img
20 if err != nil {
21 logger.Error("decoding last-modified", "err", err)
22 }
23- info := &storage.ObjectInfo{
24+ info := &ObjectInfo{
25 Size: res.ContentLength,
26 ETag: trimEtag(res.Header.Get("etag")),
27 Metadata: res.Header.Clone(),
1@@ -1,4 +1,4 @@
2-package pobj
3+package storage
4
5 import (
6 "errors"
1@@ -3,11 +3,39 @@ package storage
2 import (
3 "io"
4 "net/http"
5+ "os"
6+ "time"
7
8- sst "github.com/picosh/pico/pkg/pobj/storage"
9+ "github.com/picosh/pico/pkg/send/utils"
10 )
11
12+type Bucket struct {
13+ Name string
14+ Path string
15+ Root string
16+}
17+
18+type ObjectStorage interface {
19+ GetBucket(name string) (Bucket, error)
20+ GetBucketQuota(bucket Bucket) (uint64, error)
21+ UpsertBucket(name string) (Bucket, error)
22+ ListBuckets() ([]string, error)
23+ DeleteBucket(bucket Bucket) error
24+
25+ GetObject(bucket Bucket, fpath string) (utils.ReadAndReaderAtCloser, *ObjectInfo, error)
26+ PutObject(bucket Bucket, fpath string, contents io.Reader, entry *utils.FileEntry) (string, int64, error)
27+ DeleteObject(bucket Bucket, fpath string) error
28+ ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error)
29+}
30+
31+type ObjectInfo struct {
32+ Size int64
33+ LastModified time.Time
34+ ETag string
35+ Metadata http.Header
36+}
37+
38 type StorageServe interface {
39- sst.ObjectStorage
40- ServeObject(r *http.Request, bucket sst.Bucket, fpath string, opts *ImgProcessOpts) (io.ReadCloser, *sst.ObjectInfo, error)
41+ ObjectStorage
42+ ServeObject(r *http.Request, bucket Bucket, fpath string, opts *ImgProcessOpts) (io.ReadCloser, *ObjectInfo, error)
43 }