Eric Bower
·
2025-04-21
fs.go
package storage

import (
	"fmt"
	"io"
	"io/fs"
	"log/slog"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/picosh/pico/pkg/send/utils"
	"github.com/picosh/pico/pkg/shared/mime"
)

// https://stackoverflow.com/a/32482941
func dirSize(path string) (int64, error) {
	var size int64
	err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			size += info.Size()
		}
		return err
	})

	return size, err
}

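// StorageFS is an ObjectStorage implementation backed by the local
// filesystem: each bucket is a directory underneath Dir.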
type StorageFS struct {
	Dir    string
	Logger *slog.Logger
}

var _ ObjectStorage = (*StorageFS)(nil)

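// NewStorageFS creates a filesystem-backed store rooted at dir.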
func NewStorageFS(logger *slog.Logger, dir string) (*StorageFS, error) {
	return &StorageFS{Logger: logger, Dir: dir}, nil
}

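// GetBucket resolves a bucket name to its directory under Dir and verifies
// that the directory exists.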
func (s *StorageFS) GetBucket(name string) (Bucket, error) {
	dirPath := filepath.Join(s.Dir, name)
	bucket := Bucket{
		Name: name,
		Path: dirPath,
	}
	s.Logger.Info("get bucket", "dir", dirPath)

	info, err := os.Stat(dirPath)
	if os.IsNotExist(err) {
		return bucket, fmt.Errorf("directory does not exist: %v %w", dirPath, err)
	}

	if err != nil {
		return bucket, fmt.Errorf("directory error: %v %w", dirPath, err)
	}

	if !info.IsDir() {
		return bucket, fmt.Errorf("path is a file, not a directory: %#v", dirPath)
	}

	return bucket, nil
}

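// UpsertBucket returns the bucket if it already exists, otherwise it creates
// the backing directory first.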
func (s *StorageFS) UpsertBucket(name string) (Bucket, error) {
	s.Logger.Info("upsert bucket", "name", name)
	bucket, err := s.GetBucket(name)
	if err == nil {
		return bucket, nil
	}

	dir := filepath.Join(s.Dir, name)
	s.Logger.Info("bucket not found, creating", "dir", dir, "err", err)
	err = os.MkdirAll(dir, os.ModePerm)
	if err != nil {
		return bucket, err
	}

	return bucket, nil
}

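// GetBucketQuota reports the total size in bytes of all files in the bucket.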
func (s *StorageFS) GetBucketQuota(bucket Bucket) (uint64, error) {
	dsize, err := dirSize(bucket.Path)
	return uint64(dsize), err
}

// DeleteBucket removes the bucket directory and all of its contents, even if
// files still exist inside it. This differs from the minio implementation,
// which requires all objects to be deleted first.
func (s *StorageFS) DeleteBucket(bucket Bucket) error {
	return os.RemoveAll(bucket.Path)
}

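// GetObject opens the file at fpath inside the bucket and returns it along
// with its size, modification time, and a content-type header.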
func (s *StorageFS) GetObject(bucket Bucket, fpath string) (utils.ReadAndReaderAtCloser, *ObjectInfo, error) {
	objInfo := &ObjectInfo{
		LastModified: time.Time{},
		Metadata:     make(http.Header),
	}

	dat, err := os.Open(filepath.Join(bucket.Path, fpath))
	if err != nil {
		return nil, objInfo, err
	}

	info, err := dat.Stat()
	if err != nil {
		// avoid leaking the file handle when stat fails
		_ = dat.Close()
		return nil, objInfo, err
	}

	objInfo.Size = info.Size()
	objInfo.LastModified = info.ModTime()
	objInfo.Metadata.Set("content-type", mime.GetMimeType(fpath))
	return dat, objInfo, nil
}

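// PutObject writes contents to fpath inside the bucket, creating parent
// directories as needed, and returns the on-disk location and bytes written.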
func (s *StorageFS) PutObject(bucket Bucket, fpath string, contents io.Reader, entry *utils.FileEntry) (string, int64, error) {
	loc := filepath.Join(bucket.Path, fpath)
	err := os.MkdirAll(filepath.Dir(loc), os.ModePerm)
	if err != nil {
		return "", 0, err
	}
	f, err := os.OpenFile(loc, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", 0, err
	}

	size, err := io.Copy(f, contents)
	if err != nil {
		_ = f.Close()
		return "", 0, err
	}

	// some write errors are only reported when the file is closed
	if err := f.Close(); err != nil {
		return "", 0, err
	}

	if entry.Mtime > 0 {
		uTime := time.Unix(entry.Mtime, 0)
		_ = os.Chtimes(loc, uTime, uTime)
	}

	return loc, size, nil
}

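// DeleteObject removes the file at fpath and then prunes any parent
// directories that are left empty.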
func (s *StorageFS) DeleteObject(bucket Bucket, fpath string) error {
	loc := filepath.Join(bucket.Path, fpath)
	err := os.Remove(loc)
	if err != nil {
		return err
	}

	// traverse up the folder tree and remove all empty folders;
	// os.Remove refuses to delete a non-empty directory, so the walk
	// stops at the first parent that still has contents
	dir := filepath.Dir(loc)
	for dir != "" {
		err = os.Remove(dir)
		if err != nil {
			break
		}
		dir = filepath.Dir(dir)
	}

	return nil
}

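// ListBuckets lists every top-level directory under Dir as a bucket name.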
func (s *StorageFS) ListBuckets() ([]string, error) {
	entries, err := os.ReadDir(s.Dir)
	if err != nil {
		return []string{}, err
	}

	buckets := []string{}
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		buckets = append(buckets, e.Name())
	}
	return buckets, nil
}

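// ListObjects returns the entries under dir inside the bucket, walking the
// whole subtree when recursive is true.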
func (s *StorageFS) ListObjects(bucket Bucket, dir string, recursive bool) ([]os.FileInfo, error) {
	var fileList []os.FileInfo

	fpath := path.Join(bucket.Path, dir)

	info, err := os.Stat(fpath)
	if err != nil {
		return fileList, err
	}

	if info.IsDir() && !strings.HasSuffix(dir, "/") {
		fileList = append(fileList, &utils.VirtualFile{
			FName:    "",
			FIsDir:   info.IsDir(),
			FSize:    info.Size(),
			FModTime: info.ModTime(),
		})

		return fileList, err
	}

	var files []fs.DirEntry

	if recursive {
		err = filepath.WalkDir(fpath, func(name string, d fs.DirEntry, err error) error {
			if err != nil {
				return err
			}
			files = append(files, d)
			return nil
		})
		if err != nil {
			fileList = append(fileList, info)
			return fileList, nil
		}
	} else {
		files, err = os.ReadDir(fpath)
		if err != nil {
			fileList = append(fileList, info)
			return fileList, nil
		}
	}

	for _, f := range files {
		info, err := f.Info()
		if err != nil {
			return fileList, err
		}

		i := &utils.VirtualFile{
			FName:    f.Name(),
			FIsDir:   f.IsDir(),
			FSize:    info.Size(),
			FModTime: info.ModTime(),
		}

		fileList = append(fileList, i)
	}

	return fileList, err
}
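
For context, here is a minimal sketch (not part of fs.go) of how this filesystem-backed store might be exercised end to end. The root path, bucket name, object path, and the empty utils.FileEntry are illustrative assumptions, not values from the original code.

// example_usage.go (hypothetical): exercises the StorageFS API shown above.
package storage

import (
	"fmt"
	"io"
	"log/slog"
	"strings"

	"github.com/picosh/pico/pkg/send/utils"
)

func exampleUsage() error {
	// Root the store in a scratch directory (illustrative path).
	st, err := NewStorageFS(slog.Default(), "/tmp/pico-storage")
	if err != nil {
		return err
	}

	// Create (or reuse) a bucket, which is just a directory under the root.
	bucket, err := st.UpsertBucket("my-bucket")
	if err != nil {
		return err
	}

	// Store an object; parent folders are created on demand.
	loc, size, err := st.PutObject(bucket, "notes/hello.txt", strings.NewReader("hi"), &utils.FileEntry{})
	if err != nil {
		return err
	}
	fmt.Println("wrote", loc, size)

	// Read it back along with its metadata.
	rc, info, err := st.GetObject(bucket, "notes/hello.txt")
	if err != nil {
		return err
	}
	defer rc.Close()

	body, err := io.ReadAll(rc)
	if err != nil {
		return err
	}
	fmt.Println(string(body), info.Size, info.Metadata.Get("content-type"))

	// Deleting the object also prunes the now-empty "notes" folder.
	return st.DeleteObject(bucket, "notes/hello.txt")
}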