driver

package
v0.0.0-...-a58c836 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Dec 14, 2025 License: AGPL-3.0 Imports: 4 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func ServerUploadLimitWaitN

func ServerUploadLimitWaitN(ctx context.Context, n int) error

Types

type Additional

type Additional interface{}

type ArchiveDecompress

type ArchiveDecompress interface {
	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error
}

type ArchiveDecompressResult

type ArchiveDecompressResult interface {
	// ArchiveDecompress decompresses an archive
	// when args.PutIntoNewDir, the new sub-folder should be named the same as the archive but without the extension
	// return each decompressed obj from the root path of the archive when args.PutIntoNewDir is false
	// return only the newly created folder when args.PutIntoNewDir is true
	// return errs.NotImplement to use internal archive tools to decompress
	ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error)
}

type ArchiveGetter

type ArchiveGetter interface {
	// ArchiveGet gets a file by its inner path
	// return errs.NotImplement to use internal archive tools to get the children
	// return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree
	ArchiveGet(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (model.Obj, error)
}

type ArchiveReader

type ArchiveReader interface {
	// GetArchiveMeta gets the meta-info of an archive
	// return errs.WrongArchivePassword if the meta-info is also encrypted but provided password is wrong or empty
	// return errs.NotImplement to use internal archive tools to get the meta-info, such as in the following cases:
	// 1. the driver does not support the format of the archive, but an internal tool may
	// 2. handling archives is a VIP feature, but the driver does not have VIP access
	GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error)
	// ListArchive lists the children of model.ArchiveArgs.InnerPath in the archive
	// return errs.NotImplement to use internal archive tools to list the children
	// return errs.NotSupport if the folder structure should be acquired from model.ArchiveMeta.GetTree
	ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error)
	// Extract gets the URL/filepath/reader of a file in the archive
	// return errs.NotImplement to use internal archive tools to extract
	Extract(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) (*model.Link, error)
}

type Config

type Config struct {
	Name              string `json:"name"`
	LocalSort         bool   `json:"local_sort"`
	OnlyLocal         bool   `json:"only_local"`
	OnlyProxy         bool   `json:"only_proxy"`
	NoCache           bool   `json:"no_cache"`
	NoUpload          bool   `json:"no_upload"`
	NeedMs            bool   `json:"need_ms"` // whether a message needs to be obtained from the user, such as a validation code
	DefaultRoot       string `json:"default_root"`
	CheckStatus       bool   `json:"-"`
	Alert             string `json:"alert"` // info, success, warning, danger
	NoOverwriteUpload bool   `json:"-"`     // whether to support overwrite upload
	ProxyRangeOption  bool   `json:"-"`
}

func (Config) MustProxy

func (c Config) MustProxy() bool

type Copy

type Copy interface {
	Copy(ctx context.Context, srcObj, dstDir model.Obj) error
}

type CopyResult

type CopyResult interface {
	Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
}

type Driver

type Driver interface {
	Meta
	Reader
}

type GetRooter

type GetRooter interface {
	GetRoot(ctx context.Context) (model.Obj, error)
}

type Getter

type Getter interface {
	// Get gets a file by path; the path has not been joined with the root path
	Get(ctx context.Context, path string) (model.Obj, error)
}

type IRootId

type IRootId interface {
	GetRootId() string
}

type IRootPath

type IRootPath interface {
	GetRootPath() string
}

type Info

type Info struct {
	Common     []Item `json:"common"`
	Additional []Item `json:"additional"`
	Config     Config `json:"config"`
}

type Item

type Item struct {
	Name     string `json:"name"`
	Type     string `json:"type"`
	Default  string `json:"default"`
	Options  string `json:"options"`
	Required bool   `json:"required"`
	Help     string `json:"help"`
}

type Meta

type Meta interface {
	Config() Config
	// GetStorage just gets the raw storage; there is no need to implement this, because model.Storage has already implemented it
	GetStorage() *model.Storage
	SetStorage(model.Storage)
	// GetAddition returns Additional, which is used for JSON unmarshalling, so it needs to return a pointer
	GetAddition() Additional
	// Init If already initialized, drop first
	Init(ctx context.Context) error
	Drop(ctx context.Context) error
}

type Mkdir

type Mkdir interface {
	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error
}

type MkdirResult

type MkdirResult interface {
	MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error)
}

type Move

type Move interface {
	Move(ctx context.Context, srcObj, dstDir model.Obj) error
}

type MoveResult

type MoveResult interface {
	Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
}

type Other

type Other interface {
	Other(ctx context.Context, args model.OtherArgs) (interface{}, error)
}

type Progress

type Progress struct {
	Total int64
	Done  int64
	// contains filtered or unexported fields
}

func NewProgress

func NewProgress(total int64, up UpdateProgress) *Progress

func (*Progress) Write

func (p *Progress) Write(b []byte) (n int, err error)

type Put

type Put interface {
	// Put a file (provided as a FileStreamer) into the driver
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
	//   (1) Use request methods that carry context, such as the following:
	//      a. http.NewRequestWithContext
	//      b. resty.Request.SetContext
	//      c. s3manager.Uploader.UploadWithContext
	//      d. utils.CopyWithCtx
	//   (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	//   (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
	//       this is typically applicable to chunked uploads.
	// 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
	//   (1) Use `utils.CopyWithCtx`
	//   (2) Use `driver.ReaderUpdatingProgress`
	//   (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	//    before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
	//    if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use a `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive
	// mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive
	// memory usage caused by buffering too many file chunks awaiting upload.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) error
}

type PutResult

type PutResult interface {
	// Put a file (provided as a FileStreamer) into the driver and return the put obj
	// Besides the most basic upload functionality, the following features also need to be implemented:
	// 1. Canceling (when `<-ctx.Done()` returns), which can be supported by the following methods:
	//   (1) Use request methods that carry context, such as the following:
	//      a. http.NewRequestWithContext
	//      b. resty.Request.SetContext
	//      c. s3manager.Uploader.UploadWithContext
	//      d. utils.CopyWithCtx
	//   (2) Use a `driver.ReaderWithCtx` or `driver.NewLimitedUploadStream`
	//   (3) Use `utils.IsCanceled` to check if the upload has been canceled during the upload process,
	//       this is typically applicable to chunked uploads.
	// 2. Submit upload progress (via `up`) in real-time. There are three recommended ways as follows:
	//   (1) Use `utils.CopyWithCtx`
	//   (2) Use `driver.ReaderUpdatingProgress`
	//   (3) Use `driver.Progress` with `io.TeeReader`
	// 3. Slow down upload speed (via `stream.ServerUploadLimit`). It requires you to wrap the read stream
	//    in a `driver.RateLimitReader` or a `driver.RateLimitFile` after calculating the file's hash and
	//    before uploading the file or file chunks. Or you can directly call `driver.ServerUploadLimitWaitN`
	//    if your file chunks are sufficiently small (less than about 50KB).
	// NOTE that the network speed may be significantly slower than the stream's read speed. Therefore, if
	// you use a `errgroup.Group` to upload each chunk in parallel, you should consider using a recursive
	// mutex like `semaphore.Weighted` to limit the maximum number of upload threads, preventing excessive
	// memory usage caused by buffering too many file chunks awaiting upload.
	Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up UpdateProgress) (model.Obj, error)
}

type PutURL

type PutURL interface {
	// PutURL directly puts a URL into the storage
	// Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs
	// Called when using SimpleHttp for offline downloading, skipping creating a download task
	PutURL(ctx context.Context, dstDir model.Obj, name, url string) error
}

type PutURLResult

type PutURLResult interface {
	// PutURL directly puts a URL into the storage
	// Applicable to index-based drivers like URL-Tree or drivers that support uploading files as URLs
	// Called when using SimpleHttp for offline downloading, skipping creating a download task
	PutURL(ctx context.Context, dstDir model.Obj, name, url string) (model.Obj, error)
}

type RateLimitFile

type RateLimitFile = stream.RateLimitFile

func NewLimitedUploadFile

func NewLimitedUploadFile(ctx context.Context, f model.File) *RateLimitFile

type RateLimitReader

type RateLimitReader = stream.RateLimitReader

func NewLimitedUploadStream

func NewLimitedUploadStream(ctx context.Context, r io.Reader) *RateLimitReader

type RateLimitWriter

type RateLimitWriter = stream.RateLimitWriter

type Reader

type Reader interface {
	// List lists the files in the path
	// if files are identified by path, the ID needs to be set to the path, like path.Join(dir.GetID(), obj.GetName())
	// if files are identified by id, the ID needs to be set to the corresponding id
	List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error)
	// Link gets the URL/filepath/reader of a file
	Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error)
}

type ReaderUpdatingProgress

type ReaderUpdatingProgress = stream.ReaderUpdatingProgress

type ReaderWithCtx

type ReaderWithCtx = stream.ReaderWithCtx

type Reference

type Reference interface {
	InitReference(storage Driver) error
}

type Remove

type Remove interface {
	Remove(ctx context.Context, obj model.Obj) error
}

type Rename

type Rename interface {
	Rename(ctx context.Context, srcObj model.Obj, newName string) error
}

type RenameResult

type RenameResult interface {
	Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error)
}

type RootID

type RootID struct {
	RootFolderID string `json:"root_folder_id"`
}

func (RootID) GetRootId

func (r RootID) GetRootId() string

type RootPath

type RootPath struct {
	RootFolderPath string `json:"root_folder_path"`
}

func (RootPath) GetRootPath

func (r RootPath) GetRootPath() string

func (*RootPath) SetRootPath

func (r *RootPath) SetRootPath(path string)

type Select

type Select string

type SimpleReaderWithSize

type SimpleReaderWithSize = stream.SimpleReaderWithSize

type UpdateProgress

type UpdateProgress = model.UpdateProgress

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL