backup

package
v18.6.1
Published: Nov 25, 2025 License: MIT Imports: 55 Imported by: 0

Documentation

Constants

This section is empty.

Variables

var (
	// ErrSkipped means the repository was skipped because there was nothing to back up.
	ErrSkipped = errors.New("repository skipped")
	// ErrDoesntExist means that the data was not found.
	ErrDoesntExist = errors.New("doesn't exist")
)

Functions

func NewLocalRepository

func NewLocalRepository(
	logger log.Logger,
	locator storage.Locator,
	gitCmdFactory gitcmd.CommandFactory,
	txManager transaction.Manager,
	repoCounter *counter.RepositoryCounter,
	catfileCache catfile.Cache,
	repo *localrepo.Repo,
	migrationStateManager migration.StateManager,
) *localRepository

NewLocalRepository returns a repository accessor that operates on a local repository.

func NewRemoteRepository

func NewRemoteRepository(repo *gitalypb.Repository, conn *grpc.ClientConn) *remoteRepository

NewRemoteRepository returns a repository accessor that operates on a remote repository.

func NewRepositoryKey

func NewRepositoryKey(repo *gitalypb.Repository) repositoryKey

NewRepositoryKey returns a unique identifier for the provided repo.

Types

type Backup

type Backup struct {
	// ID is the identifier that uniquely identifies the backup for this repository.
	ID string `toml:"-"`
	// Repository is the repository being backed up.
	Repository storage.Repository `toml:"-"`
	// Empty is true if the repository is empty
	Empty bool `toml:"empty"`
	// NonExistent is true if the repository does not exist.
	// Any project or non-project entity without an initialised repository is considered non-existent.
	NonExistent bool `toml:"non_existent"`
	// Steps are the ordered list of steps required to restore this backup
	Steps []Step `toml:"steps"`
	// ObjectFormat is the name of the object hash used by the repository.
	ObjectFormat string `toml:"object_format"`
	// HeadReference is the reference that HEAD points to.
	HeadReference string `toml:"head_reference,omitempty"`
}

Backup represents all the information needed to restore a backup for a repository

type Command

type Command interface {
	Repository() *gitalypb.Repository
	Name() string
	Execute(context.Context) error
}

Command handles a specific backup operation

type CreateClientFunc

CreateClientFunc is a function for creating a partition client. It makes it easier to mock client functions in tests.

type CreateCommand

type CreateCommand struct {
	// contains filtered or unexported fields
}

CreateCommand creates a backup for a repository

func NewCreateCommand

func NewCreateCommand(strategy Strategy, request CreateRequest) *CreateCommand

NewCreateCommand builds a CreateCommand

func (CreateCommand) Execute

func (cmd CreateCommand) Execute(ctx context.Context) error

Execute performs the backup

func (CreateCommand) Name

func (cmd CreateCommand) Name() string

Name is the name of the command

func (CreateCommand) Repository

func (cmd CreateCommand) Repository() *gitalypb.Repository

Repository is the repository that will be acted on

type CreateRequest

type CreateRequest struct {
	// Server contains gitaly server connection information required to call
	// RPCs in the non-local backup.Manager configuration.
	Server storage.ServerInfo
	// Repository is the repository to be backed up.
	Repository *gitalypb.Repository
	// VanityRepository is used to determine the backup path.
	VanityRepository *gitalypb.Repository
	// Incremental when true will create an increment on the specified full backup.
	Incremental bool
	// BackupID is used to determine a unique path for the backup when a full
	// backup is created.
	BackupID string
}

CreateRequest is the request to create a backup

type LazyWriter

type LazyWriter struct {
	// contains filtered or unexported fields
}

LazyWriter is a WriteCloser that calls create on the first call to Write. This means it only creates a file if data is actually written to it.

func NewLazyWriter

func NewLazyWriter(create func() (io.WriteCloser, error)) *LazyWriter

NewLazyWriter initializes a new LazyWriter. create is called on the first call to Write; any error it returns is returned by that Write call.
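
As an illustration (not part of the package), a minimal sketch of wrapping Sink.GetWriter in a LazyWriter so that an empty archive never creates an object in the bucket; uploadHooks, relativePath and src are hypothetical names:

func uploadHooks(ctx context.Context, sink *backup.Sink, relativePath string, src io.Reader) (err error) {
	w := backup.NewLazyWriter(func() (io.WriteCloser, error) {
		return sink.GetWriter(ctx, relativePath)
	})
	defer func() {
		// Close must always be called so the underlying writer is cleaned up.
		if cerr := w.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()

	// If src yields no data, create is never invoked and no object is written.
	_, err = io.Copy(w, src)
	return err
}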

func (*LazyWriter) BytesWritten

func (w *LazyWriter) BytesWritten() int

BytesWritten returns the total number of bytes written to the underlying WriteCloser. The count is never explicitly reset to 0.

func (*LazyWriter) Close

func (w *LazyWriter) Close() error

Close calls Close on the WriteCloser returned by Create, passing on any returned error. Close must be called to properly clean up resources.

func (*LazyWriter) Write

func (w *LazyWriter) Write(p []byte) (int, error)

type LegacyLocator

type LegacyLocator struct{}

LegacyLocator locates backup paths for historic backups. This is the structure that GitLab used before incremental backups were introduced.

Existing backup files are expected to be overwritten by the latest backup files.

Structure:

<repo relative path>.bundle
<repo relative path>.refs
<repo relative path>/custom_hooks.tar

func (LegacyLocator) BeginFull

func (l LegacyLocator) BeginFull(ctx context.Context, repo storage.Repository, backupID string) *Backup

BeginFull returns the static paths for a legacy repository backup

func (LegacyLocator) BeginIncremental

func (l LegacyLocator) BeginIncremental(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)

BeginIncremental is not supported for legacy backups

func (LegacyLocator) Commit

func (l LegacyLocator) Commit(ctx context.Context, full *Backup) error

Commit is unused as the locations are static

func (LegacyLocator) Find

func (l LegacyLocator) Find(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)

Find is not supported for legacy backups.

func (LegacyLocator) FindLatest

func (l LegacyLocator) FindLatest(ctx context.Context, repo storage.Repository) (*Backup, error)

FindLatest returns the static paths for a legacy repository backup

type ListIterator

type ListIterator struct {
	// contains filtered or unexported fields
}

ListIterator allows iterating over objects stored in the Sink.

func (*ListIterator) Err

func (li *ListIterator) Err() error

Err returns the iteration error if there is one.

func (*ListIterator) Next

func (li *ListIterator) Next(ctx context.Context) bool

Next retrieves the next result from the iterator. It returns true if there are more objects to retrieve and false if an error occurred or there are no more objects. It is the caller's responsibility to check for iteration errors by calling Err.
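
The Next/Err contract translates into the usual Go iteration loop. A sketch, assuming a *Sink obtained elsewhere; listPaths itself is not part of the package:

func listPaths(ctx context.Context, sink *backup.Sink, prefix string) ([]string, error) {
	var paths []string

	it := sink.List(prefix)
	for it.Next(ctx) {
		paths = append(paths, it.Path())
	}
	// Next returns false on both exhaustion and failure, so Err must be
	// checked once the loop ends.
	if err := it.Err(); err != nil {
		return nil, err
	}
	return paths, nil
}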

func (*ListIterator) Path

func (li *ListIterator) Path() string

Path is the path of the current object.

type Locator

type Locator interface {
	// BeginFull returns the tentative backup paths needed to create a full backup.
	BeginFull(ctx context.Context, repo storage.Repository, backupID string) *Backup

	// BeginIncremental returns the backup with the last element of Steps being
	// the tentative step needed to create an incremental backup.
	BeginIncremental(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)

	// Commit persists the backup so that it can be looked up by FindLatest. It
	// is expected that the last element of Steps will be the newly created
	// backup.
	Commit(ctx context.Context, backup *Backup) error

	// FindLatest returns the latest backup that was written by Commit
	FindLatest(ctx context.Context, repo storage.Repository) (*Backup, error)

	// Find returns the repository backup at the given backupID. If the backup does
	// not exist then the error ErrDoesntExist is returned.
	Find(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)
}

Locator finds sink backup paths for repositories

func ResolveLocator

func ResolveLocator(layout string, sink *Sink) (Locator, error)

ResolveLocator returns a locator implementation based on a locator identifier.
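
A sketch of the begin/commit flow around a resolved locator. The "pointer" layout identifier is an assumed example (the accepted identifiers are not listed on this page), and repo is a storage.Repository provided by the caller:

func createFullBackup(ctx context.Context, sink *backup.Sink, repo storage.Repository, backupID string) error {
	locator, err := backup.ResolveLocator("pointer", sink)
	if err != nil {
		return fmt.Errorf("resolve locator: %w", err)
	}

	full := locator.BeginFull(ctx, repo, backupID)
	// ... write the bundle, refs and custom hooks referenced by full.Steps ...

	// Commit persists the backup so that Find and FindLatest can locate it later.
	return locator.Commit(ctx, full)
}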

type LogEntryArchiver

type LogEntryArchiver struct {
	// contains filtered or unexported fields
}

LogEntryArchiver is used to back up applied log entries. It has a configurable number of worker goroutines that perform the backups. Each partition may only have one backup executing at a time; entries are always processed in order. Backup failures trigger an exponential backoff.

func NewLogEntryArchiver

func NewLogEntryArchiver(logger logging.Logger, archiveSink *Sink, workerCount uint, node *storage.Node) *LogEntryArchiver

NewLogEntryArchiver constructs a new LogEntryArchiver.
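
A sketch of the archiver life cycle. logger, archiveSink, node, partitionID and the water marks are assumed to come from the surrounding server setup, and the worker count of 4 is arbitrary:

archiver := backup.NewLogEntryArchiver(logger, archiveSink, 4, node)
go archiver.Run()
defer archiver.Close() // Close stops the archiver and causes Run to return.

// After log entries have been applied for a partition, notify the archiver so
// one of its workers can back the entries up in order.
archiver.NotifyNewEntries("default", partitionID, lowWaterMark, highWaterMark)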

func (*LogEntryArchiver) Close

func (la *LogEntryArchiver) Close()

Close stops the LogEntryArchiver, causing Run to return.

func (*LogEntryArchiver) Collect

func (la *LogEntryArchiver) Collect(metrics chan<- prometheus.Metric)

Collect is used to collect Prometheus metrics.

func (*LogEntryArchiver) Describe

func (la *LogEntryArchiver) Describe(descs chan<- *prometheus.Desc)

Describe is used to describe Prometheus metrics.

func (*LogEntryArchiver) NotifyNewEntries

func (la *LogEntryArchiver) NotifyNewEntries(storageName string, partitionID storage.PartitionID, lowWaterMark, highWaterMark storage.LSN)

NotifyNewEntries passes the log entry information to the LogEntryArchiver for processing.

func (*LogEntryArchiver) Run

func (la *LogEntryArchiver) Run()

Run starts log entry archiving.

type LogEntryIterator

type LogEntryIterator struct {
	// contains filtered or unexported fields
}

LogEntryIterator iterates over archived log entries in object-storage.

func (*LogEntryIterator) Err

func (it *LogEntryIterator) Err() error

Err returns an iteration error, if there was any.

func (*LogEntryIterator) LSN

func (it *LogEntryIterator) LSN() storage.LSN

LSN of the current log entry.

func (*LogEntryIterator) Next

func (it *LogEntryIterator) Next(ctx context.Context) bool

Next iterates to the next item. Returns false if there are no more results.

func (*LogEntryIterator) Path

func (it *LogEntryIterator) Path() string

Path of the current log entry.

type LogEntryStore

type LogEntryStore struct {
	// contains filtered or unexported fields
}

LogEntryStore manages uploaded log entry archives in object storage.

func NewLogEntryStore

func NewLogEntryStore(sink *Sink) LogEntryStore

NewLogEntryStore returns a new LogEntryStore.

func (LogEntryStore) Exists

func (s LogEntryStore) Exists(ctx context.Context, info PartitionInfo, lsn storage.LSN) (bool, error)

Exists returns true if a log entry for the specified partition and LSN exists in the store.

func (*LogEntryStore) GetReader

func (s *LogEntryStore) GetReader(ctx context.Context, info PartitionInfo, lsn storage.LSN) (io.ReadCloser, error)

GetReader returns a reader in order to read a log entry from the store.

func (LogEntryStore) GetWriter

func (s LogEntryStore) GetWriter(ctx context.Context, info PartitionInfo, lsn storage.LSN) (io.WriteCloser, error)

GetWriter returns a writer in order to write a new log entry into the store.
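
A sketch of archiving a single log entry; archiveEntry is a hypothetical helper, and storage.LSN comes from the storage package:

func archiveEntry(ctx context.Context, store backup.LogEntryStore, info backup.PartitionInfo, lsn storage.LSN, src io.Reader) error {
	ok, err := store.Exists(ctx, info, lsn)
	if err != nil {
		return err
	}
	if ok {
		return nil // already archived
	}

	w, err := store.GetWriter(ctx, info, lsn)
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, src); err != nil {
		_ = w.Close()
		return err
	}
	return w.Close()
}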

func (LogEntryStore) Query

Query returns an iterator that finds all log entries in the store for the given partition starting at the LSN specified by from.

type Manager

type Manager struct {
	// contains filtered or unexported fields
}

Manager manages the process of creating and restoring backups.

func NewManager

func NewManager(sink *Sink, logger log.Logger, locator Locator, pool *client.Pool) *Manager

NewManager creates and returns an initialized *Manager instance.

func NewManagerLocal

func NewManagerLocal(
	sink *Sink,
	logger log.Logger,
	locator Locator,
	storageLocator storage.Locator,
	gitCmdFactory gitcmd.CommandFactory,
	catfileCache catfile.Cache,
	txManager transaction.Manager,
	repoCounter *counter.RepositoryCounter,
	migrationStateManager migration.StateManager,
) *Manager

NewManagerLocal creates and returns a *Manager instance for operating on local repositories.

func (*Manager) Create

func (mgr *Manager) Create(ctx context.Context, req *CreateRequest) error

Create creates a repository backup.

func (*Manager) Restore

func (mgr *Manager) Restore(ctx context.Context, req *RestoreRequest) error

Restore restores a repository from a backup. If req.BackupID is empty, the latest backup will be used.
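
For illustration, a sketch that creates a backup of one repository and restores it again. sink, logger, locator, pool, serverInfo, repo and backupID are assumed to be provided by the surrounding setup (logger, pool and repo come from other Gitaly packages):

mgr := backup.NewManager(sink, logger, locator, pool)

if err := mgr.Create(ctx, &backup.CreateRequest{
	Server:     serverInfo, // connection details of the Gitaly that holds repo
	Repository: repo,
	BackupID:   backupID,
}); err != nil {
	return fmt.Errorf("create backup: %w", err)
}

if err := mgr.Restore(ctx, &backup.RestoreRequest{
	Server:     serverInfo,
	Repository: repo,
	BackupID:   backupID, // leave empty to restore the latest backup
}); err != nil {
	return fmt.Errorf("restore backup: %w", err)
}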

type ManifestLoader

type ManifestLoader struct {
	// contains filtered or unexported fields
}

ManifestLoader reads and writes manifest files from a Sink. Manifest files are used to persist all details about a repository needed to properly restore it to a known state.

func NewManifestLoader

func NewManifestLoader(sink *Sink) ManifestLoader

NewManifestLoader builds a new ManifestLoader

func (ManifestLoader) ReadManifest

func (l ManifestLoader) ReadManifest(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)

ReadManifest reads a manifest from the sink for the specified backup ID.

func (ManifestLoader) WriteManifest

func (l ManifestLoader) WriteManifest(ctx context.Context, backup *Backup, backupID string) (returnErr error)

WriteManifest writes a manifest to the sink for the specified backup ID.
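
A sketch of loading a manifest back from the sink; repo is a storage.Repository and backupID identifies an existing backup:

loader := backup.NewManifestLoader(sink)

manifest, err := loader.ReadManifest(ctx, repo, backupID)
if err != nil {
	return fmt.Errorf("read manifest: %w", err)
}
for _, step := range manifest.Steps {
	// Each step references the bundle, refs and custom hooks needed to
	// replay that part of the backup.
	fmt.Println(step.BundlePath, step.RefPath, step.CustomHooksPath)
}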

type ManifestLocator

type ManifestLocator struct {
	Loader   ManifestLoader
	Fallback Locator
}

ManifestLocator locates backup paths based on manifest files that are written to a predetermined path:

manifests/<repo_storage_name>/<repo_relative_path>/<backup_id>.toml

It relies on Fallback to determine paths of new backups.

func NewManifestLocator

func NewManifestLocator(sink *Sink, fallback Locator) ManifestLocator

NewManifestLocator builds a new ManifestLocator.
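
One plausible composition (a sketch, not necessarily how ResolveLocator wires things up) is a manifest locator that falls back to the pointer layout for backups that predate manifest files:

locator := backup.NewManifestLocator(sink, backup.PointerLocator{Sink: sink})

latest, err := locator.FindLatest(ctx, repo)
if err != nil {
	return fmt.Errorf("find latest backup: %w", err)
}
_ = latest.Steps // a restore replays these steps in order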

func (ManifestLocator) BeginFull

func (l ManifestLocator) BeginFull(ctx context.Context, repo storage.Repository, backupID string) *Backup

BeginFull returns a tentative first step needed to create a new full backup. The logic will be overridden by the fallback locator, if configured.

func (ManifestLocator) BeginIncremental

func (l ManifestLocator) BeginIncremental(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)

BeginIncremental returns a tentative step needed to create a new incremental backup. The incremental backup is always based off of the latest backup. If there is no latest backup, a new full backup step is returned using backupID. The logic will be overridden by the fallback locator, if configured.

func (ManifestLocator) Commit

func (l ManifestLocator) Commit(ctx context.Context, backup *Backup) error

Commit passes through to Fallback, then writes a manifest file for the backup.

func (ManifestLocator) Find

func (l ManifestLocator) Find(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)

Find loads the manifest for the provided repo and backupID. If this manifest does not exist, the fallback is used.

func (ManifestLocator) FindLatest

func (l ManifestLocator) FindLatest(ctx context.Context, repo storage.Repository) (*Backup, error)

FindLatest loads the manifest called +latest. If this manifest does not exist, the Fallback is used.

type PartitionBackupManager

type PartitionBackupManager struct {
	// contains filtered or unexported fields
}

PartitionBackupManager manages the process of creating and restoring partition backups.

func NewPartitionBackupManager

func NewPartitionBackupManager(pool *client.Pool, opts ...PartitionBackupOption) *PartitionBackupManager

NewPartitionBackupManager creates and returns an initialized *PartitionBackupManager instance.

func (*PartitionBackupManager) Create

func (pbm *PartitionBackupManager) Create(ctx context.Context, serverInfo storage.ServerInfo, storageName string, logger log.Logger) error

Create creates a backup for all the partitions of the given storage.
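
A sketch of backing up every partition of one storage with functional options; pool, serverInfo and logger are assumed to come from other Gitaly packages, and the option values are arbitrary:

pbm := backup.NewPartitionBackupManager(
	pool,
	backup.WithPartitionConcurrencyLimit(2),
	backup.WithPartitionBackupTimeout(10*time.Minute),
)

if err := pbm.Create(ctx, serverInfo, "default", logger); err != nil {
	return fmt.Errorf("partition backups: %w", err)
}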

func (*PartitionBackupManager) WithBackupTimeout

func (pbm *PartitionBackupManager) WithBackupTimeout(timeout time.Duration) *PartitionBackupManager

WithBackupTimeout sets the timeout for individual partition backup calls. If not specified, the default timeout is 5 minutes.

func (*PartitionBackupManager) WithPaginationLimit

func (pbm *PartitionBackupManager) WithPaginationLimit(limit int32) *PartitionBackupManager

WithPaginationLimit sets the pagination page size. If not specified, the default page size is 100.

type PartitionBackupOption

type PartitionBackupOption func(*PartitionBackupManager)

PartitionBackupOption is a functional option for the *PartitionBackupManager.

func WithPartitionBackupTimeout

func WithPartitionBackupTimeout(timeout time.Duration) PartitionBackupOption

WithPartitionBackupTimeout sets the timeout for individual partition backup calls.

func WithPartitionConcurrencyLimit

func WithPartitionConcurrencyLimit(limit int) PartitionBackupOption

WithPartitionConcurrencyLimit sets the maximum number of concurrent backup calls.

func WithPartitionCreateClientFunc

func WithPartitionCreateClientFunc(createClientFunc CreateClientFunc) PartitionBackupOption

WithPartitionCreateClientFunc sets a custom function for creating a partition client.

func WithPartitionPaginationLimit

func WithPartitionPaginationLimit(limit int32) PartitionBackupOption

WithPartitionPaginationLimit sets the pagination page size.

type PartitionInfo

type PartitionInfo struct {
	StorageName string
	PartitionID storage.PartitionID
}

PartitionInfo is the global identifier for a partition.

type Pipeline

type Pipeline struct {
	// contains filtered or unexported fields
}

Pipeline is a pipeline for running backup and restore jobs.

func NewPipeline

func NewPipeline(log log.Logger, opts ...PipelineOption) (*Pipeline, error)

NewPipeline creates a pipeline that executes backup and restore jobs. The pipeline executes jobs sequentially by default, but can be made concurrent by passing the WithConcurrency() option.

func (*Pipeline) Done

func (p *Pipeline) Done() (processedRepos map[string]map[repositoryKey]struct{}, err error)

Done waits for any in-progress jobs to complete, then reports any accumulated errors.

func (*Pipeline) Handle

func (p *Pipeline) Handle(ctx context.Context, cmd Command)

Handle queues a backup or restore command. Commands are processed sequentially, or concurrently if WithConcurrency() was used.

type PipelineOption

type PipelineOption func(*Pipeline) error

PipelineOption represents an optional configuration parameter for the Pipeline.

func WithConcurrency

func WithConcurrency(total, perStorage int) PipelineOption

WithConcurrency configures the pipeline to run backup and restore jobs concurrently. total defines the absolute maximum number of jobs that the pipeline should execute concurrently. perStorage defines the number of jobs per Gitaly storage that the pipeline should attempt to execute concurrently.

For example, in a Gitaly deployment with 2 storages, WithConcurrency(3, 2) means that at most 3 jobs will execute concurrently, despite 2 concurrent jobs being allowed per storage (2*2=4).
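
A sketch of a concurrent backup run driven through the pipeline, using the server-side adapter as the Strategy. logger, pool, repos, serverInfo and backupID are assumed to be supplied by the caller:

pipeline, err := backup.NewPipeline(logger, backup.WithConcurrency(6, 2))
if err != nil {
	return err
}

strategy := backup.NewServerSideAdapter(pool)
for _, repo := range repos {
	pipeline.Handle(ctx, backup.NewCreateCommand(strategy, backup.CreateRequest{
		Server:     serverInfo,
		Repository: repo,
		BackupID:   backupID,
	}))
}

// Done blocks until all queued jobs have finished and reports any accumulated errors.
if _, err := pipeline.Done(); err != nil {
	return fmt.Errorf("backup pipeline: %w", err)
}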

type PointerLocator

type PointerLocator struct {
	Sink     *Sink
	Fallback Locator
}

PointerLocator locates backup paths where each full backup is put into a unique timestamp directory and the latest backup taken is pointed to by a file named LATEST.

Structure:

<repo relative path>/LATEST
<repo relative path>/<backup id>/LATEST
<repo relative path>/<backup id>/<nnn>.bundle
<repo relative path>/<backup id>/<nnn>.refs
<repo relative path>/<backup id>/<nnn>.custom_hooks.tar

func (PointerLocator) BeginFull

func (l PointerLocator) BeginFull(ctx context.Context, repo storage.Repository, backupID string) *Backup

BeginFull returns a tentative first step needed to create a new full backup.

func (PointerLocator) BeginIncremental

func (l PointerLocator) BeginIncremental(ctx context.Context, repo storage.Repository, fallbackBackupID string) (*Backup, error)

BeginIncremental returns a tentative step needed to create a new incremental backup. The incremental backup is always based off of the latest full backup. If there is no latest backup, a new full backup step is returned using fallbackBackupID

func (PointerLocator) Commit

func (l PointerLocator) Commit(ctx context.Context, backup *Backup) error

Commit persists the step so that it can be looked up by FindLatest

func (PointerLocator) Find

func (l PointerLocator) Find(ctx context.Context, repo storage.Repository, backupID string) (*Backup, error)

Find returns the repository backup at the given backupID. If the backup does not exist then the error ErrDoesntExist is returned.

func (PointerLocator) FindLatest

func (l PointerLocator) FindLatest(ctx context.Context, repo storage.Repository) (*Backup, error)

FindLatest returns the paths committed by the latest call to Commit.

If there is no `LATEST` file, the result of the `Fallback` is used.

type RefIterator

type RefIterator interface {
	Next() bool
	Ref() git.Reference
	Close() error
	Err() error
}

RefIterator is an interface for iterating over refs.

type Repository

type Repository interface {
	// ListRefs returns an iterator to fetch the full set of refs and targets for the repository.
	ListRefs(ctx context.Context) (RefIterator, error)
	// GetCustomHooks fetches the custom hooks archive.
	GetCustomHooks(ctx context.Context, out io.Writer) error
	// CreateBundle fetches a bundle that contains refs matching patterns. When
	// patterns is nil all refs are bundled.
	CreateBundle(ctx context.Context, out io.Writer, patterns io.Reader) error
	// Remove removes the repository. Does not return an error if the
	// repository cannot be found.
	Remove(ctx context.Context) error
	// Create creates the repository.
	Create(ctx context.Context, hash git.ObjectHash, defaultBranch string) error
	// FetchBundle fetches references from a bundle. Refs will be mirrored to
	// the repository.
	FetchBundle(ctx context.Context, reader io.Reader, updateHead bool) error
	// SetCustomHooks updates the custom hooks for the repository.
	SetCustomHooks(ctx context.Context, reader io.Reader) error
	// ObjectHash detects the object hash used by the repository.
	ObjectHash(ctx context.Context) (git.ObjectHash, error)
	// HeadReference fetches the reference pointed to by HEAD.
	HeadReference(ctx context.Context) (git.ReferenceName, error)
	// ResetRefs attempts to reset the list of refs in the repository to match the
	// specified refs slice. This can fail if objects pointed to by a ref no longer
	// exist in the repository. The list of refs should not include the symbolic
	// HEAD reference.
	// If optimistic is set, ref update failures are ignored for regular updates
	// but not for removal updates.
	ResetRefs(ctx context.Context, refs []git.Reference, optimistic bool) error
	// SetHeadReference sets the symbolic HEAD reference of the repository to the
	// given target, for example a branch name.
	SetHeadReference(ctx context.Context, target git.ReferenceName) error
}

Repository abstracts git access required to make a repository backup

type RestoreCommand

type RestoreCommand struct {
	// contains filtered or unexported fields
}

RestoreCommand restores a backup for a repository

func NewRestoreCommand

func NewRestoreCommand(strategy Strategy, request RestoreRequest) *RestoreCommand

NewRestoreCommand builds a RestoreCommand

func (RestoreCommand) Execute

func (cmd RestoreCommand) Execute(ctx context.Context) error

Execute performs the restore

func (RestoreCommand) Name

func (cmd RestoreCommand) Name() string

Name is the name of the command

func (RestoreCommand) Repository

func (cmd RestoreCommand) Repository() *gitalypb.Repository

Repository is the repository that will be acted on

type RestoreRequest

type RestoreRequest struct {
	// Server contains gitaly server connection information required to call
	// RPCs in the non-local backup.Manager configuration.
	Server storage.ServerInfo
	// Repository is the repository to be restored.
	Repository *gitalypb.Repository
	// VanityRepository is used to determine the backup path.
	VanityRepository *gitalypb.Repository
	// AlwaysCreate forces the repository to be created even if no bundle for
	// it exists. See https://gitlab.com/gitlab-org/gitlab/-/issues/357044
	AlwaysCreate bool
	// BackupID is the ID of the full backup to restore. If not specified, the
	// latest backup is restored.
	BackupID string
}

RestoreRequest is the request to restore from a backup

type ServerSideAdapter

type ServerSideAdapter struct {
	// contains filtered or unexported fields
}

ServerSideAdapter allows calling the server-side backup RPCs `BackupRepository` and `RestoreRepository` through `backup.Strategy` such that server-side backups can be used with `backup.Pipeline`.

func NewServerSideAdapter

func NewServerSideAdapter(pool *client.Pool) *ServerSideAdapter

NewServerSideAdapter creates and returns an initialized *ServerSideAdapter instance.

func (ServerSideAdapter) Create

func (ss ServerSideAdapter) Create(ctx context.Context, req *CreateRequest) error

Create calls the BackupRepository RPC.

func (ServerSideAdapter) Restore

func (ss ServerSideAdapter) Restore(ctx context.Context, req *RestoreRequest) error

Restore calls the RestoreRepository RPC.

type Sink

type Sink struct {
	// contains filtered or unexported fields
}

Sink uses a storage engine that is selected by the URL provided at construction.

func ResolveSink

func ResolveSink(ctx context.Context, uri string, opts ...SinkOption) (*Sink, error)

ResolveSink returns a sink implementation whose storage engine is chosen based on the provided uri. It is the caller's responsibility to provide all required environment variables in order to get a properly initialized storage engine driver.
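
A sketch of opening a sink and writing a blob through it. The URI and relative path are illustrative, and the buffer size is arbitrary; credentials and other driver configuration are expected to come from the environment:

sink, err := backup.ResolveSink(ctx, "s3://my-backup-bucket", backup.WithBufferSize(1<<20))
if err != nil {
	return fmt.Errorf("resolve sink: %w", err)
}
defer sink.Close()

w, err := sink.GetWriter(ctx, "example/path/data.tar")
if err != nil {
	return err
}
if _, err := io.Copy(w, data); err != nil {
	_ = w.Close()
	return err
}
if err := w.Close(); err != nil {
	return err
}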

func (Sink) Close

func (s Sink) Close() error

Close releases resources associated with the bucket communication.

func (Sink) Exists

func (s Sink) Exists(ctx context.Context, relativePath string) (bool, error)

Exists is a wrapper around the underlying bucket and returns true if a blob exists at the given relative path, false if it does not exist, or an error.

func (Sink) GetReader

func (s Sink) GetReader(ctx context.Context, relativePath string) (io.ReadCloser, error)

GetReader returns a reader to consume the data from the configured bucket. It is the caller's responsibility to Close the reader after usage.

func (Sink) GetWriter

func (s Sink) GetWriter(ctx context.Context, relativePath string) (io.WriteCloser, error)

GetWriter returns a writer that stores the written data at relativePath in the configured bucket. It is the caller's responsibility to Close the writer after usage.

func (Sink) List

func (s Sink) List(prefix string) *ListIterator

List all objects that have the specified prefix.

func (Sink) SignedURL

func (s Sink) SignedURL(ctx context.Context, relativePath string, expiry time.Duration) (string, error)

SignedURL returns a URL that can be used to GET the blob for the duration specified in expiry.

type SinkOption

type SinkOption func(*Sink)

SinkOption is a function that configures a Sink.

func WithBufferSize

func WithBufferSize(size int) SinkOption

WithBufferSize sets the buffer size for the sink.

type Step

type Step struct {
	// BundlePath is the path of the bundle
	BundlePath string `toml:"bundle_path,omitempty"`
	// RefPath is the path of the ref file
	RefPath string `toml:"ref_path,omitempty"`
	// PreviousRefPath is the path of the previous ref file
	PreviousRefPath string `toml:"previous_ref_path,omitempty"`
	// CustomHooksPath is the path of the custom hooks archive
	CustomHooksPath string `toml:"custom_hooks_path,omitempty"`
}

Step represents an incremental step that makes up a complete backup for a repository

type Strategy

type Strategy interface {
	Create(context.Context, *CreateRequest) error
	Restore(context.Context, *RestoreRequest) error
}

Strategy used to create/restore backups
