Documentation
¶
Index ¶
- Constants
- func NewUnzip(dst string) (unzip, io.WriteCloser)
- func NewZip(sourcePath string, level int) (zip, io.ReadCloser)
- type AbortRequest
- type AbortResponse
- type BackupBackendProvider
- type BackupRequest
- type CanCommitResponse
- type Compression
- type CompressionLevel
- type Handler
- func (m *Handler) OnAbort(ctx context.Context, req *AbortRequest) error
- func (m *Handler) OnCanCommit(ctx context.Context, req *Request) *CanCommitResponse
- func (m *Handler) OnCommit(ctx context.Context, req *StatusRequest) (err error)
- func (m *Handler) OnStatus(ctx context.Context, req *StatusRequest) *StatusResponse
- type Op
- type Request
- type Scheduler
- func (s *Scheduler) Backup(ctx context.Context, pr *models.Principal, req *BackupRequest) (_ *models.BackupCreateResponse, err error)
- func (s *Scheduler) BackupStatus(ctx context.Context, principal *models.Principal, backend, backupID string) (_ *Status, err error)
- func (s *Scheduler) RestorationStatus(ctx context.Context, principal *models.Principal, backend, backupID string) (_ *Status, err error)
- func (s *Scheduler) Restore(ctx context.Context, pr *models.Principal, req *BackupRequest) (_ *models.BackupRestoreResponse, err error)
- type Sourcer
- type Status
- type StatusRequest
- type StatusResponse
Constants ¶
const ( // DefaultChunkSize if size is not specified DefaultChunkSize = 1 << 27 // 128MB // DefaultCPUPercentage default CPU percentage can be consumed by the file writer DefaultCPUPercentage = 50 )
TODO adjust or make configurable
const ( // BackupFile used by a node to store its metadata BackupFile = "backup.json" // GlobalBackupFile used by coordinator to store its metadata GlobalBackupFile = "backup_config.json" GlobalRestoreFile = "restore_config.json" )
const ( // Versions > version1 support compression // "2.1" supports restore in 2 phases // "2.0" supports compression Version = "2.0" )
Version of backup structure
Variables ¶
This section is empty.
Functions ¶
func NewUnzip ¶ added in v1.21.0
func NewUnzip(dst string) (unzip, io.WriteCloser)
Types ¶
type AbortRequest ¶
type AbortRequest StatusRequest
type AbortResponse ¶
type AbortResponse StatusResponse
type BackupBackendProvider ¶
type BackupBackendProvider interface {
BackupBackend(backend string) (modulecapabilities.BackupBackend, error)
}
type BackupRequest ¶
type BackupRequest struct {
// Compression is the compression configuration.
Compression
// ID is the backup ID
ID string
// Backend specify on which backend to store backups (gcs, s3, ..)
Backend string
// Include is a list of classes which need to be backed up
// The same class cannot appear in both Include and Exclude in the same request
Include []string
// Exclude means include all classes but those specified in Exclude
// The same class cannot appear in both Include and Exclude in the same request
Exclude []string
// NodeMapping is a map of node name replacement where key is the old name and value is the new name
// No effect if the map is empty
NodeMapping map[string]string
}
BackupRequest is a transition request from the API to the Backend.
type CanCommitResponse ¶
type Compression ¶ added in v1.24.0
type Compression struct {
// Level is one of DefaultCompression, BestSpeed, BestCompression
Level CompressionLevel
// ChunkSize represents the desired size for chunks between 1 - 512 MB
// However, during compression, the chunk size might
// slightly deviate from this value, being either slightly
// below or above the specified size
ChunkSize int
// CPUPercentage desired CPU core utilization (1%-80%), default: 50%
CPUPercentage int
}
Compression is the compression configuration.
type CompressionLevel ¶ added in v1.21.0
type CompressionLevel int
CompressionLevel represents supported compression level
const ( DefaultCompression CompressionLevel = iota BestSpeed BestCompression )
type Handler ¶ added in v1.21.0
type Handler struct {
// contains filtered or unexported fields
}
func NewHandler ¶ added in v1.21.0
func NewHandler( logger logrus.FieldLogger, authorizer authorizer, schema schemaManger, sourcer Sourcer, backends BackupBackendProvider, ) *Handler
func (*Handler) OnAbort ¶ added in v1.21.0
func (m *Handler) OnAbort(ctx context.Context, req *AbortRequest) error
OnAbort will be triggered when the coordinator aborts the execution of a previous operation
func (*Handler) OnCanCommit ¶ added in v1.21.0
func (m *Handler) OnCanCommit(ctx context.Context, req *Request) *CanCommitResponse
OnCanCommit will be triggered when the coordinator asks the node to participate in a distributed backup operation
func (*Handler) OnCommit ¶ added in v1.21.0
func (m *Handler) OnCommit(ctx context.Context, req *StatusRequest) (err error)
OnCommit will be triggered when the coordinator confirms the execution of a previous operation
func (*Handler) OnStatus ¶ added in v1.21.0
func (m *Handler) OnStatus(ctx context.Context, req *StatusRequest) *StatusResponse
type Request ¶
type Request struct {
// Method is the backup operation (create, restore)
Method Op
// ID is the backup ID
ID string
// Backend specify on which backend to store backups (gcs, s3, ..)
Backend string
// NodeMapping specify node names replacement to be made on restore
NodeMapping map[string]string
// Classes is a list of classes which need to be backed up
Classes []string
// Duration
Duration time.Duration
// Compression is the compression configuration.
Compression
}
type Scheduler ¶
type Scheduler struct {
// contains filtered or unexported fields
}
Scheduler assigns backup operations to coordinators.
func NewScheduler ¶
func NewScheduler( authorizer authorizer, client client, sourcer selector, backends BackupBackendProvider, nodeResolver nodeResolver, logger logrus.FieldLogger, ) *Scheduler
NewScheduler creates a new scheduler with two coordinators
func (*Scheduler) Backup ¶
func (s *Scheduler) Backup(ctx context.Context, pr *models.Principal, req *BackupRequest, ) (_ *models.BackupCreateResponse, err error)
func (*Scheduler) BackupStatus ¶
func (*Scheduler) RestorationStatus ¶
type Sourcer ¶
type Sourcer interface {
// ReleaseBackup signals to the underlying index that the files have been
// copied (or the operation aborted), and that it is safe for the index to
// change the files, such as start compactions.
ReleaseBackup(_ context.Context, id, class string) error
// Backupable returns whether all given classes can be backed up.
Backupable(_ context.Context, classes []string) error
// BackupDescriptors returns a channel of class descriptors.
// Class descriptor records everything needed to restore a class
// If an error happens a descriptor with an error will be written to the channel just before closing it.
//
// BackupDescriptors acquires resources so that a call to ReleaseBackup() is mandatory to free acquired resources.
BackupDescriptors(_ context.Context, bakid string, classes []string,
) <-chan backup.ClassDescriptor
// ClassExists checks whether a class exists or not
ClassExists(name string) bool
// ListBackupable returns a list of all classes which can be backed up.
//
// A class cannot be backed up if it doesn't exist or if it has more than one physical shard.
ListBackupable() []string
}
Sourcer represents the source of artifacts used in the backup