backup

package
v1.35.13 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 6, 2026 License: BSD-3-Clause Imports: 5 Imported by: 0

Documentation

Index

Constants

View Source
// DeleteMarker marks folders of indices that were deleted during an
// ongoing backup and are already removed from store and schema. The
// folders are kept on disk so the backup can complete; they are removed
// after backup completion, or at the next startup in case of a crash.
const DeleteMarker = "__DELETE_ME_AFTER_BACKUP__"

DeleteMarker marks folders of indices that have been deleted during an ongoing backup and are already removed from store and schema. However, we want to keep them on disk to ensure that the backup can complete. The folders are removed after backup completion. In case of a crash they are deleted at the next startup.

Variables

This section is empty.

Functions

func DeleteMarkerAdd added in v1.32.17

func DeleteMarkerAdd(filename string) string

func IsCancelled added in v1.24.26

func IsCancelled(err error, meta *DistributedBackupDescriptor) bool

Types

type BackupDescriptor

// BackupDescriptor contains everything needed to completely restore a
// list of classes backed up by a single node (the single-node
// counterpart of DistributedBackupDescriptor; see ToDistributed).
type BackupDescriptor struct {
	StartedAt               time.Time         `json:"startedAt"`
	CompletedAt             time.Time         `json:"completedAt"`
	ID                      string            `json:"id"` // User created backup id
	Classes                 []ClassDescriptor `json:"classes"`
	RbacBackups             []byte            `json:"rbacBackups"`
	UserBackups             []byte            `json:"userBackups"`
	Status                  Status            `json:"status"`  // "STARTED|TRANSFERRING|TRANSFERRED|SUCCESS|FAILED|CANCELED"
	Version                 string            `json:"version"` //
	ServerVersion           string            `json:"serverVersion"`
	Error                   string            `json:"error"`
	PreCompressionSizeBytes int64             `json:"preCompressionSizeBytes"` // Size of this node's backup in bytes before compression
	CompressionType         *CompressionType  `json:"compressionType,omitempty"`
	// BaseBackupID presumably identifies the base backup of an
	// incremental backup chain — TODO confirm against GetBaseBackupID callers.
	BaseBackupID            string            `json:"baseBackupId,omitempty"`
}

BackupDescriptor contains everything needed to completely restore a list of classes

func (*BackupDescriptor) AllExist

func (d *BackupDescriptor) AllExist(classes []string) string

AllExist checks if all classes exist in d. It returns either "" or the first class which it could not find

func (*BackupDescriptor) Exclude

func (d *BackupDescriptor) Exclude(classes []string)

Exclude removes classes from d

func (*BackupDescriptor) Filter

func (d *BackupDescriptor) Filter(pred func(s string) bool)

Filter classes based on predicate

func (*BackupDescriptor) GetBaseBackupID added in v1.34.18

func (d *BackupDescriptor) GetBaseBackupID() string

func (*BackupDescriptor) GetClassDescriptor added in v1.34.18

func (d *BackupDescriptor) GetClassDescriptor(className string) *ClassDescriptor

func (*BackupDescriptor) GetCompressionType added in v1.32.18

func (d *BackupDescriptor) GetCompressionType() CompressionType

func (*BackupDescriptor) GetStatus added in v1.34.18

func (d *BackupDescriptor) GetStatus() Status

func (*BackupDescriptor) Include

func (d *BackupDescriptor) Include(classes []string)

Include only these classes and remove everything else

func (*BackupDescriptor) List

func (d *BackupDescriptor) List() []string

List all existing classes in d

func (*BackupDescriptor) ToDistributed

func (d *BackupDescriptor) ToDistributed() *DistributedBackupDescriptor

ToDistributed is used just for backward compatibility with the old version.

func (*BackupDescriptor) Validate

func (d *BackupDescriptor) Validate(newSchema bool) error

type BigFileInfo added in v1.34.18

// BigFileInfo describes a large backed-up file: the keys of the chunks
// it is stored in, its size in bytes, and its modification time.
// NOTE(review): chunking semantics inferred from ChunkKeys and from
// ShardDescriptor.BigFilesChunk — confirm against the backup writer.
type BigFileInfo struct {
	ChunkKeys  []string  `json:"chunkKeys"`
	Size       int64     `json:"size"`
	ModifiedAt time.Time `json:"modifiedAt"`
}

type ClassDescriptor

// ClassDescriptor contains everything needed to completely restore a class.
type ClassDescriptor struct {
	Name          string             `json:"name"` // DB class name, also selected by user
	BackupID      string             `json:"backupId"`
	Shards        []*ShardDescriptor `json:"shards"`
	ShardingState []byte             `json:"shardingState"`
	Schema        []byte             `json:"schema"`
	Aliases       []byte             `json:"aliases"`

	// AliasesIncluded keeps old backups backward compatible when they
	// are restored by a newer ClassDescriptor version that supports
	// aliases.
	AliasesIncluded         bool               `json:"aliasesIncluded"`
	Chunks                  map[int32][]string `json:"chunks,omitempty"`
	Error                   error              `json:"-"` // transient; never serialized
	PreCompressionSizeBytes int64              `json:"preCompressionSizeBytes"` // Size of this class's backup in bytes before compression
}

ClassDescriptor contains everything needed to completely restore a class

func (*ClassDescriptor) GetShardDescriptor added in v1.34.18

func (c *ClassDescriptor) GetShardDescriptor(shardName string) *ShardDescriptor

type CompressionType added in v1.32.18

// CompressionType identifies the algorithm used to compress a backup.
type CompressionType string

// Supported compression algorithms.
const (
	CompressionZSTD CompressionType = "zstd"
	CompressionGZIP CompressionType = "gzip"
	CompressionNone CompressionType = "none"
)

type CreateMeta

// CreateMeta holds the storage path and current status of a backup
// creation. NOTE(review): purpose inferred from the type name and
// fields — confirm against callers.
type CreateMeta struct {
	Path   string
	Status Status
}

type DistributedBackupDescriptor

// DistributedBackupDescriptor contains everything needed to completely
// restore a distributed backup.
type DistributedBackupDescriptor struct {
	StartedAt               time.Time                  `json:"startedAt"`
	CompletedAt             time.Time                  `json:"completedAt"`
	ID                      string                     `json:"id"` // User created backup id
	Nodes                   map[string]*NodeDescriptor `json:"nodes"`
	// NOTE(review): snake_case tag is inconsistent with the camelCase
	// tags above — kept as-is for wire compatibility.
	NodeMapping             map[string]string          `json:"node_mapping"`
	Status                  Status                     `json:"status"`  //
	Version                 string                     `json:"version"` //
	ServerVersion           string                     `json:"serverVersion"`
	Leader                  string                     `json:"leader"`
	Error                   string                     `json:"error"`
	PreCompressionSizeBytes int64                      `json:"preCompressionSizeBytes"` // Size of this node's backup in bytes before compression
	CompressionType         CompressionType            `json:"compressionType"`
	BaseBackupID            string                     `json:"baseBackupId"`
}

DistributedBackupDescriptor contains everything needed to completely restore a distributed backup

func (*DistributedBackupDescriptor) AllExist

func (d *DistributedBackupDescriptor) AllExist(classes []string) string

AllExist checks if all classes exist in d. It returns either "" or the first class which it could not find

func (*DistributedBackupDescriptor) ApplyNodeMapping added in v1.22.0

func (d *DistributedBackupDescriptor) ApplyNodeMapping()

ApplyNodeMapping applies d.NodeMapping translation to d.Nodes. If a node in d.Nodes is not translated by d.NodeMapping, it will remain unchanged.

func (*DistributedBackupDescriptor) Classes

func (d *DistributedBackupDescriptor) Classes() []string

Classes returns all classes contained in d

func (*DistributedBackupDescriptor) Count

func (d *DistributedBackupDescriptor) Count() int

Count number of classes

func (*DistributedBackupDescriptor) Exclude

func (d *DistributedBackupDescriptor) Exclude(classes []string)

Exclude removes classes from d

func (*DistributedBackupDescriptor) Filter

func (d *DistributedBackupDescriptor) Filter(pred func(s string) bool)

Filter classes based on predicate

func (*DistributedBackupDescriptor) GetBaseBackupID added in v1.34.18

func (d *DistributedBackupDescriptor) GetBaseBackupID() string

func (*DistributedBackupDescriptor) GetCompressionType added in v1.34.18

func (d *DistributedBackupDescriptor) GetCompressionType() CompressionType

func (*DistributedBackupDescriptor) GetStatus added in v1.34.18

func (d *DistributedBackupDescriptor) GetStatus() Status

func (*DistributedBackupDescriptor) Include

func (d *DistributedBackupDescriptor) Include(classes []string)

Include only these classes and remove everything else

func (*DistributedBackupDescriptor) Len

Len returns how many nodes exist in d

func (*DistributedBackupDescriptor) RemoveEmpty

RemoveEmpty removes any nodes with an empty class list

func (*DistributedBackupDescriptor) ResetStatus

ResetStatus sets status and sub-statuses to Started. It also empties error and sub-errors.

func (*DistributedBackupDescriptor) ToMappedNodeName added in v1.22.0

func (d *DistributedBackupDescriptor) ToMappedNodeName(nodeName string) string

ToMappedNodeName will return nodeName after applying d.NodeMapping translation on it. If nodeName is not contained in d.NodeMapping, returns nodeName unmodified

func (*DistributedBackupDescriptor) ToOriginalNodeName added in v1.22.0

func (d *DistributedBackupDescriptor) ToOriginalNodeName(nodeName string) string

ToOriginalNodeName will return nodeName after trying to find an original node name from d.NodeMapping values. If nodeName is not contained in d.NodeMapping values, returns nodeName unmodified

func (*DistributedBackupDescriptor) Validate

func (d *DistributedBackupDescriptor) Validate() error

type ErrContextExpired

type ErrContextExpired struct {
	// contains filtered or unexported fields
}

func NewErrContextExpired

func NewErrContextExpired(err error) ErrContextExpired

func (ErrContextExpired) Error

func (e ErrContextExpired) Error() string

type ErrInternal

type ErrInternal struct {
	// contains filtered or unexported fields
}

func NewErrInternal

func NewErrInternal(err error) ErrInternal

func (ErrInternal) Error

func (e ErrInternal) Error() string

type ErrNotFound

type ErrNotFound struct {
	// contains filtered or unexported fields
}

func NewErrNotFound

func NewErrNotFound(err error) ErrNotFound

func (ErrNotFound) Error

func (e ErrNotFound) Error() string

type ErrUnprocessable

type ErrUnprocessable struct {
	// contains filtered or unexported fields
}

func NewErrUnprocessable

func NewErrUnprocessable(err error) ErrUnprocessable

func (ErrUnprocessable) Error

func (e ErrUnprocessable) Error() string

type FileList added in v1.33.14

type FileList struct {
	Files     []string
	FileSizes map[string]int64 // map of relative file path to file size in bytes
	// Top100Size is the size of the 100th biggest file (or smallest if fewer than 100 files),
	// with a minimum of 1MB. This can be used for chunk size optimization.
	Top100Size int64
	// contains filtered or unexported fields
}

FileList holds a list of file paths and allows modification of the underlying slice

func (*FileList) GetFileSize added in v1.34.18

func (f *FileList) GetFileSize(relPath string) int64

GetFileSize returns the pre-collected size for a file, or -1 if not found

func (*FileList) Len added in v1.33.14

func (f *FileList) Len() int

Len returns the number of files in the list

func (*FileList) Peek added in v1.33.14

func (f *FileList) Peek() string

Peek returns the first file without removing it

func (*FileList) PeekAt added in v1.34.18

func (f *FileList) PeekAt(i int) string

PeekAt returns the file at offset i from start without removing it. Returns "" if i is out of range.

func (*FileList) PopFront added in v1.33.14

func (f *FileList) PopFront() string

PopFront removes and returns the first file from the list

func (*FileList) RemoveIndices added in v1.34.18

func (f *FileList) RemoveIndices(indices []int)

RemoveIndices removes files at the given offsets (relative to start) from the list. Indices must be sorted ascending. Remaining files preserve their relative order.

type IncrementalBackupInfo added in v1.34.18

// IncrementalBackupInfo associates one backed-up file with the keys of
// the chunks that store its content.
type IncrementalBackupInfo struct {
	File      string   `json:"file"`
	ChunkKeys []string `json:"chunkKeys"`
}

type IncrementalBackupInfos added in v1.34.18

// IncrementalBackupInfos records, per backup, which files an
// incremental backup reuses from it.
// NOTE(review): map presumably keyed by backup id — confirm against callers.
type IncrementalBackupInfos struct {
	FilesPerBackup map[string][]IncrementalBackupInfo `json:"filesPerBackup,omitempty"`
	// TotalSize and NumFilesSkipped are not needed for restore, only for correct
	// calculation of sizes during the backup process.
	TotalSize       int64 `json:"-"`
	NumFilesSkipped int   `json:"-"`
}

type NodeDescriptor

// NodeDescriptor contains data related to one participant in DBRO
// (a distributed backup/restore operation).
type NodeDescriptor struct {
	Classes                 []string `json:"classes"`
	Status                  Status   `json:"status"`
	Error                   string   `json:"error"`
	PreCompressionSizeBytes int64    `json:"preCompressionSizeBytes"` // Size of this node's backup in bytes before compression
}

NodeDescriptor contains data related to one participant in DBRO

type ReadCloserWithError added in v1.34.15

// ReadCloserWithError extends io.ReadCloser with a CloseWithError
// method. CloseWithError closes the reader and signals the given error
// to the writer, so the writer sees the actual error instead of a
// generic "closed pipe" error.
type ReadCloserWithError interface {
	io.ReadCloser
	CloseWithError(error) error
}

ReadCloserWithError extends io.ReadCloser with CloseWithError method. CloseWithError closes the reader and signals the given error to the writer, so the writer sees the actual error instead of a generic "closed pipe" error.

type RestoreMeta

// RestoreMeta holds the storage path and current status of a backup
// restore. NOTE(review): purpose inferred from the type name and
// fields — confirm against callers.
type RestoreMeta struct {
	Path   string
	Status Status
}

type ShardAndID added in v1.34.18

// ShardAndID pairs a shard descriptor with a backup id.
// NOTE(review): presumably the id of the backup the shard belongs to —
// confirm against FillFileInfo, which takes []ShardAndID.
type ShardAndID struct {
	ShardDesc *ShardDescriptor
	BackupID  string
}

type ShardDescriptor

// ShardDescriptor contains everything needed to completely restore a
// partition (shard) of a specific class.
type ShardDescriptor struct {
	Name                  string                 `json:"name"`
	Node                  string                 `json:"node"`
	Files                 []string               `json:"files,omitempty"`
	BigFilesChunk         map[string]BigFileInfo `json:"bigFilesChunk,omitempty"`
	IncrementalBackupInfo IncrementalBackupInfos `json:"incrementalBackupInfo"`

	// Path/content pairs below are cleared by ClearTemporary once
	// compression is done (not required in versions > 1, where they are
	// stored in the tarball).
	DocIDCounterPath      string `json:"docIdCounterPath,omitempty"`
	DocIDCounter          []byte `json:"docIdCounter,omitempty"`
	PropLengthTrackerPath string `json:"propLengthTrackerPath,omitempty"`
	PropLengthTracker     []byte `json:"propLengthTracker,omitempty"`
	ShardVersionPath      string `json:"shardVersionPath,omitempty"`
	Version               []byte `json:"version,omitempty"`
}

ShardDescriptor contains everything needed to completely restore a partition of a specific class

func (*ShardDescriptor) ClearTemporary added in v1.21.0

func (s *ShardDescriptor) ClearTemporary()

ClearTemporary clears fields that are no longer needed once compression is done. These fields are not required in versions > 1 because they are stored in the tarball.

func (*ShardDescriptor) FillFileInfo added in v1.34.18

func (s *ShardDescriptor) FillFileInfo(files []string, shardBaseDescrs []ShardAndID, rootPath string) error

type Status

// Status represents the lifecycle state of a backup or restore operation.
type Status string

// Possible Status values. Note the serialized value of Cancelled uses
// the single-L spelling "CANCELED".
const (
	Started      Status = "STARTED"
	Transferring Status = "TRANSFERRING"
	Transferred  Status = "TRANSFERRED"
	Success      Status = "SUCCESS"
	Cancelled    Status = "CANCELED"
	Failed       Status = "FAILED"
)

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL