Documentation
Constants
const MaxAnnotationSize = 128 * 1024
const SpecialVersionCreated = 9
const SpecialVersionFirst = 10
Variables
var ErrAnnotationTooBig = errors.New("Annotation too big")
var ErrExists = errors.New("File exists")
var ErrInvalidArgument = errors.New("Invalid argument")
var ErrNoSpace = errors.New("No more space")
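The size limit and sentinel errors above are meant to be used together. The following is a minimal sketch, written as if it were part of this package, of a hypothetical validateAnnotation helper that rejects oversized annotations; the helper name and its behavior are assumptions for illustration, not part of the package API.

// validateAnnotation is a hypothetical helper showing how MaxAnnotationSize
// and ErrAnnotationTooBig are intended to be used together. Callers can
// compare the returned error against the sentinel directly or with errors.Is.
func validateAnnotation(annotation []byte) error {
	if len(annotation) > MaxAnnotationSize {
		// The annotation exceeds the 128 KiB limit.
		return ErrAnnotationTooBig
	}
	return nil
}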
Functions
This section is empty.
Types
type Segment
type Segment interface {
	// BaseAddress returns the address of the first free word in the segment
	// at the time it was locked.
	BaseAddress() uint64
	// Unlock releases the segment so the StorageProvider can give it to other
	// consumers. Unlocking implies a flush.
	Unlock()
	// Write writes a slice to the segment and returns immediately.
	// The returned error is nil if the operation succeeded, otherwise it is
	// ErrNoSpace or ErrInvalidArgument. It is up to the implementer to work
	// out how to report lack of space immediately. The returned uint64 is the
	// address to be used for the next write.
	Write(uuid []byte, address uint64, data []byte) (uint64, error)
	// Flush blocks until all writes are complete. Note this does not imply a
	// flush of the underlying files.
	Flush()
}
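The comments above imply a lock, write, flush, unlock lifecycle in which each Write returns the address to use for the next write. The sketch below, written as if it were part of this package, chains several writes through an already locked Segment; writeRecord, its chunks argument, and the error-handling policy are assumptions for illustration, not part of the interface.

// writeRecord is a hypothetical helper illustrating the intended lifecycle of
// a locked Segment: start at the segment's base address, chain subsequent
// writes through the returned next-write address, then flush and unlock.
func writeRecord(seg Segment, uuid []byte, chunks [][]byte) (uint64, error) {
	addr := seg.BaseAddress()
	for _, chunk := range chunks {
		next, err := seg.Write(uuid, addr, chunk)
		if err != nil {
			// ErrNoSpace or ErrInvalidArgument; a caller would typically
			// unlock this segment and lock a fresh one before retrying.
			seg.Unlock()
			return 0, err
		}
		addr = next
	}
	// Wait for the queued writes to complete, then release the segment.
	// Unlock itself also implies a flush.
	seg.Flush()
	seg.Unlock()
	return addr, nil
}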
type StorageProvider
type StorageProvider interface {
	// Initialize is called at the startup of a normal run.
	Initialize(configprovider.Configuration, *rez.RezManager)
	// CreateDatabase is called to create the database for the first time.
	// Note that Initialize is not called before this function, and you can
	// assume the program will exit shortly after this call.
	CreateDatabase(cfg configprovider.Configuration, overwrite bool) error
	// LockCoreSegment locks a segment, or blocks until a segment can be
	// locked, and returns the locked Segment.
	LockCoreSegment(uuid []byte) Segment
	LockVectorSegment(uuid []byte) Segment
	// Read reads the blob into the given buffer.
	Read(ctx context.Context, uuid []byte, address uint64, buffer []byte) ([]byte, error)
	// ReadSuperBlock reads the given version of the superblock into the buffer.
	ReadSuperBlock(ctx context.Context, uuid []byte, version uint64, buffer []byte) ([]byte, error)
	// WriteSuperBlock writes a superblock of the given version.
	// TODO the storage will probably need to chunk this, because superblock
	// logs of gigabytes are possible.
	WriteSuperBlock(uuid []byte, version uint64, buffer []byte)
	// SetStreamVersion sets the version of a stream. If the version is in the
	// past, it is essentially a rollback: although no space is freed, the
	// subsequent version numbers can be reused.
	// Note: do not call ReadSuperBlock with versions higher than the one
	// returned by GetStreamVersion, because those reads might succeed.
	SetStreamVersion(uuid []byte, version uint64)
	// GetStreamVersion returns just the version of a stream.
	GetStreamVersion(ctx context.Context, uuid []byte) (uint64, error)
	// ObliterateStreamMetadata tombstones a uuid.
	ObliterateStreamMetadata(uuid []byte)
	// BackgroundCleanup performs a background cleanup iteration. This could
	// take a long time. If it returns no error, the given uuids no longer
	// exist (assuming they were not written to while the background task was
	// active).
	BackgroundCleanup(uuids [][]byte) error
}
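Read together, the comments on WriteSuperBlock, SetStreamVersion, and GetStreamVersion suggest a commit pattern: write the superblock for the next version first, then advance the stream version, so that GetStreamVersion never points at a version whose superblock is missing. The sketch below, written as if it were part of this package, is a hypothetical commitVersion helper illustrating that ordering; it is not part of the interface.

// commitVersion is a hypothetical sketch of how the superblock and version
// calls compose. The superblock for the next version is persisted before the
// stream version is advanced to it.
func commitVersion(ctx context.Context, sp StorageProvider, uuid []byte, superblock []byte) (uint64, error) {
	current, err := sp.GetStreamVersion(ctx, uuid)
	if err != nil {
		return 0, err
	}
	// Presumably a freshly created stream reports SpecialVersionCreated, so
	// its first commit would land at SpecialVersionFirst (9 + 1 == 10).
	next := current + 1
	sp.WriteSuperBlock(uuid, next, superblock)
	sp.SetStreamVersion(uuid, next)
	return next, nil
}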