Documentation
¶
Index ¶
- type AppendOnly
- func (m *AppendOnly) ApplyStealSortedBatch(entries []batchpkg.Entry, onKey func(key []byte))
- func (m *AppendOnly) ApplyStealSortedBatchTrusted(entries []batchpkg.Entry, onKey func(key []byte))
- func (m *AppendOnly) Delete(key []byte)
- func (m *AppendOnly) DeleteSteal(key []byte)
- func (m *AppendOnly) DeleteWithCallback(key []byte, cb func(k, v []byte) error) error
- func (m *AppendOnly) Freeze()
- func (m *AppendOnly) Get(key []byte) ([]byte, bool, bool)
- func (m *AppendOnly) GetEntry(key []byte) ([]byte, page.ValuePtr, byte, bool)
- func (m *AppendOnly) Len() int
- func (m *AppendOnly) NewIterator(start, end []byte) iterator.UnsafeIterator
- func (m *AppendOnly) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
- func (m *AppendOnly) PutWithCallback(key, value []byte, cb func(k, v []byte) error) error
- func (m *AppendOnly) Reset()
- func (m *AppendOnly) ResetWithCapacity(capacity, estimatedBytesPerEntry int)
- func (m *AppendOnly) ResetWithCapacityHard(capacity, estimatedBytesPerEntry int)
- func (m *AppendOnly) Set(key, value []byte)
- func (m *AppendOnly) SetEntry(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *AppendOnly) SetEntryBorrowValue(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *AppendOnly) SetEntrySteal(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *AppendOnly) SetSteal(key, value []byte)
- func (m *AppendOnly) Size() int64
- func (*AppendOnly) StableUnsafeIteratorSlices() bool
- type BTree
- func (m *BTree) ApplyStealSortedBatch(entries []batchpkg.Entry, onKey func(key []byte))
- func (m *BTree) Delete(key []byte)
- func (m *BTree) DeleteSteal(key []byte)
- func (m *BTree) DeleteWithCallback(key []byte, cb func(k, v []byte) error) error
- func (m *BTree) Freeze()
- func (m *BTree) Get(key []byte) ([]byte, bool, bool)
- func (m *BTree) GetEntry(key []byte) ([]byte, page.ValuePtr, byte, bool)
- func (m *BTree) Len() int
- func (m *BTree) NewIterator(start, end []byte) iterator.UnsafeIterator
- func (m *BTree) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
- func (m *BTree) PutWithCallback(key, value []byte, cb func(k, v []byte) error) error
- func (m *BTree) Reset()
- func (m *BTree) Set(key, value []byte)
- func (m *BTree) SetEntry(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *BTree) SetEntrySteal(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *BTree) SetSteal(key, value []byte)
- func (m *BTree) Size() int64
- func (*BTree) StableUnsafeIteratorSlices() bool
- type HashSorted
- func (m *HashSorted) ApplyStealSortedBatch(entries []batchpkg.Entry, onKey func(key []byte))
- func (m *HashSorted) ApplyStealSortedBatchTrusted(entries []batchpkg.Entry, onKey func(key []byte))
- func (m *HashSorted) Delete(key []byte)
- func (m *HashSorted) DeleteSteal(key []byte)
- func (m *HashSorted) DeleteWithCallback(key []byte, cb func(k, v []byte) error) error
- func (m *HashSorted) Freeze()
- func (m *HashSorted) Get(key []byte) ([]byte, bool, bool)
- func (m *HashSorted) GetEntry(key []byte) ([]byte, page.ValuePtr, byte, bool)
- func (m *HashSorted) Len() int
- func (m *HashSorted) NewIterator(start, end []byte) iterator.UnsafeIterator
- func (m *HashSorted) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
- func (m *HashSorted) PutWithCallback(key, value []byte, cb func(k, v []byte) error) error
- func (m *HashSorted) Reset()
- func (m *HashSorted) Set(key, value []byte)
- func (m *HashSorted) SetEntry(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *HashSorted) SetEntrySteal(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *HashSorted) SetSteal(key, value []byte)
- func (m *HashSorted) Size() int64
- func (*HashSorted) StableUnsafeIteratorSlices() bool
- type HashSortedIndexer
- type Iterator
- func (it *Iterator) Close() error
- func (it *Iterator) Domain() (start, end []byte)
- func (it *Iterator) Error() error
- func (it *Iterator) IsDeleted() bool
- func (it *Iterator) Key() []byte
- func (it *Iterator) KeyCopy(dst []byte) []byte
- func (it *Iterator) Next()
- func (it *Iterator) Seek(key []byte)
- func (it *Iterator) UnsafeEntry() ([]byte, page.ValuePtr, byte)
- func (it *Iterator) UnsafeKey() []byte
- func (it *Iterator) UnsafeValue() []byte
- func (it *Iterator) Valid() bool
- func (it *Iterator) Value() []byte
- func (it *Iterator) ValueCopy(dst []byte) []byte
- type Memtable
- func (m *Memtable) ApplyStealSortedBatch(entries []batchpkg.Entry, onKey func(key []byte))
- func (m *Memtable) Delete(key []byte)
- func (m *Memtable) DeleteSteal(key []byte)
- func (m *Memtable) DeleteWithCallback(key []byte, cb func(k, v []byte) error) error
- func (m *Memtable) Freeze()
- func (m *Memtable) Get(key []byte) ([]byte, bool, bool)
- func (m *Memtable) GetEntry(key []byte) ([]byte, page.ValuePtr, byte, bool)
- func (m *Memtable) Len() int
- func (m *Memtable) NewIterator(start, end []byte) iterator.UnsafeIterator
- func (m *Memtable) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
- func (m *Memtable) PutWithCallback(key, value []byte, cb func(k, v []byte) error) error
- func (m *Memtable) Reset()
- func (m *Memtable) Set(key, value []byte)
- func (m *Memtable) SetEntry(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *Memtable) SetEntrySteal(key, value []byte, ptr page.ValuePtr, flags byte)
- func (m *Memtable) SetSteal(key, value []byte)
- func (m *Memtable) Size() int64
- func (*Memtable) StableUnsafeIteratorSlices() bool
- type Mode
- type ReverseIterator
- func (it *ReverseIterator) Close() error
- func (it *ReverseIterator) Domain() (start, end []byte)
- func (it *ReverseIterator) Error() error
- func (it *ReverseIterator) IsDeleted() bool
- func (it *ReverseIterator) Key() []byte
- func (it *ReverseIterator) KeyCopy(dst []byte) []byte
- func (it *ReverseIterator) Next()
- func (it *ReverseIterator) Seek(key []byte)
- func (it *ReverseIterator) UnsafeEntry() ([]byte, page.ValuePtr, byte)
- func (it *ReverseIterator) UnsafeKey() []byte
- func (it *ReverseIterator) UnsafeValue() []byte
- func (it *ReverseIterator) Valid() bool
- func (it *ReverseIterator) Value() []byte
- func (it *ReverseIterator) ValueCopy(dst []byte) []byte
- type SortedBatchApplier
- type StableUnsafeIteratorTable
- type Table
- type TrustedSortedBatchApplier
- type ValueBorrower
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AppendOnly ¶ added in v0.4.0
type AppendOnly struct {
// contains filtered or unexported fields
}
func NewAppendOnlyWithCapacity ¶ added in v0.4.0
func NewAppendOnlyWithCapacity(capacity int) *AppendOnly
func NewAppendOnlyWithCapacityEstimatedEntryBytes ¶ added in v0.4.0
func NewAppendOnlyWithCapacityEstimatedEntryBytes(capacity, estimatedBytesPerEntry int) *AppendOnly
func (*AppendOnly) ApplyStealSortedBatch ¶ added in v0.4.0
func (m *AppendOnly) ApplyStealSortedBatch(entries []batchpkg.Entry, onKey func(key []byte))
func (*AppendOnly) ApplyStealSortedBatchTrusted ¶ added in v0.4.0
func (m *AppendOnly) ApplyStealSortedBatchTrusted(entries []batchpkg.Entry, onKey func(key []byte))
func (*AppendOnly) Delete ¶ added in v0.4.0
func (m *AppendOnly) Delete(key []byte)
func (*AppendOnly) DeleteSteal ¶ added in v0.4.0
func (m *AppendOnly) DeleteSteal(key []byte)
func (*AppendOnly) DeleteWithCallback ¶ added in v0.4.0
func (m *AppendOnly) DeleteWithCallback(key []byte, cb func(k, v []byte) error) error
func (*AppendOnly) Freeze ¶ added in v0.4.0
func (m *AppendOnly) Freeze()
func (*AppendOnly) Len ¶ added in v0.4.0
func (m *AppendOnly) Len() int
func (*AppendOnly) NewIterator ¶ added in v0.4.0
func (m *AppendOnly) NewIterator(start, end []byte) iterator.UnsafeIterator
func (*AppendOnly) NewReverseIterator ¶ added in v0.4.0
func (m *AppendOnly) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
func (*AppendOnly) PutWithCallback ¶ added in v0.4.0
func (m *AppendOnly) PutWithCallback(key, value []byte, cb func(k, v []byte) error) error
func (*AppendOnly) Reset ¶ added in v0.4.0
func (m *AppendOnly) Reset()
func (*AppendOnly) ResetWithCapacity ¶ added in v0.4.0
func (m *AppendOnly) ResetWithCapacity(capacity, estimatedBytesPerEntry int)
ResetWithCapacity resets the memtable and, when needed, shrinks retained internal buffers toward the capacity-derived baseline. Unlike Reset, callers provide a capacity estimate so post-spike entry retention can decay.
func (*AppendOnly) ResetWithCapacityHard ¶ added in v0.4.0
func (m *AppendOnly) ResetWithCapacityHard(capacity, estimatedBytesPerEntry int)
ResetWithCapacityHard resets and clamps retained internal buffers to the capacity-derived baseline (without carrying over recent spike cardinality).
func (*AppendOnly) Set ¶ added in v0.4.0
func (m *AppendOnly) Set(key, value []byte)
func (*AppendOnly) SetEntry ¶ added in v0.4.0
func (m *AppendOnly) SetEntry(key, value []byte, ptr page.ValuePtr, flags byte)
func (*AppendOnly) SetEntryBorrowValue ¶ added in v0.4.0
func (m *AppendOnly) SetEntryBorrowValue(key, value []byte, ptr page.ValuePtr, flags byte)
func (*AppendOnly) SetEntrySteal ¶ added in v0.4.0
func (m *AppendOnly) SetEntrySteal(key, value []byte, ptr page.ValuePtr, flags byte)
func (*AppendOnly) SetSteal ¶ added in v0.4.0
func (m *AppendOnly) SetSteal(key, value []byte)
func (*AppendOnly) Size ¶ added in v0.4.0
func (m *AppendOnly) Size() int64
func (*AppendOnly) StableUnsafeIteratorSlices ¶ added in v0.4.0
func (*AppendOnly) StableUnsafeIteratorSlices() bool
type BTree ¶
type BTree struct {
// contains filtered or unexported fields
}
func NewBTreeWithDegree ¶
func (*BTree) ApplyStealSortedBatch ¶
func (*BTree) DeleteSteal ¶
func (*BTree) DeleteWithCallback ¶
func (*BTree) NewIterator ¶
func (m *BTree) NewIterator(start, end []byte) iterator.UnsafeIterator
func (*BTree) NewReverseIterator ¶ added in v0.4.0
func (m *BTree) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
func (*BTree) PutWithCallback ¶
func (*BTree) Reset ¶
func (m *BTree) Reset()
Reset clears all entries while retaining internal allocations.
func (*BTree) SetEntrySteal ¶ added in v0.2.0
func (*BTree) StableUnsafeIteratorSlices ¶ added in v0.4.0
type HashSorted ¶
type HashSorted struct {
// contains filtered or unexported fields
}
func NewHashSorted ¶
func NewHashSorted() *HashSorted
func NewHashSortedWithCapacityAndIndexer ¶ added in v0.3.0
func NewHashSortedWithCapacityAndIndexer(capacity int, indexer *HashSortedIndexer) *HashSorted
func NewHashSortedWithIndexer ¶
func NewHashSortedWithIndexer(indexer *HashSortedIndexer) *HashSorted
func (*HashSorted) ApplyStealSortedBatch ¶
func (m *HashSorted) ApplyStealSortedBatch(entries []batchpkg.Entry, onKey func(key []byte))
func (*HashSorted) ApplyStealSortedBatchTrusted ¶ added in v0.4.0
func (m *HashSorted) ApplyStealSortedBatchTrusted(entries []batchpkg.Entry, onKey func(key []byte))
func (*HashSorted) Delete ¶
func (m *HashSorted) Delete(key []byte)
func (*HashSorted) DeleteSteal ¶
func (m *HashSorted) DeleteSteal(key []byte)
func (*HashSorted) DeleteWithCallback ¶
func (m *HashSorted) DeleteWithCallback(key []byte, cb func(k, v []byte) error) error
func (*HashSorted) Freeze ¶
func (m *HashSorted) Freeze()
func (*HashSorted) Len ¶
func (m *HashSorted) Len() int
func (*HashSorted) NewIterator ¶
func (m *HashSorted) NewIterator(start, end []byte) iterator.UnsafeIterator
func (*HashSorted) NewReverseIterator ¶ added in v0.4.0
func (m *HashSorted) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
func (*HashSorted) PutWithCallback ¶
func (m *HashSorted) PutWithCallback(key, value []byte, cb func(k, v []byte) error) error
func (*HashSorted) Reset ¶
func (m *HashSorted) Reset()
Reset clears all entries while retaining internal allocations.
func (*HashSorted) Set ¶
func (m *HashSorted) Set(key, value []byte)
func (*HashSorted) SetEntry ¶ added in v0.2.0
func (m *HashSorted) SetEntry(key, value []byte, ptr page.ValuePtr, flags byte)
func (*HashSorted) SetEntrySteal ¶ added in v0.2.0
func (m *HashSorted) SetEntrySteal(key, value []byte, ptr page.ValuePtr, flags byte)
func (*HashSorted) SetSteal ¶
func (m *HashSorted) SetSteal(key, value []byte)
func (*HashSorted) Size ¶
func (m *HashSorted) Size() int64
func (*HashSorted) StableUnsafeIteratorSlices ¶ added in v0.4.0
func (*HashSorted) StableUnsafeIteratorSlices() bool
type HashSortedIndexer ¶
type HashSortedIndexer struct {
// contains filtered or unexported fields
}
HashSortedIndexer processes sealed key chunks in the background.
func NewHashSortedIndexer ¶
func NewHashSortedIndexer() *HashSortedIndexer
func (*HashSortedIndexer) Close ¶
func (x *HashSortedIndexer) Close()
Close stops the indexer after draining queued work.
type Iterator ¶
type Iterator struct {
// contains filtered or unexported fields
}
Iterator is a forward iterator over memtable entries; ReverseIterator provides the corresponding reverse traversal.
func (*Iterator) UnsafeValue ¶
type Memtable ¶
type Memtable struct {
// contains filtered or unexported fields
}
func NewWithCapacity ¶
NewWithCapacity creates a new Memtable with the requested arena capacity. A non-positive capacity uses a small default to keep rotations cheap.
func (*Memtable) ApplyStealSortedBatch ¶
func (*Memtable) DeleteSteal ¶
DeleteSteal is equivalent to Delete: the SkipList copies data, so there is nothing to steal.
func (*Memtable) DeleteWithCallback ¶
func (*Memtable) NewIterator ¶
func (m *Memtable) NewIterator(start, end []byte) iterator.UnsafeIterator
func (*Memtable) NewReverseIterator ¶ added in v0.4.0
func (m *Memtable) NewReverseIterator(start, end []byte) iterator.UnsafeIterator
func (*Memtable) PutWithCallback ¶
func (*Memtable) Reset ¶
func (m *Memtable) Reset()
Reset clears the memtable while retaining its arena capacity.
func (*Memtable) SetEntrySteal ¶ added in v0.2.0
SetEntrySteal is equivalent to SetEntry: the SkipList copies data, so there is nothing to steal.
func (*Memtable) StableUnsafeIteratorSlices ¶ added in v0.4.0
type ReverseIterator ¶ added in v0.4.0
type ReverseIterator struct {
// contains filtered or unexported fields
}
func (*ReverseIterator) Close ¶ added in v0.4.0
func (it *ReverseIterator) Close() error
func (*ReverseIterator) Domain ¶ added in v0.4.0
func (it *ReverseIterator) Domain() (start, end []byte)
func (*ReverseIterator) Error ¶ added in v0.4.0
func (it *ReverseIterator) Error() error
func (*ReverseIterator) IsDeleted ¶ added in v0.4.0
func (it *ReverseIterator) IsDeleted() bool
func (*ReverseIterator) Key ¶ added in v0.4.0
func (it *ReverseIterator) Key() []byte
func (*ReverseIterator) KeyCopy ¶ added in v0.4.0
func (it *ReverseIterator) KeyCopy(dst []byte) []byte
func (*ReverseIterator) Next ¶ added in v0.4.0
func (it *ReverseIterator) Next()
func (*ReverseIterator) Seek ¶ added in v0.4.0
func (it *ReverseIterator) Seek(key []byte)
func (*ReverseIterator) UnsafeEntry ¶ added in v0.4.0
func (it *ReverseIterator) UnsafeEntry() ([]byte, page.ValuePtr, byte)
func (*ReverseIterator) UnsafeKey ¶ added in v0.4.0
func (it *ReverseIterator) UnsafeKey() []byte
func (*ReverseIterator) UnsafeValue ¶ added in v0.4.0
func (it *ReverseIterator) UnsafeValue() []byte
func (*ReverseIterator) Valid ¶ added in v0.4.0
func (it *ReverseIterator) Valid() bool
func (*ReverseIterator) Value ¶ added in v0.4.0
func (it *ReverseIterator) Value() []byte
func (*ReverseIterator) ValueCopy ¶ added in v0.4.0
func (it *ReverseIterator) ValueCopy(dst []byte) []byte
type SortedBatchApplier ¶
type SortedBatchApplier interface {
ApplyStealSortedBatch(entries []batchpkg.Entry, onKey func(key []byte))
}
SortedBatchApplier is an optional fast path for applying a strictly increasing batch under a single memtable lock.
Callers should only use this when they know the entries are already in strictly increasing key order.
type StableUnsafeIteratorTable ¶ added in v0.4.0
type StableUnsafeIteratorTable interface {
StableUnsafeIteratorSlices() bool
}
StableUnsafeIteratorTable marks memtable implementations whose iterator.UnsafeIterator key/value views (from UnsafeKey/UnsafeValue/UnsafeEntry) are backed by storage that outlives the iterator itself.
Implementations returning true MUST ensure these views stay valid and immutable across Next/Seek and remain valid after Close, at least until the underlying memtable is reset or garbage-collected.
Callers such as buildOpRuns rely on this stronger contract to materialize immutable flush runs without per-entry defensive copies.
type Table ¶
type Table interface {
Set(key, value []byte)
// SetEntry stores a value with explicit flags and optional value pointer.
// When flags include node.FlagPointer, value may be nil and ptr must be set;
// if value is non-nil it is stored alongside the pointer bytes.
SetEntry(key, value []byte, ptr page.ValuePtr, flags byte)
PutWithCallback(key, value []byte, cb func(k, v []byte) error) error
Delete(key []byte)
DeleteWithCallback(key []byte, cb func(k, v []byte) error) error
SetSteal(key, value []byte)
// SetEntrySteal is like SetEntry but allows stealing the provided value slice.
SetEntrySteal(key, value []byte, ptr page.ValuePtr, flags byte)
DeleteSteal(key []byte)
Get(key []byte) ([]byte, bool, bool)
// GetEntry returns the raw entry, including pointer and flags, if present.
GetEntry(key []byte) (val []byte, ptr page.ValuePtr, flags byte, found bool)
Size() int64
Len() int
// NewIterator may hold a read lock until Close; callers should avoid
// iterating over mutable memtables on hot write paths.
NewIterator(start, end []byte) iterator.UnsafeIterator
// NewReverseIterator returns a reverse iterator over [start, end).
// Like NewIterator, it may hold a read lock until Close.
NewReverseIterator(start, end []byte) iterator.UnsafeIterator
Freeze()
}
func NewWithCapacityModeAndIndexer ¶
func NewWithCapacityModeAndIndexer(capacity int, mode Mode, indexer *HashSortedIndexer) (Table, error)
type TrustedSortedBatchApplier ¶ added in v0.4.0
type TrustedSortedBatchApplier interface {
ApplyStealSortedBatchTrusted(entries []batchpkg.Entry, onKey func(key []byte))
}
TrustedSortedBatchApplier is an optional fast path for callers that already guarantee strictly increasing keys (for example, stream-qualified batch writes partitioned by shard while preserving source order).
type ValueBorrower ¶ added in v0.4.0
type ValueBorrower interface {
SetEntryBorrowValue(key, value []byte, ptr page.ValuePtr, flags byte)
}
ValueBorrower marks memtables that can safely retain caller-owned value slices while still copying keys into their own storage.
Callers must keep the underlying value storage alive until the memtable is retired or reset.