Documentation
¶
Index ¶
- Variables
- func SetIteratorDebug(enabled bool)
- type BackendDB
- type Batch
- func (b *Batch) Close() error
- func (b *Batch) Delete(key []byte) error
- func (b *Batch) DeleteView(key []byte) error
- func (b *Batch) GetByteSize() (int, error)
- func (b *Batch) Replay(fn func(batch.Entry) error) error
- func (b *Batch) Reset()
- func (b *Batch) Set(key, value []byte) error
- func (b *Batch) SetOps(ops []batch.Entry) error
- func (b *Batch) SetView(key, value []byte) error
- func (b *Batch) Write() error
- func (b *Batch) WriteSync() error
- type DB
- func (db *DB) Checkpoint() error
- func (db *DB) Close() error
- func (db *DB) CompactionAssist()
- func (db *DB) Delete(key []byte) error
- func (db *DB) DeleteRange(start, end []byte) error
- func (db *DB) DeleteSync(key []byte) error
- func (db *DB) Drain() error
- func (db *DB) Get(key []byte) ([]byte, error)
- func (db *DB) GetAppend(key, dst []byte) ([]byte, error)
- func (db *DB) GetUnsafe(key []byte) ([]byte, error)
- func (db *DB) Has(key []byte) (bool, error)
- func (db *DB) Iterator(start, end []byte) (merging.Iterator, error)
- func (db *DB) NewBatch() *Batch
- func (db *DB) NewBatchWithSize(size int) *Batch
- func (db *DB) Print() error
- func (db *DB) QueueBacklogBytes() int64
- func (db *DB) ReverseIterator(start, end []byte) (merging.Iterator, error)
- func (db *DB) Set(key, value []byte) error
- func (db *DB) SetDictStore(store DictStore)
- func (db *DB) SetSync(key, value []byte) error
- func (db *DB) StartAutoCheckpoint(interval time.Duration, maxWALBytes int64, idleInterval time.Duration)
- func (db *DB) Stats() map[string]string
- func (db *DB) TriggerAutoCheckpoint()
- func (db *DB) TriggerFlush()
- type DictStore
- type Options
- type VlogAutotuneBenchMode
- type VlogAutotuneBenchRequest
- type VlogAutotuneBenchResult
- type VlogAutotuneBenchSegment
- type VlogAutotuneBenchSegmentResult
Constants ¶
This section is empty.
Variables ¶
var ErrBatchClosed = fmt.Errorf("batch has been written or closed")
var ErrKeyEmpty = fmt.Errorf("key cannot be empty")
var ErrMemtableFull = fmt.Errorf("memtable full")
var ErrMemtableValueLogPointers = errors.New("memtable value-log pointers require WAL/value-log enabled")
var ErrSplitValueLog = errors.New("split value log requires WAL/value-log enabled")
var ErrUnsafeOptions = fmt.Errorf("unsafe options require AllowUnsafe")
var ErrValueNil = fmt.Errorf("value cannot be nil")
Functions ¶
func SetIteratorDebug ¶
func SetIteratorDebug(enabled bool)
SetIteratorDebug toggles attaching debug metadata to iterators returned by CachingDB.Iterator. It is intended for benchmarking/diagnostics.
Types ¶
type BackendDB ¶
type BackendDB interface {
Get(key []byte) ([]byte, error)
GetUnsafe(key []byte) ([]byte, error)
GetAppend(key, dst []byte) ([]byte, error)
Has(key []byte) (bool, error)
Iterator(start, end []byte) (iterator.UnsafeIterator, error)
ReverseIterator(start, end []byte) (iterator.UnsafeIterator, error)
NewBatch() batch.Interface
Close() error
Print() error
Stats() map[string]string
}
BackendDB defines the subset of treedb.DB needed by CachingDB.
type Batch ¶
type Batch struct {
// contains filtered or unexported fields
}
func (*Batch) DeleteView ¶
DeleteView records a Delete without copying key bytes. Callers must treat key as immutable until the batch is written or closed.
func (*Batch) GetByteSize ¶
func (*Batch) Reset ¶
func (b *Batch) Reset()
Reset clears the batch for reuse without closing it.
This intentionally keeps internal buffers to avoid per-batch allocations in callers that frequently reset (e.g. geth benchmarks).
type DB ¶
type DB struct {
// contains filtered or unexported fields
}
func (*DB) Checkpoint ¶
Checkpoint forces a durable backend boundary and trims the WAL so long-running cached-mode runs do not accumulate unbounded `wal/` growth.
It blocks writers while it:
- rotates the current mutable memtable (if non-empty),
- rotates to a fresh WAL segment,
- flushes all queued memtables with backend sync,
- forces a backend sync boundary (even if the queue is empty),
- removes all older WAL segments (keeping only the currently-open one).
func (*DB) CompactionAssist ¶
func (db *DB) CompactionAssist()
CompactionAssist performs bounded flush work when backpressure triggers. It is intended to be called by background maintenance (e.g. slab compaction) so that flush debt does not grow unbounded in the absence of foreground writes.
func (*DB) DeleteRange ¶
DeleteRange deletes all keys in the range [start, end).
When WAL is disabled and the backend is empty, a full-range delete can be satisfied by clearing the in-memory layers without enumerating keys.
func (*DB) DeleteSync ¶
func (*DB) Drain ¶
Drain flushes all currently buffered writes (mutable + queued memtables) to the backend. It is intended for maintenance operations that require a fully materialized backend state (e.g. index vacuum).
Drain does not provide mutual exclusion against concurrent writers; callers should ensure no writes occur concurrently if they require a fully drained state.
func (*DB) GetAppend ¶
GetAppend appends the value for the key to dst and returns the new slice. If the key is not found, it returns dst and ErrKeyNotFound.
func (*DB) NewBatchWithSize ¶
func (*DB) QueueBacklogBytes ¶
QueueBacklogBytes returns the current queued memtable backlog in bytes.
func (*DB) ReverseIterator ¶
func (*DB) SetDictStore ¶ added in v0.2.0
SetDictStore installs the dictionary store for current-ID freezing.
func (*DB) StartAutoCheckpoint ¶
func (db *DB) StartAutoCheckpoint(interval time.Duration, maxWALBytes int64, idleInterval time.Duration)
StartAutoCheckpoint enables a background loop that periodically forces a durable boundary and trims cached-mode WAL segments. When idleInterval > 0, it also triggers an opportunistic checkpoint after a period of write-idleness.
interval > 0 enables periodic checkpoints. maxWALBytes is a safety cap: if > 0, the loop will attempt to checkpoint when the effective WAL bytes exceed this cap. maxWALBytes <= 0 disables the size trigger.
This does not make each individual write durable; it bounds the window of unsynced writes for long-running workloads.
func (*DB) TriggerAutoCheckpoint ¶
func (db *DB) TriggerAutoCheckpoint()
TriggerAutoCheckpoint schedules a best-effort immediate auto-checkpoint pass.
func (*DB) TriggerFlush ¶
func (db *DB) TriggerFlush()
TriggerFlush schedules a background flush pass (best-effort).
type DictStore ¶ added in v0.2.0
type DictStore interface {
GetCurrent(ctx context.Context) (uint64, error)
GetDictBytes(ctx context.Context, dictID uint64) ([]byte, error)
}
DictStore provides access to the current dictionary ID for write freezing.
type Options ¶
type Options struct {
FlushThreshold int64
// MemtableMode selects the in-memory write buffer implementation.
// Supported: "skiplist", "hash_sorted", "btree", "adaptive".
// Use "adaptive" or "adaptive:<mode>" to switch per-rotation based on workload.
MemtableMode string
// MemtableShards controls the number of mutable memtable shards. Values <= 0
// use a default derived from GOMAXPROCS. The count is rounded down to a power
// of two.
MemtableShards int
// Legacy backpressure knob: queue length limit.
// 0 uses the default (4). <0 disables writer backpressure entirely.
MaxQueuedMemtables int
// Adaptive backpressure knobs (seconds/bytes). If any of these are non-zero,
// the caching layer uses backlog-bytes thresholds instead of queue length.
SlowdownBacklogSeconds float64
StopBacklogSeconds float64
MaxBacklogBytes int64
// Writer flush assist limits when backpressure triggers.
WriterFlushMaxMemtables int
WriterFlushMaxDuration time.Duration
// FlushBuildConcurrency controls how many goroutines may be used to build a
// combined flush batch from multiple immutable memtables. Values <= 1 disable
// parallelism.
FlushBuildConcurrency int
// DisableWAL disables the Write-Ahead Log entirely (legacy alias for disabling
// both the journal/redo log and value-log pointers).
DisableWAL bool
// DisableJournal disables the redo/journal records while still allowing
// value-log pointers/value storage. A crash may lose writes since the last
// checkpoint because there is no redo log to replay.
DisableJournal bool
// DisableValueLog forces the cached WAL to remain in legacy mode (no value-log pointers).
DisableValueLog bool
// SplitValueLog stores WAL records in wal/ while large values go to vlog/
// segments, and WAL entries reference them via pointers.
SplitValueLog bool
// JournalLanes controls the number of active commit/value log lanes (0=default).
// Max supported lanes is 255; value-log segment sequence per lane is capped at 8,388,607.
JournalLanes int
// WALMaxSegmentBytes caps the size of a single WAL segment payload.
// 0 uses the default limit.
WALMaxSegmentBytes int64
// JournalCompression enables best-effort zstd compression for journal/commitlog
// segments (metadata only). The writer only keeps compressed bytes when they
// are smaller than the raw payload, so compression never causes size
// amplification.
JournalCompression bool
// RelaxedSync disables fsync on Sync operations.
RelaxedSync bool
// MemtableValueLogPointers avoids storing large values in the memtable and
// serves them by pointer from the value log (WAL/vlog). Requires WAL/value-log.
MemtableValueLogPointers bool
// ValueLogPointerThreshold controls when WAL/vlog pointers are used.
// Values <= 0 use the default inline threshold (256 bytes).
ValueLogPointerThreshold int
// DisableReadChecksum skips CRC verification on value-log reads.
DisableReadChecksum bool
// AllowUnsafe acknowledges unsafe durability options.
// When false, Open will reject DisableWAL or RelaxedSync.
AllowUnsafe bool
// MaxValueLogRetainedBytes emits a warning when retained value-log bytes exceed
// this threshold (0 disables warnings).
MaxValueLogRetainedBytes int64
// MaxValueLogRetainedBytesHard disables value-log pointers for new large
// values once retained bytes exceed this threshold (0 disables the cap).
MaxValueLogRetainedBytesHard int64
// ValueLogDictTrain configures background dictionary training for value-log frame compression.
// TrainBytes <= 0 disables training.
ValueLogDictTrain compression.TrainConfig
// ValueLogDictAdaptiveRatio enables adaptive pause of dict compression when payload ratios degrade.
// 0 disables.
ValueLogDictAdaptiveRatio float64
// ValueLogDictMetricsWindowBytes controls the metrics window size (0=default).
ValueLogDictMetricsWindowBytes int
// ValueLogDictMetricsMinRecords is a minimum record count before pausing (0=default).
ValueLogDictMetricsMinRecords int
// ValueLogDictMetricsPauseBytes controls pause duration in bytes (0=default).
ValueLogDictMetricsPauseBytes int
// ValueLogDictMinPayloadSavingsRatio rejects newly trained dictionaries whose payload ratio
// does not improve by at least this fraction (0 uses default).
ValueLogDictMinPayloadSavingsRatio float64
// ValueLogCompressionAutotune configures the wall-time value-log compression autotuner.
// Cached mode only (SplitValueLog must be enabled).
ValueLogCompressionAutotune valuelog.AutotuneOptions
// NotifyError is an optional hook for background maintenance failures.
NotifyError func(error)
}
type VlogAutotuneBenchMode ¶ added in v0.2.0
type VlogAutotuneBenchMode string
VlogAutotuneBenchMode controls the deterministic bench mode.
const (
	VlogAutotuneBenchOff VlogAutotuneBenchMode = "off"
	VlogAutotuneBenchNoDictFixed VlogAutotuneBenchMode = "no_dict_fixed"
	VlogAutotuneBenchDictFixed VlogAutotuneBenchMode = "dict_fixed"
	VlogAutotuneBenchAutotune VlogAutotuneBenchMode = "autotune"
)
type VlogAutotuneBenchRequest ¶ added in v0.2.0
type VlogAutotuneBenchRequest struct {
Mode VlogAutotuneBenchMode
FixedK int
Segments []VlogAutotuneBenchSegment
}
type VlogAutotuneBenchResult ¶ added in v0.2.0
type VlogAutotuneBenchResult struct {
Mode VlogAutotuneBenchMode
Segments []VlogAutotuneBenchSegmentResult
RawBytes uint64
StoredBytes uint64
WallTimeNs int64
ThroughputMB float64
TrainerStats compression.TrainerStats
}
func RunVlogAutotuneBench ¶ added in v0.2.0
func RunVlogAutotuneBench(req VlogAutotuneBenchRequest) (*VlogAutotuneBenchResult, error)
type VlogAutotuneBenchSegment ¶ added in v0.2.0
type VlogAutotuneBenchSegmentResult ¶ added in v0.2.0
type VlogAutotuneBenchSegmentResult struct {
Name string
RawBytes uint64
StoredBytes uint64
WallTimeNs int64
ThroughputRawMBps float64
AttemptedFrac float64
KeptFrac float64
ObservedRatio float64
FramesTotal uint64
FramesAttempted uint64
FramesKept uint64
EncodeNsTotal int64
IoNsTotal int64
State string
DictID uint64
DictHash uint64
HistoryBytes int
K int
PublishOrderingOK bool
TrainerProfileOK bool
TrainerProfileK int
TrainerProfileHash uint64
}