Documentation
¶
Index ¶
- Constants
- Variables
- func ApplyProfile(opts *Options, profile Profile)
- func DisableValueLogDictCompression(opts *Options)
- func EnableValueLogDictCompression(opts *Options)
- func VacuumIndexOffline(opts Options) error
- type AutotuneMode
- type AutotuneOptions
- type Batch
- type DB
- func (db *DB) AcquireSnapshot() *Snapshot
- func (db *DB) Checkpoint() error
- func (db *DB) Close() error
- func (db *DB) CompactIndex() error
- func (db *DB) Delete(key []byte) error
- func (db *DB) DeleteRange(start, end []byte) error
- func (db *DB) DeleteSync(key []byte) error
- func (db *DB) DurabilityMode() string
- func (db *DB) FragmentationReport() (map[string]string, error)
- func (db *DB) Get(key []byte) ([]byte, error)
- func (db *DB) GetAppend(key, dst []byte) ([]byte, error)
- func (db *DB) GetUnsafe(key []byte) ([]byte, error)
- func (db *DB) Has(key []byte) (bool, error)
- func (db *DB) Iterator(start, end []byte) (Iterator, error)
- func (db *DB) NewBatch() Batch
- func (db *DB) NewBatchWithSize(size int) Batch
- func (db *DB) Print() error
- func (db *DB) ReverseIterator(start, end []byte) (Iterator, error)
- func (db *DB) Set(key, value []byte) error
- func (db *DB) SetSync(key, value []byte) error
- func (db *DB) Stats() map[string]string
- func (db *DB) VacuumIndexOnline(ctx context.Context) error
- func (db *DB) ValueLogGC(ctx context.Context, opts ValueLogGCOptions) (ValueLogGCStats, error)
- type DictLookup
- type DurabilityMode
- type IntegrityMode
- type Iterator
- type Options
- type Profile
- type Snapshot
- type TrainConfig
- type ValueLogAutoPolicy
- type ValueLogBlockCodec
- type ValueLogCompressionMode
- type ValueLogGCOptions
- type ValueLogGCStats
- type ValueLogOptions
- type ValueLogRewriteStats
- type ZSTDEncoderLevel
Examples ¶
Constants ¶
const (
    DurabilityDurable       = db.DurabilityDurable
    DurabilityWALOnRelaxed  = db.DurabilityWALOnRelaxed
    DurabilityWALOffRelaxed = db.DurabilityWALOffRelaxed
)

const (
    IntegrityVerify        = db.IntegrityVerify
    IntegritySkipChecksums = db.IntegritySkipChecksums
)

const (
    ValueLogCompressionOff   = db.ValueLogCompressionOff
    ValueLogCompressionBlock = db.ValueLogCompressionBlock
    ValueLogCompressionDict  = db.ValueLogCompressionDict
    ValueLogCompressionAuto  = db.ValueLogCompressionAuto
)

const (
    ValueLogBlockSnappy = db.ValueLogBlockSnappy
    ValueLogBlockLZ4    = db.ValueLogBlockLZ4
)

const (
    ValueLogAutoThroughput = db.ValueLogAutoThroughput
    ValueLogAutoBalanced   = db.ValueLogAutoBalanced
    ValueLogAutoSize       = db.ValueLogAutoSize
)

const (
    AutotuneUnset      = valuelog.AutotuneUnset
    AutotuneOff        = valuelog.AutotuneOff
    AutotuneMedium     = valuelog.AutotuneMedium
    AutotuneAggressive = valuelog.AutotuneAggressive
)

const (
    ZSTDLevelFastest = zstd.SpeedFastest
    ZSTDLevelDefault = zstd.SpeedDefault
    ZSTDLevelBetter  = zstd.SpeedBetterCompression
    ZSTDLevelBest    = zstd.SpeedBestCompression
)
Variables ¶
var (
    // ErrLocked indicates the database directory is already opened by another process.
    ErrLocked = db.ErrLocked
    // ErrMemtableFull indicates the cached memtable has reached its hard cap.
    ErrMemtableFull = caching.ErrMemtableFull
    // ErrClosed indicates the DB handle has been closed.
    ErrClosed = errors.New("treedb: db is closed")
    // ErrKeyNotFound indicates the key does not exist.
    ErrKeyNotFound = tree.ErrKeyNotFound
)
Functions ¶
func ApplyProfile ¶
ApplyProfile applies a profile to opts without overwriting explicit caller overrides.
For numeric/duration fields, "explicit override" means the field is already set to a non-zero value. For background intervals, note TreeDB conventions:
- `0` means "use default"
- `<0` means "disable"
For booleans, Go does not provide a way to distinguish "unset" from "explicit false", so profiles set boolean policy knobs to match the profile. If you want the opposite policy, apply the profile and then override the boolean explicitly.
func DisableValueLogDictCompression ¶ added in v0.3.0
func DisableValueLogDictCompression(opts *Options)
DisableValueLogDictCompression disables background dictionary training for value-log frame compression (cached mode).
It does not remove dictdb state on disk; it only prevents training/publishing new dictionaries for future writes.
func EnableValueLogDictCompression ¶ added in v0.3.0
func EnableValueLogDictCompression(opts *Options)
EnableValueLogDictCompression enables background dictionary training for value-log frame compression (cached mode).
This is a convenience helper intended to avoid requiring callers to set many low-level knobs. It sets TrainBytes to a safe default if unset and ensures side stores are enabled so dictionaries can be persisted in dictdb/.
Advanced tuning remains available via opts.ValueLog.DictTrain and opts.ValueLog.CompressionAutotune.
func VacuumIndexOffline ¶
VacuumIndexOffline rewrites `index.db` into a fresh file and swaps it in. This is intended to reclaim space and restore locality after long churn.
It is an offline operation: it acquires the exclusive open lock for opts.Dir.
Types ¶
type AutotuneMode ¶ added in v0.3.0
type AutotuneMode = valuelog.AutotuneMode
type AutotuneOptions ¶ added in v0.3.0
type AutotuneOptions = valuelog.AutotuneOptions
type Batch ¶
type Batch interface {
Set(key, value []byte) error
Delete(key []byte) error
Write() error
WriteSync() error
Close() error
Replay(func(batch.Entry) error) error
GetByteSize() (int, error)
}
Batch is the public batch contract returned by TreeDB. Both cached and backend implementations satisfy it.
Example ¶
package main
import (
"fmt"
"os"
treedb "github.com/snissn/gomap/TreeDB"
)
func main() {
dir, err := os.MkdirTemp("", "treedb-batch-example-")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
db, err := treedb.Open(treedb.Options{Dir: dir})
if err != nil {
panic(err)
}
defer db.Close()
batch := db.NewBatch()
if batch == nil {
panic("batch creation failed")
}
batch.Set([]byte("key1"), []byte("value1"))
batch.Set([]byte("key2"), []byte("value2"))
batch.Delete([]byte("key1"))
// WriteSync ensures durability.
if err := batch.WriteSync(); err != nil {
panic(err)
}
val, _ := db.Get([]byte("key2"))
fmt.Println("key2:", string(val))
val, _ = db.Get([]byte("key1"))
fmt.Println("key1:", val)
}
Output:
key2: value2
key1: []
type DB ¶
type DB struct {
// contains filtered or unexported fields
}
DB is the public TreeDB handle (cached mode by default; read-only opens skip caching).
func Open ¶
Open opens TreeDB. By default it enables caching (write-back layer).
Example ¶
package main
import (
"fmt"
"os"
treedb "github.com/snissn/gomap/TreeDB"
)
func main() {
dir, err := os.MkdirTemp("", "treedb-example-")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
db, err := treedb.Open(treedb.Options{Dir: dir, ChunkSize: 64 * 1024})
if err != nil {
panic(err)
}
defer db.Close()
if err := db.SetSync([]byte("k"), []byte("v")); err != nil {
panic(err)
}
val, err := db.Get([]byte("k"))
if err != nil {
panic(err)
}
fmt.Println(string(val))
}
Output: v
func (*DB) AcquireSnapshot ¶
AcquireSnapshot returns a new snapshot.
func (*DB) Checkpoint ¶
Checkpoint forces a durable backend boundary and trims cached-mode WAL segments, so long-running cached-mode workloads do not accumulate unbounded `wal/` growth.
In cached mode this flushes queued memtables with backend sync and resets the WAL to a fresh segment. In backend mode it forces a sync boundary.
func (*DB) CompactIndex ¶
CompactIndex performs an in-place index vacuum (bulk rebuild) on the backend. In cached mode it first drains the caching layer so the backend reflects all buffered writes before rebuilding.
func (*DB) DeleteRange ¶
DeleteRange removes all keys in the range [start, end).
This is primarily used by benchmark suites and maintenance tooling. In cached mode, it may use fast paths that avoid per-key tombstones when safe.
func (*DB) DeleteSync ¶
DeleteSync removes a key and forces a durability boundary.
func (*DB) DurabilityMode ¶
DurabilityMode reports the effective durability/integrity policy string.
func (*DB) FragmentationReport ¶
FragmentationReport returns best-effort structural stats about the on-disk user index that help diagnose scan regressions after churn.
Note: In cached mode this reflects the backend state only; queued memtables are not included unless the caller has explicitly drained the cache (e.g. via close+reopen or a maintenance operation that drains).
func (*DB) GetAppend ¶
GetAppend appends the value for the key to dst and returns the new slice. It avoids internal allocations by using the provided buffer. If the key is not found, it returns dst and ErrKeyNotFound.
func (*DB) GetUnsafe ¶
GetUnsafe returns the value for a key.
Semantics: Returns a safe copy of the value. For zero-copy views tied to a snapshot lifetime, use AcquireSnapshot().GetUnsafe.
func (*DB) NewBatchWithSize ¶
NewBatchWithSize creates a new batch with a hint for the expected entry size.
func (*DB) ReverseIterator ¶
ReverseIterator returns a reverse iterator over the range [start, end).
func (*DB) SetSync ¶
SetSync writes a key/value pair and forces a durability boundary. With DurabilityWALOnRelaxed or DurabilityWALOffRelaxed enabled, Sync operations are crash-consistent only (no fsync) and may not survive power loss.
func (*DB) VacuumIndexOnline ¶
VacuumIndexOnline rebuilds the user index into a new file and swaps it in with a short writer pause. Disk space from the old index is reclaimed once any old snapshots/iterators drain.
func (*DB) ValueLogGC ¶ added in v0.3.0
func (db *DB) ValueLogGC(ctx context.Context, opts ValueLogGCOptions) (ValueLogGCStats, error)
ValueLogGC deletes fully-unreferenced value-log segments.
In cached mode, this first checkpoints to ensure memtable/WAL state is fully reflected in the backend before scanning pointers.
type DictLookup ¶ added in v0.3.0
type DictLookup = valuelog.DictLookup
type DurabilityMode ¶ added in v0.3.0
type DurabilityMode = db.DurabilityMode
type IntegrityMode ¶ added in v0.3.0
type IntegrityMode = db.IntegrityMode
type Iterator ¶
type Iterator interface {
Valid() bool
Next()
Key() []byte
Value() []byte
KeyCopy(dst []byte) []byte
ValueCopy(dst []byte) []byte
Close() error
Error() error
}
Iterator is the public iterator contract returned by TreeDB.
Semantics (performance-first; callers must treat slices as read-only):
- Key() and Value() return views valid until the next Next()/Close().
- Use KeyCopy/ValueCopy if you need stable bytes.
Example ¶
package main
import (
"fmt"
"os"
treedb "github.com/snissn/gomap/TreeDB"
)
func main() {
dir, err := os.MkdirTemp("", "treedb-iter-example-")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
db, err := treedb.Open(treedb.Options{Dir: dir})
if err != nil {
panic(err)
}
defer db.Close()
// Insert keys in random order
db.SetSync([]byte("b"), []byte("2"))
db.SetSync([]byte("a"), []byte("1"))
db.SetSync([]byte("c"), []byte("3"))
// Iterate over all keys
it, err := db.Iterator(nil, nil)
if err != nil {
panic(err)
}
defer it.Close()
for ; it.Valid(); it.Next() {
fmt.Printf("%s: %s\n", it.Key(), it.Value())
}
}
Output:
a: 1
b: 2
c: 3
type Options ¶
Options configures TreeDB. It is re-exported from TreeDB/db for convenience.
func OptionsFor ¶
OptionsFor returns a copy of Options pre-filled for the given Profile.
The returned Options still follow TreeDB's normal defaulting rules for fields left as zero values (e.g. ChunkSize, KeepRecent, backpressure thresholds).
type Profile ¶
type Profile string
Profile is a documented, high-level preset for TreeDB Options.
Why profiles exist
------------------

TreeDB exposes many low-level knobs because different workloads want different trade-offs (durability vs throughput, steady-state vs benchmark determinism, background maintenance vs predictable latency).
In practice, most callers want one of a small number of "bundles":
- "Durable": the safest defaults; favors crash-recovery and integrity.
- "Fast": higher throughput by relaxing durability/integrity knobs.
- "Bench": a deterministic variant intended for benchmarking; disables background workers that can otherwise inject "random" work (e.g. index vacuum firing mid-run).
Profiles are intentionally conservative:
- They set the *meaningful policy knobs* (durability/integrity/background), while leaving the many throughput/capacity tuning knobs at their usual defaults.
- They do not require TreeDB internals to infer "intent" from combinations of flags. You pick the intent explicitly.
How to use
----------

New DB (recommended):

    opts := treedb.OptionsFor(treedb.ProfileDurable, "/path/to/db")
    opts.FlushThreshold = 128 << 20 // optional tuning
    db, err := treedb.Open(opts)

Existing Options (merge without clobbering explicit overrides):

    opts := treedb.Options{Dir: "/path/to/db"}
    treedb.ApplyProfile(&opts, treedb.ProfileBench)
    opts.FlushThreshold = 64 << 20 // explicit overrides always win
    db, err := treedb.Open(opts)
Note: Profiles are a convenience API. TreeDB still honors the established "0 uses a default; <0 disables" convention for background knobs, and callers can always override any field directly after applying a profile.
const (
    // ProfileDurable is the recommended default for production use when you care
    // about durability and corruption detection.
    //
    // It keeps WAL and checksums enabled and leaves background maintenance at
    // their default settings.
    ProfileDurable Profile = "durable"

    // ProfileFast prioritizes throughput by relaxing durability/integrity knobs.
    //
    // This profile is appropriate when:
    //   - you are running on top of an external durability boundary (e.g. you
    //     snapshot at higher layers), or
    //   - you are exploring performance limits, and crashes/corruption detection
    //     are acceptable trade-offs.
    //
    // Background maintenance is left enabled by default; it is generally helpful
    // for keeping the index compact and read-friendly.
    ProfileFast Profile = "fast"

    // ProfileWALOnFast is a "WAL on + relaxed durability" profile intended for
    // write-heavy benchmarks and ingest workloads.
    //
    // It keeps WAL enabled but disables fsync and value-log read checksums.
    ProfileWALOnFast Profile = "wal_on_fast"

    // ProfileBench is a "fast + deterministic" profile intended specifically for
    // benchmarking.
    //
    // It disables background workers that can inject heavy work mid-run (e.g.
    // background index vacuum). This makes comparisons more stable across runs.
    //
    // IMPORTANT: This is not a recommended production profile.
    ProfileBench Profile = "bench"
)
type TrainConfig ¶ added in v0.3.0
type TrainConfig = compression.TrainConfig
Dictionary training/lookup helpers for value-log compression.
type ValueLogAutoPolicy ¶ added in v0.3.0
type ValueLogAutoPolicy = db.ValueLogAutoPolicy
type ValueLogBlockCodec ¶ added in v0.3.0
type ValueLogBlockCodec = db.ValueLogBlockCodec
type ValueLogCompressionMode ¶ added in v0.3.0
type ValueLogCompressionMode = db.ValueLogCompressionMode
type ValueLogGCOptions ¶ added in v0.3.0
type ValueLogGCOptions struct {
DryRun bool
}
ValueLogGCOptions controls value-log garbage collection.
type ValueLogGCStats ¶ added in v0.3.0
type ValueLogGCStats struct {
SegmentsTotal int
SegmentsReferenced int
SegmentsActive int
SegmentsEligible int
SegmentsDeleted int
BytesTotal int64
BytesReferenced int64
BytesActive int64
BytesEligible int64
BytesDeleted int64
}
ValueLogGCStats summarizes value-log GC work.
type ValueLogOptions ¶ added in v0.3.0
type ValueLogOptions = db.ValueLogOptions
type ValueLogRewriteStats ¶ added in v0.3.0
type ValueLogRewriteStats struct {
SegmentsBefore int
SegmentsAfter int
BytesBefore int64
BytesAfter int64
RecordsCopied int
}
ValueLogRewriteStats summarizes value-log rewrite compaction results.
func ValueLogRewriteOffline ¶ added in v0.3.0
func ValueLogRewriteOffline(opts Options) (ValueLogRewriteStats, error)
ValueLogRewriteOffline rewrites value-log pointers into new segments and swaps index.db to reference the new log. This is an offline operation that requires an exclusive lock and a clean commitlog.
type ZSTDEncoderLevel ¶ added in v0.3.0
type ZSTDEncoderLevel = zstd.EncoderLevel
Zstd encoder levels (for dict-compressed value-log frames).
Source Files
¶
Directories
¶
| Path | Synopsis |
|---|---|
| cmd | |
| cmd/db_histogram | command |
| cmd/debug_open | command |
| cmd/stress | command |
| cmd/treemap | command |
| cmd/unified_bench | command |
| cmd/verify | command |
| cmd/vlog_dict_realdata | command |
| internal | |