db

package
v0.16.0-rc.0 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 17, 2026 License: Apache-2.0 Imports: 14 Imported by: 6

Documentation

Index

Constants

View Source
const (
	// maps peer ID to peer multiaddresses
	Peer = Bucket(peer)

	// TrieJournal -> journal
	TrieJournal = Bucket(trieJournal)

	// maps block range to AggregatedBloomFilter
	AggregatedBloomFilters = Bucket(aggregatedBloomFilters)
	// aggregated filter not full yet
	RunningEventFilter = Bucket(runningEventFilter)

	// TODO: remove this variable when removing the deprecated migrations
	Unused = Bucket(unused)

	/****************************************************
			Block
	*****************************************************/
	BlockCommitments         = Bucket(blockCommitments)
	BlockHeaderNumbersByHash = Bucket(blockHeaderNumbersByHash)
	BlockHeadersByNumber     = Bucket(blockHeadersByNumber)
	// maps block number to transactions and receipts
	BlockTransactions = Bucket(blockTransactions)
	// Latest height of the blockchain
	ChainHeight = Bucket(chainHeight)
	// maps l1 handler msg hash to l1 handler txn hash
	L1HandlerTxnHashByMsgHash = Bucket(l1HandlerTxnHashByMsgHash)
	L1Height                  = Bucket(l1Height)
	// maps block number and index to transaction receipt
	ReceiptsByBlockNumberAndIndex = Bucket(receiptsByBlockNumberAndIndex)
	// maps transaction hashes to block number and index
	TransactionBlockNumbersAndIndicesByHash = Bucket(transactionBlockNumbersAndIndicesByHash)
	// maps block number and index to transaction
	TransactionsByBlockNumberAndIndex = Bucket(transactionsByBlockNumberAndIndex)

	/****************************************************
			Class
	*****************************************************/
	// maps class hashes to classes
	Class = Bucket(class)
	// Class CASM hash metadata (declaration and migration info)
	ClassCasmHashMetadata = Bucket(classCasmHashMetadata)
	ClassesTrie           = Bucket(classesTrie)
	// ClassTrie + nodetype + path + pathlength -> Trie Node
	ClassTrie = Bucket(classTrie)

	/****************************************************
			Contract
	*****************************************************/
	// Contract + ContractAddr -> Contract
	Contract = Bucket(contract)
	// maps contract addresses to class hashes
	ContractClassHash = Bucket(contractClassHash)
	// contract nonce
	ContractNonce = Bucket(contractNonce)
	// contract storages
	ContractStorage = Bucket(contractStorage)

	// maps contract addresses to their deployment block number
	ContractDeploymentHeight = Bucket(contractDeploymentHeight)
	// ContractTrieContract + nodetype + path + pathlength -> Trie Node
	ContractTrieContract = Bucket(contractTrieContract)
	// ContractTrieStorage + owner + nodetype + path + pathlength -> Trie Node
	ContractTrieStorage = Bucket(contractTrieStorage)

	// ContractClassHashHistory + Contract address + block number -> old class hash.
	ContractClassHashHistory = Bucket(contractClassHashHistory)
	// ContractNonceHistory + Contract address + block number -> old nonce.
	ContractNonceHistory = Bucket(contractNonceHistory)
	// ContractStorageHistory + Contract address + storage location + block number -> old value.
	ContractStorageHistory = Bucket(contractStorageHistory)

	/****************************************************
			Mempool
	*****************************************************/
	// key of the head node
	MempoolHead = Bucket(mempoolHead)
	// number of transactions
	MempoolLength = Bucket(mempoolLength)
	MempoolNode   = Bucket(mempoolNode)
	// key of the tail node
	MempoolTail = Bucket(mempoolTail)

	/****************************************************
			Migration
	*****************************************************/
	// used temporarily for migrations
	Temporary                         = Bucket(temporary)
	SchemaIntermediateState           = Bucket(schemaIntermediateState)
	SchemaMetadata                    = Bucket(schemaMetadata)
	DeprecatedSchemaIntermediateState = Bucket(deprecatedSchemaIntermediateState)
	DeprecatedSchemaVersion           = Bucket(deprecatedSchemaVersion)

	/****************************************************
			State
	*****************************************************/
	// PersistedStateID -> state id
	PersistedStateID = Bucket(persistedStateID)
	// StateHash -> ClassRootHash + ContractRootHash
	StateHashToTrieRoots = Bucket(stateHashToTrieRoots)
	// StateID + root hash -> state id
	StateID = Bucket(stateID)
	// state metadata (e.g., the state root)
	StateTrie                 = Bucket(stateTrie)
	StateUpdatesByBlockNumber = Bucket(stateUpdatesByBlockNumber)
)
View Source
const AggregatedBloomFilterRangeKeySize = 16
View Source
const BlockNumIndexKeySize = 16
View Source
const DefaultBatchSize = 10 * utils.Megabyte

Variables

View Source
var ErrKeyNotFound = errors.New("key not found")

Functions

func AggregatedBloomFilterKey added in v0.14.6

func AggregatedBloomFilterKey(fromBlock, toBlock uint64) []byte

func BlockCommitmentsKey added in v0.14.4

func BlockCommitmentsKey(blockNum uint64) []byte

func BlockHeaderByNumberKey added in v0.14.4

func BlockHeaderByNumberKey(blockNum uint64) []byte

func BlockHeaderNumbersByHashKey added in v0.14.4

func BlockHeaderNumbersByHashKey(hash *felt.Felt) []byte

func ClassKey added in v0.14.4

func ClassKey(classHash *felt.Felt) []byte

func ContractClassHashHistoryAtBlockKey added in v0.15.12

func ContractClassHashHistoryAtBlockKey(addr *felt.Felt, blockNum uint64) []byte

func ContractClassHashHistoryKey added in v0.14.4

func ContractClassHashHistoryKey(addr *felt.Felt) []byte

func ContractClassHashKey added in v0.14.4

func ContractClassHashKey(addr *felt.Felt) []byte

func ContractDeploymentHeightKey added in v0.14.4

func ContractDeploymentHeightKey(addr *felt.Felt) []byte

func ContractKey added in v0.14.7

func ContractKey(addr *felt.Felt) []byte

func ContractNonceHistoryAtBlockKey added in v0.15.12

func ContractNonceHistoryAtBlockKey(addr *felt.Felt, blockNum uint64) []byte

func ContractNonceHistoryKey added in v0.14.4

func ContractNonceHistoryKey(addr *felt.Felt) []byte

func ContractNonceKey added in v0.14.4

func ContractNonceKey(addr *felt.Felt) []byte

func ContractStorageHistoryAtBlockKey added in v0.15.12

func ContractStorageHistoryAtBlockKey(addr, key *felt.Felt, blockNum uint64) []byte

func ContractStorageHistoryKey added in v0.14.4

func ContractStorageHistoryKey(addr, loc *felt.Felt) []byte

func ContractStorageKey added in v0.14.4

func ContractStorageKey(addr *felt.Felt, key []byte) []byte

func L1HandlerTxnHashByMsgHashKey added in v0.14.4

func L1HandlerTxnHashByMsgHashKey(msgHash []byte) []byte

func MempoolNodeKey added in v0.14.4

func MempoolNodeKey(txnHash *felt.Felt) []byte

func PeerKey added in v0.14.4

func PeerKey(peerID []byte) []byte

func StateHashToTrieRootsKey added in v0.14.6

func StateHashToTrieRootsKey(stateCommitment *felt.StateRootHash) []byte

func StateIDKey added in v0.14.5

func StateIDKey(root *felt.StateRootHash) []byte

func StateUpdateByBlockNumKey added in v0.14.4

func StateUpdateByBlockNumKey(num uint64) []byte

func TestKeyValueStoreSuite added in v0.14.4

func TestKeyValueStoreSuite(t *testing.T, newDB func() KeyValueStore)

TestKeyValueStoreSuite runs a suite of tests against a KeyValueStore database implementation.

Types

type AggregatedBloomFilterRangeKey added in v0.14.6

type AggregatedBloomFilterRangeKey struct {
	FromBlock uint64
	ToBlock   uint64
}

func (AggregatedBloomFilterRangeKey) Marshal added in v0.15.13

func (b AggregatedBloomFilterRangeKey) Marshal() []byte

func (*AggregatedBloomFilterRangeKey) MarshalBinary added in v0.14.6

func (b *AggregatedBloomFilterRangeKey) MarshalBinary() ([]byte, error)

func (*AggregatedBloomFilterRangeKey) UnmarshalBinary added in v0.14.6

func (b *AggregatedBloomFilterRangeKey) UnmarshalBinary(data []byte) error

type Batch added in v0.14.4

type Batch interface {
	io.Closer
	KeyValueWriter
	KeyValueRangeDeleter
	// Returns the size of the data accumulated in the batch, pending write
	Size() int
	// Flushes the data stored to disk and closes the batch
	Write() error
}

A write-only store that gathers changes in-memory and writes them to disk in a single atomic operation. It is not thread-safe for a single batch, but different batches can be used in different threads.

type Batcher added in v0.14.4

type Batcher interface {
	// Creates a write-only batch
	NewBatch() Batch
	// Creates a write-only batch with a pre-allocated size
	NewBatchWithSize(size int) Batch
}

Produce a batch to write to the database

type BlockNumIndexKey added in v0.14.4

type BlockNumIndexKey struct {
	Number uint64
	Index  uint64
}

func (BlockNumIndexKey) Marshal added in v0.15.13

func (b BlockNumIndexKey) Marshal() []byte

func (*BlockNumIndexKey) MarshalBinary added in v0.14.4

func (b *BlockNumIndexKey) MarshalBinary() ([]byte, error)

func (*BlockNumIndexKey) UnmarshalBinary added in v0.14.4

func (b *BlockNumIndexKey) UnmarshalBinary(data []byte) error

type Bucket

type Bucket innerBucket

Pebble does not support buckets to differentiate between groups of keys like Bolt or MDBX does. We use a global prefix list as a poor man's bucket alternative.

func BucketValues added in v0.12.0

func BucketValues() []Bucket

BucketValues returns all bucket values, derived from the generated innerBucket enum, wrapped in the Bucket type.

func (Bucket) Key

func (b Bucket) Key(key ...[]byte) []byte

Key flattens a prefix and a series of byte arrays into a single []byte.

func (Bucket) String added in v0.12.0

func (b Bucket) String() string

type BufferBatch added in v0.14.4

type BufferBatch struct {
	// contains filtered or unexported fields
}

TODO: DO NOT USE THIS! This is meant to be a temporary replacement for buffered transaction. After state refactor, we can remove this.

func NewBufferBatch added in v0.14.4

func NewBufferBatch(txn IndexedBatch) *BufferBatch

func (*BufferBatch) Close added in v0.15.17

func (b *BufferBatch) Close() error

func (*BufferBatch) Delete added in v0.14.4

func (b *BufferBatch) Delete(key []byte) error

func (*BufferBatch) DeleteRange added in v0.14.4

func (b *BufferBatch) DeleteRange(start, end []byte) error

func (*BufferBatch) Flush added in v0.14.4

func (b *BufferBatch) Flush() error

func (*BufferBatch) Get added in v0.14.4

func (b *BufferBatch) Get(key []byte, cb func(value []byte) error) error

func (*BufferBatch) Has added in v0.14.4

func (b *BufferBatch) Has(key []byte) (bool, error)

func (*BufferBatch) NewIterator added in v0.14.4

func (b *BufferBatch) NewIterator(prefix []byte, withUpperBound bool) (Iterator, error)

func (*BufferBatch) Put added in v0.14.4

func (b *BufferBatch) Put(key, val []byte) error

func (*BufferBatch) Size added in v0.14.4

func (b *BufferBatch) Size() int

func (*BufferBatch) Write added in v0.14.4

func (b *BufferBatch) Write() error

type EventListener added in v0.7.0

type EventListener interface {
	OnIO(write bool, start time.Time)
	OnCommit(start time.Time)
	OnWriteStall(isL0 bool, duration time.Duration)
}

type Helper added in v0.14.4

type Helper interface {
	// This will create a read-write transaction, apply the callback to it, and flush the changes
	Update(func(IndexedBatch) error) error
	// This will create a read-only snapshot and apply the callback to it
	View(func(Snapshot) error) error
	// TODO(weiihann): honestly this doesn't make sense, but it's currently needed for the metrics
	// remove this once the metrics are refactored
	// Returns the underlying database
	Impl() any
}

Helper interface

type IndexedBatch deprecated added in v0.14.4

type IndexedBatch interface {
	Batch
	KeyValueReader
}

Same as Batch, but allows for reads from the batch and the disk. Use this only if you need to read from both the in-memory and on-disk data. Write operations will be slower compared to a regular Batch.

Deprecated: Use Batch for writes and access the database directly for reads.

type IndexedBatcher deprecated added in v0.14.4

type IndexedBatcher interface {
	// Deprecated: Use NewBatch instead.
	NewIndexedBatch() IndexedBatch
	// Deprecated: Use NewBatchWithSize instead.
	NewIndexedBatchWithSize(size int) IndexedBatch
}

Produce an IndexedBatch to write to the database and read from it.

Deprecated: Use Batcher for writes and access the database directly for reads.

type Iterator

type Iterator interface {
	io.Closer

	// Valid returns true if the iterator is positioned at a valid key/value pair.
	Valid() bool

	// First moves the iterator to the first key/value pair. Returns true
	// if the iterator is pointing at a valid entry and false otherwise.
	First() bool

	// Prev moves the iterator to the previous key/value pair. Returns true
	// if the iterator is pointing at a valid entry and false otherwise.
	Prev() bool

	// Next moves the iterator to the next key/value pair. It returns whether the
	// iterator is valid after the call. Once invalid, the iterator remains
	// invalid.
	Next() bool

	// Key returns the key at the current position.
	Key() []byte

	// Value returns the value at the current position.
	Value() ([]byte, error)

	// DO NOT USE this unless the value is consumed immediately.
	//
	// UncopiedValue returns the value at the current position without copying it.
	// The returned slice is invalidated by the next call to [Next], [Prev], or [Seek].
	// Callers must copy the value if it needs to outlive the current iteration.
	//
	// This is intended for immediate unmarshalling to avoid an extra allocation.
	UncopiedValue() ([]byte, error)

	// Seek would seek to the provided key if present. If absent, it would seek to the next
	// key in lexicographical order. Returns true if the iterator is pointing at a valid entry
	// and false otherwise.
	Seek(key []byte) bool
}

Provides functionality to iterate over a database's key/value pairs in ascending order. It must be closed after use. A single iterator cannot be used concurrently. Multiple iterators can be used concurrently.

type KeyValueRangeDeleter added in v0.14.4

type KeyValueRangeDeleter interface {
	// Deletes a range of keys from start (inclusive) to end (exclusive)
	DeleteRange(start, end []byte) error
}

Exposes a range-deletion interface to the database

type KeyValueReader added in v0.14.4

type KeyValueReader interface {
	// Checks if a key exists in the data store
	Has(key []byte) (bool, error)
	// If a given key exists, the callback will be called with the value
	// Example:
	//
	//	var value []byte
	//	db.Get([]byte("key"), func(v []byte) error {
	//		value = v
	//		return nil
	//	})
	Get(key []byte, cb func(value []byte) error) error
	// Creates iterators over a database's key/value pairs
	NewIterator(prefix []byte, withUpperBound bool) (Iterator, error)
}

Exposes a read-only interface to the database

type KeyValueStore added in v0.14.4

Represents a key-value data store that can handle different operations

type KeyValueWriter added in v0.14.4

type KeyValueWriter interface {
	// Inserts a given value into the data store
	Put(key []byte, value []byte) error
	// Deletes a given key from the data store
	Delete(key []byte) error
}

Exposes a write-only interface to the database

type Listener added in v0.14.4

type Listener interface {
	WithListener(listener EventListener) KeyValueStore
}

type SelectiveListener added in v0.7.0

type SelectiveListener struct {
	OnIOCb         func(write bool, duration time.Duration)
	OnCommitCb     func(duration time.Duration)
	OnWriteStallCb func(isL0 bool, duration time.Duration)
}

func (*SelectiveListener) OnCommit added in v0.7.4

func (l *SelectiveListener) OnCommit(start time.Time)

func (*SelectiveListener) OnIO added in v0.7.0

func (l *SelectiveListener) OnIO(write bool, start time.Time)

func (*SelectiveListener) OnWriteStall added in v0.15.17

func (l *SelectiveListener) OnWriteStall(isL0 bool, duration time.Duration)

type Snapshot added in v0.14.4

type Snapshot interface {
	KeyValueReader
	Close() error
}

Represents a read-only view of the database at a specific point in time. If you don't need to read at a specific time, use the database directly.

type Snapshotter added in v0.14.4

type Snapshotter interface {
	NewSnapshot() Snapshot
}

Produces a read-only snapshot of the database

type SyncBatch added in v0.14.4

type SyncBatch struct {
	// contains filtered or unexported fields
}

A wrapper around IndexedBatch that allows for thread-safe operations. Ideally, you shouldn't have to use this at all. If you need to write to batches concurrently, it's better to create a single batch for each goroutine and then merge them afterwards.

func NewSyncBatch added in v0.14.4

func NewSyncBatch(batch IndexedBatch) *SyncBatch

func (*SyncBatch) Close added in v0.15.17

func (s *SyncBatch) Close() error

func (*SyncBatch) Delete added in v0.14.4

func (s *SyncBatch) Delete(key []byte) error

func (*SyncBatch) DeleteRange added in v0.14.4

func (s *SyncBatch) DeleteRange(start, end []byte) error

func (*SyncBatch) Get added in v0.14.4

func (s *SyncBatch) Get(key []byte, cb func(value []byte) error) error

func (*SyncBatch) Has added in v0.14.4

func (s *SyncBatch) Has(key []byte) (bool, error)

func (*SyncBatch) NewIterator added in v0.14.4

func (s *SyncBatch) NewIterator(lowerBound []byte, withUpperBound bool) (Iterator, error)

func (*SyncBatch) Put added in v0.14.4

func (s *SyncBatch) Put(key, val []byte) error

func (*SyncBatch) Size added in v0.14.4

func (s *SyncBatch) Size() int

func (*SyncBatch) Write added in v0.14.4

func (s *SyncBatch) Write() error

Directories

Path Synopsis
key

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL