Documentation ¶
Index ¶
- Constants
- Variables
- func HeldOneLock(lctx lockctx.Proof, lockA string, lockB string) (bool, string)
- func IsInvalidDKGStateTransitionError(err error) bool
- func Locks() []string
- func MakeSingletonLockManager() lockctx.Manager
- func NewInvalidDKGStateTransitionErrorf(from, to flow.DKGState, msg string, args ...any) error
- func NewTestingLockManager() lockctx.Manager
- func OnCommitSucceed(b ReaderBatchWriter, onSuccessFn func())
- func OnlyWriter(fn func(Writer) error) func(ReaderBatchWriter) error
- func PrefixUpperBound(prefix []byte) []byte
- func StartEndPrefixToLowerUpperBound(startPrefix, endPrefix []byte) (lowerBound, upperBound []byte, hasUpperBound bool)
- func WithLock(manager lockctx.Manager, lockID string, fn func(lctx lockctx.Context) error) error
- func WithLocks(manager lockctx.Manager, lockIDs []string, fn func(lctx lockctx.Context) error) error
- type All
- type Batch
- type BatchStorage
- type Blocks
- type ChunkDataPacks
- type ChunksQueue
- type ClusterBlocks
- type ClusterPayloads
- type Collections
- type CollectionsReader
- type Commits
- type CommitsReader
- type ComputationResultUploadStatus
- type ConsumerProgress
- type ConsumerProgressInitializer
- type DB
- type DKGState
- type DKGStateReader
- type EpochCommits
- type EpochProtocolStateEntries
- type EpochRecoveryMyBeaconKey
- type EpochSetups
- type Events
- type EventsReader
- type ExecutionForkEvidence
- type ExecutionReceipts
- type ExecutionResults
- type ExecutionResultsReader
- type Guarantees
- type Headers
- type HeightIndex
- type Index
- type InvalidDKGStateTransitionError
- type IterItem
- type Iterator
- type IteratorOption
- type LatestPersistedSealedResult
- type Ledger
- type LedgerVerifier
- type LightTransactionResults
- type LightTransactionResultsReader
- type LockManager
- type MyExecutionReceipts
- type NodeDisallowList
- type Payloads
- type ProtocolKVStore
- type QuorumCertificates
- type Reader
- type ReaderBatchWriter
- type RegisterIndex
- type RegisterIndexReader
- type ResultApprovals
- type SafeBeaconKeys
- type ScheduledTransactions
- type ScheduledTransactionsReader
- type Seals
- type Seeker
- type ServiceEvents
- type StoredChunkDataPack
- type StoredChunkDataPacks
- type Transaction deprecated
- type TransactionResultErrorMessages
- type TransactionResultErrorMessagesReader
- type TransactionResults
- type TransactionResultsReader
- type Transactions
- type TransactionsReader
- type VersionBeacons
- type Writer
Constants ¶
const (
	// LockInsertBlock protects the entire block insertion process (`ParticipantState.Extend` or `FollowerState.ExtendCertified`)
	LockInsertBlock = "lock_insert_block"

	// LockFinalizeBlock protects the entire block finalization process (`FollowerState.Finalize`)
	LockFinalizeBlock = "lock_finalize_block"

	// LockIndexResultApproval protects indexing result approvals by approval and chunk.
	LockIndexResultApproval = "lock_index_result_approval"

	// LockInsertOrFinalizeClusterBlock protects the entire cluster block insertion or finalization process.
	// The reason they are combined is that the insertion process reads some data updated by the finalization
	// process; in order to prevent dirty reads, we need to acquire the lock for both operations.
	LockInsertOrFinalizeClusterBlock = "lock_insert_or_finalize_cluster_block"

	// LockInsertOwnReceipt is intended for Execution Nodes to ensure that they never publish different receipts for the same block.
	// Specifically, with this lock we prevent accidental overwrites of the index `executed block ID` ➜ `Receipt ID`.
	LockInsertOwnReceipt = "lock_insert_own_receipt"

	// LockInsertCollection protects the insertion of collections.
	LockInsertCollection = "lock_insert_collection"

	// LockBootstrapping protects data that is *exclusively* written during bootstrapping.
	LockBootstrapping = "lock_bootstrapping"

	// LockIndexChunkDataPackByChunkID protects the insertion of chunk data packs.
	LockIndexChunkDataPackByChunkID = "lock_index_chunk_data_pack_by_chunk_id"

	// LockInsertTransactionResultErrMessage protects the insertion of transaction result error messages.
	LockInsertTransactionResultErrMessage = "lock_insert_transaction_result_message"

	// LockInsertLightTransactionResult protects the insertion of light transaction results.
	LockInsertLightTransactionResult = "lock_insert_light_transaction_result"

	// LockInsertExecutionForkEvidence protects the insertion of execution fork evidence.
	LockInsertExecutionForkEvidence = "lock_insert_execution_fork_evidence"

	LockInsertSafetyData   = "lock_insert_safety_data"
	LockInsertLivenessData = "lock_insert_liveness_data"

	// LockIndexScheduledTransaction protects the indexing of scheduled transactions.
	LockIndexScheduledTransaction = "lock_index_scheduled_transaction"
)
Variables ¶
var (
	// ErrNotFound is returned when a retrieved key does not exist in the database.
	// Note: there is another not-found error: badger.ErrKeyNotFound. The difference between
	// badger.ErrKeyNotFound and storage.ErrNotFound is that badger.ErrKeyNotFound is the error
	// returned by the badger API, whereas modules in the storage/badger and
	// storage/badger/operation packages both return storage.ErrNotFound for missing keys.
	ErrNotFound = errors.New("key not found")

	// ErrAlreadyExists is returned when an insert attempts to set the value
	// for a key that already exists. Inserts may only occur once per key;
	// updates may overwrite an existing key without returning an error.
	ErrAlreadyExists = errors.New("key already exists")

	// ErrDataMismatch is returned when a repeatable insert operation attempts
	// to insert a different value for the same key.
	ErrDataMismatch = errors.New("data for key is different")

	// ErrHeightNotIndexed is returned when data that is indexed sequentially is queried
	// by a given block height and that data is unavailable.
	ErrHeightNotIndexed = errors.New("data for block height not available")

	// ErrNotBootstrapped is returned when the database has not been bootstrapped.
	ErrNotBootstrapped = errors.New("pebble database not bootstrapped")
)
Functions ¶
func HeldOneLock ¶ added in v0.43.3
func HeldOneLock(lctx lockctx.Proof, lockA string, lockB string) (bool, string)
HeldOneLock checks that exactly one of the two specified locks is held in the provided lock context.
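As an illustration, a storage method whose precondition allows either of two locks might use HeldOneLock as a guard. This is a hedged sketch: the helper name is hypothetical, and the assumption that the second return value carries a diagnostic message is not confirmed by this section.

	// indexOwnReceipt is a hypothetical helper, not part of this package.
	func indexOwnReceipt(lctx lockctx.Proof, rw ReaderBatchWriter) error {
		// require that exactly one of the two locks is held;
		// msg is assumed to be a diagnostic message describing the outcome
		held, msg := HeldOneLock(lctx, LockInsertOwnReceipt, LockBootstrapping)
		if !held {
			return fmt.Errorf("lock precondition failed: %s", msg)
		}
		// ... perform the writes guarded by the held lock via rw ...
		return nil
	}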
func IsInvalidDKGStateTransitionError ¶ added in v0.39.0
func IsInvalidDKGStateTransitionError(err error) bool
IsInvalidDKGStateTransitionError returns whether the given error is or wraps an InvalidDKGStateTransitionError.
func Locks ¶ added in v0.43.0
func Locks() []string
Locks returns a list of all named locks used by the storage layer.
func MakeSingletonLockManager ¶ added in v0.43.0
func MakeSingletonLockManager() lockctx.Manager
MakeSingletonLockManager returns the lock manager used by the storage layer. This function must be used for production builds and must be called exactly once process-wide.
The Lock Manager is a core component enforcing atomicity of various storage operations across different components. Therefore, the lock manager is a singleton instance, as the storage layer's atomicity and consistency depend on the same set of locks being used everywhere.

By convention, the lock manager singleton is injected into the node's components during their initialization, following the same dependency-injection pattern as other components that are conceptually singletons (e.g. the storage layer abstractions). Thereby, we explicitly codify in the constructor that a component uses the lock manager. We think it is helpful to emphasize that the component at times will acquire _exclusive access_ to all key-value pairs in the database whose keys start with some specific prefixes (see `storage/badger/operation/prefix.go` for an exhaustive list of prefixes). In comparison, the alternative pattern (which we do not use) of retrieving a singleton instance via a global variable would hide which components require exclusive storage access, and in addition, it would break with our broadly established dependency-injection pattern.

To enforce best practices, this function will panic if it is called more than once.
CAUTION:
- The lock manager only guarantees atomicity of reads and writes for the thread holding the lock. Other threads can continue to read (possibly stale) values, while the lock is held by a different thread.
- Furthermore, the writer must bundle all their writes into a _single_ Write Batch for atomicity. Even when holding the lock, reading threads can still observe the writes of one batch while not observing the writes of a second batch, despite the thread writing both batches while holding the lock. It was a deliberate choice for the sake of performance to allow reads without any locking - so instead of waiting for the newest value in case a write is currently ongoing, the reader will just retrieve the previous value. This aligns with our architecture of the node operating as an eventually-consistent system, which favors loose coupling and high throughput for different components within a node.
func NewInvalidDKGStateTransitionErrorf ¶ added in v0.39.0
func NewInvalidDKGStateTransitionErrorf(from, to flow.DKGState, msg string, args ...any) error
NewInvalidDKGStateTransitionErrorf constructs a new InvalidDKGStateTransitionError error with a formatted message.
func NewTestingLockManager ¶ added in v0.43.0
func NewTestingLockManager() lockctx.Manager
NewTestingLockManager returns a lock manager for the storage layer. This function is intended for testing only and must NOT be used in production builds. Unlike MakeSingletonLockManager, this function may be called multiple times.
func OnCommitSucceed ¶ added in v0.38.0
func OnCommitSucceed(b ReaderBatchWriter, onSuccessFn func())
OnCommitSucceed adds a callback to execute after the batch has been successfully committed.
Context on why we don't add this method to the ReaderBatchWriter interface: the implementation of ReaderBatchWriter would have to provide an implementation for AddSuccessCallback, which can be derived for free from the AddCallback method. It is better to avoid using AddCallback directly and to use OnCommitSucceed instead: AddCallback passes the commit error to the callback, and out of habit you might write `if err != nil` by mistake (the usual Go error-handling idiom) where the success path actually requires the opposite check.
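For example, a store operation that should update an in-memory cache only once its batch has durably committed might look like the following sketch. The cache and surrounding function are hypothetical and for illustration only:

	// storeAndCache is a hypothetical helper, not part of this package.
	func storeAndCache(rw ReaderBatchWriter, cache map[flow.Identifier]flow.StateCommitment, blockID flow.Identifier, commit flow.StateCommitment) error {
		// register the cache update first; it only runs if the commit succeeds
		OnCommitSucceed(rw, func() {
			cache[blockID] = commit
		})
		// ... stage the actual writes on rw here; they become durable on commit ...
		return nil
	}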
func OnlyWriter ¶ added in v0.38.0
func OnlyWriter(fn func(Writer) error) func(ReaderBatchWriter) error
OnlyWriter is an adapter to convert a function that takes a Writer to a function that takes a ReaderBatchWriter.
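A sketch of the adapter in use, assuming a DB instance `db` and assuming Writer exposes a Set method (not shown in this section); the key and value are placeholders:

	err := db.WithReaderBatchWriter(OnlyWriter(func(w Writer) error {
		// the function only needs write access, no reads from the batch
		return w.Set([]byte("example-key"), []byte("example-value"))
	}))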
func PrefixUpperBound ¶ added in v0.38.0
func PrefixUpperBound(prefix []byte) []byte
PrefixUpperBound returns a key K such that all possible keys beginning with the input prefix sort lower than K, according to the byte-wise lexicographic key ordering. This is used to define an upper bound for iteration when we want to iterate over all keys beginning with a given prefix. (See https://pkg.go.dev/github.com/cockroachdb/pebble#example-Iterator-PrefixIteration for reference.) When the prefix is all 1s, such as []byte{0xff} or []byte{0xff, 0xff} etc., there is no upper bound; it returns nil in this case.
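A minimal sketch of the underlying idea (not necessarily this package's exact implementation): copy the prefix, increment the last byte that is not 0xff, and truncate everything after it; if every byte is 0xff, no finite upper bound exists:

	func prefixUpperBound(prefix []byte) []byte {
		end := make([]byte, len(prefix))
		copy(end, prefix)
		for i := len(end) - 1; i >= 0; i-- {
			if end[i] != 0xff {
				end[i]++
				return end[:i+1] // all keys beginning with prefix sort below this bound
			}
		}
		return nil // prefix is all 0xff bytes: no upper bound exists
	}

For example, prefixUpperBound([]byte("hello")) yields "hellp", and every key beginning with "hello" sorts strictly below "hellp".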
func StartEndPrefixToLowerUpperBound ¶ added in v0.38.0
func StartEndPrefixToLowerUpperBound(startPrefix, endPrefix []byte) (lowerBound, upperBound []byte, hasUpperBound bool)
StartEndPrefixToLowerUpperBound returns the lower and upper bounds for a range of keys specified by the start and end prefixes. The lower and upper bounds are used for key iteration. The return value lowerBound specifies the smallest key to iterate and is inclusive. The return value upperBound specifies the largest key to iterate and is exclusive (not inclusive). The return value hasUpperBound specifies whether an upper bound exists. In order to match all keys prefixed with `endPrefix`, we increment the bytes of `endPrefix` by 1. For instance, to iterate keys between "hello" and "world", we use "hello" as the lowerBound and "worle" as the upperBound, so that "world", "world1", "worldffff...ffff" will all be included. In the case that endPrefix is all 1s, such as []byte{0xff, 0xff, ...}, there is no upper bound, and the function returns (startPrefix, nil, false).
func WithLock ¶ added in v0.43.3
func WithLock(manager lockctx.Manager, lockID string, fn func(lctx lockctx.Context) error) error
WithLock is a helper function that creates a new lock context, acquires the specified lock, and executes the provided function within that context. This function passes through any errors returned by fn.
func WithLocks ¶ added in v0.43.3
func WithLocks(manager lockctx.Manager, lockIDs []string, fn func(lctx lockctx.Context) error) error
WithLocks is a helper function that creates a new lock context, acquires the specified locks, and executes the provided function within that context. This function passes through any errors returned by fn.
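A sketch of the intended usage pattern, combining WithLock with a single atomic batch (per the caution under MakeSingletonLockManager); `manager` and `db` are placeholder instances:

	err := WithLock(manager, LockInsertCollection, func(lctx lockctx.Context) error {
		return db.WithReaderBatchWriter(func(rw ReaderBatchWriter) error {
			// bundle ALL writes of this operation into this one batch,
			// and hold the lock until the batch has been committed
			// ... stage writes on rw here ...
			return nil
		})
	})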
Types ¶
type All ¶ added in v0.12.0
type All struct {
	Headers                   Headers
	Guarantees                Guarantees
	Seals                     Seals
	Index                     Index
	Payloads                  Payloads
	Blocks                    Blocks
	QuorumCertificates        QuorumCertificates
	Setups                    EpochSetups
	EpochCommits              EpochCommits
	ChunkDataPacks            ChunkDataPacks
	Transactions              Transactions
	Collections               Collections
	EpochProtocolStateEntries EpochProtocolStateEntries
	ProtocolKVStore           ProtocolKVStore
	VersionBeacons            VersionBeacons
	RegisterIndex             RegisterIndex

	// These results are for reading and storing the result data from block payload
	// EN uses a different results module to store their own results
	// and receipts (see the Execution struct below)
	Results  ExecutionResults
	Receipts ExecutionReceipts
}
All includes all the storage modules
type Batch ¶ added in v0.38.0
type Batch interface {
	ReaderBatchWriter

	// Commit applies the batched updates to the database.
	// Commit may be called at most once per Batch.
	// No errors are expected during normal operation.
	Commit() error

	// Close releases memory of the batch.
	// Close must be called exactly once per Batch.
	// This can be called as a defer statement immediately after creating Batch
	// to reduce risk of unbounded memory consumption.
	// No errors are expected during normal operation.
	Close() error
}
Batch is an interface for a batch of writes to a storage backend. The batch is pending until it is committed. Useful for dynamically adding writes to the batch.
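The intended lifecycle, per the contract above: create the batch, defer Close immediately, stage writes, then Commit exactly once. A sketch, where `db` is a placeholder DB instance:

	batch := db.NewBatch()
	defer batch.Close() // must be called exactly once; safe to defer right after creation

	// ... stage writes on batch (it is a ReaderBatchWriter) ...

	if err := batch.Commit(); err != nil {
		return fmt.Errorf("could not commit batch: %w", err)
	}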
type BatchStorage ¶ added in v0.15.0
type BatchStorage interface {
	GetWriter() *badger.WriteBatch

	// OnSucceed adds a callback to execute after the batch has
	// been successfully flushed.
	// Useful for implementing a cache where we will only cache
	// after the batch has been successfully flushed.
	OnSucceed(callback func())

	// Flush will flush the write batch and update the cache.
	Flush() error
}
BatchStorage serves as an abstraction over batch storage, adding the ability to register extra callbacks which fire after the batch is successfully flushed. Deprecated: BatchStorage is being deprecated as part of the transition from Badger to Pebble. Use ReaderBatchWriter instead of BatchStorage for all new code.
type Blocks ¶
type Blocks interface {
	// BatchStore stores a valid block in a batch.
	// Error returns:
	// - storage.ErrAlreadyExists if the blockID already exists in the database.
	// - generic error in case of unexpected failure from the database layer or encoding failure.
	BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, proposal *flow.Proposal) error

	// ByID returns the block with the given hash. It is available for all incorporated blocks (validated blocks
	// that have been appended to any of the known forks) no matter whether the block has been finalized or not.
	//
	// Error returns:
	// - storage.ErrNotFound if no block with the corresponding ID was found
	// - generic error in case of unexpected failure from the database layer, or failure
	//   to decode an existing database value
	ByID(blockID flow.Identifier) (*flow.Block, error)

	// ProposalByID returns the block with the given ID, along with the proposer's signature on it.
	// It is available for all incorporated blocks (validated blocks that have been appended to any
	// of the known forks) no matter whether the block has been finalized or not.
	//
	// Error returns:
	// - storage.ErrNotFound if no block with the corresponding ID was found
	// - generic error in case of unexpected failure from the database layer, or failure
	//   to decode an existing database value
	ProposalByID(blockID flow.Identifier) (*flow.Proposal, error)

	// ByHeight returns the block at the given height. It is only available
	// for finalized blocks.
	//
	// Error returns:
	// - storage.ErrNotFound if no block for the corresponding height was found
	// - generic error in case of unexpected failure from the database layer, or failure
	//   to decode an existing database value
	ByHeight(height uint64) (*flow.Block, error)

	// ProposalByHeight returns the block at the given height, along with the proposer's
	// signature on it. It is only available for finalized blocks.
	//
	// Error returns:
	// - storage.ErrNotFound if no block proposal for the corresponding height was found
	// - generic error in case of unexpected failure from the database layer, or failure
	//   to decode an existing database value
	ProposalByHeight(height uint64) (*flow.Proposal, error)

	// ByView returns the block with the given view. It is only available for certified blocks.
	// Certified blocks are the blocks that have received a QC. Hotstuff guarantees that for each view,
	// at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique
	// even for non-finalized blocks.
	//
	// Expected errors during normal operations:
	// - `storage.ErrNotFound` if no certified block is known at given view.
	ByView(view uint64) (*flow.Block, error)

	// ProposalByView returns the block proposal with the given view. It is only available for certified blocks.
	//
	// Expected errors during normal operations:
	// - `storage.ErrNotFound` if no certified block is known at given view.
	ProposalByView(view uint64) (*flow.Proposal, error)

	// ByCollectionID returns the block for the given [flow.CollectionGuarantee] ID.
	// This method is only available for collections included in finalized blocks.
	// While consensus nodes verify that collections are not repeated within the same fork,
	// each different fork can contain a recent collection once. Therefore, we must wait for
	// finality.
	// CAUTION: this method is not backed by a cache and therefore comparatively slow!
	//
	// Error returns:
	// - storage.ErrNotFound if the collection ID was not found
	// - generic error in case of unexpected failure from the database layer, or failure
	//   to decode an existing database value
	ByCollectionID(collID flow.Identifier) (*flow.Block, error)

	// IndexBlockContainingCollectionGuarantees populates an index `guaranteeID->blockID` for each guarantee
	// which appears in the block.
	// CAUTION: a collection can be included in multiple *unfinalized* blocks. However, the implementation
	// assumes a one-to-one map from collection ID to a *single* block ID. This holds for FINALIZED BLOCKS ONLY
	// *and* only in the absence of byzantine collector clusters (which the mature protocol must tolerate).
	// Hence, this function should be treated as a temporary solution, which requires generalization
	// (one-to-many mapping) for soft finality and the mature protocol.
	//
	// Error returns:
	// - generic error in case of unexpected failure from the database layer or encoding failure.
	IndexBlockContainingCollectionGuarantees(blockID flow.Identifier, collIDs []flow.Identifier) error
}
Blocks provides persistent storage for blocks.
Conceptually, blocks must always be signed by the proposer. Once a block is certified (i.e. received votes from a supermajority of consensus participants, in their aggregated form represented by the Quorum Certificate [QC]), the proposer's signature is included in the QC and does not need to be provided individually anymore. Therefore, from the protocol perspective, the proper data structures are either a block proposal (including the proposer's signature) or a certified block (including a QC for the block).
type ChunkDataPacks ¶
type ChunkDataPacks interface {
	// Store persists multiple ChunkDataPacks in a two-phase process:
	//  1. Store chunk data packs (StoredChunkDataPack) by their hash (chunkDataPackID) in the chunk data pack database.
	//  2. Populate the index mapping from ChunkID to chunkDataPackID in the protocol database.
	//
	// Reasoning for the two-phase approach: the chunk data pack and the other execution data are stored in different databases.
	//  - Chunk data pack content is stored in the chunk data pack database by its hash (ID). Conceptually, it would be possible
	//    to store multiple different (disagreeing) chunk data packs here. Each chunk data pack is stored using its own collision-
	//    resistant hash as key, so different chunk data packs will be stored under different keys. So from the perspective of the
	//    storage layer, we _could_ in phase 1 store all known chunk data packs. However, an Execution Node may only commit to a single
	//    chunk data pack (or it will get slashed). This mapping from chunk ID to the ID of the chunk data pack that the Execution Node
	//    actually committed to is stored in the protocol database, in the following phase 2.
	//  - In the second phase, we populate the index mapping from ChunkID to one "distinguished" chunk data pack ID. This mapping
	//    is stored in the protocol database. Typically, an Execution Node uses this for indexing its own chunk data packs which it
	//    publicly committed to.
	//
	// ATOMICITY:
	// [ChunkDataPacks.Store] executes phase 1 immediately, persisting the chunk data packs in their dedicated database. However,
	// the index mapping in phase 2 is deferred to the caller, who must invoke the returned functor to perform phase 2. This
	// approach has the following benefits:
	//  - Our API reflects that we are writing to two different databases here, with the chunk data pack database containing largely
	//    specialized data subject to pruning. In contrast, the protocol database persists the commitments a node makes (subject to
	//    slashing). The caller receives the ability to persist this commitment in the form of the returned functor. The functor
	//    may be discarded by the caller without corrupting the state (if anything, we have just stored some additional chunk data
	//    packs).
	//  - The serialization and storage of the comparatively large chunk data packs is separated from the protocol database writes.
	//  - The locking duration of the protocol database is reduced.
	//
	// The Store method returns:
	//  - func(lctx lockctx.Proof, rw storage.ReaderBatchWriter) error: Function for populating the index mapping from chunkID
	//    to chunk data pack ID in the protocol database. This mapping persists that the Execution Node committed to the result
	//    represented by this chunk data pack. This function returns [storage.ErrDataMismatch] when a _different_ chunk data pack
	//    ID for the same chunk ID has already been stored (changing which result an Execution Node committed to would be a
	//    slashable protocol violation). The caller must acquire [storage.LockInsertChunkDataPack] and hold it until the database
	//    write has been committed.
	//  - error: No error should be returned during normal operation. Any error indicates a failure in the first phase.
	Store(cs []*flow.ChunkDataPack) (func(lctx lockctx.Proof, protocolDBBatch ReaderBatchWriter) error, error)

	// ByChunkID returns the chunk data for the given chunk ID.
	// It returns [storage.ErrNotFound] if no entry exists for the given chunk ID.
	ByChunkID(chunkID flow.Identifier) (*flow.ChunkDataPack, error)

	// BatchRemove schedules all ChunkDataPacks with the given IDs to be deleted from the databases,
	// as part of the provided write batches. Unknown IDs are silently ignored.
	// It returns the list of chunk data pack IDs (chunkDataPackID) that were scheduled for removal from the chunk data pack database.
	// It performs a two-phase removal:
	//  1. First phase: remove the index mappings from ChunkID to chunkDataPackID in the protocol database.
	//  2. Second phase: remove the chunk data packs (StoredChunkDataPack) by their hash (chunkDataPackID) in the chunk data pack
	//     database. This phase is deferred until the caller of BatchRemove invokes the returned functor.
	//
	// Note: it does not remove the collection referred to by the chunk data pack.
	// This method is useful for the rollback execution tool to batch remove chunk data packs associated with a set of blocks.
	// No errors are expected during normal operation, even if no entries are matched.
	BatchRemove(chunkIDs []flow.Identifier, rw ReaderBatchWriter) (chunkDataPackIDs []flow.Identifier, err error)

	// BatchRemoveChunkDataPacksOnly removes multiple ChunkDataPacks with the given chunk IDs from the chunk data pack database only.
	// It does not remove the index mappings from ChunkID to chunkDataPackID in the protocol database.
	// This method is useful for the runtime chunk data pack pruner to batch remove chunk data packs associated with a set of blocks.
	// CAUTION: the chunk data pack batch is for the chunk data pack database only; DO NOT pass a batch writer for the protocol database.
	// No errors are expected during normal operation, even if no entries are matched.
	BatchRemoveChunkDataPacksOnly(chunkIDs []flow.Identifier, chunkDataPackBatch ReaderBatchWriter) error
}
ChunkDataPacks represents persistent storage for chunk data packs.
type ChunksQueue ¶ added in v0.15.0
type ClusterBlocks ¶
type ClusterBlocks interface {
	// ProposalByID returns the collection with the given ID, along with the proposer's signature on it.
	// It is available for all incorporated collections (validated blocks that have been appended to any
	// of the known forks) no matter whether the collection has been finalized or not.
	//
	// Error returns:
	// - storage.ErrNotFound if the block ID was not found
	// - generic error in case of unexpected failure from the database layer, or failure
	//   to decode an existing database value
	ProposalByID(blockID flow.Identifier) (*cluster.Proposal, error)

	// ProposalByHeight returns the collection at the given height, along with the proposer's
	// signature on it. It is only available for finalized collections.
	//
	// Error returns:
	// - storage.ErrNotFound if the block height or block ID was not found
	// - generic error in case of unexpected failure from the database layer, or failure
	//   to decode an existing database value
	ProposalByHeight(height uint64) (*cluster.Proposal, error)
}
ClusterBlocks provides persistent storage for collector blocks (aka collections) produced by *one specific* collector cluster (identified by the ClusterChainID). For consistency, method naming is analogous to the storage.Blocks interface. However, at the moment we only need to store cluster.Proposal, so the methods `ByID` and `ByHeight` don't exist here (but might be added later).
type ClusterPayloads ¶
type ClusterPayloads interface {
	// ByBlockID returns the cluster payload for the given block ID.
	ByBlockID(blockID flow.Identifier) (*cluster.Payload, error)
}
ClusterPayloads handles storing and retrieving payloads for collection node cluster consensus.
type Collections ¶
type Collections interface {
	CollectionsReader

	// Store inserts the collection keyed by ID and all constituent
	// transactions.
	// This is used by execution nodes storing collections.
	// No errors are expected during normal operation.
	Store(collection *flow.Collection) (*flow.LightCollection, error)

	// Remove removes the collection and all constituent transactions.
	// No errors are expected during normal operation.
	Remove(collID flow.Identifier) error

	// StoreAndIndexByTransaction stores the collection and indexes it by transaction.
	// This is used by access nodes storing collections for finalized blocks.
	//
	// CAUTION: the current approach is NOT BFT and needs to be revised in the future.
	// Honest clusters ensure a transaction can only belong to one collection. However, in rare
	// cases, the collector clusters can exceed byzantine thresholds -- making it possible to
	// produce multiple finalized collections (aka guaranteed collections) containing the same
	// transaction repeatedly.
	// TODO: eventually we need to handle Byzantine clusters
	//
	// No errors are expected during normal operation.
	StoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection) (*flow.LightCollection, error)

	// BatchStoreAndIndexByTransaction stores the collection and indexes it by transaction within a batch.
	// This is used by access nodes storing collections for finalized blocks.
	//
	// CAUTION: the current approach is NOT BFT and needs to be revised in the future.
	// Honest clusters ensure a transaction can only belong to one collection. However, in rare
	// cases, the collector clusters can exceed byzantine thresholds -- making it possible to
	// produce multiple finalized collections (aka guaranteed collections) containing the same
	// transaction repeatedly.
	// TODO: eventually we need to handle Byzantine clusters
	BatchStoreAndIndexByTransaction(lctx lockctx.Proof, collection *flow.Collection, batch ReaderBatchWriter) (*flow.LightCollection, error)
}
Collections represents persistent storage for collections.
type CollectionsReader ¶ added in v0.41.0
type CollectionsReader interface {
	// ByID returns the collection with the given ID, including all
	// transactions within the collection.
	//
	// Expected errors during normal operation:
	// - `storage.ErrNotFound` if no collection was found.
	ByID(collID flow.Identifier) (*flow.Collection, error)

	// LightByID returns a reduced representation of the collection with the given ID.
	// The reduced collection references the constituent transactions by their hashes.
	//
	// Expected errors during normal operation:
	// - `storage.ErrNotFound` if no light collection was found.
	LightByID(collID flow.Identifier) (*flow.LightCollection, error)

	// LightByTransactionID returns a reduced representation of the collection
	// holding the given transaction ID. The reduced collection references the
	// constituent transactions by their hashes.
	//
	// Expected errors during normal operation:
	// - `storage.ErrNotFound` if no light collection was found.
	LightByTransactionID(txID flow.Identifier) (*flow.LightCollection, error)
}
CollectionsReader represents persistent storage read operations for collections.
type Commits ¶
type Commits interface {
	CommitsReader

	// BatchStore stores a Commit keyed by blockID in the provided batch.
	// No errors are expected during normal operation, even if no entries are matched.
	// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchStore(lctx lockctx.Proof, blockID flow.Identifier, commit flow.StateCommitment, batch ReaderBatchWriter) error

	// BatchRemoveByBlockID removes a Commit keyed by blockID in the provided batch.
	// No errors are expected during normal operation, even if no entries are matched.
	// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error
}
Commits represents persistent storage for state commitments.
type CommitsReader ¶ added in v0.40.0
type CommitsReader interface {
	// ByBlockID will retrieve a commit by its ID from persistent storage.
	ByBlockID(blockID flow.Identifier) (flow.StateCommitment, error)
}
type ComputationResultUploadStatus ¶ added in v0.28.0
type ComputationResultUploadStatus interface {
	// Upsert upserts the ComputationResult upload status into persistent storage for the given BlockID.
	Upsert(blockID flow.Identifier, wasUploadCompleted bool) error

	// GetIDsByUploadStatus returns BlockIDs whose upload status matches targetUploadStatus.
	GetIDsByUploadStatus(targetUploadStatus bool) ([]flow.Identifier, error)

	// ByID returns the upload status of the ComputationResult with the given BlockID.
	ByID(blockID flow.Identifier) (bool, error)

	// Remove removes the instance of ComputationResult with the given BlockID.
	Remove(blockID flow.Identifier) error
}
ComputationResultUploadStatus defines storage operations for the upload status of a given ComputationResult instance: false means the upload is not completed; true means the upload is completed.
type ConsumerProgress ¶ added in v0.15.0
type ConsumerProgress interface {
	// ProcessedIndex returns the processed index for the consumer.
	// No errors are expected during normal operation.
	ProcessedIndex() (uint64, error)

	// SetProcessedIndex updates the processed index for the consumer.
	// The caller must use ConsumerProgressInitializer to initialize the progress index in storage.
	// No errors are expected during normal operation.
	SetProcessedIndex(processed uint64) error

	// BatchSetProcessedIndex updates the processed index for the consumer within the provided batch.
	// The caller must use ConsumerProgressInitializer to initialize the progress index in storage.
	// No errors are expected during normal operation.
	BatchSetProcessedIndex(processed uint64, batch ReaderBatchWriter) error
}
ConsumerProgress reads and writes the last processed index of the job in the job queue. It must be created by the ConsumerProgressInitializer, so that it can guarantee the ProcessedIndex and SetProcessedIndex methods are safe to use.
type ConsumerProgressInitializer ¶ added in v0.38.0
type ConsumerProgressInitializer interface {
	// Initialize takes a default index and initializes the consumer progress index in storage.
	// Initialize must be concurrency-safe, meaning if called by different modules, it should only
	// initialize once.
	Initialize(defaultIndex uint64) (ConsumerProgress, error)
}
ConsumerProgressInitializer is a helper to initialize the consumer progress index in storage. It prevents the consumer from being used before initialization.
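Sketch of the intended initialize-then-use flow, where `initializer` is a placeholder ConsumerProgressInitializer instance and the default index is arbitrary:

	progress, err := initializer.Initialize(0) // concurrency-safe; initializes at most once
	if err != nil {
		return err
	}
	processed, err := progress.ProcessedIndex()
	if err != nil {
		return err
	}
	// ... process job processed+1 ...
	if err := progress.SetProcessedIndex(processed + 1); err != nil {
		return err
	}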
type DB ¶ added in v0.38.0
type DB interface {
	// Reader returns a database-backed reader which reads the latest
	// committed global database state.
	Reader() Reader

	// WithReaderBatchWriter creates a batch writer and allows the caller to perform
	// atomic batch updates to the database.
	// Any errors returned are considered fatal and the batch is not committed.
	WithReaderBatchWriter(func(ReaderBatchWriter) error) error

	// NewBatch creates a new batch for writing.
	NewBatch() Batch

	// Close closes the database and releases all resources.
	// No errors are expected during normal operation.
	Close() error
}
DB is an interface for a database store that provides a reader and a writer.
type DKGState ¶ added in v0.23.9
type DKGState interface {
	DKGStateReader

	// SetDKGState performs a state transition for the Random Beacon Recoverable State Machine.
	// Some state transitions may not be possible using this method. For instance, we might not be able to enter [flow.DKGStateCompleted]
	// state directly from [flow.DKGStateStarted], even if such a transition is valid. The reason for this is that some states require
	// additional data to be processed by the state machine before the transition can be made. For such cases there are dedicated methods
	// that should be used, e.g. InsertMyBeaconPrivateKey and UpsertMyBeaconPrivateKey, which allow storing the needed data and performing
	// the transition in one atomic operation.
	// Error returns:
	// - [storage.InvalidDKGStateTransitionError] - if the requested state transition is invalid.
	SetDKGState(epochCounter uint64, newState flow.DKGState) error

	// InsertMyBeaconPrivateKey stores the random beacon private key for an epoch and transitions the
	// state machine into the [flow.DKGStateCompleted] state.
	//
	// CAUTION: these keys are stored before they are validated against the
	// canonical key vector and may not be valid for use in signing. Use [SafeBeaconKeys]
	// to guarantee only keys safe for signing are returned.
	// Error returns:
	// - [storage.ErrAlreadyExists] - if there is already a key stored for the given epoch.
	// - [storage.InvalidDKGStateTransitionError] - if the requested state transition is invalid.
	InsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey) error

	// CommitMyBeaconPrivateKey commits the previously inserted random beacon private key for an epoch. Effectively, this method
	// transitions the state machine into the [flow.RandomBeaconKeyCommitted] state if the current state is [flow.DKGStateCompleted].
	// The caller needs to supply the [flow.EpochCommit] as evidence that the stored key is valid for the specified epoch. Repeated
	// calls for the same epoch are accepted (idempotent operation), if and only if the provided EpochCommit confirms the already
	// committed key.
	// No errors are expected during normal operations.
	CommitMyBeaconPrivateKey(epochCounter uint64, commit *flow.EpochCommit) error
}
DKGState is the storage interface for storing all artifacts and state related to the DKG process, including the latest state of a running or completed DKG, and computed beacon keys. DKGState supports all state transitions that can occur for an individual node during the happy path epoch switchover of the network as a whole. Recovery from the epoch fallback mode is supported by the EpochRecoveryMyBeaconKey interface.
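Sketch of the happy-path transitions described above, using the dedicated methods where a plain SetDKGState transition is not available; `dkgState`, `epoch`, `myKey`, and `epochCommit` are placeholders:

	// mark that the DKG has started for this epoch
	if err := dkgState.SetDKGState(epoch, flow.DKGStateStarted); err != nil {
		return err
	}
	// store our key share and transition to flow.DKGStateCompleted in one atomic operation
	if err := dkgState.InsertMyBeaconPrivateKey(epoch, myKey); err != nil {
		return err
	}
	// once the EpochCommit confirms the key, transition to flow.RandomBeaconKeyCommitted
	if err := dkgState.CommitMyBeaconPrivateKey(epoch, epochCommit); err != nil {
		return err
	}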
type DKGStateReader ¶ added in v0.39.0
type DKGStateReader interface {
	SafeBeaconKeys

	// GetDKGState retrieves the current state of the state machine for the given epoch.
	// If an error is returned, the state is undefined, meaning that the state machine is in its initial state.
	// Error returns:
	// - [storage.ErrNotFound] - if there is no state stored for the given epoch, meaning the state machine is in its initial state.
	GetDKGState(epochCounter uint64) (flow.DKGState, error)

	// IsDKGStarted checks whether the DKG has been started for the given epoch.
	// No errors expected during normal operation.
	IsDKGStarted(epochCounter uint64) (bool, error)

	// UnsafeRetrieveMyBeaconPrivateKey retrieves the random beacon private key for an epoch.
	//
	// CAUTION: these keys are stored before they are validated against the
	// canonical key vector and may not be valid for use in signing. Use SafeBeaconKeys
	// to guarantee only keys safe for signing are returned.
	// Error returns:
	// - [storage.ErrNotFound] - if there is no key stored for the given epoch.
	UnsafeRetrieveMyBeaconPrivateKey(epochCounter uint64) (crypto.PrivateKey, error)
}
DKGStateReader is a read-only interface for low-level reading of the Random Beacon Recoverable State Machine.
type EpochCommits ¶
type EpochCommits interface {
	// BatchStore allows us to store a new epoch commit in a DB batch update while updating the cache.
	// No errors are expected during normal operation.
	BatchStore(rw ReaderBatchWriter, commit *flow.EpochCommit) error

	// ByID will return the EpochCommit event by its ID.
	// Error returns:
	// * storage.ErrNotFound if no EpochCommit with the ID exists
	ByID(flow.Identifier) (*flow.EpochCommit, error)
}
type EpochProtocolStateEntries ¶ added in v0.33.30
type EpochProtocolStateEntries interface {
	// BatchStore persists the given epoch protocol state entry as part of a DB batch. Per convention, the identities in
	// the flow.MinEpochStateEntry must be in canonical order for the current and next epoch (if present), otherwise an
	// exception is returned.
	//
	// CAUTION: The caller must ensure `epochProtocolStateID` is a collision-resistant hash of the provided
	// `epochProtocolStateEntry`! This method silently overrides existing data, which is safe only if for the same
	// key, we always write the same value.
	//
	// No errors are expected during normal operation.
	BatchStore(w Writer, epochProtocolStateID flow.Identifier, epochProtocolStateEntry *flow.MinEpochStateEntry) error

	// BatchIndex persists the specific map entry in the node's database.
	// In a nutshell, we want to maintain a map from `blockID` to `epochStateEntry`, where `blockID` references the
	// block that _proposes_ the referenced epoch protocol state entry.
	// Protocol convention:
	// - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example,
	//   the protocol state changes if we seal some execution results emitting service events.
	// - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value,
	//   the hash of the resulting protocol state at the end of processing B is to be used.
	// - IMPORTANT: The protocol state requires confirmation by a QC and will only become active at the child block,
	//   _after_ validating the QC.
	//
	// CAUTION:
	// - The caller must acquire the lock [storage.LockInsertBlock] and hold it until the database write has been committed.
	// - OVERWRITES existing data (potential for data corruption):
	//   The lock proof serves as a reminder that the CALLER is responsible to ensure that the DEDUPLICATION CHECK is done
	//   elsewhere ATOMICALLY within this write operation. Currently it is done by operation.InsertHeader, which performs a
	//   check to ensure the blockID is new, so any data indexed by this blockID is new as well.
	//
	// No errors are expected during normal operations.
	BatchIndex(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, epochProtocolStateID flow.Identifier) error

	// ByID returns the flow.RichEpochStateEntry by its ID.
	// Expected errors during normal operations:
	// - storage.ErrNotFound if no epoch state entry with the given Identifier is known.
	ByID(id flow.Identifier) (*flow.RichEpochStateEntry, error)

	// ByBlockID retrieves the flow.RichEpochStateEntry that the block with the given ID proposes.
	// CAUTION: this protocol state requires confirmation by a QC and will only become active at the child block,
	// _after_ validating the QC. Protocol convention:
	// - Consider block B, whose ingestion might potentially lead to an updated protocol state. For example,
	//   the protocol state changes if we seal some execution results emitting service events.
	// - For the key `blockID`, we use the identity of block B which _proposes_ this Protocol State. As value,
	//   the hash of the resulting protocol state at the end of processing B is to be used.
	//
	// Expected errors during normal operations:
	// - storage.ErrNotFound if no epoch state entry has been indexed for the given block.
	ByBlockID(blockID flow.Identifier) (*flow.RichEpochStateEntry, error)
}
EpochProtocolStateEntries represents persistent, fork-aware storage for the epoch-related sub-state of the overall Protocol State (KV Store).
type EpochRecoveryMyBeaconKey ¶ added in v0.39.0
type EpochRecoveryMyBeaconKey interface {
	DKGStateReader

	// UpsertMyBeaconPrivateKey overwrites the random beacon private key for the epoch that recovers the protocol
	// from Epoch Fallback Mode. The resulting state of this method call is [flow.RandomBeaconKeyCommitted].
	// State transitions are allowed if and only if the current state is not equal to [flow.RandomBeaconKeyCommitted].
	// Repeated calls for the same epoch are idempotent, if and only if the provided EpochCommit confirms the already
	// committed key (error otherwise).
	// No errors are expected during normal operations.
	UpsertMyBeaconPrivateKey(epochCounter uint64, key crypto.PrivateKey, commit *flow.EpochCommit) error
}
EpochRecoveryMyBeaconKey is a specific interface that allows overwriting the beacon private key for a future epoch, provided that the state machine has not yet reached the flow.RandomBeaconKeyCommitted state for the specified epoch. This interface is used *ONLY* in the epoch recovery process and only by consensus participants. On the happy path, each consensus committee member takes part in the DKG, and after successfully finishing the DKG protocol it obtains a random beacon private key, which is stored in the database along with the DKG state flow.DKGStateCompleted. If for any reason the DKG fails, then the private key will be nil and the DKG end state will be equal to flow.DKGStateFailure. This module allows overwriting the random beacon private key in case of EFM recovery or other configuration issues.
type EpochSetups ¶
type EpochSetups interface {
	// BatchStore allows us to store a new epoch setup in a DB batch update while going through the cache.
	// No errors are expected during normal operation.
	BatchStore(rw ReaderBatchWriter, setup *flow.EpochSetup) error

	// ByID will return the EpochSetup event by its ID.
	// Error returns:
	// * storage.ErrNotFound if no EpochSetup with the ID exists
	ByID(flow.Identifier) (*flow.EpochSetup, error)
}
type Events ¶
type Events interface {
	EventsReader

	// Store will store events for the given block ID.
	// TODO: error documentation
	Store(blockID flow.Identifier, blockEvents []flow.EventsList) error

	// BatchStore will store events for the given block ID in a given batch.
	// TODO: error documentation
	BatchStore(blockID flow.Identifier, events []flow.EventsList, batch ReaderBatchWriter) error

	// BatchRemoveByBlockID removes events keyed by a blockID in the provided batch.
	// No errors are expected during normal operation, even if no entries are matched.
	// If the database unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error
}
Events represents persistent storage for events.
type EventsReader ¶ added in v0.40.0
type EventsReader interface {
	// ByBlockID returns the events for the given block ID.
	ByBlockID(blockID flow.Identifier) ([]flow.Event, error)

	// ByBlockIDTransactionID returns the events for the given block ID and transaction ID.
	ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) ([]flow.Event, error)

	// ByBlockIDTransactionIndex returns the events for the transaction at the given index in a given block.
	ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) ([]flow.Event, error)

	// ByBlockIDEventType returns the events for the given block ID and event type.
	ByBlockIDEventType(blockID flow.Identifier, eventType flow.EventType) ([]flow.Event, error)
}
type ExecutionForkEvidence ¶ added in v0.42.1
type ExecutionForkEvidence interface {
	// StoreIfNotExists stores the given conflictingSeals to the database
	// if no execution fork evidence is currently stored in the database.
	// This function is a no-op if evidence is already stored, because
	// only one execution fork evidence can be stored at a time.
	// The caller must hold the [storage.LockInsertExecutionForkEvidence] lock.
	// No errors are expected during normal operations.
	StoreIfNotExists(lctx lockctx.Proof, conflictingSeals []*flow.IncorporatedResultSeal) error

	// Retrieve reads conflicting seals from the database.
	// No error is returned if the database record doesn't exist.
	// No errors are expected during normal operations.
	Retrieve() ([]*flow.IncorporatedResultSeal, error)
}
ExecutionForkEvidence represents persistent storage for execution fork evidence. CAUTION: Not safe for concurrent use by multiple goroutines.
type ExecutionReceipts ¶
type ExecutionReceipts interface {
	// Store stores an execution receipt.
	Store(receipt *flow.ExecutionReceipt) error

	// BatchStore stores an execution receipt inside the given batch.
	BatchStore(receipt *flow.ExecutionReceipt, batch ReaderBatchWriter) error

	// ByID retrieves an execution receipt by its ID.
	ByID(receiptID flow.Identifier) (*flow.ExecutionReceipt, error)

	// ByBlockID retrieves all known execution receipts for the given block
	// (from any Execution Node).
	//
	// No errors are expected during normal operations.
	ByBlockID(blockID flow.Identifier) (flow.ExecutionReceiptList, error)
}
ExecutionReceipts holds and indexes Execution Receipts. The storage-layer abstraction is from the viewpoint of the network: there are multiple execution nodes which produce several receipts for each block. By default, there is no distinguished execution node (they are all equal).
type ExecutionResults ¶
type ExecutionResults interface {
	ExecutionResultsReader

	// Store stores an execution result.
	Store(result *flow.ExecutionResult) error

	// BatchStore stores an execution result in a given batch.
	BatchStore(result *flow.ExecutionResult, batch ReaderBatchWriter) error

	// Index indexes an execution result by block ID.
	Index(blockID flow.Identifier, resultID flow.Identifier) error

	// ForceIndex indexes an execution result by block ID, overwriting an existing database entry.
	ForceIndex(blockID flow.Identifier, resultID flow.Identifier) error

	// BatchIndex indexes an execution result by block ID in a given batch.
	BatchIndex(blockID flow.Identifier, resultID flow.Identifier, batch ReaderBatchWriter) error

	// BatchRemoveIndexByBlockID removes blockID-to-executionResultID index entries keyed by blockID in a provided batch.
	// No errors are expected during normal operation, even if no entries are matched.
	// If Badger unexpectedly fails to process the request, the error is wrapped in a generic error and returned.
	BatchRemoveIndexByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error
}
type ExecutionResultsReader ¶ added in v0.40.0
type ExecutionResultsReader interface {
	// ByID retrieves an execution result by its ID. Returns `ErrNotFound` if `resultID` is unknown.
	ByID(resultID flow.Identifier) (*flow.ExecutionResult, error)

	// ByBlockID retrieves an execution result by block ID.
	ByBlockID(blockID flow.Identifier) (*flow.ExecutionResult, error)
}
type Guarantees ¶
type Guarantees interface {
	// ByID returns the [flow.CollectionGuarantee] by its ID.
	// Expected errors during normal operations:
	// - [storage.ErrNotFound] if no collection guarantee with the given Identifier is known.
	ByID(guaranteeID flow.Identifier) (*flow.CollectionGuarantee, error)

	// ByCollectionID retrieves the collection guarantee by collection ID.
	// Expected errors during normal operations:
	// - [storage.ErrNotFound] if no collection guarantee has been indexed for the given collection ID.
	ByCollectionID(collID flow.Identifier) (*flow.CollectionGuarantee, error)
}
Guarantees represents persistent storage for collection guarantees. Must only be used to store finalized collection guarantees.
type Headers ¶
type Headers interface {
	// ByBlockID returns the header with the given ID. It is available for finalized blocks and those pending finalization.
	// Error returns:
	// - [storage.ErrNotFound] if no block header with the given ID exists
	ByBlockID(blockID flow.Identifier) (*flow.Header, error)

	// ByHeight returns the block with the given number. It is only available for finalized blocks.
	// Error returns:
	// - [storage.ErrNotFound] if no finalized block is known at the given height
	ByHeight(height uint64) (*flow.Header, error)

	// ByView returns the block with the given view. It is only available for certified blocks.
	// Certified blocks are the blocks that have received QC. Hotstuff guarantees that for each view,
	// at most one block is certified. Hence, the return value of `ByView` is guaranteed to be unique
	// even for non-finalized blocks.
	//
	// Expected errors during normal operations:
	// - [storage.ErrNotFound] if no certified block is known at given view.
	ByView(view uint64) (*flow.Header, error)

	// Exists returns true if a header with the given ID has been stored.
	// No errors are expected during normal operation.
	Exists(blockID flow.Identifier) (bool, error)

	// BlockIDByHeight returns the block ID that is finalized at the given height. It is an optimized
	// version of `ByHeight` that skips retrieving the block.
	// Expected errors during normal operations:
	// - [storage.ErrNotFound] if no finalized block is known at given height
	BlockIDByHeight(height uint64) (flow.Identifier, error)

	// ByParentID finds all children for the given parent block. The returned headers
	// might be unfinalized; if there is more than one, at least one of them has to
	// be unfinalized.
	// CAUTION: this method is not backed by a cache and therefore comparatively slow!
	//
	// Expected error returns during normal operations:
	// - [storage.ErrNotFound] if no block with the given parentID is known
	ByParentID(parentID flow.Identifier) ([]*flow.Header, error)

	// ProposalByBlockID returns the header with the given ID, along with the corresponding proposer signature.
	// It is available for finalized blocks and those pending finalization.
	// Error returns:
	// - [storage.ErrNotFound] if no block header or proposer signature with the given blockID exists
	ProposalByBlockID(blockID flow.Identifier) (*flow.ProposalHeader, error)
}
Headers represents persistent storage for block headers.
type HeightIndex ¶ added in v0.32.0
type HeightIndex interface {
	// LatestHeight returns the latest indexed height.
	LatestHeight() (uint64, error)

	// FirstHeight at which we started to index. Returns the first indexed height found in the store.
	FirstHeight() (uint64, error)

	// SetLatestHeight updates the latest height.
	// The provided height should either be one higher than the current height or the same to ensure idempotency.
	// If the height is not within those bounds it will panic!
	// An error might get returned if there are problems with persisting the height.
	SetLatestHeight(height uint64) error
}
HeightIndex defines methods for indexing height. This interface should normally be composed with some other resource we want to index by height.
type Index ¶
type Index interface {
	// ByBlockID retrieves the index for a block payload.
	// Error returns:
	// - ErrNotFound if no block header with the given ID exists
	ByBlockID(blockID flow.Identifier) (*flow.Index, error)
}
type InvalidDKGStateTransitionError ¶ added in v0.39.0
type InvalidDKGStateTransitionError struct {
	From flow.DKGState
	To   flow.DKGState
	// contains filtered or unexported fields
}
InvalidDKGStateTransitionError is a sentinel error that is returned in case an invalid state transition is attempted.
func (InvalidDKGStateTransitionError) Error ¶ added in v0.39.0
func (e InvalidDKGStateTransitionError) Error() string
type IterItem ¶ added in v0.38.0
type IterItem interface {
	// Key returns the key of the current key-value pair.
	// Key is only valid until the Iterator.Next() method is called.
	// If you need to use it outside its validity, please use KeyCopy.
	Key() []byte

	// KeyCopy returns a copy of the key of the item, writing it to the dst slice.
	// If nil is passed, or the capacity of dst isn't sufficient, a new slice is allocated and
	// returned.
	KeyCopy(dst []byte) []byte

	// Value returns the value of the current key-value pair.
	// The reason it takes a function is to follow badgerDB's API pattern.
	// No errors expected during normal operation.
	Value(func(val []byte) error) error
}
IterItem is an interface for iterating over key-value pairs in a storage backend.
type Iterator ¶ added in v0.38.0
type Iterator interface {
	// First seeks to the smallest key greater than or equal to the given key.
	// This method must be called because it's necessary for the badger implementation
	// to move the iteration cursor to the first key in the iteration range.
	// This method must be called before calling Valid, Next, IterItem, or Close.
	// It returns true if the iterator is pointing to a valid key-value pair after calling First,
	// and false otherwise.
	First() bool

	// Valid returns whether the iterator is positioned at a valid key-value pair.
	// If Valid returns false, the iterator is done and must be closed.
	Valid() bool

	// Next advances the iterator to the next key-value pair.
	// The next key-value pair might be invalid, so you should call Valid() to check.
	Next()

	// IterItem returns the current key-value pair, or nil if Valid returns false.
	// Always call Valid() before calling IterItem.
	// Note: the returned item is only valid until the Next() method is called.
	IterItem() IterItem

	// Close closes the iterator. The iterator must be closed, otherwise it causes a memory leak.
	// No errors expected during normal operation.
	Close() error
}
Iterator is an interface for iterating over key-value pairs in a storage backend. A common usage is:
defer it.Close()
for it.First(); it.Valid(); it.Next() {
	item := it.IterItem()
}
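A fuller sketch with copying semantics: keys returned by Key() are only valid until Next(), so KeyCopy is used, and values are consumed inside the Value callback (assumed to only be valid there, following the badger pattern); `process` is a hypothetical handler:

	defer it.Close()
	for it.First(); it.Valid(); it.Next() {
		item := it.IterItem()
		key := item.KeyCopy(nil) // copy: Key() is invalidated by Next()
		err := item.Value(func(val []byte) error {
			// decode or copy val here; do not retain it beyond this callback
			return process(key, val)
		})
		if err != nil {
			return err
		}
	}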
type IteratorOption ¶ added in v0.38.0
type IteratorOption struct {
BadgerIterateKeyOnly bool // default false
}
func DefaultIteratorOptions ¶ added in v0.38.0
func DefaultIteratorOptions() IteratorOption
TODO: convert into a var
type LatestPersistedSealedResult ¶ added in v0.43.0
type LatestPersistedSealedResult interface {
	// Latest returns the ID and height of the latest persisted sealed result.
	Latest() (flow.Identifier, uint64)

	// BatchSet updates the latest persisted sealed result in a batch operation.
	// The resultID and height are added to the provided batch, and the local data is updated only after
	// the batch is successfully committed.
	//
	// No errors are expected during normal operation.
	BatchSet(resultID flow.Identifier, height uint64, batch ReaderBatchWriter) error
}
LatestPersistedSealedResult tracks the most recently persisted sealed execution result processed by the Access ingestion engine.
type Ledger ¶
type Ledger interface {
	EmptyStateCommitment() flow.StateCommitment

	// Trusted methods (without proof):

	// GetRegisters gets registers at a specific StateCommitment by a list of register IDs.
	GetRegisters(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) (values []flow.RegisterValue, err error)

	// UpdateRegisters performs batched atomic updates of a subset of registers at a specific state.
	UpdateRegisters(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (newStateCommitment flow.StateCommitment, err error)

	// Untrusted methods (providing proofs):

	// GetRegistersWithProof gets registers at a specific StateCommitment by a list of register IDs, with proofs.
	GetRegistersWithProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment) (values []flow.RegisterValue, proofs []flow.StorageProof, err error)

	// UpdateRegistersWithProof performs batched atomic updates of a subset of registers at a specific state, with proofs.
	UpdateRegistersWithProof(registerIDs []flow.RegisterID, values []flow.RegisterValue, stateCommitment flow.StateCommitment) (newStateCommitment flow.StateCommitment, proofs []flow.StorageProof, err error)
}
Ledger takes care of storing registers (key-value pairs) and providing proofs of correctness. We aim to store a state on the order of 10^10 registers with up to 1M historic state versions.
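As a rough illustration of the trusted (proof-free) methods, and not an excerpt from any implementation, a read-modify-write against a Ledger might look like the following; the register ID and value are assumed to come from the caller:

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// setRegister reads one register at the given state commitment (illustrating the
// trusted read path) and then commits a new value, yielding the new commitment.
func setRegister(l storage.Ledger, id flow.RegisterID, state flow.StateCommitment, value flow.RegisterValue) (flow.StateCommitment, error) {
	if _, err := l.GetRegisters([]flow.RegisterID{id}, state); err != nil {
		return flow.StateCommitment{}, err
	}
	return l.UpdateRegisters([]flow.RegisterID{id}, []flow.RegisterValue{value}, state)
}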
type LedgerVerifier ¶
type LedgerVerifier interface { // verify if a provided proof for getRegisters is accurate VerifyRegistersProof(registerIDs []flow.RegisterID, stateCommitment flow.StateCommitment, values []flow.RegisterValue, proof []flow.StorageProof) (verified bool, err error) }
LedgerVerifier should be designed as a standalone package to verify proofs of storage.
type LightTransactionResults ¶ added in v0.32.0
type LightTransactionResults interface { LightTransactionResultsReader // BatchStore persists and indexes all transaction results (light representation) for the given blockID // as part of the provided batch. The caller must acquire [storage.LockInsertLightTransactionResult] and // hold it until the write batch has been committed. // It returns [storage.ErrAlreadyExists] if light transaction results for the block already exist. BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, transactionResults []flow.LightTransactionResult) error }
LightTransactionResults represents persistent storage for light transaction results.
type LightTransactionResultsReader ¶ added in v0.41.0
type LightTransactionResultsReader interface { // ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID // // Expected error returns during normal operation: // - [storage.ErrNotFound] if light transaction result at given blockID wasn't found. ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.LightTransactionResult, error) // ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index // // Expected error returns during normal operation: // - [storage.ErrNotFound] if light transaction result at given blockID and txIndex wasn't found. ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error) // ByBlockID gets all transaction results for a block, ordered by transaction index // CAUTION: this function returns an empty list for block IDs without known results. // No error returns are expected during normal operations. ByBlockID(id flow.Identifier) ([]flow.LightTransactionResult, error) }
LightTransactionResultsReader represents persistent storage read operations for light transaction results.
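For example, a caller might distinguish the benign "not indexed" case from genuine exceptions like so (a sketch, assuming the flow-go import paths):

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// lookupResult treats storage.ErrNotFound as a benign "not indexed yet" miss and
// wraps anything else as an exception, per the error contract above.
func lookupResult(reader storage.LightTransactionResultsReader, blockID flow.Identifier, txIndex uint32) (*flow.LightTransactionResult, error) {
	result, err := reader.ByBlockIDTransactionIndex(blockID, txIndex)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, nil // block or transaction not indexed (yet)
	}
	if err != nil {
		return nil, fmt.Errorf("unexpected storage failure: %w", err)
	}
	return result, nil
}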
type LockManager ¶ added in v0.43.0
type MyExecutionReceipts ¶ added in v0.15.0
type MyExecutionReceipts interface { // BatchStoreMyReceipt stores blockID-to-my-receipt index entry keyed by blockID in a provided batch. // // If entity fails marshalling, the error is wrapped in a generic error and returned. // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. // // Expected error returns during *normal* operations: // - `storage.ErrDataMismatch` if a *different* receipt has already been indexed for the same block BatchStoreMyReceipt(lctx lockctx.Proof, receipt *flow.ExecutionReceipt, batch ReaderBatchWriter) error // MyReceipt retrieves my receipt for the given block. MyReceipt(blockID flow.Identifier) (*flow.ExecutionReceipt, error) // BatchRemoveIndexByBlockID removes blockID-to-my-execution-receipt index entry keyed by a blockID in a provided batch // No errors are expected during normal operation, even if no entries are matched. // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. BatchRemoveIndexByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error }
MyExecutionReceipts builds on the storage.ExecutionReceipts API but does not expose it directly. Instead, it includes "My" in the method names in order to highlight the notion of "MY execution receipt", from the viewpoint of an individual Execution Node.
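Because the lock must be held until the write batch has been committed, the commit has to happen inside the locked function. A minimal sketch follows, assuming (consistent with the signatures above) that a lockctx.Context can serve as the lockctx.Proof these methods accept; the batch-commit helper withBatch is a hypothetical stand-in for whatever the concrete DB implementation provides:

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeMyReceipt persists the node's own receipt under LockInsertOwnReceipt,
// so concurrent attempts cannot index conflicting receipts for the same block.
// The lock is released only after withBatch has committed the batch.
func storeMyReceipt(
	manager lockctx.Manager,
	receipts storage.MyExecutionReceipts,
	receipt *flow.ExecutionReceipt,
	withBatch func(func(storage.ReaderBatchWriter) error) error, // hypothetical commit helper
) error {
	return storage.WithLock(manager, storage.LockInsertOwnReceipt, func(lctx lockctx.Context) error {
		return withBatch(func(rw storage.ReaderBatchWriter) error {
			return receipts.BatchStoreMyReceipt(lctx, receipt, rw)
		})
	})
}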
type NodeDisallowList ¶ added in v0.41.0
type NodeDisallowList interface { // Store writes the given disallowList to the database. // To avoid legacy entries in the database, we purge // the entire database entry if disallowList is empty. // No errors are expected during normal operations. Store(disallowList map[flow.Identifier]struct{}) error // Retrieve reads the set of disallowed nodes from the database. // No error is returned if no database entry exists. // No errors are expected during normal operations. Retrieve(disallowList *map[flow.Identifier]struct{}) error }
NodeDisallowList represents persistent storage for node disallow list.
type Payloads ¶
type Payloads interface { // ByBlockID returns the payload with the given hash. It is available for // finalized and ambiguous blocks. ByBlockID(blockID flow.Identifier) (*flow.Payload, error) }
Payloads represents persistent storage for payloads.
type ProtocolKVStore ¶ added in v0.33.30
type ProtocolKVStore interface { // BatchStore persists the KV-store snapshot in the database using the given ID as key. // BatchStore is idempotent, i.e. it accepts repeated calls with the same pairs of (stateID, kvStore). // Here, the ID is expected to be a collision-resistant hash of the snapshot (including the // ProtocolStateVersion). // // No error is expected during normal operations. BatchStore(rw ReaderBatchWriter, stateID flow.Identifier, data *flow.PSKeyValueStoreData) error // BatchIndex appends the following operation to the provided write batch: // we extend the map from `blockID` to `stateID`, where `blockID` references the // block that _proposes_ updated key-value store. // BatchIndex is idempotent, i.e. it accepts repeated calls with the same pairs of (blockID , stateID). // Per protocol convention, the block references the `stateID`. As the `blockID` is a collision-resistant hash, // for the same `blockID`, BatchIndex will reject changing the data. // Protocol convention: // - Consider block B, whose ingestion might potentially lead to an updated KV store. For example, // the KV store changes if we seal some execution results emitting specific service events. // - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. // - IMPORTANT: The updated state requires confirmation by a QC and will only become active at the // child block, _after_ validating the QC. // // CAUTION: To prevent data corruption, we need to guarantee atomicity of existence-check and the subsequent // database write. Hence, we require the caller to acquire [storage.LockInsertBlock] and hold it until the // database write has been committed. // // Expected error returns during normal operations: // - [storage.ErrAlreadyExists] if a KV store for the given blockID has already been indexed BatchIndex(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, stateID flow.Identifier) error // ByID retrieves the KV store snapshot with the given ID. // Expected errors during normal operations: // - storage.ErrNotFound if no snapshot with the given Identifier is known. ByID(id flow.Identifier) (*flow.PSKeyValueStoreData, error) // ByBlockID retrieves the kv-store snapshot that the block with the given ID proposes. // CAUTION: this store snapshot requires confirmation by a QC and will only become active at the child block, // _after_ validating the QC. Protocol convention: // - Consider block B, whose ingestion might potentially lead to an updated KV store state. // For example, the state changes if we seal some execution results emitting specific service events. // - For the key `blockID`, we use the identity of block B which _proposes_ this updated KV store. As value, // the hash of the resulting state at the end of processing B is to be used. // - CAUTION: The updated state requires confirmation by a QC and will only become active at the child block, // _after_ validating the QC. // // Expected errors during normal operations: // - storage.ErrNotFound if no snapshot has been indexed for the given block. ByBlockID(blockID flow.Identifier) (*flow.PSKeyValueStoreData, error) }
ProtocolKVStore persists different snapshots of key-value stores [KV-stores]. At this level, the API deals with versioned data blobs, each representing a Snapshot of the Protocol State. The *current* implementation allows retrieving snapshots from the database (e.g. to answer external API calls) even for legacy protocol states whose versions are no longer supported. However, this _may_ change in the future, such that only snapshots whose versions are supported by the current software can be retrieved. TODO maybe rename to `ProtocolStateSnapshots` (?) because at this low level, we are not exposing the KV-store, it is just an encoded data blob
type QuorumCertificates ¶ added in v0.30.0
type QuorumCertificates interface { // BatchStore stores a Quorum Certificate as part of database batch update. QC is indexed by QC.BlockID. // // Note: For the same block, different QCs can easily be constructed by selecting different sub-sets of the received votes // (provided more than the minimal number of consensus participants voted, which is typically the case). In most cases, it // is only important that a block has been certified, but irrelevant who specifically contributed to the QC. Therefore, we // only store the first QC. // // If *any* quorum certificate for QC.BlockID has already been stored, a `storage.ErrAlreadyExists` is returned (typically benign). BatchStore(lockctx.Proof, ReaderBatchWriter, *flow.QuorumCertificate) error // ByBlockID returns the QC that certifies the block referred to by blockID. // * storage.ErrNotFound if no QC for blockID exists. ByBlockID(blockID flow.Identifier) (*flow.QuorumCertificate, error) }
QuorumCertificates represents storage for Quorum Certificates. Quorum Certificates are distributed using blocks, where a block incorporates a QC for its parent. When stored, QCs are indexed by the ID of the block they certify (not the block they are included within). In the example below, `QC_1` is indexed by `Block_1.ID()` Block_1 <- Block_2(QC_1)
type Reader ¶ added in v0.38.0
type Reader interface { // Get gets the value for the given key. It returns ErrNotFound if the DB // does not contain the key. // other errors are exceptions // // The caller should not modify the contents of the returned slice, but it is // safe to modify the contents of the `key` argument after Get returns. The // returned slice will remain valid until the returned Closer is closed. // when err == nil, the caller MUST call closer.Close() or a memory leak will occur. Get(key []byte) (value []byte, closer io.Closer, err error) // NewIter returns a new Iterator for the given key prefix range [startPrefix, endPrefix], both inclusive. // We require that startPrefix ≤ endPrefix (otherwise this function errors). // Specifically, all keys that meet ANY of the following conditions are included in the iteration: // - have a prefix equal to startPrefix OR // - have a prefix equal to the endPrefix OR // - have a prefix that is lexicographically between startPrefix and endPrefix NewIter(startPrefix, endPrefix []byte, ops IteratorOption) (Iterator, error) // NewSeeker returns a new Seeker. NewSeeker() Seeker }
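A sketch of the Get contract in practice: the closer must be closed whenever err == nil, and the value must be copied if it is needed after Close:

import (
	"fmt"

	"github.com/onflow/flow-go/storage"
)

// getCopy returns a copy of the stored value. Wrapping with %w preserves
// storage.ErrNotFound for callers that want to check it via errors.Is.
func getCopy(r storage.Reader, key []byte) ([]byte, error) {
	value, closer, err := r.Get(key)
	if err != nil {
		return nil, fmt.Errorf("could not get key: %w", err)
	}
	defer closer.Close() // required when err == nil, otherwise memory leaks
	return append([]byte(nil), value...), nil // copy: value is invalid after Close
}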
type ReaderBatchWriter ¶ added in v0.38.0
type ReaderBatchWriter interface { // GlobalReader returns a database-backed reader which reads the latest committed global database state ("read-committed isolation"). // This reader will not read writes written to ReaderBatchWriter.Writer until the write batch is committed. // This reader may observe different values for the same key on subsequent reads. GlobalReader() Reader // Writer returns a writer associated with a batch of writes. The batch is pending until it is committed. // When we `Write` into the batch, that write operation is added to the pending batch, but not committed. // The commit operation is atomic w.r.t. the batch; either all writes are applied to the database, or no writes are. // Note: // - The writer cannot be used concurrently for writing. Writer() Writer // AddCallback adds a callback to execute after the batch has been flushed, // regardless of whether the batch update succeeded or failed. // The error parameter is the error returned by the batch update. AddCallback(func(error)) // SetScopedValue stores the given value by the given key in this batch. // Value can be retrieved by the same key via ScopedValue(key). // // Saving data in ReaderBatchWriter can be useful when the store's operation // is called repeatedly with the same ReaderBatchWriter and different data // (e.g., block ID). Aggregating different data (e.g., block ID) within // the same ReaderBatchWriter allows the store's operation to perform batch // operations efficiently on commit success. // // For example, TransactionResults.BatchRemoveByBlockID() receives a // ReaderBatchWriter and a block ID to remove the given block from the // database and memory cache. TransactionResults.BatchRemoveByBlockID() // can be called repeatedly with the same ReaderBatchWriter and different // block IDs to remove multiple blocks. By saving all removed block IDs // with the same ReaderBatchWriter, TransactionResults.BatchRemoveByBlockID() // retrieves all block IDs and removes cached blocks by locking just once // in the OnCommitSucceed() callback, instead of locking the TransactionResults cache // for every removed block ID. SetScopedValue(key string, value any) // ScopedValue returns the value associated with this batch for the given key // and true if the key exists, or nil and false if the key doesn't exist. ScopedValue(key string) (any, bool) }
ReaderBatchWriter is an interface for reading and writing to a storage backend. It is useful for performing a related sequence of reads and writes, after which you would like to modify some non-database state if the sequence completed successfully (via AddCallback). If you are not using AddCallback, avoid using ReaderBatchWriter: use Reader and Writer directly. ReaderBatchWriter is not safe for concurrent use.
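A hedged sketch of the aggregation pattern described above; the scoped key and cache type are hypothetical and not part of this package:

import (
	"sync"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

const removedBlocksKey = "removed-block-ids" // hypothetical scoped-value key

// blockCache is a hypothetical mutex-guarded cache of per-block data.
type blockCache struct {
	mu  sync.Mutex
	ids map[flow.Identifier]struct{}
}

// RemoveAll evicts all given blocks while taking the lock only once.
func (c *blockCache) RemoveAll(ids []flow.Identifier) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, id := range ids {
		delete(c.ids, id)
	}
}

// batchRemove can be called repeatedly with the same batch and different block
// IDs. Each call appends its ID under the scoped key; the first call registers
// a single callback that evicts all aggregated blocks once the batch commits.
func batchRemove(rw storage.ReaderBatchWriter, blockID flow.Identifier, cache *blockCache) {
	v, ok := rw.ScopedValue(removedBlocksKey)
	ids, _ := v.([]flow.Identifier)
	if !ok {
		storage.OnCommitSucceed(rw, func() {
			if v, ok := rw.ScopedValue(removedBlocksKey); ok {
				cache.RemoveAll(v.([]flow.Identifier))
			}
		})
	}
	rw.SetScopedValue(removedBlocksKey, append(ids, blockID))
}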
type RegisterIndex ¶ added in v0.32.0
type RegisterIndex interface { RegisterIndexReader // Store batch of register entries at the provided block height. // // The provided height must either be one higher than the current height or the same to ensure idempotency, // otherwise an error is returned. If the height is not within those bounds, there is either a bug // or state corruption. // // No errors are expected during normal operation. Store(entries flow.RegisterEntries, height uint64) error }
RegisterIndex defines methods for the register index.
type RegisterIndexReader ¶ added in v0.41.0
type RegisterIndexReader interface { // Get register by the register ID at a given block height. // // If the register at the given height was not indexed, returns the highest // height the register was indexed at. // Expected errors: // - storage.ErrHeightNotIndexed if the given height was not indexed yet or lower than the first indexed height. // - storage.ErrNotFound if the given height is indexed, but the register does not exist. Get(ID flow.RegisterID, height uint64) (flow.RegisterValue, error) // LatestHeight returns the latest indexed height. LatestHeight() uint64 // FirstHeight at which we started to index. Returns the first indexed height found in the store. FirstHeight() uint64 }
RegisterIndexReader defines readonly methods for the register index.
type ResultApprovals ¶ added in v0.14.0
type ResultApprovals interface { // StoreMyApproval returns a functor, whose execution // - will store the given ResultApproval // - and index it by result ID and chunk index. // - requires storage.LockIndexResultApproval lock to be held by the caller // The functor's expected error returns during normal operation are: // - `storage.ErrDataMismatch` if a *different* approval for the same key pair (ExecutionResultID, chunk index) is already indexed // // CAUTION: the Flow protocol requires multiple approvals for the same chunk from different verification // nodes. In other words, there are multiple different approvals for the same chunk. Therefore, the index // Executed Chunk ➜ ResultApproval ID (populated here) is *only safe* to be used by Verification Nodes // for tracking their own approvals. // // For the same ExecutionResult, a Verifier will always produce the same approval. Therefore, this operation // is idempotent, i.e. repeated calls with the *same inputs* are equivalent to just calling the method once; // still the method succeeds on each call. However, when attempting to index *different* ResultApproval IDs // for the same key (resultID, chunkIndex) this method returns an exception, as this should never happen for // a correct Verification Node indexing its own approvals. // It returns a functor so that some computation (such as computing approval ID) can be done // before acquiring the lock. StoreMyApproval(approval *flow.ResultApproval) func(lctx lockctx.Proof) error // ByID retrieves a ResultApproval by its ID. // Returns [storage.ErrNotFound] if no Approval with the given ID has been stored. ByID(approvalID flow.Identifier) (*flow.ResultApproval, error) // ByChunk retrieves a ResultApproval by result ID and chunk index. // Returns [storage.ErrNotFound] if no Approval for the given key (resultID, chunkIndex) has been stored. ByChunk(resultID flow.Identifier, chunkIndex uint64) (*flow.ResultApproval, error) }
ResultApprovals implements persistent storage for result approvals. Implementations of this interface must be concurrency safe.
CAUTION: suitable only for _Verification Nodes_ for persisting their _own_ approvals!
- In general, the Flow protocol requires multiple approvals for the same chunk from different verification nodes. In other words, there are multiple different approvals for the same chunk.
- Internally, ResultApprovals populates an index from Executed Chunk ➜ ResultApproval. This is *only safe* for Verification Nodes when tracking their own approvals (for the same ExecutionResult, a Verifier will always produce the same approval)
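A minimal sketch of the functor pattern described above, assuming (consistent with the signatures here) that a lockctx.Context can serve as the lockctx.Proof the functor accepts: the potentially expensive preparation runs before the lock is taken, and only the final write holds storage.LockIndexResultApproval:

import (
	"github.com/jordanschalm/lockctx"

	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// storeApproval prepares the storage functor outside the lock (where ID
// computation can happen), then holds the lock only for the final write.
func storeApproval(manager lockctx.Manager, approvals storage.ResultApprovals, approval *flow.ResultApproval) error {
	storeFn := approvals.StoreMyApproval(approval) // expensive preparation, lock-free
	return storage.WithLock(manager, storage.LockIndexResultApproval, func(lctx lockctx.Context) error {
		return storeFn(lctx)
	})
}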
type SafeBeaconKeys ¶ added in v0.23.9
type SafeBeaconKeys interface { // RetrieveMyBeaconPrivateKey retrieves my beacon private key for the given // epoch, only if my key has been confirmed valid and safe for use. // // Returns: // - (key, true, nil) if the key is present and confirmed valid // - (nil, false, nil) if the key has been marked invalid or unavailable // -> no beacon key will ever be available for the epoch in this case // - (nil, false, [storage.ErrNotFound]) if the DKG has not ended // - (nil, false, error) for any unexpected exception RetrieveMyBeaconPrivateKey(epochCounter uint64) (key crypto.PrivateKey, safe bool, err error) }
SafeBeaconKeys is a safe way to access beacon keys.
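A sketch of consuming the three-part return contract enumerated above:

import (
	"errors"
	"fmt"

	"github.com/onflow/flow-go/storage"
)

// beaconKeyStatus maps the (key, safe, err) return triple onto the four
// documented outcomes; `keys` is an assumed storage.SafeBeaconKeys instance.
func beaconKeyStatus(keys storage.SafeBeaconKeys, epochCounter uint64) (string, error) {
	key, safe, err := keys.RetrieveMyBeaconPrivateKey(epochCounter)
	switch {
	case errors.Is(err, storage.ErrNotFound):
		return "DKG has not ended yet", nil
	case err != nil:
		return "", fmt.Errorf("unexpected exception: %w", err)
	case !safe:
		return "no beacon key will ever be available for this epoch", nil
	default:
		_ = key // confirmed valid: safe to use for beacon signing
		return "beacon key available", nil
	}
}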
type ScheduledTransactions ¶ added in v0.43.3
type ScheduledTransactions interface { ScheduledTransactionsReader // BatchIndex indexes the scheduled transaction by its block ID, transaction ID, and scheduled transaction ID. // `txID` is the TransactionBody.ID of the scheduled transaction. // `scheduledTxID` is the uint64 id field returned by the system smart contract. // Requires the lock: [storage.LockIndexScheduledTransaction] // // Expected error returns during normal operation: // - [storage.ErrAlreadyExists]: if the scheduled transaction is already indexed BatchIndex(lctx lockctx.Proof, blockID flow.Identifier, txID flow.Identifier, scheduledTxID uint64, batch ReaderBatchWriter) error }
ScheduledTransactions represents persistent storage for scheduled transaction indices. Note: no scheduled transactions are stored. Transaction bodies can be generated on-demand using the blueprints package. This interface provides access to indices used to lookup the block ID that the scheduled transaction was executed in, which allows querying its transaction result.
type ScheduledTransactionsReader ¶ added in v0.43.3
type ScheduledTransactionsReader interface { // TransactionIDByID returns the transaction ID of the scheduled transaction by its scheduled transaction ID. // Note: `scheduledTxID` is the uint64 id field returned by the system smart contract. // // Expected error returns during normal operation: // - [storage.ErrNotFound]: if no transaction ID is found for the given scheduled transaction ID TransactionIDByID(scheduledTxID uint64) (flow.Identifier, error) // BlockIDByTransactionID returns the block ID in which the provided system transaction was executed. // `txID` is the TransactionBody.ID of the scheduled transaction. // // Expected error returns during normal operation: // - [storage.ErrNotFound]: if no block ID is found for the given transaction ID BlockIDByTransactionID(txID flow.Identifier) (flow.Identifier, error) }
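A sketch chaining the two lookups above to resolve the block in which a scheduled transaction was executed; its transaction result can then be queried by that block ID:

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// executingBlockOfScheduledTx maps the system contract's uint64 id to the
// transaction ID, then to the executing block ID. Either lookup returns
// storage.ErrNotFound if the scheduled transaction has not been indexed.
func executingBlockOfScheduledTx(reader storage.ScheduledTransactionsReader, scheduledTxID uint64) (flow.Identifier, error) {
	txID, err := reader.TransactionIDByID(scheduledTxID)
	if err != nil {
		return flow.ZeroID, err
	}
	return reader.BlockIDByTransactionID(txID)
}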
type Seals ¶
type Seals interface { // Store inserts the seal. Store(seal *flow.Seal) error // ByID retrieves the seal by the collection // fingerprint. ByID(sealID flow.Identifier) (*flow.Seal, error) // HighestInFork retrieves the highest seal that was included in the // fork up to (and including) the given blockID. // This method should return // - a seal for any block known to the node. // - storage.ErrNotFound if blockID is unknown. HighestInFork(blockID flow.Identifier) (*flow.Seal, error) // FinalizedSealForBlock retrieves the finalized seal for the given block ID. // Returns storage.ErrNotFound if blockID is unknown or no _finalized_ seal // is known for the block. FinalizedSealForBlock(blockID flow.Identifier) (*flow.Seal, error) }
Seals represents persistent storage for seals.
type Seeker ¶ added in v0.41.0
type Seeker interface { // SeekLE (seek less than or equal) returns the largest key in lexicographical // order within inclusive range of [startPrefix, key]. // This function returns an error if specified key is less than startPrefix. // This function returns storage.ErrNotFound if a key that matches // the specified criteria is not found. SeekLE(startPrefix, key []byte) ([]byte, error) }
Seeker is an interface for seeking a key within a range.
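For example (a sketch, treating storage.ErrNotFound as a benign miss):

import (
	"errors"

	"github.com/onflow/flow-go/storage"
)

// largestKeyAtOrBelow returns the largest stored key within [startPrefix, key],
// or nil if no such key exists, per the SeekLE contract above.
func largestKeyAtOrBelow(r storage.Reader, startPrefix, key []byte) ([]byte, error) {
	found, err := r.NewSeeker().SeekLE(startPrefix, key)
	if errors.Is(err, storage.ErrNotFound) {
		return nil, nil
	}
	return found, err
}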
type ServiceEvents ¶ added in v0.14.0
type ServiceEvents interface { // BatchStore stores service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. BatchStore(blockID flow.Identifier, events []flow.Event, batch ReaderBatchWriter) error // ByBlockID returns the events for the given block ID ByBlockID(blockID flow.Identifier) ([]flow.Event, error) // BatchRemoveByBlockID removes service events keyed by a blockID in provided batch // No errors are expected during normal operation, even if no entries are matched. // If database unexpectedly fails to process the request, the error is wrapped in a generic error and returned. BatchRemoveByBlockID(blockID flow.Identifier, batch ReaderBatchWriter) error }
type StoredChunkDataPack ¶ added in v0.33.36
type StoredChunkDataPack struct { ChunkID flow.Identifier StartState flow.StateCommitment Proof flow.StorageProof CollectionID flow.Identifier // flow.ZeroID for system chunks ExecutionDataRoot flow.BlockExecutionDataRoot }
StoredChunkDataPack is the in-storage representation of a chunk data pack. Its prime difference is that, instead of an actual collection, it keeps only the collection's ID, hence relying on the collection being maintained in a secondary storage. Note, StoredChunkDataPack.ID() is the same as ChunkDataPack.ID()
func NewStoredChunkDataPack ¶ added in v0.43.3
func NewStoredChunkDataPack( chunkID flow.Identifier, startState flow.StateCommitment, proof flow.StorageProof, collectionID flow.Identifier, executionDataRoot flow.BlockExecutionDataRoot, ) *StoredChunkDataPack
NewStoredChunkDataPack instantiates an "immutable" StoredChunkDataPack. The `collectionID` field is set to flow.ZeroID for system chunks.
func ToStoredChunkDataPack ¶ added in v0.33.36
func ToStoredChunkDataPack(c *flow.ChunkDataPack) *StoredChunkDataPack
ToStoredChunkDataPack converts the given Chunk Data Pack to its reduced representation. (Collections are stored separately and don't need to be included again here).
func ToStoredChunkDataPacks ¶ added in v0.43.3
func ToStoredChunkDataPacks(cs []*flow.ChunkDataPack) []*StoredChunkDataPack
ToStoredChunkDataPacks converts the given Chunk Data Packs to their reduced representation. (Collections are stored separately and don't need to be included again here).
func (StoredChunkDataPack) Equals ¶ added in v0.43.3
func (c StoredChunkDataPack) Equals(other StoredChunkDataPack) (equal bool, diffReason string)
Equals compares two StoredChunkDataPack for equality. It returns (true, "") if they are equal, otherwise (false, reason) where reason is the first found reason for the mismatch.
func (StoredChunkDataPack) ID ¶ added in v0.43.3
func (c StoredChunkDataPack) ID() flow.Identifier
ID returns the identifier of the chunk data pack, which is derived from its contents. Note, StoredChunkDataPack.ID() is the same as ChunkDataPack.ID()
func (*StoredChunkDataPack) IsSystemChunk ¶ added in v0.43.3
func (s *StoredChunkDataPack) IsSystemChunk() bool
IsSystemChunk returns true if this chunk data pack is for a system chunk.
type StoredChunkDataPacks ¶ added in v0.43.3
type StoredChunkDataPacks interface { // StoreChunkDataPacks stores multiple StoredChunkDataPacks cs in a batch. // It returns the chunk data pack IDs // No error returns are expected during normal operation. StoreChunkDataPacks(cs []*StoredChunkDataPack) ([]flow.Identifier, error) // ByID returns the StoredChunkDataPack for the given ID. // It returns [storage.ErrNotFound] if no entry exists for the given ID. ByID(id flow.Identifier) (*StoredChunkDataPack, error) // Remove removes multiple ChunkDataPacks cs keyed by their IDs in a batch. // No error returns are expected during normal operation, even if none of the referenced objects exist in storage. Remove(chunkDataPackIDs []flow.Identifier) error // BatchRemove removes multiple ChunkDataPacks with the given IDs from storage as part of the provided write batch. // No error returns are expected during normal operation, even if no entries are matched. BatchRemove(chunkDataPackIDs []flow.Identifier, rw ReaderBatchWriter) error }
StoredChunkDataPacks represents persistent storage for chunk data packs. It works with the reduced representation `StoredChunkDataPack` for chunk data packs, where instead of the full collection data, only the collection's hash (ID) is contained.
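A sketch of the typical write path: reduce full chunk data packs to their stored representation and persist them in one batch:

import (
	"github.com/onflow/flow-go/model/flow"
	"github.com/onflow/flow-go/storage"
)

// persistChunkDataPacks converts the given chunk data packs to their reduced
// representation (collections are kept separately) and stores them, returning
// the chunk data pack IDs.
func persistChunkDataPacks(store storage.StoredChunkDataPacks, packs []*flow.ChunkDataPack) ([]flow.Identifier, error) {
	return store.StoreChunkDataPacks(storage.ToStoredChunkDataPacks(packs))
}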
type Transaction deprecated ¶ added in v0.15.1
type TransactionResultErrorMessages ¶ added in v0.38.0
type TransactionResultErrorMessages interface { TransactionResultErrorMessagesReader // Store persists and indexes all transaction result error messages for the given blockID. The caller must // acquire [storage.LockInsertTransactionResultErrMessage] and hold it until the write batch has been committed. // It returns [storage.ErrAlreadyExists] if tx result error messages for the block already exist. Store(lctx lockctx.Proof, blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage) error // BatchStore persists and indexes all transaction result error messages for the given blockID as part // of the provided batch. The caller must acquire [storage.LockInsertTransactionResultErrMessage] and // hold it until the write batch has been committed. // It returns [storage.ErrAlreadyExists] if tx result error messages for the block already exist. BatchStore(lctx lockctx.Proof, rw ReaderBatchWriter, blockID flow.Identifier, transactionResultErrorMessages []flow.TransactionResultErrorMessage) error }
TransactionResultErrorMessages represents persistent storage for transaction result error messages
type TransactionResultErrorMessagesReader ¶ added in v0.41.0
type TransactionResultErrorMessagesReader interface { // Exists returns true if transaction result error messages for the given ID have been stored. // // Note that transaction error messages are auxiliary data provided by the Execution Nodes on a goodwill basis and // not protected by the protocol. Execution Error messages might be non-deterministic, i.e. potentially different // for different execution nodes. // // No errors are expected during normal operation. Exists(blockID flow.Identifier) (bool, error) // ByBlockIDTransactionID returns the transaction result error message for the given block ID and transaction ID. // // Note that transaction error messages are auxiliary data provided by the Execution Nodes on a goodwill basis and // not protected by the protocol. Execution Error messages might be non-deterministic, i.e. potentially different // for different execution nodes. // // Expected errors during normal operation: // - [storage.ErrNotFound] if no transaction error message is known at given block and transaction id. ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResultErrorMessage, error) // ByBlockIDTransactionIndex returns the transaction result error message for the given blockID and transaction index. // // Note that transaction error messages are auxiliary data provided by the Execution Nodes on a goodwill basis and // not protected by the protocol. Execution Error messages might be non-deterministic, i.e. potentially different // for different execution nodes. // // Expected errors during normal operation: // - [storage.ErrNotFound] if no transaction error message is known at given block and transaction index. ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResultErrorMessage, error) // ByBlockID gets all transaction result error messages for a block, ordered by transaction index. // CAUTION: This method will return an empty slice both if the block is not indexed yet and if the block does not have any errors. // // Note that transaction error messages are auxiliary data provided by the Execution Nodes on a goodwill basis and // not protected by the protocol. Execution Error messages might be non-deterministic, i.e. potentially different // for different execution nodes. // // No errors are expected during normal operations. ByBlockID(id flow.Identifier) ([]flow.TransactionResultErrorMessage, error) }
TransactionResultErrorMessagesReader represents persistent storage read operations for transaction result error messages
type TransactionResults ¶
type TransactionResults interface { TransactionResultsReader // BatchStore inserts a batch of transaction results into a batch BatchStore(blockID flow.Identifier, transactionResults []flow.TransactionResult, batch ReaderBatchWriter) error // BatchRemoveByBlockID removes all transaction results for a block BatchRemoveByBlockID(id flow.Identifier, batch ReaderBatchWriter) error }
TransactionResults represents persistent storage for transaction results.
type TransactionResultsReader ¶ added in v0.40.0
type TransactionResultsReader interface { // ByBlockIDTransactionID returns the transaction result for the given block ID and transaction ID ByBlockIDTransactionID(blockID flow.Identifier, transactionID flow.Identifier) (*flow.TransactionResult, error) // ByBlockIDTransactionIndex returns the transaction result for the given blockID and transaction index ByBlockIDTransactionIndex(blockID flow.Identifier, txIndex uint32) (*flow.TransactionResult, error) // ByBlockID gets all transaction results for a block, ordered by transaction index ByBlockID(id flow.Identifier) ([]flow.TransactionResult, error) }
type Transactions ¶
type Transactions interface { TransactionsReader // Store inserts the transaction, keyed by fingerprint. Duplicate transaction insertion is ignored // No errors are expected during normal operation. Store(tx *flow.TransactionBody) error // BatchStore stores transaction within a batch operation. // No errors are expected during normal operation. BatchStore(tx *flow.TransactionBody, batch ReaderBatchWriter) error }
Transactions represents persistent storage for transactions.
type TransactionsReader ¶ added in v0.41.0
type TransactionsReader interface { // ByID returns the transaction for the given fingerprint. // Expected errors during normal operation: // - `storage.ErrNotFound` if transaction is not found. ByID(txID flow.Identifier) (*flow.TransactionBody, error) }
TransactionsReader represents persistent storage read operations for transactions.
type VersionBeacons ¶ added in v0.31.0
type VersionBeacons interface { // Highest finds the highest flow.SealedVersionBeacon but no higher than // belowOrEqualTo // Returns nil if no version beacon has been sealed below or equal to the // input height. Highest(belowOrEqualTo uint64) (*flow.SealedVersionBeacon, error) }
VersionBeacons represents persistent storage for Version Beacons.
type Writer ¶ added in v0.38.0
type Writer interface { // Set sets the value for the given key. It overwrites any previous value // for that key; a DB is not a multi-map. // // It is safe to modify the contents of the arguments after Set returns. // No errors expected during normal operation Set(k, v []byte) error // Delete deletes the value for the given key. Deletes are blind and will // succeed even if the given key does not exist. // // It is safe to modify the contents of the arguments after Delete returns. // No errors expected during normal operation Delete(key []byte) error // DeleteByRange removes all keys with a prefix that falls within the // range [start, end], both inclusive. // No errors expected during normal operation DeleteByRange(globalReader Reader, startPrefix, endPrefix []byte) error }
Writer is an interface for batch writing to a storage backend. One Writer instance cannot be used concurrently by multiple goroutines.
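A sketch combining the Writer operations within one pending batch; the caller is assumed to commit the batch:

import (
	"github.com/onflow/flow-go/storage"
)

// pruneAndMark deletes every key in the inclusive prefix range and then sets a
// marker key, all within the same pending batch, so the operations are applied
// atomically on commit (or not at all).
func pruneAndMark(rw storage.ReaderBatchWriter, startPrefix, endPrefix, markerKey []byte) error {
	w := rw.Writer()
	if err := w.DeleteByRange(rw.GlobalReader(), startPrefix, endPrefix); err != nil {
		return err
	}
	// blind write: overwrites any previous value for markerKey
	return w.Set(markerKey, []byte{1})
}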
Source Files ¶
- all.go
- approvals.go
- batch.go
- blocks.go
- chunk_data_packs.go
- chunk_data_packs_stored.go
- chunks_queue.go
- cluster_blocks.go
- cluster_payloads.go
- collections.go
- commits.go
- computation_result.go
- consumer_progress.go
- dkg.go
- epoch_commits.go
- epoch_protocol_state.go
- epoch_setups.go
- errors.go
- events.go
- execution_fork_evidence.go
- guarantees.go
- headers.go
- height.go
- index.go
- latest_persisted_sealed_result.go
- ledger.go
- light_transaction_results.go
- locks.go
- node_disallow_list.go
- operations.go
- payloads.go
- protocol_kv_store.go
- qcs.go
- receipts.go
- registers.go
- results.go
- scheduled_transactions.go
- seals.go
- transaction_result_error_messages.go
- transaction_results.go
- transactions.go
- version_beacon.go