Documentation
¶
Index ¶
- type AddressBalanceLoader
- func (l *AddressBalanceLoader) Clear(key string)
- func (l *AddressBalanceLoader) Load(key string) (int64, error)
- func (l *AddressBalanceLoader) LoadAll(keys []string) ([]int64, []error)
- func (l *AddressBalanceLoader) LoadAllThunk(keys []string) func() ([]int64, []error)
- func (l *AddressBalanceLoader) LoadThunk(key string) func() (int64, error)
- func (l *AddressBalanceLoader) Prime(key string, value int64) bool
- type AddressBalanceLoaderConfig
- type BlockLoader
- func (l *BlockLoader) Clear(key string)
- func (l *BlockLoader) Load(key string) ([]*model.Block, error)
- func (l *BlockLoader) LoadAll(keys []string) ([][]*model.Block, []error)
- func (l *BlockLoader) LoadAllThunk(keys []string) func() ([][]*model.Block, []error)
- func (l *BlockLoader) LoadThunk(key string) func() ([]*model.Block, error)
- func (l *BlockLoader) Prime(key string, value []*model.Block) bool
- type BlockLoaderConfig
- type PostLoader
- func (l *PostLoader) Clear(key string)
- func (l *PostLoader) Load(key string) (*model.Post, error)
- func (l *PostLoader) LoadAll(keys []string) ([]*model.Post, []error)
- func (l *PostLoader) LoadAllThunk(keys []string) func() ([]*model.Post, []error)
- func (l *PostLoader) LoadThunk(key string) func() (*model.Post, error)
- func (l *PostLoader) Prime(key string, value *model.Post) bool
- type PostLoaderConfig
- type ProfileLoader
- func (l *ProfileLoader) Clear(key string)
- func (l *ProfileLoader) Load(key string) (*model.Profile, error)
- func (l *ProfileLoader) LoadAll(keys []string) ([]*model.Profile, []error)
- func (l *ProfileLoader) LoadAllThunk(keys []string) func() ([]*model.Profile, []error)
- func (l *ProfileLoader) LoadThunk(key string) func() (*model.Profile, error)
- func (l *ProfileLoader) Prime(key string, value *model.Profile) bool
- type ProfileLoaderConfig
- type TxLostLoader
- func (l *TxLostLoader) Clear(key string)
- func (l *TxLostLoader) Load(key string) (*model.TxLost, error)
- func (l *TxLostLoader) LoadAll(keys []string) ([]*model.TxLost, []error)
- func (l *TxLostLoader) LoadAllThunk(keys []string) func() ([]*model.TxLost, []error)
- func (l *TxLostLoader) LoadThunk(key string) func() (*model.TxLost, error)
- func (l *TxLostLoader) Prime(key string, value *model.TxLost) bool
- type TxLostLoaderConfig
- type TxOutputLoader
- func (l *TxOutputLoader) Clear(key model.HashIndex)
- func (l *TxOutputLoader) Load(key model.HashIndex) (*model.TxOutput, error)
- func (l *TxOutputLoader) LoadAll(keys []model.HashIndex) ([]*model.TxOutput, []error)
- func (l *TxOutputLoader) LoadAllThunk(keys []model.HashIndex) func() ([]*model.TxOutput, []error)
- func (l *TxOutputLoader) LoadThunk(key model.HashIndex) func() (*model.TxOutput, error)
- func (l *TxOutputLoader) Prime(key model.HashIndex, value *model.TxOutput) bool
- type TxOutputLoaderConfig
- type TxOutputSpendLoader
- func (l *TxOutputSpendLoader) Clear(key model.HashIndex)
- func (l *TxOutputSpendLoader) Load(key model.HashIndex) ([]*model.TxInput, error)
- func (l *TxOutputSpendLoader) LoadAll(keys []model.HashIndex) ([][]*model.TxInput, []error)
- func (l *TxOutputSpendLoader) LoadAllThunk(keys []model.HashIndex) func() ([][]*model.TxInput, []error)
- func (l *TxOutputSpendLoader) LoadThunk(key model.HashIndex) func() ([]*model.TxInput, error)
- func (l *TxOutputSpendLoader) Prime(key model.HashIndex, value []*model.TxInput) bool
- type TxOutputSpendLoaderConfig
- type TxRawLoader
- func (l *TxRawLoader) Clear(key string)
- func (l *TxRawLoader) Load(key string) (*model.Tx, error)
- func (l *TxRawLoader) LoadAll(keys []string) ([]*model.Tx, []error)
- func (l *TxRawLoader) LoadAllThunk(keys []string) func() ([]*model.Tx, []error)
- func (l *TxRawLoader) LoadThunk(key string) func() (*model.Tx, error)
- func (l *TxRawLoader) Prime(key string, value *model.Tx) bool
- type TxRawLoaderConfig
- type TxSeenLoader
- func (l *TxSeenLoader) Clear(key string)
- func (l *TxSeenLoader) Load(key string) (*model.Date, error)
- func (l *TxSeenLoader) LoadAll(keys []string) ([]*model.Date, []error)
- func (l *TxSeenLoader) LoadAllThunk(keys []string) func() ([]*model.Date, []error)
- func (l *TxSeenLoader) LoadThunk(key string) func() (*model.Date, error)
- func (l *TxSeenLoader) Prime(key string, value *model.Date) bool
- type TxSeenLoaderConfig
- type TxSuspectLoader
- func (l *TxSuspectLoader) Clear(key string)
- func (l *TxSuspectLoader) Load(key string) (*model.TxSuspect, error)
- func (l *TxSuspectLoader) LoadAll(keys []string) ([]*model.TxSuspect, []error)
- func (l *TxSuspectLoader) LoadAllThunk(keys []string) func() ([]*model.TxSuspect, []error)
- func (l *TxSuspectLoader) LoadThunk(key string) func() (*model.TxSuspect, error)
- func (l *TxSuspectLoader) Prime(key string, value *model.TxSuspect) bool
- type TxSuspectLoaderConfig
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AddressBalanceLoader ¶
type AddressBalanceLoader struct {
// contains filtered or unexported fields
}
AddressBalanceLoader batches and caches requests
func NewAddressBalanceLoader ¶
func NewAddressBalanceLoader(config AddressBalanceLoaderConfig) *AddressBalanceLoader
NewAddressBalanceLoader creates a new AddressBalanceLoader given a fetch, wait, and maxBatch
func (*AddressBalanceLoader) Clear ¶
func (l *AddressBalanceLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*AddressBalanceLoader) Load ¶
func (l *AddressBalanceLoader) Load(key string) (int64, error)
Load an int64 by key, batching and caching will be applied automatically
func (*AddressBalanceLoader) LoadAll ¶
func (l *AddressBalanceLoader) LoadAll(keys []string) ([]int64, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*AddressBalanceLoader) LoadAllThunk ¶
func (l *AddressBalanceLoader) LoadAllThunk(keys []string) func() ([]int64, []error)
LoadAllThunk returns a function that when called will block waiting for int64s. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AddressBalanceLoader) LoadThunk ¶
func (l *AddressBalanceLoader) LoadThunk(key string) func() (int64, error)
LoadThunk returns a function that when called will block waiting for an int64. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AddressBalanceLoader) Prime ¶
func (l *AddressBalanceLoader) Prime(key string, value int64) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type AddressBalanceLoaderConfig ¶
type AddressBalanceLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]int64, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
AddressBalanceLoaderConfig captures the config to create a new AddressBalanceLoader
type BlockLoader ¶
type BlockLoader struct {
// contains filtered or unexported fields
}
BlockLoader batches and caches requests
func NewBlockLoader ¶
func NewBlockLoader(config BlockLoaderConfig) *BlockLoader
NewBlockLoader creates a new BlockLoader given a fetch, wait, and maxBatch
func (*BlockLoader) Clear ¶
func (l *BlockLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*BlockLoader) Load ¶
func (l *BlockLoader) Load(key string) ([]*model.Block, error)
Load a Block by key, batching and caching will be applied automatically
func (*BlockLoader) LoadAll ¶
func (l *BlockLoader) LoadAll(keys []string) ([][]*model.Block, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*BlockLoader) LoadAllThunk ¶
func (l *BlockLoader) LoadAllThunk(keys []string) func() ([][]*model.Block, []error)
LoadAllThunk returns a function that when called will block waiting for Blocks. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*BlockLoader) LoadThunk ¶
func (l *BlockLoader) LoadThunk(key string) func() ([]*model.Block, error)
LoadThunk returns a function that when called will block waiting for a Block. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*BlockLoader) Prime ¶
func (l *BlockLoader) Prime(key string, value []*model.Block) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type BlockLoaderConfig ¶
type BlockLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([][]*model.Block, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
BlockLoaderConfig captures the config to create a new BlockLoader
type PostLoader ¶
type PostLoader struct {
// contains filtered or unexported fields
}
PostLoader batches and caches requests
func NewPostLoader ¶
func NewPostLoader(config PostLoaderConfig) *PostLoader
NewPostLoader creates a new PostLoader given a fetch, wait, and maxBatch
func (*PostLoader) Clear ¶
func (l *PostLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*PostLoader) Load ¶
func (l *PostLoader) Load(key string) (*model.Post, error)
Load a Post by key, batching and caching will be applied automatically
func (*PostLoader) LoadAll ¶
func (l *PostLoader) LoadAll(keys []string) ([]*model.Post, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*PostLoader) LoadAllThunk ¶
func (l *PostLoader) LoadAllThunk(keys []string) func() ([]*model.Post, []error)
LoadAllThunk returns a function that when called will block waiting for Posts. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*PostLoader) LoadThunk ¶
func (l *PostLoader) LoadThunk(key string) func() (*model.Post, error)
LoadThunk returns a function that when called will block waiting for a Post. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*PostLoader) Prime ¶
func (l *PostLoader) Prime(key string, value *model.Post) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type PostLoaderConfig ¶
type PostLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.Post, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
PostLoaderConfig captures the config to create a new PostLoader
type ProfileLoader ¶
type ProfileLoader struct {
// contains filtered or unexported fields
}
ProfileLoader batches and caches requests
func NewProfileLoader ¶
func NewProfileLoader(config ProfileLoaderConfig) *ProfileLoader
NewProfileLoader creates a new ProfileLoader given a fetch, wait, and maxBatch
func (*ProfileLoader) Clear ¶
func (l *ProfileLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*ProfileLoader) Load ¶
func (l *ProfileLoader) Load(key string) (*model.Profile, error)
Load a Profile by key, batching and caching will be applied automatically
func (*ProfileLoader) LoadAll ¶
func (l *ProfileLoader) LoadAll(keys []string) ([]*model.Profile, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*ProfileLoader) LoadAllThunk ¶
func (l *ProfileLoader) LoadAllThunk(keys []string) func() ([]*model.Profile, []error)
LoadAllThunk returns a function that when called will block waiting for Profiles. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*ProfileLoader) LoadThunk ¶
func (l *ProfileLoader) LoadThunk(key string) func() (*model.Profile, error)
LoadThunk returns a function that when called will block waiting for a Profile. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*ProfileLoader) Prime ¶
func (l *ProfileLoader) Prime(key string, value *model.Profile) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type ProfileLoaderConfig ¶
type ProfileLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.Profile, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
ProfileLoaderConfig captures the config to create a new ProfileLoader
type TxLostLoader ¶
type TxLostLoader struct {
// contains filtered or unexported fields
}
TxLostLoader batches and caches requests
func NewTxLostLoader ¶
func NewTxLostLoader(config TxLostLoaderConfig) *TxLostLoader
NewTxLostLoader creates a new TxLostLoader given a fetch, wait, and maxBatch
func (*TxLostLoader) Clear ¶
func (l *TxLostLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*TxLostLoader) Load ¶
func (l *TxLostLoader) Load(key string) (*model.TxLost, error)
Load a TxLost by key, batching and caching will be applied automatically
func (*TxLostLoader) LoadAll ¶
func (l *TxLostLoader) LoadAll(keys []string) ([]*model.TxLost, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TxLostLoader) LoadAllThunk ¶
func (l *TxLostLoader) LoadAllThunk(keys []string) func() ([]*model.TxLost, []error)
LoadAllThunk returns a function that when called will block waiting for TxLosts. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxLostLoader) LoadThunk ¶
func (l *TxLostLoader) LoadThunk(key string) func() (*model.TxLost, error)
LoadThunk returns a function that when called will block waiting for a TxLost. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxLostLoader) Prime ¶
func (l *TxLostLoader) Prime(key string, value *model.TxLost) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type TxLostLoaderConfig ¶
type TxLostLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.TxLost, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TxLostLoaderConfig captures the config to create a new TxLostLoader
type TxOutputLoader ¶
type TxOutputLoader struct {
// contains filtered or unexported fields
}
TxOutputLoader batches and caches requests
func NewTxOutputLoader ¶
func NewTxOutputLoader(config TxOutputLoaderConfig) *TxOutputLoader
NewTxOutputLoader creates a new TxOutputLoader given a fetch, wait, and maxBatch
func (*TxOutputLoader) Clear ¶
func (l *TxOutputLoader) Clear(key model.HashIndex)
Clear the value at key from the cache, if it exists
func (*TxOutputLoader) Load ¶
func (l *TxOutputLoader) Load(key model.HashIndex) (*model.TxOutput, error)
Load a TxOutput by key, batching and caching will be applied automatically
func (*TxOutputLoader) LoadAll ¶
func (l *TxOutputLoader) LoadAll(keys []model.HashIndex) ([]*model.TxOutput, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TxOutputLoader) LoadAllThunk ¶
func (l *TxOutputLoader) LoadAllThunk(keys []model.HashIndex) func() ([]*model.TxOutput, []error)
LoadAllThunk returns a function that when called will block waiting for TxOutputs. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxOutputLoader) LoadThunk ¶
func (l *TxOutputLoader) LoadThunk(key model.HashIndex) func() (*model.TxOutput, error)
LoadThunk returns a function that when called will block waiting for a TxOutput. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxOutputLoader) Prime ¶
func (l *TxOutputLoader) Prime(key model.HashIndex, value *model.TxOutput) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type TxOutputLoaderConfig ¶
type TxOutputLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.HashIndex) ([]*model.TxOutput, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TxOutputLoaderConfig captures the config to create a new TxOutputLoader
type TxOutputSpendLoader ¶
type TxOutputSpendLoader struct {
// contains filtered or unexported fields
}
TxOutputSpendLoader batches and caches requests
func NewTxOutputSpendLoader ¶
func NewTxOutputSpendLoader(config TxOutputSpendLoaderConfig) *TxOutputSpendLoader
NewTxOutputSpendLoader creates a new TxOutputSpendLoader given a fetch, wait, and maxBatch
func (*TxOutputSpendLoader) Clear ¶
func (l *TxOutputSpendLoader) Clear(key model.HashIndex)
Clear the value at key from the cache, if it exists
func (*TxOutputSpendLoader) Load ¶
func (l *TxOutputSpendLoader) Load(key model.HashIndex) ([]*model.TxInput, error)
Load a TxInput by key, batching and caching will be applied automatically
func (*TxOutputSpendLoader) LoadAll ¶
func (l *TxOutputSpendLoader) LoadAll(keys []model.HashIndex) ([][]*model.TxInput, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TxOutputSpendLoader) LoadAllThunk ¶
func (l *TxOutputSpendLoader) LoadAllThunk(keys []model.HashIndex) func() ([][]*model.TxInput, []error)
LoadAllThunk returns a function that when called will block waiting for TxInputs. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxOutputSpendLoader) LoadThunk ¶
func (l *TxOutputSpendLoader) LoadThunk(key model.HashIndex) func() ([]*model.TxInput, error)
LoadThunk returns a function that when called will block waiting for a TxInput. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxOutputSpendLoader) Prime ¶
func (l *TxOutputSpendLoader) Prime(key model.HashIndex, value []*model.TxInput) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type TxOutputSpendLoaderConfig ¶
type TxOutputSpendLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.HashIndex) ([][]*model.TxInput, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TxOutputSpendLoaderConfig captures the config to create a new TxOutputSpendLoader
type TxRawLoader ¶
type TxRawLoader struct {
// contains filtered or unexported fields
}
TxRawLoader batches and caches requests
func NewTxRawLoader ¶
func NewTxRawLoader(config TxRawLoaderConfig) *TxRawLoader
NewTxRawLoader creates a new TxRawLoader given a fetch, wait, and maxBatch
func (*TxRawLoader) Clear ¶
func (l *TxRawLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*TxRawLoader) Load ¶
func (l *TxRawLoader) Load(key string) (*model.Tx, error)
Load a Tx by key, batching and caching will be applied automatically
func (*TxRawLoader) LoadAll ¶
func (l *TxRawLoader) LoadAll(keys []string) ([]*model.Tx, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TxRawLoader) LoadAllThunk ¶
func (l *TxRawLoader) LoadAllThunk(keys []string) func() ([]*model.Tx, []error)
LoadAllThunk returns a function that when called will block waiting for Txs. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxRawLoader) LoadThunk ¶
func (l *TxRawLoader) LoadThunk(key string) func() (*model.Tx, error)
LoadThunk returns a function that when called will block waiting for a Tx. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxRawLoader) Prime ¶
func (l *TxRawLoader) Prime(key string, value *model.Tx) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type TxRawLoaderConfig ¶
type TxRawLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.Tx, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TxRawLoaderConfig captures the config to create a new TxRawLoader
type TxSeenLoader ¶
type TxSeenLoader struct {
// contains filtered or unexported fields
}
TxSeenLoader batches and caches requests
func NewTxSeenLoader ¶
func NewTxSeenLoader(config TxSeenLoaderConfig) *TxSeenLoader
NewTxSeenLoader creates a new TxSeenLoader given a fetch, wait, and maxBatch
func (*TxSeenLoader) Clear ¶
func (l *TxSeenLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*TxSeenLoader) Load ¶
func (l *TxSeenLoader) Load(key string) (*model.Date, error)
Load a Date by key, batching and caching will be applied automatically
func (*TxSeenLoader) LoadAll ¶
func (l *TxSeenLoader) LoadAll(keys []string) ([]*model.Date, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TxSeenLoader) LoadAllThunk ¶
func (l *TxSeenLoader) LoadAllThunk(keys []string) func() ([]*model.Date, []error)
LoadAllThunk returns a function that when called will block waiting for Dates. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxSeenLoader) LoadThunk ¶
func (l *TxSeenLoader) LoadThunk(key string) func() (*model.Date, error)
LoadThunk returns a function that when called will block waiting for a Date. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxSeenLoader) Prime ¶
func (l *TxSeenLoader) Prime(key string, value *model.Date) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type TxSeenLoaderConfig ¶
type TxSeenLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.Date, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TxSeenLoaderConfig captures the config to create a new TxSeenLoader
type TxSuspectLoader ¶
type TxSuspectLoader struct {
// contains filtered or unexported fields
}
TxSuspectLoader batches and caches requests
func NewTxSuspectLoader ¶
func NewTxSuspectLoader(config TxSuspectLoaderConfig) *TxSuspectLoader
NewTxSuspectLoader creates a new TxSuspectLoader given a fetch, wait, and maxBatch
func (*TxSuspectLoader) Clear ¶
func (l *TxSuspectLoader) Clear(key string)
Clear the value at key from the cache, if it exists
func (*TxSuspectLoader) Load ¶
func (l *TxSuspectLoader) Load(key string) (*model.TxSuspect, error)
Load a TxSuspect by key, batching and caching will be applied automatically
func (*TxSuspectLoader) LoadAll ¶
func (l *TxSuspectLoader) LoadAll(keys []string) ([]*model.TxSuspect, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TxSuspectLoader) LoadAllThunk ¶
func (l *TxSuspectLoader) LoadAllThunk(keys []string) func() ([]*model.TxSuspect, []error)
LoadAllThunk returns a function that when called will block waiting for TxSuspects. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxSuspectLoader) LoadThunk ¶
func (l *TxSuspectLoader) LoadThunk(key string) func() (*model.TxSuspect, error)
LoadThunk returns a function that when called will block waiting for a TxSuspect. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TxSuspectLoader) Prime ¶
func (l *TxSuspectLoader) Prime(key string, value *model.TxSuspect) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type TxSuspectLoaderConfig ¶
type TxSuspectLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []string) ([]*model.TxSuspect, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TxSuspectLoaderConfig captures the config to create a new TxSuspectLoader