Documentation
¶
Index ¶
- type AgencyLoader
- func (l *AgencyLoader) Clear(key int)
- func (l *AgencyLoader) Load(key int) (*model.Agency, error)
- func (l *AgencyLoader) LoadAll(keys []int) ([]*model.Agency, []error)
- func (l *AgencyLoader) LoadAllThunk(keys []int) func() ([]*model.Agency, []error)
- func (l *AgencyLoader) LoadThunk(key int) func() (*model.Agency, error)
- func (l *AgencyLoader) Prime(key int, value *model.Agency) bool
- type AgencyLoaderConfig
- type AgencyPlaceWhereLoader
- func (l *AgencyPlaceWhereLoader) Clear(key model.AgencyPlaceParam)
- func (l *AgencyPlaceWhereLoader) Load(key model.AgencyPlaceParam) ([]*model.AgencyPlace, error)
- func (l *AgencyPlaceWhereLoader) LoadAll(keys []model.AgencyPlaceParam) ([][]*model.AgencyPlace, []error)
- func (l *AgencyPlaceWhereLoader) LoadAllThunk(keys []model.AgencyPlaceParam) func() ([][]*model.AgencyPlace, []error)
- func (l *AgencyPlaceWhereLoader) LoadThunk(key model.AgencyPlaceParam) func() ([]*model.AgencyPlace, error)
- func (l *AgencyPlaceWhereLoader) Prime(key model.AgencyPlaceParam, value []*model.AgencyPlace) bool
- type AgencyPlaceWhereLoaderConfig
- type AgencyWhereLoader
- func (l *AgencyWhereLoader) Clear(key model.AgencyParam)
- func (l *AgencyWhereLoader) Load(key model.AgencyParam) ([]*model.Agency, error)
- func (l *AgencyWhereLoader) LoadAll(keys []model.AgencyParam) ([][]*model.Agency, []error)
- func (l *AgencyWhereLoader) LoadAllThunk(keys []model.AgencyParam) func() ([][]*model.Agency, []error)
- func (l *AgencyWhereLoader) LoadThunk(key model.AgencyParam) func() ([]*model.Agency, error)
- func (l *AgencyWhereLoader) Prime(key model.AgencyParam, value []*model.Agency) bool
- type AgencyWhereLoaderConfig
- type CalendarDateWhereLoader
- func (l *CalendarDateWhereLoader) Clear(key model.CalendarDateParam)
- func (l *CalendarDateWhereLoader) Load(key model.CalendarDateParam) ([]*model.CalendarDate, error)
- func (l *CalendarDateWhereLoader) LoadAll(keys []model.CalendarDateParam) ([][]*model.CalendarDate, []error)
- func (l *CalendarDateWhereLoader) LoadAllThunk(keys []model.CalendarDateParam) func() ([][]*model.CalendarDate, []error)
- func (l *CalendarDateWhereLoader) LoadThunk(key model.CalendarDateParam) func() ([]*model.CalendarDate, error)
- func (l *CalendarDateWhereLoader) Prime(key model.CalendarDateParam, value []*model.CalendarDate) bool
- type CalendarDateWhereLoaderConfig
- type CalendarLoader
- func (l *CalendarLoader) Clear(key int)
- func (l *CalendarLoader) Load(key int) (*model.Calendar, error)
- func (l *CalendarLoader) LoadAll(keys []int) ([]*model.Calendar, []error)
- func (l *CalendarLoader) LoadAllThunk(keys []int) func() ([]*model.Calendar, []error)
- func (l *CalendarLoader) LoadThunk(key int) func() (*model.Calendar, error)
- func (l *CalendarLoader) Prime(key int, value *model.Calendar) bool
- type CalendarLoaderConfig
- type CensusGeographyWhereLoader
- func (l *CensusGeographyWhereLoader) Clear(key model.CensusGeographyParam)
- func (l *CensusGeographyWhereLoader) Load(key model.CensusGeographyParam) ([]*model.CensusGeography, error)
- func (l *CensusGeographyWhereLoader) LoadAll(keys []model.CensusGeographyParam) ([][]*model.CensusGeography, []error)
- func (l *CensusGeographyWhereLoader) LoadAllThunk(keys []model.CensusGeographyParam) func() ([][]*model.CensusGeography, []error)
- func (l *CensusGeographyWhereLoader) LoadThunk(key model.CensusGeographyParam) func() ([]*model.CensusGeography, error)
- func (l *CensusGeographyWhereLoader) Prime(key model.CensusGeographyParam, value []*model.CensusGeography) bool
- type CensusGeographyWhereLoaderConfig
- type CensusTableLoader
- func (l *CensusTableLoader) Clear(key int)
- func (l *CensusTableLoader) Load(key int) (*model.CensusTable, error)
- func (l *CensusTableLoader) LoadAll(keys []int) ([]*model.CensusTable, []error)
- func (l *CensusTableLoader) LoadAllThunk(keys []int) func() ([]*model.CensusTable, []error)
- func (l *CensusTableLoader) LoadThunk(key int) func() (*model.CensusTable, error)
- func (l *CensusTableLoader) Prime(key int, value *model.CensusTable) bool
- type CensusTableLoaderConfig
- type CensusValueWhereLoader
- func (l *CensusValueWhereLoader) Clear(key model.CensusValueParam)
- func (l *CensusValueWhereLoader) Load(key model.CensusValueParam) ([]*model.CensusValue, error)
- func (l *CensusValueWhereLoader) LoadAll(keys []model.CensusValueParam) ([][]*model.CensusValue, []error)
- func (l *CensusValueWhereLoader) LoadAllThunk(keys []model.CensusValueParam) func() ([][]*model.CensusValue, []error)
- func (l *CensusValueWhereLoader) LoadThunk(key model.CensusValueParam) func() ([]*model.CensusValue, error)
- func (l *CensusValueWhereLoader) Prime(key model.CensusValueParam, value []*model.CensusValue) bool
- type CensusValueWhereLoaderConfig
- type FeedInfoWhereLoader
- func (l *FeedInfoWhereLoader) Clear(key model.FeedInfoParam)
- func (l *FeedInfoWhereLoader) Load(key model.FeedInfoParam) ([]*model.FeedInfo, error)
- func (l *FeedInfoWhereLoader) LoadAll(keys []model.FeedInfoParam) ([][]*model.FeedInfo, []error)
- func (l *FeedInfoWhereLoader) LoadAllThunk(keys []model.FeedInfoParam) func() ([][]*model.FeedInfo, []error)
- func (l *FeedInfoWhereLoader) LoadThunk(key model.FeedInfoParam) func() ([]*model.FeedInfo, error)
- func (l *FeedInfoWhereLoader) Prime(key model.FeedInfoParam, value []*model.FeedInfo) bool
- type FeedInfoWhereLoaderConfig
- type FeedLoader
- func (l *FeedLoader) Clear(key int)
- func (l *FeedLoader) Load(key int) (*model.Feed, error)
- func (l *FeedLoader) LoadAll(keys []int) ([]*model.Feed, []error)
- func (l *FeedLoader) LoadAllThunk(keys []int) func() ([]*model.Feed, []error)
- func (l *FeedLoader) LoadThunk(key int) func() (*model.Feed, error)
- func (l *FeedLoader) Prime(key int, value *model.Feed) bool
- type FeedLoaderConfig
- type FeedStateLoader
- func (l *FeedStateLoader) Clear(key int)
- func (l *FeedStateLoader) Load(key int) (*model.FeedState, error)
- func (l *FeedStateLoader) LoadAll(keys []int) ([]*model.FeedState, []error)
- func (l *FeedStateLoader) LoadAllThunk(keys []int) func() ([]*model.FeedState, []error)
- func (l *FeedStateLoader) LoadThunk(key int) func() (*model.FeedState, error)
- func (l *FeedStateLoader) Prime(key int, value *model.FeedState) bool
- type FeedStateLoaderConfig
- type FeedVersionFileInfoWhereLoader
- func (l *FeedVersionFileInfoWhereLoader) Clear(key model.FeedVersionFileInfoParam)
- func (l *FeedVersionFileInfoWhereLoader) Load(key model.FeedVersionFileInfoParam) ([]*model.FeedVersionFileInfo, error)
- func (l *FeedVersionFileInfoWhereLoader) LoadAll(keys []model.FeedVersionFileInfoParam) ([][]*model.FeedVersionFileInfo, []error)
- func (l *FeedVersionFileInfoWhereLoader) LoadAllThunk(keys []model.FeedVersionFileInfoParam) func() ([][]*model.FeedVersionFileInfo, []error)
- func (l *FeedVersionFileInfoWhereLoader) LoadThunk(key model.FeedVersionFileInfoParam) func() ([]*model.FeedVersionFileInfo, error)
- func (l *FeedVersionFileInfoWhereLoader) Prime(key model.FeedVersionFileInfoParam, value []*model.FeedVersionFileInfo) bool
- type FeedVersionFileInfoWhereLoaderConfig
- type FeedVersionGtfsImportLoader
- func (l *FeedVersionGtfsImportLoader) Clear(key int)
- func (l *FeedVersionGtfsImportLoader) Load(key int) (*model.FeedVersionGtfsImport, error)
- func (l *FeedVersionGtfsImportLoader) LoadAll(keys []int) ([]*model.FeedVersionGtfsImport, []error)
- func (l *FeedVersionGtfsImportLoader) LoadAllThunk(keys []int) func() ([]*model.FeedVersionGtfsImport, []error)
- func (l *FeedVersionGtfsImportLoader) LoadThunk(key int) func() (*model.FeedVersionGtfsImport, error)
- func (l *FeedVersionGtfsImportLoader) Prime(key int, value *model.FeedVersionGtfsImport) bool
- type FeedVersionGtfsImportLoaderConfig
- type FeedVersionLoader
- func (l *FeedVersionLoader) Clear(key int)
- func (l *FeedVersionLoader) Load(key int) (*model.FeedVersion, error)
- func (l *FeedVersionLoader) LoadAll(keys []int) ([]*model.FeedVersion, []error)
- func (l *FeedVersionLoader) LoadAllThunk(keys []int) func() ([]*model.FeedVersion, []error)
- func (l *FeedVersionLoader) LoadThunk(key int) func() (*model.FeedVersion, error)
- func (l *FeedVersionLoader) Prime(key int, value *model.FeedVersion) bool
- type FeedVersionLoaderConfig
- type FeedVersionServiceLevelWhereLoader
- func (l *FeedVersionServiceLevelWhereLoader) Clear(key model.FeedVersionServiceLevelParam)
- func (l *FeedVersionServiceLevelWhereLoader) Load(key model.FeedVersionServiceLevelParam) ([]*model.FeedVersionServiceLevel, error)
- func (l *FeedVersionServiceLevelWhereLoader) LoadAll(keys []model.FeedVersionServiceLevelParam) ([][]*model.FeedVersionServiceLevel, []error)
- func (l *FeedVersionServiceLevelWhereLoader) LoadAllThunk(keys []model.FeedVersionServiceLevelParam) func() ([][]*model.FeedVersionServiceLevel, []error)
- func (l *FeedVersionServiceLevelWhereLoader) LoadThunk(key model.FeedVersionServiceLevelParam) func() ([]*model.FeedVersionServiceLevel, error)
- func (l *FeedVersionServiceLevelWhereLoader) Prime(key model.FeedVersionServiceLevelParam, value []*model.FeedVersionServiceLevel) bool
- type FeedVersionServiceLevelWhereLoaderConfig
- type FeedVersionWhereLoader
- func (l *FeedVersionWhereLoader) Clear(key model.FeedVersionParam)
- func (l *FeedVersionWhereLoader) Load(key model.FeedVersionParam) ([]*model.FeedVersion, error)
- func (l *FeedVersionWhereLoader) LoadAll(keys []model.FeedVersionParam) ([][]*model.FeedVersion, []error)
- func (l *FeedVersionWhereLoader) LoadAllThunk(keys []model.FeedVersionParam) func() ([][]*model.FeedVersion, []error)
- func (l *FeedVersionWhereLoader) LoadThunk(key model.FeedVersionParam) func() ([]*model.FeedVersion, error)
- func (l *FeedVersionWhereLoader) Prime(key model.FeedVersionParam, value []*model.FeedVersion) bool
- type FeedVersionWhereLoaderConfig
- type FrequencyWhereLoader
- func (l *FrequencyWhereLoader) Clear(key model.FrequencyParam)
- func (l *FrequencyWhereLoader) Load(key model.FrequencyParam) ([]*model.Frequency, error)
- func (l *FrequencyWhereLoader) LoadAll(keys []model.FrequencyParam) ([][]*model.Frequency, []error)
- func (l *FrequencyWhereLoader) LoadAllThunk(keys []model.FrequencyParam) func() ([][]*model.Frequency, []error)
- func (l *FrequencyWhereLoader) LoadThunk(key model.FrequencyParam) func() ([]*model.Frequency, error)
- func (l *FrequencyWhereLoader) Prime(key model.FrequencyParam, value []*model.Frequency) bool
- type FrequencyWhereLoaderConfig
- type LevelLoader
- func (l *LevelLoader) Clear(key int)
- func (l *LevelLoader) Load(key int) (*model.Level, error)
- func (l *LevelLoader) LoadAll(keys []int) ([]*model.Level, []error)
- func (l *LevelLoader) LoadAllThunk(keys []int) func() ([]*model.Level, []error)
- func (l *LevelLoader) LoadThunk(key int) func() (*model.Level, error)
- func (l *LevelLoader) Prime(key int, value *model.Level) bool
- type LevelLoaderConfig
- type OperatorWhereLoader
- func (l *OperatorWhereLoader) Clear(key model.OperatorParam)
- func (l *OperatorWhereLoader) Load(key model.OperatorParam) ([]*model.Operator, error)
- func (l *OperatorWhereLoader) LoadAll(keys []model.OperatorParam) ([][]*model.Operator, []error)
- func (l *OperatorWhereLoader) LoadAllThunk(keys []model.OperatorParam) func() ([][]*model.Operator, []error)
- func (l *OperatorWhereLoader) LoadThunk(key model.OperatorParam) func() ([]*model.Operator, error)
- func (l *OperatorWhereLoader) Prime(key model.OperatorParam, value []*model.Operator) bool
- type OperatorWhereLoaderConfig
- type PathwayWhereLoader
- func (l *PathwayWhereLoader) Clear(key model.PathwayParam)
- func (l *PathwayWhereLoader) Load(key model.PathwayParam) ([]*model.Pathway, error)
- func (l *PathwayWhereLoader) LoadAll(keys []model.PathwayParam) ([][]*model.Pathway, []error)
- func (l *PathwayWhereLoader) LoadAllThunk(keys []model.PathwayParam) func() ([][]*model.Pathway, []error)
- func (l *PathwayWhereLoader) LoadThunk(key model.PathwayParam) func() ([]*model.Pathway, error)
- func (l *PathwayWhereLoader) Prime(key model.PathwayParam, value []*model.Pathway) bool
- type PathwayWhereLoaderConfig
- type RouteGeometryWhereLoader
- func (l *RouteGeometryWhereLoader) Clear(key model.RouteGeometryParam)
- func (l *RouteGeometryWhereLoader) Load(key model.RouteGeometryParam) ([]*model.RouteGeometry, error)
- func (l *RouteGeometryWhereLoader) LoadAll(keys []model.RouteGeometryParam) ([][]*model.RouteGeometry, []error)
- func (l *RouteGeometryWhereLoader) LoadAllThunk(keys []model.RouteGeometryParam) func() ([][]*model.RouteGeometry, []error)
- func (l *RouteGeometryWhereLoader) LoadThunk(key model.RouteGeometryParam) func() ([]*model.RouteGeometry, error)
- func (l *RouteGeometryWhereLoader) Prime(key model.RouteGeometryParam, value []*model.RouteGeometry) bool
- type RouteGeometryWhereLoaderConfig
- type RouteHeadwayLoader
- func (l *RouteHeadwayLoader) Clear(key int)
- func (l *RouteHeadwayLoader) Load(key int) (*model.RouteHeadway, error)
- func (l *RouteHeadwayLoader) LoadAll(keys []int) ([]*model.RouteHeadway, []error)
- func (l *RouteHeadwayLoader) LoadAllThunk(keys []int) func() ([]*model.RouteHeadway, []error)
- func (l *RouteHeadwayLoader) LoadThunk(key int) func() (*model.RouteHeadway, error)
- func (l *RouteHeadwayLoader) Prime(key int, value *model.RouteHeadway) bool
- type RouteHeadwayLoaderConfig
- type RouteHeadwayWhereLoader
- func (l *RouteHeadwayWhereLoader) Clear(key model.RouteHeadwayParam)
- func (l *RouteHeadwayWhereLoader) Load(key model.RouteHeadwayParam) ([]*model.RouteHeadway, error)
- func (l *RouteHeadwayWhereLoader) LoadAll(keys []model.RouteHeadwayParam) ([][]*model.RouteHeadway, []error)
- func (l *RouteHeadwayWhereLoader) LoadAllThunk(keys []model.RouteHeadwayParam) func() ([][]*model.RouteHeadway, []error)
- func (l *RouteHeadwayWhereLoader) LoadThunk(key model.RouteHeadwayParam) func() ([]*model.RouteHeadway, error)
- func (l *RouteHeadwayWhereLoader) Prime(key model.RouteHeadwayParam, value []*model.RouteHeadway) bool
- type RouteHeadwayWhereLoaderConfig
- type RouteLoader
- func (l *RouteLoader) Clear(key int)
- func (l *RouteLoader) Load(key int) (*model.Route, error)
- func (l *RouteLoader) LoadAll(keys []int) ([]*model.Route, []error)
- func (l *RouteLoader) LoadAllThunk(keys []int) func() ([]*model.Route, []error)
- func (l *RouteLoader) LoadThunk(key int) func() (*model.Route, error)
- func (l *RouteLoader) Prime(key int, value *model.Route) bool
- type RouteLoaderConfig
- type RouteStopWhereLoader
- func (l *RouteStopWhereLoader) Clear(key model.RouteStopParam)
- func (l *RouteStopWhereLoader) Load(key model.RouteStopParam) ([]*model.RouteStop, error)
- func (l *RouteStopWhereLoader) LoadAll(keys []model.RouteStopParam) ([][]*model.RouteStop, []error)
- func (l *RouteStopWhereLoader) LoadAllThunk(keys []model.RouteStopParam) func() ([][]*model.RouteStop, []error)
- func (l *RouteStopWhereLoader) LoadThunk(key model.RouteStopParam) func() ([]*model.RouteStop, error)
- func (l *RouteStopWhereLoader) Prime(key model.RouteStopParam, value []*model.RouteStop) bool
- type RouteStopWhereLoaderConfig
- type RouteWhereLoader
- func (l *RouteWhereLoader) Clear(key model.RouteParam)
- func (l *RouteWhereLoader) Load(key model.RouteParam) ([]*model.Route, error)
- func (l *RouteWhereLoader) LoadAll(keys []model.RouteParam) ([][]*model.Route, []error)
- func (l *RouteWhereLoader) LoadAllThunk(keys []model.RouteParam) func() ([][]*model.Route, []error)
- func (l *RouteWhereLoader) LoadThunk(key model.RouteParam) func() ([]*model.Route, error)
- func (l *RouteWhereLoader) Prime(key model.RouteParam, value []*model.Route) bool
- type RouteWhereLoaderConfig
- type ShapeLoader
- func (l *ShapeLoader) Clear(key int)
- func (l *ShapeLoader) Load(key int) (*model.Shape, error)
- func (l *ShapeLoader) LoadAll(keys []int) ([]*model.Shape, []error)
- func (l *ShapeLoader) LoadAllThunk(keys []int) func() ([]*model.Shape, []error)
- func (l *ShapeLoader) LoadThunk(key int) func() (*model.Shape, error)
- func (l *ShapeLoader) Prime(key int, value *model.Shape) bool
- type ShapeLoaderConfig
- type StopLoader
- func (l *StopLoader) Clear(key int)
- func (l *StopLoader) Load(key int) (*model.Stop, error)
- func (l *StopLoader) LoadAll(keys []int) ([]*model.Stop, []error)
- func (l *StopLoader) LoadAllThunk(keys []int) func() ([]*model.Stop, []error)
- func (l *StopLoader) LoadThunk(key int) func() (*model.Stop, error)
- func (l *StopLoader) Prime(key int, value *model.Stop) bool
- type StopLoaderConfig
- type StopTimeWhereLoader
- func (l *StopTimeWhereLoader) Clear(key model.StopTimeParam)
- func (l *StopTimeWhereLoader) Load(key model.StopTimeParam) ([]*model.StopTime, error)
- func (l *StopTimeWhereLoader) LoadAll(keys []model.StopTimeParam) ([][]*model.StopTime, []error)
- func (l *StopTimeWhereLoader) LoadAllThunk(keys []model.StopTimeParam) func() ([][]*model.StopTime, []error)
- func (l *StopTimeWhereLoader) LoadThunk(key model.StopTimeParam) func() ([]*model.StopTime, error)
- func (l *StopTimeWhereLoader) Prime(key model.StopTimeParam, value []*model.StopTime) bool
- type StopTimeWhereLoaderConfig
- type StopWhereLoader
- func (l *StopWhereLoader) Clear(key model.StopParam)
- func (l *StopWhereLoader) Load(key model.StopParam) ([]*model.Stop, error)
- func (l *StopWhereLoader) LoadAll(keys []model.StopParam) ([][]*model.Stop, []error)
- func (l *StopWhereLoader) LoadAllThunk(keys []model.StopParam) func() ([][]*model.Stop, []error)
- func (l *StopWhereLoader) LoadThunk(key model.StopParam) func() ([]*model.Stop, error)
- func (l *StopWhereLoader) Prime(key model.StopParam, value []*model.Stop) bool
- type StopWhereLoaderConfig
- type TripLoader
- func (l *TripLoader) Clear(key int)
- func (l *TripLoader) Load(key int) (*model.Trip, error)
- func (l *TripLoader) LoadAll(keys []int) ([]*model.Trip, []error)
- func (l *TripLoader) LoadAllThunk(keys []int) func() ([]*model.Trip, []error)
- func (l *TripLoader) LoadThunk(key int) func() (*model.Trip, error)
- func (l *TripLoader) Prime(key int, value *model.Trip) bool
- type TripLoaderConfig
- type TripWhereLoader
- func (l *TripWhereLoader) Clear(key model.TripParam)
- func (l *TripWhereLoader) Load(key model.TripParam) ([]*model.Trip, error)
- func (l *TripWhereLoader) LoadAll(keys []model.TripParam) ([][]*model.Trip, []error)
- func (l *TripWhereLoader) LoadAllThunk(keys []model.TripParam) func() ([][]*model.Trip, []error)
- func (l *TripWhereLoader) LoadThunk(key model.TripParam) func() ([]*model.Trip, error)
- func (l *TripWhereLoader) Prime(key model.TripParam, value []*model.Trip) bool
- type TripWhereLoaderConfig
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AgencyLoader ¶
type AgencyLoader struct {
// contains filtered or unexported fields
}
AgencyLoader batches and caches requests
func NewAgencyLoader ¶
func NewAgencyLoader(config AgencyLoaderConfig) *AgencyLoader
NewAgencyLoader creates a new AgencyLoader given a fetch, wait, and maxBatch
func (*AgencyLoader) Clear ¶
func (l *AgencyLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*AgencyLoader) Load ¶
func (l *AgencyLoader) Load(key int) (*model.Agency, error)
Load an Agency by key; batching and caching will be applied automatically
func (*AgencyLoader) LoadAll ¶
func (l *AgencyLoader) LoadAll(keys []int) ([]*model.Agency, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*AgencyLoader) LoadAllThunk ¶
func (l *AgencyLoader) LoadAllThunk(keys []int) func() ([]*model.Agency, []error)
LoadAllThunk returns a function that when called will block waiting for the Agencys. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AgencyLoader) LoadThunk ¶
func (l *AgencyLoader) LoadThunk(key int) func() (*model.Agency, error)
LoadThunk returns a function that when called will block waiting for an Agency. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AgencyLoader) Prime ¶
func (l *AgencyLoader) Prime(key int, value *model.Agency) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key), then call loader.Prime(key, value).)
type AgencyLoaderConfig ¶
type AgencyLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Agency, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
AgencyLoaderConfig captures the config to create a new AgencyLoader
type AgencyPlaceWhereLoader ¶
type AgencyPlaceWhereLoader struct {
// contains filtered or unexported fields
}
AgencyPlaceWhereLoader batches and caches requests
func NewAgencyPlaceWhereLoader ¶
func NewAgencyPlaceWhereLoader(config AgencyPlaceWhereLoaderConfig) *AgencyPlaceWhereLoader
NewAgencyPlaceWhereLoader creates a new AgencyPlaceWhereLoader given a fetch, wait, and maxBatch
func (*AgencyPlaceWhereLoader) Clear ¶
func (l *AgencyPlaceWhereLoader) Clear(key model.AgencyPlaceParam)
Clear the value at key from the cache, if it exists
func (*AgencyPlaceWhereLoader) Load ¶
func (l *AgencyPlaceWhereLoader) Load(key model.AgencyPlaceParam) ([]*model.AgencyPlace, error)
Load an AgencyPlace by key; batching and caching will be applied automatically
func (*AgencyPlaceWhereLoader) LoadAll ¶
func (l *AgencyPlaceWhereLoader) LoadAll(keys []model.AgencyPlaceParam) ([][]*model.AgencyPlace, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*AgencyPlaceWhereLoader) LoadAllThunk ¶
func (l *AgencyPlaceWhereLoader) LoadAllThunk(keys []model.AgencyPlaceParam) func() ([][]*model.AgencyPlace, []error)
LoadAllThunk returns a function that when called will block waiting for the AgencyPlaces. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AgencyPlaceWhereLoader) LoadThunk ¶
func (l *AgencyPlaceWhereLoader) LoadThunk(key model.AgencyPlaceParam) func() ([]*model.AgencyPlace, error)
LoadThunk returns a function that when called will block waiting for an AgencyPlace. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AgencyPlaceWhereLoader) Prime ¶
func (l *AgencyPlaceWhereLoader) Prime(key model.AgencyPlaceParam, value []*model.AgencyPlace) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key), then call loader.Prime(key, value).)
type AgencyPlaceWhereLoaderConfig ¶
type AgencyPlaceWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.AgencyPlaceParam) ([][]*model.AgencyPlace, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
AgencyPlaceWhereLoaderConfig captures the config to create a new AgencyPlaceWhereLoader
type AgencyWhereLoader ¶
type AgencyWhereLoader struct {
// contains filtered or unexported fields
}
AgencyWhereLoader batches and caches requests
func NewAgencyWhereLoader ¶
func NewAgencyWhereLoader(config AgencyWhereLoaderConfig) *AgencyWhereLoader
NewAgencyWhereLoader creates a new AgencyWhereLoader given a fetch, wait, and maxBatch
func (*AgencyWhereLoader) Clear ¶
func (l *AgencyWhereLoader) Clear(key model.AgencyParam)
Clear the value at key from the cache, if it exists
func (*AgencyWhereLoader) Load ¶
func (l *AgencyWhereLoader) Load(key model.AgencyParam) ([]*model.Agency, error)
Load an Agency by key; batching and caching will be applied automatically
func (*AgencyWhereLoader) LoadAll ¶
func (l *AgencyWhereLoader) LoadAll(keys []model.AgencyParam) ([][]*model.Agency, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*AgencyWhereLoader) LoadAllThunk ¶
func (l *AgencyWhereLoader) LoadAllThunk(keys []model.AgencyParam) func() ([][]*model.Agency, []error)
LoadAllThunk returns a function that when called will block waiting for the Agencys. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AgencyWhereLoader) LoadThunk ¶
func (l *AgencyWhereLoader) LoadThunk(key model.AgencyParam) func() ([]*model.Agency, error)
LoadThunk returns a function that when called will block waiting for an Agency. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*AgencyWhereLoader) Prime ¶
func (l *AgencyWhereLoader) Prime(key model.AgencyParam, value []*model.Agency) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key), then call loader.Prime(key, value).)
type AgencyWhereLoaderConfig ¶
type AgencyWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.AgencyParam) ([][]*model.Agency, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
AgencyWhereLoaderConfig captures the config to create a new AgencyWhereLoader
type CalendarDateWhereLoader ¶
type CalendarDateWhereLoader struct {
// contains filtered or unexported fields
}
CalendarDateWhereLoader batches and caches requests
func NewCalendarDateWhereLoader ¶
func NewCalendarDateWhereLoader(config CalendarDateWhereLoaderConfig) *CalendarDateWhereLoader
NewCalendarDateWhereLoader creates a new CalendarDateWhereLoader given a fetch, wait, and maxBatch
func (*CalendarDateWhereLoader) Clear ¶
func (l *CalendarDateWhereLoader) Clear(key model.CalendarDateParam)
Clear the value at key from the cache, if it exists
func (*CalendarDateWhereLoader) Load ¶
func (l *CalendarDateWhereLoader) Load(key model.CalendarDateParam) ([]*model.CalendarDate, error)
Load a CalendarDate by key; batching and caching will be applied automatically
func (*CalendarDateWhereLoader) LoadAll ¶
func (l *CalendarDateWhereLoader) LoadAll(keys []model.CalendarDateParam) ([][]*model.CalendarDate, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*CalendarDateWhereLoader) LoadAllThunk ¶
func (l *CalendarDateWhereLoader) LoadAllThunk(keys []model.CalendarDateParam) func() ([][]*model.CalendarDate, []error)
LoadAllThunk returns a function that when called will block waiting for the CalendarDates. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CalendarDateWhereLoader) LoadThunk ¶
func (l *CalendarDateWhereLoader) LoadThunk(key model.CalendarDateParam) func() ([]*model.CalendarDate, error)
LoadThunk returns a function that when called will block waiting for a CalendarDate. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CalendarDateWhereLoader) Prime ¶
func (l *CalendarDateWhereLoader) Prime(key model.CalendarDateParam, value []*model.CalendarDate) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key), then call loader.Prime(key, value).)
type CalendarDateWhereLoaderConfig ¶
type CalendarDateWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.CalendarDateParam) ([][]*model.CalendarDate, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
CalendarDateWhereLoaderConfig captures the config to create a new CalendarDateWhereLoader
type CalendarLoader ¶
type CalendarLoader struct {
// contains filtered or unexported fields
}
CalendarLoader batches and caches requests
func NewCalendarLoader ¶
func NewCalendarLoader(config CalendarLoaderConfig) *CalendarLoader
NewCalendarLoader creates a new CalendarLoader given a fetch, wait, and maxBatch
func (*CalendarLoader) Clear ¶
func (l *CalendarLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*CalendarLoader) Load ¶
func (l *CalendarLoader) Load(key int) (*model.Calendar, error)
Load a Calendar by key; batching and caching will be applied automatically
func (*CalendarLoader) LoadAll ¶
func (l *CalendarLoader) LoadAll(keys []int) ([]*model.Calendar, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*CalendarLoader) LoadAllThunk ¶
func (l *CalendarLoader) LoadAllThunk(keys []int) func() ([]*model.Calendar, []error)
LoadAllThunk returns a function that when called will block waiting for the Calendars. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CalendarLoader) LoadThunk ¶
func (l *CalendarLoader) LoadThunk(key int) func() (*model.Calendar, error)
LoadThunk returns a function that when called will block waiting for a Calendar. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CalendarLoader) Prime ¶
func (l *CalendarLoader) Prime(key int, value *model.Calendar) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key), then call loader.Prime(key, value).)
type CalendarLoaderConfig ¶
type CalendarLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Calendar, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
CalendarLoaderConfig captures the config to create a new CalendarLoader
type CensusGeographyWhereLoader ¶
type CensusGeographyWhereLoader struct {
// contains filtered or unexported fields
}
CensusGeographyWhereLoader batches and caches requests
func NewCensusGeographyWhereLoader ¶
func NewCensusGeographyWhereLoader(config CensusGeographyWhereLoaderConfig) *CensusGeographyWhereLoader
NewCensusGeographyWhereLoader creates a new CensusGeographyWhereLoader given a fetch, wait, and maxBatch
func (*CensusGeographyWhereLoader) Clear ¶
func (l *CensusGeographyWhereLoader) Clear(key model.CensusGeographyParam)
Clear the value at key from the cache, if it exists
func (*CensusGeographyWhereLoader) Load ¶
func (l *CensusGeographyWhereLoader) Load(key model.CensusGeographyParam) ([]*model.CensusGeography, error)
Load a CensusGeography by key; batching and caching will be applied automatically
func (*CensusGeographyWhereLoader) LoadAll ¶
func (l *CensusGeographyWhereLoader) LoadAll(keys []model.CensusGeographyParam) ([][]*model.CensusGeography, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*CensusGeographyWhereLoader) LoadAllThunk ¶
func (l *CensusGeographyWhereLoader) LoadAllThunk(keys []model.CensusGeographyParam) func() ([][]*model.CensusGeography, []error)
LoadAllThunk returns a function that when called will block waiting for the CensusGeographys. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CensusGeographyWhereLoader) LoadThunk ¶
func (l *CensusGeographyWhereLoader) LoadThunk(key model.CensusGeographyParam) func() ([]*model.CensusGeography, error)
LoadThunk returns a function that when called will block waiting for a CensusGeography. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CensusGeographyWhereLoader) Prime ¶
func (l *CensusGeographyWhereLoader) Prime(key model.CensusGeographyParam, value []*model.CensusGeography) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type CensusGeographyWhereLoaderConfig ¶
type CensusGeographyWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.CensusGeographyParam) ([][]*model.CensusGeography, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
CensusGeographyWhereLoaderConfig captures the config to create a new CensusGeographyWhereLoader
type CensusTableLoader ¶
type CensusTableLoader struct {
// contains filtered or unexported fields
}
CensusTableLoader batches and caches requests
func NewCensusTableLoader ¶
func NewCensusTableLoader(config CensusTableLoaderConfig) *CensusTableLoader
NewCensusTableLoader creates a new CensusTableLoader given a fetch, wait, and maxBatch
func (*CensusTableLoader) Clear ¶
func (l *CensusTableLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*CensusTableLoader) Load ¶
func (l *CensusTableLoader) Load(key int) (*model.CensusTable, error)
Load a CensusTable by key, batching and caching will be applied automatically
func (*CensusTableLoader) LoadAll ¶
func (l *CensusTableLoader) LoadAll(keys []int) ([]*model.CensusTable, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*CensusTableLoader) LoadAllThunk ¶
func (l *CensusTableLoader) LoadAllThunk(keys []int) func() ([]*model.CensusTable, []error)
LoadAllThunk returns a function that when called will block waiting for the CensusTables. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CensusTableLoader) LoadThunk ¶
func (l *CensusTableLoader) LoadThunk(key int) func() (*model.CensusTable, error)
LoadThunk returns a function that when called will block waiting for a CensusTable. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CensusTableLoader) Prime ¶
func (l *CensusTableLoader) Prime(key int, value *model.CensusTable) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type CensusTableLoaderConfig ¶
type CensusTableLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.CensusTable, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
CensusTableLoaderConfig captures the config to create a new CensusTableLoader
type CensusValueWhereLoader ¶
type CensusValueWhereLoader struct {
// contains filtered or unexported fields
}
CensusValueWhereLoader batches and caches requests
func NewCensusValueWhereLoader ¶
func NewCensusValueWhereLoader(config CensusValueWhereLoaderConfig) *CensusValueWhereLoader
NewCensusValueWhereLoader creates a new CensusValueWhereLoader given a fetch, wait, and maxBatch
func (*CensusValueWhereLoader) Clear ¶
func (l *CensusValueWhereLoader) Clear(key model.CensusValueParam)
Clear the value at key from the cache, if it exists
func (*CensusValueWhereLoader) Load ¶
func (l *CensusValueWhereLoader) Load(key model.CensusValueParam) ([]*model.CensusValue, error)
Load a CensusValue by key, batching and caching will be applied automatically
func (*CensusValueWhereLoader) LoadAll ¶
func (l *CensusValueWhereLoader) LoadAll(keys []model.CensusValueParam) ([][]*model.CensusValue, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*CensusValueWhereLoader) LoadAllThunk ¶
func (l *CensusValueWhereLoader) LoadAllThunk(keys []model.CensusValueParam) func() ([][]*model.CensusValue, []error)
LoadAllThunk returns a function that when called will block waiting for the CensusValues. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CensusValueWhereLoader) LoadThunk ¶
func (l *CensusValueWhereLoader) LoadThunk(key model.CensusValueParam) func() ([]*model.CensusValue, error)
LoadThunk returns a function that when called will block waiting for a CensusValue. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*CensusValueWhereLoader) Prime ¶
func (l *CensusValueWhereLoader) Prime(key model.CensusValueParam, value []*model.CensusValue) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type CensusValueWhereLoaderConfig ¶
type CensusValueWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.CensusValueParam) ([][]*model.CensusValue, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
CensusValueWhereLoaderConfig captures the config to create a new CensusValueWhereLoader
type FeedInfoWhereLoader ¶
type FeedInfoWhereLoader struct {
// contains filtered or unexported fields
}
FeedInfoWhereLoader batches and caches requests
func NewFeedInfoWhereLoader ¶
func NewFeedInfoWhereLoader(config FeedInfoWhereLoaderConfig) *FeedInfoWhereLoader
NewFeedInfoWhereLoader creates a new FeedInfoWhereLoader given a fetch, wait, and maxBatch
func (*FeedInfoWhereLoader) Clear ¶
func (l *FeedInfoWhereLoader) Clear(key model.FeedInfoParam)
Clear the value at key from the cache, if it exists
func (*FeedInfoWhereLoader) Load ¶
func (l *FeedInfoWhereLoader) Load(key model.FeedInfoParam) ([]*model.FeedInfo, error)
Load a FeedInfo by key, batching and caching will be applied automatically
func (*FeedInfoWhereLoader) LoadAll ¶
func (l *FeedInfoWhereLoader) LoadAll(keys []model.FeedInfoParam) ([][]*model.FeedInfo, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedInfoWhereLoader) LoadAllThunk ¶
func (l *FeedInfoWhereLoader) LoadAllThunk(keys []model.FeedInfoParam) func() ([][]*model.FeedInfo, []error)
LoadAllThunk returns a function that when called will block waiting for the FeedInfos. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedInfoWhereLoader) LoadThunk ¶
func (l *FeedInfoWhereLoader) LoadThunk(key model.FeedInfoParam) func() ([]*model.FeedInfo, error)
LoadThunk returns a function that when called will block waiting for a FeedInfo. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedInfoWhereLoader) Prime ¶
func (l *FeedInfoWhereLoader) Prime(key model.FeedInfoParam, value []*model.FeedInfo) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FeedInfoWhereLoaderConfig ¶
type FeedInfoWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.FeedInfoParam) ([][]*model.FeedInfo, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedInfoWhereLoaderConfig captures the config to create a new FeedInfoWhereLoader
type FeedLoader ¶
type FeedLoader struct {
// contains filtered or unexported fields
}
FeedLoader batches and caches requests
func NewFeedLoader ¶
func NewFeedLoader(config FeedLoaderConfig) *FeedLoader
NewFeedLoader creates a new FeedLoader given a fetch, wait, and maxBatch
func (*FeedLoader) Clear ¶
func (l *FeedLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*FeedLoader) Load ¶
func (l *FeedLoader) Load(key int) (*model.Feed, error)
Load a Feed by key, batching and caching will be applied automatically
func (*FeedLoader) LoadAll ¶
func (l *FeedLoader) LoadAll(keys []int) ([]*model.Feed, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedLoader) LoadAllThunk ¶
func (l *FeedLoader) LoadAllThunk(keys []int) func() ([]*model.Feed, []error)
LoadAllThunk returns a function that when called will block waiting for the Feeds. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedLoader) LoadThunk ¶
func (l *FeedLoader) LoadThunk(key int) func() (*model.Feed, error)
LoadThunk returns a function that when called will block waiting for a Feed. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
type FeedLoaderConfig ¶
type FeedLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Feed, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedLoaderConfig captures the config to create a new FeedLoader
type FeedStateLoader ¶
type FeedStateLoader struct {
// contains filtered or unexported fields
}
FeedStateLoader batches and caches requests
func NewFeedStateLoader ¶
func NewFeedStateLoader(config FeedStateLoaderConfig) *FeedStateLoader
NewFeedStateLoader creates a new FeedStateLoader given a fetch, wait, and maxBatch
func (*FeedStateLoader) Clear ¶
func (l *FeedStateLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*FeedStateLoader) Load ¶
func (l *FeedStateLoader) Load(key int) (*model.FeedState, error)
Load a FeedState by key, batching and caching will be applied automatically
func (*FeedStateLoader) LoadAll ¶
func (l *FeedStateLoader) LoadAll(keys []int) ([]*model.FeedState, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedStateLoader) LoadAllThunk ¶
func (l *FeedStateLoader) LoadAllThunk(keys []int) func() ([]*model.FeedState, []error)
LoadAllThunk returns a function that when called will block waiting for the FeedStates. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedStateLoader) LoadThunk ¶
func (l *FeedStateLoader) LoadThunk(key int) func() (*model.FeedState, error)
LoadThunk returns a function that when called will block waiting for a FeedState. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedStateLoader) Prime ¶
func (l *FeedStateLoader) Prime(key int, value *model.FeedState) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FeedStateLoaderConfig ¶
type FeedStateLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.FeedState, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedStateLoaderConfig captures the config to create a new FeedStateLoader
type FeedVersionFileInfoWhereLoader ¶
type FeedVersionFileInfoWhereLoader struct {
// contains filtered or unexported fields
}
FeedVersionFileInfoWhereLoader batches and caches requests
func NewFeedVersionFileInfoWhereLoader ¶
func NewFeedVersionFileInfoWhereLoader(config FeedVersionFileInfoWhereLoaderConfig) *FeedVersionFileInfoWhereLoader
NewFeedVersionFileInfoWhereLoader creates a new FeedVersionFileInfoWhereLoader given a fetch, wait, and maxBatch
func (*FeedVersionFileInfoWhereLoader) Clear ¶
func (l *FeedVersionFileInfoWhereLoader) Clear(key model.FeedVersionFileInfoParam)
Clear the value at key from the cache, if it exists
func (*FeedVersionFileInfoWhereLoader) Load ¶
func (l *FeedVersionFileInfoWhereLoader) Load(key model.FeedVersionFileInfoParam) ([]*model.FeedVersionFileInfo, error)
Load a FeedVersionFileInfo by key, batching and caching will be applied automatically
func (*FeedVersionFileInfoWhereLoader) LoadAll ¶
func (l *FeedVersionFileInfoWhereLoader) LoadAll(keys []model.FeedVersionFileInfoParam) ([][]*model.FeedVersionFileInfo, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedVersionFileInfoWhereLoader) LoadAllThunk ¶
func (l *FeedVersionFileInfoWhereLoader) LoadAllThunk(keys []model.FeedVersionFileInfoParam) func() ([][]*model.FeedVersionFileInfo, []error)
LoadAllThunk returns a function that when called will block waiting for the FeedVersionFileInfos. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionFileInfoWhereLoader) LoadThunk ¶
func (l *FeedVersionFileInfoWhereLoader) LoadThunk(key model.FeedVersionFileInfoParam) func() ([]*model.FeedVersionFileInfo, error)
LoadThunk returns a function that when called will block waiting for a FeedVersionFileInfo. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionFileInfoWhereLoader) Prime ¶
func (l *FeedVersionFileInfoWhereLoader) Prime(key model.FeedVersionFileInfoParam, value []*model.FeedVersionFileInfo) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FeedVersionFileInfoWhereLoaderConfig ¶
type FeedVersionFileInfoWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.FeedVersionFileInfoParam) ([][]*model.FeedVersionFileInfo, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedVersionFileInfoWhereLoaderConfig captures the config to create a new FeedVersionFileInfoWhereLoader
type FeedVersionGtfsImportLoader ¶
type FeedVersionGtfsImportLoader struct {
// contains filtered or unexported fields
}
FeedVersionGtfsImportLoader batches and caches requests
func NewFeedVersionGtfsImportLoader ¶
func NewFeedVersionGtfsImportLoader(config FeedVersionGtfsImportLoaderConfig) *FeedVersionGtfsImportLoader
NewFeedVersionGtfsImportLoader creates a new FeedVersionGtfsImportLoader given a fetch, wait, and maxBatch
func (*FeedVersionGtfsImportLoader) Clear ¶
func (l *FeedVersionGtfsImportLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*FeedVersionGtfsImportLoader) Load ¶
func (l *FeedVersionGtfsImportLoader) Load(key int) (*model.FeedVersionGtfsImport, error)
Load a FeedVersionGtfsImport by key, batching and caching will be applied automatically
func (*FeedVersionGtfsImportLoader) LoadAll ¶
func (l *FeedVersionGtfsImportLoader) LoadAll(keys []int) ([]*model.FeedVersionGtfsImport, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedVersionGtfsImportLoader) LoadAllThunk ¶
func (l *FeedVersionGtfsImportLoader) LoadAllThunk(keys []int) func() ([]*model.FeedVersionGtfsImport, []error)
LoadAllThunk returns a function that when called will block waiting for the FeedVersionGtfsImports. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionGtfsImportLoader) LoadThunk ¶
func (l *FeedVersionGtfsImportLoader) LoadThunk(key int) func() (*model.FeedVersionGtfsImport, error)
LoadThunk returns a function that when called will block waiting for a FeedVersionGtfsImport. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionGtfsImportLoader) Prime ¶
func (l *FeedVersionGtfsImportLoader) Prime(key int, value *model.FeedVersionGtfsImport) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FeedVersionGtfsImportLoaderConfig ¶
type FeedVersionGtfsImportLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.FeedVersionGtfsImport, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedVersionGtfsImportLoaderConfig captures the config to create a new FeedVersionGtfsImportLoader
type FeedVersionLoader ¶
type FeedVersionLoader struct {
// contains filtered or unexported fields
}
FeedVersionLoader batches and caches requests
func NewFeedVersionLoader ¶
func NewFeedVersionLoader(config FeedVersionLoaderConfig) *FeedVersionLoader
NewFeedVersionLoader creates a new FeedVersionLoader given a fetch, wait, and maxBatch
func (*FeedVersionLoader) Clear ¶
func (l *FeedVersionLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*FeedVersionLoader) Load ¶
func (l *FeedVersionLoader) Load(key int) (*model.FeedVersion, error)
Load a FeedVersion by key, batching and caching will be applied automatically
func (*FeedVersionLoader) LoadAll ¶
func (l *FeedVersionLoader) LoadAll(keys []int) ([]*model.FeedVersion, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedVersionLoader) LoadAllThunk ¶
func (l *FeedVersionLoader) LoadAllThunk(keys []int) func() ([]*model.FeedVersion, []error)
LoadAllThunk returns a function that when called will block waiting for the FeedVersions. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionLoader) LoadThunk ¶
func (l *FeedVersionLoader) LoadThunk(key int) func() (*model.FeedVersion, error)
LoadThunk returns a function that when called will block waiting for a FeedVersion. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionLoader) Prime ¶
func (l *FeedVersionLoader) Prime(key int, value *model.FeedVersion) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FeedVersionLoaderConfig ¶
type FeedVersionLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.FeedVersion, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedVersionLoaderConfig captures the config to create a new FeedVersionLoader
type FeedVersionServiceLevelWhereLoader ¶
type FeedVersionServiceLevelWhereLoader struct {
// contains filtered or unexported fields
}
FeedVersionServiceLevelWhereLoader batches and caches requests
func NewFeedVersionServiceLevelWhereLoader ¶
func NewFeedVersionServiceLevelWhereLoader(config FeedVersionServiceLevelWhereLoaderConfig) *FeedVersionServiceLevelWhereLoader
NewFeedVersionServiceLevelWhereLoader creates a new FeedVersionServiceLevelWhereLoader given a fetch, wait, and maxBatch
func (*FeedVersionServiceLevelWhereLoader) Clear ¶
func (l *FeedVersionServiceLevelWhereLoader) Clear(key model.FeedVersionServiceLevelParam)
Clear the value at key from the cache, if it exists
func (*FeedVersionServiceLevelWhereLoader) Load ¶
func (l *FeedVersionServiceLevelWhereLoader) Load(key model.FeedVersionServiceLevelParam) ([]*model.FeedVersionServiceLevel, error)
Load a FeedVersionServiceLevel by key, batching and caching will be applied automatically
func (*FeedVersionServiceLevelWhereLoader) LoadAll ¶
func (l *FeedVersionServiceLevelWhereLoader) LoadAll(keys []model.FeedVersionServiceLevelParam) ([][]*model.FeedVersionServiceLevel, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedVersionServiceLevelWhereLoader) LoadAllThunk ¶
func (l *FeedVersionServiceLevelWhereLoader) LoadAllThunk(keys []model.FeedVersionServiceLevelParam) func() ([][]*model.FeedVersionServiceLevel, []error)
LoadAllThunk returns a function that when called will block waiting for the FeedVersionServiceLevels. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionServiceLevelWhereLoader) LoadThunk ¶
func (l *FeedVersionServiceLevelWhereLoader) LoadThunk(key model.FeedVersionServiceLevelParam) func() ([]*model.FeedVersionServiceLevel, error)
LoadThunk returns a function that when called will block waiting for a FeedVersionServiceLevel. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionServiceLevelWhereLoader) Prime ¶
func (l *FeedVersionServiceLevelWhereLoader) Prime(key model.FeedVersionServiceLevelParam, value []*model.FeedVersionServiceLevel) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FeedVersionServiceLevelWhereLoaderConfig ¶
type FeedVersionServiceLevelWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.FeedVersionServiceLevelParam) ([][]*model.FeedVersionServiceLevel, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedVersionServiceLevelWhereLoaderConfig captures the config to create a new FeedVersionServiceLevelWhereLoader
type FeedVersionWhereLoader ¶
type FeedVersionWhereLoader struct {
// contains filtered or unexported fields
}
FeedVersionWhereLoader batches and caches requests
func NewFeedVersionWhereLoader ¶
func NewFeedVersionWhereLoader(config FeedVersionWhereLoaderConfig) *FeedVersionWhereLoader
NewFeedVersionWhereLoader creates a new FeedVersionWhereLoader given a fetch, wait, and maxBatch
func (*FeedVersionWhereLoader) Clear ¶
func (l *FeedVersionWhereLoader) Clear(key model.FeedVersionParam)
Clear the value at key from the cache, if it exists
func (*FeedVersionWhereLoader) Load ¶
func (l *FeedVersionWhereLoader) Load(key model.FeedVersionParam) ([]*model.FeedVersion, error)
Load a FeedVersion by key, batching and caching will be applied automatically
func (*FeedVersionWhereLoader) LoadAll ¶
func (l *FeedVersionWhereLoader) LoadAll(keys []model.FeedVersionParam) ([][]*model.FeedVersion, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FeedVersionWhereLoader) LoadAllThunk ¶
func (l *FeedVersionWhereLoader) LoadAllThunk(keys []model.FeedVersionParam) func() ([][]*model.FeedVersion, []error)
LoadAllThunk returns a function that when called will block waiting for the FeedVersions. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionWhereLoader) LoadThunk ¶
func (l *FeedVersionWhereLoader) LoadThunk(key model.FeedVersionParam) func() ([]*model.FeedVersion, error)
LoadThunk returns a function that when called will block waiting for a FeedVersion. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FeedVersionWhereLoader) Prime ¶
func (l *FeedVersionWhereLoader) Prime(key model.FeedVersionParam, value []*model.FeedVersion) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FeedVersionWhereLoaderConfig ¶
type FeedVersionWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.FeedVersionParam) ([][]*model.FeedVersion, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FeedVersionWhereLoaderConfig captures the config to create a new FeedVersionWhereLoader
type FrequencyWhereLoader ¶
type FrequencyWhereLoader struct {
// contains filtered or unexported fields
}
FrequencyWhereLoader batches and caches requests
func NewFrequencyWhereLoader ¶
func NewFrequencyWhereLoader(config FrequencyWhereLoaderConfig) *FrequencyWhereLoader
NewFrequencyWhereLoader creates a new FrequencyWhereLoader given a fetch, wait, and maxBatch
func (*FrequencyWhereLoader) Clear ¶
func (l *FrequencyWhereLoader) Clear(key model.FrequencyParam)
Clear the value at key from the cache, if it exists
func (*FrequencyWhereLoader) Load ¶
func (l *FrequencyWhereLoader) Load(key model.FrequencyParam) ([]*model.Frequency, error)
Load a Frequency by key, batching and caching will be applied automatically
func (*FrequencyWhereLoader) LoadAll ¶
func (l *FrequencyWhereLoader) LoadAll(keys []model.FrequencyParam) ([][]*model.Frequency, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*FrequencyWhereLoader) LoadAllThunk ¶
func (l *FrequencyWhereLoader) LoadAllThunk(keys []model.FrequencyParam) func() ([][]*model.Frequency, []error)
LoadAllThunk returns a function that when called will block waiting for the Frequencys. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FrequencyWhereLoader) LoadThunk ¶
func (l *FrequencyWhereLoader) LoadThunk(key model.FrequencyParam) func() ([]*model.Frequency, error)
LoadThunk returns a function that when called will block waiting for a Frequency. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*FrequencyWhereLoader) Prime ¶
func (l *FrequencyWhereLoader) Prime(key model.FrequencyParam, value []*model.Frequency) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type FrequencyWhereLoaderConfig ¶
type FrequencyWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.FrequencyParam) ([][]*model.Frequency, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
FrequencyWhereLoaderConfig captures the config to create a new FrequencyWhereLoader
type LevelLoader ¶
type LevelLoader struct {
// contains filtered or unexported fields
}
LevelLoader batches and caches requests
func NewLevelLoader ¶
func NewLevelLoader(config LevelLoaderConfig) *LevelLoader
NewLevelLoader creates a new LevelLoader given a fetch, wait, and maxBatch
func (*LevelLoader) Clear ¶
func (l *LevelLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*LevelLoader) Load ¶
func (l *LevelLoader) Load(key int) (*model.Level, error)
Load a Level by key, batching and caching will be applied automatically
func (*LevelLoader) LoadAll ¶
func (l *LevelLoader) LoadAll(keys []int) ([]*model.Level, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*LevelLoader) LoadAllThunk ¶
func (l *LevelLoader) LoadAllThunk(keys []int) func() ([]*model.Level, []error)
LoadAllThunk returns a function that when called will block waiting for the Levels. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*LevelLoader) LoadThunk ¶
func (l *LevelLoader) LoadThunk(key int) func() (*model.Level, error)
LoadThunk returns a function that when called will block waiting for a Level. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*LevelLoader) Prime ¶
func (l *LevelLoader) Prime(key int, value *model.Level) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type LevelLoaderConfig ¶
type LevelLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Level, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
LevelLoaderConfig captures the config to create a new LevelLoader
type OperatorWhereLoader ¶
type OperatorWhereLoader struct {
// contains filtered or unexported fields
}
OperatorWhereLoader batches and caches requests
func NewOperatorWhereLoader ¶
func NewOperatorWhereLoader(config OperatorWhereLoaderConfig) *OperatorWhereLoader
NewOperatorWhereLoader creates a new OperatorWhereLoader given a fetch, wait, and maxBatch
func (*OperatorWhereLoader) Clear ¶
func (l *OperatorWhereLoader) Clear(key model.OperatorParam)
Clear the value at key from the cache, if it exists
func (*OperatorWhereLoader) Load ¶
func (l *OperatorWhereLoader) Load(key model.OperatorParam) ([]*model.Operator, error)
Load an Operator by key, batching and caching will be applied automatically
func (*OperatorWhereLoader) LoadAll ¶
func (l *OperatorWhereLoader) LoadAll(keys []model.OperatorParam) ([][]*model.Operator, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*OperatorWhereLoader) LoadAllThunk ¶
func (l *OperatorWhereLoader) LoadAllThunk(keys []model.OperatorParam) func() ([][]*model.Operator, []error)
LoadAllThunk returns a function that when called will block waiting for Operators. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*OperatorWhereLoader) LoadThunk ¶
func (l *OperatorWhereLoader) LoadThunk(key model.OperatorParam) func() ([]*model.Operator, error)
LoadThunk returns a function that when called will block waiting for a Operator. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*OperatorWhereLoader) Prime ¶
func (l *OperatorWhereLoader) Prime(key model.OperatorParam, value []*model.Operator) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type OperatorWhereLoaderConfig ¶
type OperatorWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.OperatorParam) ([][]*model.Operator, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
OperatorWhereLoaderConfig captures the config to create a new OperatorWhereLoader
type PathwayWhereLoader ¶
type PathwayWhereLoader struct {
// contains filtered or unexported fields
}
PathwayWhereLoader batches and caches requests
func NewPathwayWhereLoader ¶
func NewPathwayWhereLoader(config PathwayWhereLoaderConfig) *PathwayWhereLoader
NewPathwayWhereLoader creates a new PathwayWhereLoader given a fetch, wait, and maxBatch
func (*PathwayWhereLoader) Clear ¶
func (l *PathwayWhereLoader) Clear(key model.PathwayParam)
Clear the value at key from the cache, if it exists
func (*PathwayWhereLoader) Load ¶
func (l *PathwayWhereLoader) Load(key model.PathwayParam) ([]*model.Pathway, error)
Load a Pathway by key, batching and caching will be applied automatically
func (*PathwayWhereLoader) LoadAll ¶
func (l *PathwayWhereLoader) LoadAll(keys []model.PathwayParam) ([][]*model.Pathway, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*PathwayWhereLoader) LoadAllThunk ¶
func (l *PathwayWhereLoader) LoadAllThunk(keys []model.PathwayParam) func() ([][]*model.Pathway, []error)
LoadAllThunk returns a function that when called will block waiting for Pathways. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*PathwayWhereLoader) LoadThunk ¶
func (l *PathwayWhereLoader) LoadThunk(key model.PathwayParam) func() ([]*model.Pathway, error)
LoadThunk returns a function that when called will block waiting for a Pathway. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*PathwayWhereLoader) Prime ¶
func (l *PathwayWhereLoader) Prime(key model.PathwayParam, value []*model.Pathway) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type PathwayWhereLoaderConfig ¶
type PathwayWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.PathwayParam) ([][]*model.Pathway, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
PathwayWhereLoaderConfig captures the config to create a new PathwayWhereLoader
type RouteGeometryWhereLoader ¶
type RouteGeometryWhereLoader struct {
// contains filtered or unexported fields
}
RouteGeometryWhereLoader batches and caches requests
func NewRouteGeometryWhereLoader ¶
func NewRouteGeometryWhereLoader(config RouteGeometryWhereLoaderConfig) *RouteGeometryWhereLoader
NewRouteGeometryWhereLoader creates a new RouteGeometryWhereLoader given a fetch, wait, and maxBatch
func (*RouteGeometryWhereLoader) Clear ¶
func (l *RouteGeometryWhereLoader) Clear(key model.RouteGeometryParam)
Clear the value at key from the cache, if it exists
func (*RouteGeometryWhereLoader) Load ¶
func (l *RouteGeometryWhereLoader) Load(key model.RouteGeometryParam) ([]*model.RouteGeometry, error)
Load a RouteGeometry by key, batching and caching will be applied automatically
func (*RouteGeometryWhereLoader) LoadAll ¶
func (l *RouteGeometryWhereLoader) LoadAll(keys []model.RouteGeometryParam) ([][]*model.RouteGeometry, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*RouteGeometryWhereLoader) LoadAllThunk ¶
func (l *RouteGeometryWhereLoader) LoadAllThunk(keys []model.RouteGeometryParam) func() ([][]*model.RouteGeometry, []error)
LoadAllThunk returns a function that when called will block waiting for RouteGeometries. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteGeometryWhereLoader) LoadThunk ¶
func (l *RouteGeometryWhereLoader) LoadThunk(key model.RouteGeometryParam) func() ([]*model.RouteGeometry, error)
LoadThunk returns a function that when called will block waiting for a RouteGeometry. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteGeometryWhereLoader) Prime ¶
func (l *RouteGeometryWhereLoader) Prime(key model.RouteGeometryParam, value []*model.RouteGeometry) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type RouteGeometryWhereLoaderConfig ¶
type RouteGeometryWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.RouteGeometryParam) ([][]*model.RouteGeometry, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
RouteGeometryWhereLoaderConfig captures the config to create a new RouteGeometryWhereLoader
type RouteHeadwayLoader ¶
type RouteHeadwayLoader struct {
// contains filtered or unexported fields
}
RouteHeadwayLoader batches and caches requests
func NewRouteHeadwayLoader ¶
func NewRouteHeadwayLoader(config RouteHeadwayLoaderConfig) *RouteHeadwayLoader
NewRouteHeadwayLoader creates a new RouteHeadwayLoader given a fetch, wait, and maxBatch
func (*RouteHeadwayLoader) Clear ¶
func (l *RouteHeadwayLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*RouteHeadwayLoader) Load ¶
func (l *RouteHeadwayLoader) Load(key int) (*model.RouteHeadway, error)
Load a RouteHeadway by key, batching and caching will be applied automatically
func (*RouteHeadwayLoader) LoadAll ¶
func (l *RouteHeadwayLoader) LoadAll(keys []int) ([]*model.RouteHeadway, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*RouteHeadwayLoader) LoadAllThunk ¶
func (l *RouteHeadwayLoader) LoadAllThunk(keys []int) func() ([]*model.RouteHeadway, []error)
LoadAllThunk returns a function that when called will block waiting for RouteHeadways. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteHeadwayLoader) LoadThunk ¶
func (l *RouteHeadwayLoader) LoadThunk(key int) func() (*model.RouteHeadway, error)
LoadThunk returns a function that when called will block waiting for a RouteHeadway. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteHeadwayLoader) Prime ¶
func (l *RouteHeadwayLoader) Prime(key int, value *model.RouteHeadway) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type RouteHeadwayLoaderConfig ¶
type RouteHeadwayLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.RouteHeadway, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
RouteHeadwayLoaderConfig captures the config to create a new RouteHeadwayLoader
type RouteHeadwayWhereLoader ¶
type RouteHeadwayWhereLoader struct {
// contains filtered or unexported fields
}
RouteHeadwayWhereLoader batches and caches requests
func NewRouteHeadwayWhereLoader ¶
func NewRouteHeadwayWhereLoader(config RouteHeadwayWhereLoaderConfig) *RouteHeadwayWhereLoader
NewRouteHeadwayWhereLoader creates a new RouteHeadwayWhereLoader given a fetch, wait, and maxBatch
func (*RouteHeadwayWhereLoader) Clear ¶
func (l *RouteHeadwayWhereLoader) Clear(key model.RouteHeadwayParam)
Clear the value at key from the cache, if it exists
func (*RouteHeadwayWhereLoader) Load ¶
func (l *RouteHeadwayWhereLoader) Load(key model.RouteHeadwayParam) ([]*model.RouteHeadway, error)
Load a RouteHeadway by key, batching and caching will be applied automatically
func (*RouteHeadwayWhereLoader) LoadAll ¶
func (l *RouteHeadwayWhereLoader) LoadAll(keys []model.RouteHeadwayParam) ([][]*model.RouteHeadway, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*RouteHeadwayWhereLoader) LoadAllThunk ¶
func (l *RouteHeadwayWhereLoader) LoadAllThunk(keys []model.RouteHeadwayParam) func() ([][]*model.RouteHeadway, []error)
LoadAllThunk returns a function that when called will block waiting for RouteHeadways. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteHeadwayWhereLoader) LoadThunk ¶
func (l *RouteHeadwayWhereLoader) LoadThunk(key model.RouteHeadwayParam) func() ([]*model.RouteHeadway, error)
LoadThunk returns a function that when called will block waiting for a RouteHeadway. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteHeadwayWhereLoader) Prime ¶
func (l *RouteHeadwayWhereLoader) Prime(key model.RouteHeadwayParam, value []*model.RouteHeadway) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type RouteHeadwayWhereLoaderConfig ¶
type RouteHeadwayWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.RouteHeadwayParam) ([][]*model.RouteHeadway, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
RouteHeadwayWhereLoaderConfig captures the config to create a new RouteHeadwayWhereLoader
type RouteLoader ¶
type RouteLoader struct {
// contains filtered or unexported fields
}
RouteLoader batches and caches requests
func NewRouteLoader ¶
func NewRouteLoader(config RouteLoaderConfig) *RouteLoader
NewRouteLoader creates a new RouteLoader given a fetch, wait, and maxBatch
func (*RouteLoader) Clear ¶
func (l *RouteLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*RouteLoader) Load ¶
func (l *RouteLoader) Load(key int) (*model.Route, error)
Load a Route by key, batching and caching will be applied automatically
func (*RouteLoader) LoadAll ¶
func (l *RouteLoader) LoadAll(keys []int) ([]*model.Route, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*RouteLoader) LoadAllThunk ¶
func (l *RouteLoader) LoadAllThunk(keys []int) func() ([]*model.Route, []error)
LoadAllThunk returns a function that when called will block waiting for Routes. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteLoader) LoadThunk ¶
func (l *RouteLoader) LoadThunk(key int) func() (*model.Route, error)
LoadThunk returns a function that when called will block waiting for a Route. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteLoader) Prime ¶
func (l *RouteLoader) Prime(key int, value *model.Route) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type RouteLoaderConfig ¶
type RouteLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Route, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
RouteLoaderConfig captures the config to create a new RouteLoader
type RouteStopWhereLoader ¶
type RouteStopWhereLoader struct {
// contains filtered or unexported fields
}
RouteStopWhereLoader batches and caches requests
func NewRouteStopWhereLoader ¶
func NewRouteStopWhereLoader(config RouteStopWhereLoaderConfig) *RouteStopWhereLoader
NewRouteStopWhereLoader creates a new RouteStopWhereLoader given a fetch, wait, and maxBatch
func (*RouteStopWhereLoader) Clear ¶
func (l *RouteStopWhereLoader) Clear(key model.RouteStopParam)
Clear the value at key from the cache, if it exists
func (*RouteStopWhereLoader) Load ¶
func (l *RouteStopWhereLoader) Load(key model.RouteStopParam) ([]*model.RouteStop, error)
Load a RouteStop by key, batching and caching will be applied automatically
func (*RouteStopWhereLoader) LoadAll ¶
func (l *RouteStopWhereLoader) LoadAll(keys []model.RouteStopParam) ([][]*model.RouteStop, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*RouteStopWhereLoader) LoadAllThunk ¶
func (l *RouteStopWhereLoader) LoadAllThunk(keys []model.RouteStopParam) func() ([][]*model.RouteStop, []error)
LoadAllThunk returns a function that when called will block waiting for RouteStops. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteStopWhereLoader) LoadThunk ¶
func (l *RouteStopWhereLoader) LoadThunk(key model.RouteStopParam) func() ([]*model.RouteStop, error)
LoadThunk returns a function that when called will block waiting for a RouteStop. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteStopWhereLoader) Prime ¶
func (l *RouteStopWhereLoader) Prime(key model.RouteStopParam, value []*model.RouteStop) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type RouteStopWhereLoaderConfig ¶
type RouteStopWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.RouteStopParam) ([][]*model.RouteStop, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
RouteStopWhereLoaderConfig captures the config to create a new RouteStopWhereLoader
type RouteWhereLoader ¶
type RouteWhereLoader struct {
// contains filtered or unexported fields
}
RouteWhereLoader batches and caches requests
func NewRouteWhereLoader ¶
func NewRouteWhereLoader(config RouteWhereLoaderConfig) *RouteWhereLoader
NewRouteWhereLoader creates a new RouteWhereLoader given a fetch, wait, and maxBatch
func (*RouteWhereLoader) Clear ¶
func (l *RouteWhereLoader) Clear(key model.RouteParam)
Clear the value at key from the cache, if it exists
func (*RouteWhereLoader) Load ¶
func (l *RouteWhereLoader) Load(key model.RouteParam) ([]*model.Route, error)
Load a Route by key, batching and caching will be applied automatically
func (*RouteWhereLoader) LoadAll ¶
func (l *RouteWhereLoader) LoadAll(keys []model.RouteParam) ([][]*model.Route, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*RouteWhereLoader) LoadAllThunk ¶
func (l *RouteWhereLoader) LoadAllThunk(keys []model.RouteParam) func() ([][]*model.Route, []error)
LoadAllThunk returns a function that when called will block waiting for Routes. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteWhereLoader) LoadThunk ¶
func (l *RouteWhereLoader) LoadThunk(key model.RouteParam) func() ([]*model.Route, error)
LoadThunk returns a function that when called will block waiting for a Route. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*RouteWhereLoader) Prime ¶
func (l *RouteWhereLoader) Prime(key model.RouteParam, value []*model.Route) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type RouteWhereLoaderConfig ¶
type RouteWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.RouteParam) ([][]*model.Route, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
RouteWhereLoaderConfig captures the config to create a new RouteWhereLoader
type ShapeLoader ¶
type ShapeLoader struct {
// contains filtered or unexported fields
}
ShapeLoader batches and caches requests
func NewShapeLoader ¶
func NewShapeLoader(config ShapeLoaderConfig) *ShapeLoader
NewShapeLoader creates a new ShapeLoader given a fetch, wait, and maxBatch
func (*ShapeLoader) Clear ¶
func (l *ShapeLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*ShapeLoader) Load ¶
func (l *ShapeLoader) Load(key int) (*model.Shape, error)
Load a Shape by key, batching and caching will be applied automatically
func (*ShapeLoader) LoadAll ¶
func (l *ShapeLoader) LoadAll(keys []int) ([]*model.Shape, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*ShapeLoader) LoadAllThunk ¶
func (l *ShapeLoader) LoadAllThunk(keys []int) func() ([]*model.Shape, []error)
LoadAllThunk returns a function that when called will block waiting for Shapes. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*ShapeLoader) LoadThunk ¶
func (l *ShapeLoader) LoadThunk(key int) func() (*model.Shape, error)
LoadThunk returns a function that when called will block waiting for a Shape. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*ShapeLoader) Prime ¶
func (l *ShapeLoader) Prime(key int, value *model.Shape) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type ShapeLoaderConfig ¶
type ShapeLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Shape, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
ShapeLoaderConfig captures the config to create a new ShapeLoader
type StopLoader ¶
type StopLoader struct {
// contains filtered or unexported fields
}
StopLoader batches and caches requests
func NewStopLoader ¶
func NewStopLoader(config StopLoaderConfig) *StopLoader
NewStopLoader creates a new StopLoader given a fetch, wait, and maxBatch
func (*StopLoader) Clear ¶
func (l *StopLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*StopLoader) Load ¶
func (l *StopLoader) Load(key int) (*model.Stop, error)
Load a Stop by key, batching and caching will be applied automatically
func (*StopLoader) LoadAll ¶
func (l *StopLoader) LoadAll(keys []int) ([]*model.Stop, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*StopLoader) LoadAllThunk ¶
func (l *StopLoader) LoadAllThunk(keys []int) func() ([]*model.Stop, []error)
LoadAllThunk returns a function that when called will block waiting for Stops. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*StopLoader) LoadThunk ¶
func (l *StopLoader) LoadThunk(key int) func() (*model.Stop, error)
LoadThunk returns a function that when called will block waiting for a Stop. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
type StopLoaderConfig ¶
type StopLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Stop, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
StopLoaderConfig captures the config to create a new StopLoader
type StopTimeWhereLoader ¶
type StopTimeWhereLoader struct {
// contains filtered or unexported fields
}
StopTimeWhereLoader batches and caches requests
func NewStopTimeWhereLoader ¶
func NewStopTimeWhereLoader(config StopTimeWhereLoaderConfig) *StopTimeWhereLoader
NewStopTimeWhereLoader creates a new StopTimeWhereLoader given a fetch, wait, and maxBatch
func (*StopTimeWhereLoader) Clear ¶
func (l *StopTimeWhereLoader) Clear(key model.StopTimeParam)
Clear the value at key from the cache, if it exists
func (*StopTimeWhereLoader) Load ¶
func (l *StopTimeWhereLoader) Load(key model.StopTimeParam) ([]*model.StopTime, error)
Load a StopTime by key, batching and caching will be applied automatically
func (*StopTimeWhereLoader) LoadAll ¶
func (l *StopTimeWhereLoader) LoadAll(keys []model.StopTimeParam) ([][]*model.StopTime, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*StopTimeWhereLoader) LoadAllThunk ¶
func (l *StopTimeWhereLoader) LoadAllThunk(keys []model.StopTimeParam) func() ([][]*model.StopTime, []error)
LoadAllThunk returns a function that when called will block waiting for StopTimes. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*StopTimeWhereLoader) LoadThunk ¶
func (l *StopTimeWhereLoader) LoadThunk(key model.StopTimeParam) func() ([]*model.StopTime, error)
LoadThunk returns a function that when called will block waiting for a StopTime. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*StopTimeWhereLoader) Prime ¶
func (l *StopTimeWhereLoader) Prime(key model.StopTimeParam, value []*model.StopTime) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.clear(key).prime(key, value).)
type StopTimeWhereLoaderConfig ¶
type StopTimeWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.StopTimeParam) ([][]*model.StopTime, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
StopTimeWhereLoaderConfig captures the config to create a new StopTimeWhereLoader
type StopWhereLoader ¶
type StopWhereLoader struct {
// contains filtered or unexported fields
}
StopWhereLoader batches and caches requests
func NewStopWhereLoader ¶
func NewStopWhereLoader(config StopWhereLoaderConfig) *StopWhereLoader
NewStopWhereLoader creates a new StopWhereLoader given a fetch, wait, and maxBatch
func (*StopWhereLoader) Clear ¶
func (l *StopWhereLoader) Clear(key model.StopParam)
Clear the value at key from the cache, if it exists
func (*StopWhereLoader) Load ¶
Load a Stop by key, batching and caching will be applied automatically
func (*StopWhereLoader) LoadAll ¶
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*StopWhereLoader) LoadAllThunk ¶
LoadAllThunk returns a function that when called will block waiting for Stops. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*StopWhereLoader) LoadThunk ¶
LoadThunk returns a function that when called will block waiting for a Stop. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
type StopWhereLoaderConfig ¶
type StopWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.StopParam) ([][]*model.Stop, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
StopWhereLoaderConfig captures the config to create a new StopWhereLoader
type TripLoader ¶
type TripLoader struct {
// contains filtered or unexported fields
}
TripLoader batches and caches requests
func NewTripLoader ¶
func NewTripLoader(config TripLoaderConfig) *TripLoader
NewTripLoader creates a new TripLoader given a fetch, wait, and maxBatch
func (*TripLoader) Clear ¶
func (l *TripLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*TripLoader) Load ¶
func (l *TripLoader) Load(key int) (*model.Trip, error)
Load a Trip by key, batching and caching will be applied automatically
func (*TripLoader) LoadAll ¶
func (l *TripLoader) LoadAll(keys []int) ([]*model.Trip, []error)
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TripLoader) LoadAllThunk ¶
func (l *TripLoader) LoadAllThunk(keys []int) func() ([]*model.Trip, []error)
LoadAllThunk returns a function that when called will block waiting for Trips. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TripLoader) LoadThunk ¶
func (l *TripLoader) LoadThunk(key int) func() (*model.Trip, error)
LoadThunk returns a function that when called will block waiting for a Trip. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
type TripLoaderConfig ¶
type TripLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []int) ([]*model.Trip, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TripLoaderConfig captures the config to create a new TripLoader
type TripWhereLoader ¶
type TripWhereLoader struct {
// contains filtered or unexported fields
}
TripWhereLoader batches and caches requests
func NewTripWhereLoader ¶
func NewTripWhereLoader(config TripWhereLoaderConfig) *TripWhereLoader
NewTripWhereLoader creates a new TripWhereLoader given a fetch, wait, and maxBatch
func (*TripWhereLoader) Clear ¶
func (l *TripWhereLoader) Clear(key model.TripParam)
Clear the value at key from the cache, if it exists
func (*TripWhereLoader) Load ¶
Load a Trip by key, batching and caching will be applied automatically
func (*TripWhereLoader) LoadAll ¶
LoadAll fetches many keys at once. It will be broken into appropriate sized sub batches depending on how the loader is configured
func (*TripWhereLoader) LoadAllThunk ¶
LoadAllThunk returns a function that when called will block waiting for Trips. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
func (*TripWhereLoader) LoadThunk ¶
LoadThunk returns a function that when called will block waiting for a Trip. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunk is called.
type TripWhereLoaderConfig ¶
type TripWhereLoaderConfig struct {
// Fetch is a method that provides the data for the loader
Fetch func(keys []model.TripParam) ([][]*model.Trip, []error)
// Wait is how long to wait before sending a batch
Wait time.Duration
// MaxBatch will limit the maximum number of keys to send in one batch, 0 = no limit
MaxBatch int
}
TripWhereLoaderConfig captures the config to create a new TripWhereLoader
Source Files
¶
- agencyloader_gen.go
- agencyplacewhereloader_gen.go
- agencywhereloader_gen.go
- calendardatewhereloader_gen.go
- calendarloader_gen.go
- censusgeographywhereloader_gen.go
- censustableloader_gen.go
- censusvaluewhereloader_gen.go
- dataloader.go
- feedinfowhereloader_gen.go
- feedloader_gen.go
- feedstateloader_gen.go
- feedversionfileinfowhereloader_gen.go
- feedversiongtfsimportloader_gen.go
- feedversionloader_gen.go
- feedversionservicelevelwhereloader_gen.go
- feedversionwhereloader_gen.go
- frequencywhereloader_gen.go
- levelloader_gen.go
- operatorwhereloader_gen.go
- pathwaywhereloader_gen.go
- routegeometrywhereloader_gen.go
- routeheadwayloader_gen.go
- routeheadwaywhereloader_gen.go
- routeloader_gen.go
- routestopwhereloader_gen.go
- routewhereloader_gen.go
- shapeloader_gen.go
- stoploader_gen.go
- stoptimewhereloader_gen.go
- stopwhereloader_gen.go
- triploader_gen.go
- tripwhereloader_gen.go