Documentation
Index ¶
- func LoaderMiddleware(next http.Handler) http.Handler
- func NewExecutableSchema(cfg Config) graphql.ExecutableSchema
- type Address
- type AddressLoader
- type ComplexityRoot
- type Config
- type Customer
- type CustomerResolver
- type DirectiveRoot
- type Item
- type ItemSliceLoader
- type Order
- type OrderResolver
- type OrderSliceLoader
- type QueryResolver
- type Resolver
- type ResolverRoot
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func NewExecutableSchema ¶
func NewExecutableSchema(cfg Config) graphql.ExecutableSchema
NewExecutableSchema creates an ExecutableSchema from the ResolverRoot interface.
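For illustration, a minimal wiring sketch; it assumes the gqlgen handler package, and the route name and use of handler.NewDefaultServer are assumptions rather than part of this package:

    import (
        "log"
        "net/http"

        "github.com/99designs/gqlgen/graphql/handler"
    )

    func main() {
        // Build the executable schema from a ResolverRoot implementation.
        es := NewExecutableSchema(Config{Resolvers: &Resolver{}})

        // LoaderMiddleware attaches the per-request data loaders; the
        // handler then serves GraphQL queries against the schema.
        srv := handler.NewDefaultServer(es)
        http.Handle("/query", LoaderMiddleware(srv))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }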
Types ¶
type AddressLoader ¶
type AddressLoader struct {
    // contains filtered or unexported fields
}
AddressLoader batches and caches requests
func (*AddressLoader) Clear ¶
func (l *AddressLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*AddressLoader) Load ¶
func (l *AddressLoader) Load(key int) (*Address, error)
Load an Address by key; batching and caching will be applied automatically.
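A minimal usage sketch; how the loader instance is obtained (for example from the request context populated by LoaderMiddleware) is left out:

    func customerAddress(loader *AddressLoader, customerID int) (*Address, error) {
        // Concurrent Load calls for different keys are collected into one
        // batch; repeated calls for the same key are served from the cache.
        addr, err := loader.Load(customerID)
        if err != nil {
            return nil, err
        }
        return addr, nil
    }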
func (*AddressLoader) LoadAll ¶
func (l *AddressLoader) LoadAll(keys []int) ([]*Address, []error)
LoadAll fetches many keys at once. The keys will be broken into appropriately sized sub-batches depending on how the loader is configured.
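A sketch assuming the result and error slices are aligned index-for-index with the input keys (the log package import is omitted):

    func logAddresses(loader *AddressLoader, keys []int) {
        addrs, errs := loader.LoadAll(keys)
        for i, key := range keys {
            if errs[i] != nil {
                log.Printf("address for key %d: %v", key, errs[i])
                continue
            }
            log.Printf("address for key %d: %+v", key, addrs[i])
        }
    }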
func (*AddressLoader) LoadThunk ¶
func (l *AddressLoader) LoadThunk(key int) func() (*Address, error)
LoadThunk returns a function that, when called, will block waiting for an Address. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunks are called.
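A sketch of that pattern, issuing requests to two loaders before blocking on either; the loader variables, and using the customer ID as the OrderSliceLoader key, are assumptions:

    // Start both fetches; neither call blocks, so the two loaders are free
    // to batch their underlying requests.
    addrThunk := addressLoader.LoadThunk(customerID)
    ordersThunk := orderLoader.LoadThunk(customerID)

    // Calling a thunk blocks until its batch has completed.
    addr, addrErr := addrThunk()
    orders, ordersErr := ordersThunk()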
func (*AddressLoader) Prime ¶
func (l *AddressLoader) Prime(key int, value *Address) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key) and then call loader.Prime(key, value).)
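A sketch of the forceful-prime pattern mentioned above:

    // Prime only writes when the key is absent, so clear first to overwrite.
    if !loader.Prime(customerID, addr) {
        loader.Clear(customerID)
        loader.Prime(customerID, addr)
    }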
type ComplexityRoot ¶
type ComplexityRoot struct {
    Address struct {
        Id      func(childComplexity int) int
        Street  func(childComplexity int) int
        Country func(childComplexity int) int
    }

    Customer struct {
        Id      func(childComplexity int) int
        Name    func(childComplexity int) int
        Address func(childComplexity int) int
        Orders  func(childComplexity int) int
    }

    Item struct {
        Name func(childComplexity int) int
    }

    Order struct {
        Id     func(childComplexity int) int
        Date   func(childComplexity int) int
        Amount func(childComplexity int) int
        Items  func(childComplexity int) int
    }

    Query struct {
        Customers func(childComplexity int) int
        Torture1d func(childComplexity int, customerIds []int) int
        Torture2d func(childComplexity int, customerIds [][]int) int
    }
}
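ComplexityRoot lets each field's cost be computed from its arguments before execution. A sketch, assuming the gqlgen handler and extension packages; the cost rule and the limit of 300 are arbitrary:

    cfg := Config{Resolvers: &Resolver{}}

    // Hypothetical cost rule: charge the torture query per requested customer ID.
    cfg.Complexity.Query.Torture1d = func(childComplexity int, customerIds []int) int {
        return childComplexity * len(customerIds)
    }

    srv := handler.NewDefaultServer(NewExecutableSchema(cfg))
    srv.Use(extension.FixedComplexityLimit(300))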
type Config ¶
type Config struct {
    Resolvers  ResolverRoot
    Directives DirectiveRoot
    Complexity ComplexityRoot
}
type CustomerResolver ¶
type DirectiveRoot ¶
type DirectiveRoot struct {
}
type ItemSliceLoader ¶
type ItemSliceLoader struct {
    // contains filtered or unexported fields
}
ItemSliceLoader batches and caches requests
func (*ItemSliceLoader) Clear ¶
func (l *ItemSliceLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*ItemSliceLoader) Load ¶
func (l *ItemSliceLoader) Load(key int) ([]Item, error)
Load the Items for a key; batching and caching will be applied automatically.
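Slice loaders return one slice of values per key. A sketch, assuming the loader is keyed by order ID:

    func orderItems(loader *ItemSliceLoader, orderID int) ([]Item, error) {
        // One key yields the complete slice of items for that order.
        return loader.Load(orderID)
    }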
func (*ItemSliceLoader) LoadAll ¶
func (l *ItemSliceLoader) LoadAll(keys []int) ([][]Item, []error)
LoadAll fetches many keys at once. The keys will be broken into appropriately sized sub-batches depending on how the loader is configured.
func (*ItemSliceLoader) LoadThunk ¶
func (l *ItemSliceLoader) LoadThunk(key int) func() ([]Item, error)
LoadThunk returns a function that, when called, will block waiting for the Items. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunks are called.
func (*ItemSliceLoader) Prime ¶
func (l *ItemSliceLoader) Prime(key int, value []Item) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key) and then call loader.Prime(key, value).)
type OrderResolver ¶
type OrderSliceLoader ¶
type OrderSliceLoader struct {
    // contains filtered or unexported fields
}
OrderSliceLoader batches and caches requests
func (*OrderSliceLoader) Clear ¶
func (l *OrderSliceLoader) Clear(key int)
Clear the value at key from the cache, if it exists
func (*OrderSliceLoader) Load ¶
func (l *OrderSliceLoader) Load(key int) ([]Order, error)
Load the Orders for a key; batching and caching will be applied automatically.
func (*OrderSliceLoader) LoadAll ¶
func (l *OrderSliceLoader) LoadAll(keys []int) ([][]Order, []error)
LoadAll fetches many keys at once. The keys will be broken into appropriately sized sub-batches depending on how the loader is configured.
func (*OrderSliceLoader) LoadThunk ¶
func (l *OrderSliceLoader) LoadThunk(key int) func() ([]Order, error)
LoadThunk returns a function that, when called, will block waiting for the Orders. This method should be used if you want one goroutine to make requests to many different data loaders without blocking until the thunks are called.
func (*OrderSliceLoader) Prime ¶
func (l *OrderSliceLoader) Prime(key int, value []Order) bool
Prime the cache with the provided key and value. If the key already exists, no change is made and false is returned. (To forcefully prime the cache, clear the key first with loader.Clear(key) and then call loader.Prime(key, value).)
type QueryResolver ¶
type Resolver ¶
type Resolver struct{}
func (*Resolver) Customer ¶
func (r *Resolver) Customer() CustomerResolver
func (*Resolver) Order ¶
func (r *Resolver) Order() OrderResolver
func (*Resolver) Query ¶
func (r *Resolver) Query() QueryResolver
type ResolverRoot ¶
type ResolverRoot interface {
    Customer() CustomerResolver
    Order() OrderResolver
    Query() QueryResolver
}