Documentation
¶
Index ¶
Examples ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type FixedPool ¶
type FixedPool struct {
// contains filtered or unexported fields
}
FixedPool implements a fixed size blocking pool.
Example ¶
var JobKey = "job_id"

l := 1000 // limit to 1000 concurrent requests.

// Create a new fixed-size pool guarding the protected resource.
pool, err := NewFixedPool(
	"protected_resource_pool",
	l,
	100,
	time.Millisecond*250,
	time.Millisecond*500,
	time.Millisecond*10,
	time.Second,
	limit.BuiltinLimitLogger{},
	nil,
)
if err != nil {
	panic(err)
}

wg := sync.WaitGroup{}
wg.Add(l * 3)

// spawn 3000 concurrent requests that would normally be too much load for the protected resource.
// NOTE: the loop condition is i < l*3 so exactly l*3 goroutines are spawned,
// matching the WaitGroup count above; one extra Done() would panic with a
// negative WaitGroup counter.
for i := 0; i < l*3; i++ {
	go func(c int) {
		defer wg.Done()
		ctx := context.WithValue(context.Background(), JobKey, c)
		// this will block until timeout or token was acquired.
		listener, ok := pool.Acquire(ctx)
		if !ok {
			log.Printf("was not able to acquire lock for id=%d\n", c)
			return
		}
		log.Printf("acquired lock for id=%d\n", c)
		// do something...
		time.Sleep(time.Millisecond * 10)
		listener.OnSuccess()
		log.Printf("released lock for id=%d\n", c)
	}(i)
}

// wait for completion
wg.Wait()
log.Println("Finished")
func NewFixedPool ¶
func NewFixedPool( name string, fixedLimit int, windowSize int, minWindowTime time.Duration, maxWindowTime time.Duration, minRTTThreshold time.Duration, timeout time.Duration, logger limit.Logger, metricRegistry core.MetricRegistry, ) (*FixedPool, error)
NewFixedPool creates a named fixed pool resource. You can use this to guard another resource from too many concurrent requests.
Pass a value < 0 for any numeric or duration parameter to accept its default; fixedLimit and name are required.
func NewLIFOFixedPool ¶
func NewLIFOFixedPool( name string, fixedLimit int, windowSize int, minWindowTime time.Duration, maxWindowTime time.Duration, minRTTThreshold time.Duration, maxBacklog int, timeout time.Duration, logger limit.Logger, metricRegistry core.MetricRegistry, ) (*FixedPool, error)
NewLIFOFixedPool creates a named LIFO fixed pool resource. You can use this to guard another resource from too many concurrent requests.
Pass a value < 0 for any numeric or duration parameter to accept its default; fixedLimit and name are required.
type LIFOFixedPool ¶
type LIFOFixedPool struct {
// contains filtered or unexported fields
}
LIFOFixedPool implements a fixed size LIFO blocking pool.
Example ¶
var JobKey = "job_id"

l := 1000 // limit to 1000 concurrent requests.

// Create a new LIFO fixed-size pool guarding the protected resource.
// The backlog (3*l) bounds how many callers may wait for a token.
pool, err := NewLIFOFixedPool(
	"protected_resource_pool",
	l,
	100,
	time.Millisecond*250,
	time.Millisecond*500,
	time.Millisecond*10,
	3*l,
	time.Second,
	limit.BuiltinLimitLogger{},
	nil,
)
if err != nil {
	panic(err)
}

wg := sync.WaitGroup{}
wg.Add(l * 3)

// spawn 3000 concurrent requests that would normally be too much load for the protected resource.
// NOTE: the loop condition is i < l*3 so exactly l*3 goroutines are spawned,
// matching the WaitGroup count above; one extra Done() would panic with a
// negative WaitGroup counter.
for i := 0; i < l*3; i++ {
	go func(c int) {
		defer wg.Done()
		ctx := context.WithValue(context.Background(), JobKey, c)
		// this will block until timeout or token was acquired.
		listener, ok := pool.Acquire(ctx)
		if !ok {
			log.Printf("was not able to acquire lock for id=%d\n", c)
			return
		}
		log.Printf("acquired lock for id=%d\n", c)
		// do something...
		time.Sleep(time.Millisecond * 10)
		listener.OnSuccess()
		log.Printf("released lock for id=%d\n", c)
	}(i)
}

// wait for completion
wg.Wait()
log.Println("Finished")
func (*LIFOFixedPool) Acquire ¶
Acquire a token for the protected resource. This method will block until acquisition or the configured timeout has expired.
func (*LIFOFixedPool) Limit ¶
func (p *LIFOFixedPool) Limit() int
Limit will return the configured limit
type Pool ¶
type Pool struct {
// contains filtered or unexported fields
}
Pool implements a generic blocking pool pattern.
Example ¶
var JobKey = "job_id"

l := 1000 // initial limit of 1000 concurrent requests; the AIMD limit adjusts it at runtime.

delegateLimit := limit.NewDefaultAIMLimit(
	"aimd_limiter",
	nil,
)

// wrap with a default limiter and simple strategy
// you could of course get very complicated with this.
delegateLimiter, err := limiter.NewDefaultLimiter(
	delegateLimit,
	(time.Millisecond * 250).Nanoseconds(),
	(time.Millisecond * 500).Nanoseconds(),
	(time.Millisecond * 10).Nanoseconds(),
	100,
	strategy.NewSimpleStrategy(l),
	limit.BuiltinLimitLogger{},
	nil,
)
if err != nil {
	panic(err)
}

// create a new pool
pool, err := NewPool(
	delegateLimiter,
	false,
	0,
	time.Second,
	limit.BuiltinLimitLogger{},
	nil,
)
if err != nil {
	panic(err)
}

wg := sync.WaitGroup{}
wg.Add(l * 3)

// spawn 3000 concurrent requests that would normally be too much load for the protected resource.
// NOTE: the loop condition is i < l*3 so exactly l*3 goroutines are spawned,
// matching the WaitGroup count above; one extra Done() would panic with a
// negative WaitGroup counter.
for i := 0; i < l*3; i++ {
	go func(c int) {
		defer wg.Done()
		ctx := context.WithValue(context.Background(), JobKey, c)
		// this will block until timeout or token was acquired.
		listener, ok := pool.Acquire(ctx)
		if !ok {
			log.Printf("was not able to acquire lock for id=%d\n", c)
			return
		}
		log.Printf("acquired lock for id=%d\n", c)
		// do something...
		time.Sleep(time.Millisecond * 10)
		listener.OnSuccess()
		log.Printf("released lock for id=%d\n", c)
	}(i)
}

// wait for completion
wg.Wait()
log.Println("Finished")
func NewPool ¶
func NewPool( delegateLimiter core.Limiter, isLIFO bool, maxBacklog int, timeout time.Duration, logger limit.Logger, metricRegistry core.MetricRegistry, ) (*Pool, error)
NewPool creates a pool resource. You can use this to guard another resource from too many concurrent requests.
Pass a value < 0 for any numeric or duration parameter to accept its default; delegateLimiter is required (NewPool takes no name parameter).