Merge pull request #29 from ActiveState/leakybucket

Rate limit using leaky bucket algorithm

for https://bugs.activestate.com/show_bug.cgi?id=103143
Sridhar Ratnakumar 2014-04-29 20:44:32 -07:00
commit 8dcd1ad3e5
10 changed files with 269 additions and 44 deletions
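
With this change, callers configure rate limiting by handing Config a *ratelimiter.LeakyBucket instead of setting the old LimitRate field. A minimal usage sketch, not part of the diff below; the log path and bucket parameters are illustrative, and the Line.Text field is assumed from the package's Line type:

package main

import (
	"fmt"
	"time"

	"github.com/ActiveState/tail"
	"github.com/ActiveState/tail/ratelimiter"
)

func main() {
	// Allow a burst of up to 100 lines; one unit of the bucket leaks every
	// 10ms, i.e. roughly 100 lines per second sustained.
	t, err := tail.TailFile("/var/log/app.log", tail.Config{
		Follow:      true,
		RateLimiter: ratelimiter.NewLeakyBucket(100, 10*time.Millisecond),
	})
	if err != nil {
		panic(err)
	}

	for line := range t.Lines {
		fmt.Println(line.Text)
	}
}

Size bounds the burst, while LeakInterval sets the sustained rate: one unit of fill drains per interval.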

@ -1,3 +1,7 @@
+# May, 2014
+* Improved rate limiting using leaky bucket (PR #29)
# Apr, 2014
* LimitRate now discards read buffer (PR #28)

Makefile

@ -1,7 +1,7 @@
default: test

test: *.go
-	go test -v
+	go test -v ./...

fmt:
	gofmt -w .

rate.go

@ -1,20 +0,0 @@
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.

package tail

// RateMonitor is a naive rate monitor that monitors the number of
// items processed in the current second.
type RateMonitor struct {
	second int64
	num    int64
}

func (r *RateMonitor) Tick(unixTime int64) int64 {
	if r.second != unixTime {
		r.second = unixTime
		r.num = 1
	} else {
		r.num += 1
	}
	return r.num
}

ratelimiter/Licence

@ -0,0 +1,7 @@
Copyright (C) 2013 99designs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

ratelimiter/leakybucket.go

@ -0,0 +1,97 @@
// Package ratelimiter implements the Leaky Bucket ratelimiting algorithm with memcached and in-memory backends.
package ratelimiter

import (
	"time"
)

type LeakyBucket struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
	Now          func() time.Time
}

func NewLeakyBucket(size uint16, leakInterval time.Duration) *LeakyBucket {
	bucket := LeakyBucket{
		Size:         size,
		Fill:         0,
		LeakInterval: leakInterval,
		Now:          time.Now,
		Lastupdate:   time.Now(),
	}

	return &bucket
}

func (b *LeakyBucket) updateFill() {
	now := b.Now()
	if b.Fill > 0 {
		elapsed := now.Sub(b.Lastupdate)

		b.Fill -= float64(elapsed) / float64(b.LeakInterval)
		if b.Fill < 0 {
			b.Fill = 0
		}
	}
	b.Lastupdate = now
}

func (b *LeakyBucket) Pour(amount uint16) bool {
	b.updateFill()

	var newfill float64 = b.Fill + float64(amount)

	if newfill > float64(b.Size) {
		return false
	}

	b.Fill = newfill

	return true
}

// The time at which this bucket will be completely drained
func (b *LeakyBucket) DrainedAt() time.Time {
	return b.Lastupdate.Add(time.Duration(b.Fill * float64(b.LeakInterval)))
}

// The duration until this bucket is completely drained
func (b *LeakyBucket) TimeToDrain() time.Duration {
	return b.DrainedAt().Sub(b.Now())
}

func (b *LeakyBucket) TimeSinceLastUpdate() time.Duration {
	return b.Now().Sub(b.Lastupdate)
}

type LeakyBucketSer struct {
	Size         uint16
	Fill         float64
	LeakInterval time.Duration // time.Duration for 1 unit of size to leak
	Lastupdate   time.Time
}

func (b *LeakyBucket) Serialise() *LeakyBucketSer {
	bucket := LeakyBucketSer{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
	}

	return &bucket
}

func (b *LeakyBucketSer) DeSerialise() *LeakyBucket {
	bucket := LeakyBucket{
		Size:         b.Size,
		Fill:         b.Fill,
		LeakInterval: b.LeakInterval,
		Lastupdate:   b.Lastupdate,
		Now:          time.Now,
	}

	return &bucket
}
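
Taken on its own, the bucket behaves as in this small sketch; the values are illustrative, and overriding Now to simulate elapsed time follows the same pattern the package's tests below use:

package main

import (
	"fmt"
	"time"

	"github.com/ActiveState/tail/ratelimiter"
)

func main() {
	// Capacity 3, one unit leaks per second: a burst of three pours fits,
	// the fourth overflows until enough time has passed.
	b := ratelimiter.NewLeakyBucket(3, time.Second)

	fmt.Println(b.Pour(1), b.Pour(1), b.Pour(1)) // true true true
	fmt.Println(b.Pour(1))                       // false: the bucket is full
	fmt.Println(b.TimeToDrain())                 // ~3s until it is empty again

	// Simulate three seconds passing by overriding the clock, as the
	// package's tests do; by then the bucket has fully leaked.
	b.Now = func() time.Time { return time.Now().Add(3 * time.Second) }
	fmt.Println(b.Pour(1)) // true again
}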

ratelimiter/leakybucket_test.go

@ -0,0 +1,73 @@
package ratelimiter

import (
	"testing"
	"time"
)

func TestPour(t *testing.T) {
	bucket := NewLeakyBucket(60, time.Second)
	bucket.Lastupdate = time.Unix(0, 0)

	bucket.Now = func() time.Time { return time.Unix(1, 0) }

	if bucket.Pour(61) {
		t.Error("Expected false")
	}

	if !bucket.Pour(10) {
		t.Error("Expected true")
	}

	if !bucket.Pour(49) {
		t.Error("Expected true")
	}

	if bucket.Pour(2) {
		t.Error("Expected false")
	}

	bucket.Now = func() time.Time { return time.Unix(61, 0) }

	if !bucket.Pour(60) {
		t.Error("Expected true")
	}

	if bucket.Pour(1) {
		t.Error("Expected false")
	}

	bucket.Now = func() time.Time { return time.Unix(70, 0) }

	if !bucket.Pour(1) {
		t.Error("Expected true")
	}
}

func TestTimeSinceLastUpdate(t *testing.T) {
	bucket := NewLeakyBucket(60, time.Second)
	bucket.Now = func() time.Time { return time.Unix(1, 0) }
	bucket.Pour(1)
	bucket.Now = func() time.Time { return time.Unix(2, 0) }

	sinceLast := bucket.TimeSinceLastUpdate()

	if sinceLast != time.Second*1 {
		t.Errorf("Expected time since last update to be 1 second, got %d", sinceLast)
	}
}

func TestTimeToDrain(t *testing.T) {
	bucket := NewLeakyBucket(60, time.Second)
	bucket.Now = func() time.Time { return time.Unix(1, 0) }
	bucket.Pour(10)

	if bucket.TimeToDrain() != time.Second*10 {
		t.Error("Time to drain should be 10 seconds")
	}

	bucket.Now = func() time.Time { return time.Unix(2, 0) }

	if bucket.TimeToDrain() != time.Second*9 {
		t.Error("Time to drain should be 9 seconds")
	}
}

ratelimiter/memory.go

@ -0,0 +1,58 @@
package ratelimiter

import (
	"errors"
	"time"
)

const GC_SIZE int = 100

type Memory struct {
	store           map[string]LeakyBucket
	lastGCCollected time.Time
}

func NewMemory() *Memory {
	m := new(Memory)
	m.store = make(map[string]LeakyBucket)
	m.lastGCCollected = time.Now()
	return m
}

func (m *Memory) GetBucketFor(key string) (*LeakyBucket, error) {
	bucket, ok := m.store[key]
	if !ok {
		return nil, errors.New("miss")
	}

	return &bucket, nil
}

func (m *Memory) SetBucketFor(key string, bucket LeakyBucket) error {
	if len(m.store) > GC_SIZE {
		m.GarbageCollect()
	}

	m.store[key] = bucket

	return nil
}

func (m *Memory) GarbageCollect() {
	now := time.Now()

	// rate limit GC to once per minute
	if now.Sub(m.lastGCCollected) >= 60*time.Second {
		for key, bucket := range m.store {
			// if the bucket is drained, then GC
			if bucket.DrainedAt().Unix() <= now.Unix() {
				delete(m.store, key)
			}
		}

		m.lastGCCollected = now
	}
}
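
A sketch of how the in-memory backend might back a per-client limiter; the allow helper, its key, and the per-key capacity are illustrative, not part of the package:

package main

import (
	"fmt"
	"time"

	"github.com/ActiveState/tail/ratelimiter"
)

// allow reports whether the client identified by key may make another
// request, keeping one leaky bucket per key in the in-memory store.
func allow(store *ratelimiter.Memory, key string) bool {
	bucket, err := store.GetBucketFor(key)
	if err != nil {
		// Miss: start a fresh bucket for this key (capacity and leak
		// interval are illustrative).
		bucket = ratelimiter.NewLeakyBucket(10, time.Second)
	}
	ok := bucket.Pour(1)
	// GetBucketFor hands back a copy, so write the updated fill back.
	store.SetBucketFor(key, *bucket)
	return ok
}

func main() {
	store := ratelimiter.NewMemory()
	for i := 0; i < 12; i++ {
		fmt.Println(allow(store, "client-42")) // true for the first 10, then false
	}
}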

ratelimiter/storage.go

@ -0,0 +1,6 @@
package ratelimiter

type Storage interface {
	GetBucketFor(string) (*LeakyBucket, error)
	SetBucketFor(string, LeakyBucket) error
}
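
Serialise and DeSerialise exist so a bucket can round-trip through an external store (the package comment mentions memcached). A hypothetical sketch of such a backend, using gob and a plain byte map as a stand-in for the external service; ByteStore and everything in it is illustrative:

package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"time"

	"github.com/ActiveState/tail/ratelimiter"
)

// ByteStore is a hypothetical external backend: buckets are flattened to
// LeakyBucketSer via Serialise, gob-encoded and written to a byte-keyed map
// standing in for something like memcached.
type ByteStore struct {
	data map[string][]byte
}

// Compile-time check that ByteStore satisfies the Storage interface.
var _ ratelimiter.Storage = (*ByteStore)(nil)

func NewByteStore() *ByteStore {
	return &ByteStore{data: make(map[string][]byte)}
}

func (s *ByteStore) GetBucketFor(key string) (*ratelimiter.LeakyBucket, error) {
	raw, ok := s.data[key]
	if !ok {
		return nil, errors.New("miss")
	}
	var ser ratelimiter.LeakyBucketSer
	if err := gob.NewDecoder(bytes.NewReader(raw)).Decode(&ser); err != nil {
		return nil, err
	}
	// DeSerialise restores the Now clock that Serialise drops.
	return ser.DeSerialise(), nil
}

func (s *ByteStore) SetBucketFor(key string, bucket ratelimiter.LeakyBucket) error {
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(bucket.Serialise()); err != nil {
		return err
	}
	s.data[key] = buf.Bytes()
	return nil
}

func main() {
	store := NewByteStore()
	store.SetBucketFor("client-42", *ratelimiter.NewLeakyBucket(5, time.Second))

	bucket, _ := store.GetBucketFor("client-42")
	fmt.Println(bucket.Pour(1)) // true: the round-tripped bucket still works
}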

tail.go

@ -5,6 +5,7 @@ package tail
import (
	"bufio"
	"fmt"
+	"github.com/ActiveState/tail/ratelimiter"
	"github.com/ActiveState/tail/util"
	"github.com/ActiveState/tail/watch"
	"io"
@ -39,11 +40,11 @@ type SeekInfo struct {
// Config is used to specify how a file must be tailed.
type Config struct {
	// File-specific
-	Location  *SeekInfo // Seek to this location before tailing
-	ReOpen    bool      // Reopen recreated files (tail -F)
-	MustExist bool      // Fail early if the file does not exist
-	Poll      bool      // Poll for file changes instead of using inotify
-	LimitRate int64     // Maximum read rate (lines per second)
+	Location    *SeekInfo // Seek to this location before tailing
+	ReOpen      bool      // Reopen recreated files (tail -F)
+	MustExist   bool      // Fail early if the file does not exist
+	Poll        bool      // Poll for file changes instead of using inotify
+	RateLimiter *ratelimiter.LeakyBucket

	// Generic IO
	Follow bool // Continue looking for new lines (tail -f)
@ -63,7 +64,6 @@ type Tail struct {
	reader  *bufio.Reader
	watcher watch.FileWatcher
	changes *watch.FileChanges
-	rateMon *RateMonitor

	tomb.Tomb // provides: Done, Kill, Dying
@ -95,8 +95,6 @@ func TailFile(filename string, config Config) (*Tail, error) {
		t.Logger = log.New(os.Stderr, "", log.LstdFlags)
	}

-	t.rateMon = new(RateMonitor)

	if t.Poll {
		t.watcher = watch.NewPollingFileWatcher(filename)
	} else {
@ -222,9 +220,8 @@ func (tail *Tail) tailFileSync() {
			// Wait a second before seeking till the end of
			// file when rate limit is reached.
			msg := fmt.Sprintf(
-				"Too much log activity (more than %d lines "+
-					"per second being written); waiting a second "+
-					"before resuming tailing", tail.LimitRate)
+				"Too much log activity; waiting a second " +
+					"before resuming tailing")
			tail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}
			select {
			case <-time.After(time.Second):
@ -333,7 +330,6 @@ func (tail *Tail) seekEnd() error {
// if necessary. Return false if rate limit is reached.
func (tail *Tail) sendLine(line []byte) bool {
	now := time.Now()
-	nowUnix := now.Unix()
	lines := []string{string(line)}

	// Split longer lines
@ -344,11 +340,12 @@ func (tail *Tail) sendLine(line []byte) bool {
	for _, line := range lines {
		tail.Lines <- &Line{line, now, nil}
-		rate := tail.rateMon.Tick(nowUnix)
-		if tail.LimitRate > 0 && rate > tail.LimitRate {
-			tail.Logger.Printf("Rate limit (%v < %v) reached on file (%v); entering 1s cooloff period.\n",
-				tail.LimitRate,
-				rate,
+	}
+
+	if tail.Config.RateLimiter != nil {
+		ok := tail.Config.RateLimiter.Pour(uint16(len(lines)))
+		if !ok {
+			tail.Logger.Printf("Leaky bucket full (%v); entering 1s cooloff period.\n",
				tail.Filename)
			return false
		}

tail_test.go

@ -8,6 +8,7 @@ package tail
import (
	"./watch"
	_ "fmt"
+	"github.com/ActiveState/tail/ratelimiter"
	"io/ioutil"
	"os"
	"strings"
@ -261,15 +262,17 @@ func TestRateLimiting(_t *testing.T) {
	t := NewTailTest("rate-limiting", _t)
	t.CreateFile("test.txt", "hello\nworld\nagain\nextra\n")
	config := Config{
-		Follow:    true,
-		LimitRate: 2}
-	expecting := "Too much log activity (more than 2 lines per second being written); waiting a second before resuming tailing"
+		Follow:      true,
+		RateLimiter: ratelimiter.NewLeakyBucket(2, time.Second)}
+	leakybucketFull := "Too much log activity; waiting a second before resuming tailing"
	tail := t.StartTail("test.txt", config)

	// TODO: also verify that tail resumes after the cooloff period.
-	go t.VerifyTailOutput(
-		tail,
-		[]string{"hello", "world", "again", expecting, "more", "data"})
+	go t.VerifyTailOutput(tail, []string{
+		"hello", "world", "again",
+		leakybucketFull,
+		"more", "data",
+		leakybucketFull})

	// Add more data only after reasonable delay.
	<-time.After(1200 * time.Millisecond)