Queue & Setting: Add worker pool implementation

Andrew Thornton, 2019-11-20 21:31:39 +00:00
commit 9fb051654a (parent 85042634fc)
GPG Key ID: 3CDE74631F13A748 (no known key found for this signature in database)
10 changed files with 92 additions and 25 deletions

@@ -18,6 +18,7 @@ const BatchedChannelQueueType Type = "batched-channel"
 type BatchedChannelQueueConfiguration struct {
     QueueLength int
     BatchLength int
+    Workers     int
 }

 // BatchedChannelQueue implements
@@ -38,6 +39,7 @@ func NewBatchedChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queu
             queue:    make(chan Data, config.QueueLength),
             handle:   handle,
             exemplar: exemplar,
+            workers:  config.Workers,
         },
         config.BatchLength,
     }, nil
@@ -51,6 +53,7 @@ func (c *BatchedChannelQueue) Run(atShutdown, atTerminate func(context.Context,
     atTerminate(context.Background(), func() {
         log.Warn("BatchedChannelQueue is not terminatable!")
     })
+    for i := 0; i < c.workers; i++ {
         go func() {
             delay := time.Millisecond * 300
             var datas = make([]Data, 0, c.batchLength)
@@ -72,6 +75,7 @@ func (c *BatchedChannelQueue) Run(atShutdown, atTerminate func(context.Context,
             }
         }()
     }
+}

 func init() {
     queuesMap[BatchedChannelQueueType] = NewBatchedChannelQueue

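The change above turns the single consumer goroutine into a pool: Run now starts c.workers goroutines that all range over the same channel, and each batched worker still collects up to batchLength items (flushing after the 300ms delay) before calling the handler. A minimal standalone sketch of that fan-out pattern, not part of the commit, with illustrative names and a close/Wait added only so the example terminates:

package main

import (
    "fmt"
    "sync"
)

// Data stands in for the queue's payload type.
type Data interface{}

func main() {
    const workers = 3
    queue := make(chan Data, 20)

    var wg sync.WaitGroup
    for i := 0; i < workers; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            // Every worker drains the same channel; the scheduler spreads the items.
            for data := range queue {
                fmt.Printf("worker %d handled %v\n", id, data)
            }
        }(i)
    }

    for j := 0; j < 10; j++ {
        queue <- j
    }
    close(queue) // the real queue never closes its channel; this is just for the example
    wg.Wait()
}
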
@@ -22,7 +22,7 @@ func TestBatchedChannelQueue(t *testing.T) {
     nilFn := func(_ context.Context, _ func()) {}

-    queue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{QueueLength: 20, BatchLength: 2}, &testData{})
+    queue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{QueueLength: 20, BatchLength: 2, Workers: 1}, &testData{})
     assert.NoError(t, err)

     go queue.Run(nilFn, nilFn)

@@ -18,6 +18,7 @@ const ChannelQueueType Type = "channel"
 // ChannelQueueConfiguration is the configuration for a ChannelQueue
 type ChannelQueueConfiguration struct {
     QueueLength int
+    Workers     int
 }

 // ChannelQueue implements
@@ -25,6 +26,7 @@ type ChannelQueue struct {
     queue    chan Data
     handle   HandlerFunc
     exemplar interface{}
+    workers  int
 }

 // NewChannelQueue create a memory channel queue
@@ -38,6 +40,7 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro
         queue:    make(chan Data, config.QueueLength),
         handle:   handle,
         exemplar: exemplar,
+        workers:  config.Workers,
     }, nil
 }
@@ -49,12 +52,14 @@ func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())
     atTerminate(context.Background(), func() {
         log.Warn("ChannelQueue is not terminatable!")
     })
+    for i := 0; i < c.workers; i++ {
         go func() {
             for data := range c.queue {
                 c.handle(data)
             }
         }()
     }
+}

 // Push will push the indexer data to queue
 func (c *ChannelQueue) Push(data Data) error {

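The test below only bumps Workers to 1. As a hedged usage sketch of a larger pool, written as if it sat beside that test (so Data, NewChannelQueue, ChannelQueueConfiguration, testData and the context import come from the surrounding package; the variadic handler signature is an assumption, not something this diff shows):

func ExampleChannelQueueWorkers() {
    // Assumed handler shape; adjust to the package's actual HandlerFunc.
    handle := func(data ...Data) {
        for _, datum := range data {
            _ = datum // process each payload here
        }
    }
    nilFn := func(_ context.Context, _ func()) {}

    // Workers: 4 asks Run to start four consumer goroutines over one channel.
    queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{
        QueueLength: 20,
        Workers:     4,
    }, &testData{})
    if err != nil {
        panic(err)
    }

    go queue.Run(nilFn, nilFn)
    _ = queue.Push(&testData{})
}
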
@@ -22,7 +22,7 @@ func TestChannelQueue(t *testing.T) {
     nilFn := func(_ context.Context, _ func()) {}

-    queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{QueueLength: 20}, &testData{})
+    queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{QueueLength: 20, Workers: 1}, &testData{})
     assert.NoError(t, err)

     go queue.Run(nilFn, nilFn)

@@ -9,6 +9,7 @@ import (
     "encoding/json"
     "fmt"
     "reflect"
+    "sync"
     "time"

     "code.gitea.io/gitea/modules/log"
@@ -23,6 +24,7 @@ const LevelQueueType Type = "level"
 type LevelQueueConfiguration struct {
     DataDir     string
     BatchLength int
+    Workers     int
 }

 // LevelQueue implements a disk library queue
@@ -32,6 +34,7 @@ type LevelQueue struct {
     batchLength int
     closed      chan struct{}
     exemplar    interface{}
+    workers     int
 }

 // NewLevelQueue creates a ledis local queue
@@ -53,6 +56,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
         batchLength: config.BatchLength,
         exemplar:    exemplar,
         closed:      make(chan struct{}),
+        workers:     config.Workers,
     }, nil
 }
@@ -60,6 +64,19 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
     atShutdown(context.Background(), l.Shutdown)
     atTerminate(context.Background(), l.Terminate)
+
+    wg := sync.WaitGroup{}
+    for i := 0; i < l.workers; i++ {
+        wg.Add(1)
+        go func() {
+            l.worker()
+            wg.Done()
+        }()
+    }
+    wg.Wait()
+}
+
+func (l *LevelQueue) worker() {
     var i int
     var datas = make([]Data, 0, l.batchLength)
     for {

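Unlike the two channel queues, whose Run returns as soon as the goroutines are launched, LevelQueue.Run (and the persistable and Redis variants below) now blocks on wg.Wait() until every worker has returned. A small self-contained sketch of that blocking fan-out, with placeholder names rather than the queue types themselves:

package main

import (
    "fmt"
    "sync"
)

// run mirrors the sync.WaitGroup pattern added above: it returns only
// after every worker goroutine has finished.
func run(workers int, worker func(id int)) {
    wg := sync.WaitGroup{}
    for i := 0; i < workers; i++ {
        wg.Add(1)
        go func(id int) {
            worker(id)
            wg.Done()
        }(i)
    }
    wg.Wait()
}

func main() {
    run(2, func(id int) {
        fmt.Println("worker", id, "done")
    })
    fmt.Println("run returned only after both workers finished")
}
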
@@ -6,6 +6,7 @@ package queue
 import (
     "context"
+    "sync"
     "time"
 )

@@ -19,6 +20,7 @@ type PersistableChannelQueueConfiguration struct {
     QueueLength int
     Timeout     time.Duration
     MaxAttempts int
+    Workers     int
 }

 // PersistableChannelQueue wraps a channel queue and level queue together
@@ -40,14 +42,17 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
     batchChannelQueue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{
         QueueLength: config.QueueLength,
         BatchLength: config.BatchLength,
+        Workers:     config.Workers,
     }, exemplar)
     if err != nil {
         return nil, err
     }

+    // the level backend only needs one worker to catch up with the previously dropped work
     levelCfg := LevelQueueConfiguration{
         DataDir:     config.DataDir,
         BatchLength: config.BatchLength,
+        Workers:     1,
     }

     levelQueue, err := NewLevelQueue(handle, levelCfg, exemplar)
@@ -100,6 +105,19 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte
     // Just run the level queue - we shut it down later
     go p.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {})
+
+    wg := sync.WaitGroup{}
+    for i := 0; i < p.workers; i++ {
+        wg.Add(1)
+        go func() {
+            p.worker()
+            wg.Done()
+        }()
+    }
+    wg.Wait()
+}
+
+func (p *PersistableChannelQueue) worker() {
     delay := time.Millisecond * 300
     var datas = make([]Data, 0, p.batchLength)
 loop:

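The wrapper above passes config.Workers to its in-memory batched queue but pins the level (disk) backend to a single worker, since that backend only replays items persisted by a previous run. A rough sketch of that split, not the commit's API, with a slice standing in for the disk backlog:

package main

import (
    "fmt"
    "sync"
)

func main() {
    handle := func(item string) { fmt.Println("handled", item) }

    live := make(chan string, 20)                     // stand-in for the in-memory channel queue
    backlog := []string{"persisted-1", "persisted-2"} // stand-in for the level/disk queue

    var wg sync.WaitGroup

    // Several workers (config.Workers in the diff) for new items on the channel.
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for item := range live {
                handle(item)
            }
        }()
    }

    // One worker is enough to catch up on the previously persisted work.
    wg.Add(1)
    go func() {
        defer wg.Done()
        for _, item := range backlog {
            handle(item)
        }
    }()

    live <- "live-1"
    live <- "live-2"
    close(live) // only so the example finishes
    wg.Wait()
}
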
@@ -35,6 +35,7 @@ func TestPersistableChannelQueue(t *testing.T) {
         DataDir:     tmpDir,
         BatchLength: 2,
         QueueLength: 20,
+        Workers:     1,
     }, &testData{})
     assert.NoError(t, err)
@@ -83,6 +84,7 @@ func TestPersistableChannelQueue(t *testing.T) {
         DataDir:     tmpDir,
         BatchLength: 2,
         QueueLength: 20,
+        Workers:     1,
     }, &testData{})
     assert.NoError(t, err)

@@ -29,6 +29,7 @@ func TestLevelQueue(t *testing.T) {
     queue, err := NewLevelQueue(handle, LevelQueueConfiguration{
         DataDir:     "level-queue-test-data",
         BatchLength: 2,
+        Workers:     1,
     }, &testData{})
     assert.NoError(t, err)
@@ -76,6 +77,7 @@ func TestLevelQueue(t *testing.T) {
     queue, err = NewLevelQueue(handle, LevelQueueConfiguration{
         DataDir:     "level-queue-test-data",
         BatchLength: 2,
+        Workers:     1,
     }, &testData{})
     assert.NoError(t, err)

@@ -11,6 +11,7 @@ import (
     "fmt"
     "reflect"
     "strings"
+    "sync"
     "time"

     "code.gitea.io/gitea/modules/log"
@@ -36,6 +37,7 @@ type RedisQueue struct {
     batchLength int
     closed      chan struct{}
     exemplar    interface{}
+    workers     int
 }

 // RedisQueueConfiguration is the configuration for the redis queue
@@ -45,6 +47,7 @@ type RedisQueueConfiguration struct {
     DBIndex     int
     BatchLength int
     QueueName   string
+    Workers     int
 }

 // NewRedisQueue creates single redis or cluster redis queue
@@ -62,6 +65,7 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
         batchLength: config.BatchLength,
         exemplar:    exemplar,
         closed:      make(chan struct{}),
+        workers:     config.Workers,
     }
     if len(dbs) == 0 {
         return nil, errors.New("no redis host found")
@@ -86,6 +90,18 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
     atShutdown(context.Background(), r.Shutdown)
     atTerminate(context.Background(), r.Terminate)
+    wg := sync.WaitGroup{}
+    for i := 0; i < r.workers; i++ {
+        wg.Add(1)
+        go func() {
+            r.worker()
+            wg.Done()
+        }()
+    }
+    wg.Wait()
+}
+
+func (r *RedisQueue) worker() {
     var i int
     var datas = make([]Data, 0, r.batchLength)
     for {

@@ -44,6 +44,7 @@ func CreateQueue(name string, handle queue.HandlerFunc, exemplar interface{}) qu
         opts["Password"] = q.Password
         opts["DBIndex"] = q.DBIndex
         opts["QueueName"] = name
+        opts["Workers"] = q.Workers

     cfg, err := json.Marshal(opts)
     if err != nil {
@@ -117,6 +118,8 @@ func newQueueService() {
     Queue.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(10)
     Queue.Timeout = sec.Key("TIMEOUT").MustDuration(GracefulHammerTime + 30*time.Second)
     Queue.Workers = sec.Key("WORKER").MustInt(1)
+
+    Cfg.Section("queue.notification").Key("WORKER").MustInt(5)
 }

 // ParseQueueConnStr parses a queue connection string
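For configuration, the code above reads the worker count from a WORKER key (defaulting to 1) in what appears to be the [queue] section, and seeds a separate default of 5 for [queue.notification]. A hedged app.ini sketch of those keys; the section names are inferred from this code, not quoted from documentation:

[queue]
WORKER = 2            ; size of each queue's worker pool; the code above defaults this to 1

[queue.notification]
WORKER = 5            ; the notification queue gets its own default of 5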