Queue: Make resizing worker pools
parent 0edb70a099
commit e6ebb47299
11 changed files with 543 additions and 363 deletions
@@ -9,7 +9,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"reflect"
-	"sync"
 	"time"
 
 	"code.gitea.io/gitea/modules/log"
@@ -22,19 +21,23 @@ const LevelQueueType Type = "level"
 
 // LevelQueueConfiguration is the configuration for a LevelQueue
 type LevelQueueConfiguration struct {
-	DataDir     string
-	BatchLength int
-	Workers     int
+	DataDir      string
+	QueueLength  int
+	BatchLength  int
+	Workers      int
+	BlockTimeout time.Duration
+	BoostTimeout time.Duration
+	BoostWorkers int
 }
 
 // LevelQueue implements a disk library queue
 type LevelQueue struct {
-	handle      HandlerFunc
-	queue       *levelqueue.Queue
-	batchLength int
-	closed      chan struct{}
-	exemplar    interface{}
-	workers     int
+	pool       *WorkerPool
+	queue      *levelqueue.Queue
+	closed     chan struct{}
+	terminated chan struct{}
+	exemplar   interface{}
+	workers    int
 }
 
 // NewLevelQueue creates a ledis local queue
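
Note: the configuration now carries the knobs for the channel-backed worker pool. QueueLength sizes the pool's dataChan buffer (see the constructor below); judging by their names, BlockTimeout, BoostTimeout and BoostWorkers appear to control when and for how long the pool temporarily grows. A minimal sketch of filling these fields, written as if inside the queue package and assuming HandlerFunc is a variadic func(...Data), as l.handle(datas...) in the old code suggests; the values and comments are illustrative guesses, not documented behaviour:

cfg := LevelQueueConfiguration{
	DataDir:      "queues/level",   // hypothetical on-disk location
	QueueLength:  20,               // buffer size of the pool's dataChan
	BatchLength:  20,
	Workers:      1,                // initial worker count
	BlockTimeout: 1 * time.Second,  // presumably: how long a push may block before boosting
	BoostTimeout: 5 * time.Minute,  // presumably: how long boosted workers stay around
	BoostWorkers: 5,                // presumably: extra workers added on a boost
}
q, err := NewLevelQueue(func(data ...Data) {
	// handle a batch of dequeued items
}, cfg, nil)
if err != nil {
	log.Error("Unable to create LevelQueue: %v", err)
}
_ = q
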
@@ -50,13 +53,25 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 		return nil, err
 	}
 
+	dataChan := make(chan Data, config.QueueLength)
+	ctx, cancel := context.WithCancel(context.Background())
+
 	return &LevelQueue{
-		handle:      handle,
-		queue:       queue,
-		batchLength: config.BatchLength,
-		exemplar:    exemplar,
-		closed:      make(chan struct{}),
-		workers:     config.Workers,
+		pool: &WorkerPool{
+			baseCtx:      ctx,
+			cancel:       cancel,
+			batchLength:  config.BatchLength,
+			handle:       handle,
+			dataChan:     dataChan,
+			blockTimeout: config.BlockTimeout,
+			boostTimeout: config.BoostTimeout,
+			boostWorkers: config.BoostWorkers,
+		},
+		queue:      queue,
+		exemplar:   exemplar,
+		closed:     make(chan struct{}),
+		terminated: make(chan struct{}),
+		workers:    config.Workers,
 	}, nil
 }
 
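
Note: the WorkerPool type itself is defined in another file of this commit; the composite literal above only tells us which fields it must expose to this package. An inferred sketch of that shape, with names and types taken solely from the literal (the real type presumably carries more, such as locks and the current worker count):

type WorkerPool struct {
	baseCtx      context.Context
	cancel       context.CancelFunc
	batchLength  int
	handle       HandlerFunc
	dataChan     chan Data
	blockTimeout time.Duration
	boostTimeout time.Duration
	boostWorkers int
}
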
@@ -65,72 +80,66 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func()))
 	atShutdown(context.Background(), l.Shutdown)
 	atTerminate(context.Background(), l.Terminate)
 
-	wg := sync.WaitGroup{}
-	for i := 0; i < l.workers; i++ {
-		wg.Add(1)
-		go func() {
-			l.worker()
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}
-
-func (l *LevelQueue) worker() {
-	var i int
-	var datas = make([]Data, 0, l.batchLength)
-	for {
-		select {
-		case <-l.closed:
-			if len(datas) > 0 {
-				log.Trace("Handling: %d data, %v", len(datas), datas)
-				l.handle(datas...)
-			}
-			return
-		default:
-		}
-		i++
-		if len(datas) > l.batchLength || (len(datas) > 0 && i > 3) {
-			log.Trace("Handling: %d data, %v", len(datas), datas)
-			l.handle(datas...)
-			datas = make([]Data, 0, l.batchLength)
-			i = 0
-			continue
-		}
-
-		bs, err := l.queue.RPop()
-		if err != nil {
-			if err != levelqueue.ErrNotFound {
-				log.Error("RPop: %v", err)
-			}
-			time.Sleep(time.Millisecond * 100)
-			continue
-		}
-
-		if len(bs) == 0 {
-			time.Sleep(time.Millisecond * 100)
-			continue
-		}
-
-		var data Data
-		if l.exemplar != nil {
-			t := reflect.TypeOf(l.exemplar)
-			n := reflect.New(t)
-			ne := n.Elem()
-			err = json.Unmarshal(bs, ne.Addr().Interface())
-			data = ne.Interface().(Data)
-		} else {
-			err = json.Unmarshal(bs, &data)
-		}
-		if err != nil {
-			log.Error("Unmarshal: %v", err)
-			time.Sleep(time.Millisecond * 10)
-			continue
-		}
-
-		log.Trace("LevelQueue: task found: %#v", data)
-
-		datas = append(datas, data)
+	go l.pool.addWorkers(l.pool.baseCtx, l.workers)
+
+	go l.readToChan()
+
+	log.Trace("Waiting til closed")
+	<-l.closed
+
+	log.Trace("Waiting til done")
+	l.pool.Wait()
+	// FIXME: graceful: Needs HammerContext
+	log.Trace("Waiting til cleaned")
+
+	l.pool.CleanUp(context.TODO())
+	log.Trace("cleaned")
+
+}
+
+func (l *LevelQueue) readToChan() {
+	for {
+		select {
+		case <-l.closed:
+			// tell the pool to shutdown.
+			l.pool.cancel()
+			return
+		default:
+			bs, err := l.queue.RPop()
+			if err != nil {
+				if err != levelqueue.ErrNotFound {
+					log.Error("RPop: %v", err)
+				}
+				time.Sleep(time.Millisecond * 100)
+				continue
+			}
+
+			if len(bs) == 0 {
+				time.Sleep(time.Millisecond * 100)
+				continue
+			}
+
+			var data Data
+			if l.exemplar != nil {
+				t := reflect.TypeOf(l.exemplar)
+				n := reflect.New(t)
+				ne := n.Elem()
+				err = json.Unmarshal(bs, ne.Addr().Interface())
+				data = ne.Interface().(Data)
+			} else {
+				err = json.Unmarshal(bs, &data)
+			}
+			if err != nil {
+				log.Error("LevelQueue failed to unmarshal: %v", err)
+				time.Sleep(time.Millisecond * 10)
+				continue
+			}
+
+			log.Trace("LevelQueue: task found: %#v", data)
+			l.pool.Push(data)
+			time.Sleep(time.Millisecond * 10)
+			continue
+
+		}
 	}
 }
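
Note: Run no longer spawns a fixed set of workers behind a WaitGroup; it delegates to the pool (addWorkers, Push, Wait, CleanUp) and keeps a single reader goroutine feeding it. The sketch below is a self-contained illustration of one way such resizing can work, under the assumption suggested by the configuration names: if Push cannot hand work over within blockTimeout, it temporarily adds boostWorkers extra workers for boostTimeout. It is not the WorkerPool added by this commit, which lives in another file, and it omits batching.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type pool struct {
	wg           sync.WaitGroup
	dataChan     chan int
	blockTimeout time.Duration
	boostTimeout time.Duration
	boostWorkers int
}

// addWorkers starts count workers that drain dataChan until ctx is done.
func (p *pool) addWorkers(ctx context.Context, count int) {
	for i := 0; i < count; i++ {
		p.wg.Add(1)
		go func() {
			defer p.wg.Done()
			for {
				select {
				case <-ctx.Done():
					return
				case d := <-p.dataChan:
					fmt.Println("handled", d) // stand-in for the real handler
				}
			}
		}()
	}
}

// Push hands data to the workers; if they stay busy longer than blockTimeout,
// it temporarily resizes the pool upwards for boostTimeout.
func (p *pool) Push(ctx context.Context, data int) {
	select {
	case p.dataChan <- data:
	case <-time.After(p.blockTimeout):
		boostCtx, boostCancel := context.WithTimeout(ctx, p.boostTimeout)
		p.addWorkers(boostCtx, p.boostWorkers)
		go func() {
			<-boostCtx.Done() // release the timeout once the boost expires
			boostCancel()
		}()
		p.dataChan <- data
	}
}

// Wait blocks until every worker, including boosted ones, has returned.
func (p *pool) Wait() { p.wg.Wait() }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	p := &pool{
		dataChan:     make(chan int, 2),
		blockTimeout: 100 * time.Millisecond,
		boostTimeout: time.Second,
		boostWorkers: 3,
	}
	p.addWorkers(ctx, 1)
	for i := 0; i < 10; i++ {
		p.Push(ctx, i)
	}
	time.Sleep(200 * time.Millisecond) // let the workers drain the buffer
	cancel()                           // mirrors l.pool.cancel() on shutdown
	p.Wait()
}

As in the real Run above, anything still sitting in the buffer at shutdown is simply dropped here, which is what the FIXME about needing a HammerContext for graceful shutdown points at.
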
@@ -163,6 +172,7 @@ func (l *LevelQueue) Shutdown() {
 
 // Terminate this queue and close the queue
 func (l *LevelQueue) Terminate() {
+	log.Trace("Terminating")
 	l.Shutdown()
 	if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" {
 		log.Error("Error whilst closing internal queue: %v", err)
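
Note: Shutdown and Terminate are the callbacks Run registers via atShutdown and atTerminate in the earlier hunk. A hedged sketch of how a caller might wire them up, assuming Run with the signature shown above is reachable on the value returned by NewLevelQueue; the hook functions themselves are hypothetical stand-ins for whatever lifecycle manager the application uses:

go q.Run(
	func(ctx context.Context, shutdown func()) {
		// the lifecycle manager would call shutdown() when the process starts shutting down
	},
	func(ctx context.Context, terminate func()) {
		// ...and terminate() just before the process exits, closing the underlying level queue
	},
)
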