Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

27
vendor/github.com/Dai0522/workpool/BUILD.bazel generated vendored Normal file
View File

@@ -0,0 +1,27 @@
# Generated Bazel build file for the vendored github.com/Dai0522/workpool
# package (tags=["automanaged"] marks the filegroups as tool-maintained).
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Compiles the three vendored Go sources. importmap rewrites the path so
# the vendored copy does not collide with a non-vendored import of the
# same importpath.
go_library(
    name = "go_default_library",
    srcs = [
        "buffer.go",
        "pool.go",
        "task.go",
    ],
    importmap = "go-common/vendor/github.com/Dai0522/workpool",
    importpath = "github.com/Dai0522/workpool",
    visibility = ["//visibility:public"],
)

# All files in this package, for source-tree aggregation (private).
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Public roll-up target that parent packages aggregate.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

38
vendor/github.com/Dai0522/workpool/README.md generated vendored Normal file
View File

@@ -0,0 +1,38 @@
# workpool
A lock-free Go worker pool built on a ring buffer.
# usage
```
type TestTask struct {
name string
}
func (t *TestTask) Run() *[]byte {
fmt.Println(t.name)
res := []byte(t.name)
time.Sleep(time.Duration(1 * time.Second))
return &res
}
func createPool() *workpool.Pool {
conf := &workpool.PoolConfig{
MaxWorkers: 1024,
MaxIdleWorkers: 512,
MinIdleWorkers: 128,
KeepAlive: time.Duration(30 * time.Second),
}
p, err := workpool.NewWorkerPool(1024, conf)
if err != nil {
panic(err)
}
p.Start()
return p
}
wp := createPool()
ft := workpool.NewFutureTask(&TestTask{
name: "daiwei",
})
wp.Submit(ft)
res, _ := ft.Wait(time.Duration(3 * time.Second))
```

82
vendor/github.com/Dai0522/workpool/buffer.go generated vendored Normal file
View File

@@ -0,0 +1,82 @@
package workpool
import (
"errors"
"runtime"
"sync/atomic"
)
// ringBuffer is a fixed-capacity ring of *worker manipulated with CAS
// operations (see push/pop). capacity must be a power of two so that
// mask (capacity-1) can wrap indices cheaply.
type ringBuffer struct {
	capacity uint64
	mask     uint64 // capacity - 1, used to wrap indices
	// The [7]uint64 padding fields appear intended to place each hot
	// counter on its own 64-byte cache line to avoid false sharing
	// (7*8 bytes of padding + the 8-byte counter) — TODO confirm.
	padding1 [7]uint64
	// lastCommintIdx (typo for "Commit", kept as-is) is the index of the
	// last slot whose write has been published to readers.
	lastCommintIdx uint64
	padding2       [7]uint64
	// nextFreeIdx is the next slot a writer will claim; starts at 1.
	nextFreeIdx uint64
	padding3    [7]uint64
	// readerIdx is the next slot a reader will consume; starts at 0.
	readerIdx uint64
	padding4  [7]uint64
	slots     []*worker
}
// newRingBuffer returns a ring buffer with the given capacity.
// c must be a non-zero power of two so that c-1 can serve as an index
// mask for cheap wrap-around arithmetic.
func newRingBuffer(c uint64) (*ringBuffer, error) {
	// A power of two has exactly one bit set, i.e. c&(c-1) == 0.
	// The previous check (c&3 != 0) only tested divisibility by 4:
	// it wrongly accepted e.g. 12 (breaking the mask) and rejected 2.
	if c == 0 || c&(c-1) != 0 {
		return nil, errors.New("capacity must be N power of 2")
	}
	return &ringBuffer{
		lastCommintIdx: 0,
		nextFreeIdx:    1,
		readerIdx:      0,
		capacity:       c,
		mask:           c - 1,
		slots:          make([]*worker, c),
	}, nil
}
// push inserts a worker using a lock-free protocol: claim slot `head`
// by CASing nextFreeIdx forward, write the worker into the slot, then
// publish by CASing lastCommintIdx from head-1 to head (which also
// forces publication in claim order). Returns an error instead of
// blocking when the buffer is full.
//
// NOTE(review): all indices are kept masked into [0, capacity), so when
// head wraps to 0 the publish CAS expects lastCommintIdx == head-1,
// which underflows to MaxUint64 and can never match a masked value —
// this looks like it would spin forever at wrap-around; confirm against
// upstream before relying on it.
func (r *ringBuffer) push(w *worker) error {
	var head, tail, next uint64
	for {
		// Racy snapshot of both cursors; the CAS below validates head.
		head = r.nextFreeIdx
		tail = r.readerIdx
		// Full check keeps a gap between writer and reader cursors.
		if (head > tail+r.capacity-2) || (head < tail-1) {
			return errors.New("buffer is full")
		}
		next = (head + 1) & r.mask
		if atomic.CompareAndSwapUint64(&r.nextFreeIdx, head, next) {
			break
		}
		// Lost the claim race; yield and retry.
		runtime.Gosched()
	}
	r.slots[head] = w
	// Publish: wait until the previous slot has been committed.
	for !atomic.CompareAndSwapUint64(&r.lastCommintIdx, head-1, head) {
		runtime.Gosched()
	}
	return nil
}
// pop advances readerIdx with a CAS and returns the worker at the
// previously observed position.
//
// NOTE(review): when the buffer looks empty (readerIdx ==
// lastCommintIdx) this returns r.slots[head] rather than an explicit
// nil — callers only observe nil while that slot is still zero-valued;
// after wrap-around it could be a stale pointer. Also note writers
// claim slots starting at nextFreeIdx=1 while readers start at
// readerIdx=0, so reads appear offset by one slot from writes — TODO
// confirm the intended protocol with upstream.
func (r *ringBuffer) pop() *worker {
	var head, next uint64
	for {
		head = r.readerIdx
		if head == r.lastCommintIdx {
			// Empty (as observed); return current slot contents.
			return r.slots[head]
		}
		next = (head + 1) & r.mask
		if atomic.CompareAndSwapUint64(&r.readerIdx, head, next) {
			break
		}
		// Lost the race to another reader; yield and retry.
		runtime.Gosched()
	}
	return r.slots[head]
}
// size reports the number of published-but-unread slots.
// NOTE(review): both counters are read without atomics, so under
// concurrent push/pop this is only an approximation (unsigned
// subtraction also wraps if the reader observes inconsistent values).
func (r *ringBuffer) size() uint64 {
	return r.lastCommintIdx - r.readerIdx
}

224
vendor/github.com/Dai0522/workpool/pool.go generated vendored Normal file
View File

@@ -0,0 +1,224 @@
package workpool
import (
"errors"
"runtime"
"sync"
"time"
)
// Pool lifecycle states. Transitions are serialized by Pool.changeState:
// create -> running (Start) -> stopping (Stop) -> shutdown (cleaner exit).
const (
	stateCreate   = 0
	stateRunning  = 1
	stateStopping = 2
	stateShutdown = 3
)
// PoolConfig holds the sizing and eviction knobs for a Pool.
type PoolConfig struct {
	// MaxWorkers caps the total number of spawned worker goroutines.
	MaxWorkers uint64
	// MaxIdleWorkers is declared but not referenced in this file —
	// TODO confirm whether it is used elsewhere or is dead config.
	MaxIdleWorkers uint64
	// MinIdleWorkers is the idle count below which clean stops evicting.
	MinIdleWorkers uint64
	// KeepAlive is both the cleaner's wake interval and the idle age
	// beyond which a worker is evicted.
	KeepAlive time.Duration
}
// Pool is a worker pool whose idle workers are parked in a lock-free
// ring buffer; lock guards curWorkers and state transitions.
type Pool struct {
	conf *PoolConfig
	// padding1/padding2 appear intended to separate hot fields onto
	// distinct cache lines — TODO confirm.
	padding1 [8]uint64
	// ready holds idle workers awaiting tasks.
	ready *ringBuffer
	// curWorkers counts live workers; guarded by lock.
	curWorkers uint64
	padding2   [8]uint64
	lock       sync.Mutex
	// state is one of the stateXxx constants; guarded by lock.
	state uint8
	// stop signals the cleaner goroutine to drain and shut down.
	stop chan uint8
}
// worker is a single task-serving goroutine's mailbox and metadata.
type worker struct {
	id uint64
	// lastUseTime is updated by Pool.release and read by Pool.clean to
	// decide idle eviction.
	lastUseTime time.Time
	// ftch delivers tasks to the serving goroutine; closing it stops
	// the goroutine.
	ftch chan *FutureTask
}
// wChanCap is the capacity of every worker's task channel, chosen once
// at init based on GOMAXPROCS.
var wChanCap = func() int {
	// Use a blocking (unbuffered) worker channel if GOMAXPROCS=1.
	// This immediately hands control to the worker goroutine, which
	// measured faster (under go1.5 at least).
	if runtime.GOMAXPROCS(0) == 1 {
		return 0
	}
	// Use a non-blocking (capacity-1) channel if GOMAXPROCS>1, so the
	// submitter is not delayed behind a CPU-bound task.
	return 1
}()
// newWorker allocates a worker with the given id, a fresh last-use
// timestamp, and a task channel of the package-wide wChanCap capacity.
func newWorker(wid uint64) *worker {
	w := new(worker)
	w.id = wid
	w.lastUseTime = time.Now()
	w.ftch = make(chan *FutureTask, wChanCap)
	return w
}
// NewWorkerPool builds a Pool whose idle-worker ring has the given
// capacity. capacity must be a non-zero power of two; conf must be
// non-nil (its KeepAlive drives the cleaner loop launched by Start).
// The pool is returned in the created state — call Start to use it.
func NewWorkerPool(capacity uint64, conf *PoolConfig) (p *Pool, err error) {
	// Power-of-two test: exactly one bit set <=> capacity&(capacity-1) == 0.
	// The previous capacity&3 check only tested divisibility by 4 and
	// accepted invalid sizes such as 12 (breaking the ring's mask).
	if capacity == 0 || capacity&(capacity-1) != 0 {
		err = errors.New("capacity must be bigger than zero and a power of 2")
		return
	}
	// Fail fast on a nil config instead of panicking later when the
	// cleaner goroutine dereferences p.conf.KeepAlive.
	if conf == nil {
		err = errors.New("conf must not be nil")
		return
	}
	rb, err := newRingBuffer(capacity)
	if err != nil {
		return
	}
	p = &Pool{
		conf:       conf,
		ready:      rb,
		curWorkers: 0,
		state:      stateCreate,
		stop:       make(chan uint8, 1),
	}
	return
}
// changeState atomically (under p.lock) moves the pool from old to new,
// reporting whether the transition happened; it is a no-op returning
// false when the current state is not old.
func (p *Pool) changeState(old, new uint8) bool {
	p.lock.Lock()
	defer p.lock.Unlock()
	ok := p.state == old
	if ok {
		p.state = new
	}
	return ok
}
// Start moves the pool from created to running and launches the
// background cleaner goroutine; it fails if the pool was already
// started. The goroutine evicts idle workers via clean, then sleeps
// conf.KeepAlive — until Stop signals on p.stop, at which point it
// drains every worker, transitions the state to shutdown, closes
// p.stop and exits.
func (p *Pool) Start() error {
	if !p.changeState(stateCreate, stateRunning) {
		return errors.New("workerpool already started")
	}
	go func() {
		defer close(p.stop)
		for {
			p.clean()
			select {
			case <-p.stop:
				p.cleanAll()
				// Stop set stateStopping before signalling, so this
				// normally succeeds on the first try; spin defensively.
				for !p.changeState(stateStopping, stateShutdown) {
					runtime.Gosched()
				}
				return
			default:
				time.Sleep(p.conf.KeepAlive)
			}
		}
	}()
	return nil
}
// Stop transitions a running pool to stopping and signals the cleaner
// goroutine to drain all workers. It fails if the pool is not running
// (already stopping, shut down, or never started).
func (p *Pool) Stop() error {
	if p.changeState(stateRunning, stateStopping) {
		p.stop <- stateStopping
		return nil
	}
	return errors.New("workerpool is stopping")
}
// Submit dispatches a future task to an idle worker, spawning a new one
// if needed. It fails when the pool is already at MaxWorkers with no
// worker ready.
func (p *Pool) Submit(ft *FutureTask) error {
	wk, err := p.getReadyWorker()
	if err == nil {
		wk.ftch <- ft
	}
	return err
}
// getReadyWorker pops an idle worker from the ready ring; when none is
// available it lazily spawns a new worker (up to conf.MaxWorkers) along
// with its serving goroutine. That goroutine executes tasks from
// w.ftch until the channel is closed, parking the worker back on the
// ring after each task.
//
// NOTE(review): if ft.T.Run panics, the serving goroutine dies without
// decrementing curWorkers, permanently leaking a pool slot — confirm
// whether callers guarantee non-panicking tasks.
func (p *Pool) getReadyWorker() (w *worker, err error) {
	w = p.ready.pop()
	if w == nil {
		p.lock.Lock()
		// Snapshot the next worker id while holding the lock.
		workerID := p.curWorkers
		if p.curWorkers >= p.conf.MaxWorkers {
			err = errors.New("workerpool is full")
			p.lock.Unlock()
			return
		}
		p.curWorkers++
		p.lock.Unlock()
		w = newWorker(workerID)
		go func(w *worker) {
			for {
				ft, ok := <-w.ftch
				if !ok {
					// Channel closed by Pool.close: worker retires.
					return
				}
				// Deliver the result (out is buffered, see task.go),
				// then park this worker for reuse.
				ft.out <- ft.T.Run()
				p.release(w)
			}
		}(w)
	}
	return
}
// close retires one worker: it decrements the live-worker count under
// the lock and closes the task channel, which terminates the worker's
// serving goroutine.
// NOTE(review): closing the same worker twice would panic on the double
// close of ftch — callers must guarantee each worker is closed once.
func (p *Pool) close(w *worker) {
	p.lock.Lock()
	defer p.lock.Unlock()
	if p.curWorkers > 0 {
		p.curWorkers--
	}
	close(w.ftch)
}
// release parks a worker back on the ready ring after it finished a
// task, refreshing its idle timestamp. The worker is closed instead
// when the pool is stopping/shut down or the ring is full.
// NOTE(review): p.state is read here without p.lock — a data race with
// changeState; confirm whether upstream tolerates the stale read.
func (p *Pool) release(w *worker) {
	if p.state > stateRunning {
		p.close(w)
		return
	}
	w.lastUseTime = time.Now()
	if err := p.ready.push(w); err != nil {
		p.close(w)
	}
}
// clean evicts idle workers while more than conf.MinIdleWorkers are
// parked. Workers are popped in ring order; the first one still used
// within conf.KeepAlive is parked again and the scan stops (anything
// behind it is at least as fresh under FIFO reuse).
func (p *Pool) clean() {
	for {
		size := p.ready.size()
		if size <= p.conf.MinIdleWorkers {
			return
		}
		w := p.ready.pop()
		if w == nil {
			return
		}
		currentTime := time.Now()
		if currentTime.Sub(w.lastUseTime) < p.conf.KeepAlive {
			// Still fresh: put it back and stop evicting.
			p.release(w)
			return
		}
		p.close(w)
	}
}
// cleanAll drains the ready ring during shutdown. By the time it runs,
// p.state > stateRunning, so release routes every popped worker to
// close rather than parking it again.
func (p *Pool) cleanAll() {
	for {
		w := p.ready.pop()
		if w == nil {
			return
		}
		p.release(w)
	}
}

35
vendor/github.com/Dai0522/workpool/task.go generated vendored Normal file
View File

@@ -0,0 +1,35 @@
package workpool
import (
"fmt"
"time"
)
// Task is the unit of work executed by a pool worker. Run performs the
// work and returns a pointer to the result bytes (delivered to the
// submitting FutureTask).
type Task interface {
	Run() *[]byte
}
// FutureTask pairs a Task with the channel its result is delivered on;
// Wait consumes that channel.
// NOTE(review): the original comment claimed out must be an unbuffered
// chan (size=0), but NewFutureTask allocates it with capacity 1 — that
// buffer lets the worker deliver a result without blocking even after
// Wait has timed out; the code appears to be the real contract.
type FutureTask struct {
	T   Task
	out chan *[]byte
}
// NewFutureTask wraps t so its result can be awaited with Wait. The
// result channel is buffered with capacity 1 so a worker can deliver
// without blocking.
func NewFutureTask(t Task) *FutureTask {
	ft := new(FutureTask)
	ft.T = t
	ft.out = make(chan *[]byte, 1)
	return ft
}
// Wait blocks until the task result arrives or timeout elapses,
// returning a timeout error (and a nil result) in the latter case.
func (ft *FutureTask) Wait(timeout time.Duration) (res *[]byte, err error) {
	expired := time.After(timeout)
	select {
	case <-expired:
		err = fmt.Errorf("task(%+v) timeout", ft)
		return
	case r := <-ft.out:
		res = r
		return
	}
}