Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

19
library/sync/BUILD Normal file
View File

@@ -0,0 +1,19 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//library/sync/errgroup:all-srcs",
"//library/sync/errgroup.v2:all-srcs",
"//library/sync/pipeline:all-srcs",
],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,41 @@
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"errgroup.go",
],
importpath = "go-common/library/sync/errgroup.v2",
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = [
"errgroup_test.go",
"example_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,7 @@
### errgroup
#### Version 1.1.0
> 1.支持 MaxProc 限制并发执行数
#### Version 1.0.0
> 1.提供带 recover 的 errgroup,Wait() 返回的 err 包含完整的堆栈信息

View File

@@ -0,0 +1,5 @@
# Author
weicheng
# Reviewer
haoguanwei

View File

@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- weicheng
reviewers:
- haoguanwei
- weicheng

View File

@@ -0,0 +1,3 @@
# go-common/errgroup.v2
提供带 recover 的 errgroup,err 中包含详细堆栈信息

View File

@@ -0,0 +1,47 @@
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
//
// There are three common ways to use this package:
//
// 1. Zero value — a failing task does NOT cancel the other tasks:
//	g := &errgroup.Group{}
//	g.Go(func(ctx context.Context) {
//		// NOTE: here ctx is context.Background()
//		// do something
//	})
//
// 2. WithContext — tasks receive the supplied ctx; a failing task still does
// NOT cancel the other tasks:
//	g := errgroup.WithContext(ctx)
//	g.Go(func(ctx context.Context) {
//		// NOTE: here ctx is the ctx passed to errgroup.WithContext
//		// do something
//	})
//
// 3. WithCancel — the first failing task cancels every task that has not yet
// started or is still running:
//	g := errgroup.WithCancel(ctx)
//	g.Go(func(ctx context.Context) {
//		// NOTE: here ctx is derived from the ctx passed to errgroup.WithCancel
//		// do something
//	})
//
// Setting a concurrency limit with GOMAXPROCS works with all three forms.
// NOTE: because of how the worker pool is implemented, a group that sets
// GOMAXPROCS must call Wait() promptly. For example:
//
//	g := errgroup.WithCancel(ctx)
//	g.GOMAXPROCS(2)
//	// task1
//	g.Go(func(ctx context.Context) {
//		fmt.Println("task1")
//	})
//	// task2
//	g.Go(func(ctx context.Context) {
//		fmt.Println("task2")
//	})
//	// task3
//	g.Go(func(ctx context.Context) {
//		fmt.Println("task3")
//	})
//	// NOTE: with GOMAXPROCS(2) and three tasks queued, task3 does not run
//	// until Wait flushes the backlog.
//	g.Wait() // task3 runs here
package errgroup

View File

@@ -0,0 +1,125 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
package errgroup
import (
"context"
"fmt"
"runtime"
"sync"
)
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero Group is valid and does not cancel on error.
type Group struct {
	err        error          // first non-nil error returned by a task; set under errOnce
	wg         sync.WaitGroup // tracks in-flight tasks
	errOnce    sync.Once      // records err (and fires cancel) exactly once
	workerOnce sync.Once      // starts the worker pool at most once
	ch         chan func(ctx context.Context) error // task queue when GOMAXPROCS is set; nil otherwise
	chs        []func(ctx context.Context) error    // overflow backlog, drained by Wait when ch is full
	ctx        context.Context // base context handed to tasks; nil means context.Background()
	cancel     func()          // cancels ctx; non-nil only for groups built with WithCancel
}
// WithContext creates a Group whose tasks receive ctx unmodified.
// The group never cancels this context; use WithCancel for fail-fast
// cancelation.
func WithContext(ctx context.Context) *Group {
	return &Group{ctx: ctx}
}
// WithCancel creates a new Group and stores a Context derived from ctx.
//
// Functions passed to Go receive the derived context, which is canceled the
// first time a task returns a non-nil error or the first time Wait returns,
// whichever occurs first.
func WithCancel(ctx context.Context) *Group {
	derived, abort := context.WithCancel(ctx)
	g := &Group{ctx: derived, cancel: abort}
	return g
}
// do runs f with the group's context, converting a panic into an error that
// carries the stack trace, recording the first error (and canceling the
// group, for WithCancel groups), and marking the task done.
func (g *Group) do(f func(ctx context.Context) error) {
	// Resolve the task's context: the group's, or Background for a zero Group.
	ctx := g.ctx
	if ctx == nil {
		ctx = context.Background()
	}
	var err error
	defer func() {
		// A panicking task surfaces as an error instead of crashing the process.
		if r := recover(); r != nil {
			buf := make([]byte, 64<<10)
			buf = buf[:runtime.Stack(buf, false)]
			err = fmt.Errorf("errgroup: panic recovered: %s\n%s", r, buf)
		}
		// Keep only the first error; it also cancels sibling tasks when the
		// group was built with WithCancel.
		if err != nil {
			g.errOnce.Do(func() {
				g.err = err
				if g.cancel != nil {
					g.cancel()
				}
			})
		}
		g.wg.Done()
	}()
	err = f(ctx)
}
// GOMAXPROCS sets the maximum number of worker goroutines used to run tasks.
// It takes effect only on the first call (workerOnce) and n must be positive.
//
// NOTE: once a pool is active, tasks queued beyond the pool size only run
// when Wait flushes the backlog, so Wait must be called promptly.
func (g *Group) GOMAXPROCS(n int) {
	if n <= 0 {
		// Fixed message: was the ungrammatical "must great than 0".
		panic("errgroup: GOMAXPROCS must be greater than 0")
	}
	g.workerOnce.Do(func() {
		g.ch = make(chan func(context.Context) error, n)
		// Start n long-lived workers; they exit when Wait closes g.ch.
		for i := 0; i < n; i++ {
			go func() {
				for f := range g.ch {
					g.do(f)
				}
			}()
		}
	})
}
// Go calls the given function in a new goroutine.
//
// The first call to return a non-nil error cancels the group (WithCancel
// groups only); its error will be returned by Wait.
func (g *Group) Go(f func(ctx context.Context) error) {
	g.wg.Add(1)
	// Worker pool active (GOMAXPROCS was called): non-blocking enqueue; when
	// the queue is full, park f in the backlog that Wait flushes later.
	if g.ch != nil {
		select {
		case g.ch <- f:
		default:
			g.chs = append(g.chs, f)
		}
		return
	}
	// No concurrency limit: run f in its own goroutine immediately.
	go g.do(f)
}
// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
//
// NOTE(review): when GOMAXPROCS was used, Wait closes the internal queue, so
// calling Go after Wait would panic on a send to a closed channel — confirm
// callers never do this.
func (g *Group) Wait() error {
	// Flush the backlog accumulated by Go while the queue was full.
	if g.ch != nil {
		for _, f := range g.chs {
			g.ch <- f
		}
	}
	g.wg.Wait()
	if g.ch != nil {
		close(g.ch) // let all receiver exit
	}
	// For WithCancel groups, release the derived context once all tasks end.
	if g.cancel != nil {
		g.cancel()
	}
	return g.err
}

View File

@@ -0,0 +1,266 @@
package errgroup
import (
"context"
"errors"
"fmt"
"math"
"net/http"
"os"
"testing"
"time"
)
type ABC struct {
CBA int
}
func TestNormal(t *testing.T) {
var (
abcs = make(map[int]*ABC)
g Group
err error
)
for i := 0; i < 10; i++ {
abcs[i] = &ABC{CBA: i}
}
g.Go(func(context.Context) (err error) {
abcs[1].CBA++
return
})
g.Go(func(context.Context) (err error) {
abcs[2].CBA++
return
})
if err = g.Wait(); err != nil {
t.Log(err)
}
t.Log(abcs)
}
func sleep1s(context.Context) error {
time.Sleep(time.Second)
return nil
}
func TestGOMAXPROCS(t *testing.T) {
// 没有并发数限制
g := Group{}
now := time.Now()
g.Go(sleep1s)
g.Go(sleep1s)
g.Go(sleep1s)
g.Go(sleep1s)
g.Wait()
sec := math.Round(time.Since(now).Seconds())
if sec != 1 {
t.FailNow()
}
// 限制并发数
g2 := Group{}
g2.GOMAXPROCS(2)
now = time.Now()
g2.Go(sleep1s)
g2.Go(sleep1s)
g2.Go(sleep1s)
g2.Go(sleep1s)
g2.Wait()
sec = math.Round(time.Since(now).Seconds())
if sec != 2 {
t.FailNow()
}
// context canceled
var canceled bool
g3 := WithCancel(context.Background())
g3.GOMAXPROCS(2)
g3.Go(func(context.Context) error {
return fmt.Errorf("error for testing errgroup context")
})
g3.Go(func(ctx context.Context) error {
time.Sleep(time.Second)
select {
case <-ctx.Done():
canceled = true
default:
}
return nil
})
g3.Wait()
if !canceled {
t.FailNow()
}
}
func TestRecover(t *testing.T) {
var (
abcs = make(map[int]*ABC)
g Group
err error
)
g.Go(func(context.Context) (err error) {
abcs[1].CBA++
return
})
g.Go(func(context.Context) (err error) {
abcs[2].CBA++
return
})
if err = g.Wait(); err != nil {
t.Logf("error:%+v", err)
return
}
t.FailNow()
}
func TestRecover2(t *testing.T) {
var (
g Group
err error
)
g.Go(func(context.Context) (err error) {
panic("2233")
})
if err = g.Wait(); err != nil {
t.Logf("error:%+v", err)
return
}
t.FailNow()
}
var (
Web = fakeSearch("web")
Image = fakeSearch("image")
Video = fakeSearch("video")
)
type Result string
type Search func(ctx context.Context, query string) (Result, error)
func fakeSearch(kind string) Search {
return func(_ context.Context, query string) (Result, error) {
return Result(fmt.Sprintf("%s result for %q", kind, query)), nil
}
}
// JustErrors illustrates the use of a Group in place of a sync.WaitGroup to
// simplify goroutine counting and error handling. This example is derived from
// the sync.WaitGroup example at https://golang.org/pkg/sync/#example_WaitGroup.
func ExampleGroup_justErrors() {
var g Group
var urls = []string{
"http://www.golang.org/",
"http://www.google.com/",
"http://www.somestupidname.com/",
}
for _, url := range urls {
// Launch a goroutine to fetch the URL.
url := url // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func(context.Context) error {
// Fetch the URL.
resp, err := http.Get(url)
if err == nil {
resp.Body.Close()
}
return err
})
}
// Wait for all HTTP fetches to complete.
if err := g.Wait(); err == nil {
fmt.Println("Successfully fetched all URLs.")
}
}
// Parallel illustrates the use of a Group for synchronizing a simple parallel
// task: the "Google Search 2.0" function from
// https://talks.golang.org/2012/concurrency.slide#46, augmented with a Context
// and error-handling.
func ExampleGroup_parallel() {
Google := func(ctx context.Context, query string) ([]Result, error) {
g := WithContext(ctx)
searches := []Search{Web, Image, Video}
results := make([]Result, len(searches))
for i, search := range searches {
i, search := i, search // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func(context.Context) error {
result, err := search(ctx, query)
if err == nil {
results[i] = result
}
return err
})
}
if err := g.Wait(); err != nil {
return nil, err
}
return results, nil
}
results, err := Google(context.Background(), "golang")
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
for _, result := range results {
fmt.Println(result)
}
// Output:
// web result for "golang"
// image result for "golang"
// video result for "golang"
}
func TestZeroGroup(t *testing.T) {
err1 := errors.New("errgroup_test: 1")
err2 := errors.New("errgroup_test: 2")
cases := []struct {
errs []error
}{
{errs: []error{}},
{errs: []error{nil}},
{errs: []error{err1}},
{errs: []error{err1, nil}},
{errs: []error{err1, nil, err2}},
}
for _, tc := range cases {
var g Group
var firstErr error
for i, err := range tc.errs {
err := err
g.Go(func(context.Context) error { return err })
if firstErr == nil && err != nil {
firstErr = err
}
if gErr := g.Wait(); gErr != firstErr {
t.Errorf("after g.Go(func() error { return err }) for err in %v\n"+
"g.Wait() = %v; want %v", tc.errs[:i+1], err, firstErr)
}
}
}
}
func TestWithCancel(t *testing.T) {
g := WithCancel(context.Background())
g.Go(func(ctx context.Context) error {
time.Sleep(100 * time.Millisecond)
return fmt.Errorf("boom")
})
var doneErr error
g.Go(func(ctx context.Context) error {
select {
case <-ctx.Done():
doneErr = ctx.Err()
}
return doneErr
})
g.Wait()
if doneErr != context.Canceled {
t.Error("error should be Canceled")
}
}

View File

@@ -0,0 +1,63 @@
package errgroup
import (
"context"
)
func fakeRunTask(ctx context.Context) error {
return nil
}
func ExampleGroup_group() {
g := Group{}
g.Go(func(context.Context) error {
return fakeRunTask(context.Background())
})
g.Go(func(context.Context) error {
return fakeRunTask(context.Background())
})
if err := g.Wait(); err != nil {
// handle err
}
}
func ExampleGroup_ctx() {
g := WithContext(context.Background())
g.Go(func(ctx context.Context) error {
return fakeRunTask(ctx)
})
g.Go(func(ctx context.Context) error {
return fakeRunTask(ctx)
})
if err := g.Wait(); err != nil {
// handle err
}
}
func ExampleGroup_cancel() {
g := WithCancel(context.Background())
g.Go(func(ctx context.Context) error {
return fakeRunTask(ctx)
})
g.Go(func(ctx context.Context) error {
return fakeRunTask(ctx)
})
if err := g.Wait(); err != nil {
// handle err
}
}
func ExampleGroup_maxproc() {
g := Group{}
// set max concurrency
g.GOMAXPROCS(2)
g.Go(func(ctx context.Context) error {
return fakeRunTask(context.Background())
})
g.Go(func(ctx context.Context) error {
return fakeRunTask(context.Background())
})
if err := g.Wait(); err != nil {
// handle err
}
}

View File

@@ -0,0 +1,39 @@
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["errgroup.go"],
importpath = "go-common/library/sync/errgroup",
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = [
"errgroup_test.go",
"example_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = ["@org_golang_x_net//context:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,7 @@
### errgroup
#### Version 1.1.0
> 1.支持 MaxProc 限制并发执行数
#### Version 1.0.0
> 1.提供带 recover 的 errgroup,Wait() 返回的 err 包含完整的堆栈信息

View File

@@ -0,0 +1,5 @@
# Author
peiyifei
# Reviewer
haoguanwei

View File

@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- peiyifei
reviewers:
- haoguanwei
- peiyifei

View File

@@ -0,0 +1,3 @@
# go-common/errgroup
提供带 recover 的 errgroup,err 中包含详细堆栈信息

View File

@@ -0,0 +1,114 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
package errgroup
import (
"context"
"fmt"
"runtime"
"sync"
)
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero Group is valid and does not cancel on error.
type Group struct {
	err        error          // first non-nil error returned by a task; set under errOnce
	wg         sync.WaitGroup // tracks in-flight tasks
	errOnce    sync.Once      // records err (and fires cancel) exactly once
	workerOnce sync.Once      // starts the worker pool at most once
	ch         chan func() error // task queue when GOMAXPROCS is set; nil otherwise
	chs        []func() error    // overflow backlog, drained by Wait when ch is full
	cancel     func()            // cancels the Context returned by WithContext; nil for zero Group
}
// WithContext returns a new Group and an associated Context derived from ctx.
//
// The derived Context is canceled the first time a function passed to Go
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
	child, cancel := context.WithCancel(ctx)
	g := &Group{cancel: cancel}
	return g, child
}
// do runs f, converting a panic into an error that carries the stack trace,
// records the first error (canceling the derived context, if any), and marks
// the task done.
func (g *Group) do(f func() error) {
	var err error
	defer func() {
		// A panicking task surfaces as an error instead of crashing the process.
		if r := recover(); r != nil {
			buf := make([]byte, 64<<10)
			buf = buf[:runtime.Stack(buf, false)]
			err = fmt.Errorf("errgroup: panic recovered: %s\n%s", r, buf)
		}
		// Keep only the first error; it also cancels the derived context.
		if err != nil {
			g.errOnce.Do(func() {
				g.err = err
				if g.cancel != nil {
					g.cancel()
				}
			})
		}
		g.wg.Done()
	}()
	err = f()
}
// GOMAXPROCS sets the maximum number of worker goroutines used to run tasks.
// It takes effect only on the first call (workerOnce) and n must be positive.
//
// NOTE: once a pool is active, tasks queued beyond the pool size only run
// when Wait flushes the backlog, so Wait must be called promptly.
func (g *Group) GOMAXPROCS(n int) {
	if n <= 0 {
		// Fixed message: was the ungrammatical "must great than 0".
		panic("errgroup: GOMAXPROCS must be greater than 0")
	}
	g.workerOnce.Do(func() {
		g.ch = make(chan func() error, n)
		// Start n long-lived workers; they exit when Wait closes g.ch.
		for i := 0; i < n; i++ {
			go func() {
				for f := range g.ch {
					g.do(f)
				}
			}()
		}
	})
}
// Go calls the given function in a new goroutine.
//
// The first call to return a non-nil error cancels the group (when created
// via WithContext); its error will be returned by Wait.
func (g *Group) Go(f func() error) {
	g.wg.Add(1)
	// Worker pool active (GOMAXPROCS was called): non-blocking enqueue; when
	// the queue is full, park f in the backlog that Wait flushes later.
	if g.ch != nil {
		select {
		case g.ch <- f:
		default:
			g.chs = append(g.chs, f)
		}
		return
	}
	// No concurrency limit: run f in its own goroutine immediately.
	go g.do(f)
}
// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
//
// NOTE(review): when GOMAXPROCS was used, Wait closes the internal queue, so
// calling Go after Wait would panic on a send to a closed channel — confirm
// callers never do this.
func (g *Group) Wait() error {
	// Flush the backlog accumulated by Go while the queue was full.
	if g.ch != nil {
		for _, f := range g.chs {
			g.ch <- f
		}
	}
	g.wg.Wait()
	if g.ch != nil {
		close(g.ch) // let all receiver exit
	}
	// Cancel the derived context once all tasks are done.
	if g.cancel != nil {
		g.cancel()
	}
	return g.err
}

View File

@@ -0,0 +1,288 @@
package errgroup
import (
"errors"
"fmt"
"math"
"net/http"
"os"
"testing"
"time"
"golang.org/x/net/context"
)
type ABC struct {
CBA int
}
func TestNormal(t *testing.T) {
var (
abcs = make(map[int]*ABC)
g Group
err error
)
for i := 0; i < 10; i++ {
abcs[i] = &ABC{CBA: i}
}
g.Go(func() (err error) {
abcs[1].CBA++
return
})
g.Go(func() (err error) {
abcs[2].CBA++
return
})
if err = g.Wait(); err != nil {
t.Log(err)
}
t.Log(abcs)
}
func sleep1s() error {
time.Sleep(time.Second)
return nil
}
func TestGOMAXPROCS(t *testing.T) {
// 没有并发数限制
g := Group{}
now := time.Now()
g.Go(sleep1s)
g.Go(sleep1s)
g.Go(sleep1s)
g.Go(sleep1s)
g.Wait()
sec := math.Round(time.Since(now).Seconds())
if sec != 1 {
t.FailNow()
}
// 限制并发数
g2 := Group{}
g2.GOMAXPROCS(2)
now = time.Now()
g2.Go(sleep1s)
g2.Go(sleep1s)
g2.Go(sleep1s)
g2.Go(sleep1s)
g2.Wait()
sec = math.Round(time.Since(now).Seconds())
if sec != 2 {
t.FailNow()
}
// context canceled
var canceled bool
g3, ctx := WithContext(context.Background())
g3.GOMAXPROCS(2)
g3.Go(func() error {
return fmt.Errorf("error for testing errgroup context")
})
g3.Go(func() error {
time.Sleep(time.Second)
select {
case <-ctx.Done():
canceled = true
default:
}
return nil
})
g3.Wait()
if !canceled {
t.FailNow()
}
}
func TestRecover(t *testing.T) {
var (
abcs = make(map[int]*ABC)
g Group
err error
)
g.Go(func() (err error) {
abcs[1].CBA++
return
})
g.Go(func() (err error) {
abcs[2].CBA++
return
})
if err = g.Wait(); err != nil {
t.Logf("error:%+v", err)
return
}
t.FailNow()
}
func TestRecover2(t *testing.T) {
var (
g Group
err error
)
g.Go(func() (err error) {
panic("2233")
})
if err = g.Wait(); err != nil {
t.Logf("error:%+v", err)
return
}
t.FailNow()
}
var (
Web = fakeSearch("web")
Image = fakeSearch("image")
Video = fakeSearch("video")
)
type Result string
type Search func(ctx context.Context, query string) (Result, error)
func fakeSearch(kind string) Search {
return func(_ context.Context, query string) (Result, error) {
return Result(fmt.Sprintf("%s result for %q", kind, query)), nil
}
}
// JustErrors illustrates the use of a Group in place of a sync.WaitGroup to
// simplify goroutine counting and error handling. This example is derived from
// the sync.WaitGroup example at https://golang.org/pkg/sync/#example_WaitGroup.
func ExampleGroup_justErrors() {
var g Group
var urls = []string{
"http://www.golang.org/",
"http://www.google.com/",
"http://www.somestupidname.com/",
}
for _, url := range urls {
// Launch a goroutine to fetch the URL.
url := url // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func() error {
// Fetch the URL.
resp, err := http.Get(url)
if err == nil {
resp.Body.Close()
}
return err
})
}
// Wait for all HTTP fetches to complete.
if err := g.Wait(); err == nil {
fmt.Println("Successfully fetched all URLs.")
}
}
// Parallel illustrates the use of a Group for synchronizing a simple parallel
// task: the "Google Search 2.0" function from
// https://talks.golang.org/2012/concurrency.slide#46, augmented with a Context
// and error-handling.
func ExampleGroup_parallel() {
Google := func(ctx context.Context, query string) ([]Result, error) {
g, ctx := WithContext(ctx)
searches := []Search{Web, Image, Video}
results := make([]Result, len(searches))
for i, search := range searches {
i, search := i, search // https://golang.org/doc/faq#closures_and_goroutines
g.Go(func() error {
result, err := search(ctx, query)
if err == nil {
results[i] = result
}
return err
})
}
if err := g.Wait(); err != nil {
return nil, err
}
return results, nil
}
results, err := Google(context.Background(), "golang")
if err != nil {
fmt.Fprintln(os.Stderr, err)
return
}
for _, result := range results {
fmt.Println(result)
}
// Output:
// web result for "golang"
// image result for "golang"
// video result for "golang"
}
func TestZeroGroup(t *testing.T) {
err1 := errors.New("errgroup_test: 1")
err2 := errors.New("errgroup_test: 2")
cases := []struct {
errs []error
}{
{errs: []error{}},
{errs: []error{nil}},
{errs: []error{err1}},
{errs: []error{err1, nil}},
{errs: []error{err1, nil, err2}},
}
for _, tc := range cases {
var g Group
var firstErr error
for i, err := range tc.errs {
err := err
g.Go(func() error { return err })
if firstErr == nil && err != nil {
firstErr = err
}
if gErr := g.Wait(); gErr != firstErr {
t.Errorf("after g.Go(func() error { return err }) for err in %v\n"+
"g.Wait() = %v; want %v", tc.errs[:i+1], err, firstErr)
}
}
}
}
func TestWithContext(t *testing.T) {
errDoom := errors.New("group_test: doomed")
cases := []struct {
errs []error
want error
}{
{want: nil},
{errs: []error{nil}, want: nil},
{errs: []error{errDoom}, want: errDoom},
{errs: []error{errDoom, nil}, want: errDoom},
}
for _, tc := range cases {
g, ctx := WithContext(context.Background())
for _, err := range tc.errs {
err := err
g.Go(func() error { return err })
}
if err := g.Wait(); err != tc.want {
t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+
"g.Wait() = %v; want %v",
g, tc.errs, err, tc.want)
}
canceled := false
select {
case <-ctx.Done():
canceled = true
default:
}
if !canceled {
t.Errorf("after %T.Go(func() error { return err }) for err in %v\n"+
"ctx.Done() was not closed",
g, tc.errs)
}
}
}

View File

@@ -0,0 +1,65 @@
package errgroup
import (
"context"
"sync"
)
func fakeRunTask(ctx context.Context) error {
return nil
}
func ExampleGroup_group() {
g := Group{}
g.Go(func() error {
return fakeRunTask(context.Background())
})
g.Go(func() error {
return fakeRunTask(context.Background())
})
if err := g.Wait(); err != nil {
// handle err
}
}
func ExampleGroup_ctx() {
g, ctx := WithContext(context.Background())
g.Go(func() error {
return fakeRunTask(ctx)
})
g.Go(func() error {
return fakeRunTask(ctx)
})
if err := g.Wait(); err != nil {
// handle err
}
}
func ExampleGroup_maxproc() {
g := Group{}
// set max concurrency
g.GOMAXPROCS(2)
g.Go(func() error {
return fakeRunTask(context.Background())
})
g.Go(func() error {
return fakeRunTask(context.Background())
})
if err := g.Wait(); err != nil {
// handle err
}
}
func ExampleGroup_waitgroup() {
var wg sync.WaitGroup
wg.Add(2)
go func() {
// do something
wg.Done()
}()
go func() {
// do something
wg.Done()
}()
wg.Wait()
}

View File

@@ -0,0 +1,48 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["pipeline_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//library/net/metadata:go_default_library",
"//library/time:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["pipeline.go"],
importpath = "go-common/library/sync/pipeline",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/net/metadata:go_default_library",
"//library/time:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//library/sync/pipeline/fanout:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,6 @@
### pipeline
#### Version 1.1.0
> 1. 增加平滑时间的支持
#### Version 1.0.0
> 1. 提供聚合方法 内部区分压测流量

View File

@@ -0,0 +1,5 @@
# Author
wangxu01
# Reviewer
zhapuyu

View File

@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- wangxu01
reviewers:
- wangxu01
- zhapuyu

View File

@@ -0,0 +1,3 @@
# go-common/sync/pipeline
提供内存批量聚合工具 内部区分压测流量

View File

@@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"example_test.go",
"fanout_test.go",
],
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["fanout.go"],
importpath = "go-common/library/sync/pipeline/fanout",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/log:go_default_library",
"//library/net/metadata:go_default_library",
"//library/net/trace:go_default_library",
"//library/stat/prom:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,4 @@
### pipeline/fanout
#### Version 1.0.0
> 1. library/cache包改为fanout

View File

@@ -0,0 +1,6 @@
# Author
wangxu01
# Reviewer
maojian
zhapuyu

View File

@@ -0,0 +1,8 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- wangxu01
reviewers:
- maojian
- wangxu01
- zhapuyu

View File

@@ -0,0 +1,19 @@
# go-common/sync/pipeline/fanout
以前的library/cache包改为pipeline/fanout
增加使用范围 不止由于异步增加缓存 也可以用在其他地方
功能:
* 支持定义Worker 数量的goroutine进行消费
* 内部支持的元数据传递library/net/metadata
* 后续会作废library/cache以及统一收敛Go并行里面的扇出模型
示例:
```golang
//名称为cache 执行线程为1 buffer长度为1024
cache := fanout.New("cache", fanout.Worker(1), fanout.Buffer(1024))
cache.Do(c, func(c context.Context) { SomeFunc(c, args...) })
cache.Close()
```

View File

@@ -0,0 +1,22 @@
package fanout
import "context"
// addCache 加缓存的例子
func addCache(c context.Context, id, value int) {
// some thing...
}
func Example() {
// 这里只是举个例子 真正使用的时候 应该用bm/rpc 传过来的context
var c = context.Background()
// 新建一个fanout 对象 名称为cache 名称主要用来上报监控和打日志使用 最好不要重复
// (可选参数) worker数量为1 表示后台只有1个线程在工作
// (可选参数) buffer 为1024 表示缓存chan长度为1024 如果chan慢了 再调用Do方法就会报错 设定长度主要为了防止OOM
cache := New("cache", Worker(1), Buffer(1024))
// 需要异步执行的方法
// 这里传进来的c里面的meta信息会被复制 超时会忽略 addCache拿到的context已经没有超时信息了
cache.Do(c, func(c context.Context) { addCache(c, 0, 0) })
// 程序结束的时候关闭fanout 会等待后台线程完成后返回
cache.Close()
}

View File

@@ -0,0 +1,151 @@
package fanout
import (
"context"
"errors"
"runtime"
"sync"
"go-common/library/log"
"go-common/library/net/metadata"
"go-common/library/net/trace"
"go-common/library/stat/prom"
)
var (
// ErrFull chan full.
ErrFull = errors.New("fanout: chan full")
stats = prom.BusinessInfoCount
traceTags = []trace.Tag{
trace.Tag{Key: trace.TagSpanKind, Value: "background"},
trace.Tag{Key: trace.TagComponent, Value: "sync/pipeline/fanout"},
}
)
// options holds the tunable parameters of a Fanout.
type options struct {
	worker int // number of consumer goroutines
	buffer int // capacity of the task channel
}

// Option fanout option
type Option func(*options)
// Worker returns an Option that sets the number of consumer goroutines of a
// Fanout. It panics when n is not positive, surfacing the misconfiguration
// at construction time.
func Worker(n int) Option {
	if n <= 0 {
		// Fixed message: was "worker should > 0" (missing verb).
		panic("fanout: worker should be > 0")
	}
	return func(o *options) {
		o.worker = n
	}
}
// Buffer returns an Option that sets the capacity of a Fanout's task channel.
// It panics when n is not positive, surfacing the misconfiguration at
// construction time.
func Buffer(n int) Option {
	if n <= 0 {
		// Fixed message: was "buffer should > 0" (missing verb).
		panic("fanout: buffer should be > 0")
	}
	return func(o *options) {
		o.buffer = n
	}
}
// item pairs a queued callback with the detached context it should run under.
type item struct {
	f   func(c context.Context)
	ctx context.Context
}

// Fanout async consume data from chan.
type Fanout struct {
	name    string          // instance name, used in metrics and logs
	ch      chan item       // buffered queue of pending callbacks
	options *options        // effective worker/buffer configuration
	waiter  sync.WaitGroup  // tracks running worker goroutines
	ctx     context.Context // canceled by Close to stop the workers
	cancel  func()
}
// New creates a Fanout with the given name (used for metrics and logging;
// defaults to "fanout" when empty) and options, and starts its worker
// goroutines immediately. Call Close to stop them.
func New(name string, opts ...Option) *Fanout {
	if name == "" {
		name = "fanout"
	}
	// Defaults: one worker, a queue of 1024 pending callbacks.
	o := &options{
		worker: 1,
		buffer: 1024,
	}
	for _, op := range opts {
		op(o)
	}
	c := &Fanout{
		ch:      make(chan item, o.buffer),
		name:    name,
		options: o,
	}
	// The workers' lifetime is bound to this context; Close cancels it.
	c.ctx, c.cancel = context.WithCancel(context.Background())
	c.waiter.Add(o.worker)
	for i := 0; i < o.worker; i++ {
		go c.proc()
	}
	return c
}
// proc is one worker's loop: it consumes queued items until Close cancels the
// fanout's context, then drains anything still buffered so callbacks already
// accepted by Do are not silently dropped (the original returned immediately
// on ctx.Done, losing queued work).
func (c *Fanout) proc() {
	defer c.waiter.Done()
	for {
		select {
		case t := <-c.ch:
			wrapFunc(t.f)(t.ctx)
			// Report current queue depth for monitoring.
			stats.State(c.name+"_channel", int64(len(c.ch)))
		case <-c.ctx.Done():
			// Close was called: run the remaining backlog, then exit.
			for {
				select {
				case t := <-c.ch:
					wrapFunc(t.f)(t.ctx)
				default:
					return
				}
			}
		}
	}
}
// wrapFunc decorates f with panic recovery and trace completion so one
// misbehaving callback cannot kill a worker goroutine.
func wrapFunc(f func(c context.Context)) (res func(context.Context)) {
	res = func(ctx context.Context) {
		defer func() {
			// Log a panicking callback with its stack instead of crashing.
			if r := recover(); r != nil {
				buf := make([]byte, 64*1024)
				buf = buf[:runtime.Stack(buf, false)]
				log.Error("panic in fanout proc, err: %s, stack: %s", r, buf)
			}
		}()
		f(ctx)
		// Finish the span forked in Do.
		// NOTE(review): if f panics, Finish is skipped — confirm whether the
		// forked trace should also be finished from the recover path.
		if tr, ok := trace.FromContext(ctx); ok {
			tr.Finish(nil)
		}
	}
	return
}
// Do queues f for asynchronous execution on a worker goroutine.
//
// The caller's metadata is carried into a detached context (per the package
// example, deadlines/cancelation are not propagated — confirm against
// metadata.WithContext), and an active trace, if any, is forked so the
// background work stays linked to the caller's trace.
//
// It returns ErrFull when the queue is full and the context error after
// Close. NOTE(review): a nil f is silently ignored (returns nil) — confirm
// callers rely on this.
func (c *Fanout) Do(ctx context.Context, f func(ctx context.Context)) (err error) {
	if f == nil || c.ctx.Err() != nil {
		return c.ctx.Err()
	}
	nakeCtx := metadata.WithContext(ctx)
	if tr, ok := trace.FromContext(ctx); ok {
		tr = tr.Fork("", "Fanout:Do").SetTag(traceTags...)
		nakeCtx = trace.NewContext(nakeCtx, tr)
	}
	// Non-blocking enqueue: never stall the caller.
	select {
	case c.ch <- item{f: f, ctx: nakeCtx}:
	default:
		err = ErrFull
	}
	// Report current queue depth for monitoring.
	stats.State(c.name+"_channel", int64(len(c.ch)))
	return
}
// Close stops the fanout: it cancels the workers' context and blocks until
// every worker goroutine has exited. A second Close returns the context's
// error.
func (c *Fanout) Close() error {
	if err := c.ctx.Err(); err != nil {
		return err
	}
	c.cancel()
	c.waiter.Wait()
	return nil
}

View File

@@ -0,0 +1,30 @@
package fanout
import (
"context"
"testing"
"time"
)
func TestFanout_Do(t *testing.T) {
ca := New("cache", Worker(1), Buffer(1024))
var run bool
ca.Do(context.Background(), func(c context.Context) {
run = true
panic("error")
})
time.Sleep(time.Millisecond * 50)
t.Log("not panic")
if !run {
t.Fatal("expect run be true")
}
}
func TestFanout_Close(t *testing.T) {
ca := New("cache", Worker(1), Buffer(1024))
ca.Close()
err := ca.Do(context.Background(), func(c context.Context) {})
if err == nil {
t.Fatal("expect get err")
}
}

View File

@@ -0,0 +1,185 @@
package pipeline
import (
"context"
"errors"
"sync"
"time"
"go-common/library/net/metadata"
xtime "go-common/library/time"
)
// ErrFull channel full error
var ErrFull = errors.New("channel full")

// message is one key/value pair flowing through a shard channel; a nil
// *message is the shutdown sentinel sent by Close.
type message struct {
	key   string
	value interface{}
}

// Pipeline pipeline struct
type Pipeline struct {
	// Do consumes one merged batch: index is the shard number and values maps
	// each key to the values accumulated since the last flush. Must be set
	// before Start.
	Do func(c context.Context, index int, values map[string][]interface{})
	// Split maps a key to a shard (taken modulo Worker). Must be set before
	// Start.
	Split       func(key string) int
	chans       []chan *message // per-shard queues for normal traffic
	mirrorChans []chan *message // per-shard queues for mirrored (load-test) traffic
	config      *Config
	wait        sync.WaitGroup // tracks running mergeproc goroutines
}

// Config Pipeline config
type Config struct {
	// MaxSize merge size
	MaxSize int
	// Interval merge interval
	Interval xtime.Duration
	// Buffer channel size
	Buffer int
	// Worker channel number
	Worker int
	// Smooth smoothing interval
	Smooth bool
}
// fix fills in defaults for any zero/negative field: MaxSize 1000,
// Interval 1s, Buffer 1000, Worker 10.
func (c *Config) fix() {
	if c.MaxSize <= 0 {
		c.MaxSize = 1000
	}
	if c.Interval <= 0 {
		c.Interval = xtime.Duration(time.Second)
	}
	if c.Buffer <= 0 {
		c.Buffer = 1000
	}
	if c.Worker <= 0 {
		c.Worker = 10
	}
}
// NewPipeline creates a pipeline from config (nil means all defaults) and
// allocates the per-shard channels. Call Start to launch the workers.
func NewPipeline(config *Config) (res *Pipeline) {
	if config == nil {
		config = &Config{}
	}
	config.fix()
	res = &Pipeline{
		chans:       make([]chan *message, config.Worker),
		mirrorChans: make([]chan *message, config.Worker),
		config:      config,
	}
	for i := 0; i < config.Worker; i++ {
		res.chans[i] = make(chan *message, config.Buffer)
		res.mirrorChans[i] = make(chan *message, config.Buffer)
	}
	return
}
// Start launches all merge workers: one goroutine per shard for normal
// traffic and one per shard for mirrored traffic. It panics if Do or Split
// is unset.
func (p *Pipeline) Start() {
	if p.Do == nil {
		panic("pipeline: do func is nil")
	}
	if p.Split == nil {
		panic("pipeline: split func is nil")
	}
	var mirror bool
	p.wait.Add(len(p.chans) + len(p.mirrorChans))
	// mirror == false: workers for normal traffic.
	for i, ch := range p.chans {
		go p.mergeproc(mirror, i, ch)
	}
	mirror = true
	// mirror == true: workers for mirrored (load-test) traffic.
	for i, ch := range p.mirrorChans {
		go p.mergeproc(mirror, i, ch)
	}
}
// SyncAdd adds a value to its shard channel (chosen by Split), blocking
// until the shard has room.
func (p *Pipeline) SyncAdd(c context.Context, key string, value interface{}) {
	ch, msg := p.add(c, key, value)
	ch <- msg
}
// Add adds a value to its shard channel (chosen by Split) without blocking;
// it returns ErrFull when the shard's buffer is full.
func (p *Pipeline) Add(c context.Context, key string, value interface{}) (err error) {
	ch, msg := p.add(c, key, value)
	select {
	case ch <- msg:
	default:
		err = ErrFull
	}
	return
}
// add picks the destination channel for key — the mirror channel when the
// context is marked as mirrored (load-test) traffic — and builds the message.
func (p *Pipeline) add(c context.Context, key string, value interface{}) (ch chan *message, m *message) {
	shard := p.Split(key) % p.config.Worker
	if metadata.Bool(c, metadata.Mirror) {
		ch = p.mirrorChans[shard]
	} else {
		ch = p.chans[shard]
	}
	m = &message{key: key, value: value}
	return
}
// Close sends a nil shutdown sentinel to every shard channel (normal and
// mirror) and waits for all workers to flush their final batch and exit.
// NOTE(review): values added concurrently with Close may arrive after the
// sentinel and be dropped — confirm callers stop producing first.
func (p *Pipeline) Close() (err error) {
	for _, ch := range p.chans {
		ch <- nil
	}
	for _, ch := range p.mirrorChans {
		ch <- nil
	}
	p.wait.Wait()
	return
}
// mergeproc is one shard's worker loop: it accumulates messages into vals
// and flushes a batch to p.Do when MaxSize values are buffered, the ticker
// fires, or the nil shutdown sentinel arrives.
func (p *Pipeline) mergeproc(mirror bool, index int, ch <-chan *message) {
	defer p.wait.Done()
	var (
		m         *message
		vals      = make(map[string][]interface{}, p.config.MaxSize)
		closed    bool
		count     int
		inteval   = p.config.Interval
		oldTicker = true
	)
	// Smooth mode staggers each worker's first tick so the shards do not all
	// flush at the same instant.
	if p.config.Smooth && index > 0 {
		inteval = xtime.Duration(int64(index) * (int64(p.config.Interval) / int64(p.config.Worker)))
	}
	ticker := time.NewTicker(time.Duration(inteval))
	for {
		select {
		case m = <-ch:
			// nil is the sentinel from Close: flush once more, then exit.
			if m == nil {
				closed = true
				break
			}
			count++
			vals[m.key] = append(vals[m.key], m.value)
			// Batch full: break out of the select to the flush below.
			if count >= p.config.MaxSize {
				break
			}
			continue
		case <-ticker.C:
			// After the first (possibly staggered) tick, switch back to the
			// configured interval.
			if p.config.Smooth && oldTicker {
				ticker.Stop()
				ticker = time.NewTicker(time.Duration(p.config.Interval))
				oldTicker = false
			}
		}
		if len(vals) > 0 {
			ctx := context.Background()
			// Mark mirrored traffic so Do can distinguish load-test data.
			if mirror {
				ctx = metadata.NewContext(ctx, metadata.MD{metadata.Mirror: true})
			}
			p.Do(ctx, index, vals)
			vals = make(map[string][]interface{}, p.config.MaxSize)
			count = 0
		}
		if closed {
			ticker.Stop()
			return
		}
	}
}

View File

@@ -0,0 +1,132 @@
package pipeline
import (
"context"
"reflect"
"strconv"
"testing"
"time"
"go-common/library/net/metadata"
xtime "go-common/library/time"
)
func TestPipeline(t *testing.T) {
conf := &Config{
MaxSize: 3,
Interval: xtime.Duration(time.Millisecond * 20),
Buffer: 3,
Worker: 10,
}
type recv struct {
mirror bool
ch int
values map[string][]interface{}
}
var runs []recv
do := func(c context.Context, ch int, values map[string][]interface{}) {
runs = append(runs, recv{
mirror: metadata.Bool(c, metadata.Mirror),
values: values,
ch: ch,
})
}
split := func(s string) int {
n, _ := strconv.Atoi(s)
return n
}
p := NewPipeline(conf)
p.Do = do
p.Split = split
p.Start()
p.Add(context.Background(), "1", 1)
p.Add(context.Background(), "1", 2)
p.Add(context.Background(), "11", 3)
p.Add(context.Background(), "2", 3)
time.Sleep(time.Millisecond * 60)
mirrorCtx := metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: true})
p.Add(mirrorCtx, "2", 3)
time.Sleep(time.Millisecond * 60)
p.SyncAdd(mirrorCtx, "5", 5)
time.Sleep(time.Millisecond * 60)
p.Close()
expt := []recv{
{
mirror: false,
ch: 1,
values: map[string][]interface{}{
"1": {1, 2},
"11": {3},
},
},
{
mirror: false,
ch: 2,
values: map[string][]interface{}{
"2": {3},
},
},
{
mirror: true,
ch: 2,
values: map[string][]interface{}{
"2": {3},
},
},
{
mirror: true,
ch: 5,
values: map[string][]interface{}{
"5": {5},
},
},
}
if !reflect.DeepEqual(runs, expt) {
t.Errorf("expect get %+v,\n got: %+v", expt, runs)
}
}
func TestPipelineSmooth(t *testing.T) {
conf := &Config{
MaxSize: 100,
Interval: xtime.Duration(time.Second),
Buffer: 100,
Worker: 10,
Smooth: true,
}
type result struct {
index int
ts time.Time
}
var results []result
do := func(c context.Context, index int, values map[string][]interface{}) {
results = append(results, result{
index: index,
ts: time.Now(),
})
}
split := func(s string) int {
n, _ := strconv.Atoi(s)
return n
}
p := NewPipeline(conf)
p.Do = do
p.Split = split
p.Start()
for i := 0; i < 10; i++ {
p.Add(context.Background(), strconv.Itoa(i), 1)
}
time.Sleep(time.Millisecond * 1500)
if len(results) != conf.Worker {
t.Errorf("expect results equal worker")
t.FailNow()
}
for i, r := range results {
if i > 0 {
if r.ts.Sub(results[i-1].ts) < time.Millisecond*20 {
t.Errorf("expect runs be smooth")
t.FailNow()
}
}
}
}