Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

17
library/queue/BUILD Normal file
View File

@@ -0,0 +1,17 @@
# Bazel package for //library/queue; targets are auto-generated ("automanaged")
# and will be rewritten by the BUILD file generator — do not hand-edit rules.
package(default_visibility = ["//visibility:public"])

# Every file directly in this package, private to the recursive collector.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source collection: this package plus the databus subpackage.
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//library/queue/databus:all-srcs",
    ],
    tags = ["automanaged"],
)

View File

@@ -0,0 +1,6 @@
### go-common/queue
#### v1.0.1
> 1.添加sdk监控
#### v1.0.0
> 1. report的sdk支持uint类型写入index

View File

@@ -0,0 +1,78 @@
# Bazel package for //library/queue/databus. Rules are "automanaged"
# (generated); the BUILD generator will overwrite manual rule edits.
load(
    "@io_bazel_rules_go//proto:def.bzl",
    "go_proto_library",
)
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

package(default_visibility = ["//visibility:public"])

# Every file directly in this package, private to the recursive collector.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source collection: this package plus all subpackages.
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//library/queue/databus/databusutil:all-srcs",
        "//library/queue/databus/metadata:all-srcs",
        "//library/queue/databus/report:all-srcs",
    ],
    tags = ["automanaged"],
)

# The hand-written client, with the generated proto code embedded so both
# compile into a single importable package.
go_library(
    name = "go_default_library",
    srcs = ["databus.go"],
    embed = [":databus_go_proto"],
    importpath = "go-common/library/queue/databus",
    tags = ["automanaged"],
    deps = [
        "//library/cache/redis:go_default_library",
        "//library/conf/env:go_default_library",
        "//library/container/pool:go_default_library",
        "//library/log:go_default_library",
        "//library/naming:go_default_library",
        "//library/naming/discovery:go_default_library",
        "//library/net/netutil:go_default_library",
        "//library/net/trace:go_default_library",
        "//library/stat/prom:go_default_library",
        "//library/time:go_default_library",
        "@com_github_gogo_protobuf//gogoproto:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
    ],
)

# External (package databus_test) integration tests.
go_test(
    name = "go_default_xtest",
    srcs = ["databus_test.go"],
    tags = ["automanaged"],
    deps = [
        "//library/naming/discovery:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/time:go_default_library",
    ],
)

# Raw proto schema; gogoproto extensions come from the special proto repo.
proto_library(
    name = "databus_proto",
    srcs = ["databus.proto"],
    tags = ["automanaged"],
    deps = ["@gogo_special_proto//github.com/gogo/protobuf/gogoproto"],
)

# Compiles the schema with the gogofast generator into Go sources that the
# go_library above embeds.
go_proto_library(
    name = "databus_go_proto",
    compilers = ["@io_bazel_rules_go//proto:gogofast_proto"],
    importpath = "go-common/library/queue/databus",
    proto = ":databus_proto",
    tags = ["automanaged"],
    deps = ["@com_github_gogo_protobuf//gogoproto:go_default_library"],
)

View File

@@ -0,0 +1,364 @@
package databus
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/url"
"sync"
"sync/atomic"
"time"
"go-common/library/cache/redis"
"go-common/library/conf/env"
"go-common/library/container/pool"
"go-common/library/log"
"go-common/library/naming"
"go-common/library/naming/discovery"
"go-common/library/net/netutil"
"go-common/library/net/trace"
"go-common/library/stat/prom"
xtime "go-common/library/time"
)
// _appid is the service id under which databus nodes register in discovery.
const (
	_appid = "middleware.databus"
)

// dial produces a redis connection to a databus node (direct or discovered).
type dial func() (redis.Conn, error)

// Config databus config.
type Config struct {
	Key    string // access key issued for this group/topic
	Secret string // secret paired with Key; both go into the auth string
	Group  string // consumer/producer group name
	Topic  string
	Action string // should be "pub" or "sub" or "pubsub"
	Buffer int    // capacity of the subscriber message channel (0 means 1024)
	Name   string // redis name, for trace
	Proto  string // network for direct dialing, e.g. "tcp"
	Addr   string // fallback address used when discovery yields nothing
	Auth   string
	Active int // pool
	Idle   int // pool
	DialTimeout  xtime.Duration
	ReadTimeout  xtime.Duration
	WriteTimeout xtime.Duration
	IdleTimeout  xtime.Duration
	Direct bool // if true, never use discovery; always dial Addr directly
}
const (
	_family    = "databus"
	_actionSub = "sub"
	_actionPub = "pub"
	_actionAll = "pubsub"
	// databus speaks the redis wire protocol: SET publishes one message,
	// MGET pulls a batch of pending messages.
	_cmdPub = "SET"
	_cmdSub = "MGET"
	// auth string layout: key:secret@group/topic=<topic>&role=<action>
	_authFormat = "%s:%s@%s/topic=%s&role=%s"
	// lifecycle states for Databus.closed (flipped once via CAS in Close)
	_open   = int32(0)
	_closed = int32(1)
	// URL scheme instance addresses advertised by discovery must carry
	_scheme = "databus"
)

var (
	// ErrAction action error.
	ErrAction = errors.New("action unknown")
	// ErrFull chan full
	ErrFull = errors.New("chan full")
	// ErrNoInstance no instances
	ErrNoInstance = errors.New("no databus instances found")
	// bk drives reconnect backoff in subproc; stats feeds client metrics.
	bk    = netutil.DefaultBackoffConfig
	stats = prom.LibClient
)
// Message Data.
// A Message is one unit pulled from databus; Value holds the raw JSON
// payload so callers decode it lazily. The unexported back-pointer d lets
// Commit record the consumed offset on the owning Databus.
type Message struct {
	Key       string          `json:"key"`
	Value     json.RawMessage `json:"value"`
	Topic     string          `json:"topic"`
	Partition int32           `json:"partition"`
	Offset    int64           `json:"offset"`
	Timestamp int64           `json:"timestamp"`
	d         *Databus
}
// Commit ack message: it records this message's offset as consumed so that
// subproc later persists it back to the databus server.
//
// Offsets only move forward — an older (smaller) offset never overwrites a
// newer one — so out-of-order commits are safe. Commit never fails; the
// error return exists for caller convention only. (Unused named return
// parameter removed.)
func (m *Message) Commit() error {
	m.d.lock.Lock()
	if m.Offset >= m.d.marked[m.Partition] {
		m.d.marked[m.Partition] = m.Offset
	}
	m.d.lock.Unlock()
	return nil
}
// Databus databus struct.
// A Databus is a publisher and/or subscriber client depending on
// Config.Action; New wires up only the parts the action needs.
type Databus struct {
	conf   *Config
	d      dial            // how subproc obtains a connection (direct or discovered)
	p      *redis.Pool     // publisher connection pool (pub/pubsub only)
	dis    naming.Resolver // discovery resolver; nil in direct mode
	msgs   chan *Message   // delivery channel exposed via Messages()
	lock   sync.RWMutex    // guards marked
	marked map[int32]int64 // highest committed offset per partition
	idx    int64           // round-robin cursor for publisher instance selection
	closed int32           // _open/_closed, flipped exactly once by Close
}
// New new a databus.
// It creates the client for c and starts the background machinery it needs:
//   - unless c.Direct is set (or the deploy env is empty/dev), it resolves
//     databus nodes via discovery, blocking up to 10s for the first event
//     and panicking on timeout so a misconfigured service fails fast;
//   - for "sub"/"pubsub" it starts subproc, which feeds Messages();
//   - for "pub"/"pubsub" it builds the publisher connection pool.
//
// Fix: corrected "discvoery" typo in the startup log message.
func New(c *Config) *Databus {
	if c.Buffer == 0 {
		c.Buffer = 1024
	}
	d := &Databus{
		conf:   c,
		msgs:   make(chan *Message, c.Buffer),
		marked: make(map[int32]int64),
		closed: _open,
	}
	if !c.Direct && env.DeployEnv != "" && env.DeployEnv != env.DeployEnvDev {
		d.dis = discovery.Build(_appid)
		e := d.dis.Watch()
		// wait for the first discovery snapshot before serving
		select {
		case <-e:
			d.disc()
		case <-time.After(10 * time.Second):
			panic("init discovery err")
		}
		go d.discoveryproc(e)
		log.Info("init databus discovery info successfully")
	}
	if c.Action == _actionSub || c.Action == _actionAll {
		if d.dis == nil {
			d.d = d.dial
		} else {
			d.d = d.dialInstance
		}
		go d.subproc()
	}
	if c.Action == _actionPub || c.Action == _actionAll {
		// new pool; with discovery, fresh pool connections dial resolved instances
		d.p = d.redisPool(c)
		if d.dis != nil {
			d.p.New = func(ctx context.Context) (io.Closer, error) {
				return d.dialInstance()
			}
		}
	}
	return d
}
// redisPool builds a redis connection pool configured with the databus auth
// string (key:secret@group/topic=...&role=action) and a dial-stats hook.
func (d *Databus) redisPool(c *Config) *redis.Pool {
	auth := fmt.Sprintf(_authFormat, c.Key, c.Secret, c.Group, c.Topic, c.Action)
	cfg := &redis.Config{
		Name:         c.Name,
		Proto:        c.Proto,
		Addr:         c.Addr,
		Auth:         auth,
		DialTimeout:  c.DialTimeout,
		ReadTimeout:  c.ReadTimeout,
		WriteTimeout: c.WriteTimeout,
		Config: &pool.Config{
			Active:      c.Active,
			Idle:        c.Idle,
			IdleTimeout: c.IdleTimeout,
		},
	}
	return redis.NewPool(cfg, redis.DialStats(statfunc))
}
// statfunc is a redis dial-stats hook: it captures the start time and
// returns a closure that, when invoked after the command completes, records
// per-command latency and — if the command ended in error — an error count.
func statfunc(cmd string, err *error) func() {
	start := time.Now()
	return func() {
		elapsed := int64(time.Since(start) / time.Millisecond)
		stats.Timing(fmt.Sprintf("databus:%s", cmd), elapsed)
		if err == nil || *err == nil {
			return
		}
		stats.Incr("databus", (*err).Error())
	}
}
// redisOptions assembles the dial options (timeouts, databus auth string as
// the redis password, stats hook) shared by every direct connection.
func (d *Databus) redisOptions() []redis.DialOption {
	c := d.conf
	auth := fmt.Sprintf(_authFormat, c.Key, c.Secret, c.Group, c.Topic, c.Action)
	return []redis.DialOption{
		redis.DialConnectTimeout(time.Duration(c.DialTimeout)),
		redis.DialReadTimeout(time.Duration(c.ReadTimeout)),
		redis.DialWriteTimeout(time.Duration(c.WriteTimeout)),
		redis.DialPassword(auth),
		redis.DialStats(statfunc),
	}
}
// dial connects directly to the statically configured proto/address.
func (d *Databus) dial() (redis.Conn, error) {
	return redis.Dial(d.conf.Proto, d.conf.Addr, d.redisOptions()...)
}
// dialInstance connects to a databus node chosen from discovery.
// Zone-local instances are preferred; if the local zone is empty, instances
// from all zones are pooled. Publishers pick round-robin (even write
// spread); subscribers pick randomly. If discovery yields nothing it falls
// back to the static config address, else returns ErrNoInstance.
//
// Fix: compare against the _actionPub constant instead of the duplicated
// string literal "pub", consistent with the rest of the file.
func (d *Databus) dialInstance() (redis.Conn, error) {
	if insMap, ok := d.dis.Fetch(context.Background()); ok {
		ins, ok := insMap[env.Zone]
		if !ok || len(ins) == 0 {
			// no instances in our zone: flatten every zone into one list
			for _, is := range insMap {
				ins = append(ins, is...)
			}
		}
		if len(ins) > 0 {
			var in *naming.Instance
			if d.conf.Action == _actionPub {
				i := atomic.AddInt64(&d.idx, 1)
				in = ins[i%int64(len(ins))]
			} else {
				in = ins[rand.Intn(len(ins))]
			}
			// use the first advertised address with the databus scheme
			for _, addr := range in.Addrs {
				u, err := url.Parse(addr)
				if err == nil && u.Scheme == _scheme {
					return redis.Dial("tcp", u.Host, d.redisOptions()...)
				}
			}
		}
	}
	if d.conf.Proto != "" && d.conf.Addr != "" {
		log.Warn("Databus: no instances(%s,%s) found in discovery,Use config(%s,%s)", _appid, env.Zone, d.conf.Proto, d.conf.Addr)
		return redis.Dial(d.conf.Proto, d.conf.Addr, d.redisOptions()...)
	}
	return nil, ErrNoInstance
}
// disc reacts to a discovery event: it rebuilds the publisher pool so that
// new connections dial freshly resolved instances, closes the old pool, and
// logs the zone-local instance count.
//
// Fix: removed the dead `op = nil` assignment (writing nil to a local about
// to go out of scope has no effect).
// NOTE(review): d.p is swapped without holding a lock while Send may read
// it concurrently — looks racy; confirm with `go test -race`.
func (d *Databus) disc() {
	if d.p != nil {
		op := d.p
		np := d.redisPool(d.conf)
		np.New = func(ctx context.Context) (io.Closer, error) {
			return d.dialInstance()
		}
		d.p = np
		op.Close()
		log.Info("discovery event renew redis pool group(%s) topic(%s)", d.conf.Group, d.conf.Topic)
	}
	if insMap, ok := d.dis.Fetch(context.Background()); ok {
		if ins, ok := insMap[env.Zone]; ok && len(ins) > 0 {
			log.Info("get databus instances len(%d)", len(ins))
		}
	}
}
// discoveryproc loops forever, refreshing instance state on every discovery
// watch event. It is started once from New and never exits.
//
// NOTE(review): if e were ever closed, <-e would return immediately and this
// would busy-loop calling disc(); presumably the watcher never closes the
// channel — confirm against the discovery package.
func (d *Databus) discoveryproc(e <-chan struct{}) {
	if d.dis == nil {
		return
	}
	for {
		<-e
		d.disc()
	}
}
// subproc is the subscriber loop. Each round it: (1) exits — closing d.msgs
// — once Close has flipped d.closed; (2) sleeps with exponential backoff if
// the previous round errored; (3) (re)dials when the connection is absent or
// broken; (4) persists any offsets newly acked via Message.Commit by sending
// SET <partition> <offset>; (5) pulls a batch of messages with MGET, decodes
// each JSON payload, and pushes it onto d.msgs.
func (d *Databus) subproc() {
	var (
		err   error
		r     []byte
		res   [][]byte
		c     redis.Conn
		retry int
		// commited: offsets already persisted server-side;
		// commit: offsets still pending persistence this round.
		commited = make(map[int32]int64)
		commit   = make(map[int32]int64)
	)
	for {
		if atomic.LoadInt32(&d.closed) == _closed {
			if c != nil {
				c.Close()
			}
			close(d.msgs)
			return
		}
		// back off after a failed round; reset once a round succeeds
		if err != nil {
			time.Sleep(bk.Backoff(retry))
			retry++
		} else {
			retry = 0
		}
		if c == nil || c.Err() != nil {
			if c, err = d.d(); err != nil {
				log.Error("redis.Dial(%s@%s) group(%s) retry error(%v)", d.conf.Proto, d.conf.Addr, d.conf.Group, err)
				continue
			}
		}
		// snapshot offsets that changed since they were last persisted
		d.lock.RLock()
		for k, v := range d.marked {
			if commited[k] != v {
				commit[k] = v
			}
		}
		d.lock.RUnlock()
		// TODO pipeline commit offset
		for k, v := range commit {
			if _, err = c.Do("SET", k, v); err != nil {
				c.Close()
				log.Error("group(%s) conn.Do(SET,%d,%d) commit error(%v)", d.conf.Group, k, v, err)
				break
			}
			// deleting the current key while ranging a map is safe in Go;
			// entries left behind on error are retried next round
			delete(commit, k)
			commited[k] = v
		}
		if err != nil {
			continue
		}
		// pull messages
		if res, err = redis.ByteSlices(c.Do(_cmdSub, "")); err != nil {
			c.Close()
			log.Error("group(%s) conn.Do(MGET) error(%v)", d.conf.Group, err)
			continue
		}
		for _, r = range res {
			msg := &Message{d: d}
			if err = json.Unmarshal(r, msg); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", r, err)
				continue
			}
			// NOTE(review): blocks when the buffer is full; if the consumer
			// stops reading before calling Close, subproc stalls here.
			d.msgs <- msg
		}
	}
}
// Messages get message chan.
// The returned channel is closed by subproc after Close is called, so a
// range loop over it terminates cleanly.
func (d *Databus) Messages() <-chan *Message {
	return d.msgs
}
// Send send message to databus.
// v is JSON-encoded and published under key k via the pool. When c carries a
// trace, a child span for the publish command is attached and finished with
// the final error (which is why the named return is kept).
//
// Fix: the pool connection is now released via defer, so it is returned to
// the pool even if a later statement panics.
func (d *Databus) Send(c context.Context, k string, v interface{}) (err error) {
	var b []byte
	// trace info
	if t, ok := trace.FromContext(c); ok {
		t = t.Fork(_family, _cmdPub)
		t.SetTag(trace.String(trace.TagAddress, d.conf.Addr), trace.String(trace.TagComment, k))
		defer t.Finish(&err)
	}
	// send message
	if b, err = json.Marshal(v); err != nil {
		log.Error("json.Marshal(%v) error(%v)", v, err)
		return
	}
	conn := d.p.Get(context.TODO())
	defer conn.Close()
	if _, err = conn.Do(_cmdPub, k, b); err != nil {
		log.Error("conn.Do(%s,%s,%s) error(%v)", _cmdPub, k, b, err)
	}
	return
}
// Close close databus conn.
// It is idempotent: the CAS guarantees the shutdown path runs exactly once;
// subsequent calls return nil immediately. Flipping d.closed also signals
// subproc to exit and close the message channel. Always returns nil.
// (Unused named return parameter removed; both exits now return nil
// explicitly for consistency.)
func (d *Databus) Close() error {
	if !atomic.CompareAndSwapInt32(&d.closed, _open, _closed) {
		return nil
	}
	if d.p != nil {
		d.p.Close()
	}
	return nil
}

View File

@@ -0,0 +1,909 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: library/queue/databus/databus.proto
/*
Package databus is a generated protocol buffer package.
It is generated from these files:
library/queue/databus/databus.proto
It has these top-level messages:
Header
MessagePB
*/
package databus
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import encoding_json "encoding/json"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// Header carries opaque string metadata for a databus message.
// Generated by protoc-gen-gogo from databus.proto — do not hand-edit logic.
type Header struct {
	Metadata map[string]string `protobuf:"bytes,1,rep,name=metadata" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}

func (m *Header) Reset()                    { *m = Header{} }
func (m *Header) String() string            { return proto.CompactTextString(m) }
func (*Header) ProtoMessage()               {}
func (*Header) Descriptor() ([]byte, []int) { return fileDescriptorDatabus, []int{0} }

// GetMetadata returns the metadata map, nil-safe on a nil receiver.
func (m *Header) GetMetadata() map[string]string {
	if m != nil {
		return m.Metadata
	}
	return nil
}
type MessagePB struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key"`
Value encoding_json.RawMessage `protobuf:"bytes,2,opt,name=value,proto3,casttype=encoding/json.RawMessage" json:"value"`
Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic"`
Partition int32 `protobuf:"varint,4,opt,name=partition,proto3" json:"partition"`
Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset"`
Timestamp int64 `protobuf:"varint,6,opt,name=timestamp,proto3" json:"timestamp"`
Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *MessagePB) Reset() { *m = MessagePB{} }
func (m *MessagePB) String() string { return proto.CompactTextString(m) }
func (*MessagePB) ProtoMessage() {}
func (*MessagePB) Descriptor() ([]byte, []int) { return fileDescriptorDatabus, []int{1} }
func (m *MessagePB) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *MessagePB) GetValue() encoding_json.RawMessage {
if m != nil {
return m.Value
}
return nil
}
func (m *MessagePB) GetTopic() string {
if m != nil {
return m.Topic
}
return ""
}
func (m *MessagePB) GetPartition() int32 {
if m != nil {
return m.Partition
}
return 0
}
func (m *MessagePB) GetOffset() int64 {
if m != nil {
return m.Offset
}
return 0
}
func (m *MessagePB) GetTimestamp() int64 {
if m != nil {
return m.Timestamp
}
return 0
}
func (m *MessagePB) GetMetadata() map[string]string {
if m != nil {
return m.Metadata
}
return nil
}
func init() {
proto.RegisterType((*Header)(nil), "infra.databus.Header")
proto.RegisterType((*MessagePB)(nil), "infra.databus.MessagePB")
}
func (m *Header) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Header) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Metadata) > 0 {
for k, _ := range m.Metadata {
dAtA[i] = 0xa
i++
v := m.Metadata[k]
mapSize := 1 + len(k) + sovDatabus(uint64(len(k))) + 1 + len(v) + sovDatabus(uint64(len(v)))
i = encodeVarintDatabus(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintDatabus(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintDatabus(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func (m *MessagePB) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *MessagePB) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintDatabus(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
if len(m.Value) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintDatabus(dAtA, i, uint64(len(m.Value)))
i += copy(dAtA[i:], m.Value)
}
if len(m.Topic) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintDatabus(dAtA, i, uint64(len(m.Topic)))
i += copy(dAtA[i:], m.Topic)
}
if m.Partition != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintDatabus(dAtA, i, uint64(m.Partition))
}
if m.Offset != 0 {
dAtA[i] = 0x28
i++
i = encodeVarintDatabus(dAtA, i, uint64(m.Offset))
}
if m.Timestamp != 0 {
dAtA[i] = 0x30
i++
i = encodeVarintDatabus(dAtA, i, uint64(m.Timestamp))
}
if len(m.Metadata) > 0 {
for k, _ := range m.Metadata {
dAtA[i] = 0x3a
i++
v := m.Metadata[k]
mapSize := 1 + len(k) + sovDatabus(uint64(len(k))) + 1 + len(v) + sovDatabus(uint64(len(v)))
i = encodeVarintDatabus(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintDatabus(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintDatabus(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
// encodeVarintDatabus writes v as a protobuf base-128 varint into dAtA
// starting at offset and returns the index just past the last byte written.
// Generated by protoc-gen-gogo — do not hand-edit.
func encodeVarintDatabus(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
// Size returns the encoded protobuf size of the Header in bytes (one
// length-delimited map entry per metadata key/value pair).
// Generated by protoc-gen-gogo — do not hand-edit.
func (m *Header) Size() (n int) {
	var l int
	_ = l
	if len(m.Metadata) > 0 {
		for k, v := range m.Metadata {
			_ = k
			_ = v
			mapEntrySize := 1 + len(k) + sovDatabus(uint64(len(k))) + 1 + len(v) + sovDatabus(uint64(len(v)))
			n += mapEntrySize + 1 + sovDatabus(uint64(mapEntrySize))
		}
	}
	return n
}
func (m *MessagePB) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovDatabus(uint64(l))
}
l = len(m.Value)
if l > 0 {
n += 1 + l + sovDatabus(uint64(l))
}
l = len(m.Topic)
if l > 0 {
n += 1 + l + sovDatabus(uint64(l))
}
if m.Partition != 0 {
n += 1 + sovDatabus(uint64(m.Partition))
}
if m.Offset != 0 {
n += 1 + sovDatabus(uint64(m.Offset))
}
if m.Timestamp != 0 {
n += 1 + sovDatabus(uint64(m.Timestamp))
}
if len(m.Metadata) > 0 {
for k, v := range m.Metadata {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovDatabus(uint64(len(k))) + 1 + len(v) + sovDatabus(uint64(len(v)))
n += mapEntrySize + 1 + sovDatabus(uint64(mapEntrySize))
}
}
return n
}
// sovDatabus returns the number of bytes needed to varint-encode x
// (size-of-varint). Generated by protoc-gen-gogo — do not hand-edit.
func sovDatabus(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sozDatabus is the zigzag-encoded counterpart of sovDatabus (size of a
// signed varint). Generated by protoc-gen-gogo — do not hand-edit.
func sozDatabus(x uint64) (n int) {
	return sovDatabus(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Header) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Header: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Header: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthDatabus
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Metadata == nil {
m.Metadata = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthDatabus
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthDatabus
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipDatabus(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDatabus
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Metadata[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipDatabus(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDatabus
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *MessagePB) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: MessagePB: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: MessagePB: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDatabus
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthDatabus
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDatabus
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Topic = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType)
}
m.Partition = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Partition |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
}
m.Offset = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Offset |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
m.Timestamp = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Timestamp |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthDatabus
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Metadata == nil {
m.Metadata = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthDatabus
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDatabus
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthDatabus
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipDatabus(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDatabus
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Metadata[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipDatabus(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDatabus
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipDatabus(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDatabus
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDatabus
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDatabus
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthDatabus
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDatabus
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipDatabus(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthDatabus = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowDatabus = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("library/queue/databus/databus.proto", fileDescriptorDatabus) }
var fileDescriptorDatabus = []byte{
// 376 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x92, 0x41, 0x4a, 0xc3, 0x40,
0x18, 0x85, 0x99, 0xc6, 0xa4, 0x66, 0x6c, 0x41, 0x06, 0x85, 0xb1, 0x8b, 0x26, 0xb4, 0x28, 0x01,
0x31, 0x01, 0xdd, 0x88, 0xdd, 0x05, 0x04, 0x37, 0x85, 0x92, 0xa5, 0xbb, 0x49, 0x3b, 0x89, 0xa3,
0x4d, 0x26, 0x26, 0x13, 0xa5, 0xc7, 0x10, 0x3c, 0x94, 0x4b, 0x4f, 0x10, 0xa4, 0xcb, 0x1c, 0xc1,
0x95, 0x64, 0x12, 0xd3, 0xd6, 0x03, 0xb8, 0xfa, 0xdf, 0xff, 0x78, 0xff, 0x97, 0x90, 0x17, 0x38,
0x5e, 0x32, 0x3f, 0x25, 0xe9, 0xca, 0x79, 0xce, 0x69, 0x4e, 0x9d, 0x05, 0x11, 0xc4, 0xcf, 0xb3,
0xdf, 0x69, 0x27, 0x29, 0x17, 0x1c, 0xf5, 0x59, 0x1c, 0xa4, 0xc4, 0x6e, 0xcc, 0xc1, 0x45, 0xc8,
0xc4, 0x43, 0xee, 0xdb, 0x73, 0x1e, 0x39, 0x21, 0x0f, 0xb9, 0x23, 0x53, 0x7e, 0x1e, 0xc8, 0x4d,
0x2e, 0x52, 0xd5, 0xd7, 0xa3, 0x77, 0x00, 0xb5, 0x3b, 0x4a, 0x16, 0x34, 0x45, 0x53, 0xb8, 0x1f,
0x51, 0x41, 0x2a, 0x10, 0x06, 0xa6, 0x62, 0x1d, 0x5c, 0x8e, 0xed, 0x1d, 0xb6, 0x5d, 0x07, 0xed,
0x69, 0x93, 0xba, 0x8d, 0x45, 0xba, 0x72, 0x7b, 0x65, 0x61, 0xb4, 0x87, 0x5e, 0xab, 0x06, 0x13,
0xd8, 0xdf, 0x09, 0xa2, 0x43, 0xa8, 0x3c, 0xd1, 0x15, 0x06, 0x26, 0xb0, 0x74, 0xaf, 0x92, 0xe8,
0x08, 0xaa, 0x2f, 0x64, 0x99, 0x53, 0xdc, 0x91, 0x5e, 0xbd, 0xdc, 0x74, 0xae, 0xc1, 0xe8, 0x4d,
0x81, 0xfa, 0x94, 0x66, 0x19, 0x09, 0xe9, 0xcc, 0x45, 0x27, 0x5b, 0x97, 0x6e, 0xb7, 0x2c, 0x8c,
0x6a, 0xad, 0x11, 0x93, 0x6d, 0x44, 0xcf, 0x3d, 0x2d, 0x0b, 0xa3, 0x36, 0xbe, 0x0b, 0x03, 0xd3,
0x78, 0xce, 0x17, 0x2c, 0x0e, 0x9d, 0xc7, 0x8c, 0xc7, 0xb6, 0x47, 0x5e, 0x1b, 0x64, 0xf3, 0x24,
0x64, 0x40, 0x55, 0xf0, 0x84, 0xcd, 0xb1, 0x22, 0xc9, 0x7a, 0x75, 0x2c, 0x0d, 0xaf, 0x1e, 0xe8,
0x1c, 0xea, 0x09, 0x49, 0x05, 0x13, 0x8c, 0xc7, 0x78, 0xcf, 0x04, 0x96, 0xea, 0xf6, 0xcb, 0xc2,
0xd8, 0x98, 0xde, 0x46, 0xa2, 0x11, 0xd4, 0x78, 0x10, 0x64, 0x54, 0x60, 0xd5, 0x04, 0x96, 0xe2,
0xc2, 0xb2, 0x30, 0x1a, 0xc7, 0x6b, 0x66, 0x05, 0x14, 0x2c, 0xa2, 0x99, 0x20, 0x51, 0x82, 0x35,
0x19, 0x93, 0xc0, 0xd6, 0xf4, 0x36, 0x12, 0xcd, 0xb6, 0x0a, 0xe9, 0xca, 0x42, 0xce, 0xfe, 0x14,
0xd2, 0x7e, 0xa2, 0x7f, 0xe8, 0xc4, 0x3d, 0xfe, 0x58, 0x0f, 0xc1, 0xe7, 0x7a, 0x08, 0xbe, 0xd6,
0x43, 0x70, 0xdf, 0x6d, 0xde, 0xc1, 0xd7, 0xe4, 0x8f, 0x74, 0xf5, 0x13, 0x00, 0x00, 0xff, 0xff,
0xa5, 0x22, 0x22, 0x61, 0xad, 0x02, 0x00, 0x00,
}

View File

@@ -0,0 +1,19 @@
// Wire schema for databus messages; compiled with gogoproto extensions so
// field JSON tags match the hand-written JSON structs in databus.go.
syntax = "proto3";

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

package infra.databus;

option go_package = "databus";

// Header carries opaque string metadata attached to a message.
message Header {
  map<string, string> metadata = 1 [(gogoproto.jsontag) = "metadata"];
}

// MessagePB mirrors the JSON Message type: value is raw bytes cast to
// encoding/json.RawMessage on the Go side so payload decoding stays lazy.
message MessagePB {
  string key = 1 [(gogoproto.jsontag) = "key"];
  bytes value = 2 [(gogoproto.jsontag) = "value", (gogoproto.casttype) = "encoding/json.RawMessage"];
  string topic = 3 [(gogoproto.jsontag) = "topic"];
  int32 partition = 4 [(gogoproto.jsontag) = "partition"];
  int64 offset = 5 [(gogoproto.jsontag) = "offset"];
  int64 timestamp = 6 [(gogoproto.jsontag) = "timestamp"];
  map<string, string> metadata = 7 [(gogoproto.jsontag) = "metadata"];
}

View File

@@ -0,0 +1,152 @@
package databus_test
import (
"context"
"testing"
"time"
"go-common/library/naming/discovery"
"go-common/library/queue/databus"
xtime "go-common/library/time"
)
// Integration-test fixtures: a publisher config, a subscriber config, and a
// discovery config pointing at a UAT environment.
//
// WARNING(review): these look like real access keys/secrets and internal
// addresses committed to source control — rotate the credentials and load
// them from the environment instead.
var (
	// pCfg publishes to topic "test1" as group Test1-MainCommonArch-P.
	pCfg = &databus.Config{
		// Key: "0PvKGhAqDvsK7zitmS8t",
		// Secret: "0PvKGhAqDvsK7zitmS8u",
		// Group: "databus_test_group",
		// Topic: "databus_test_topic",
		Key:    "dbe67e6a4c36f877",
		Secret: "8c775ea242caa367ba5c876c04576571",
		Group:  "Test1-MainCommonArch-P",
		Topic:  "test1",
		Action: "pub",
		Name:   "databus",
		Proto:  "tcp",
		// Addr: "172.16.33.158:6205",
		Addr:   "172.18.33.50:6205",
		Active: 10,
		Idle:   5,
		DialTimeout:  xtime.Duration(time.Second),
		WriteTimeout: xtime.Duration(time.Second),
		ReadTimeout:  xtime.Duration(time.Second),
		IdleTimeout:  xtime.Duration(time.Minute),
	}
	// sCfg subscribes to the same topic; the long read timeout accommodates
	// the server's long-poll on MGET.
	sCfg = &databus.Config{
		// Key: "0PvKGhAqDvsK7zitmS8t",
		// Secret: "0PvKGhAqDvsK7zitmS8u",
		// Group: "databus_test_group",
		// Topic: "databus_test_topic",
		Key:    "dbe67e6a4c36f877",
		Secret: "8c775ea242caa367ba5c876c04576571",
		Group:  "Test1-MainCommonArch-S",
		Topic:  "test1",
		Action: "sub",
		Name:   "databus",
		Proto:  "tcp",
		// Addr: "172.16.33.158:6205",
		Addr:   "172.18.33.50:6205",
		Active: 10,
		Idle:   5,
		DialTimeout:  xtime.Duration(time.Second),
		WriteTimeout: xtime.Duration(time.Second),
		ReadTimeout:  xtime.Duration(time.Second * 35),
		IdleTimeout:  xtime.Duration(time.Minute),
	}
	// dCfg is a discovery client config (UAT, zone sh001).
	dCfg = &discovery.Config{
		Nodes:  []string{"172.18.33.50:7171"},
		Key:    "0c4b8fe3ff35a4b6",
		Secret: "b370880d1aca7d3a289b9b9a7f4d6812",
		Zone:   "sh001",
		Env:    "uat",
	}
)
type TestMsg struct {
Now int64 `json:"now"`
}
// testSub drains the subscription channel, logging and committing each
// message, until the channel is closed.
func testSub(t *testing.T, d *databus.Databus) {
	for m := range d.Messages() {
		t.Logf("sub message: %s", string(m.Value))
		if err := m.Commit(); err != nil {
			t.Errorf("sub commit error(%v)\n", err)
		}
	}
}
// testPub publishes one timestamped TestMsg and logs the outcome.
func testPub(t *testing.T, d *databus.Databus) {
	msg := &TestMsg{Now: time.Now().UnixNano()}
	err := d.Send(context.TODO(), "test", msg)
	if err != nil {
		t.Errorf("d.Send(test) error(%v)", err)
		return
	}
	t.Logf("pub message %v", msg)
}
// TestDatabus publishes three messages, then subscribes for 15 seconds and
// commits whatever arrives. Requires a reachable databus at the configured
// address.
func TestDatabus(t *testing.T) {
	d := databus.New(pCfg)
	// pub
	testPub(t, d)
	testPub(t, d)
	testPub(t, d)
	d.Close()
	// sub
	d = databus.New(sCfg)
	go testSub(t, d)
	time.Sleep(time.Second * 15)
	d.Close()
}
// TestDiscoveryDatabus is byte-for-byte identical to TestDatabus.
// NOTE(review): it never references dCfg or the discovery package, so service
// discovery is not actually exercised here — confirm intent.
func TestDiscoveryDatabus(t *testing.T) {
	d := databus.New(pCfg)
	// pub
	testPub(t, d)
	testPub(t, d)
	testPub(t, d)
	d.Close()
	// sub
	d = databus.New(sCfg)
	go testSub(t, d)
	time.Sleep(time.Second * 15)
	d.Close()
}
// BenchmarkPub measures parallel publish throughput against a live databus
// endpoint.
func BenchmarkPub(b *testing.B) {
	d := databus.New(pCfg)
	defer d.Close()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			m := &TestMsg{Now: time.Now().UnixNano()}
			if err := d.Send(context.TODO(), "test", m); err != nil {
				b.Errorf("d.Send(test) error(%v)", err)
				continue
			}
		}
	})
}
// BenchmarkDiscoveryPub is identical to BenchmarkPub.
// NOTE(review): no discovery-based configuration is used — confirm intent.
func BenchmarkDiscoveryPub(b *testing.B) {
	d := databus.New(pCfg)
	defer d.Close()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			m := &TestMsg{Now: time.Now().UnixNano()}
			if err := d.Send(context.TODO(), "test", m); err != nil {
				b.Errorf("d.Send(test) error(%v)", err)
				continue
			}
		}
	})
}

View File

@@ -0,0 +1,48 @@
# Bazel build rules for //library/queue/databus/databusutil
# (rules are auto-managed — see the "automanaged" tags).
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "group.go",
    ],
    importpath = "go-common/library/queue/databus/databusutil",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/queue/databus:go_default_library",
        "//library/time:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["group_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/sync/errgroup:go_default_library",
        "//library/time:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,100 @@
/*
Package databusutil provides a util for building databus based async jobs with
single partition message aggregation and parallel consumption features.

Group

The group is the primary struct for working with this util.

Applications create groups by calling the package NewGroup function with a
databusutil config and a databus message chan.

To start an initiated group, the application must call the group Start method.

The application must call the group Close method when the application is
done with the group.

Callbacks

After a new group is created, the following callbacks: New, Split and Do must
be assigned, otherwise the job will not work as you expect.

The callback New represents how the consume proc of the group parses the target
object from a new databus message that it received for merging; if the error
returned is not nil, the consume proc will omit this message and continue.

An example of the callback New is:

	func newTestMsg(msg *databus.Message) (res interface{}, err error) {
		res = new(testMsg)
		if err = json.Unmarshal(msg.Value, &res); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
		}
		return
	}

The callback Split represents how the consume proc of the group gets the
sharding dimension from a databus message or the object parsed from the databus
message; it will be used along with the configuration item Num to decide which
merge goroutine to use to merge the parsed object. In more detail, if we take
the result of callback Split as sr, then the sharding result will be sr % Num.

An example of the callback Split is:

	func split(msg *databus.Message, data interface{}) int {
		t, ok := data.(*testMsg)
		if !ok {
			return 0
		}
		return int(t.Mid)
	}

If your messages are already assigned to partitions corresponding to the split
you want, you may want to directly use the partition as split; here is the
example:

	func anotherSplit(msg *databus.Message, data interface{}) int {
		return int(msg.Partition)
	}

Do not forget to ensure that the max value your callback Split returns, as
maxSplit, is greater than or equal to the configuration item Num, otherwise
the merge goroutines will not be fully used; in more detail, the last
(Num - maxSplit) merge goroutines are initiated but will never be used.

The callback Do represents how the merge proc of the group processes the
merged objects; define your business in it.

An example of the callback Do is:

	func do(msgs []interface{}) {
		for _, m := range msgs {
			// process messages you merged here, the example type asserts and prints each
			if msg, ok := m.(*testMsg); ok {
				fmt.Printf("msg: %+v", msg)
			}
		}
	}

Usage Example

The typical usage for databusutil is:

	// new a databus to subscribe from
	dsSub := databus.New(dsSubConf)
	defer dsSub.Close()

	// new a group
	g := NewGroup(
		c,
		dsSub.Messages(),
	)

	// fill callbacks
	g.New = yourNewFunc
	g.Split = yourSplitFunc
	g.Do = yourDoFunc

	// start the group
	g.Start()

	// must close the group before the job exits
	defer g.Close()

	// signal handler
*/
package databusutil

View File

@@ -0,0 +1,223 @@
package databusutil
import (
"runtime"
"sync"
"time"
"go-common/library/queue/databus"
xtime "go-common/library/time"
)
// group lifecycle states used by Start/Close
const (
	_stateStarted = 1
	_stateClosed  = 2
)

// Config the config is the base configuration for initiating a new group.
type Config struct {
	// Size merge size (batch size submitted to Do)
	Size int
	// Num merge goroutine num
	Num int
	// Ticker duration of submit merges when no new message
	Ticker xtime.Duration
	// Chan size of merge chan and done chan
	Chan int
}
// fix fills in a sane default for every unset configuration item.
func (c *Config) fix() {
	if c.Size <= 0 {
		c.Size = 1024
	}
	if c.Num <= 0 {
		c.Num = runtime.GOMAXPROCS(0)
	}
	if c.Chan <= 0 {
		c.Chan = 1024
	}
	if int64(c.Ticker) <= 0 {
		c.Ticker = xtime.Duration(time.Second * 5)
	}
}
// message is a node in the group's pending-commit linked list, wrapping the
// raw databus message and the object parsed from it.
type message struct {
	next   *message
	data   *databus.Message
	object interface{}
	done   bool // set once the Do callback has processed this message
}

// Group group.
type Group struct {
	c          *Config
	head, last *message // pending-commit list in arrival order, guarded by mu
	state      int
	mu         sync.Mutex
	mc         []chan *message // merge chan
	dc         chan []*message // done chan
	qc         chan struct{}   // quit chan
	msg        <-chan *databus.Message
	// New parses the target object from a raw message; a non-nil error makes
	// the consume proc skip that message.
	New func(msg *databus.Message) (interface{}, error)
	// Split selects the merge goroutine for a message (result % Num).
	Split func(msg *databus.Message, data interface{}) int
	// Do processes one merged batch of objects.
	Do   func(msgs []interface{})
	pool *sync.Pool // recycles message nodes
}
// NewGroup builds a group that consumes from m using configuration c.
// A nil config is replaced by defaults; a nil message chan panics at run time.
func NewGroup(c *Config, m <-chan *databus.Message) *Group {
	// NOTE if c || m == nil runtime panic
	if c == nil {
		c = new(Config)
	}
	c.fix()
	merges := make([]chan *message, c.Num)
	for i := range merges {
		merges[i] = make(chan *message, c.Chan)
	}
	return &Group{
		c:   c,
		msg: m,
		mc:  merges,
		dc:  make(chan []*message, c.Chan),
		qc:  make(chan struct{}),
		pool: &sync.Pool{
			New: func() interface{} {
				return new(message)
			},
		},
	}
}
// Start launches the consume, merge and commit goroutines. Starting an
// already-started group is a no-op; safe for concurrent use.
func (g *Group) Start() {
	g.mu.Lock()
	started := g.state == _stateStarted
	g.state = _stateStarted
	g.mu.Unlock()
	if started {
		return
	}
	go g.consumeproc()
	for i := 0; i < g.c.Num; i++ {
		go g.mergeproc(g.mc[i])
	}
	go g.commitproc()
}
// Close shuts the group down by closing its quit channel. Closing an
// already-closed group is a no-op; safe for concurrent use.
func (g *Group) Close() (err error) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.state == _stateClosed {
		return
	}
	g.state = _stateClosed
	close(g.qc)
	return
}
// message takes a recycled (or freshly allocated) node from the pool.
func (g *Group) message() *message {
	return g.pool.Get().(*message)
}

// freeMessage zeroes a node and returns it to the pool for reuse.
func (g *Group) freeMessage(m *message) {
	*m = message{}
	g.pool.Put(m)
}
// consumeproc drains the subscription channel: each message is wrapped in a
// node, parsed via the New callback, appended to the pending-commit list
// (guarded by mu) and handed to the merge goroutine selected by Split.
func (g *Group) consumeproc() {
	var (
		ok  bool
		err error
		msg *databus.Message
	)
	for {
		select {
		case <-g.qc:
			// group closed
			return
		case msg, ok = <-g.msg:
			if !ok {
				// upstream channel closed: shut the whole group down
				g.Close()
				return
			}
		}
		// marked head to first commit
		m := g.message()
		m.data = msg
		if m.object, err = g.New(msg); err != nil {
			// unparsable message: recycled and skipped; it was never linked
			// into the pending list, so it is never committed
			g.freeMessage(m)
			continue
		}
		g.mu.Lock()
		if g.head == nil {
			g.head = m
			g.last = m
		} else {
			g.last.next = m
			g.last = m
		}
		g.mu.Unlock()
		// may block here when the selected merge chan is full (back-pressure)
		g.mc[g.Split(m.data, m.object)%g.c.Num] <- m
	}
}
// mergeproc accumulates parsed objects from its merge chan and submits them
// to the Do callback in batches, either when a batch reaches Size or when the
// Ticker fires; processed nodes are forwarded to the commit proc via dc.
func (g *Group) mergeproc(mc <-chan *message) {
	ticker := time.NewTicker(time.Duration(g.c.Ticker))
	// BUG FIX: the ticker was never stopped, leaking its runtime timer for
	// the life of the process after the group is closed.
	defer ticker.Stop()
	msgs := make([]interface{}, 0, g.c.Size)
	marks := make([]*message, 0, g.c.Size)
	for {
		select {
		case <-g.qc:
			return
		case msg := <-mc:
			msgs = append(msgs, msg.object)
			marks = append(marks, msg)
			if len(msgs) < g.c.Size {
				continue
			}
			// batch full: fall through to submit
		case <-ticker.C:
			// periodic flush of a partial batch
		}
		if len(msgs) > 0 {
			g.Do(msgs)
			msgs = make([]interface{}, 0, g.c.Size)
		}
		if len(marks) > 0 {
			g.dc <- marks
			marks = make([]*message, 0, g.c.Size)
		}
	}
}
// commitproc advances committed offsets: it marks finished nodes done, pops
// the contiguous done prefix off the pending list (preserving at-least-once
// semantics for in-flight messages), then commits the newest offset seen per
// partition.
func (g *Group) commitproc() {
	commits := make(map[int32]*databus.Message)
	for {
		select {
		case <-g.qc:
			return
		case done := <-g.dc:
			// merge partitions to commit offset
			for _, d := range done {
				d.done = true
			}
			g.mu.Lock()
			// only the done prefix is removed; a not-yet-done head blocks
			// commits for everything behind it
			for g.head != nil && g.head.done {
				cur := g.head
				commits[cur.data.Partition] = cur.data
				g.head = cur.next
				g.freeMessage(cur)
			}
			g.mu.Unlock()
			for k, m := range commits {
				// NOTE(review): Commit's error is silently ignored — confirm
				// this best-effort behavior is intended
				m.Commit()
				delete(commits, k)
			}
		}
	}
}

View File

@@ -0,0 +1,392 @@
package databusutil
import (
"context"
"encoding/json"
"runtime"
"strconv"
"sync"
"testing"
"time"
"go-common/library/log"
"go-common/library/queue/databus"
"go-common/library/sync/errgroup"
xtime "go-common/library/time"
)
// testMsg is the JSON payload exchanged by the group tests.
type testMsg struct {
	Seq int64 `json:"seq"`
	Mid int64 `json:"mid"`
	Now int64 `json:"now"`
}
var (
_sendSeqsList = make([][]int64, _groupNum)
_recvSeqsList = make([][]int64, _groupNum)
_sMus = make([]sync.Mutex, _groupNum)
_rMus = make([]sync.Mutex, _groupNum)
_groupNum = 8
_tc = 20
_ts = time.Now().Unix()
_st = _ts - _ts%10 + 1000
_ed = _bSt + int64(_groupNum*_tc) - 1
_dsPubConf = &databus.Config{
Key: "0PvKGhAqDvsK7zitmS8t",
Secret: "0PvKGhAqDvsK7zitmS8u",
Group: "databus_test_group",
Topic: "databus_test_topic",
Action: "pub",
Name: "databus",
Proto: "tcp",
Addr: "172.16.33.158:6205",
Active: 1,
Idle: 1,
DialTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second),
IdleTimeout: xtime.Duration(time.Minute),
}
_dsSubConf = &databus.Config{
Key: "0PvKGhAqDvsK7zitmS8t",
Secret: "0PvKGhAqDvsK7zitmS8u",
Group: "databus_test_group",
Topic: "databus_test_topic",
Action: "sub",
Name: "databus",
Proto: "tcp",
Addr: "172.16.33.158:6205",
Active: 1,
Idle: 1,
DialTimeout: xtime.Duration(time.Second),
WriteTimeout: xtime.Duration(time.Second),
ReadTimeout: xtime.Duration(time.Second * 35),
IdleTimeout: xtime.Duration(time.Minute),
}
)
// TestGroup is an end-to-end test: it publishes a deterministic range of
// seqs, consumes them through a Group, then verifies that every shard
// received exactly its expected seqs, in order.
func TestGroup(t *testing.T) {
	for i := 0; i < _groupNum; i++ {
		_sendSeqsList[i] = make([]int64, 0)
		_recvSeqsList[i] = make([]int64, 0)
	}
	taskCounts := taskCount(_groupNum, _st, _ed)
	runtime.GOMAXPROCS(32)
	log.Init(&log.Config{
		Dir: "/data/log/queue",
	})
	c := &Config{
		Size:   200,
		Ticker: xtime.Duration(time.Second),
		Num:    _groupNum,
		Chan:   1024,
	}
	dsSub := databus.New(_dsSubConf)
	defer dsSub.Close()
	group := NewGroup(
		c,
		dsSub.Messages(),
	)
	group.New = newTestMsg
	group.Split = split
	group.Do = do
	eg, _ := errgroup.WithContext(context.Background())
	// go produce test messages
	eg.Go(func() error {
		send(_st, _ed)
		return nil
	})
	// go consume test messages
	eg.Go(func() error {
		group.Start()
		defer group.Close()
		// poll until every shard has received its full expected count
		m := make(map[int]struct{})
		for len(m) < _groupNum {
			for i := 0; i < _groupNum; i++ {
				_, ok := m[i]
				if ok {
					continue
				}
				_rMus[i].Lock()
				if len(_recvSeqsList[i]) == taskCounts[i] {
					m[i] = struct{}{}
				}
				_rMus[i].Unlock()
				log.Info("_recvSeqsList[%d] length: %d, expect: %d", i, len(_recvSeqsList[i]), taskCounts[i])
			}
			log.Info("m length: %d", len(m))
			time.Sleep(time.Millisecond * 500)
		}
		// check seqs list, sendSeqsList and recvSeqsList will not change since now, so no need to lock
		for num := 0; num < _groupNum; num++ {
			sendSeqs := _sendSeqsList[num]
			recvSeqs := _recvSeqsList[num]
			if len(sendSeqs) != taskCounts[num] {
				t.Errorf("sendSeqs length of proc %d is incorrect, expcted %d but got %d", num, taskCounts[num], len(sendSeqs))
				t.FailNow()
			}
			if len(recvSeqs) != taskCounts[num] {
				t.Errorf("recvSeqs length of proc %d is incorrect, expcted %d but got %d", num, taskCounts[num], len(recvSeqs))
				t.FailNow()
			}
			// per-shard ordering must match exactly (single-partition ordering)
			for i := range recvSeqs {
				if recvSeqs[i] != sendSeqs[i] {
					t.Errorf("res is incorrect for proc %d, expcted recvSeqs[%d] equal to sendSeqs[%d] but not, recvSeqs[%d]: %d, sendSeqs[%d]: %d", num, i, i, i, recvSeqs[i], i, sendSeqs[i])
					t.FailNow()
				}
			}
			t.Logf("proc %d processed %d messages, expected %d messages, check ok", num, taskCounts[num], len(recvSeqs))
		}
		return nil
	})
	eg.Wait()
}
// do records each received seq into its shard's recv list, skipping stale
// seqs left over from earlier runs.
func do(msgs []interface{}) {
	for _, raw := range msgs {
		msg, ok := raw.(*testMsg)
		if !ok {
			continue
		}
		shard := int(msg.Mid) % _groupNum
		if msg.Seq < _st {
			log.Info("proc %d processed old seq: %d, mid: %d", shard, msg.Seq, msg.Mid)
			continue
		}
		_rMus[shard].Lock()
		_recvSeqsList[shard] = append(_recvSeqsList[shard], msg.Seq)
		_rMus[shard].Unlock()
		log.Info("proc %d processed seq: %d, mid: %d", shard, msg.Seq, msg.Mid)
	}
}
// send publishes one testMsg per seq in [st, ed] and records each seq into
// its shard's send list.
func send(st, ed int64) error {
	dsPub := databus.New(_dsPubConf)
	defer dsPub.Close()
	ts := time.Now().Unix()
	for i := st; i <= ed; i++ {
		mid := int64(i)
		seq := i
		k := _dsPubConf.Topic + strconv.FormatInt(mid, 10)
		n := &testMsg{
			Seq: seq,
			Mid: mid,
			Now: ts,
		}
		// NOTE(review): Send's error is ignored; a failed publish makes the
		// consuming side poll forever — confirm acceptable for this test
		dsPub.Send(context.TODO(), k, n)
		// NOTE: sleep here to avoid network latency caused message out of sequence
		time.Sleep(time.Millisecond * 500)
		shard := int(mid) % _groupNum
		_sMus[shard].Lock()
		_sendSeqsList[shard] = append(_sendSeqsList[shard], seq)
		_sMus[shard].Unlock()
	}
	return nil
}
// newTestMsg decodes a databus message payload into a *testMsg.
func newTestMsg(msg *databus.Message) (res interface{}, err error) {
	m := new(testMsg)
	res = m
	if err = json.Unmarshal(msg.Value, m); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
	}
	return
}
// split shards by the parsed message's Mid; payloads that are not *testMsg
// all fall into shard 0.
func split(msg *databus.Message, data interface{}) int {
	if m, ok := data.(*testMsg); ok {
		return int(m.Mid)
	}
	return 0
}
// taskCount returns, for each of num shards, how many integers in [st, ed]
// map to that shard under modulo-num sharding.
func taskCount(num int, st, ed int64) []int {
	counts := make([]int, num)
	for v := st; v <= ed; v++ {
		shard := int(v) % num
		counts[shard]++
	}
	return counts
}
// TestTaskCount checks that an exact multiple-of-groupNum range yields an
// equal count per shard.
func TestTaskCount(t *testing.T) {
	groupNum := 10
	c := 100
	ts := time.Now().Unix()
	st := ts - ts%10 + 1000
	ed := st + int64(groupNum*c) - 1
	res := taskCount(groupNum, st, ed)
	for i, v := range res {
		if v != c {
			t.Errorf("res is incorrect, expected task count 10 for proc %d but got %d", i, v)
			t.FailNow()
		}
		t.Logf("i: %d, v: %d", i, v)
	}
}
var (
	// fixtures for TestGroup_Blocking; the _b prefix keeps them separate from
	// TestGroup's fixtures
	_bGroupNum     = 3
	_bSendSeqsList = make([][]int64, _bGroupNum)
	_bRecvSeqsList = make([][]int64, _bGroupNum)
	_bSMus         = make([]sync.Mutex, _bGroupNum)
	_bRMus         = make([]sync.Mutex, _bGroupNum)
	_bTc           = 20
	_bTs           = time.Now().Unix()
	_bSt           = _bTs - _bTs%10 + 1000
	_bEd           = _bSt + int64(_bGroupNum*_bTc) - 1
	_bTaskCounts   = taskCount(_bGroupNum, _bSt, _bEd)
	// _blockDo makes the first Do invocation block until the consume proc
	// stalls; _blocked records that the stall was observed
	_blockDo   = true
	_blockDoMu sync.Mutex
	_blocked   = false
)
// TestGroup_Blocking verifies back-pressure: while the Do callback blocks,
// the consume proc must eventually stall on the bounded channels, and
// unblocking Do must let consumption resume and complete.
func TestGroup_Blocking(t *testing.T) {
	for i := 0; i < _bGroupNum; i++ {
		_bSendSeqsList[i] = make([]int64, 0)
		_bRecvSeqsList[i] = make([]int64, 0)
	}
	runtime.GOMAXPROCS(32)
	log.Init(&log.Config{
		Dir: "/data/log/queue",
	})
	// small Size/Chan so the pipeline fills quickly while Do blocks
	c := &Config{
		Size:   20,
		Ticker: xtime.Duration(time.Second),
		Num:    _bGroupNum,
		Chan:   5,
	}
	dsSub := databus.New(_dsSubConf)
	defer dsSub.Close()
	g := NewGroup(
		c,
		dsSub.Messages(),
	)
	g.New = newTestMsg
	g.Split = split
	g.Do = func(msgs []interface{}) {
		blockingDo(t, g, msgs)
	}
	eg, _ := errgroup.WithContext(context.Background())
	// go produce test messages
	eg.Go(func() error {
		dsPub := databus.New(_dsPubConf)
		defer dsPub.Close()
		ts := time.Now().Unix()
		for i := _bSt; i <= _bEd; i++ {
			mid := int64(i)
			seq := i
			k := _dsPubConf.Topic + strconv.FormatInt(mid, 10)
			n := &testMsg{
				Seq: seq,
				Mid: mid,
				Now: ts,
			}
			dsPub.Send(context.TODO(), k, n)
			// NOTE: sleep here to avoid network latency caused message out of sequence
			time.Sleep(time.Millisecond * 500)
			shard := int(mid) % _bGroupNum
			_bSMus[shard].Lock()
			_bSendSeqsList[shard] = append(_bSendSeqsList[shard], seq)
			_bSMus[shard].Unlock()
		}
		return nil
	})
	// go consume test messages
	eg.Go(func() error {
		g.Start()
		defer g.Close()
		m := make(map[int]struct{})
		// wait until all procs have processed their messages
		for len(m) < _bGroupNum {
			for i := 0; i < _bGroupNum; i++ {
				_, ok := m[i]
				if ok {
					continue
				}
				_bRMus[i].Lock()
				if len(_bRecvSeqsList[i]) == _bTaskCounts[i] {
					m[i] = struct{}{}
				}
				_bRMus[i].Unlock()
				log.Info("_bRecvSeqsList[%d] length: %d, expect: %d, blockDo: %t", i, len(_bRecvSeqsList[i]), _bTaskCounts[i], _blockDo)
			}
			log.Info("m length: %d", len(m))
			time.Sleep(time.Millisecond * 500)
		}
		return nil
	})
	eg.Wait()
}
// blockingDo blocks the first Do invocation until the group's pending list
// length stabilizes (taken as proof the consume proc is stalled), asserts
// that the stall happened, then unblocks and processes normally.
func blockingDo(t *testing.T, g *Group, msgs []interface{}) {
	_blockDoMu.Lock()
	if !_blockDo {
		_blockDoMu.Unlock()
		processMsg(msgs)
		return
	}
	// blocking to see if consume proc blocks finally
	lastGLen := 0
	cnt := 0
	for i := 0; i < 60; i++ {
		// print seqs status, not lock because final stable
		for i, v := range _bRecvSeqsList {
			log.Info("_bRecvSeqsList[%d] length: %d, expect: %d", i, len(v), _bTaskCounts[i])
		}
		// walk the pending list; 5 consecutive polls with an unchanged
		// length is treated as "consume proc blocked"
		gLen := 0
		for h := g.head; h != nil; h = h.next {
			gLen++
		}
		if gLen == lastGLen {
			cnt++
		} else {
			cnt = 0
		}
		lastGLen = gLen
		log.Info("blocking test: gLen: %d, cnt: %d, _bSt: %d, _bEd: %d", gLen, cnt, _bSt, _bEd)
		if cnt == 5 {
			_blocked = true
			log.Info("blocking test: consumeproc now is blocked, now trying to unblocking do callback")
			break
		}
		time.Sleep(time.Millisecond * 500)
	}
	// assert blocked
	if !_blocked {
		t.Errorf("res is incorrect, _blocked should be true but got false")
		t.FailNow()
	}
	// unblocking and check if consume proc unblocking too
	_blockDo = false
	_blockDoMu.Unlock()
	processMsg(msgs)
}
// processMsg records each received seq into its shard's recv list, skipping
// stale seqs left over from earlier runs.
func processMsg(msgs []interface{}) {
	for _, raw := range msgs {
		msg, ok := raw.(*testMsg)
		if !ok {
			continue
		}
		shard := int(msg.Mid) % _bGroupNum
		if msg.Seq < _bSt {
			log.Info("proc %d processed old seq: %d, mid: %d", shard, msg.Seq, msg.Mid)
			continue
		}
		_bRMus[shard].Lock()
		_bRecvSeqsList[shard] = append(_bRecvSeqsList[shard], msg.Seq)
		log.Info("appended: %d", msg.Seq)
		_bRMus[shard].Unlock()
		log.Info("proc %d processed seq: %d, mid: %d", shard, msg.Seq, msg.Mid)
	}
}

View File

@@ -0,0 +1,29 @@
# Bazel build rules for //library/queue/databus/metadata
# (rules are auto-managed — see the "automanaged" tags).
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["metadata.go"],
    importpath = "go-common/library/queue/databus/metadata",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = ["//library/net/metadata:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,28 @@
package metadata
import (
"context"
"go-common/library/net/metadata"
)
// FromContext get metadata from context.
func FromContext(c context.Context) map[string]string {
return map[string]string{
metadata.Color: metadata.String(c, metadata.Color),
metadata.Caller: metadata.String(c, metadata.Caller),
metadata.Mirror: metadata.String(c, metadata.Mirror),
metadata.RemoteIP: metadata.String(c, metadata.RemoteIP),
}
}
// NewContext new metadata context.
func NewContext(c context.Context, meta map[string]string) context.Context {
	md := metadata.MD{}
	for _, key := range []string{metadata.Color, metadata.Caller, metadata.Mirror, metadata.RemoteIP} {
		md[key] = meta[key]
	}
	return metadata.NewContext(c, md)
}

View File

@@ -0,0 +1,50 @@
# Bazel build rules for //library/queue/databus/report
# (rules are auto-managed — see the "automanaged" tags).
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "agent.go",
        "conf.go",
    ],
    importpath = "go-common/library/queue/databus/report",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/conf/env:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/time:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = ["agent_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//library/queue/databus:go_default_library",
        "//library/time:go_default_library",
    ],
)

View File

@@ -0,0 +1,202 @@
package report
import (
"context"
"encoding/json"
"strconv"
"time"
"go-common/library/conf/env"
"go-common/library/queue/databus"
"github.com/pkg/errors"
)
// conf is a per-environment override (secret and endpoint) applied to the
// built-in default databus configs.
type conf struct {
	Secret string
	Addr   string
}

var (
	mn   *databus.Databus // manager (audit) log agent, set by InitManager
	user *databus.Databus // user action log agent, set by InitUser
	// ErrInit report init error
	ErrInit = errors.New("report initialization failed")
)

// field names of the report payload map
const (
	_timeFormat = "2006-01-02 15:04:05"
	_uname      = "uname"
	_uid        = "uid"
	_business   = "business"
	_type       = "type"
	_oid        = "oid"
	_action     = "action"
	_ctime      = "ctime"
	_platform   = "platform"
	_build      = "build"
	_buvid      = "buvid"
	_ip         = "ip"
	_mid        = "mid"
	_indexInt   = "int_" // prefix for positional integer index columns
	_indexStr   = "str_" // prefix for positional string index columns
	_extra      = "extra_data"
)

// ManagerInfo manager report info.
type ManagerInfo struct {
	// common
	Uname    string
	UID      int64
	Business int
	Type     int
	Oid      int64
	Action   string
	Ctime    time.Time
	// extra
	Index   []interface{} // mapped to int_N / str_N columns by report()
	Content map[string]interface{}
}

// UserInfo user report info
type UserInfo struct {
	Mid      int64
	Platform string
	Build    int64
	Buvid    string
	Business int
	Type     int
	Oid      int64
	Action   string
	Ctime    time.Time
	IP       string
	// extra
	Index   []interface{} // mapped to int_N / str_N columns by report()
	Content map[string]interface{}
}

// UserActionLog 用户行为日志 (user action log record as stored downstream)
type UserActionLog struct {
	Uname    string `json:"uname"`
	UID      int64  `json:"uid"`
	Business int    `json:"business"`
	Type     int    `json:"type"`
	Oid      int64  `json:"oid"`
	Action   string `json:"action"`
	Platform string `json:"platform"`
	Build    int64  `json:"build"`
	Buvid    string `json:"buvid"`
	IP       string `json:"ip"`
	Mid      int64  `json:"mid"`
	Int0     int64  `json:"int_0"`
	Int1     int64  `json:"int_1"`
	Int2     int64  `json:"int_2"`
	Str0     string `json:"str_0"`
	Str1     string `json:"str_1"`
	Str2     string `json:"str_2"`
	Ctime    string `json:"ctime"`
	Extra    string `json:"extra_data"`
}

// AuditLog 审核日志 (audit log record as stored downstream)
type AuditLog struct {
	Uname    string `json:"uname"`
	UID      int64  `json:"uid"`
	Business int    `json:"business"`
	Type     int    `json:"type"`
	Oid      int64  `json:"oid"`
	Action   string `json:"action"`
	Int0     int64  `json:"int_0"`
	Int1     int64  `json:"int_1"`
	Int2     int64  `json:"int_2"`
	Str0     string `json:"str_0"`
	Str1     string `json:"str_1"`
	Str2     string `json:"str_2"`
	Ctime    string `json:"ctime"`
	Extra    string `json:"extra_data"`
}
// InitManager init manager report log agent. Passing nil uses the built-in
// default config with Secret/Addr overridden for the current deploy env.
func InitManager(c *databus.Config) {
	if c == nil {
		// FIX: copy the default instead of mutating the shared package-level
		// _managerConfig in place.
		cfg := *_managerConfig
		if d, ok := _defaultManagerConfig[env.DeployEnv]; ok {
			cfg.Secret = d.Secret
			cfg.Addr = d.Addr
		}
		c = &cfg
	}
	mn = databus.New(c)
}
// InitUser init user report log agent. Passing nil uses the built-in default
// config with Secret/Addr overridden for the current deploy env.
func InitUser(c *databus.Config) {
	if c == nil {
		// FIX: copy the default instead of mutating the shared package-level
		// _userConfig in place.
		cfg := *_userConfig
		if d, ok := _defaultUserConfig[env.DeployEnv]; ok {
			cfg.Secret = d.Secret
			cfg.Addr = d.Addr
		}
		c = &cfg
	}
	user = databus.New(c)
}
// Manager log a message for manager, xx-admin.
// Returns ErrInit when the agent has not been initialized or m is nil.
func Manager(m *ManagerInfo) error {
	if mn == nil || m == nil {
		return ErrInit
	}
	fields := make(map[string]interface{})
	if len(m.Content) > 0 {
		raw, _ := json.Marshal(m.Content)
		fields[_extra] = string(raw)
	}
	fields[_uname] = m.Uname
	fields[_uid] = m.UID
	fields[_business] = m.Business
	fields[_type] = m.Type
	fields[_oid] = m.Oid
	fields[_action] = m.Action
	fields[_ctime] = m.Ctime.Format(_timeFormat)
	return report(mn, fields, m.Index...)
}
// User log a message for user, xx-interface.
// Returns ErrInit when the agent has not been initialized or u is nil.
func User(u *UserInfo) error {
	if user == nil || u == nil {
		return ErrInit
	}
	fields := make(map[string]interface{})
	if len(u.Content) > 0 {
		raw, _ := json.Marshal(u.Content)
		fields[_extra] = string(raw)
	}
	fields[_mid] = u.Mid
	fields[_platform] = u.Platform
	fields[_build] = u.Build
	fields[_buvid] = u.Buvid
	fields[_business] = u.Business
	fields[_type] = u.Type
	fields[_oid] = u.Oid
	fields[_action] = u.Action
	fields[_ip] = u.IP
	fields[_ctime] = u.Ctime.Format(_timeFormat)
	return report(user, fields, u.Index...)
}
func report(h *databus.Databus, v map[string]interface{}, extras ...interface{}) error {
var i, j int
for _, extra := range extras {
switch ex := extra.(type) {
case string:
v[_indexStr+strconv.Itoa(i)] = ex
i++
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
v[_indexInt+strconv.Itoa(j)] = ex
j++
}
}
return h.Send(context.Background(), v[_ctime].(string), v)
}

View File

@@ -0,0 +1,141 @@
package report
import (
"sync"
"testing"
"time"
"go-common/library/queue/databus"
xtime "go-common/library/time"
)
var (
	// once-guards so each agent is initialized at most once per test binary
	mnOnce      sync.Once
	mnUatOnce   sync.Once
	userOnce    sync.Once
	userUatOnce sync.Once
)

// newManager initializes the manager agent with the built-in defaults.
func newManager() {
	InitManager(nil)
}

// newUatManager initializes the manager agent against the UAT endpoint.
func newUatManager() {
	InitManager(&databus.Config{
		Key:          "2511663d546f1413",
		Secret:       "cde3b480836cc76df3d635470f991caa",
		Group:        "LogAudit-MainSearch-P",
		Topic:        "LogAudit-T",
		Action:       "pub",
		Buffer:       10240,
		Name:         "log-audit/log-sub",
		Proto:        "tcp",
		Addr:         "172.18.33.50:6205",
		Active:       100,
		Idle:         100,
		DialTimeout:  xtime.Duration(time.Millisecond * 200),
		ReadTimeout:  xtime.Duration(time.Millisecond * 200),
		WriteTimeout: xtime.Duration(time.Millisecond * 200),
		IdleTimeout:  xtime.Duration(time.Second * 80),
	})
}

// newUser initializes the user agent with the built-in defaults.
func newUser() {
	InitUser(nil)
}
func newUatUser() {
InitManager(&databus.Config{
Key: "2511663d546f1413",
Secret: "cde3b480836cc76df3d635470f991caa",
Group: "LogUserAction-MainSearch-P",
Topic: "LogUserAction-T",
Action: "pub",
Buffer: 10240,
Name: "log-user-action/log-sub",
Proto: "tcp",
Addr: "172.18.33.50:6205",
Active: 100,
Idle: 100,
DialTimeout: xtime.Duration(time.Millisecond * 200),
ReadTimeout: xtime.Duration(time.Millisecond * 200),
WriteTimeout: xtime.Duration(time.Millisecond * 200),
IdleTimeout: xtime.Duration(time.Second * 80),
})
}
// Test_Manager sends one audit log through the default manager agent.
// NOTE(review): Manager's returned error is ignored, so these tests can only
// fail by panicking — confirm intent.
func Test_Manager(b *testing.T) {
	mnOnce.Do(newManager)
	Manager(&ManagerInfo{
		Uname:    "dz",
		UID:      64,
		Business: 0,
		Type:     1,
		Oid:      2,
		Action:   "action",
		Ctime:    time.Now(),
		Index:    []interface{}{5, 6, 7, "a", "b", "c"},
		Content: map[string]interface{}{
			"json": "json",
		},
	})
}

// Test_UatManager sends one audit log through the UAT manager agent.
func Test_UatManager(b *testing.T) {
	mnUatOnce.Do(newUatManager)
	Manager(&ManagerInfo{
		Uname:    "dz",
		UID:      64,
		Business: 0,
		Type:     1,
		Oid:      2,
		Action:   "action",
		Ctime:    time.Now(),
		Index:    []interface{}{5, 6, 7, "a", "b", "c"},
		Content: map[string]interface{}{
			"json": "json",
		},
	})
}

// Test_User sends one user action log through the default user agent.
func Test_User(b *testing.T) {
	userOnce.Do(newUser)
	User(&UserInfo{
		Mid:      1,
		Platform: "platform",
		Build:    2,
		Buvid:    "buvid",
		Business: 0,
		Type:     3,
		Oid:      4,
		Action:   "action",
		Ctime:    time.Now(),
		IP:       "127.0.0.1",
		// extra
		Index: []interface{}{5, 6, 7, "a", "b", "c"},
		Content: map[string]interface{}{
			"json": "json",
		},
	})
}

// Test_UatUser sends one user action log through the UAT user agent.
func Test_UatUser(b *testing.T) {
	userUatOnce.Do(newUatUser)
	User(&UserInfo{
		Mid:      1,
		Platform: "platform",
		Build:    2,
		Buvid:    "buvid",
		Business: 0,
		Type:     3,
		Oid:      4,
		Action:   "action",
		Ctime:    time.Now(),
		IP:       "127.0.0.1",
		// extra
		Index: []interface{}{5, 6, 7, "a", "b", "c"},
		Content: map[string]interface{}{
			"json": "json",
		},
	})
}

View File

@@ -0,0 +1,85 @@
package report
import (
"time"
"go-common/library/conf/env"
"go-common/library/queue/databus"
xtime "go-common/library/time"
)
var (
_defaultManagerConfig = map[string]*conf{
env.DeployEnvFat1: {
Secret: "971d048a2818e37ae124a0293c300e89",
Addr: "172.16.33.158:6205",
},
env.DeployEnvUat: {
Secret: "cde3b480836cc76df3d635470f991caa",
Addr: "172.18.33.50:6205",
},
env.DeployEnvPre: {
Secret: "4a933f8170a4711ace8ec363f7e5f23c",
Addr: "172.18.21.90:6205",
},
env.DeployEnvProd: {
Secret: "4a933f8170a4711ace8ec363f7e5f23c",
Addr: "172.18.21.90:6205",
},
}
_defaultUserConfig = map[string]*conf{
env.DeployEnvFat1: {
Secret: "971d048a2818e37ae124a0293c300e89",
Addr: "172.16.33.158:6205",
},
env.DeployEnvUat: {
Secret: "cde3b480836cc76df3d635470f991caa",
Addr: "172.18.33.50:6205",
},
env.DeployEnvPre: {
Secret: "4a933f8170a4711ace8ec363f7e5f23c",
Addr: "172.18.21.90:6205",
},
env.DeployEnvProd: {
Secret: "4a933f8170a4711ace8ec363f7e5f23c",
Addr: "172.18.21.90:6205",
},
}
_managerConfig = &databus.Config{
Key: "2511663d546f1413",
Secret: "971d048a2818e37ae124a0293c300e89",
Group: "LogAudit-MainSearch-P",
Topic: "LogAudit-T",
Action: "pub",
Buffer: 10240,
Name: "log-audit/log-sub",
Proto: "tcp",
Addr: "172.16.33.158:6205",
Active: 100,
Idle: 10,
DialTimeout: xtime.Duration(time.Millisecond * 200),
ReadTimeout: xtime.Duration(time.Millisecond * 200),
WriteTimeout: xtime.Duration(time.Millisecond * 200),
IdleTimeout: xtime.Duration(time.Second * 80),
}
_userConfig = &databus.Config{
Key: "2511663d546f1413",
Secret: "971d048a2818e37ae124a0293c300e89",
Group: "LogUserAction-MainSearch-P",
Topic: "LogUserAction-T",
Action: "pub",
Buffer: 10240,
Name: "log-user-action/log-sub",
Proto: "tcp",
Addr: "172.16.33.158:6205",
Active: 100,
Idle: 10,
DialTimeout: xtime.Duration(time.Millisecond * 200),
ReadTimeout: xtime.Duration(time.Millisecond * 200),
WriteTimeout: xtime.Duration(time.Millisecond * 200),
IdleTimeout: xtime.Duration(time.Second * 80),
}
)