Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

87
library/log/BUILD Normal file
View File

@@ -0,0 +1,87 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"agent_test.go",
"dsn_test.go",
"encode_test.go",
"log_test.go",
"pattern_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//library/log/internal:go_default_library",
"//library/net/metadata:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"agent.go",
"doc.go",
"dsn.go",
"file.go",
"handler.go",
"level.go",
"log.go",
"logrus.go",
"pattern.go",
"stdout.go",
"util.go",
"verbose.go",
],
importpath = "go-common/library/log",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/conf/dsn:go_default_library",
"//library/conf/env:go_default_library",
"//library/log/internal:go_default_library",
"//library/log/internal/filewriter:go_default_library",
"//library/net/metadata:go_default_library",
"//library/net/trace:go_default_library",
"//library/stat/prom:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/github.com/sirupsen/logrus:go_default_library",
],
)
go_test(
name = "go_default_xtest",
srcs = ["example_test.go"],
tags = ["automanaged"],
deps = ["//library/log:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//library/log/anticheat:all-srcs",
"//library/log/benchmark:all-srcs",
"//library/log/infoc:all-srcs",
"//library/log/internal:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

127
library/log/CHANGELOG.md Normal file
View File

@@ -0,0 +1,127 @@
### go-common/log
### v1.16.1
> 1.修复文件 handler 不能删除已有老文件问题
### v1.16
> 1. 增加是否批量写日志的判断
### v1.15
> 1.log 如果已定义了 source 字段就不再重新获取
> 2.合并了部分代码
> 3.添加了 log.no-agent flag 可以强制关闭 log-agent
### v1.15
> 1.修复向log agent批量写日志的bug
### v1.14
> 1. 实现文件日志,移除 log4go 依赖
### v1.13.1
> 1.修复infov参数丢失...问题
### v1.13.2
> 1.优化log agent的性能
### v1.13.1
> 1.infoc support mirror request
### v1.13.0
> 1.support mirror request
### v1.12.2
> 1.fix logw bug and add test
### v1.12.1
> 1.add infoc write timeout
### v1.12.0
> 1.add log doc
### v1.11.0
> 1.use library/conf/dsn parse
### v1.10.1
> 1.修复pattern中获取当前行信息的错误:之前的设置在go 1.9中会获取到错误的行,在go 1.10中是正确的(ノへ ̄、)。
### v1.10.0
> 1.log error report to prometheus
### v1.9.0
> 1.log dsn
### v1.8.4
> 1.优化文件日志输出内容
### v1.8.4
> 1.library/log enhancement
### v1.8.4
> 1.infoc新增超过最大重试次数的日志
### v1.8.3
> 1.修改report包从log协议改成databus
### v1.8.2
> 1.fixed funcname
### v1.8.1
> 1.新增report包支持上报行为日志
### v1.8.0
> 1. 优化log.D pool
#### v1.7.1
> 1. agent log enhance
#### v1.7.0
> 1. update infoc sdk
#### v1.6.3
> 1. add zone info
#### v1.6.2
> 1. update verbose doc and stdout log
#### v1.6.1
> 1. 更改默认日志发送等待时间
#### v1.6.0
> 1. add stdout log handler
#### v1.5.3
> 1. close log nil check
#### v1.5.2
> 1. 优先使用Caster环境变量
#### v1.5.1
> 1. 支持Caster环境变量
#### v1.5.0
> 1. agent批量写日志
#### v1.4.0
> 1. 移除log.XXXContext()
#### v1.3.0
> 1. 添加verbose log
#### v1.2.3
> 1. 修复agent退出连接未关闭
> 2. 修复conn重连导致的饥饿无法退出
#### v1.2.2
> 1. 完善net/http, net/rpc日志
#### v1.2.1
> 1. 修复log handler未初始化panic
#### v1.2.0
> 1. 结构化日志
#### v1.1.0
> 1. 剔除elk, synclog
#### v1.0.0
> 1. 初始化项目,更新依赖

View File

@@ -0,0 +1,9 @@
# Owner
maojian
# Author
zhaogangtao
# Reviewer
zhoujiahui
zhapuyu

12
library/log/OWNERS Normal file
View File

@@ -0,0 +1,12 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- maojian
- zhaogangtao
labels:
- library
- library/log
reviewers:
- zhaogangtao
- zhapuyu
- zhoujiahui

11
library/log/README.md Normal file
View File

@@ -0,0 +1,11 @@
#### log
##### 项目简介
> 日志基础库
##### 编译环境
> 请使用golang v1.7.x以上版本编译执行。
##### 依赖包
> 1.公共包go-common

219
library/log/agent.go Normal file
View File

@@ -0,0 +1,219 @@
package log
import (
"context"
"fmt"
stdlog "log"
"net"
"strconv"
"sync"
"time"
"go-common/library/conf/env"
"go-common/library/log/internal"
"go-common/library/net/metadata"
"go-common/library/net/trace"
xtime "go-common/library/time"
)
const (
_agentTimeout = xtime.Duration(20 * time.Millisecond)
_mergeWait = 1 * time.Second
_maxBuffer = 10 * 1024 * 1024 // 10mb
_defaultChan = 2048
_defaultAgentConfig = "unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024"
)
var (
_logSeparator = []byte("\u0001")
_defaultTaskIDs = map[string]string{
env.DeployEnvFat1: "000069",
env.DeployEnvUat: "000069",
env.DeployEnvPre: "000161",
env.DeployEnvProd: "000161",
}
)
// AgentHandler agent struct.
type AgentHandler struct {
c *AgentConfig
msgs chan []core.Field
waiter sync.WaitGroup
pool sync.Pool
enc core.Encoder
batchSend bool
filters map[string]struct{}
}
// AgentConfig agent config.
type AgentConfig struct {
TaskID string
Buffer int
Proto string `dsn:"network"`
Addr string `dsn:"address"`
Chan int `dsn:"query.chan"`
Timeout xtime.Duration `dsn:"query.timeout"`
}
// NewAgent creates an agent log handler. A nil ac falls back to the
// package default DSN; a missing TaskID is filled from the per-env
// defaults. Fixed identity fields (app id, env, host, zone) are encoded
// once into the JSON encoder buffer; the async writeproc goroutine is
// started before returning.
// NOTE(review): reads package-level config `c` (c.Family, c.Host) and
// `_agentDSN`, declared elsewhere in this package.
func NewAgent(ac *AgentConfig) (a *AgentHandler) {
    if ac == nil {
        // no explicit config: derive one from the default agent DSN
        ac = parseDSN(_agentDSN)
    }
    if len(ac.TaskID) == 0 {
        // per-deploy-env lancer task id defaults
        ac.TaskID = _defaultTaskIDs[env.DeployEnv]
    }
    a = &AgentHandler{
        c: ac,
        enc: core.NewJSONEncoder(core.EncoderConfig{
            EncodeTime:     core.EpochTimeEncoder,
            EncodeDuration: core.SecondsDurationEncoder,
        }, core.NewBuffer(0)),
    }
    // pool of reusable field slices handed out by data()/reclaimed by free()
    a.pool.New = func() interface{} {
        return make([]core.Field, 0, 16)
    }
    if ac.Chan == 0 {
        ac.Chan = _defaultChan
    }
    a.msgs = make(chan []core.Field, ac.Chan)
    if ac.Timeout == 0 {
        ac.Timeout = _agentTimeout
    }
    if ac.Buffer == 0 {
        ac.Buffer = 100
    }
    a.waiter.Add(1)
    // set fixed k/v into enc buffer
    KV(_appID, c.Family).AddTo(a.enc)
    KV(_deplyEnv, env.DeployEnv).AddTo(a.enc)
    KV(_instanceID, c.Host).AddTo(a.enc)
    KV(_zone, env.Zone).AddTo(a.enc)
    // unixpacket is message-oriented and lossless enough to batch writes
    if a.c.Proto == "unixpacket" {
        a.batchSend = true
    }
    go a.writeproc()
    return
}
// data hands out a reusable field slice from the handler's pool.
func (h *AgentHandler) data() []core.Field {
    fields := h.pool.Get().([]core.Field)
    return fields
}
// free truncates f (keeping its capacity) and returns it to the pool.
func (h *AgentHandler) free(f []core.Field) {
    h.pool.Put(f[:0])
}
// Log queues a structured record for asynchronous delivery to the log
// agent. Trace id, caller, color, cluster and mirror metadata found in
// ctx are appended as extra fields. The call never blocks: when the
// internal channel is full the record is dropped.
func (h *AgentHandler) Log(ctx context.Context, lv Level, args ...D) {
    if len(args) == 0 {
        // nothing to log; also avoids enqueueing a useless empty record
        return
    }
    f := h.data()
    for i := range args {
        f = append(f, args[i])
    }
    if t, ok := trace.FromContext(ctx); ok {
        if s, ok := t.(fmt.Stringer); ok {
            f = append(f, KV(_tid, s.String()))
        } else {
            // t is known NOT to implement fmt.Stringer here, so %s would
            // render as "%!s(...)"; %v is the correct verb (go vet flags %s).
            f = append(f, KV(_tid, fmt.Sprintf("%v", t)))
        }
    }
    if caller := metadata.String(ctx, metadata.Caller); caller != "" {
        f = append(f, KV(_caller, caller))
    }
    if color := metadata.String(ctx, metadata.Color); color != "" {
        f = append(f, KV(_color, color))
    }
    if cluster := metadata.String(ctx, metadata.Cluster); cluster != "" {
        f = append(f, KV(_cluster, cluster))
    }
    if metadata.Bool(ctx, metadata.Mirror) {
        f = append(f, KV(_mirror, true))
    }
    select {
    case h.msgs <- f:
    default:
        // channel full: drop the record, but recycle the pooled slice
        // instead of leaking it to the garbage collector.
        h.free(f)
    }
}
// writeproc write data into connection.
// It is the single consumer of h.msgs: records are encoded into a
// shared buffer (batched for unixpacket), flushed either when the batch
// is full or on the merge tick, and the connection is (re)dialed lazily.
// A nil message is the shutdown sentinel sent by Close.
func (h *AgentHandler) writeproc() {
    var (
        conn  net.Conn
        err   error
        count int
        quit  bool
    )
    buf := core.NewBuffer(2048)
    defer h.waiter.Done()
    taskID := []byte(h.c.TaskID)
    tick := time.NewTicker(_mergeWait)
    for {
        select {
        case d := <-h.msgs:
            if d == nil {
                // shutdown sentinel from Close: flush what we have, then exit
                quit = true
                goto DUMP
            }
            if buf.Len() >= _maxBuffer {
                buf.Reset() // avoid oom
            }
            now := time.Now()
            // each record is: taskID + millisecond timestamp + encoded fields
            buf.Write(taskID)
            buf.Write([]byte(strconv.FormatInt(now.UnixNano()/1e6, 10)))
            h.enc.Encode(buf, d...)
            h.free(d)
            if h.batchSend {
                buf.Write(_logSeparator)
                // keep accumulating until the batch or the buffer is full
                if count++; count < h.c.Buffer && buf.Len() < _maxBuffer {
                    continue
                }
            }
        case <-tick.C:
            // periodic flush so a quiet stream still gets written out
        }
        if conn == nil || err != nil {
            // lazy (re)connect; on failure keep buffering and retry next round
            if conn, err = net.DialTimeout(h.c.Proto, h.c.Addr, time.Duration(h.c.Timeout)); err != nil {
                stdlog.Printf("net.DialTimeout(%s:%s) error(%v)\n", h.c.Proto, h.c.Addr, err)
                continue
            }
        }
    DUMP:
        if conn != nil && buf.Len() > 0 {
            count = 0
            if _, err = conn.Write(buf.Bytes()); err != nil {
                stdlog.Printf("conn.Write(%d bytes) error(%v)\n", buf.Len(), err)
                conn.Close()
            } else {
                // only succeed reset buffer, let conn reconnect.
                buf.Reset()
            }
        }
        if quit {
            if conn != nil && err == nil {
                conn.Close()
            }
            return
        }
    }
}
// Close sends the shutdown sentinel to writeproc and blocks until the
// writer goroutine has flushed and exited. Always returns nil.
func (h *AgentHandler) Close() (err error) {
    h.msgs <- nil // nil is writeproc's quit signal
    h.waiter.Wait()
    return
}
// SetFormat is a no-op: agent output is structured (JSON-encoded), so a
// pattern format does not apply to this handler.
func (h *AgentHandler) SetFormat(string) {
    // discard setformat
}

17
library/log/agent_test.go Normal file
View File

@@ -0,0 +1,17 @@
package log
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestBatchSendSwitch(t *testing.T) {
agent := NewAgent(nil)
assert.Equal(t, true, agent.batchSend)
agent = NewAgent(&AgentConfig{TaskID: "000161", Proto: "unixpacket", Addr: "/var/run/lancer/collector_tcp.sock"})
assert.Equal(t, true, agent.batchSend)
agent = NewAgent(&AgentConfig{TaskID: "000161", Proto: "unixgram", Addr: "/var/run/lancer/collector.sock"})
assert.Equal(t, false, agent.batchSend)
}

View File

@@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["anticheat_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = ["//library/log/infoc:go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["anticheat.go"],
importpath = "go-common/library/log/anticheat",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/log:go_default_library",
"//library/log/infoc:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/metadata:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,178 @@
package anticheat
import (
"context"
"net/http"
"strconv"
"time"
"go-common/library/log"
"go-common/library/log/infoc"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/metadata"
)
// AntiCheat send anti-cheating info to berserker.
type AntiCheat struct {
infoc *infoc.Infoc
}
// New builds an AntiCheat logger backed by an infoc client created from c.
func New(c *infoc.Config) (a *AntiCheat) {
    a = &AntiCheat{
        infoc: infoc.New(c),
    }
    return
}
// antiCheat 尽可能多的提供信息.
type antiCheat struct {
Buvid string
Build string
Client string // for example ClientWeb
IP string
UID string
Aid string
Mid string
Sid string
Refer string
URL string
From string
ItemID string
ItemType string // for example ItemTypeAv
Action string // for example ActionClick
ActionID string
UA string
TS string
Extra string
}
// anti-cheat const.
const (
ClientWeb = "web"
ClientIphone = "iphone"
ClientIpad = "ipad"
ClientAndroid = "android"
// AntiCheat ItemType
ItemTypeAv = "av"
ItemTypeBangumi = "bangumi"
ItemTypeLive = "live"
ItemTypeTopic = "topic"
ItemTypeRank = "rank"
ItemTypeActivity = "activity"
ItemTypeTag = "tag"
ItemTypeAD = "ad"
ItemTypeLV = "lv"
// AntiCheat Action
ActionClick = "click"
ActionPlay = "play"
ActionFav = "fav"
ActionCoin = "coin"
ActionDM = "dm"
ActionToView = "toview"
ActionShare = "share"
ActionSpace = "space"
Actionfollow = "follow"
ActionHeartbeat = "heartbeat"
ActionAnswer = "answer"
)
// toSlice flattens the record into the fixed 18-element positional
// layout expected by the collector.
func (a *antiCheat) toSlice() (as []interface{}) {
    as = []interface{}{
        a.Buvid, a.Build, a.Client, a.IP, a.UID, a.Aid, a.Mid,
        a.Sid, a.Refer, a.URL, a.From, a.ItemID, a.ItemType,
        a.Action, a.ActionID, a.UA, a.TS, a.Extra,
    }
    return
}
// InfoAntiCheat2 for new http framework(bm).
// It forwards to infoAntiCheat, using the bm context both as the
// context and as the source of the *http.Request, and taking the remote
// IP from the request metadata.
func (a *AntiCheat) InfoAntiCheat2(ctx *bm.Context, uid, aid, mid, itemID, itemType, action, actionID string) error {
    return a.infoAntiCheat(ctx, ctx.Request, metadata.String(ctx, metadata.RemoteIP), uid, aid, mid, itemID, itemType, action, actionID)
}
// infoAntiCheat builds an antiCheat record from the request plus the
// explicit identifiers and submits it through infoc (mirror requests
// are filtered by Infov). The client type is derived from the request:
// no access_key means web (buvid from the buvid3 cookie); otherwise
// platform/device select the app client (buvid/build from header/form).
// NOTE(review): reads req.Form directly — assumes ParseForm was already
// invoked by the HTTP framework; confirm against callers.
func (a *AntiCheat) infoAntiCheat(ctx context.Context, req *http.Request, IP, uid, aid, mid, itemID, itemType, action, actionID string) error {
    params := req.Form
    ac := &antiCheat{
        UID:      uid,
        Aid:      aid,
        Mid:      mid,
        ItemID:   itemID,
        ItemType: itemType,
        Action:   action,
        ActionID: actionID,
        IP:       IP,
        URL:      req.URL.Path,
        Refer:    req.Header.Get("Referer"),
        UA:       req.Header.Get("User-Agent"),
        TS:       strconv.FormatInt(time.Now().Unix(), 10),
    }
    ac.From = params.Get("from")
    if csid, err := req.Cookie("sid"); err == nil {
        ac.Sid = csid.Value
    }
    var cli string
    switch {
    case len(params.Get("access_key")) == 0:
        cli = ClientWeb
        if ck, err := req.Cookie("buvid3"); err == nil {
            ac.Buvid = ck.Value
        }
    case params.Get("platform") == "ios":
        cli = ClientIphone
        if params.Get("device") == "pad" {
            cli = ClientIpad
        }
    case params.Get("platform") == "android":
        cli = ClientAndroid
    default:
        // fixed message typo: "unkown" -> "unknown"
        log.Warn("unknown plat(%s)", params.Get("platform"))
    }
    ac.Client = cli
    if cli != ClientWeb {
        // app clients carry identity in headers/params rather than cookies
        ac.Buvid = req.Header.Get("buvid")
        ac.Build = params.Get("build")
    }
    return a.infoc.Infov(ctx, ac.toSlice()...)
}
// ServiceAntiCheat reports a generic anti-cheat record built from p.
func (a *AntiCheat) ServiceAntiCheat(p map[string]string) error {
    return a.infoc.Info(convertBase(p)...)
}

// ServiceAntiCheatBus reports an anti-cheat record (e.g. for answer):
// the base fields from p followed by business-specific fields in bus.
func (a *AntiCheat) ServiceAntiCheatBus(p map[string]string, bus []interface{}) error {
    row := convertBase(p)
    row = append(row, bus...)
    return a.infoc.Info(row...)
}

// ServiceAntiCheatv is ServiceAntiCheat with mirror-request filtering via ctx.
func (a *AntiCheat) ServiceAntiCheatv(ctx context.Context, p map[string]string) error {
    return a.infoc.Infov(ctx, convertBase(p)...)
}

// ServiceAntiCheatBusv is ServiceAntiCheatBus with mirror-request filtering via ctx.
func (a *AntiCheat) ServiceAntiCheatBusv(ctx context.Context, p map[string]string, bus []interface{}) error {
    row := convertBase(p)
    row = append(row, bus...)
    return a.infoc.Infov(ctx, row...)
}
// convertBase maps the well-known keys of p onto the positional
// anti-cheat layout, stamping the current unix time into TS. Missing
// keys simply yield empty strings.
func convertBase(p map[string]string) (res []interface{}) {
    record := antiCheat{
        ItemType: p["itemType"],
        Action:   p["action"],
        IP:       p["ip"],
        Mid:      p["mid"],
        UID:      p["fid"],
        Aid:      p["aid"],
        Sid:      p["sid"],
        UA:       p["ua"],
        Buvid:    p["buvid"],
        Refer:    p["refer"],
        URL:      p["url"],
        TS:       strconv.FormatInt(time.Now().Unix(), 10),
    }
    res = record.toSlice()
    return
}

View File

@@ -0,0 +1,67 @@
package anticheat
import (
"sync"
"testing"
"go-common/library/log/infoc"
)
var (
once sync.Once
a *AntiCheat
)
func onceInit() {
a = New(&infoc.Config{
TaskID: "000146",
Addr: "172.16.0.204:514",
Proto: "tcp",
ChanSize: 1,
})
}
// go test -test.v -test.bench Benchmark_InfoAntiCheat
// func Benchmark_InfoAntiCheat(b *testing.B) {
// once.Do(onceInit)
// client := httpx.NewClient(&httpx.ClientConfig{
// App: &conf.App{
// Key: "appKey",
// Secret: "appSecret",
// },
// Timeout: 1,
// })
// params := url.Values{}
// params.Set("access_key", "infoc_access_key")
// params.Set("platform", "android")
// params.Set("build", "1111111")
// req, err := client.NewRequest("GET", "foo-api", "127.1.1.1", params)
// if err != nil {
// b.FailNow()
// }
// c := wctx.NewContext(ctx, req, nil, time.Millisecond*100)
// for j := 0; j < b.N; j++ {
// a.InfoAntiCheat(c, "infoc-test", "ip-address", "mid", "4", "5", "6", "7")
// }
// }
// go test -test.v -test.bench Benchmark_ServiceAntiCheat
func Benchmark_ServiceAntiCheat(b *testing.B) {
once.Do(onceInit)
ac := map[string]string{
"itemType": infoc.ItemTypeAv,
"action": infoc.ActionShare,
"ip": "remoteIP",
"mid": "mid",
"fid": "fid",
"aid": "aid",
"sid": "sid",
"ua": "ua",
"buvid": "buvid",
"refer": "refer",
"url": "infoc-test",
}
for j := 0; j < b.N; j++ {
a.ServiceAntiCheat(ac)
}
}

View File

@@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "benchmark",
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
importpath = "go-common/library/log/benchmark",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/log:go_default_library",
"//library/time:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,109 @@
package main
import (
"context"
"flag"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"os"
"sync/atomic"
"time"
"go-common/library/log"
xtime "go-common/library/time"
)
var isClient bool
func init() {
os.Setenv("LOG_AGENT", "tcp://127.0.0.1:8080")
flag.BoolVar(&isClient, "c", false, "-c=true")
}
func main() {
flag.Parse()
if isClient {
go func() {
fmt.Println(http.ListenAndServe("localhost:6060", nil))
}()
log.Init(&log.Config{
Stdout: false,
Agent: &log.AgentConfig{
Proto: "tcp",
Addr: "127.0.0.1:8080",
Chan: 2048,
Buffer: 1,
Timeout: xtime.Duration(time.Second),
}})
for i := 0; i < 3; i++ {
go func() {
arg := `area:"reply"
message:"\345\233\236\345\244\215 @\347\231\276\350\215\211\345\221\263\346\235\245\344\274\212\344\273\275 :\345\223\210\357\274\214\344\275\240\345\260\261\345\217\252\347\234\213\345\210\260\344\272\206pick me up\346\262\241\347\234\213\345\210\260\346\222\236\357\274\237\347\262\211\344\270\235\350\247\243\351\207\212\344\272\206\346\227\240\346\225\260\351\201\215\344\273\245\345\211\215\351\202\243\344\270\252\345\245\263\345\233\242\344\270\200\344\270\252\346\234\210\345\260\261\346\262\241\345\255\246\345\225\245\350\210\236\357\274\214\347\277\273\346\235\245\350\246\206\345\216\273\350\267\263\357\274\214\345\217\202\345\212\240101\347\232\204\346\227\266\345\200\2313\345\244\251\344\270\200\344\270\252\350\210\236\350\277\230\345\234\250\350\277\231\350\257\264\343\200\202\346\234\215\344\272\206"
id:1075824609 oid:31813039 mid:76839481 `
for {
log.Infov(context.Background(),
log.KV("user", "test_user"),
log.KV("ip", "127.0.0.1:8080"),
log.KV("path", "127.0.0.1:8080/test_user"),
log.KV("ret", 0),
log.KV("args", arg),
log.KV("stack", "nil"),
log.KV("error", ""),
log.KV("ts", time.Second.Seconds()),
)
}
}()
}
time.Sleep(time.Hour)
} else {
startServer()
}
}
// startServer accepts TCP connections on :8080 and drains each one in
// its own goroutine while a background goroutine reports throughput.
func startServer() {
    go calcuQPS()
    lis, err := net.Listen("tcp", ":8080")
    if err != nil {
        panic(err)
    }
    for {
        c, aerr := lis.Accept()
        if aerr != nil {
            return
        }
        go serve(c)
    }
}
var total int64
// calcuQPS samples the global byte counter every 3 seconds and prints a
// throughput figure.
// NOTE(review): the divisor gap.Nanoseconds()/1e7 expresses the elapsed
// time in 10ms units, so the printed "bw" is bytes per 10ms, not bytes
// per second — confirm whether that scaling is intended.
func calcuQPS() {
    var lastTotal int64
    var lastUpdated time.Time
    for {
        time.Sleep(time.Second * 3)
        t := atomic.LoadInt64(&total)
        n := time.Now()
        change := t - lastTotal
        gap := n.Sub(lastUpdated)
        fmt.Println("bw:", change/(gap.Nanoseconds()/1e7))
        lastTotal = t
        lastUpdated = n
    }
}
// serve drains conn, adding every byte read to the global counter.
// On read error the connection is closed and only the bytes of the
// final read are printed (previously the whole 4096-byte buffer was
// printed, including stale data from earlier reads).
func serve(conn net.Conn) {
    defer conn.Close()
    p := make([]byte, 4096)
    last := 0
    for {
        nRead, err := conn.Read(p)
        if nRead > 0 {
            last = nRead
            atomic.AddInt64(&total, int64(nRead))
        }
        if err != nil {
            break
        }
    }
    fmt.Println(string(p[:last]))
}

65
library/log/doc.go Normal file
View File

@@ -0,0 +1,65 @@
/*
Package log is the kratos logging library.

Features:
 1. log to elk (remote log agent)
 2. log to local files (log4go-style implementation, no log4go dependency)
 3. log to stdout
 4. verbose logging modeled on glog, enabled per verbose level, off by default

Configuration:

1. Default agent config. The library ships with working defaults and
switches the remote endpoint automatically based on env, so it can be
used directly as:

	log.Init(nil)

2. Command-line flags / environment variables:

	flag         env          description
	log.stdout   LOG_STDOUT   enable stdout logging
	log.agent    LOG_AGENT    remote agent address, e.g. unixpacket:///var/run/lancer/collector_tcp.sock?timeout=100ms&chan=1024
	log.dir      LOG_DIR      file log directory
	log.v        LOG_V        verbose log level
	log.module   LOG_MODULE   per-file verbose levels, e.g. file=1,file2=2
	log.filter   LOG_FILTER   fields to filter, e.g. field1,field2

3. Config file. For special needs the following format is supported:

	[log]
	family = "xxx-service"
	dir = "/data/log/xxx-service/"
	stdout = true
	vLevel = 3
	filter = ["field1", "field2"]
	[log.module]
	"dao_user" = 2
	"servic*" = 1
	[log.agent]
	taskID = "00000x"
	proto = "unixpacket"
	addr = "/var/run/lancer/collector_tcp.sock"
	chanSize = 10240

Option notes:

1. log
	family   project name; defaults to the $APPID environment variable
	stdout   stdout logging; not recommended in prod
	filter   fields to filter out, their values are replaced with "***"
	dir      file log directory; not recommended in prod
	v        global verbose log level

2. log.module
	per-file verbose level overrides

3. log.agent (remote agent options)
	taskID    task ID assigned by lancer
	proto     network protocol, typically tcp, udp or unixgram
	addr      network address, typically ip:port or a sock path
	chanSize  log queue length
*/
package log

68
library/log/dsn.go Normal file
View File

@@ -0,0 +1,68 @@
package log
import (
"bytes"
"fmt"
"strconv"
"strings"
"go-common/library/conf/dsn"
"github.com/pkg/errors"
)
// verboseModule maps a file name (optionally with a trailing wildcard,
// e.g. "servic*") to the verbose level enabled for it.
type verboseModule map[string]int32

// logFilter is the list of field names whose values are masked in output.
type logFilter []string

// String implements flag.Value.
func (f *logFilter) String() string {
    return fmt.Sprint(*f)
}

// Set sets the value of the named command-line flag.
// format: -log.filter key1,key2
func (f *logFilter) Set(value string) error {
    for _, i := range strings.Split(value, ",") {
        *f = append(*f, strings.TrimSpace(i))
    }
    return nil
}

// String implements flag.Value, producing the same "file=level,..."
// format that Set parses. (The "=" separator was previously missing,
// so the output could not round-trip through Set.)
func (m verboseModule) String() string {
    // FIXME strings.Builder
    var buf bytes.Buffer
    for k, v := range m {
        buf.WriteString(k)
        buf.WriteString("=")
        buf.WriteString(strconv.FormatInt(int64(v), 10))
        buf.WriteString(",")
    }
    return buf.String()
}

// Set sets the value of the named command-line flag.
// format: -log.module file=1,file2=2
// Entries that are not "key=int" pairs are silently skipped.
func (m verboseModule) Set(value string) error {
    for _, i := range strings.Split(value, ",") {
        kv := strings.Split(i, "=")
        if len(kv) == 2 {
            if v, err := strconv.ParseInt(kv[1], 10, 64); err == nil {
                m[strings.TrimSpace(kv[0])] = int32(v)
            }
        }
    }
    return nil
}
// parseDSN parse log agent dsn.
// unixgram:///var/run/lancer/collector.sock?timeout=100ms&chan=1024
// The scheme maps to AgentConfig.Proto, the path to Addr, and the query
// parameters are bound via the `dsn` struct tags. A malformed DSN is a
// configuration error, so it panics rather than returning an error.
func parseDSN(rawdsn string) *AgentConfig {
    ac := new(AgentConfig)
    d, err := dsn.Parse(rawdsn)
    if err != nil {
        panic(errors.WithMessage(err, fmt.Sprintf("log: invalid dsn: %s", rawdsn)))
    }
    if _, err = d.Bind(ac); err != nil {
        panic(errors.WithMessage(err, fmt.Sprintf("log: invalid dsn: %s", rawdsn)))
    }
    return ac
}

22
library/log/dsn_test.go Normal file
View File

@@ -0,0 +1,22 @@
package log
import (
"flag"
"testing"
"time"
xtime "go-common/library/time"
"github.com/stretchr/testify/assert"
)
func TestUnixParseDSN(t *testing.T) {
dsn := "unix:///data/log/lancer.sock?chan=1024&timeout=5s"
flag.Parse()
flag.Set("log.stdout", "true")
as := parseDSN(dsn)
assert.Equal(t, "unix", as.Proto)
assert.Equal(t, "/data/log/lancer.sock", as.Addr)
assert.Equal(t, 1024, as.Chan)
assert.Equal(t, xtime.Duration(5*time.Second), as.Timeout)
}

View File

@@ -0,0 +1,26 @@
package log
import (
"fmt"
"testing"
"time"
"go-common/library/log/internal"
)
func TestJsonEncode(t *testing.T) {
enc := core.NewJSONEncoder(core.EncoderConfig{
EncodeTime: core.EpochTimeEncoder,
EncodeDuration: core.SecondsDurationEncoder,
}, core.NewBuffer(0))
KV("constant", "constant").AddTo(enc)
for i := 0; i < 3; i++ {
b := core.GetPool()
err := enc.Encode(b, KV("no", i), KV("cat", "is cat"), KV("dog", time.Now()))
if err != nil {
t.Fatalf("enc.Encode error(%v)", err)
}
fmt.Println(string(b.Bytes()))
b.Free()
}
}

View File

@@ -0,0 +1,55 @@
package log_test
import (
"context"
"go-common/library/log"
)
// This example will logging a text to log file.
func ExampleInfo() {
fc := &log.Config{
Family: "test-log",
Dir: "/data/log/test",
}
log.Init(fc)
defer log.Close()
log.Info("test %s", "file log")
ac := &log.Config{
Family: "test-log",
Agent: &log.AgentConfig{
TaskID: "000003",
Addr: "172.16.0.204:514",
Proto: "tcp",
Chan: 1024,
},
}
log.Init(ac)
defer log.Close()
log.Info("test %s", "agent log")
}
// This example will logging a structured text to log agent.
func ExampleInfov() {
ac := &log.Config{
Family: "test-log",
Agent: &log.AgentConfig{
TaskID: "000003",
Addr: "172.16.0.204:514",
Proto: "tcp",
Chan: 1024,
},
}
log.Init(ac)
defer log.Close()
log.Infov(context.TODO(), log.KV("key1", "val1"), log.KV("key2", "val2"))
}
// This example will set log format
func ExampleSetFormat() {
log.SetFormat("%L %T %f %M")
log.Info("hello")
// log output:
// INFO 2018-06-28T12:15:48.713784 main.main:8 hello
}

92
library/log/file.go Normal file
View File

@@ -0,0 +1,92 @@
package log
import (
"context"
"io"
"path/filepath"
"time"
"go-common/library/log/internal/filewriter"
)
// level idx
const (
_infoIdx = iota
_warnIdx
_errorIdx
_totalIdx
)
var _fileNames = map[int]string{
_infoIdx: "info.log",
_warnIdx: "warning.log",
_errorIdx: "error.log",
}
// FileHandler .
type FileHandler struct {
render Render
fws [_totalIdx]*filewriter.FileWriter
}
// NewFile creates a file logger writing info/warning/error records to
// separate files under dir. Non-positive rotateSize/maxLogFile leave
// the corresponding filewriter defaults in place; writer creation
// failure panics (configuration error).
// NOTE(review): bufferSize is accepted but not applied to the writers.
func NewFile(dir string, bufferSize, rotateSize int64, maxLogFile int) *FileHandler {
    newWriter := func(name string) *filewriter.FileWriter {
        opts := make([]filewriter.Option, 0, 2)
        if rotateSize > 0 {
            opts = append(opts, filewriter.MaxSize(rotateSize))
        }
        if maxLogFile > 0 {
            opts = append(opts, filewriter.MaxFile(maxLogFile))
        }
        w, err := filewriter.New(filepath.Join(dir, name), opts...)
        if err != nil {
            panic(err)
        }
        return w
    }
    h := &FileHandler{
        render: newPatternRender("[%D %T] [%L] [%S] %M"),
    }
    for idx, name := range _fileNames {
        h.fws[idx] = newWriter(name)
    }
    return h
}
// Log renders the record to the file matching lv: warnings and errors
// go to their dedicated files, every other level to info.log. A newline
// terminates each rendered record.
func (h *FileHandler) Log(ctx context.Context, lv Level, args ...D) {
    d := make(map[string]interface{}, 10+len(args))
    for _, arg := range args {
        d[arg.Key] = arg.Value
    }
    // add extra fields
    addExtraField(ctx, d)
    d[_time] = time.Now().Format(_timeFormat)
    idx := _infoIdx
    switch lv {
    case _warnLevel:
        idx = _warnIdx
    case _errorLevel:
        idx = _errorIdx
    }
    var w io.Writer = h.fws[idx]
    h.render.Render(w, d)
    w.Write([]byte("\n"))
}
// Close closes every underlying file writer. Per-writer close errors
// are deliberately ignored; nil is always returned.
func (h *FileHandler) Close() error {
    for _, fw := range h.fws {
        _ = fw.Close() // best effort
    }
    return nil
}
// SetFormat set log format.
// It replaces the pattern renderer; see the pattern syntax used by
// newPatternRender (e.g. "[%D %T] [%L] [%S] %M").
func (h *FileHandler) SetFormat(format string) {
    h.render = newPatternRender(format)
}

115
library/log/handler.go Normal file
View File

@@ -0,0 +1,115 @@
package log
import (
"context"
"time"
pkgerr "github.com/pkg/errors"
)
const (
_timeFormat = "2006-01-02T15:04:05.999999"
// log level defined in level.go.
_levelValue = "level_value"
// log level name: INFO, WARN...
_level = "level"
// log time.
_time = "time"
// request path.
// _title = "title"
// log file.
_source = "source"
// common log filed.
_log = "log"
// app name.
_appID = "app_id"
// container ID.
_instanceID = "instance_id"
// uniq ID from trace.
_tid = "traceid"
// request time.
// _ts = "ts"
// requester.
_caller = "caller"
// container environment: prod, pre, uat, fat.
_deplyEnv = "env"
// container area.
_zone = "zone"
// mirror flag
_mirror = "mirror"
// color.
_color = "color"
// cluster.
_cluster = "cluster"
)
// Handler is used to handle log events, outputting them to
// stdio or sending them to remote services. See the "handlers"
// directory for implementations.
//
// It is left up to Handlers to implement thread-safety.
type Handler interface {
// Log handle log
// variadic D is k-v struct represent log content
Log(context.Context, Level, ...D)
// SetFormat set render format on log output
// see StdoutHandler.SetFormat for detail
SetFormat(string)
// Close handler
Close() error
}
// newHandlers bundles handlers with the set of field keys to mask.
func newHandlers(filters []string, handlers ...Handler) *Handlers {
    set := make(map[string]struct{}, len(filters))
    for _, key := range filters {
        set[key] = struct{}{}
    }
    return &Handlers{filters: set, handlers: handlers}
}
// Handlers a bundle for hander with filter function.
type Handlers struct {
filters map[string]struct{}
handlers []Handler
}
// Log handlers logging.
// It masks filtered field values with "***", fills in the source field
// (call site) when absent, stamps time/level fields, bumps the error
// metric and fans the record out to every handler.
func (hs Handlers) Log(c context.Context, lv Level, d ...D) {
    var fn string
    for i := range d {
        if _, ok := hs.filters[d[i].Key]; ok {
            d[i].Value = "***"
        }
        if d[i].Key == _source {
            fn = d[i].Value.(string)
        }
    }
    if fn == "" {
        // NOTE: funcName(4) is tuned to the call depth of the public
        // log entry points — do not add or remove call layers here.
        d = append(d, KV(_source, funcName(4)))
    }
    d = append(d, KV(_time, time.Now()), KV(_levelValue, int(lv)), KV(_level, lv.String()))
    errIncr(lv, fn)
    for _, h := range hs.handlers {
        h.Log(c, lv, d...)
    }
}
// Close closes every handler. When several handlers fail, only the last
// error (wrapped with a stack) is returned.
func (hs Handlers) Close() (err error) {
    for _, h := range hs.handlers {
        e := h.Close()
        if e != nil {
            err = pkgerr.WithStack(e)
        }
    }
    return err
}
// SetFormat forwards the render format string to every handler.
func (hs Handlers) SetFormat(format string) {
    for _, handler := range hs.handlers {
        handler.SetFormat(format)
    }
}

View File

@@ -0,0 +1,42 @@
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["infoc.go"],
importpath = "go-common/library/log/infoc",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/log:go_default_library",
"//library/net/metadata:go_default_library",
"//library/net/netutil:go_default_library",
"//library/time:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["infoc_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = ["//library/net/metadata:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

245
library/log/infoc/infoc.go Normal file
View File

@@ -0,0 +1,245 @@
package infoc
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
"net"
"strconv"
"strings"
"sync"
"time"
"go-common/library/log"
"go-common/library/net/metadata"
"go-common/library/net/netutil"
xtime "go-common/library/time"
)
const (
_infocSpliter = "\001"
_infocReplacer = "|"
_infocLenStart = 2
_infocLenEnd = 6
_protocolLen = 6
_infocTimeout = 50 * time.Millisecond
)
var (
_infocMagic = []byte{172, 190} // NOTE: magic 0xAC0xBE
_infocHeaderLen = []byte{0, 0, 0, 0} // NOTE: body len placeholder
_infocType = []byte{0, 0} // NOTE: type 0
_maxRetry = 10
// ErrFull error chan buffer full.
ErrFull = errors.New("infoc: chan buffer full")
)
var (
// ClientWeb ...
ClientWeb = "web"
ClientIphone = "iphone"
ClientIpad = "ipad"
ClientAndroid = "android"
ItemTypeAv = "av"
ItemTypeBangumi = "bangumi"
ItemTypeLive = "live"
ItemTypeTopic = "topic"
ItemTypeRank = "rank"
ItemTypeActivity = "activity"
ItemTypeTag = "tag"
ItemTypeAD = "ad"
ItemTypeLV = "lv"
ActionClick = "click"
ActionPlay = "play"
ActionFav = "fav"
ActionCoin = "coin"
ActionDM = "dm"
ActionToView = "toview"
ActionShare = "share"
ActionSpace = "space"
Actionfollow = "follow"
ActionHeartbeat = "heartbeat"
ActionAnswer = "answer"
)
// Config is infoc config.
type Config struct {
TaskID string
// udp or tcp
Proto string
Addr string
ChanSize int
DialTimeout xtime.Duration
WriteTimeout xtime.Duration
}
// Infoc infoc struct.
type Infoc struct {
c *Config
header []byte
msgs chan *bytes.Buffer
dialTimeout time.Duration
writeTimeout time.Duration
pool sync.Pool
waiter sync.WaitGroup
}
// New new infoc logger.
// It builds the protocol header from c.TaskID, applies default dial and
// write timeouts when unset, and starts the background writeproc
// goroutine before returning.
// NOTE(review): c must be non-nil — a nil config panics here; confirm
// callers always pass a config.
func New(c *Config) (i *Infoc) {
    i = &Infoc{
        c:            c,
        header:       []byte(c.TaskID),
        msgs:         make(chan *bytes.Buffer, c.ChanSize),
        dialTimeout:  time.Duration(c.DialTimeout),
        writeTimeout: time.Duration(c.WriteTimeout),
        pool: sync.Pool{
            New: func() interface{} {
                return &bytes.Buffer{}
            },
        },
    }
    if i.dialTimeout == 0 {
        i.dialTimeout = _infocTimeout
    }
    if i.writeTimeout == 0 {
        i.writeTimeout = _infocTimeout
    }
    i.waiter.Add(1)
    go i.writeproc()
    return
}
// buf hands out a reusable buffer from the pool.
func (i *Infoc) buf() *bytes.Buffer {
    b := i.pool.Get().(*bytes.Buffer)
    return b
}
// freeBuf resets buf (keeping its capacity) and returns it to the pool.
func (i *Infoc) freeBuf(b *bytes.Buffer) {
    b.Reset()
    i.pool.Put(b)
}
// Info encodes args into an infoc packet and queues it for asynchronous
// delivery. It returns ErrFull when the send channel is saturated, in
// which case the record is dropped and its buffer recycled.
func (i *Infoc) Info(args ...interface{}) (err error) {
    err, res := i.info(args...)
    if err != nil {
        return
    }
    if res == nil {
        // info returns a nil buffer for an empty args list. Guard it:
        // a nil message on the channel is the shutdown sentinel that
        // makes writeproc exit (see Close), so it must never be sent
        // from here.
        return
    }
    select {
    case i.msgs <- res:
    default:
        i.freeBuf(res)
        err = ErrFull
    }
    return
}
// Infov is Info with mirror-request filtering: records originating from
// a mirrored request (per ctx metadata) are silently discarded.
func (i *Infoc) Infov(ctx context.Context, args ...interface{}) (err error) {
    if metadata.Bool(ctx, metadata.Mirror) {
        return nil
    }
    return i.Info(args...)
}
// getValue renders i as a string: ints and bools take the strconv fast
// paths, strings pass through unchanged, and everything else falls back
// to fmt.Sprint.
func getValue(i interface{}) (s string) {
    switch v := i.(type) {
    case string:
        return v
    case int:
        return strconv.Itoa(v)
    case int64:
        return strconv.FormatInt(v, 10)
    case bool:
        return strconv.FormatBool(v)
    default:
        return fmt.Sprint(i)
    }
}
// Close sends the shutdown sentinel to writeproc and waits for the
// writer goroutine to drain and exit. Always returns nil.
func (i *Infoc) Close() error {
    i.msgs <- nil // nil is writeproc's quit signal
    i.waiter.Wait()
    return nil
}
// writeproc write data into connection.
// It is the single consumer of i.msgs. Each packet is written with up
// to _maxRetry attempts under exponential backoff; the connection is
// dialed lazily and redialed after a write failure. A nil message is
// the shutdown sentinel sent by Close.
func (i *Infoc) writeproc() {
    var (
        msg  *bytes.Buffer
        conn net.Conn
        err  error
    )
    bc := netutil.BackoffConfig{
        MaxDelay:  15 * time.Second,
        BaseDelay: 1.0 * time.Second,
        Factor:    1.6,
        Jitter:    0.2,
    }
    for {
        if msg = <-i.msgs; msg == nil {
            break // quit infoc writeproc
        }
        var j int
        for j = 0; j < _maxRetry; j++ {
            if conn == nil || err != nil {
                // lazy (re)connect before each attempt that lacks a usable conn
                if conn, err = net.DialTimeout(i.c.Proto, i.c.Addr, i.dialTimeout); err != nil {
                    log.Error("infoc net dial error(%v)", err)
                    time.Sleep(bc.Backoff(j))
                    continue
                }
            }
            if i.writeTimeout != 0 {
                conn.SetWriteDeadline(time.Now().Add(i.writeTimeout))
            }
            if _, err = conn.Write(msg.Bytes()); err != nil {
                log.Error("infoc conn write error(%v)", err)
                conn.Close()
                time.Sleep(bc.Backoff(j))
                continue
            }
            break
        }
        if j == _maxRetry {
            // packet dropped after exhausting retries
            log.Error("infoc reached max retry times")
        }
        i.freeBuf(msg)
    }
    i.waiter.Done()
    if conn != nil && err == nil {
        conn.Close()
    }
}
// info encodes args into a framed infoc message: magic bytes, a 4-byte
// length placeholder, type, the TaskID header, a millisecond timestamp,
// then the splitter-separated field values. The length field is patched
// in at the end. It returns (nil, nil) when args is empty.
func (i *Infoc) info(args ...interface{}) (err error, buf *bytes.Buffer) {
	if len(args) == 0 {
		return nil, nil
	}
	res := i.buf()
	res.Write(_infocMagic)     // type and body buf, for calc length.
	res.Write(_infocHeaderLen) // placeholder, back-patched below
	res.Write(_infocType)
	res.Write(i.header)
	res.WriteString(strconv.FormatInt(time.Now().UnixNano()/int64(time.Millisecond), 10))
	// append first arg
	// NOTE(review): the first field is written verbatim while later fields
	// have the splitter escaped -- confirm the first field can never
	// contain _infocSpliter.
	_, err = res.WriteString(getValue(args[0]))
	for _, arg := range args[1:] {
		// append ,arg with any embedded splitter replaced
		res.WriteString(_infocSpliter)
		_, err = res.WriteString(strings.Replace(getValue(arg), _infocSpliter, _infocReplacer, -1))
	}
	if err != nil {
		i.freeBuf(res)
		return
	}
	// back-patch the real body length over the placeholder
	bs := res.Bytes()
	binary.BigEndian.PutUint32(bs[_infocLenStart:_infocLenEnd], uint32(res.Len()-_protocolLen))
	buf = res
	return
}

View File

@@ -0,0 +1,90 @@
package infoc
import (
"bytes"
"context"
"os"
"sync"
"testing"
"time"
"go-common/library/net/metadata"
)
var (
	once sync.Once // guards one-time construction of the shared agent
	i1   *Infoc    // package-level agent shared by all tests
)
// TestMain builds the shared agent, runs the suite, and shuts the agent
// down before exiting. os.Exit skips deferred calls, so the original
// `defer i1.Close()` never ran; Close is now invoked explicitly.
func TestMain(m *testing.M) {
	once.Do(createInfoc)
	ret := m.Run()
	i1.Close()
	os.Exit(ret)
}
func createInfoc() {
i1 = New(&Config{
TaskID: "000146",
Addr: "172.16.0.204:514",
Proto: "tcp",
ChanSize: 1,
})
}
// Test_Infoc sends a single record and gives the async writer time to flush.
func Test_Infoc(t *testing.T) {
	err := i1.Info("infoc-test", "ip", "mid", 222)
	// allow the background writeproc a moment before judging the result
	time.Sleep(2 * time.Second)
	if err != nil {
		t.Fatalf("err %+v", err)
	}
}
// Test_Infocv exercises the mirror-filter path of Infov and checks that
// passing a slice as a single argument encodes differently from expanding
// it into separate arguments.
func Test_Infocv(b *testing.T) {
	i1.Infov(context.Background(), "infoc-test", "ip", "mid", 222)
	// a mirrored request must be suppressed
	ctx := metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: true})
	i1.Infov(ctx, "infoc-test", "ip", "mid", 222)
	// Mirror set as the string "1" -- presumably also treated as mirrored
	// by metadata.Bool; TODO confirm its coercion rules
	ctx = metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "1"})
	err := i1.Infov(ctx, "infoc-test", "ip", "mid", 222)
	time.Sleep(2 * time.Second)
	if err != nil {
		b.Fatalf("err %+v", err)
	}
	var args []interface{}
	args = append(args, "infoc-test")
	args = append(args, "ip")
	args = append(args, "mid")
	args = append(args, 222)
	// expanded (four fields) vs slice-as-one-field must differ
	err1, buf1 := i1.info(args...)
	err2, buf2 := i1.info(args)
	if bytes.Equal(buf1.Bytes(), buf2.Bytes()) {
		b.Fatalf("err %+v,%+v,%+v,%+v", err1, err2, buf1.Bytes(), buf2.Bytes())
	}
	args = append([]interface{}{})
	args = append(args, "infoc-test")
	// with a single element the two call styles still encode differently
	err1, buf1 = i1.info(args...)
	err2, buf2 = i1.info(args)
	if bytes.Equal(buf1.Bytes(), buf2.Bytes()) {
		b.Fatalf("err %+v,%+v,%+v,%+v", err1, err2, buf1.Bytes(), buf2.Bytes())
	}
}
// BenchmarkInfoc measures concurrent Info throughput with mixed scalar types.
func BenchmarkInfoc(b *testing.B) {
	once.Do(createInfoc)
	b.RunParallel(func(pb *testing.PB) {
		var (
			f  float32 = 3.55051
			i8 int8    = 2
			u8 uint8   = 2
		)
		for pb.Next() {
			i1.Info("infoc-test", "ip", "mid", i8, u8, f)
		}
	})
}

View File

@@ -0,0 +1,51 @@
# Bazel package for go-common/library/log/internal (auto-managed).
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

# Core encoder/buffer library shared by the log package.
go_library(
    name = "go_default_library",
    srcs = [
        "buffer.go",
        "bufferpool.go",
        "encoder.go",
        "field.go",
        "json_encoder.go",
        "pool.go",
    ],
    importpath = "go-common/library/log/internal",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Aggregates this package's sources plus the filewriter subpackage.
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//library/log/internal/filewriter:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = [
        "buffer_test.go",
        "pool_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)

View File

@@ -0,0 +1,97 @@
package core
import "strconv"
const _size = 1024 // by default, create 1 KiB buffers

// NewBuffer creates a Buffer with the given initial capacity in bytes.
// The parameter was renamed from _size, which shadowed the package-level
// constant of the same name.
func NewBuffer(size int) *Buffer {
	return &Buffer{bs: make([]byte, 0, size)}
}
// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
// the only way to construct one is via a Pool.
type Buffer struct {
	bs   []byte // backing storage; grown by the Append* methods
	pool Pool   // owning pool, used by Free to return the buffer
}
// AppendByte writes a single byte to the Buffer.
func (b *Buffer) AppendByte(v byte) {
	b.bs = append(b.bs, v)
}

// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
	b.bs = append(b.bs, s...)
}

// AppendInt appends an integer to the underlying buffer (assuming base 10).
func (b *Buffer) AppendInt(i int64) {
	b.bs = strconv.AppendInt(b.bs, i, 10)
}

// AppendUint appends an unsigned integer to the underlying buffer (assuming
// base 10).
func (b *Buffer) AppendUint(i uint64) {
	b.bs = strconv.AppendUint(b.bs, i, 10)
}

// AppendBool appends a bool to the underlying buffer.
func (b *Buffer) AppendBool(v bool) {
	b.bs = strconv.AppendBool(b.bs, v)
}

// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
// or +/- Inf.
func (b *Buffer) AppendFloat(f float64, bitSize int) {
	b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
}

// Len returns the length of the underlying byte slice.
func (b *Buffer) Len() int {
	return len(b.bs)
}

// Cap returns the capacity of the underlying byte slice.
func (b *Buffer) Cap() int {
	return cap(b.bs)
}

// Bytes returns a mutable reference to the underlying byte slice.
func (b *Buffer) Bytes() []byte {
	return b.bs
}

// String returns a string copy of the underlying byte slice.
func (b *Buffer) String() string {
	return string(b.bs)
}

// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
// backing array.
func (b *Buffer) Reset() {
	b.bs = b.bs[:0]
}

// Write implements io.Writer. It always succeeds (the error is always nil).
func (b *Buffer) Write(bs []byte) (int, error) {
	b.bs = append(b.bs, bs...)
	return len(bs), nil
}

// TrimNewline trims any final "\n" byte from the end of the buffer.
func (b *Buffer) TrimNewline() {
	if i := len(b.bs) - 1; i >= 0 {
		if b.bs[i] == '\n' {
			b.bs = b.bs[:i]
		}
	}
}

// Free returns the Buffer to its Pool.
//
// Callers must not retain references to the Buffer after calling Free.
func (b *Buffer) Free() {
	b.pool.put(b)
}

View File

@@ -0,0 +1,91 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import (
"bytes"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBufferWrites table-tests each Append*/Write method against its
// expected string rendering.
func TestBufferWrites(t *testing.T) {
	// NOTE(review): NewPool(0) presumably falls back to the default _size
	// capacity -- the Cap assertion below relies on that; confirm in pool.go.
	buf := NewPool(0).Get()
	tests := []struct {
		desc string
		f    func()
		want string
	}{
		{"AppendByte", func() { buf.AppendByte('v') }, "v"},
		{"AppendString", func() { buf.AppendString("foo") }, "foo"},
		{"AppendIntPositive", func() { buf.AppendInt(42) }, "42"},
		{"AppendIntNegative", func() { buf.AppendInt(-42) }, "-42"},
		{"AppendUint", func() { buf.AppendUint(42) }, "42"},
		{"AppendBool", func() { buf.AppendBool(true) }, "true"},
		{"AppendFloat64", func() { buf.AppendFloat(3.14, 64) }, "3.14"},
		// Intenationally introduce some floating-point error.
		{"AppendFloat32", func() { buf.AppendFloat(float64(float32(3.14)), 32) }, "3.14"},
		{"AppendWrite", func() { buf.Write([]byte("foo")) }, "foo"},
	}
	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			buf.Reset()
			tt.f()
			assert.Equal(t, tt.want, buf.String(), "Unexpected buffer.String().")
			assert.Equal(t, tt.want, string(buf.Bytes()), "Unexpected string(buffer.Bytes()).")
			assert.Equal(t, len(tt.want), buf.Len(), "Unexpected buffer length.")
			// We're not writing more than a kibibyte in tests.
			assert.Equal(t, _size, buf.Cap(), "Expected buffer capacity to remain constant.")
		})
	}
}
// BenchmarkBuffers compares append throughput of a raw byte slice, the
// standard library bytes.Buffer, and this package's Buffer.
func BenchmarkBuffers(b *testing.B) {
	// Because we use the strconv.AppendFoo functions so liberally, we can't
	// use the standard library's bytes.Buffer anyways (without incurring a
	// bunch of extra allocations). Nevertheless, let's make sure that we're
	// not losing any precious nanoseconds.
	str := strings.Repeat("a", 1024)
	slice := make([]byte, 1024)
	buf := bytes.NewBuffer(slice)
	custom := NewPool(0).Get()
	b.Run("ByteSlice", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			slice = append(slice, str...)
			slice = slice[:0]
		}
	})
	b.Run("BytesBuffer", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			buf.WriteString(str)
			buf.Reset()
		}
	})
	b.Run("CustomBuffer", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			custom.AppendString(str)
			custom.Reset()
		}
	})
}

View File

@@ -0,0 +1,29 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package core houses zap's shared internal buffer pool. Third-party
// packages can recreate the same functionality with buffers.NewPool.
package core
var (
	// _pool is the process-wide buffer pool, sized with the package default.
	_pool = NewPool(_size)
	// GetPool retrieves a buffer from the pool, creating one if necessary.
	GetPool = _pool.Get
)

View File

@@ -0,0 +1,187 @@
package core
import (
"time"
)
// DefaultLineEnding defines the default line ending when writing logs.
// Alternate line endings specified in EncoderConfig can override this
// behavior.
const DefaultLineEnding = "\n"

// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
// map- or struct-like object to the logging context. Like maps, ObjectEncoders
// aren't safe for concurrent use (though typical use shouldn't require locks).
type ObjectEncoder interface {
	// Logging-specific marshalers.
	AddArray(key string, marshaler ArrayMarshaler) error
	AddObject(key string, marshaler ObjectMarshaler) error
	// Built-in types.
	AddBinary(key string, value []byte)     // for arbitrary bytes
	AddByteString(key string, value []byte) // for UTF-8 encoded bytes
	AddBool(key string, value bool)
	AddComplex128(key string, value complex128)
	AddComplex64(key string, value complex64)
	AddDuration(key string, value time.Duration)
	AddFloat64(key string, value float64)
	AddFloat32(key string, value float32)
	AddInt(key string, value int)
	AddInt64(key string, value int64)
	AddInt32(key string, value int32)
	AddInt16(key string, value int16)
	AddInt8(key string, value int8)
	AddString(key, value string)
	AddTime(key string, value time.Time)
	AddUint(key string, value uint)
	AddUint64(key string, value uint64)
	AddUint32(key string, value uint32)
	AddUint16(key string, value uint16)
	AddUint8(key string, value uint8)
	AddUintptr(key string, value uintptr)
	// AddReflected uses reflection to serialize arbitrary objects, so it's slow
	// and allocation-heavy.
	AddReflected(key string, value interface{}) error
	// OpenNamespace opens an isolated namespace where all subsequent fields will
	// be added. Applications can use namespaces to prevent key collisions when
	// injecting loggers into sub-components or third-party libraries.
	OpenNamespace(key string)
}

// ObjectMarshaler allows user-defined types to efficiently add themselves to the
// logging context, and to selectively omit information which shouldn't be
// included in logs (e.g., passwords).
type ObjectMarshaler interface {
	MarshalLogObject(ObjectEncoder) error
}

// ObjectMarshalerFunc is a type adapter that turns a function into an
// ObjectMarshaler.
type ObjectMarshalerFunc func(ObjectEncoder) error

// MarshalLogObject calls the underlying function.
func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
	return f(enc)
}

// ArrayMarshaler allows user-defined types to efficiently add themselves to the
// logging context, and to selectively omit information which shouldn't be
// included in logs (e.g., passwords).
type ArrayMarshaler interface {
	MarshalLogArray(ArrayEncoder) error
}

// ArrayMarshalerFunc is a type adapter that turns a function into an
// ArrayMarshaler.
type ArrayMarshalerFunc func(ArrayEncoder) error

// MarshalLogArray calls the underlying function.
func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
	return f(enc)
}

// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
// array-like objects to the logging context. Of note, it supports mixed-type
// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
// aren't safe for concurrent use (though typical use shouldn't require locks).
type ArrayEncoder interface {
	// Built-in types.
	PrimitiveArrayEncoder
	// Time-related types.
	AppendDuration(time.Duration)
	AppendTime(time.Time)
	// Logging-specific marshalers.
	AppendArray(ArrayMarshaler) error
	AppendObject(ObjectMarshaler) error
	// AppendReflected uses reflection to serialize arbitrary objects, so it's
	// slow and allocation-heavy.
	AppendReflected(value interface{}) error
}

// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
// only in Go's built-in types. It's included only so that Duration- and
// TimeEncoders cannot trigger infinite recursion.
type PrimitiveArrayEncoder interface {
	// Built-in types.
	AppendBool(bool)
	AppendByteString([]byte) // for UTF-8 encoded bytes
	AppendComplex128(complex128)
	AppendComplex64(complex64)
	AppendFloat64(float64)
	AppendFloat32(float32)
	AppendInt(int)
	AppendInt64(int64)
	AppendInt32(int32)
	AppendInt16(int16)
	AppendInt8(int8)
	AppendString(string)
	AppendUint(uint)
	AppendUint64(uint64)
	AppendUint32(uint32)
	AppendUint16(uint16)
	AppendUint8(uint8)
	AppendUintptr(uintptr)
}
// An EncoderConfig allows users to configure the concrete encoders supplied by
// zapcore.
type EncoderConfig struct {
	EncodeTime     TimeEncoder     `json:"timeEncoder" yaml:"timeEncoder"`
	EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
	// Configure the primitive representations of common complex types. For
	// example, some users may want all time.Times serialized as floating-point
	// seconds since epoch, while others may prefer ISO8601 strings.
	// The remaining upstream-zap fields are kept below (commented out) for
	// reference; only time and duration encoding are configurable here.
	/*EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
	EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
	EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
	EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
	// Unlike the other primitive type encoders, EncodeName is optional. The
	// zero value falls back to FullNameEncoder.
	EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`*/
}
// Encoder is a format-agnostic interface for all log entry marshalers. Since
// log encoders don't need to support the same wide range of use cases as
// general-purpose marshalers, it's possible to make them faster and
// lower-allocation.
//
// Implementations of the ObjectEncoder interface's methods can, of course,
// freely modify the receiver. However, the Clone and Encode methods will
// be called concurrently and shouldn't modify the receiver.
type Encoder interface {
	ObjectEncoder
	// Clone copies the encoder, ensuring that adding fields to the copy doesn't
	// affect the original.
	Clone() Encoder
	// Encode encodes the accumulated context along with fields into the
	// supplied byte buffer.
	Encode(*Buffer, ...Field) error
}
// A TimeEncoder serializes a time.Time to a primitive type.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)

// A DurationEncoder serializes a time.Duration to a primitive type.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)

// EpochTimeEncoder serializes a time.Time as a "2006-01-02T15:04:05.999999"
// formatted string. (Despite the name, it no longer emits floating-point
// seconds since the Unix epoch -- that original implementation is kept
// below, commented out.)
func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
	//var d []byte
	enc.AppendString(t.Format("2006-01-02T15:04:05.999999"))
	//enc.AppendByteString(t.AppendFormat(d, "2006-01-02T15:04:05.999999"))
	/*nanos := t.UnixNano()
	sec := float64(nanos) / float64(time.Second)
	enc.AppendFloat64(sec)*/
}

// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
	enc.AppendFloat64(float64(d) / float64(time.Second))
}

View File

@@ -0,0 +1,6 @@
package core
// Field is for encoder
type Field interface {
AddTo(enc ObjectEncoder)
}

View File

@@ -0,0 +1,40 @@
# Bazel package for go-common/library/log/internal/filewriter (auto-managed).
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = ["filewriter_test.go"],
    embed = [":go_default_library"],
    tags = ["automanaged"],
    deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)

# Rotating file writer used by the log package.
go_library(
    name = "go_default_library",
    srcs = [
        "filewriter.go",
        "option.go",
    ],
    importpath = "go-common/library/log/internal/filewriter",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,344 @@
package filewriter
import (
"bytes"
"container/list"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// FileWriter create file log writer
type FileWriter struct {
	opt    option
	dir    string // directory containing the log file
	fname  string // base filename (rotated files get ".<format>[.NNN]" suffixes)
	ch     chan *bytes.Buffer
	stdlog *log.Logger // stderr fallback logger for internal errors
	pool   *sync.Pool  // recycles *bytes.Buffer used for queued writes

	// rotation state, touched only by the daemon goroutine
	lastRotateFormat string // formatted date of the last rotation period
	lastSplitNum     int    // split counter within the current period
	current          *wrapFile
	files            *list.List // known rotated files, newest first

	closed int32 // set to 1 by Close; checked atomically
	wg     sync.WaitGroup
}

// rotateItem describes one already-rotated file found on disk.
type rotateItem struct {
	rotateTime int64
	rotateNum  int
	fname      string
}
// parseRotateItem scans dir for files rotated from fname (suffix
// ".<rotateFormat>[.NNN]") and returns them as a list sorted newest first
// (by rotate time, then split number).
func parseRotateItem(dir, fname, rotateFormat string) (*list.List, error) {
	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	// parse exists log file filename
	parse := func(s string) (rt rotateItem, err error) {
		// remove filename and left "." error.log.2018-09-12.001 -> 2018-09-12.001
		rt.fname = s
		s = strings.TrimLeft(s[len(fname):], ".")
		seqs := strings.Split(s, ".")
		var t time.Time
		switch len(seqs) {
		case 2:
			// date plus split number, e.g. 2018-09-12.001
			if rt.rotateNum, err = strconv.Atoi(seqs[1]); err != nil {
				return
			}
			fallthrough
		case 1:
			// date only
			if t, err = time.Parse(rotateFormat, seqs[0]); err != nil {
				return
			}
			rt.rotateTime = t.Unix()
		}
		return
	}
	var items []rotateItem
	for _, fi := range fis {
		if strings.HasPrefix(fi.Name(), fname) && fi.Name() != fname {
			rt, err := parse(fi.Name())
			if err != nil {
				// TODO deal with error
				continue
			}
			items = append(items, rt)
		}
	}
	// newest first: later rotate time wins, then higher split number
	sort.Slice(items, func(i, j int) bool {
		if items[i].rotateTime == items[j].rotateTime {
			return items[i].rotateNum > items[j].rotateNum
		}
		return items[i].rotateTime > items[j].rotateTime
	})
	l := list.New()
	for _, item := range items {
		l.PushBack(item)
	}
	return l, nil
}
// wrapFile wraps an *os.File together with a running byte count so the
// rotation logic can check the file size without calling Stat.
type wrapFile struct {
	fsize int64
	fp    *os.File
}

// size returns the current file size in bytes.
func (w *wrapFile) size() int64 {
	return w.fsize
}

// write appends p to the file and advances the size counter.
func (w *wrapFile) write(p []byte) (n int, err error) {
	n, err = w.fp.Write(p)
	w.fsize += int64(n)
	return
}

// newWrapFile opens (or creates) fpath for appending and seeds the size
// counter with the file's current length.
func newWrapFile(fpath string) (*wrapFile, error) {
	fp, err := os.OpenFile(fpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	fi, err := fp.Stat()
	if err != nil {
		// don't leak the descriptor when Stat fails
		fp.Close()
		return nil, err
	}
	return &wrapFile{fp: fp, fsize: fi.Size()}, nil
}
// New FileWriter A FileWriter is safe for use by multiple goroutines simultaneously.
// It creates the log directory if needed, opens (or resumes) the current
// file, indexes already-rotated files, and starts the daemon goroutine
// that performs the actual disk writes and rotation.
func New(fpath string, fns ...Option) (*FileWriter, error) {
	opt := defaultOption
	for _, fn := range fns {
		fn(&opt)
	}
	fname := filepath.Base(fpath)
	if fname == "" {
		return nil, fmt.Errorf("filename can't empty")
	}
	dir := filepath.Dir(fpath)
	fi, err := os.Stat(dir)
	if err == nil && !fi.IsDir() {
		return nil, fmt.Errorf("%s already exists and not a directory", dir)
	}
	if os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return nil, fmt.Errorf("create dir %s error: %s", dir, err.Error())
		}
	}
	current, err := newWrapFile(fpath)
	if err != nil {
		return nil, err
	}
	stdlog := log.New(os.Stderr, "flog ", log.LstdFlags)
	ch := make(chan *bytes.Buffer, opt.ChanSize)
	files, err := parseRotateItem(dir, fname, opt.RotateFormat)
	if err != nil {
		// set files a empty list; scanning failures are not fatal
		files = list.New()
		stdlog.Printf("parseRotateItem error: %s", err)
	}
	lastRotateFormat := time.Now().Format(opt.RotateFormat)
	var lastSplitNum int
	if files.Len() > 0 {
		rt := files.Front().Value.(rotateItem)
		// check contains is mush esay than compared with timestamp:
		// resume the split counter if the newest rotated file is from today
		if strings.Contains(rt.fname, lastRotateFormat) {
			lastSplitNum = rt.rotateNum
		}
	}
	fw := &FileWriter{
		opt:              opt,
		dir:              dir,
		fname:            fname,
		stdlog:           stdlog,
		ch:               ch,
		pool:             &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }},
		lastSplitNum:     lastSplitNum,
		lastRotateFormat: lastRotateFormat,
		files:            files,
		current:          current,
	}
	fw.wg.Add(1)
	go fw.daemon()
	return fw, nil
}
// Write write data to log file, return write bytes is pseudo just for implement io.Writer.
// The payload is copied and enqueued for the daemon goroutine; when the
// channel is full the log line is discarded with an error (optionally
// after waiting WriteTimeout).
func (f *FileWriter) Write(p []byte) (int, error) {
	// atomic is not necessary
	if atomic.LoadInt32(&f.closed) == 1 {
		f.stdlog.Printf("%s", p)
		return 0, fmt.Errorf("filewriter already closed")
	}
	// because write to file is asynchronous,
	// copy p to internal buf prevent p be change on outside
	buf := f.getBuf()
	buf.Write(p)
	if f.opt.WriteTimeout == 0 {
		select {
		case f.ch <- buf:
			return len(p), nil
		default:
			// recycle the unused buffer instead of leaking it from the pool
			f.putBuf(buf)
			return 0, fmt.Errorf("log channel is full, discard log")
		}
	}
	// write log with timeout; stop the timer on every path so its resources
	// are released immediately rather than when it expires
	timeout := time.NewTimer(f.opt.WriteTimeout)
	defer timeout.Stop()
	select {
	case f.ch <- buf:
		return len(p), nil
	case <-timeout.C:
		f.putBuf(buf)
		return 0, fmt.Errorf("log channel is full, discard log")
	}
}
// daemon is the single writer goroutine: it aggregates queued buffers,
// flushes them to disk every 10ms, checks rotation on RotateInterval
// ticks, and drains everything once Close sets the closed flag.
func (f *FileWriter) daemon() {
	// TODO: check aggsbuf size prevent it too big
	aggsbuf := &bytes.Buffer{}
	tk := time.NewTicker(f.opt.RotateInterval)
	// TODO: make it configrable
	aggstk := time.NewTicker(10 * time.Millisecond)
	var err error
	for {
		select {
		case t := <-tk.C:
			f.checkRotate(t)
		case buf, ok := <-f.ch:
			// accumulate into aggsbuf; the actual disk write happens on the
			// aggregation tick below
			if ok {
				aggsbuf.Write(buf.Bytes())
				f.putBuf(buf)
			}
		case <-aggstk.C:
			if aggsbuf.Len() > 0 {
				if err = f.write(aggsbuf.Bytes()); err != nil {
					f.stdlog.Printf("write log error: %s", err)
				}
				aggsbuf.Reset()
			}
		}
		if atomic.LoadInt32(&f.closed) != 1 {
			continue
		}
		// closed: flush the aggregate, then drain the (closed) channel
		if err = f.write(aggsbuf.Bytes()); err != nil {
			f.stdlog.Printf("write log error: %s", err)
		}
		for buf := range f.ch {
			if err = f.write(buf.Bytes()); err != nil {
				f.stdlog.Printf("write log error: %s", err)
			}
			f.putBuf(buf)
		}
		break
	}
	f.wg.Done()
}
// Close close file writer
// It marks the writer closed, closes the channel so daemon drains the
// remaining buffers, and waits for the daemon goroutine to finish.
// NOTE(review): a Write racing with Close could send on the closed
// channel and panic -- the closed flag is checked before the send but
// not atomically with it; confirm callers serialize Close.
func (f *FileWriter) Close() error {
	atomic.StoreInt32(&f.closed, 1)
	close(f.ch)
	f.wg.Wait()
	return nil
}
// checkRotate prunes old rotated files beyond MaxFile and, when the
// rotation period changed or the current file exceeds MaxSize, renames the
// current file to its rotated name and reopens a fresh one.
func (f *FileWriter) checkRotate(t time.Time) {
	formatFname := func(format string, num int) string {
		if num == 0 {
			return fmt.Sprintf("%s.%s", f.fname, format)
		}
		return fmt.Sprintf("%s.%s.%03d", f.fname, format, num)
	}
	format := t.Format(f.opt.RotateFormat)
	if f.opt.MaxFile != 0 {
		// drop the oldest entries until we are back under the limit
		for f.files.Len() > f.opt.MaxFile {
			rt := f.files.Remove(f.files.Front()).(rotateItem)
			fpath := filepath.Join(f.dir, rt.fname)
			if err := os.Remove(fpath); err != nil {
				f.stdlog.Printf("remove file %s error: %s", fpath, err)
			}
		}
	}
	if format != f.lastRotateFormat || (f.opt.MaxSize != 0 && f.current.size() > f.opt.MaxSize) {
		var err error
		// close current file first
		if err = f.current.fp.Close(); err != nil {
			f.stdlog.Printf("close current file error: %s", err)
		}
		// rename file
		fname := formatFname(f.lastRotateFormat, f.lastSplitNum)
		oldpath := filepath.Join(f.dir, f.fname)
		newpath := filepath.Join(f.dir, fname)
		if err = os.Rename(oldpath, newpath); err != nil {
			f.stdlog.Printf("rename file %s to %s error: %s", oldpath, newpath, err)
			return
		}
		f.files.PushBack(rotateItem{fname: fname /*rotateNum: f.lastSplitNum, rotateTime: t.Unix() unnecessary*/})
		// a period change resets the split counter; a size-only rotation
		// increments it
		if format != f.lastRotateFormat {
			f.lastRotateFormat = format
			f.lastSplitNum = 0
		} else {
			f.lastSplitNum++
		}
		// recreate current file
		f.current, err = newWrapFile(filepath.Join(f.dir, f.fname))
		if err != nil {
			// f.current is now nil; write() falls back to stderr
			f.stdlog.Printf("create log file error: %s", err)
		}
	}
}
// write appends p to the current log file.
// f.current may be nil if newWrapFile failed inside checkRotate; in that
// case the data is redirected to stderr and an error is returned instead
// of dereferencing the nil file (which previously panicked).
func (f *FileWriter) write(p []byte) error {
	if f.current == nil {
		f.stdlog.Printf("can't write log to file, please check stderr log for detail")
		f.stdlog.Printf("%s", p)
		return fmt.Errorf("no current log file")
	}
	_, err := f.current.write(p)
	return err
}
// putBuf resets buf and hands it back to the pool for reuse.
func (f *FileWriter) putBuf(buf *bytes.Buffer) {
	buf.Reset()
	f.pool.Put(buf)
}

// getBuf fetches a recycled buffer from the pool.
func (f *FileWriter) getBuf() *bytes.Buffer {
	v := f.pool.Get()
	return v.(*bytes.Buffer)
}

View File

@@ -0,0 +1,221 @@
package filewriter
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
const logdir = "testlog"
// touch creates dir (if needed) plus an empty file named name inside it,
// panicking on open failure; it is only used to build test fixtures.
func touch(dir, name string) {
	os.MkdirAll(dir, 0755)
	f, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	f.Close()
}
// TestMain runs the suite and then removes the shared log directory.
func TestMain(m *testing.M) {
	code := m.Run()
	os.RemoveAll(logdir)
	os.Exit(code)
}
// TestParseRotate seeds a directory with rotated filenames and checks that
// parseRotateItem finds them all and orders today's highest split first.
func TestParseRotate(t *testing.T) {
	touch := func(dir, name string) {
		os.MkdirAll(dir, 0755)
		fp, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644)
		if err != nil {
			t.Fatal(err)
		}
		fp.Close()
	}
	dir := filepath.Join(logdir, "test-parse-rotate")
	names := []string{"info.log.2018-11-11", "info.log.2018-11-11.001", "info.log.2018-11-11.002", "info.log." + time.Now().Format("2006-01-02") + ".005"}
	for _, name := range names {
		touch(dir, name)
	}
	l, err := parseRotateItem(dir, "info.log", "2006-01-02")
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, len(names), l.Len())
	// newest item (today, split 5) must be at the front
	rt := l.Front().Value.(rotateItem)
	assert.Equal(t, 5, rt.rotateNum)
}
// TestRotateExists verifies that rotation numbering continues from the
// highest split number already present on disk (.005 -> .006).
func TestRotateExists(t *testing.T) {
	dir := filepath.Join(logdir, "test-rotate-exists")
	names := []string{"info.log." + time.Now().Format("2006-01-02") + ".005"}
	for _, name := range names {
		touch(dir, name)
	}
	fw, err := New(logdir+"/test-rotate-exists/info.log",
		MaxSize(1024*1024),
		func(opt *option) { opt.RotateInterval = time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	// write ~10MB in bursts so size-based rotation triggers several times
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-rotate-exists")
	if err != nil {
		t.Fatal(err)
	}
	var fnams []string
	for _, fi := range fis {
		fnams = append(fnams, fi.Name())
	}
	assert.Contains(t, fnams, "info.log."+time.Now().Format("2006-01-02")+".006")
}

// TestSizeRotate checks that writing well past MaxSize produces multiple
// rotated files.
func TestSizeRotate(t *testing.T) {
	fw, err := New(logdir+"/test-rotate/info.log",
		MaxSize(1024*1024),
		func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-rotate")
	if err != nil {
		t.Fatal(err)
	}
	assert.True(t, len(fis) > 5, "expect more than 5 file get %d", len(fis))
}

// TestMaxFile checks that MaxFile(1) keeps at most the current file plus
// one rotated file.
func TestMaxFile(t *testing.T) {
	fw, err := New(logdir+"/test-maxfile/info.log",
		MaxSize(1024*1024),
		MaxFile(1),
		func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-maxfile")
	if err != nil {
		t.Fatal(err)
	}
	assert.True(t, len(fis) <= 2, fmt.Sprintf("expect 2 file get %d", len(fis)))
}

// TestMaxFile2 seeds six pre-existing rotated files and checks that
// MaxFile(3) prunes the surplus down to 3 plus the current file.
func TestMaxFile2(t *testing.T) {
	files := []string{
		"info.log.2018-12-01",
		"info.log.2018-12-02",
		"info.log.2018-12-03",
		"info.log.2018-12-04",
		"info.log.2018-12-05",
		"info.log.2018-12-05.001",
	}
	for _, file := range files {
		touch(logdir+"/test-maxfile2", file)
	}
	fw, err := New(logdir+"/test-maxfile2/info.log",
		MaxSize(1024*1024),
		MaxFile(3),
		func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-maxfile2")
	if err != nil {
		t.Fatal(err)
	}
	assert.True(t, len(fis) == 4, fmt.Sprintf("expect 4 file get %d", len(fis)))
}
// TestFileWriter smoke-tests a basic create-write-close cycle.
func TestFileWriter(t *testing.T) {
	fw, err := New("testlog/info.log")
	if err != nil {
		t.Fatal(err)
	}
	defer fw.Close()
	_, err = fw.Write([]byte("Hello World!\n"))
	if err != nil {
		t.Error(err)
	}
}

// BenchmarkFileWriter measures Write throughput with a write timeout and
// frequent rotation checks enabled.
func BenchmarkFileWriter(b *testing.B) {
	fw, err := New("testlog/bench/info.log",
		func(opt *option) { opt.WriteTimeout = time.Second }, MaxSize(1024*1024*8), /*32MB*/
		func(opt *option) { opt.RotateInterval = 10 * time.Millisecond },
	)
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		_, err = fw.Write([]byte("Hello World!\n"))
		if err != nil {
			b.Error(err)
		}
	}
}

View File

@@ -0,0 +1,69 @@
package filewriter
import (
"fmt"
"strings"
"time"
)
// RotateFormat
const (
	// RotateDaily rotates the log file once per calendar day.
	RotateDaily = "2006-01-02"
)

// defaultOption: daily rotation, 1GB per file, 8K channel, 10s rotate check.
var defaultOption = option{
	RotateFormat:   RotateDaily,
	MaxSize:        1 << 30,
	ChanSize:       1024 * 8,
	RotateInterval: 10 * time.Second,
}

type option struct {
	RotateFormat string
	MaxFile      int   // rotated files to keep; 0 means unlimited
	MaxSize      int64 // bytes per file before a size-based rotation; 0 unlimited
	ChanSize     int   // internal channel capacity
	// TODO export Option
	RotateInterval time.Duration
	WriteTimeout   time.Duration
}
// Option filewriter option
type Option func(opt *option)

// RotateFormat e.g 2006-01-02 meaning rotate log file every day.
// NOTE: format can't contain ".", "." will cause panic ヽ(*。>Д<)o゜.
func RotateFormat(format string) Option {
	// the dot check runs eagerly when the Option is constructed,
	// not when it is applied
	if strings.Contains(format, ".") {
		panic(fmt.Sprintf("rotate format can't contain '.' format: %s", format))
	}
	return func(opt *option) {
		opt.RotateFormat = format
	}
}

// MaxFile sets how many rotated files are kept; 0 meaning unlimit.
// NOTE(review): an earlier comment claimed a default of 999, but
// defaultOption leaves MaxFile at 0 (unlimited) -- confirm intent.
// TODO: don't create file list if MaxSize is unlimt.
func MaxFile(n int) Option {
	return func(opt *option) {
		opt.MaxFile = n
	}
}

// MaxSize set max size for single log file,
// defult 1GB, 0 meaning unlimit.
func MaxSize(n int64) Option {
	return func(opt *option) {
		opt.MaxSize = n
	}
}

// ChanSize set internal chan size default 8192 use about 64k memory on x64 platfrom static,
// because filewriter has internal object pool, change chan size bigger may cause filewriter use
// a lot of memory, because sync.Pool can't set expire time memory won't free until program exit.
func ChanSize(n int) Option {
	return func(opt *option) {
		opt.ChanSize = n
	}
}

View File

@@ -0,0 +1,424 @@
package core
import (
"encoding/base64"
"encoding/json"
"math"
"sync"
"time"
"unicode/utf8"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"

// Compile-time check that jsonEncoder implements ObjectEncoder.
var _ ObjectEncoder = &jsonEncoder{}

// _jsonPool recycles jsonEncoder values so encoding stays allocation-free.
var _jsonPool = sync.Pool{New: func() interface{} {
	return &jsonEncoder{}
}}

// getJSONEncoder fetches a pooled encoder.
func getJSONEncoder() *jsonEncoder {
	return _jsonPool.Get().(*jsonEncoder)
}

// putJSONEncoder frees enc's reflection buffer, clears every reference held
// by enc (so pooled encoders never retain buffers or configs), and returns
// it to the pool.
func putJSONEncoder(enc *jsonEncoder) {
	if enc.reflectBuf != nil {
		enc.reflectBuf.Free()
	}
	enc.EncoderConfig = nil
	enc.buf = nil
	enc.spaced = false
	enc.openNamespaces = 0
	enc.reflectBuf = nil
	enc.reflectEnc = nil
	_jsonPool.Put(enc)
}

// jsonEncoder serializes fields as JSON into buf.
type jsonEncoder struct {
	*EncoderConfig
	buf *Buffer
	spaced bool // include spaces after colons and commas
	openNamespaces int
	// for encoding generic values by reflection
	reflectBuf *Buffer
	reflectEnc *json.Encoder
}

// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
// appropriately escapes all field keys and values.
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
// {"foo":"bar","foo":"baz"}
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
// keys.
func NewJSONEncoder(cfg EncoderConfig, buf *Buffer) Encoder {
	return newJSONEncoder(cfg, false, buf)
}

// newJSONEncoder builds the concrete encoder; it stores a pointer to its own
// copy of cfg, so later mutation of the caller's value has no effect.
func newJSONEncoder(cfg EncoderConfig, spaced bool, buf *Buffer) *jsonEncoder {
	return &jsonEncoder{
		EncoderConfig: &cfg,
		buf: buf,
		spaced: spaced,
	}
}
// AddArray writes key and then the marshaled array value.
func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
	enc.addKey(key)
	return enc.AppendArray(arr)
}

// AddObject writes key and then the marshaled object value.
func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
	enc.addKey(key)
	return enc.AppendObject(obj)
}

// AddBinary writes arbitrary bytes as a base64-encoded JSON string.
func (enc *jsonEncoder) AddBinary(key string, val []byte) {
	enc.AddString(key, base64.StdEncoding.EncodeToString(val))
}

// AddByteString writes bytes as a UTF-8 JSON string (escaped, not base64).
func (enc *jsonEncoder) AddByteString(key string, val []byte) {
	enc.addKey(key)
	enc.AppendByteString(val)
}

// AddBool writes a boolean field.
func (enc *jsonEncoder) AddBool(key string, val bool) {
	enc.addKey(key)
	enc.AppendBool(val)
}

// AddComplex128 writes a complex field as a quoted "r+ii" string.
func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
	enc.addKey(key)
	enc.AppendComplex128(val)
}

// AddDuration writes a duration field using the configured EncodeDuration.
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
	enc.addKey(key)
	enc.AppendDuration(val)
}

// AddFloat64 writes a float field; NaN/Inf become quoted strings.
func (enc *jsonEncoder) AddFloat64(key string, val float64) {
	enc.addKey(key)
	enc.AppendFloat64(val)
}

// AddInt64 writes a signed integer field.
func (enc *jsonEncoder) AddInt64(key string, val int64) {
	enc.addKey(key)
	enc.AppendInt64(val)
}

// resetReflectBuf lazily creates (or resets) the scratch buffer and
// json.Encoder used for reflection-based encoding.
func (enc *jsonEncoder) resetReflectBuf() {
	if enc.reflectBuf == nil {
		enc.reflectBuf = GetPool()
		enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
	} else {
		enc.reflectBuf.Reset()
	}
}

// AddReflected encodes an arbitrary value with encoding/json (slow path) and
// splices the result into the output buffer.
func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
	enc.resetReflectBuf()
	err := enc.reflectEnc.Encode(obj)
	if err != nil {
		return err
	}
	// json.Encoder.Encode appends a trailing newline; strip it so the
	// surrounding object stays on one line.
	enc.reflectBuf.TrimNewline()
	enc.addKey(key)
	_, err = enc.buf.Write(enc.reflectBuf.Bytes())
	return err
}

// OpenNamespace starts a nested JSON object under key; it is closed later by
// closeOpenNamespaces.
func (enc *jsonEncoder) OpenNamespace(key string) {
	enc.addKey(key)
	enc.buf.AppendByte('{')
	enc.openNamespaces++
}

// AddString writes a string field.
func (enc *jsonEncoder) AddString(key, val string) {
	enc.addKey(key)
	enc.AppendString(val)
}

// AddTime writes a time field using the configured EncodeTime.
func (enc *jsonEncoder) AddTime(key string, val time.Time) {
	enc.addKey(key)
	enc.AppendTime(val)
}

// AddUint64 writes an unsigned integer field.
func (enc *jsonEncoder) AddUint64(key string, val uint64) {
	enc.addKey(key)
	enc.AppendUint64(val)
}
// AppendArray emits "[...]" by delegating element encoding to arr.
func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
	enc.addElementSeparator()
	enc.buf.AppendByte('[')
	err := arr.MarshalLogArray(enc)
	enc.buf.AppendByte(']')
	return err
}

// AppendObject emits "{...}" by delegating field encoding to obj.
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
	enc.addElementSeparator()
	enc.buf.AppendByte('{')
	err := obj.MarshalLogObject(enc)
	enc.buf.AppendByte('}')
	return err
}

// AppendBool emits a bare true/false value.
func (enc *jsonEncoder) AppendBool(val bool) {
	enc.addElementSeparator()
	enc.buf.AppendBool(val)
}

// AppendByteString emits bytes as a JSON-escaped quoted string.
func (enc *jsonEncoder) AppendByteString(val []byte) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddByteString(val)
	enc.buf.AppendByte('"')
}

// AppendComplex128 emits a complex number as a quoted "r+ii" string, since
// JSON has no complex type.
func (enc *jsonEncoder) AppendComplex128(val complex128) {
	enc.addElementSeparator()
	// Cast to a platform-independent, fixed-size type.
	r, i := float64(real(val)), float64(imag(val))
	enc.buf.AppendByte('"')
	// Because we're always in a quoted string, we can use strconv without
	// special-casing NaN and +/-Inf.
	enc.buf.AppendFloat(r, 64)
	enc.buf.AppendByte('+')
	enc.buf.AppendFloat(i, 64)
	enc.buf.AppendByte('i')
	enc.buf.AppendByte('"')
}

// AppendDuration emits a duration via the user-configured EncodeDuration.
func (enc *jsonEncoder) AppendDuration(val time.Duration) {
	cur := enc.buf.Len()
	enc.EncodeDuration(val, enc)
	if cur == enc.buf.Len() {
		// User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
		// JSON valid.
		enc.AppendInt64(int64(val))
	}
}

// AppendInt64 emits a bare signed integer.
func (enc *jsonEncoder) AppendInt64(val int64) {
	enc.addElementSeparator()
	enc.buf.AppendInt(val)
}

// AppendReflected encodes an arbitrary value with encoding/json (slow path)
// and splices the result into the output buffer.
func (enc *jsonEncoder) AppendReflected(val interface{}) error {
	enc.resetReflectBuf()
	err := enc.reflectEnc.Encode(val)
	if err != nil {
		return err
	}
	// Strip the newline encoding/json appends after each value.
	enc.reflectBuf.TrimNewline()
	enc.addElementSeparator()
	_, err = enc.buf.Write(enc.reflectBuf.Bytes())
	return err
}

// AppendString emits a JSON-escaped quoted string.
func (enc *jsonEncoder) AppendString(val string) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddString(val)
	enc.buf.AppendByte('"')
}

// AppendTime emits a time via the user-configured EncodeTime.
func (enc *jsonEncoder) AppendTime(val time.Time) {
	cur := enc.buf.Len()
	enc.EncodeTime(val, enc)
	if cur == enc.buf.Len() {
		// User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
		// output JSON valid.
		enc.AppendInt64(val.UnixNano())
	}
}

// AppendUint64 emits a bare unsigned integer.
func (enc *jsonEncoder) AppendUint64(val uint64) {
	enc.addElementSeparator()
	enc.buf.AppendUint(val)
}
// The narrower numeric and complex Add/Append variants all widen their
// argument and delegate to the 64-bit implementations above.
func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }

// Clone copies the encoder so the copy can be mutated independently.
func (enc *jsonEncoder) Clone() Encoder {
	clone := enc.clone()
	return clone
}

// clone fetches a pooled encoder, copies config/spacing/namespace state and
// gives the clone a fresh pooled buffer.
func (enc *jsonEncoder) clone() *jsonEncoder {
	clone := getJSONEncoder()
	clone.EncoderConfig = enc.EncoderConfig
	clone.spaced = enc.spaced
	clone.openNamespaces = enc.openNamespaces
	clone.buf = GetPool()
	return clone
}

// Encode writes any previously-buffered fields plus the supplied fields into
// buf as a single JSON object terminated by a newline.
// NOTE(review): `final.buf = buf` replaces the pooled buffer that clone()
// just acquired without calling Free() on it; presumably harmless (GC will
// reclaim it) but it bypasses the buffer pool — confirm before changing.
func (enc *jsonEncoder) Encode(buf *Buffer, fields ...Field) error {
	final := enc.clone()
	final.buf = buf
	final.buf.AppendByte('{')
	if enc.buf.Len() > 0 {
		final.addElementSeparator()
		final.buf.Write(enc.buf.Bytes())
	}
	for i := range fields {
		fields[i].AddTo(final)
	}
	final.closeOpenNamespaces()
	final.buf.AppendString("}\n")
	putJSONEncoder(final)
	return nil
}
// closeOpenNamespaces emits one closing brace per namespace opened with
// OpenNamespace.
func (enc *jsonEncoder) closeOpenNamespaces() {
	for i := 0; i < enc.openNamespaces; i++ {
		enc.buf.AppendByte('}')
	}
}

// addKey writes a separator (if needed), the escaped quoted key, and a colon.
func (enc *jsonEncoder) addKey(key string) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddString(key)
	enc.buf.AppendByte('"')
	enc.buf.AppendByte(':')
	if enc.spaced {
		enc.buf.AppendByte(' ')
	}
}

// addElementSeparator appends a comma unless the buffer is empty or the last
// byte already starts a container / follows a key, i.e. no separator is
// needed there.
func (enc *jsonEncoder) addElementSeparator() {
	last := enc.buf.Len() - 1
	if last < 0 {
		return
	}
	switch enc.buf.Bytes()[last] {
	case '{', '[', ':', ',', ' ':
		return
	default:
		enc.buf.AppendByte(',')
		if enc.spaced {
			enc.buf.AppendByte(' ')
		}
	}
}

// appendFloat emits a float; NaN and +/-Inf are not representable in JSON so
// they are written as quoted strings.
func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
	enc.addElementSeparator()
	switch {
	case math.IsNaN(val):
		enc.buf.AppendString(`"NaN"`)
	case math.IsInf(val, 1):
		enc.buf.AppendString(`"+Inf"`)
	case math.IsInf(val, -1):
		enc.buf.AppendString(`"-Inf"`)
	default:
		enc.buf.AppendFloat(val, bitSize)
	}
}

// safeAddString JSON-escapes a string and appends it to the internal buffer.
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
	for i := 0; i < len(s); {
		// Fast path: single-byte runes handled without decoding.
		if enc.tryAddRuneSelf(s[i]) {
			i++
			continue
		}
		r, size := utf8.DecodeRuneInString(s[i:])
		if enc.tryAddRuneError(r, size) {
			i++
			continue
		}
		enc.buf.AppendString(s[i : i+size])
		i += size
	}
}

// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
	for i := 0; i < len(s); {
		if enc.tryAddRuneSelf(s[i]) {
			i++
			continue
		}
		r, size := utf8.DecodeRune(s[i:])
		if enc.tryAddRuneError(r, size) {
			i++
			continue
		}
		enc.buf.Write(s[i : i+size])
		i += size
	}
}
// tryAddRuneSelf appends b if it is a valid UTF-8 character represented in a
// single byte (b < utf8.RuneSelf), applying JSON escaping as needed; it
// returns false for multi-byte lead bytes so the caller decodes the rune.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
	if b >= utf8.RuneSelf {
		return false
	}
	// Printable ASCII other than backslash and quote needs no escaping.
	if 0x20 <= b && b != '\\' && b != '"' {
		enc.buf.AppendByte(b)
		return true
	}
	switch b {
	case '\\', '"':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte(b)
	case '\n':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('n')
	case '\r':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('r')
	case '\t':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('t')
	default:
		// Encode bytes < 0x20, except for the escape sequences above.
		enc.buf.AppendString(`\u00`)
		enc.buf.AppendByte(_hex[b>>4])
		enc.buf.AppendByte(_hex[b&0xF])
	}
	return true
}

// tryAddRuneError emits the replacement character for invalid UTF-8 input
// (utf8.RuneError decoded from a single byte) and reports whether it did so.
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
	if r == utf8.RuneError && size == 1 {
		enc.buf.AppendString(`\ufffd`)
		return true
	}
	return false
}

View File

@@ -0,0 +1,52 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import "sync"
// A Pool is a type-safe wrapper around a sync.Pool.
type Pool struct {
	p *sync.Pool
}

// NewPool constructs a new Pool whose buffers start with the given capacity;
// a size of 0 falls back to the package default _size.
func NewPool(size int) Pool {
	if size == 0 {
		size = _size
	}
	return Pool{p: &sync.Pool{
		New: func() interface{} {
			return &Buffer{bs: make([]byte, 0, size)}
		},
	}}
}

// Get retrieves a Buffer from the pool, creating one if necessary. The
// buffer is reset and remembers its owning pool so Free can return it.
func (p Pool) Get() *Buffer {
	buf := p.p.Get().(*Buffer)
	buf.Reset()
	buf.pool = p
	return buf
}

// put returns a buffer to the pool; called by Buffer.Free.
func (p Pool) put(buf *Buffer) {
	p.p.Put(buf)
}

View File

@@ -0,0 +1,52 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBuffers hammers the pool from 10 goroutines to check that Get always
// yields a reset buffer with non-zero capacity and that Free is safe to call
// concurrently.
func TestBuffers(t *testing.T) {
	const dummyData = "dummy data"
	p := NewPool(0)
	var wg sync.WaitGroup
	for g := 0; g < 10; g++ {
		wg.Add(1)
		go func() {
			for i := 0; i < 100; i++ {
				buf := p.Get()
				assert.Zero(t, buf.Len(), "Expected truncated buffer")
				assert.NotZero(t, buf.Cap(), "Expected non-zero capacity")
				buf.AppendString(dummyData)
				assert.Equal(t, buf.Len(), len(dummyData), "Expected buffer to contain dummy data")
				buf.Free()
			}
			wg.Done()
		}()
	}
	wg.Wait()
}

29
library/log/level.go Normal file
View File

@@ -0,0 +1,29 @@
package log
// Level of severity.
type Level int

// Verbose is a boolean type that implements Info, Infov (like Printf) etc.
type Verbose bool

// common log level.
const (
	_debugLevel Level = iota
	_infoLevel
	_warnLevel
	_errorLevel
	_fatalLevel
)

// levelNames maps each Level to its human-readable name.
var levelNames = [...]string{
	_debugLevel: "DEBUG",
	_infoLevel:  "INFO",
	_warnLevel:  "WARN",
	_errorLevel: "ERROR",
	_fatalLevel: "FATAL",
}

// String returns the level's name. Out-of-range levels return "UNKNOWN"
// instead of panicking with an index-out-of-range, which the previous
// unchecked indexing did for any value outside [_debugLevel, _fatalLevel].
func (l Level) String() string {
	if l < _debugLevel || int(l) >= len(levelNames) {
		return "UNKNOWN"
	}
	return levelNames[l]
}

318
library/log/log.go Normal file
View File

@@ -0,0 +1,318 @@
package log
import (
"context"
"flag"
"fmt"
"io"
"os"
"strconv"
"time"
"go-common/library/conf/env"
"go-common/library/log/internal"
"go-common/library/stat/prom"
xtime "go-common/library/time"
)
// Config log config.
type Config struct {
	Family string
	Host string
	// stdout
	Stdout bool
	// file
	Dir string
	// buffer size
	FileBufferSize int64
	// MaxLogFile
	MaxLogFile int
	// RotateSize
	RotateSize int64
	// log-agent
	Agent *AgentConfig
	// V Enable V-leveled logging at the specified level.
	V int32
	// Module=""
	// The syntax of the argument is a map of pattern=N,
	// where pattern is a literal file name (minus the ".go" suffix) or
	// "glob" pattern and N is a V level. For instance:
	// [module]
	// "service" = 1
	// "dao*" = 2
	// sets the V level to 2 in all Go files whose names begin "dao".
	Module map[string]int32
	// Filter tell log handler which field are sensitive message, use * instead.
	Filter []string
}

// errProm prometheus error counter, incremented by errIncr for error-level logs.
var errProm = prom.BusinessErrCount

// Render render log output
type Render interface {
	Render(io.Writer, map[string]interface{}) error
	RenderString(map[string]interface{}) string
}

// D represents a map of entry level data used for structured logging.
// type D map[string]interface{}
type D struct {
	Key string
	Value interface{}
}
// AddTo exports a field through the ObjectEncoder interface. It's primarily
// useful to library authors, and shouldn't be necessary in most applications.
// The type switch picks the most specific encoder method; error and
// fmt.Stringer are matched before falling back to reflection, and any
// encoding error is recorded as a companion "<Key>Error" string field.
func (d D) AddTo(enc core.ObjectEncoder) {
	var err error
	switch val := d.Value.(type) {
	case bool:
		enc.AddBool(d.Key, val)
	case complex128:
		enc.AddComplex128(d.Key, val)
	case complex64:
		enc.AddComplex64(d.Key, val)
	case float64:
		enc.AddFloat64(d.Key, val)
	case float32:
		enc.AddFloat32(d.Key, val)
	case int:
		enc.AddInt(d.Key, val)
	case int64:
		enc.AddInt64(d.Key, val)
	case int32:
		enc.AddInt32(d.Key, val)
	case int16:
		enc.AddInt16(d.Key, val)
	case int8:
		enc.AddInt8(d.Key, val)
	case string:
		enc.AddString(d.Key, val)
	case uint:
		enc.AddUint(d.Key, val)
	case uint64:
		enc.AddUint64(d.Key, val)
	case uint32:
		enc.AddUint32(d.Key, val)
	case uint16:
		enc.AddUint16(d.Key, val)
	case uint8:
		enc.AddUint8(d.Key, val)
	case []byte:
		enc.AddByteString(d.Key, val)
	case uintptr:
		enc.AddUintptr(d.Key, val)
	case time.Time:
		enc.AddTime(d.Key, val)
	case xtime.Time:
		enc.AddTime(d.Key, val.Time())
	case time.Duration:
		enc.AddDuration(d.Key, val)
	case xtime.Duration:
		enc.AddDuration(d.Key, time.Duration(val))
	case error:
		enc.AddString(d.Key, val.Error())
	case fmt.Stringer:
		enc.AddString(d.Key, val.String())
	default:
		// Slow path: anything else is encoded via reflection.
		err = enc.AddReflected(d.Key, val)
	}
	if err != nil {
		enc.AddString(fmt.Sprintf("%sError", d.Key), err.Error())
	}
}
// KV builds a single structured-logging field from a key/value pair.
func KV(key string, value interface{}) D {
	return D{Key: key, Value: value}
}
// h is the active handler chain; c holds the effective config.
var (
	h Handler
	c *Config
)

// init installs a stdout handler so logging works before Init is called, and
// registers the package's flags on the default flag set.
func init() {
	host, _ := os.Hostname()
	c = &Config{
		Family: env.AppID,
		Host: host,
	}
	h = newHandlers([]string{}, NewStdout())
	addFlag(flag.CommandLine)
}

// Flag/environment-backed settings consumed by Init when no Config is given.
var (
	_v int
	_stdout bool
	_dir string
	_agentDSN string
	_filter logFilter
	_module = verboseModule{}
	_noagent bool
)
// addFlag wires the package-level log settings to environment variables and
// command-line flags. Environment variables are read first and become the
// flag defaults, so an explicit flag always takes precedence.
func addFlag(fs *flag.FlagSet) {
	if lv, err := strconv.ParseInt(os.Getenv("LOG_V"), 10, 64); err == nil {
		_v = int(lv)
	}
	_stdout, _ = strconv.ParseBool(os.Getenv("LOG_STDOUT"))
	_dir = os.Getenv("LOG_DIR")
	if _agentDSN = os.Getenv("LOG_AGENT"); _agentDSN == "" {
		_agentDSN = _defaultAgentConfig
	}
	if tm := os.Getenv("LOG_MODULE"); len(tm) > 0 {
		_module.Set(tm)
	}
	if tf := os.Getenv("LOG_FILTER"); len(tf) > 0 {
		_filter.Set(tf)
	}
	_noagent, _ = strconv.ParseBool(os.Getenv("LOG_NO_AGENT"))
	// get val from flag
	fs.IntVar(&_v, "log.v", _v, "log verbose level, or use LOG_V env variable.")
	fs.BoolVar(&_stdout, "log.stdout", _stdout, "log enable stdout or not, or use LOG_STDOUT env variable.")
	fs.StringVar(&_dir, "log.dir", _dir, "log file `path`, or use LOG_DIR env variable.")
	fs.StringVar(&_agentDSN, "log.agent", _agentDSN, "log agent dsn, or use LOG_AGENT env variable.")
	fs.Var(&_module, "log.module", "log verbose for specified module, or use LOG_MODULE env variable, format: file=1,file2=2.")
	fs.Var(&_filter, "log.filter", "log field for sensitive message, or use LOG_FILTER env variable, format: field1,field2.")
	// BUG FIX: the default must be _noagent, not the literal false. BoolVar
	// immediately assigns the default to its target, so passing false here
	// silently discarded the LOG_NO_AGENT environment value parsed above.
	fs.BoolVar(&_noagent, "log.noagent", _noagent, "force disable log agent print log to stderr, or use LOG_NO_AGENT")
}
// Init create logger with context. A nil conf falls back to the
// flag/environment-derived settings registered in addFlag. Handler selection:
// stdout for dev (or when forced), file when Dir is set, and the log agent
// for non-dev deployments unless LOG_NO_AGENT disables it.
func Init(conf *Config) {
	var isNil bool
	if conf == nil {
		isNil = true
		conf = &Config{
			Stdout: _stdout,
			Dir: _dir,
			V: int32(_v),
			Module: _module,
			Filter: _filter,
		}
	}
	if len(env.AppID) != 0 {
		conf.Family = env.AppID // for caster
	}
	// Prefer the deploy-env hostname; fall back to the OS hostname.
	conf.Host = env.Hostname
	if len(conf.Host) == 0 {
		host, _ := os.Hostname()
		conf.Host = host
	}
	var hs []Handler
	// when env is dev
	if conf.Stdout || (isNil && (env.DeployEnv == "" || env.DeployEnv == env.DeployEnvDev)) || _noagent {
		hs = append(hs, NewStdout())
	}
	if conf.Dir != "" {
		hs = append(hs, NewFile(conf.Dir, conf.FileBufferSize, conf.RotateSize, conf.MaxLogFile))
	}
	// when env is not dev
	if !_noagent && (conf.Agent != nil || (isNil && env.DeployEnv != "" && env.DeployEnv != env.DeployEnvDev)) {
		hs = append(hs, NewAgent(conf.Agent))
	}
	h = newHandlers(conf.Filter, hs...)
	c = conf
}
// Info logs a Printf-style message at the info log level.
func Info(format string, args ...interface{}) {
	h.Log(context.Background(), _infoLevel, KV(_log, fmt.Sprintf(format, args...)))
}

// Warn logs a Printf-style message at the warning log level.
func Warn(format string, args ...interface{}) {
	h.Log(context.Background(), _warnLevel, KV(_log, fmt.Sprintf(format, args...)))
}

// Error logs a Printf-style message at the error log level.
func Error(format string, args ...interface{}) {
	h.Log(context.Background(), _errorLevel, KV(_log, fmt.Sprintf(format, args...)))
}

// Infov logs structured fields at the info log level with request context.
func Infov(ctx context.Context, args ...D) {
	h.Log(ctx, _infoLevel, args...)
}

// Warnv logs structured fields at the warning log level with request context.
func Warnv(ctx context.Context, args ...D) {
	h.Log(ctx, _warnLevel, args...)
}

// Errorv logs structured fields at the error log level with request context.
func Errorv(ctx context.Context, args ...D) {
	h.Log(ctx, _errorLevel, args...)
}
// logw converts a flat key-value variadic list into structured fields.
// An odd trailing element is dropped with a warning, and a pair whose key is
// not a string is skipped with a warning.
func logw(args []interface{}) []D {
	if len(args)%2 != 0 {
		// Message fix: was "the last one will ignored".
		Warn("log: the variadic must be plural, the last one will be ignored")
	}
	ds := make([]D, 0, len(args)/2)
	for i := 0; i < len(args)-1; i = i + 2 {
		if key, ok := args[i].(string); ok {
			ds = append(ds, KV(key, args[i+1]))
		} else {
			// Message fix: was "get %T".
			Warn("log: key must be string, got %T, ignored", args[i])
		}
	}
	return ds
}
// Infow logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
func Infow(ctx context.Context, args ...interface{}) {
	h.Log(ctx, _infoLevel, logw(args)...)
}

// Warnw logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
func Warnw(ctx context.Context, args ...interface{}) {
	h.Log(ctx, _warnLevel, logw(args)...)
}

// Errorw logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
func Errorw(ctx context.Context, args ...interface{}) {
	h.Log(ctx, _errorLevel, logw(args)...)
}

// SetFormat only effective on stdout and file handler
// %T time format at "15:04:05.999" on stdout handler, "15:04:05 MST" on file handler
// %t time format at "15:04:05" on stdout handler, "15:04" on file on file handler
// %D data format at "2006/01/02"
// %d data format at "01/02"
// %L log level e.g. INFO WARN ERROR
// %M log message and additional fields: key=value this is log message
// NOTE below pattern not support on file handler
// %f function name and line number e.g. model.Get:121
// %i instance id
// %e deploy env e.g. dev uat fat prod
// %z zone
// %S full file name and line number: /a/b/c/d.go:23
// %s final file name element and line number: d.go:23
func SetFormat(format string) {
	h.SetFormat(format)
}

// Close close resource. The handler is reset to the default stdout handler
// so later log calls remain safe after Close.
func Close() (err error) {
	err = h.Close()
	h = _defaultStdout
	return
}

// errIncr bumps the prometheus business-error counter for error-level logs.
func errIncr(lv Level, source string) {
	if lv == _errorLevel {
		errProm.Incr(source)
	}
}

102
library/log/log_test.go Normal file
View File

@@ -0,0 +1,102 @@
package log
import (
"context"
"testing"
"go-common/library/net/metadata"
"github.com/stretchr/testify/assert"
)
// initStdout configures the package logger with only the stdout handler.
func initStdout() {
	conf := &Config{
		Stdout: true,
	}
	Init(conf)
}

// initFile configures the package logger with a file handler writing under
// /tmp and a per-module verbosity override for this test file.
func initFile() {
	conf := &Config{
		Dir: "/tmp",
		// VLevel: 2,
		Module: map[string]int32{"log_test": 1},
	}
	Init(conf)
}

// initAgent configures the package logger with a TCP log-agent handler.
// NOTE(review): points at a hard-coded internal address, so this only works
// inside the original test environment.
func initAgent() {
	conf := &Config{
		Agent: &AgentConfig{
			TaskID: "000003",
			Addr: "172.16.0.204:514",
			Proto: "tcp",
			Chan: 1024,
			Buffer: 10,
		},
	}
	Init(conf)
}

// TestLog is a sample payload type used by the logging tests.
type TestLog struct {
	A string
	B int
	C string
	D string
}
// testLog exercises both the Printf-style and structured logging entry
// points at each supported level against the currently-installed handler.
func testLog(t *testing.T) {
	t.Run("Error", func(t *testing.T) {
		Error("hello %s", "world")
		Errorv(context.Background(), KV("key", 2222222), KV("test2", "test"))
	})
	t.Run("Warn", func(t *testing.T) {
		Warn("hello %s", "world")
		Warnv(context.Background(), KV("key", 2222222), KV("test2", "test"))
	})
	t.Run("Info", func(t *testing.T) {
		Info("hello %s", "world")
		Infov(context.Background(), KV("key", 2222222), KV("test2", "test"))
	})
}

// TestLogAgent runs the level tests against the agent handler.
func TestLogAgent(t *testing.T) {
	initAgent()
	testLog(t)
	assert.Equal(t, nil, Close())
}

// TestFile runs the level tests against the file handler.
func TestFile(t *testing.T) {
	initFile()
	testLog(t)
	assert.Equal(t, nil, Close())
}

// TestStdout runs the level tests against the stdout handler.
func TestStdout(t *testing.T) {
	initStdout()
	testLog(t)
	assert.Equal(t, nil, Close())
}

// TestLogW checks that logw pairs up a flat key/value list and drops an odd
// trailing element.
func TestLogW(t *testing.T) {
	D := logw([]interface{}{"i", "like", "a", "dog"})
	if len(D) != 2 || D[0].Key != "i" || D[0].Value != "like" || D[1].Key != "a" || D[1].Value != "dog" {
		t.Fatalf("logw out put should be ' {i like} {a dog}'")
	}
	D = logw([]interface{}{"i", "like", "dog"})
	if len(D) != 1 || D[0].Key != "i" || D[0].Value != "like" {
		t.Fatalf("logw out put should be ' {i like}'")
	}
}

// TestLogWithMirror logs with mirror metadata set to "true", to a non-bool
// value, and absent; it only checks that none of these paths blow up.
func TestLogWithMirror(t *testing.T) {
	Info("test log")
	mdcontext := metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "true"})
	Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
	mdcontext = metadata.NewContext(context.Background(), metadata.MD{metadata.Mirror: "***"})
	Infov(mdcontext, KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
	Infov(context.Background(), KV("key1", "val1"), KV("key2", ""), KV("log", "log content"), KV("msg", "msg content"))
}

61
library/log/logrus.go Normal file
View File

@@ -0,0 +1,61 @@
package log
import (
"context"
"io/ioutil"
"os"
"github.com/sirupsen/logrus"
)
// init redirects logrus output into this package as soon as it is imported.
func init() {
	redirectLogrus()
}

// redirectLogrus installs a hook that forwards every logrus entry to the
// package handler and, unless LOGRUS_STDOUT is set, silences logrus's own
// output so entries are not printed twice.
func redirectLogrus() {
	// FIXME: because of different stack depth call runtime.Caller will get error function name.
	logrus.AddHook(redirectHook{})
	if os.Getenv("LOGRUS_STDOUT") == "" {
		logrus.SetOutput(ioutil.Discard)
	}
}

// redirectHook forwards logrus entries into this package's handler.
type redirectHook struct{}

// Levels registers the hook for every logrus level.
func (redirectHook) Levels() []logrus.Level {
	return logrus.AllLevels
}

// Fire maps the logrus level onto this package's levels (Fatal/Panic are
// recorded as errors with an extra "logrus_lv" field; Debug becomes a
// verbose-10 info log) and re-emits the entry with its data fields.
func (redirectHook) Fire(entry *logrus.Entry) error {
	lv := _infoLevel
	var logrusLv string
	var verbose int32
	switch entry.Level {
	case logrus.FatalLevel, logrus.PanicLevel:
		logrusLv = entry.Level.String()
		fallthrough
	case logrus.ErrorLevel:
		lv = _errorLevel
	case logrus.WarnLevel:
		lv = _warnLevel
	case logrus.InfoLevel:
		lv = _infoLevel
	case logrus.DebugLevel:
		// use verbose log replace of debuglevel
		verbose = 10
	}
	args := make([]D, 0, len(entry.Data)+1)
	args = append(args, D{Key: _log, Value: entry.Message})
	for k, v := range entry.Data {
		args = append(args, D{Key: k, Value: v})
	}
	if logrusLv != "" {
		args = append(args, D{Key: "logrus_lv", Value: logrusLv})
	}
	if verbose != 0 {
		V(verbose).Infov(context.Background(), args...)
	} else {
		h.Log(context.Background(), lv, args...)
	}
	return nil
}

167
library/log/pattern.go Normal file
View File

@@ -0,0 +1,167 @@
package log
import (
"bytes"
"fmt"
"io"
"path"
"runtime"
"strings"
"sync"
"time"
)
// patternMap maps a format verb (the character after '%') to the function
// that renders it from the log field map.
var patternMap = map[string]func(map[string]interface{}) string{
	"T": longTime,
	"t": shortTime,
	"D": longDate,
	"d": shortDate,
	"L": keyFactory(_level),
	"f": keyFactory(_source),
	"i": keyFactory(_instanceID),
	"e": keyFactory(_deplyEnv),
	"z": keyFactory(_zone),
	"S": longSource,
	"s": shortSource,
	"M": message,
}

// newPatternRender compiles a printf-like format string into a pattern
// renderer. Unknown verbs, a trailing '%', and ordinary characters are kept
// as literal text; recognized verbs become field-rendering functions.
func newPatternRender(format string) Render {
	p := &pattern{
		bufPool: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
	}
	b := make([]byte, 0, len(format))
	for i := 0; i < len(format); i++ {
		if format[i] != '%' {
			b = append(b, format[i])
			continue
		}
		// '%' at end of string: keep it literally.
		if i+1 >= len(format) {
			b = append(b, format[i])
			continue
		}
		f, ok := patternMap[string(format[i+1])]
		if !ok {
			// Unknown verb: keep the '%' literally; the verb character is
			// handled as ordinary text on the next iteration.
			b = append(b, format[i])
			continue
		}
		// Flush any accumulated literal text before the verb function.
		if len(b) != 0 {
			p.funcs = append(p.funcs, textFactory(string(b)))
			b = b[:0]
		}
		p.funcs = append(p.funcs, f)
		i++
	}
	if len(b) != 0 {
		p.funcs = append(p.funcs, textFactory(string(b)))
	}
	return p
}

// pattern renders a compiled sequence of literal/verb functions, reusing
// buffers from bufPool.
type pattern struct {
	funcs []func(map[string]interface{}) string
	bufPool sync.Pool
}

// Render implemet Formater
func (p *pattern) Render(w io.Writer, d map[string]interface{}) error {
	buf := p.bufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		p.bufPool.Put(buf)
	}()
	for _, f := range p.funcs {
		buf.WriteString(f(d))
	}
	_, err := buf.WriteTo(w)
	return err
}

// Render implemet Formater as string
func (p *pattern) RenderString(d map[string]interface{}) string {
	// TODO strings.Builder
	buf := p.bufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		p.bufPool.Put(buf)
	}()
	for _, f := range p.funcs {
		buf.WriteString(f(d))
	}
	return buf.String()
}
// textFactory returns a renderer that always emits the given literal text,
// ignoring the log fields.
func textFactory(text string) func(map[string]interface{}) string {
	return func(_ map[string]interface{}) string { return text }
}

// keyFactory returns a renderer that looks up key in the log fields: string
// values pass through unchanged, other values are formatted with fmt.Sprint,
// and a missing key renders as the empty string.
func keyFactory(key string) func(map[string]interface{}) string {
	return func(fields map[string]interface{}) string {
		v, present := fields[key]
		if !present {
			return ""
		}
		if s, isString := v.(string); isString {
			return s
		}
		return fmt.Sprint(v)
	}
}
// longSource renders the full file path and line of the logging call site.
// The fixed skip of 6 frames matches the call depth through the handler
// chain; changing that chain breaks the reported location.
func longSource(map[string]interface{}) string {
	if _, file, lineNo, ok := runtime.Caller(6); ok {
		return fmt.Sprintf("%s:%d", file, lineNo)
	}
	return "unknown:0"
}

// shortSource is longSource reduced to the final path element.
func shortSource(map[string]interface{}) string {
	if _, file, lineNo, ok := runtime.Caller(6); ok {
		return fmt.Sprintf("%s:%d", path.Base(file), lineNo)
	}
	return "unknown:0"
}
// nowString formats the current wall-clock time with the given reference
// layout; shared by the time/date pattern verbs below.
func nowString(layout string) string {
	return time.Now().Format(layout)
}

// longTime renders %T: clock time with millisecond precision.
func longTime(map[string]interface{}) string { return nowString("15:04:05.000") }

// shortTime renders %t: hours and minutes.
func shortTime(map[string]interface{}) string { return nowString("15:04") }

// longDate renders %D: slash-separated full date.
func longDate(map[string]interface{}) string { return nowString("2006/01/02") }

// shortDate renders %d: month and day.
func shortDate(map[string]interface{}) string { return nowString("01/02") }
// isInternalKey reports whether k is one of the framework-managed fields
// that %M must not repeat as a key=value pair.
func isInternalKey(k string) bool {
	switch k {
	case _level, _levelValue, _time, _source, _instanceID, _appID, _deplyEnv, _zone:
		return true
	}
	return false
}

// message renders %M: every non-internal field as "key=value" followed by
// the _log message itself.
// NOTE(review): field order follows Go map iteration and is therefore
// nondeterministic; an absent _log message still contributes an empty
// trailing element to the join.
func message(d map[string]interface{}) string {
	var m string
	var s []string
	for k, v := range d {
		if k == _log {
			m = fmt.Sprint(v)
			continue
		}
		if isInternalKey(k) {
			continue
		}
		s = append(s, fmt.Sprintf("%s=%v", k, v))
	}
	s = append(s, m)
	return strings.Join(s, " ")
}

View File

@@ -0,0 +1,35 @@
package log
import (
"bytes"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestPatternDefault renders the default-style pattern and checks the level
// and message land in the expected whitespace-separated positions.
func TestPatternDefault(t *testing.T) {
	buf := &bytes.Buffer{}
	p := newPatternRender("%L %T %f %M")
	p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "hello", _time: time.Now().Format(_timeFormat), _source: "xxx:123"})
	fields := strings.Fields(buf.String())
	assert.Equal(t, 4, len(fields))
	assert.Equal(t, "INFO", fields[0])
	assert.Equal(t, "hello", fields[3])
}

// TestKV checks that %M emits extra fields as key=value before the message.
func TestKV(t *testing.T) {
	buf := &bytes.Buffer{}
	p := newPatternRender("%M")
	p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "2233", "hello": "test"})
	assert.Equal(t, "hello=test 2233", buf.String())
}

// TestBadSymbol checks that unknown verbs, "%%", and trailing text are
// passed through literally by the pattern compiler.
func TestBadSymbol(t *testing.T) {
	buf := &bytes.Buffer{}
	p := newPatternRender("%12 %% %xd %M")
	p.Render(buf, map[string]interface{}{_level: _infoLevel.String(), _log: "2233"})
	assert.Equal(t, "%12 %% %xd 2233", buf.String())
}

61
library/log/stdout.go Normal file
View File

@@ -0,0 +1,61 @@
package log
import (
"context"
"io"
"os"
"time"
)
// defaultPattern is the stdout format used until SetFormat overrides it.
const defaultPattern = "%L %d-%T %f %M"

// _defaultStdout is the fallback handler restored by Close.
var _defaultStdout = NewStdout()

// StdoutHandler stdout log handler
type StdoutHandler struct {
	out io.Writer
	render Render
}

// NewStdout create a stdout log handler. Note it writes to stderr, not
// stdout, so log output is separable from program output.
func NewStdout() *StdoutHandler {
	return &StdoutHandler{
		out: os.Stderr,
		render: newPatternRender(defaultPattern),
	}
}

// Log stdout loging, only for developing env. It merges the supplied fields
// with context/environment extras and the current timestamp, then renders
// one line per call.
func (h *StdoutHandler) Log(ctx context.Context, lv Level, args ...D) {
	d := make(map[string]interface{}, 10+len(args))
	for _, arg := range args {
		d[arg.Key] = arg.Value
	}
	// add extra fields
	addExtraField(ctx, d)
	d[_time] = time.Now().Format(_timeFormat)
	h.render.Render(h.out, d)
	h.out.Write([]byte("\n"))
}

// Close stdout loging; stderr needs no teardown, so this always succeeds.
func (h *StdoutHandler) Close() error {
	return nil
}

// SetFormat set stdout log output format
// %T time format at "15:04:05.999"
// %t time format at "15:04:05"
// %D data format at "2006/01/02"
// %d data format at "01/02"
// %L log level e.g. INFO WARN ERROR
// %f function name and line number e.g. model.Get:121
// %i instance id
// %e deploy env e.g. dev uat fat prod
// %z zone
// %S full file name and line number: /a/b/c/d.go:23
// %s final file name element and line number: d.go:23
// %M log message and additional fields: key=value this is log message
func (h *StdoutHandler) SetFormat(format string) {
	h.render = newPatternRender(format)
}

54
library/log/util.go Normal file
View File

@@ -0,0 +1,54 @@
package log
import (
"context"
"fmt"
"runtime"
"strconv"
"sync"
"go-common/library/conf/env"
"go-common/library/net/metadata"
"go-common/library/net/trace"
)
var fm sync.Map
// addExtraField enriches a log field map with request-scoped metadata
// pulled from ctx (trace id, caller, color, cluster, mirror flag) and
// process-level fields from the global config (deploy env, zone, app
// family, instance host).
func addExtraField(ctx context.Context, fields map[string]interface{}) {
	if t, ok := trace.FromContext(ctx); ok {
		if s, ok := t.(fmt.Stringer); ok {
			fields[_tid] = s.String()
		} else {
			// t was just proven NOT to implement fmt.Stringer, so %s
			// would render "%!s(...)" noise; %v is the correct default
			// fallback format.
			fields[_tid] = fmt.Sprintf("%v", t)
		}
	}
	if caller := metadata.String(ctx, metadata.Caller); caller != "" {
		fields[_caller] = caller
	}
	if color := metadata.String(ctx, metadata.Color); color != "" {
		fields[_color] = color
	}
	if cluster := metadata.String(ctx, metadata.Cluster); cluster != "" {
		fields[_cluster] = cluster
	}
	fields[_deplyEnv] = env.DeployEnv
	fields[_zone] = env.Zone
	fields[_appID] = c.Family
	fields[_instanceID] = c.Host
	// Mark mirrored (shadow) traffic only when the flag is actually set.
	if metadata.Bool(ctx, metadata.Mirror) {
		fields[_mirror] = true
	}
}
// funcName returns the fully-qualified function name and line number
// ("pkg.Func:123") of the caller at the given stack depth, caching the
// result per program counter. Returns "" when the caller cannot be
// resolved.
func funcName(skip int) string {
	pc, _, line, ok := runtime.Caller(skip)
	if !ok {
		return ""
	}
	if cached, hit := fm.Load(pc); hit {
		return cached.(string)
	}
	name := runtime.FuncForPC(pc).Name() + ":" + strconv.FormatInt(int64(line), 10)
	fm.Store(pc, name)
	return name
}

83
library/log/verbose.go Normal file
View File

@@ -0,0 +1,83 @@
package log
import (
"context"
"fmt"
"path/filepath"
"runtime"
"strings"
)
// V reports whether verbosity at the call site is at least the requested level.
// The returned value is a boolean of type Verbose, which implements Info, Infov etc.
// These methods will write to the Info log if called.
// Thus, one may write either
//	if log.V(2) { log.Info("log this") }
// or
//	log.V(2).Info("log this")
// The second form is shorter but the first is cheaper if logging is off because it does
// not evaluate its arguments.
//
// Whether an individual call to V generates a log record depends on the setting of
// the Config.VLevel and Config.Module flags; both are off by default. If the level in the call to
// V is at least the value of Config.VLevel, or of Config.Module for the source file containing the
// call, the V call will log.
// v must be more than 0.
func V(v int32) Verbose {
	var (
		file string
	)
	// Negative levels never log; the global threshold short-circuits the
	// (more expensive) per-module lookup below.
	if v < 0 {
		return Verbose(false)
	} else if c.V >= v {
		return Verbose(true)
	}
	// Resolve the caller's source file and reduce it to its final path
	// element without the ".go" suffix, e.g. "/a/b/model.go" -> "model",
	// which is the key format the module filters are matched against.
	if pc, _, _, ok := runtime.Caller(1); ok {
		file, _ = runtime.FuncForPC(pc).FileLine(pc)
	}
	if strings.HasSuffix(file, ".go") {
		file = file[:len(file)-3]
	}
	if slash := strings.LastIndex(file, "/"); slash >= 0 {
		file = file[slash+1:]
	}
	// First filter that matches (exact name, then shell-style glob) wins.
	// NOTE(review): map iteration order is random, so when several filters
	// match the same file the winner is non-deterministic — confirm this
	// is acceptable for the module config.
	for filter, lvl := range c.Module {
		var match bool
		if match = filter == file; !match {
			match, _ = filepath.Match(filter, file)
		}
		if match {
			return Verbose(lvl >= v)
		}
	}
	return Verbose(false)
}
// Info logs a Printf-style message at the info level, but only when the
// verbosity check that produced v succeeded.
func (v Verbose) Info(format string, args ...interface{}) {
	if !v {
		return
	}
	h.Log(context.Background(), _infoLevel, KV(_log, fmt.Sprintf(format, args...)))
}
// Infov logs structured key/value fields at the info level, but only when
// the verbosity check that produced v succeeded.
func (v Verbose) Infov(ctx context.Context, args ...D) {
	if !v {
		return
	}
	h.Log(ctx, _infoLevel, args...)
}
// Infow logs a message with additional context at the info level; the
// variadic arguments are treated as alternating key/value pairs, as in
// With. Logging only happens when the verbosity check that produced v
// succeeded.
func (v Verbose) Infow(ctx context.Context, args ...interface{}) {
	if !v {
		return
	}
	h.Log(ctx, _infoLevel, logw(args)...)
}
// Close releases the underlying handler's resources; it is a no-op when no
// handler has been installed.
func (v Verbose) Close() error {
	if h != nil {
		return h.Close()
	}
	return nil
}