Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

View File

@@ -0,0 +1,51 @@
# Bazel build file for go-common/library/log/internal.
# The "automanaged" tags mark rules as tool-generated; edit with care.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

# Shared log buffer/encoder internals.
go_library(
    name = "go_default_library",
    srcs = [
        "buffer.go",
        "bufferpool.go",
        "encoder.go",
        "field.go",
        "json_encoder.go",
        "pool.go",
    ],
    importpath = "go-common/library/log/internal",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# All files in this package, consumed by the recursive filegroup below.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source tree: this package plus the filewriter subpackage.
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//library/log/internal/filewriter:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# Unit tests for the buffer and pool.
go_test(
    name = "go_default_test",
    srcs = [
        "buffer_test.go",
        "pool_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)

View File

@@ -0,0 +1,97 @@
package core
import "strconv"
// _size is the default buffer capacity used by the package-level pool: 1 KiB.
const _size = 1024 // by default, create 1 KiB buffers

// NewBuffer returns a Buffer with zero length and the given capacity.
//
// Fix: the parameter was previously named _size, shadowing the package-level
// constant of the same name; it is renamed to size for clarity (parameter
// names are not part of the Go call interface, so callers are unaffected).
//
// NOTE(review): a Buffer built here has a zero pool field — confirm Pool.put
// tolerates that before calling Free on such a buffer.
func NewBuffer(size int) *Buffer {
	return &Buffer{bs: make([]byte, 0, size)}
}
// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
// the only way to construct one is via a Pool.
type Buffer struct {
	bs   []byte // accumulated bytes; capacity is reused across Reset
	pool Pool   // owning pool, returned to by Free
}
// AppendByte writes a single byte to the Buffer.
func (b *Buffer) AppendByte(v byte) {
	b.bs = append(b.bs, v)
}

// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
	b.bs = append(b.bs, s...)
}

// AppendInt appends an integer to the underlying buffer (assuming base 10).
func (b *Buffer) AppendInt(i int64) {
	b.bs = strconv.AppendInt(b.bs, i, 10)
}

// AppendUint appends an unsigned integer to the underlying buffer (assuming
// base 10).
func (b *Buffer) AppendUint(i uint64) {
	b.bs = strconv.AppendUint(b.bs, i, 10)
}

// AppendBool appends a bool to the underlying buffer.
func (b *Buffer) AppendBool(v bool) {
	b.bs = strconv.AppendBool(b.bs, v)
}

// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
// or +/- Inf.
func (b *Buffer) AppendFloat(f float64, bitSize int) {
	// 'f' format, minimal digits ( -1 precision)
	b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
}
// Len returns the length of the underlying byte slice.
func (b *Buffer) Len() int {
	return len(b.bs)
}

// Cap returns the capacity of the underlying byte slice.
func (b *Buffer) Cap() int {
	return cap(b.bs)
}

// Bytes returns a mutable reference to the underlying byte slice.
// The returned slice is invalidated by Reset or Free.
func (b *Buffer) Bytes() []byte {
	return b.bs
}

// String returns a string copy of the underlying byte slice.
func (b *Buffer) String() string {
	return string(b.bs)
}

// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
// backing array.
func (b *Buffer) Reset() {
	b.bs = b.bs[:0]
}

// Write implements io.Writer. It never returns an error.
func (b *Buffer) Write(bs []byte) (int, error) {
	b.bs = append(b.bs, bs...)
	return len(bs), nil
}
// TrimNewline trims any final "\n" byte from the end of the buffer.
func (b *Buffer) TrimNewline() {
	if n := len(b.bs); n > 0 && b.bs[n-1] == '\n' {
		b.bs = b.bs[:n-1]
	}
}
// Free returns the Buffer to its Pool.
//
// Callers must not retain references to the Buffer after calling Free.
//
// NOTE(review): buffers created by NewBuffer carry a zero pool field —
// confirm Pool.put handles that before calling Free on them.
func (b *Buffer) Free() {
	b.pool.put(b)
}

View File

@@ -0,0 +1,91 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import (
"bytes"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBufferWrites exercises every Append* method plus Write, checking the
// rendered bytes, the reported length, and that capacity stays at _size.
func TestBufferWrites(t *testing.T) {
	buf := NewPool(0).Get()
	tests := []struct {
		desc string
		f    func()
		want string
	}{
		{"AppendByte", func() { buf.AppendByte('v') }, "v"},
		{"AppendString", func() { buf.AppendString("foo") }, "foo"},
		{"AppendIntPositive", func() { buf.AppendInt(42) }, "42"},
		{"AppendIntNegative", func() { buf.AppendInt(-42) }, "-42"},
		{"AppendUint", func() { buf.AppendUint(42) }, "42"},
		{"AppendBool", func() { buf.AppendBool(true) }, "true"},
		{"AppendFloat64", func() { buf.AppendFloat(3.14, 64) }, "3.14"},
		// Intentionally introduce some floating-point error.
		{"AppendFloat32", func() { buf.AppendFloat(float64(float32(3.14)), 32) }, "3.14"},
		{"AppendWrite", func() { buf.Write([]byte("foo")) }, "foo"},
	}
	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
			buf.Reset()
			tt.f()
			assert.Equal(t, tt.want, buf.String(), "Unexpected buffer.String().")
			assert.Equal(t, tt.want, string(buf.Bytes()), "Unexpected string(buffer.Bytes()).")
			assert.Equal(t, len(tt.want), buf.Len(), "Unexpected buffer length.")
			// We're not writing more than a kibibyte in tests.
			assert.Equal(t, _size, buf.Cap(), "Expected buffer capacity to remain constant.")
		})
	}
}
// BenchmarkBuffers compares raw []byte append, bytes.Buffer, and the custom
// pooled Buffer on a 1 KiB append+reset workload.
func BenchmarkBuffers(b *testing.B) {
	// Because we use the strconv.AppendFoo functions so liberally, we can't
	// use the standard library's bytes.Buffer anyways (without incurring a
	// bunch of extra allocations). Nevertheless, let's make sure that we're
	// not losing any precious nanoseconds.
	str := strings.Repeat("a", 1024)
	slice := make([]byte, 1024)
	buf := bytes.NewBuffer(slice)
	custom := NewPool(0).Get()
	b.Run("ByteSlice", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			slice = append(slice, str...)
			slice = slice[:0]
		}
	})
	b.Run("BytesBuffer", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			buf.WriteString(str)
			buf.Reset()
		}
	})
	b.Run("CustomBuffer", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			custom.AppendString(str)
			custom.Reset()
		}
	})
}

View File

@@ -0,0 +1,29 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// Package core houses zap's shared internal buffer pool. Third-party
// packages can recreate the same functionality with buffers.NewPool.
package core
var (
	// _pool is the package-wide buffer pool handing out _size (1 KiB) buffers.
	_pool = NewPool(_size)
	// GetPool retrieves a buffer from the pool, creating one if necessary.
	GetPool = _pool.Get
)

View File

@@ -0,0 +1,187 @@
package core
import (
"time"
)
// DefaultLineEnding defines the default line ending when writing logs.
// Alternate line endings specified in EncoderConfig can override this
// behavior.
const DefaultLineEnding = "\n"

// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
// map- or struct-like object to the logging context. Like maps, ObjectEncoders
// aren't safe for concurrent use (though typical use shouldn't require locks).
type ObjectEncoder interface {
	// Logging-specific marshalers.
	AddArray(key string, marshaler ArrayMarshaler) error
	AddObject(key string, marshaler ObjectMarshaler) error

	// Built-in types.
	AddBinary(key string, value []byte)     // for arbitrary bytes
	AddByteString(key string, value []byte) // for UTF-8 encoded bytes
	AddBool(key string, value bool)
	AddComplex128(key string, value complex128)
	AddComplex64(key string, value complex64)
	AddDuration(key string, value time.Duration)
	AddFloat64(key string, value float64)
	AddFloat32(key string, value float32)
	AddInt(key string, value int)
	AddInt64(key string, value int64)
	AddInt32(key string, value int32)
	AddInt16(key string, value int16)
	AddInt8(key string, value int8)
	AddString(key, value string)
	AddTime(key string, value time.Time)
	AddUint(key string, value uint)
	AddUint64(key string, value uint64)
	AddUint32(key string, value uint32)
	AddUint16(key string, value uint16)
	AddUint8(key string, value uint8)
	AddUintptr(key string, value uintptr)

	// AddReflected uses reflection to serialize arbitrary objects, so it's slow
	// and allocation-heavy.
	AddReflected(key string, value interface{}) error

	// OpenNamespace opens an isolated namespace where all subsequent fields will
	// be added. Applications can use namespaces to prevent key collisions when
	// injecting loggers into sub-components or third-party libraries.
	OpenNamespace(key string)
}
// ObjectMarshaler allows user-defined types to efficiently add themselves to the
// logging context, and to selectively omit information which shouldn't be
// included in logs (e.g., passwords).
type ObjectMarshaler interface {
	MarshalLogObject(ObjectEncoder) error
}

// ObjectMarshalerFunc is a type adapter that turns a function into an
// ObjectMarshaler.
type ObjectMarshalerFunc func(ObjectEncoder) error

// MarshalLogObject calls the underlying function.
func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
	return f(enc)
}

// ArrayMarshaler allows user-defined types to efficiently add themselves to the
// logging context, and to selectively omit information which shouldn't be
// included in logs (e.g., passwords).
type ArrayMarshaler interface {
	MarshalLogArray(ArrayEncoder) error
}

// ArrayMarshalerFunc is a type adapter that turns a function into an
// ArrayMarshaler.
type ArrayMarshalerFunc func(ArrayEncoder) error

// MarshalLogArray calls the underlying function.
func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
	return f(enc)
}
// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
// array-like objects to the logging context. Of note, it supports mixed-type
// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
// aren't safe for concurrent use (though typical use shouldn't require locks).
type ArrayEncoder interface {
	// Built-in types.
	PrimitiveArrayEncoder

	// Time-related types.
	AppendDuration(time.Duration)
	AppendTime(time.Time)

	// Logging-specific marshalers.
	AppendArray(ArrayMarshaler) error
	AppendObject(ObjectMarshaler) error

	// AppendReflected uses reflection to serialize arbitrary objects, so it's
	// slow and allocation-heavy.
	AppendReflected(value interface{}) error
}

// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
// only in Go's built-in types. It's included only so that Duration- and
// TimeEncoders cannot trigger infinite recursion.
type PrimitiveArrayEncoder interface {
	// Built-in types.
	AppendBool(bool)
	AppendByteString([]byte) // for UTF-8 encoded bytes
	AppendComplex128(complex128)
	AppendComplex64(complex64)
	AppendFloat64(float64)
	AppendFloat32(float32)
	AppendInt(int)
	AppendInt64(int64)
	AppendInt32(int32)
	AppendInt16(int16)
	AppendInt8(int8)
	AppendString(string)
	AppendUint(uint)
	AppendUint64(uint64)
	AppendUint32(uint32)
	AppendUint16(uint16)
	AppendUint8(uint8)
	AppendUintptr(uintptr)
}
// An EncoderConfig allows users to configure the concrete encoders supplied by
// zapcore.
//
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
type EncoderConfig struct {
	EncodeTime     TimeEncoder     `json:"timeEncoder" yaml:"timeEncoder"`
	EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
	// Remaining zap encoder hooks, not (yet) supported here:
	/*EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
	EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
	EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
	EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
	// Unlike the other primitive type encoders, EncodeName is optional. The
	// zero value falls back to FullNameEncoder.
	EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`*/
}
// Encoder is a format-agnostic interface for all log entry marshalers. Since
// log encoders don't need to support the same wide range of use cases as
// general-purpose marshalers, it's possible to make them faster and
// lower-allocation.
//
// Implementations of the ObjectEncoder interface's methods can, of course,
// freely modify the receiver. However, the Clone and Encode methods will
// be called concurrently and shouldn't modify the receiver.
type Encoder interface {
	ObjectEncoder
	// Clone copies the encoder, ensuring that adding fields to the copy doesn't
	// affect the original.
	Clone() Encoder
	// Encode encodes the fields, along with any accumulated context, into the
	// byte buffer. (The comment previously said "EncodeEntry", which did not
	// match the method name.)
	Encode(*Buffer, ...Field) error
}
// A TimeEncoder serializes a time.Time to a primitive type.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)

// A DurationEncoder serializes a time.Duration to a primitive type.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)

// EpochTimeEncoder serializes a time.Time as a formatted timestamp string
// using the layout "2006-01-02T15:04:05.999999".
//
// NOTE(review): despite the name, this does NOT emit seconds since the Unix
// epoch — that implementation is commented out below. Confirm whether the
// name or the behavior is the intended one.
func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
	//var d []byte
	enc.AppendString(t.Format("2006-01-02T15:04:05.999999"))
	//enc.AppendByteString(t.AppendFormat(d, "2006-01-02T15:04:05.999999"))
	/*nanos := t.UnixNano()
	sec := float64(nanos) / float64(time.Second)
	enc.AppendFloat64(sec)*/
}
// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
	// Duration.Seconds performs the same conversion as the previous
	// float64(d)/float64(time.Second), but splits whole seconds from the
	// nanosecond remainder, preserving precision for very large durations.
	enc.AppendFloat64(d.Seconds())
}

View File

@@ -0,0 +1,6 @@
package core
// Field is for encoder: a single key/value pair that knows how to add itself
// to an ObjectEncoder.
type Field interface {
	AddTo(enc ObjectEncoder)
}

View File

@@ -0,0 +1,40 @@
# Bazel build file for go-common/library/log/internal/filewriter.
# The "automanaged" tags mark rules as tool-generated; edit with care.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Unit tests for the file writer.
go_test(
    name = "go_default_test",
    srcs = ["filewriter_test.go"],
    embed = [":go_default_library"],
    tags = ["automanaged"],
    deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)

# Rotating file-writer library.
go_library(
    name = "go_default_library",
    srcs = [
        "filewriter.go",
        "option.go",
    ],
    importpath = "go-common/library/log/internal/filewriter",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# All files in this package.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source tree (leaf package: just package-srcs).
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,344 @@
package filewriter
import (
"bytes"
"container/list"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
)
// FileWriter create file log writer
//
// Writes are copied onto an internal channel; a daemon goroutine drains the
// channel, batches the data, appends it to the current file, and rotates the
// file by time period and by size.
type FileWriter struct {
	opt    option
	dir    string // directory holding the log files
	fname  string // base file name, e.g. "info.log"
	ch     chan *bytes.Buffer
	stdlog *log.Logger // fallback logger writing to stderr
	pool   *sync.Pool  // pool of *bytes.Buffer used to copy caller data

	// Rotation state, owned by the daemon goroutine.
	lastRotateFormat string // opt.RotateFormat rendering of the current period
	lastSplitNum     int    // size-split sequence number within the period

	current *wrapFile  // file currently being appended to
	files   *list.List // of rotateItem; NOTE(review): parseRotateItem fills this newest-first while checkRotate appends new rotations at the back — confirm the intended ordering
	closed  int32      // set to 1 by Close; read atomically
	wg      sync.WaitGroup
}

// rotateItem describes an already-rotated log file found on disk.
type rotateItem struct {
	rotateTime int64  // unix seconds parsed from the filename's date suffix
	rotateNum  int    // size-split sequence number (".001", ...)
	fname      string // full file name, e.g. "error.log.2018-09-12.001"
}
// parseRotateItem scans dir for already-rotated files belonging to fname
// (e.g. "error.log.2018-09-12.001") and returns them as a *list.List of
// rotateItem, sorted newest first (by rotate time, ties broken by the higher
// split number).
func parseRotateItem(dir, fname, rotateFormat string) (*list.List, error) {
	fis, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	// parse exists log file filename
	parse := func(s string) (rt rotateItem, err error) {
		// remove filename and left "." error.log.2018-09-12.001 -> 2018-09-12.001
		rt.fname = s
		s = strings.TrimLeft(s[len(fname):], ".")
		seqs := strings.Split(s, ".")
		var t time.Time
		switch len(seqs) {
		case 2:
			// "<date>.<num>": parse the split number first, then fall
			// through to parse the date part.
			if rt.rotateNum, err = strconv.Atoi(seqs[1]); err != nil {
				return
			}
			fallthrough
		case 1:
			if t, err = time.Parse(rotateFormat, seqs[0]); err != nil {
				return
			}
			rt.rotateTime = t.Unix()
		}
		return
	}
	var items []rotateItem
	for _, fi := range fis {
		// only files sharing the prefix, excluding the live file itself
		if strings.HasPrefix(fi.Name(), fname) && fi.Name() != fname {
			rt, err := parse(fi.Name())
			if err != nil {
				// TODO deal with error
				continue
			}
			items = append(items, rt)
		}
	}
	// newest first; ties broken by the higher split number
	sort.Slice(items, func(i, j int) bool {
		if items[i].rotateTime == items[j].rotateTime {
			return items[i].rotateNum > items[j].rotateNum
		}
		return items[i].rotateTime > items[j].rotateTime
	})
	l := list.New()
	for _, item := range items {
		l.PushBack(item)
	}
	return l, nil
}
// wrapFile wraps an *os.File and tracks the number of bytes written so the
// rotation logic can check the file size without calling Stat on every write.
type wrapFile struct {
	fsize int64
	fp    *os.File
}

// size returns the tracked file size in bytes.
func (w *wrapFile) size() int64 {
	return w.fsize
}

// write appends p to the file and accumulates the written byte count.
func (w *wrapFile) write(p []byte) (n int, err error) {
	n, err = w.fp.Write(p)
	w.fsize += int64(n)
	return
}

// newWrapFile opens (creating if necessary) fpath in append-only mode and
// initializes the size counter from the file's current length.
func newWrapFile(fpath string) (*wrapFile, error) {
	fp, err := os.OpenFile(fpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return nil, err
	}
	fi, err := fp.Stat()
	if err != nil {
		// Fix: close the descriptor on the error path; the original leaked
		// fp when Stat failed.
		fp.Close()
		return nil, err
	}
	return &wrapFile{fp: fp, fsize: fi.Size()}, nil
}
// New FileWriter A FileWriter is safe for use by multiple goroutines simultaneously.
//
// New applies the given Options over defaultOption, creates the log directory
// and current file if needed, loads any previously rotated files, and starts
// the background daemon goroutine.
func New(fpath string, fns ...Option) (*FileWriter, error) {
	opt := defaultOption
	for _, fn := range fns {
		fn(&opt)
	}
	fname := filepath.Base(fpath)
	// NOTE(review): filepath.Base never returns "" (it returns "." for an
	// empty path), so this guard appears unreachable — confirm intent.
	if fname == "" {
		return nil, fmt.Errorf("filename can't empty")
	}
	dir := filepath.Dir(fpath)
	fi, err := os.Stat(dir)
	if err == nil && !fi.IsDir() {
		return nil, fmt.Errorf("%s already exists and not a directory", dir)
	}
	if os.IsNotExist(err) {
		if err = os.MkdirAll(dir, 0755); err != nil {
			return nil, fmt.Errorf("create dir %s error: %s", dir, err.Error())
		}
	}
	current, err := newWrapFile(fpath)
	if err != nil {
		return nil, err
	}
	stdlog := log.New(os.Stderr, "flog ", log.LstdFlags)
	ch := make(chan *bytes.Buffer, opt.ChanSize)
	files, err := parseRotateItem(dir, fname, opt.RotateFormat)
	if err != nil {
		// scanning failed: fall back to an empty list and log the problem
		files = list.New()
		stdlog.Printf("parseRotateItem error: %s", err)
	}
	lastRotateFormat := time.Now().Format(opt.RotateFormat)
	var lastSplitNum int
	if files.Len() > 0 {
		rt := files.Front().Value.(rotateItem)
		// a substring check is much easier than comparing timestamps
		if strings.Contains(rt.fname, lastRotateFormat) {
			lastSplitNum = rt.rotateNum
		}
	}
	fw := &FileWriter{
		opt:              opt,
		dir:              dir,
		fname:            fname,
		stdlog:           stdlog,
		ch:               ch,
		pool:             &sync.Pool{New: func() interface{} { return new(bytes.Buffer) }},
		lastSplitNum:     lastSplitNum,
		lastRotateFormat: lastRotateFormat,
		files:            files,
		current:          current,
	}
	fw.wg.Add(1)
	go fw.daemon()
	return fw, nil
}
// Write write data to log file, return write bytes is pseudo just for implement io.Writer.
//
// Data is copied to an internal buffer and flushed to disk later by the
// daemon goroutine; a full channel drops the log (immediately, or after
// opt.WriteTimeout when one is configured).
func (f *FileWriter) Write(p []byte) (int, error) {
	// atomic is not necessary
	if atomic.LoadInt32(&f.closed) == 1 {
		f.stdlog.Printf("%s", p)
		return 0, fmt.Errorf("filewriter already closed")
	}
	// because write to file is asynchronous,
	// copy p to internal buf prevent p be change on outside
	buf := f.getBuf()
	buf.Write(p)
	if f.opt.WriteTimeout == 0 {
		select {
		case f.ch <- buf:
			return len(p), nil
		default:
			// TODO: write discard log to stdout?
			return 0, fmt.Errorf("log channel is full, discard log")
		}
	}
	// write log with timeout.
	// Fix: stop the timer on every exit path; the original leaked a running
	// timer whenever the send succeeded before the timeout fired.
	timeout := time.NewTimer(f.opt.WriteTimeout)
	defer timeout.Stop()
	select {
	case f.ch <- buf:
		return len(p), nil
	case <-timeout.C:
		// TODO: write discard log to stdout?
		return 0, fmt.Errorf("log channel is full, discard log")
	}
}
// daemon is the background goroutine started by New. It batches incoming
// buffers, flushes them to the current file every 10ms, checks rotation every
// opt.RotateInterval, and drains everything before exiting once Close has
// been called.
func (f *FileWriter) daemon() {
	// TODO: check aggsbuf size prevent it too big
	aggsbuf := &bytes.Buffer{}
	tk := time.NewTicker(f.opt.RotateInterval)
	// Fix: stop both tickers on exit; the original left them running forever.
	defer tk.Stop()
	// TODO: make it configrable
	aggstk := time.NewTicker(10 * time.Millisecond)
	defer aggstk.Stop()
	var err error
	for {
		select {
		case t := <-tk.C:
			f.checkRotate(t)
		case buf, ok := <-f.ch:
			if ok {
				aggsbuf.Write(buf.Bytes())
				f.putBuf(buf)
			}
		case <-aggstk.C:
			if aggsbuf.Len() > 0 {
				if err = f.write(aggsbuf.Bytes()); err != nil {
					f.stdlog.Printf("write log error: %s", err)
				}
				aggsbuf.Reset()
			}
		}
		if atomic.LoadInt32(&f.closed) != 1 {
			continue
		}
		// closed: flush the pending aggregate, drain the channel, then exit
		if err = f.write(aggsbuf.Bytes()); err != nil {
			f.stdlog.Printf("write log error: %s", err)
		}
		for buf := range f.ch {
			if err = f.write(buf.Bytes()); err != nil {
				f.stdlog.Printf("write log error: %s", err)
			}
			f.putBuf(buf)
		}
		break
	}
	f.wg.Done()
}
// Close close file writer
//
// Close marks the writer as closed, closes the internal channel, and waits
// for the daemon goroutine to drain pending buffers and exit.
//
// NOTE(review): a Write racing with Close can pass the closed check and then
// send on the just-closed channel, which would panic — confirm callers
// serialize Close with in-flight Writes.
func (f *FileWriter) Close() error {
	atomic.StoreInt32(&f.closed, 1)
	close(f.ch)
	f.wg.Wait()
	return nil
}
// checkRotate enforces the MaxFile limit and rotates the current file when
// the time period changed or the size limit was exceeded. Called only from
// the daemon goroutine.
func (f *FileWriter) checkRotate(t time.Time) {
	// formatFname renders a rotated file name: "<fname>.<period>[.<NNN>]".
	formatFname := func(format string, num int) string {
		if num == 0 {
			return fmt.Sprintf("%s.%s", f.fname, format)
		}
		return fmt.Sprintf("%s.%s.%03d", f.fname, format, num)
	}
	format := t.Format(f.opt.RotateFormat)
	if f.opt.MaxFile != 0 {
		// NOTE(review): removal starts at the list front, but parseRotateItem
		// fills the list newest-first while new rotations are pushed to the
		// back — so this may delete recent files before old ones. Confirm.
		for f.files.Len() > f.opt.MaxFile {
			rt := f.files.Remove(f.files.Front()).(rotateItem)
			fpath := filepath.Join(f.dir, rt.fname)
			if err := os.Remove(fpath); err != nil {
				f.stdlog.Printf("remove file %s error: %s", fpath, err)
			}
		}
	}
	if format != f.lastRotateFormat || (f.opt.MaxSize != 0 && f.current.size() > f.opt.MaxSize) {
		var err error
		// close current file first
		if err = f.current.fp.Close(); err != nil {
			f.stdlog.Printf("close current file error: %s", err)
		}
		// rename the live file to its rotated name
		fname := formatFname(f.lastRotateFormat, f.lastSplitNum)
		oldpath := filepath.Join(f.dir, f.fname)
		newpath := filepath.Join(f.dir, fname)
		if err = os.Rename(oldpath, newpath); err != nil {
			f.stdlog.Printf("rename file %s to %s error: %s", oldpath, newpath, err)
			return
		}
		f.files.PushBack(rotateItem{fname: fname /*rotateNum: f.lastSplitNum, rotateTime: t.Unix() unnecessary*/})
		if format != f.lastRotateFormat {
			// new time period: reset the split counter
			f.lastRotateFormat = format
			f.lastSplitNum = 0
		} else {
			// same period, size-triggered: bump the split counter
			f.lastSplitNum++
		}
		// recreate current file
		f.current, err = newWrapFile(filepath.Join(f.dir, f.fname))
		if err != nil {
			f.stdlog.Printf("create log file error: %s", err)
		}
	}
}
// write flushes p to the current log file. If the current file handle is nil
// (newWrapFile failed during rotation), the data is redirected to stderr and
// an error is returned.
func (f *FileWriter) write(p []byte) error {
	// f.current may be nil, if newWrapFile return err in checkRotate, redirect log to stderr
	if f.current == nil {
		f.stdlog.Printf("can't write log to file, please check stderr log for detail")
		f.stdlog.Printf("%s", p)
		// Fix: the original fell through and dereferenced the nil f.current,
		// which would panic the daemon goroutine.
		return fmt.Errorf("can't write log: current file handle is nil")
	}
	_, err := f.current.write(p)
	return err
}
// putBuf resets buf and returns it to the internal pool.
func (f *FileWriter) putBuf(buf *bytes.Buffer) {
	buf.Reset()
	f.pool.Put(buf)
}

// getBuf fetches a reusable buffer from the internal pool.
func (f *FileWriter) getBuf() *bytes.Buffer {
	return f.pool.Get().(*bytes.Buffer)
}

View File

@@ -0,0 +1,221 @@
package filewriter
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// logdir is the root directory for all files created by these tests.
const logdir = "testlog"

// touch creates an empty file name inside dir, creating dir if needed.
// It panics on error because no *testing.T is available to callers.
func touch(dir, name string) {
	os.MkdirAll(dir, 0755)
	fp, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644)
	if err != nil {
		panic(err)
	}
	fp.Close()
}

// TestMain removes the test log directory after all tests have run.
func TestMain(m *testing.M) {
	ret := m.Run()
	os.RemoveAll(logdir)
	os.Exit(ret)
}
// TestParseRotate seeds a directory with rotated file names and checks that
// parseRotateItem finds them all and orders today's highest split first.
func TestParseRotate(t *testing.T) {
	// local variant of the package-level touch helper that fails the test
	// instead of panicking (intentionally shadows it)
	touch := func(dir, name string) {
		os.MkdirAll(dir, 0755)
		fp, err := os.OpenFile(filepath.Join(dir, name), os.O_CREATE, 0644)
		if err != nil {
			t.Fatal(err)
		}
		fp.Close()
	}
	dir := filepath.Join(logdir, "test-parse-rotate")
	names := []string{"info.log.2018-11-11", "info.log.2018-11-11.001", "info.log.2018-11-11.002", "info.log." + time.Now().Format("2006-01-02") + ".005"}
	for _, name := range names {
		touch(dir, name)
	}
	l, err := parseRotateItem(dir, "info.log", "2006-01-02")
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, len(names), l.Len())
	rt := l.Front().Value.(rotateItem)
	assert.Equal(t, 5, rt.rotateNum)
}
// TestRotateExists verifies that when a rotated file for today already exists
// (split .005), new size-triggered rotations continue from the next number.
func TestRotateExists(t *testing.T) {
	dir := filepath.Join(logdir, "test-rotate-exists")
	names := []string{"info.log." + time.Now().Format("2006-01-02") + ".005"}
	for _, name := range names {
		touch(dir, name)
	}
	fw, err := New(logdir+"/test-rotate-exists/info.log",
		MaxSize(1024*1024),
		func(opt *option) { opt.RotateInterval = time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	// write ~10 MiB total so the 1 MiB size limit forces several rotations
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-rotate-exists")
	if err != nil {
		t.Fatal(err)
	}
	var fnams []string
	for _, fi := range fis {
		fnams = append(fnams, fi.Name())
	}
	assert.Contains(t, fnams, "info.log."+time.Now().Format("2006-01-02")+".006")
}
// TestSizeRotate writes well past the 1 MiB limit and expects several
// size-triggered rotations to have produced extra files.
func TestSizeRotate(t *testing.T) {
	fw, err := New(logdir+"/test-rotate/info.log",
		MaxSize(1024*1024),
		func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	// ~10 MiB total against a 1 MiB limit
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-rotate")
	if err != nil {
		t.Fatal(err)
	}
	assert.True(t, len(fis) > 5, "expect more than 5 file get %d", len(fis))
}
// TestMaxFile checks that MaxFile(1) prunes rotated files down to at most
// one rotated file plus the live file.
func TestMaxFile(t *testing.T) {
	fw, err := New(logdir+"/test-maxfile/info.log",
		MaxSize(1024*1024),
		MaxFile(1),
		func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	// force many rotations so pruning must kick in
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-maxfile")
	if err != nil {
		t.Fatal(err)
	}
	assert.True(t, len(fis) <= 2, fmt.Sprintf("expect 2 file get %d", len(fis)))
}
// TestMaxFile2 seeds six pre-existing rotated files and checks that
// MaxFile(3) prunes the directory down to 3 rotated files + the live file.
func TestMaxFile2(t *testing.T) {
	files := []string{
		"info.log.2018-12-01",
		"info.log.2018-12-02",
		"info.log.2018-12-03",
		"info.log.2018-12-04",
		"info.log.2018-12-05",
		"info.log.2018-12-05.001",
	}
	for _, file := range files {
		touch(logdir+"/test-maxfile2", file)
	}
	fw, err := New(logdir+"/test-maxfile2/info.log",
		MaxSize(1024*1024),
		MaxFile(3),
		func(opt *option) { opt.RotateInterval = 1 * time.Millisecond },
	)
	if err != nil {
		t.Fatal(err)
	}
	data := make([]byte, 1024)
	for i := range data {
		data[i] = byte(i)
	}
	// generate enough traffic to trigger rotation and pruning
	for i := 0; i < 10; i++ {
		for i := 0; i < 1024; i++ {
			_, err = fw.Write(data)
			if err != nil {
				t.Error(err)
			}
		}
		time.Sleep(10 * time.Millisecond)
	}
	fw.Close()
	fis, err := ioutil.ReadDir(logdir + "/test-maxfile2")
	if err != nil {
		t.Fatal(err)
	}
	assert.True(t, len(fis) == 4, fmt.Sprintf("expect 4 file get %d", len(fis)))
}
// TestFileWriter is a smoke test: create a writer and perform one Write.
func TestFileWriter(t *testing.T) {
	fw, err := New("testlog/info.log")
	if err != nil {
		t.Fatal(err)
	}
	defer fw.Close()
	_, err = fw.Write([]byte("Hello World!\n"))
	if err != nil {
		t.Error(err)
	}
}
// BenchmarkFileWriter measures the cost of a single small Write with a 1s
// write timeout and an 8 MiB size limit.
func BenchmarkFileWriter(b *testing.B) {
	fw, err := New("testlog/bench/info.log",
		func(opt *option) { opt.WriteTimeout = time.Second }, MaxSize(1024*1024*8), /*32MB*/
		func(opt *option) { opt.RotateInterval = 10 * time.Millisecond },
	)
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		_, err = fw.Write([]byte("Hello World!\n"))
		if err != nil {
			b.Error(err)
		}
	}
}

View File

@@ -0,0 +1,69 @@
package filewriter
import (
"fmt"
"strings"
"time"
)
// RotateFormat presets for option.RotateFormat.
const (
	// RotateDaily rotates the log file once per day.
	RotateDaily = "2006-01-02"
)

// defaultOption: daily rotation, 1 GiB max file size, 8192-entry channel,
// rotation check every 10s. MaxFile and WriteTimeout stay at their zero
// values (unlimited files / non-blocking writes).
var defaultOption = option{
	RotateFormat:   RotateDaily,
	MaxSize:        1 << 30,
	ChanSize:       1024 * 8,
	RotateInterval: 10 * time.Second,
}
// option holds FileWriter configuration; mutate it through Option functions.
type option struct {
	RotateFormat string // time layout used in rotated file name suffixes
	MaxFile      int    // max number of rotated files kept, 0 = unlimited
	MaxSize      int64  // max size of a single file in bytes, 0 = unlimited
	ChanSize     int    // capacity of the internal write channel
	// TODO export Option
	RotateInterval time.Duration // how often the daemon checks for rotation
	WriteTimeout   time.Duration // 0 = drop immediately when the channel is full
}

// Option filewriter option
type Option func(opt *option)
// RotateFormat e.g 2006-01-02 meaning rotate log file every day.
// NOTE: format can't contain ".", "." will cause panic ヽ(*。>Д<)o゜.
// The panic fires when the Option is constructed, not when it is applied.
func RotateFormat(format string) Option {
	if strings.Contains(format, ".") {
		panic(fmt.Sprintf("rotate format can't contain '.' format: %s", format))
	}
	return func(opt *option) {
		opt.RotateFormat = format
	}
}
// MaxFile sets how many rotated files are kept; 0 meaning unlimit.
// NOTE(review): the original comment claimed "default 999", but defaultOption
// leaves MaxFile at 0 (unlimited) — confirm the intended default.
// TODO: don't create file list if MaxFile is unlimited.
func MaxFile(n int) Option {
	return func(opt *option) {
		opt.MaxFile = n
	}
}
// MaxSize set max size for single log file,
// default 1GB, 0 meaning unlimit.
func MaxSize(n int64) Option {
	return func(opt *option) {
		opt.MaxSize = n
	}
}
// ChanSize sets the internal chan size, default 8192 (about 64 KiB of pointer
// storage on a 64-bit platform). Because filewriter keeps an internal object
// pool, enlarging the channel can make filewriter hold a lot of memory:
// sync.Pool has no expiry, so pooled buffers aren't freed until program exit.
func ChanSize(n int) Option {
	return func(opt *option) {
		opt.ChanSize = n
	}
}

View File

@@ -0,0 +1,424 @@
package core
import (
"encoding/base64"
"encoding/json"
"math"
"sync"
"time"
"unicode/utf8"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"

// compile-time check that jsonEncoder implements ObjectEncoder
var _ ObjectEncoder = &jsonEncoder{}

// _jsonPool recycles jsonEncoder instances to avoid per-entry allocations.
var _jsonPool = sync.Pool{New: func() interface{} {
	return &jsonEncoder{}
}}
// getJSONEncoder fetches a jsonEncoder from the pool.
func getJSONEncoder() *jsonEncoder {
	return _jsonPool.Get().(*jsonEncoder)
}

// putJSONEncoder clears all state (returning the reflect buffer to its pool)
// and puts the encoder back into the pool.
func putJSONEncoder(enc *jsonEncoder) {
	if enc.reflectBuf != nil {
		enc.reflectBuf.Free()
	}
	enc.EncoderConfig = nil
	enc.buf = nil
	enc.spaced = false
	enc.openNamespaces = 0
	enc.reflectBuf = nil
	enc.reflectEnc = nil
	_jsonPool.Put(enc)
}
// jsonEncoder is a fast, low-allocation JSON implementation of Encoder.
type jsonEncoder struct {
	*EncoderConfig
	buf            *Buffer // destination for all encoded output
	spaced         bool    // include spaces after colons and commas
	openNamespaces int     // count of namespaces opened via OpenNamespace

	// for encoding generic values by reflection
	reflectBuf *Buffer
	reflectEnc *json.Encoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
// appropriately escapes all field keys and values.
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
//   {"foo":"bar","foo":"baz"}
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
// keys.
func NewJSONEncoder(cfg EncoderConfig, buf *Buffer) Encoder {
	return newJSONEncoder(cfg, false, buf)
}

// newJSONEncoder builds an encoder writing to buf; cfg is copied so the
// caller's struct is not retained.
func newJSONEncoder(cfg EncoderConfig, spaced bool, buf *Buffer) *jsonEncoder {
	return &jsonEncoder{
		EncoderConfig: &cfg,
		buf:           buf,
		spaced:        spaced,
	}
}
// AddArray adds a JSON array under key.
func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
	enc.addKey(key)
	return enc.AppendArray(arr)
}

// AddObject adds a JSON object under key.
func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
	enc.addKey(key)
	return enc.AppendObject(obj)
}

// AddBinary adds arbitrary bytes under key, base64-encoded.
func (enc *jsonEncoder) AddBinary(key string, val []byte) {
	enc.AddString(key, base64.StdEncoding.EncodeToString(val))
}

// AddByteString adds UTF-8 bytes under key as an escaped JSON string.
func (enc *jsonEncoder) AddByteString(key string, val []byte) {
	enc.addKey(key)
	enc.AppendByteString(val)
}

// AddBool adds a boolean under key.
func (enc *jsonEncoder) AddBool(key string, val bool) {
	enc.addKey(key)
	enc.AppendBool(val)
}

// AddComplex128 adds a complex number under key.
func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
	enc.addKey(key)
	enc.AppendComplex128(val)
}

// AddDuration adds a duration under key.
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
	enc.addKey(key)
	enc.AppendDuration(val)
}

// AddFloat64 adds a float under key.
func (enc *jsonEncoder) AddFloat64(key string, val float64) {
	enc.addKey(key)
	enc.AppendFloat64(val)
}

// AddInt64 adds an integer under key.
func (enc *jsonEncoder) AddInt64(key string, val int64) {
	enc.addKey(key)
	enc.AppendInt64(val)
}
// resetReflectBuf lazily creates (or clears) the scratch buffer and
// json.Encoder used by AddReflected.
func (enc *jsonEncoder) resetReflectBuf() {
	if enc.reflectBuf == nil {
		enc.reflectBuf = GetPool()
		enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
	} else {
		enc.reflectBuf.Reset()
	}
}
// AddReflected adds key with obj marshaled via encoding/json reflection.
// On a marshaling error nothing is written to the output buffer.
func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
	enc.resetReflectBuf()
	if err := enc.reflectEnc.Encode(obj); err != nil {
		return err
	}
	// json.Encoder.Encode appends a trailing newline after every value;
	// strip it before splicing the bytes into the output.
	enc.reflectBuf.TrimNewline()
	enc.addKey(key)
	_, err := enc.buf.Write(enc.reflectBuf.Bytes())
	return err
}
// OpenNamespace opens a nested JSON object under key; all subsequently
// added fields land inside it until closeOpenNamespaces runs.
func (enc *jsonEncoder) OpenNamespace(key string) {
	enc.addKey(key)
	enc.buf.AppendByte('{')
	enc.openNamespaces++
}

// AddString adds key with val as a quoted, JSON-escaped string.
func (enc *jsonEncoder) AddString(key, val string) {
	enc.addKey(key)
	enc.AppendString(val)
}

// AddTime adds key with val rendered by the configured EncodeTime
// (falling back to UnixNano if that encoder writes nothing).
func (enc *jsonEncoder) AddTime(key string, val time.Time) {
	enc.addKey(key)
	enc.AppendTime(val)
}

// AddUint64 adds key with val as a base-10 JSON number.
func (enc *jsonEncoder) AddUint64(key string, val uint64) {
	enc.addKey(key)
	enc.AppendUint64(val)
}
// AppendArray appends arr as a JSON array. The closing bracket is written
// even when MarshalLogArray fails, keeping the buffer structurally valid.
func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
	enc.addElementSeparator()
	enc.buf.AppendByte('[')
	err := arr.MarshalLogArray(enc)
	enc.buf.AppendByte(']')
	return err
}

// AppendObject appends obj as a JSON object. The closing brace is written
// even when MarshalLogObject fails, keeping the buffer structurally valid.
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
	enc.addElementSeparator()
	enc.buf.AppendByte('{')
	err := obj.MarshalLogObject(enc)
	enc.buf.AppendByte('}')
	return err
}

// AppendBool appends a bare true/false value.
func (enc *jsonEncoder) AppendBool(val bool) {
	enc.addElementSeparator()
	enc.buf.AppendBool(val)
}

// AppendByteString appends val as a quoted, JSON-escaped string without
// allocating an intermediate string.
func (enc *jsonEncoder) AppendByteString(val []byte) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddByteString(val)
	enc.buf.AppendByte('"')
}
// AppendComplex128 appends val as a quoted "<real>+<imag>i" string.
// Staying inside a quoted string lets strconv format NaN and +/-Inf
// without any special-casing and without breaking the JSON output.
func (enc *jsonEncoder) AppendComplex128(val complex128) {
	enc.addElementSeparator()
	re, im := real(val), imag(val)
	enc.buf.AppendByte('"')
	enc.buf.AppendFloat(re, 64)
	enc.buf.AppendByte('+')
	enc.buf.AppendFloat(im, 64)
	enc.buf.AppendByte('i')
	enc.buf.AppendByte('"')
}
// AppendDuration renders val with the user-configured EncodeDuration,
// detecting a no-op encoder by comparing buffer lengths before and after.
func (enc *jsonEncoder) AppendDuration(val time.Duration) {
	cur := enc.buf.Len()
	enc.EncodeDuration(val, enc)
	if cur == enc.buf.Len() {
		// User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
		// JSON valid.
		enc.AppendInt64(int64(val))
	}
}

// AppendInt64 appends val as a base-10 JSON number.
func (enc *jsonEncoder) AppendInt64(val int64) {
	enc.addElementSeparator()
	enc.buf.AppendInt(val)
}
// AppendReflected appends val marshaled via encoding/json reflection.
// On a marshaling error nothing is written to the output buffer.
func (enc *jsonEncoder) AppendReflected(val interface{}) error {
	enc.resetReflectBuf()
	if err := enc.reflectEnc.Encode(val); err != nil {
		return err
	}
	// Drop the trailing newline json.Encoder.Encode emits after each value.
	enc.reflectBuf.TrimNewline()
	enc.addElementSeparator()
	_, err := enc.buf.Write(enc.reflectBuf.Bytes())
	return err
}
// AppendString appends val as a quoted, JSON-escaped string.
func (enc *jsonEncoder) AppendString(val string) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddString(val)
	enc.buf.AppendByte('"')
}

// AppendTime renders val with the user-configured EncodeTime, detecting a
// no-op encoder by comparing buffer lengths before and after.
func (enc *jsonEncoder) AppendTime(val time.Time) {
	cur := enc.buf.Len()
	enc.EncodeTime(val, enc)
	if cur == enc.buf.Len() {
		// User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
		// output JSON valid.
		enc.AppendInt64(val.UnixNano())
	}
}

// AppendUint64 appends val as a base-10 JSON number.
func (enc *jsonEncoder) AppendUint64(val uint64) {
	enc.addElementSeparator()
	enc.buf.AppendUint(val)
}
// The narrower fixed-width and platform-width variants below delegate to
// their 64-bit counterparts after a widening conversion.
func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
func (enc *jsonEncoder) AddFloat32(k string, v float32)     { enc.AddFloat64(k, float64(v)) }
func (enc *jsonEncoder) AddInt(k string, v int)             { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt32(k string, v int32)         { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt16(k string, v int16)         { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddInt8(k string, v int8)           { enc.AddInt64(k, int64(v)) }
func (enc *jsonEncoder) AddUint(k string, v uint)           { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint32(k string, v uint32)       { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint16(k string, v uint16)       { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUint8(k string, v uint8)         { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AddUintptr(k string, v uintptr)     { enc.AddUint64(k, uint64(v)) }
func (enc *jsonEncoder) AppendComplex64(v complex64)        { enc.AppendComplex128(complex128(v)) }
func (enc *jsonEncoder) AppendFloat64(v float64)            { enc.appendFloat(v, 64) }
func (enc *jsonEncoder) AppendFloat32(v float32)            { enc.appendFloat(float64(v), 32) }
func (enc *jsonEncoder) AppendInt(v int)                    { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt32(v int32)                { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt16(v int16)                { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendInt8(v int8)                  { enc.AppendInt64(int64(v)) }
func (enc *jsonEncoder) AppendUint(v uint)                  { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint32(v uint32)              { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint16(v uint16)              { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUint8(v uint8)                { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) AppendUintptr(v uintptr)            { enc.AppendUint64(uint64(v)) }
// Clone returns a copy of the encoder (config, spacing, open-namespace
// count) backed by a fresh pooled buffer, so the copy can be mutated
// without affecting the original.
func (enc *jsonEncoder) Clone() Encoder {
	clone := enc.clone()
	return clone
}

// clone fetches a pooled jsonEncoder, copies enc's settings into it, and
// attaches a fresh buffer from the pool.
//
// NOTE(review): enc's already-buffered context bytes are NOT copied into
// the clone here; Encode splices them in at encode time instead. Confirm
// that no caller of Clone expects the buffered context to carry over.
func (enc *jsonEncoder) clone() *jsonEncoder {
	clone := getJSONEncoder()
	clone.EncoderConfig = enc.EncoderConfig
	clone.spaced = enc.spaced
	clone.openNamespaces = enc.openNamespaces
	clone.buf = GetPool()
	return clone
}
// Encode writes a complete "{...}\n" JSON object into buf: first any
// context already buffered on enc, then fields in order, then closing
// braces for any namespaces still open. Duplicate keys are not
// deduplicated. It always returns nil.
func (enc *jsonEncoder) Encode(buf *Buffer, fields ...Field) error {
	final := enc.clone()
	// clone() attaches a pooled buffer that this method never writes to;
	// return it to the pool before swapping in the caller's buffer so it
	// isn't leaked (re-allocated) on every Encode call.
	final.buf.Free()
	final.buf = buf
	final.buf.AppendByte('{')
	if enc.buf.Len() > 0 {
		final.addElementSeparator()
		final.buf.Write(enc.buf.Bytes())
	}
	for i := range fields {
		fields[i].AddTo(final)
	}
	final.closeOpenNamespaces()
	final.buf.AppendString("}\n")
	putJSONEncoder(final)
	return nil
}
// closeOpenNamespaces emits one closing brace for every namespace that
// OpenNamespace has opened.
func (enc *jsonEncoder) closeOpenNamespaces() {
	for n := enc.openNamespaces; n > 0; n-- {
		enc.buf.AppendByte('}')
	}
}
// addKey writes an element separator if one is needed, then the
// JSON-escaped key in quotes followed by ':' (plus a space when spaced
// output is enabled).
func (enc *jsonEncoder) addKey(key string) {
	enc.addElementSeparator()
	enc.buf.AppendByte('"')
	enc.safeAddString(key)
	enc.buf.AppendByte('"')
	enc.buf.AppendByte(':')
	if enc.spaced {
		enc.buf.AppendByte(' ')
	}
}
// addElementSeparator appends ',' (plus a space in spaced mode) before a
// new element, unless the buffer is empty or its last byte shows we are at
// the start of a container or just after a key ('{', '[', ':', ',' or the
// space emitted in spaced mode), in which case no separator is needed.
func (enc *jsonEncoder) addElementSeparator() {
	last := enc.buf.Len() - 1
	if last < 0 {
		// Empty buffer: nothing to separate from.
		return
	}
	switch enc.buf.Bytes()[last] {
	case '{', '[', ':', ',', ' ':
		return
	default:
		enc.buf.AppendByte(',')
		if enc.spaced {
			enc.buf.AppendByte(' ')
		}
	}
}
// appendFloat appends val as a JSON number. NaN and the infinities have no
// JSON number representation, so they are emitted as quoted strings.
func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
	enc.addElementSeparator()
	if math.IsNaN(val) {
		enc.buf.AppendString(`"NaN"`)
		return
	}
	if math.IsInf(val, 1) {
		enc.buf.AppendString(`"+Inf"`)
		return
	}
	if math.IsInf(val, -1) {
		enc.buf.AppendString(`"-Inf"`)
		return
	}
	enc.buf.AppendFloat(val, bitSize)
}
// safeAddString JSON-escapes a string and appends it to the internal buffer.
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
	for i := 0; i < len(s); {
		// Fast path: single-byte (ASCII) characters, escaped as needed.
		if enc.tryAddRuneSelf(s[i]) {
			i++
			continue
		}
		r, size := utf8.DecodeRuneInString(s[i:])
		// Invalid UTF-8 becomes \ufffd, consuming exactly one byte.
		if enc.tryAddRuneError(r, size) {
			i++
			continue
		}
		// Valid multi-byte rune: copy it through unescaped.
		enc.buf.AppendString(s[i : i+size])
		i += size
	}
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
	for i := 0; i < len(s); {
		// Fast path: single-byte (ASCII) characters, escaped as needed.
		if enc.tryAddRuneSelf(s[i]) {
			i++
			continue
		}
		r, size := utf8.DecodeRune(s[i:])
		// Invalid UTF-8 becomes \ufffd, consuming exactly one byte.
		if enc.tryAddRuneError(r, size) {
			i++
			continue
		}
		// Valid multi-byte rune: copy it through unescaped.
		enc.buf.Write(s[i : i+size])
		i += size
	}
}
// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
// It returns false for bytes >= utf8.RuneSelf (the lead byte of a
// multi-byte rune), which the caller must decode itself. Printable ASCII
// is copied verbatim; backslash, double quote, and control characters are
// JSON-escaped.
func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
	if b >= utf8.RuneSelf {
		return false
	}
	// Printable ASCII other than '\' and '"' needs no escaping.
	if 0x20 <= b && b != '\\' && b != '"' {
		enc.buf.AppendByte(b)
		return true
	}
	switch b {
	case '\\', '"':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte(b)
	case '\n':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('n')
	case '\r':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('r')
	case '\t':
		enc.buf.AppendByte('\\')
		enc.buf.AppendByte('t')
	default:
		// Encode bytes < 0x20, except for the escape sequences above.
		enc.buf.AppendString(`\u00`)
		enc.buf.AppendByte(_hex[b>>4])
		enc.buf.AppendByte(_hex[b&0xF])
	}
	return true
}
// tryAddRuneError appends the Unicode replacement character (\ufffd) when
// (r, size) is the signature of an invalid UTF-8 byte — utf8.RuneError
// decoded with size 1 — and reports whether it did so.
func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
	if r != utf8.RuneError || size != 1 {
		return false
	}
	enc.buf.AppendString(`\ufffd`)
	return true
}

View File

@@ -0,0 +1,52 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import "sync"
// A Pool is a type-safe wrapper around a sync.Pool of *Buffer values.
// The zero value is not usable; construct one with NewPool.
type Pool struct {
	p *sync.Pool
}
// NewPool constructs a new Pool whose Buffers start with a capacity of
// size bytes. Non-positive sizes (including the zero value) fall back to
// the package default _size (1 KiB); previously a negative size would
// panic inside make when the pool allocated its first buffer.
func NewPool(size int) Pool {
	if size <= 0 {
		size = _size
	}
	return Pool{p: &sync.Pool{
		New: func() interface{} {
			return &Buffer{bs: make([]byte, 0, size)}
		},
	}}
}
// Get retrieves a Buffer from the pool, creating one if necessary. The
// returned buffer is reset (length zero, capacity retained) and remembers
// its origin pool so it can be returned later (typically via Buffer.Free).
func (p Pool) Get() *Buffer {
	buf := p.p.Get().(*Buffer)
	buf.Reset()
	buf.pool = p
	return buf
}

// put returns buf to the pool for reuse.
func (p Pool) put(buf *Buffer) {
	p.p.Put(buf)
}

View File

@@ -0,0 +1,52 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package core
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestBuffers exercises Pool from multiple goroutines: every buffer handed
// out must be empty with non-zero capacity, must report the length of data
// appended to it, and concurrent Get/Free cycles must be race-free.
func TestBuffers(t *testing.T) {
	const dummyData = "dummy data"
	p := NewPool(0)
	var wg sync.WaitGroup
	for g := 0; g < 10; g++ {
		wg.Add(1)
		go func() {
			// defer guarantees Done even if an assertion panics.
			defer wg.Done()
			for i := 0; i < 100; i++ {
				buf := p.Get()
				assert.Zero(t, buf.Len(), "Expected truncated buffer")
				assert.NotZero(t, buf.Cap(), "Expected non-zero capacity")
				buf.AppendString(dummyData)
				// assert.Equal takes (t, expected, actual); the original
				// call had the arguments swapped, which garbles failure
				// messages.
				assert.Equal(t, len(dummyData), buf.Len(), "Expected buffer to contain dummy data")
				buf.Free()
			}
		}()
	}
	wg.Wait()
}