Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

View File

@@ -0,0 +1,24 @@
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/service/main/rank/api/gorpc:all-srcs",
"//app/service/main/rank/cmd:all-srcs",
"//app/service/main/rank/conf:all-srcs",
"//app/service/main/rank/dao:all-srcs",
"//app/service/main/rank/model:all-srcs",
"//app/service/main/rank/server/gorpc:all-srcs",
"//app/service/main/rank/server/grpc:all-srcs",
"//app/service/main/rank/server/http:all-srcs",
"//app/service/main/rank/service:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,11 @@
### rank-service
# v1.0.2
1. fix Archive-Notify-T id=0
# v1.0.1
1. fix pb dump
# v1.0.0
1. 项目初始化

View File

@@ -0,0 +1,11 @@
# Owner
guanhuaxin
chenzhihui
zhapuyu
# Author
libingqi
# Reviewer
chenzhihui
zhapuyu

View File

@@ -0,0 +1,17 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- chenzhihui
- guanhuaxin
- libingqi
- zhapuyu
labels:
- main
- service
- service/main/rank
options:
no_parent_owners: true
reviewers:
- chenzhihui
- libingqi
- zhapuyu

View File

@@ -0,0 +1,12 @@
# rank-service
# 项目简介
1.稿件播放数,收藏数,发布时间排行
# 编译环境
golang v1.7.x以上版本
# 依赖包
公共包go-common
# 编译执行

View File

@@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["rank.go"],
importpath = "go-common/app/service/main/rank/api/gorpc",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/rank/model:go_default_library",
"//library/net/rpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,48 @@
package rank
import (
"context"
"go-common/app/service/main/rank/model"
"go-common/library/net/rpc"
)
// RPC method names exposed by the rank-service gorpc server.
const (
	_mget  = "RPC.Mget"
	_sort  = "RPC.Sort"
	_group = "RPC.Group"
)

// Discovery app id used to resolve rank-service instances.
const (
	_appid = "main.search.rank-service"
)
// Service is the gorpc client wrapper for rank-service.
type Service struct {
	// client resolves instances through discovery and carries the calls.
	client *rpc.Client2
}
// New builds a rank-service client whose endpoints are resolved via
// discovery under the fixed app id.
func New(c *rpc.ClientConfig) (s *Service) {
	return &Service{
		client: rpc.NewDiscoveryCli(_appid, c),
	}
}
// Mget fetches rank fields for multiple oids in one RPC call.
func (s *Service) Mget(c context.Context, arg *model.MgetReq) (res *model.MgetResp, err error) {
	err = s.client.Call(c, _mget, arg, &res)
	return
}
// Sort asks rank-service to filter and order the given oids.
func (s *Service) Sort(c context.Context, arg *model.SortReq) (res *model.SortResp, err error) {
	err = s.client.Call(c, _sort, arg, &res)
	return
}
// Group asks rank-service to bucket the given oids by a field.
func (s *Service) Group(c context.Context, arg *model.GroupReq) (res *model.GroupResp, err error) {
	err = s.client.Call(c, _group, arg, &res)
	return
}

View File

@@ -0,0 +1 @@
# HTTP API文档

View File

@@ -0,0 +1,52 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_binary(
name = "cmd",
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
data = [
"rank-service.toml",
"test.toml",
],
importpath = "go-common/app/service/main/rank/cmd",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/rank/conf:go_default_library",
"//app/service/main/rank/server/gorpc:go_default_library",
"//app/service/main/rank/server/http:go_default_library",
"//app/service/main/rank/service:go_default_library",
"//library/conf/env:go_default_library",
"//library/ecode/tip:go_default_library",
"//library/log:go_default_library",
"//library/naming:go_default_library",
"//library/naming/discovery:go_default_library",
"//library/net/ip:go_default_library",
"//library/net/trace:go_default_library",
],
)

View File

@@ -0,0 +1,81 @@
package main
import (
"context"
"flag"
"os"
"os/signal"
"syscall"
"time"
"go-common/app/service/main/rank/conf"
rpc "go-common/app/service/main/rank/server/gorpc"
"go-common/app/service/main/rank/server/http"
"go-common/app/service/main/rank/service"
"go-common/library/conf/env"
ecode "go-common/library/ecode/tip"
"go-common/library/log"
"go-common/library/naming"
"go-common/library/naming/discovery"
xip "go-common/library/net/ip"
"go-common/library/net/trace"
)
// main wires up the rank-service process: config, logging, tracing,
// service construction, RPC/HTTP servers, discovery registration, and
// graceful shutdown on termination signals.
func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Init(conf.Conf.Log)
	defer log.Close()
	log.Info("start")
	trace.Init(conf.Conf.Tracer)
	defer trace.Close()
	ecode.Init(conf.Conf.Ecode)
	// init service and servers
	svr := service.New(conf.Conf)
	rpcSvr := rpc.New(conf.Conf, svr)
	http.Init(conf.Conf, svr)
	// start discovery register
	var (
		err    error
		cancel context.CancelFunc
	)
	// Self-register only when env.IP is unset — presumably a non-empty
	// env.IP means registration is handled externally; confirm.
	if env.IP == "" {
		ip := xip.InternalIP()
		dis := discovery.New(nil)
		ins := &naming.Instance{
			Zone:  env.Zone,
			Env:   env.DeployEnv,
			AppID: env.AppID,
			Addrs: []string{
				"http://" + ip + ":" + env.HTTPPort,
				"gorpc://" + ip + ":" + env.GORPCPort,
			},
		}
		cancel, err = dis.Register(context.Background(), ins)
		if err != nil {
			panic(err)
		}
	}
	// Block until a termination signal arrives.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
	for {
		s := <-c
		log.Info("get a signal %s", s.String())
		switch s {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			// Deregister first, stop accepting RPC, then sleep 2s —
			// presumably to let deregistration propagate and in-flight
			// requests drain — before closing the service.
			if cancel != nil {
				cancel()
			}
			rpcSvr.Close()
			time.Sleep(time.Second * 2)
			svr.Close()
			log.Info("rank-service exit")
			return
		case syscall.SIGHUP:
			// SIGHUP is deliberately ignored (no reload handling here).
		default:
			// Unknown signal: exit without the graceful-shutdown steps.
			return
		}
	}
}

View File

@@ -0,0 +1,99 @@
# uat config. NOTE(review): this file embeds plaintext database and databus credentials; move them to a secret store.
[bm]
addr = "0.0.0.0:7861"
timeout = "1s"
[rpcServer]
proto = "tcp"
addr = "0.0.0.0:7869"
[rank]
switchAll = false
ticker = "10s"
rowsLimit = 1000
batchSleep = "10ms"
batchStep = "24h"
filePath = "/data/backup/rank-service/"
fileName = "rmap_%d.pb"
[log]
dir="/data/log/rank-service"
[mysql]
[mysql.bilibiliArchive]
addr = "172.22.34.101:3306"
dsn = "bili_search:BzwF6Ez64RT6Yy2alSKDFGCPgGX5tMlj@tcp(172.22.34.101:3306)/bilibili_archive?timeout=500ms&readTimeout=500ms&writeTimeout=500ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "5s"
execTimeout = "5s"
tranTimeout = "5s"
[mysql.archiveStat]
addr = "172.22.34.101:3306"
dsn = "bili_search:BzwF6Ez64RT6Yy2alSKDFGCPgGX5tMlj@tcp(172.22.34.101:3306)/archive_stat?timeout=500ms&readTimeout=500ms&writeTimeout=500ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "5s"
execTimeout = "5s"
tranTimeout = "5s"
[mysql.bilibiliTV]
addr = "172.22.34.101:3309"
dsn = "bili_search:OsQ2cmCoWpafd91gEAPIiNV87Fjq6nZu@tcp(172.22.34.101:3309)/bilibili_tv?timeout=500ms&readTimeout=500ms&writeTimeout=500ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "5s"
execTimeout = "5s"
tranTimeout = "5s"
[databus]
[databus.Archive]
key = "2511663d546f1413"
secret = "cde3b480836cc76df3d635470f991caa"
group = "Archive-MainSearch-S"
topic = "Archive-T"
action = "sub"
name = "rank-service/archive-sub"
proto = "tcp"
addr = "172.18.33.50:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.StatView]
key = "2511663d546f1413"
secret = "cde3b480836cc76df3d635470f991caa"
group = "StatView-MainSearch-S"
topic = "StatView-T"
action = "sub"
name = "rank-service/statView-sub"
proto = "tcp"
addr = "172.18.33.50:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.UgcTvBinlog]
key = "2511663d546f1413"
secret = "cde3b480836cc76df3d635470f991caa"
group = "UgcTvBinlog-MainSearch-S"
topic = "UgcTvBinlog-T"
action = "sub"
name = "rank-service/ugcTvBinlog-sub"
proto = "tcp"
addr = "172.18.33.50:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"

View File

@@ -0,0 +1,96 @@
# dev config. NOTE(review): this file embeds plaintext database and databus credentials; move them to a secret store.
[bm]
addr = "0.0.0.0:7861"
timeout = "1s"
[rank]
switchAll = false
switchIncr = false
ticker = "10s"
rowsLimit = 1000
batchSleep = "10ms"
batchStep = "24h"
filePath = "/data/backup/rank-service/"
fileName = "rmap_%d.pb"
[log]
dir="/data/log/rank-service"
[mysql]
[mysql.bilibiliArchive]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_archive?timeout=500ms&readTimeout=500ms&writeTimeout=500ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[mysql.archiveStat]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/archive_stat?timeout=500ms&readTimeout=500ms&writeTimeout=500ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[mysql.bilibiliTV]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_tv?timeout=500ms&readTimeout=500ms&writeTimeout=500ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[databus]
[databus.StatView]
key = "170e302355453683"
secret = "3d0e8db7bed0503949e545a469789279"
group = "Favorite-MainSearch-S"
topic = "StatView-T"
action = "sub"
name = "rank-service/statView-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.UgcTvBinlog]
key = "170e302355453683"
secret = "3d0e8db7bed0503949e545a469789279"
group = "Favorite-MainSearch-S"
topic = "UgcTvBinlog-T"
action = "sub"
name = "rank-service/UgcTvBinlog-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.Archive]
key = "170e302355453683"
secret = "3d0e8db7bed0503949e545a469789279"
group = "Rank-MainSearch-S"
topic = "Archive-T"
action = "sub"
name = "rank-service/archive-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"

View File

@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["conf.go"],
importpath = "go-common/app/service/main/rank/conf",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/conf:go_default_library",
"//library/database/sql:go_default_library",
"//library/ecode/tip:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/http/blademaster/middleware/verify:go_default_library",
"//library/net/rpc:go_default_library",
"//library/net/trace:go_default_library",
"//library/queue/databus:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/BurntSushi/toml:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,116 @@
package conf
import (
"errors"
"flag"
"go-common/library/conf"
"go-common/library/database/sql"
ecode "go-common/library/ecode/tip"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/http/blademaster/middleware/verify"
"go-common/library/net/rpc"
"go-common/library/net/trace"
"go-common/library/queue/databus"
"go-common/library/time"
"github.com/BurntSushi/toml"
)
var (
	// confPath is the local config file path set by the -conf flag.
	confPath string
	// client is the config-center client, used only when -conf is empty.
	client *conf.Client
	// Conf is the global configuration instance read by the whole app.
	Conf = &Config{}
)
// Config is the root configuration for rank-service, decoded from TOML
// (see cmd/*.toml for example documents).
type Config struct {
	Rank      *Rank
	Log       *log.Config
	BM        *bm.ServerConfig
	RPCServer *rpc.ServerConfig
	Verify    *verify.Config
	Tracer    *trace.Config
	MySQL     *DB
	Databus   *Databus
	Ecode     *ecode.Config
}

// DB groups the three MySQL pools used by the dao layer.
type DB struct {
	BilibiliArchive *sql.Config
	ArchiveStat     *sql.Config
	BilibiliTV      *sql.Config
}

// Databus groups the subscribed databus (message queue) topics.
type Databus struct {
	StatView    *databus.Config
	Archive     *databus.Config
	UgcTvBinlog *databus.Config
}

// Rank holds ranking-job options.
type Rank struct {
	// SwitchAll/SwitchIncr — names suggest full-rebuild vs incremental
	// toggles; semantics live in the service layer (not visible here).
	SwitchAll  bool
	SwitchIncr bool
	RowsLimit  int
	Ticker     time.Duration
	BatchSleep time.Duration
	// BatchStep is the mtime window width per pass (e.g. "24h" in config).
	BatchStep time.Duration
	// FilePath/FileName locate the pb dump, e.g. /data/backup/rank-service/rmap_%d.pb.
	FilePath string
	FileName string
}
// init registers the -conf flag before main calls flag.Parse.
func init() {
	flag.StringVar(&confPath, "conf", "", "default config path")
}
// Init loads configuration: from the local file when -conf was given,
// otherwise from the remote config center.
func Init() error {
	if confPath != "" {
		return local()
	}
	return remote()
}
// local decodes the TOML file pointed at by -conf into Conf.
func local() (err error) {
	_, err = toml.DecodeFile(confPath, &Conf)
	return
}
// remote pulls config from the config center, then watches for change
// events and hot-reloads Conf in a background goroutine.
func remote() (err error) {
	if client, err = conf.New(); err != nil {
		return
	}
	if err = load(); err != nil {
		return
	}
	go func() {
		for range client.Event() {
			log.Info("config reload")
			// Fix: the old code checked load() but logged the outer
			// named `err`, which is always nil here — capture the
			// actual reload error instead.
			if e := load(); e != nil {
				log.Error("config reload error (%v)", e)
			}
		}
	}()
	return
}
// load fetches the TOML document from the config center, decodes it
// into a fresh Config and, on success, copies it over the global Conf.
func load() (err error) {
	var (
		s       string
		ok      bool
		tmpConf *Config
	)
	if s, ok = client.Toml2(); !ok {
		return errors.New("load config center error")
	}
	if _, err = toml.Decode(s, &tmpConf); err != nil {
		return errors.New("could not decode config")
	}
	// Fix: an empty TOML document decodes successfully but leaves
	// tmpConf nil; dereferencing it would panic inside the watcher
	// goroutine and crash the process.
	if tmpConf == nil {
		return errors.New("empty config")
	}
	*Conf = *tmpConf
	return
}

View File

@@ -0,0 +1,56 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"dao_test.go",
"mysql_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/service/main/rank/conf:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"dao.go",
"mysql.go",
],
importpath = "go-common/app/service/main/rank/dao",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/rank/conf:go_default_library",
"//app/service/main/rank/model:go_default_library",
"//library/database/sql:go_default_library",
"//library/log:go_default_library",
"//library/time:go_default_library",
"//library/xstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,46 @@
package dao
import (
"context"
"go-common/app/service/main/rank/conf"
xsql "go-common/library/database/sql"
)
// Dao bundles the three MySQL handles the rank dao layer reads from.
type Dao struct {
	c         *conf.Config // retained config (currently only stored)
	dbArchive *xsql.DB     // bilibili_archive: archive + archive_type
	dbStat    *xsql.DB     // archive_stat_%02d sharded click counters
	dbTV      *xsql.DB     // bilibili_tv: ugc_archive audit flags
}
// New opens the three MySQL pools declared in c.MySQL and returns a
// ready-to-use Dao.
func New(c *conf.Config) (dao *Dao) {
	return &Dao{
		c:         c,
		dbArchive: xsql.NewMySQL(c.MySQL.BilibiliArchive),
		dbStat:    xsql.NewMySQL(c.MySQL.ArchiveStat),
		dbTV:      xsql.NewMySQL(c.MySQL.BilibiliTV),
	}
}
// Close releases all three database pools.
func (d *Dao) Close() {
	d.dbArchive.Close()
	d.dbStat.Close()
	d.dbTV.Close()
}
// Ping checks connectivity of every underlying database, returning the
// first failure encountered (archive, then stat, then TV).
func (d *Dao) Ping(c context.Context) (err error) {
	for _, db := range []*xsql.DB{d.dbArchive, d.dbStat, d.dbTV} {
		if err = db.Ping(c); err != nil {
			return
		}
	}
	return
}

View File

@@ -0,0 +1,36 @@
package dao
import (
"flag"
"os"
"testing"
"go-common/app/service/main/rank/conf"
)
var (
	// d is the package-wide Dao under test, initialized in TestMain.
	d *Dao
)

// TestMain boots a real Dao before running the dao tests: against the
// UAT config center when DEPLOY_ENV is set, otherwise against the local
// test config file.
func TestMain(m *testing.M) {
	if os.Getenv("DEPLOY_ENV") != "" {
		// NOTE(review): config-center token is hardcoded in the repo;
		// consider sourcing it from the environment instead.
		flag.Set("app_id", "main.search.rank-service")
		flag.Set("conf_token", "8da2368e2495e20a841b5125bf00b761")
		flag.Set("tree_id", "56749")
		flag.Set("conf_version", "server-1")
		flag.Set("deploy_env", "uat")
		flag.Set("conf_host", "config.bilibili.co")
		flag.Set("conf_path", "/tmp")
		flag.Set("region", "sh")
		flag.Set("zone", "sh001")
	} else {
		flag.Set("conf", "../cmd/test.toml")
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	d = New(conf.Conf)
	m.Run()
	os.Exit(0)
}

View File

@@ -0,0 +1,191 @@
package dao
import (
"context"
"database/sql"
"fmt"
"go-common/app/service/main/rank/model"
"go-common/library/log"
xtime "go-common/library/time"
"go-common/library/xstr"
)
// SQL templates. The *MtimeSQL variants page incrementally-changed rows
// through an mtime window, ordered by (mtime, id) so paging is stable.
// %s placeholders are filled via fmt.Sprintf (shard suffix / id lists)
// with server-generated values, not user input.
const (
	_maxArchiveIDSQL      = `SELECT MAX(id) FROM archive`
	_archiveMetasSQL      = `SELECT id,typeid,pubtime FROM archive WHERE id>? ORDER BY id LIMIT ?`
	_archiveMetasMtimeSQL = `SELECT id,typeid,pubtime FROM archive WHERE id>? AND mtime BETWEEN ? AND ? ORDER BY mtime,id LIMIT ?`
	_archiveTypesSQL      = `SELECT id,pid FROM archive_type WHERE id in (%s)`
	_archiveStatsSQL      = `SELECT aid,click FROM archive_stat_%s WHERE aid in (%s)`
	_archiveStatsMtimeSQL = `SELECT id,aid,click FROM archive_stat_%s WHERE id > ? AND mtime BETWEEN ? AND ? ORDER BY mtime,id LIMIT ?`
	_archiveTVsSQL        = `SELECT aid,result,deleted,valid FROM ugc_archive WHERE aid in (%s)`
	_archiveTVsMtimeSQL   = `SELECT id,aid,result,deleted,valid FROM ugc_archive WHERE id > ? AND mtime BETWEEN ? AND ? ORDER BY mtime,id LIMIT ?`
	// archive_stat is sharded into 100 tables keyed by aid % 100.
	_archiveStatSharding = 100
)
// MaxOid returns the largest archive id.
// NOTE(review): MAX(id) on an empty table yields a NULL row, which Scan
// reports as a conversion error rather than sql.ErrNoRows — verify the
// empty-table path if it matters.
func (d *Dao) MaxOid(c context.Context) (oid int64, err error) {
	row := d.dbArchive.QueryRow(c, _maxArchiveIDSQL)
	if err = row.Scan(&oid); err != nil {
		if err == sql.ErrNoRows {
			// No row is treated as oid=0, not an error.
			err = nil
		} else {
			log.Error("row.Scan error(%v)", err)
		}
	}
	return
}
// ArchiveMetas pages archive rows by primary key: up to limit rows with
// id strictly greater than id, ordered by id ascending.
func (d *Dao) ArchiveMetas(c context.Context, id int64, limit int) ([]*model.ArchiveMeta, error) {
	rows, err := d.dbArchive.Query(c, _archiveMetasSQL, id, limit)
	if err != nil {
		// Fix: the format string had no verb for err ("error()"), so the
		// error was appended as %!(EXTRA ...) noise — log it via %v.
		log.Error("d.dbArchive.Query(%s,%d,%d) error(%v)", _archiveMetasSQL, id, limit, err)
		return nil, err
	}
	defer rows.Close()
	as := make([]*model.ArchiveMeta, 0)
	for rows.Next() {
		a := new(model.ArchiveMeta)
		if err = rows.Scan(&a.ID, &a.Typeid, &a.Pubtime); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return nil, err
		}
		as = append(as, a)
	}
	return as, rows.Err()
}
// ArchiveMetasIncrs pages incrementally-changed archive rows: up to
// limit rows with id > aid and mtime in [begin, end], ordered by
// (mtime, id).
func (d *Dao) ArchiveMetasIncrs(c context.Context, aid int64, begin, end xtime.Time, limit int) ([]*model.ArchiveMeta, error) {
	rows, err := d.dbArchive.Query(c, _archiveMetasMtimeSQL, aid, begin, end, limit)
	if err != nil {
		// Fix: err had no format verb ("error()"), and begin/end are
		// xtime.Time (an integer type) which %s rendered as %!s(...)
		// noise — use %v for all three.
		log.Error("d.dbArchive.Query(%s,%d,%v,%v,%d) error(%v)", _archiveMetasMtimeSQL, aid, begin, end, limit, err)
		return nil, err
	}
	defer rows.Close()
	as := make([]*model.ArchiveMeta, 0)
	for rows.Next() {
		a := new(model.ArchiveMeta)
		if err = rows.Scan(&a.ID, &a.Typeid, &a.Pubtime); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return nil, err
		}
		as = append(as, a)
	}
	return as, rows.Err()
}
// ArchiveTypes loads id->parent-id rows from archive_type for the given
// type ids, keyed by id. An empty ids slice returns an empty map
// (previously it produced an invalid "IN ()" query and a SQL error).
func (d *Dao) ArchiveTypes(c context.Context, ids []int64) (map[int64]*model.ArchiveType, error) {
	as := make(map[int64]*model.ArchiveType)
	if len(ids) == 0 {
		return as, nil
	}
	idsStr := xstr.JoinInts(ids)
	rows, err := d.dbArchive.Query(c, fmt.Sprintf(_archiveTypesSQL, idsStr))
	if err != nil {
		log.Error("d.dbArchive.Query(%s) error(%v)", fmt.Sprintf(_archiveTypesSQL, idsStr), err)
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		a := new(model.ArchiveType)
		if err = rows.Scan(&a.ID, &a.Pid); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return nil, err
		}
		as[a.ID] = a
	}
	return as, rows.Err()
}
// ArchiveStats loads click counters for the given aids, fanning out to
// the archive_stat_%02d shards (aid % 100). The result is keyed by aid.
func (d *Dao) ArchiveStats(c context.Context, aids []int64) (map[int64]*model.ArchiveStat, error) {
	// Bucket aids by shard.
	tableMap := make(map[int64][]int64)
	for _, aid := range aids {
		mod := aid % _archiveStatSharding
		tableMap[mod] = append(tableMap[mod], aid)
	}
	as := make(map[int64]*model.ArchiveStat)
	for tbl, shardAids := range tableMap {
		// Fix: the old loop deferred rows.Close() inside the loop, so
		// every shard's rows stayed open until the function returned.
		// The per-shard helper scopes the defer to one iteration.
		if err := d.archiveStatsShard(c, tbl, shardAids, as); err != nil {
			return nil, err
		}
	}
	return as, nil
}

// archiveStatsShard queries one archive_stat shard and merges its rows into as.
func (d *Dao) archiveStatsShard(c context.Context, tbl int64, aids []int64, as map[int64]*model.ArchiveStat) error {
	aidsStr := xstr.JoinInts(aids)
	rows, err := d.dbStat.Query(c, fmt.Sprintf(_archiveStatsSQL, fmt.Sprintf("%02d", tbl), aidsStr))
	if err != nil {
		log.Error("d.dbStat.Query(%s) error(%v)", fmt.Sprintf(_archiveStatsSQL, fmt.Sprintf("%02d", tbl), aidsStr), err)
		return err
	}
	defer rows.Close()
	for rows.Next() {
		a := new(model.ArchiveStat)
		if err = rows.Scan(&a.Aid, &a.Click); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return err
		}
		as[a.Aid] = a
	}
	return rows.Err()
}
// ArchiveStatsIncrs pages incrementally-changed stat rows from one
// archive_stat shard: up to limit rows with id > id and mtime in
// [begin, end], ordered by (mtime, id).
func (d *Dao) ArchiveStatsIncrs(c context.Context, tbl int, id int64, begin, end xtime.Time, limit int) ([]*model.ArchiveStat, error) {
	rows, err := d.dbStat.Query(c, fmt.Sprintf(_archiveStatsMtimeSQL, fmt.Sprintf("%02d", tbl)), id, begin, end, limit)
	if err != nil {
		// Fix: begin/end are xtime.Time (an integer type); %s rendered
		// them as %!s(...) noise — use %v.
		log.Error("d.dbStat.Query(%s,%d,%v,%v,%d) error(%v)", fmt.Sprintf(_archiveStatsMtimeSQL, fmt.Sprintf("%02d", tbl)), id, begin, end, limit, err)
		return nil, err
	}
	defer rows.Close()
	as := make([]*model.ArchiveStat, 0)
	for rows.Next() {
		a := new(model.ArchiveStat)
		if err = rows.Scan(&a.ID, &a.Aid, &a.Click); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return nil, err
		}
		as = append(as, a)
	}
	return as, rows.Err()
}
// ArchiveTVs loads TV-side audit rows from ugc_archive for the given
// aids, keyed by aid. An empty aids slice returns an empty map
// (previously it produced an invalid "IN ()" query and a SQL error).
func (d *Dao) ArchiveTVs(c context.Context, aids []int64) (map[int64]*model.ArchiveTv, error) {
	as := make(map[int64]*model.ArchiveTv)
	if len(aids) == 0 {
		return as, nil
	}
	aidsStr := xstr.JoinInts(aids)
	rows, err := d.dbTV.Query(c, fmt.Sprintf(_archiveTVsSQL, aidsStr))
	if err != nil {
		log.Error("d.dbTV.Query(%s) error(%v)", fmt.Sprintf(_archiveTVsSQL, aidsStr), err)
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		a := new(model.ArchiveTv)
		if err = rows.Scan(&a.Aid, &a.Result, &a.Deleted, &a.Valid); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return nil, err
		}
		as[a.Aid] = a
	}
	return as, rows.Err()
}
// ArchiveTVsIncrs pages incrementally-changed ugc_archive rows: up to
// limit rows with id > id and mtime in [begin, end], ordered by
// (mtime, id).
func (d *Dao) ArchiveTVsIncrs(c context.Context, id int64, begin, end xtime.Time, limit int) ([]*model.ArchiveTv, error) {
	rows, err := d.dbTV.Query(c, _archiveTVsMtimeSQL, id, begin, end, limit)
	if err != nil {
		// Fix: begin/end are xtime.Time (an integer type); %s rendered
		// them as %!s(...) noise — use %v.
		log.Error("d.dbTV.Query(%s,%d,%v,%v,%d) error(%v)", _archiveTVsMtimeSQL, id, begin, end, limit, err)
		return nil, err
	}
	defer rows.Close()
	as := make([]*model.ArchiveTv, 0)
	for rows.Next() {
		a := new(model.ArchiveTv)
		if err = rows.Scan(&a.ID, &a.Aid, &a.Result, &a.Deleted, &a.Valid); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return nil, err
		}
		as = append(as, a)
	}
	return as, rows.Err()
}

View File

@@ -0,0 +1,132 @@
package dao
import (
"context"
"testing"
xtime "go-common/library/time"
"github.com/smartystreets/goconvey/convey"
)
// Generated-style goconvey smoke tests: each calls one dao method
// against the live databases wired up in TestMain and asserts only
// "no error, non-nil result". They require real DB connectivity.
func TestDaoMaxOid(t *testing.T) {
	var (
		c = context.Background()
	)
	convey.Convey("MaxOid", t, func(ctx convey.C) {
		oid, err := d.MaxOid(c)
		ctx.Convey("Then err should be nil.oid should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(oid, convey.ShouldNotBeNil)
		})
	})
}

func TestDaoArchiveMetas(t *testing.T) {
	var (
		c     = context.Background()
		id    = int64(1)
		limit = int(1)
	)
	convey.Convey("ArchiveMetas", t, func(ctx convey.C) {
		p1, err := d.ArchiveMetas(c, id, limit)
		ctx.Convey("Then err should be nil.p1 should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(p1, convey.ShouldNotBeNil)
		})
	})
}

func TestDaoArchiveMetasIncrs(t *testing.T) {
	var (
		c     = context.Background()
		id    = int64(1)
		begin xtime.Time // zero value: window start at epoch
		end   xtime.Time
		limit = int(1)
	)
	convey.Convey("ArchiveMetasIncrs", t, func(ctx convey.C) {
		p1, err := d.ArchiveMetasIncrs(c, id, begin, end, limit)
		ctx.Convey("Then err should be nil.p1 should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(p1, convey.ShouldNotBeNil)
		})
	})
}

func TestDaoArchiveTypes(t *testing.T) {
	var (
		c   = context.Background()
		ids = []int64{1}
	)
	convey.Convey("ArchiveTypes", t, func(ctx convey.C) {
		p1, err := d.ArchiveTypes(c, ids)
		ctx.Convey("Then err should be nil.p1 should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(p1, convey.ShouldNotBeNil)
		})
	})
}

func TestDaoArchiveStats(t *testing.T) {
	var (
		c    = context.Background()
		aids = []int64{1}
	)
	convey.Convey("ArchiveStats", t, func(ctx convey.C) {
		p1, err := d.ArchiveStats(c, aids)
		ctx.Convey("Then err should be nil.p1 should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(p1, convey.ShouldNotBeNil)
		})
	})
}

func TestDaoArchiveStatsIncrs(t *testing.T) {
	var (
		c     = context.Background()
		tbl   = int(1)
		id    = int64(1)
		begin xtime.Time
		end   xtime.Time
		limit = int(1)
	)
	convey.Convey("ArchiveStatsIncrs", t, func(ctx convey.C) {
		p1, err := d.ArchiveStatsIncrs(c, tbl, id, begin, end, limit)
		ctx.Convey("Then err should be nil.p1 should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(p1, convey.ShouldNotBeNil)
		})
	})
}

func TestDaoArchiveTVs(t *testing.T) {
	var (
		c    = context.Background()
		aids = []int64{1}
	)
	convey.Convey("ArchiveTVs", t, func(ctx convey.C) {
		p1, err := d.ArchiveTVs(c, aids)
		ctx.Convey("Then err should be nil.p1 should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(p1, convey.ShouldNotBeNil)
		})
	})
}

func TestDaoArchiveTVsIncrs(t *testing.T) {
	var (
		c     = context.Background()
		id    = int64(1)
		begin xtime.Time
		end   xtime.Time
		limit = int(1)
	)
	convey.Convey("ArchiveTVsIncrs", t, func(ctx convey.C) {
		p1, err := d.ArchiveTVsIncrs(c, id, begin, end, limit)
		ctx.Convey("Then err should be nil.p1 should not be nil.", func(ctx convey.C) {
			ctx.So(err, convey.ShouldBeNil)
			ctx.So(p1, convey.ShouldNotBeNil)
		})
	})
}

View File

@@ -0,0 +1,58 @@
load(
"@io_bazel_rules_go//proto:def.bzl",
"go_proto_library",
)
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
proto_library(
name = "model_proto",
srcs = ["model.proto"],
tags = ["automanaged"],
deps = ["@gogo_special_proto//github.com/gogo/protobuf/gogoproto"],
)
go_proto_library(
name = "model_go_proto",
compilers = ["@io_bazel_rules_go//proto:gogofast_proto"],
importpath = "go-common/app/service/main/rank/model",
proto = ":model_proto",
tags = ["automanaged"],
deps = [
"//library/time:go_default_library",
"@com_github_gogo_protobuf//gogoproto:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["model.go"],
embed = [":model_go_proto"],
importpath = "go-common/app/service/main/rank/model",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/time:go_default_library",
"@com_github_gogo_protobuf//gogoproto:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,191 @@
package model
import (
"database/sql/driver"
"encoding/json"
"strconv"
"time"
xtime "go-common/library/time"
)
const (
	// BusinessArchive is the archive business name used in requests.
	BusinessArchive = "archive"
	// RankOrderByDesc sorts descending.
	RankOrderByDesc = "desc"
	// RankOrderByAsc sorts ascending.
	RankOrderByAsc = "asc"
	// SyncInsert/SyncUpdate/SyncDelete — presumably the binlog action
	// names carried in CanalMsg.Action; confirm against the consumer.
	SyncInsert = "insert"
	SyncUpdate = "update"
	SyncDelete = "delete"
	// TimeFormat is the wall-clock layout used for time strings.
	TimeFormat = "2006-01-02 15:04:05"
	// FlagExist marks a record as present.
	FlagExist = true
)

// ArchiveMeta is one archive row plus optionally joined type/stat/TV
// data (the embedded pointers may be nil until populated).
type ArchiveMeta struct {
	ID      int64 `json:"id"`
	Aid     int64 `json:"aid"`
	Typeid  int64 `json:"typeid"`
	Pubtime Stime `json:"pubtime"`
	*ArchiveType
	*ArchiveStat
	*ArchiveTv
}

// ArchiveType is one archive_type row (id and parent id).
type ArchiveType struct {
	ID  int64 `json:"id"`
	Pid int64 `json:"pid"`
}

// ArchiveStat is one archive_stat shard row (click counter per aid).
type ArchiveStat struct {
	ID    int64 `json:"id"`
	Aid   int64 `json:"aid"`
	Click int64 `json:"click"`
}

// ArchiveTv is one ugc_archive row carrying TV-side audit flags.
type ArchiveTv struct {
	ID      int64 `json:"id"`
	Aid     int64 `json:"aid"`
	Result  int8  `json:"result"`
	Deleted int8  `json:"deleted"`
	Valid   int8  `json:"valid"`
}

// StatViewMsg is a StatView-T databus message payload.
type StatViewMsg struct {
	Type      string `json:"type"`
	ID        int64  `json:"id"`
	Count     int    `json:"count"`
	Timestamp int64  `json:"timestamp"`
}

// CanalMsg is a binlog-sync message; New/Old are decoded lazily by the
// consumer (hence json.RawMessage).
type CanalMsg struct {
	Action string          `json:"action"`
	Table  string          `json:"table"`
	New    json.RawMessage `json:"new"`
	Old    json.RawMessage `json:"old"`
}
// SetPubtime converts Pubtime to the library time type.
// NOTE(review): despite the Set* names, these three methods are pure
// converters — they return a value and mutate nothing.
func (a *ArchiveMeta) SetPubtime() xtime.Time {
	return xtime.Time(a.Pubtime)
}

// SetPid narrows the parent type id to int16.
func (a *ArchiveType) SetPid() int16 {
	return int16(a.Pid)
}

// SetClick narrows the click counter to int.
func (a *ArchiveStat) SetClick() int {
	return int(a.Click)
}

// DoReq is the HTTP request for triggering a rank job over an id/time range.
type DoReq struct {
	Business  string `form:"business" validate:"required"`
	Action    string `form:"action" validate:"required"`
	MinID     int64  `form:"minid"`
	MaxID     int64  `form:"maxid"`
	BeginTime string `form:"begintime"`
	EndTime   string `form:"endtime"`
}

// MgetReq asks for rank fields of multiple oids.
type MgetReq struct {
	Business string  `form:"business" validate:"required"`
	Oids     []int64 `form:"oids,split" validate:"required"`
}

// MgetResp resp of mget, keyed by oid.
type MgetResp struct {
	List map[int64]*Field `json:"list"`
}

// SortReq asks for the oids filtered and ordered by a field.
type SortReq struct {
	Business string            `form:"business" validate:"required"`
	Field    string            `form:"field" validate:"required"`
	Order    string            `form:"order" validate:"required"`
	Filters  map[string]string `form:"filters" validate:"required"`
	Oids     []int64           `form:"oids,split" validate:"required"`
	Pn       int               `form:"pn"` // page number
	Ps       int               `form:"ps"` // page size
}

// SortResp carries the ordered oids plus paging info.
type SortResp struct {
	Result []int64 `json:"result"`
	Page   *Page   `json:"page"`
}

// GroupReq asks for the oids bucketed by a field.
type GroupReq struct {
	Business string  `form:"business" validate:"required"`
	Field    string  `form:"field" validate:"required"`
	Oids     []int64 `form:"Oids,split" validate:"required"`
}

// GroupResp carries the bucket list.
type GroupResp struct {
	List []*Group `json:"list"`
}

// Group is one bucket: field value and member count.
type Group struct {
	Key   string `json:"key"`
	Count int    `json:"count"`
}

// Page Pager
type Page struct {
	Pn    int `json:"pn"`
	Ps    int `json:"ps"`
	Total int `json:"total"`
}
// Stime is a Unix-timestamp wrapper that scans from MySQL (time.Time or
// numeric string), stores back as a time.Time, and decodes from JSON as
// either an integer timestamp or a quoted "2006-01-02 15:04:05" string
// interpreted in the local time zone.
type Stime int64

// Scan implements sql.Scanner. Unsupported source types are silently
// ignored, leaving the receiver unchanged.
func (st *Stime) Scan(src interface{}) (err error) {
	switch v := src.(type) {
	case time.Time:
		*st = Stime(v.Unix())
	case string:
		var ts int64
		ts, err = strconv.ParseInt(v, 10, 64)
		// Assigned even on parse failure (zero), matching prior behavior.
		*st = Stime(ts)
	}
	return
}

// Value implements driver.Valuer, storing the timestamp as a time.Time.
func (st Stime) Value() (driver.Value, error) {
	return time.Unix(int64(st), 0), nil
}

// UnmarshalJSON implements json.Unmarshaler. It first tries a bare
// integer timestamp, then a quoted local-time string; input matching
// neither form is deliberately ignored (never an error).
func (st *Stime) UnmarshalJSON(data []byte) error {
	if ts, perr := strconv.ParseInt(string(data), 10, 64); perr == nil {
		*st = Stime(ts)
		return nil
	}
	if t, perr := time.ParseInLocation(`"2006-01-02 15:04:05"`, string(data), time.Local); perr == nil {
		*st = Stime(t.Unix())
	}
	return nil
}

View File

@@ -0,0 +1,679 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: model.proto
/*
Package model is a generated protocol buffer package.
v0.1.0
收藏夹信息
It is generated from these files:
model.proto
It has these top-level messages:
Field
Fields
*/
package model
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import go_common_library_time "go-common/library/time"
import strings "strings"
import reflect "reflect"
import io "io"
// NOTE(review): generated by protoc-gen-gogo from model.proto — do not
// edit by hand; comments here will be lost on regeneration.

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// Field is the wire representation of one ranked archive's attributes.
type Field struct {
	Flag    bool                        `protobuf:"varint,1,opt,name=Flag,proto3" json:"flag"`
	Oid     int64                       `protobuf:"varint,2,opt,name=Oid,proto3" json:"oid"`
	Pid     int16                       `protobuf:"varint,3,opt,name=Pid,proto3,casttype=int16" json:"pid"`
	Click   int                         `protobuf:"varint,4,opt,name=Click,proto3,casttype=int" json:"click"`
	Pubtime go_common_library_time.Time `protobuf:"varint,5,opt,name=Pubtime,proto3,casttype=go-common/library/time.Time" json:"pubtime"`
	Result  int8                        `protobuf:"varint,6,opt,name=Result,proto3,casttype=int8" json:"result"`
	Deleted int8                        `protobuf:"varint,7,opt,name=Deleted,proto3,casttype=int8" json:"deleted"`
	Valid   int8                        `protobuf:"varint,8,opt,name=Valid,proto3,casttype=int8" json:"valid"`
}

func (m *Field) Reset()                    { *m = Field{} }
func (*Field) ProtoMessage()               {}
func (*Field) Descriptor() ([]byte, []int) { return fileDescriptorModel, []int{0} }

// Fields is a container message holding a list of Field.
type Fields struct {
	Fields []*Field `protobuf:"bytes,1,rep,name=Fields" json:"Fields,omitempty"`
}

func (m *Fields) Reset()                    { *m = Fields{} }
func (*Fields) ProtoMessage()               {}
func (*Fields) Descriptor() ([]byte, []int) { return fileDescriptorModel, []int{1} }

// init registers both message types with the proto type registry.
func init() {
	proto.RegisterType((*Field)(nil), "model.Field")
	proto.RegisterType((*Fields)(nil), "model.Fields")
}
// Marshal encodes the Field into a freshly allocated protobuf wire buffer.
func (m *Field) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	var n int
	if n, err = m.MarshalTo(buf); err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo writes the Field to dAtA in protobuf wire format and returns
// the number of bytes written. dAtA must hold at least m.Size() bytes.
// Zero-valued fields are omitted (proto3 semantics).
func (m *Field) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Flag {
		dAtA[i] = 0x8 // key: field 1, varint
		i++
		if m.Flag {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i++
	}
	if m.Oid != 0 {
		dAtA[i] = 0x10 // key: field 2, varint
		i++
		i = encodeVarintModel(dAtA, i, uint64(m.Oid))
	}
	if m.Pid != 0 {
		dAtA[i] = 0x18 // key: field 3, varint
		i++
		i = encodeVarintModel(dAtA, i, uint64(m.Pid))
	}
	if m.Click != 0 {
		dAtA[i] = 0x20 // key: field 4, varint
		i++
		i = encodeVarintModel(dAtA, i, uint64(m.Click))
	}
	if m.Pubtime != 0 {
		dAtA[i] = 0x28 // key: field 5, varint
		i++
		i = encodeVarintModel(dAtA, i, uint64(m.Pubtime))
	}
	if m.Result != 0 {
		dAtA[i] = 0x30 // key: field 6, varint
		i++
		i = encodeVarintModel(dAtA, i, uint64(m.Result))
	}
	if m.Deleted != 0 {
		dAtA[i] = 0x38 // key: field 7, varint
		i++
		i = encodeVarintModel(dAtA, i, uint64(m.Deleted))
	}
	if m.Valid != 0 {
		dAtA[i] = 0x40 // key: field 8, varint
		i++
		i = encodeVarintModel(dAtA, i, uint64(m.Valid))
	}
	return i, nil
}
// Marshal encodes the Fields list into a freshly allocated wire buffer.
func (m *Fields) Marshal() (dAtA []byte, err error) {
	buf := make([]byte, m.Size())
	var n int
	if n, err = m.MarshalTo(buf); err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalTo writes every element as a length-delimited field-1 record and
// returns the number of bytes written.
func (m *Fields) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if len(m.Fields) > 0 {
		for _, msg := range m.Fields {
			dAtA[i] = 0xa // key: field 1, length-delimited
			i++
			i = encodeVarintModel(dAtA, i, uint64(msg.Size()))
			n, err := msg.MarshalTo(dAtA[i:])
			if err != nil {
				return 0, err
			}
			i += n
		}
	}
	return i, nil
}
// encodeVarintModel writes v at dAtA[offset:] as a protobuf base-128 varint
// and returns the offset just past the written bytes.
func encodeVarintModel(dAtA []byte, offset int, v uint64) int {
	// Emit 7-bit groups low-to-high, setting the continuation bit on every
	// byte except the final one.
	for ; v >= 0x80; v >>= 7 {
		dAtA[offset] = uint8(v) | 0x80
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}
// Size returns the encoded wire size of the Field in bytes, mirroring the
// zero-value skipping of MarshalTo.
func (m *Field) Size() (n int) {
	var l int
	_ = l
	if m.Flag {
		n += 2 // 1-byte key + 1-byte bool
	}
	if m.Oid != 0 {
		n += 1 + sovModel(uint64(m.Oid))
	}
	if m.Pid != 0 {
		n += 1 + sovModel(uint64(m.Pid))
	}
	if m.Click != 0 {
		n += 1 + sovModel(uint64(m.Click))
	}
	if m.Pubtime != 0 {
		n += 1 + sovModel(uint64(m.Pubtime))
	}
	if m.Result != 0 {
		n += 1 + sovModel(uint64(m.Result))
	}
	if m.Deleted != 0 {
		n += 1 + sovModel(uint64(m.Deleted))
	}
	if m.Valid != 0 {
		n += 1 + sovModel(uint64(m.Valid))
	}
	return n
}
// Size returns the encoded wire size of the Fields list: each element costs
// a 1-byte key, a varint length prefix and its own encoded size.
func (m *Fields) Size() (n int) {
	var l int
	_ = l
	if len(m.Fields) > 0 {
		for _, e := range m.Fields {
			l = e.Size()
			n += 1 + l + sovModel(uint64(l))
		}
	}
	return n
}
// sovModel returns the number of bytes needed to varint-encode x
// (always at least 1, at most 10).
func sovModel(x uint64) (n int) {
	n = 1
	for x >= 0x80 {
		x >>= 7
		n++
	}
	return n
}
// sozModel returns the varint size of x after zig-zag encoding (sint64 wire form).
func sozModel(x uint64) (n int) {
	return sovModel(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// String renders the Field in the generated one-line debug form; a nil
// receiver prints as "nil".
func (this *Field) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&Field{`,
		`Flag:` + fmt.Sprintf("%v", this.Flag) + `,`,
		`Oid:` + fmt.Sprintf("%v", this.Oid) + `,`,
		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
		`Click:` + fmt.Sprintf("%v", this.Click) + `,`,
		`Pubtime:` + fmt.Sprintf("%v", this.Pubtime) + `,`,
		`Result:` + fmt.Sprintf("%v", this.Result) + `,`,
		`Deleted:` + fmt.Sprintf("%v", this.Deleted) + `,`,
		`Valid:` + fmt.Sprintf("%v", this.Valid) + `,`,
	}, "")
	return s
}
// String renders the Fields list in the generated one-line debug form; a nil
// receiver prints as "nil".
func (this *Fields) String() string {
	if this == nil {
		return "nil"
	}
	// The generated original piped the output through a no-op
	// strings.Replace("Field" -> "Field"); formatting directly is identical.
	return strings.Join([]string{`&Fields{`,
		`Fields:` + fmt.Sprintf("%v", this.Fields) + `,`,
		`}`,
	}, "")
}
// valueToStringModel formats a (nil-able) reference value for debug output:
// "nil" for a nil pointer, otherwise "*<pointee>".
func valueToStringModel(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	return fmt.Sprintf("*%v", reflect.Indirect(rv).Interface())
}
// Unmarshal decodes dAtA (protobuf wire format) into m. Unknown fields are
// skipped via skipModel; each known field is a varint whose bytes are OR-ed
// into the (possibly narrower) target type, matching gogo's generated logic.
func (m *Field) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// decode the key varint (field number << 3 | wire type)
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowModel
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Field: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // Flag (bool): any non-zero varint is true
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Flag", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Flag = bool(v != 0)
		case 2: // Oid (int64)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Oid", wireType)
			}
			m.Oid = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Oid |= (int64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3: // Pid (int16 casttype)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
			}
			m.Pid = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Pid |= (int16(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 4: // Click (int casttype)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Click", wireType)
			}
			m.Click = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Click |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 5: // Pubtime (go-common/library/time.Time casttype)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Pubtime", wireType)
			}
			m.Pubtime = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Pubtime |= (go_common_library_time.Time(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 6: // Result (int8 casttype)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
			}
			m.Result = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Result |= (int8(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 7: // Deleted (int8 casttype)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
			}
			m.Deleted = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Deleted |= (int8(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 8: // Valid (int8 casttype)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Valid", wireType)
			}
			m.Valid = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Valid |= (int8(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			// unknown field: rewind to the key and skip the whole record
			iNdEx = preIndex
			skippy, err := skipModel(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthModel
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes dAtA (protobuf wire format) into m, appending one Field
// per length-delimited field-1 record. Unknown fields are skipped.
func (m *Fields) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		// decode the key varint (field number << 3 | wire type)
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowModel
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Fields: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Fields: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1: // repeated Field, length-delimited
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowModel
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthModel
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Fields = append(m.Fields, &Field{})
			if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// unknown field: rewind to the key and skip the whole record
			iNdEx = preIndex
			skippy, err := skipModel(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthModel
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}
	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipModel returns the number of bytes occupied by the wire record that
// starts at dAtA[0] (key included), so callers can step over unknown fields.
// Groups (wire types 3/4) are skipped recursively.
func skipModel(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		var wire uint64
		// decode the key varint
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowModel
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0: // varint: consume bytes until the continuation bit clears
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowModel
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
			return iNdEx, nil
		case 1: // fixed64
			iNdEx += 8
			return iNdEx, nil
		case 2: // length-delimited: read the length, then jump past the payload
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowModel
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			iNdEx += length
			if length < 0 {
				return 0, ErrInvalidLengthModel
			}
			return iNdEx, nil
		case 3: // group start: skip nested records until the matching group end
			for {
				var innerWire uint64
				var start int = iNdEx
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return 0, ErrIntOverflowModel
					}
					if iNdEx >= l {
						return 0, io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					innerWire |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				innerWireType := int(innerWire & 0x7)
				if innerWireType == 4 {
					break
				}
				next, err := skipModel(dAtA[start:])
				if err != nil {
					return 0, err
				}
				iNdEx = start + next
			}
			return iNdEx, nil
		case 4: // group end (only reachable for a bare end marker)
			return iNdEx, nil
		case 5: // fixed32
			iNdEx += 4
			return iNdEx, nil
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
	}
	panic("unreachable")
}
// Sentinel errors returned by the generated size/skip/unmarshal helpers.
var (
	// ErrInvalidLengthModel reports a negative length prefix in the input.
	ErrInvalidLengthModel = fmt.Errorf("proto: negative length found during unmarshaling")
	// ErrIntOverflowModel reports a varint wider than 64 bits.
	ErrIntOverflowModel = fmt.Errorf("proto: integer overflow")
)
// init registers the compressed descriptor of model.proto with the runtime.
func init() { proto.RegisterFile("model.proto", fileDescriptorModel) }

// fileDescriptorModel is the gzip-compressed FileDescriptorProto for
// model.proto. Generated data — do not edit by hand.
var fileDescriptorModel = []byte{
	// 376 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x91, 0xcf, 0x8e, 0xd3, 0x30,
	0x10, 0x87, 0xeb, 0x4d, 0x9d, 0x54, 0x5e, 0x0e, 0x60, 0x2e, 0xe6, 0x8f, 0xec, 0xa8, 0x42, 0x50,
	0xad, 0xd4, 0x54, 0x80, 0x84, 0x38, 0x97, 0xd5, 0x5e, 0x59, 0x59, 0x88, 0x7b, 0x12, 0x7b, 0x83,
	0x85, 0x13, 0x57, 0x59, 0x07, 0x89, 0x1b, 0x8f, 0xc1, 0x23, 0xed, 0x71, 0x4f, 0x88, 0x93, 0x45,
	0xc3, 0x2d, 0x8f, 0x90, 0xd3, 0xca, 0x4e, 0xdb, 0xdb, 0xcc, 0xf7, 0xfb, 0x46, 0x23, 0x7b, 0xd0,
	0x79, 0x6d, 0x84, 0xd4, 0xd9, 0xae, 0x35, 0xd6, 0x60, 0x18, 0x9a, 0xe7, 0xeb, 0x4a, 0xd9, 0x6f,
	0x5d, 0x91, 0x95, 0xa6, 0xde, 0x54, 0xa6, 0x32, 0x9b, 0x90, 0x16, 0xdd, 0x4d, 0xe8, 0x42, 0x13,
	0xaa, 0x69, 0x6a, 0xf9, 0xe7, 0x0c, 0xc1, 0x2b, 0x25, 0xb5, 0xc0, 0x2f, 0xd1, 0xfc, 0x4a, 0xe7,
	0x15, 0x01, 0x29, 0x58, 0x2d, 0xb6, 0x8b, 0xc1, 0xb1, 0xf9, 0x8d, 0xce, 0x2b, 0x1e, 0x28, 0x7e,
	0x86, 0xa2, 0xcf, 0x4a, 0x90, 0xb3, 0x14, 0xac, 0xa2, 0x6d, 0x32, 0x38, 0x16, 0x19, 0x25, 0xb8,
	0x67, 0x78, 0x89, 0xa2, 0x6b, 0x25, 0x48, 0x94, 0x82, 0x15, 0xdc, 0x3e, 0xf6, 0xd1, 0x4e, 0x89,
	0xd1, 0x31, 0xa8, 0x1a, 0xfb, 0xf6, 0x03, 0xf7, 0x21, 0x7e, 0x8d, 0xe0, 0x27, 0xad, 0xca, 0xef,
	0x64, 0x7e, 0xb2, 0x60, 0xe9, 0xc1, 0xe8, 0x58, 0xa4, 0x1a, 0xcb, 0xa7, 0x18, 0x5f, 0xa2, 0xe4,
	0xba, 0x2b, 0xac, 0xaa, 0x25, 0x81, 0x61, 0xd5, 0xc5, 0xe0, 0x58, 0xb2, 0x9b, 0xd0, 0xe8, 0xd8,
	0x8b, 0xca, 0xac, 0x4b, 0x53, 0xd7, 0xa6, 0xd9, 0x68, 0x55, 0xb4, 0x79, 0xfb, 0x73, 0xe3, 0x93,
	0xec, 0x8b, 0xaa, 0x25, 0x3f, 0x8e, 0xe2, 0x0b, 0x14, 0x73, 0x79, 0xdb, 0x69, 0x4b, 0xe2, 0xb0,
	0x0e, 0x0f, 0x8e, 0xc5, 0x6d, 0x20, 0xa3, 0x63, 0x73, 0xd5, 0xd8, 0x8f, 0xfc, 0x60, 0xe0, 0x35,
	0x4a, 0x2e, 0xa5, 0x96, 0x56, 0x0a, 0x92, 0x04, 0xf9, 0xa9, 0xdf, 0x28, 0x26, 0x74, 0xb2, 0x8f,
	0x0e, 0x7e, 0x83, 0xe0, 0xd7, 0x5c, 0x2b, 0x41, 0x16, 0x41, 0x7e, 0xe2, 0x1f, 0xf2, 0xc3, 0x83,
	0x93, 0x3a, 0xe5, 0xcb, 0x0c, 0xc5, 0xe1, 0x5f, 0x6f, 0xf1, 0xab, 0x63, 0x45, 0x40, 0x1a, 0xad,
	0xce, 0xdf, 0x3d, 0xca, 0xa6, 0xb3, 0x05, 0xc8, 0x0f, 0xd9, 0x36, 0xbd, 0xdb, 0xd3, 0xd9, 0xfd,
	0x9e, 0xce, 0xfe, 0xee, 0xe9, 0xec, 0x57, 0x4f, 0xc1, 0x5d, 0x4f, 0xc1, 0x7d, 0x4f, 0xc1, 0xbf,
	0x9e, 0x82, 0xdf, 0xff, 0xe9, 0xac, 0x88, 0xc3, 0xc5, 0xde, 0x3f, 0x04, 0x00, 0x00, 0xff, 0xff,
	0x09, 0x14, 0xcb, 0x4c, 0xf6, 0x01, 0x00, 0x00,
}

View File

@@ -0,0 +1,32 @@
syntax = "proto3";
/*
 * v0.1.0
 * Favorites information (收藏夹信息)
 */
package model;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.goproto_enum_prefix_all) = false;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.goproto_stringer_all) = false;
option (gogoproto.stringer_all) = true;
// Field is one rankable archive row; casttype options narrow the Go types.
message Field {
  bool Flag = 1 [(gogoproto.jsontag) = "flag"];                 // archive exists
  int64 Oid = 2 [(gogoproto.jsontag) = "oid"];                  // object (archive) id
  int32 Pid = 3 [(gogoproto.jsontag) = "pid",(gogoproto.casttype) = "int16"];   // parent type id
  int32 Click = 4 [(gogoproto.jsontag) = "click",(gogoproto.casttype) = "int"]; // view count
  int64 Pubtime = 5 [(gogoproto.jsontag) = "pubtime", (gogoproto.casttype) = "go-common/library/time.Time"]; // publish time
  int32 Result = 6 [(gogoproto.jsontag) = "result",(gogoproto.casttype) = "int8"];   // audit result flag
  int32 Deleted = 7 [(gogoproto.jsontag) = "deleted",(gogoproto.casttype) = "int8"]; // soft-delete flag
  int32 Valid = 8 [(gogoproto.jsontag) = "valid",(gogoproto.casttype) = "int8"];     // validity flag
}
// Fields is a list of Field messages.
message Fields{
  repeated Field Fields= 1;
}

View File

@@ -0,0 +1,35 @@
# Bazel rules for the rank service gorpc server package; the "automanaged"
# tag marks these rules as tool-maintained.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["rpc.go"],
    importpath = "go-common/app/service/main/rank/server/gorpc",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/main/rank/conf:go_default_library",
        "//app/service/main/rank/model:go_default_library",
        "//app/service/main/rank/service:go_default_library",
        "//library/net/rpc:go_default_library",
        "//library/net/rpc/context:go_default_library",
    ],
)

# Source aggregation targets consumed by the repo-wide all-srcs tree.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,60 @@
package rpc
import (
"go-common/app/service/main/rank/conf"
"go-common/app/service/main/rank/model"
"go-common/app/service/main/rank/service"
"go-common/library/net/rpc"
"go-common/library/net/rpc/context"
)
// RPC favorite rpc.
// RPC exposes the rank service over go-common's gorpc transport.
type RPC struct {
	c *conf.Config     // service configuration
	s *service.Service // business-logic layer all handlers delegate to
}
// New init rpc.
// New builds the gorpc server from config, registers the RPC handler set and
// returns the server. It panics if registration fails.
func New(c *conf.Config, s *service.Service) (svr *rpc.Server) {
	svr = rpc.NewServer(c.RPCServer)
	if err := svr.Register(&RPC{c: c, s: s}); err != nil {
		panic(err)
	}
	return
}
// Ping check connection success.
func (r *RPC) Ping(c context.Context, arg *struct{}, res *struct{}) (err error) {
return
}
// Mget .
func (r *RPC) Mget(c context.Context, a *model.MgetReq, res *model.MgetResp) (err error) {
var v *model.MgetResp
if v, err = r.s.Mget(c, a); err == nil {
*res = *v
}
return
}
// Sort .
func (r *RPC) Sort(c context.Context, a *model.SortReq, res *model.SortResp) (err error) {
var v *model.SortResp
if v, err = r.s.Sort(c, a); err == nil {
*res = *v
}
return
}
// Group .
func (r *RPC) Group(c context.Context, a *model.GroupReq, res *model.GroupResp) (err error) {
var v *model.GroupResp
if v, err = r.s.Group(c, a); err == nil {
*res = *v
}
return
}

View File

@@ -0,0 +1,28 @@
# Bazel rules for the (currently stub) rank grpc server package.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["server.go"],
    importpath = "go-common/app/service/main/rank/server/grpc",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# Source aggregation targets consumed by the repo-wide all-srcs tree.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,3 @@
package grpc
// TODO

View File

@@ -0,0 +1,36 @@
# Bazel rules for the rank HTTP (blademaster) server package.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["http.go"],
    importpath = "go-common/app/service/main/rank/server/http",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/main/rank/conf:go_default_library",
        "//app/service/main/rank/model:go_default_library",
        "//app/service/main/rank/service:go_default_library",
        "//library/log:go_default_library",
        "//library/net/http/blademaster:go_default_library",
        "//library/net/http/blademaster/middleware/verify:go_default_library",
    ],
)

# Source aggregation targets consumed by the repo-wide all-srcs tree.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,108 @@
package http
import (
"net/http"
"strings"
"go-common/app/service/main/rank/conf"
"go-common/app/service/main/rank/model"
"go-common/app/service/main/rank/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/http/blademaster/middleware/verify"
)
var (
	// srv is the shared rank service handle, set once by Init.
	srv *service.Service
	// vfy verifies internal-caller signatures for protected routes.
	vfy *verify.Verify
)
// Init init
func Init(c *conf.Config, s *service.Service) {
srv = s
vfy = verify.New(c.Verify)
engine := bm.DefaultServer(c.BM)
router(engine)
if err := engine.Start(); err != nil {
log.Error("engine.Start() error(%v)", err)
panic(err)
}
}
// router mounts the health endpoints and the /x/internal/rank API group.
// Only /group is behind caller verification.
func router(e *bm.Engine) {
	e.Ping(ping)
	e.Register(register)
	g := e.Group("/x/internal/rank")
	{
		g.GET("/do", do)
		g.GET("/mget", mget)
		g.GET("/sort", sort)
		g.GET("/group", vfy.Verify, group)
	}
}
// ping responds 503 when the service health check fails.
func ping(c *bm.Context) {
	if err := srv.Ping(c); err != nil {
		log.Error("ping error(%v)", err)
		c.AbortWithStatus(http.StatusServiceUnavailable)
	}
}
// register answers the discovery register probe with an empty JSON object.
func register(c *bm.Context) {
	c.JSON(map[string]interface{}{}, nil)
}
// do triggers an asynchronous rebuild/patch job; see service.Do for the
// supported actions.
func do(c *bm.Context) {
	arg := new(model.DoReq)
	if err := c.Bind(arg); err != nil {
		return
	}
	// srv.Do returns only an error; pass it in c.JSON's error slot instead
	// of as the response payload (the original had the arguments swapped).
	c.JSON(nil, srv.Do(c, arg))
}
// mget returns the rank fields for the requested oids.
func mget(c *bm.Context) {
	req := new(model.MgetReq)
	if err := c.Bind(req); err != nil {
		return
	}
	res, err := srv.Mget(c, req)
	c.JSON(res, err)
}
// sort parses the sort request (filters arrive as "key|value" strings),
// unpacks the filters into a map and delegates to the service layer.
func sort(c *bm.Context) {
	params := new(struct {
		Business string   `form:"business" validate:"required"`
		Field    string   `form:"field" validate:"required"`
		Order    string   `form:"order" validate:"required"`
		Filters  []string `form:"filters,split"`
		Oids     []int64  `form:"oids,split" validate:"required"`
		Pn       int      `form:"pn"`
		Ps       int      `form:"ps"`
	})
	if err := c.Bind(params); err != nil {
		return
	}
	filters := make(map[string]string, len(params.Filters))
	for _, pair := range params.Filters {
		// only well-formed "key|value" pairs are accepted
		if kv := strings.Split(pair, "|"); len(kv) == 2 {
			filters[kv[0]] = kv[1]
		}
	}
	req := &model.SortReq{
		Business: params.Business,
		Field:    params.Field,
		Order:    params.Order,
		Filters:  filters,
		Oids:     params.Oids,
		Pn:       params.Pn,
		Ps:       params.Ps,
	}
	c.JSON(srv.Sort(c, req))
}
// group returns per-parent-type counts for the requested oids.
func group(c *bm.Context) {
	req := new(model.GroupReq)
	if err := c.Bind(req); err != nil {
		return
	}
	res, err := srv.Group(c, req)
	c.JSON(res, err)
}

View File

@@ -0,0 +1,61 @@
# Bazel rules for the rank service business-logic package and its tests.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = [
        "all_test.go",
        "api_test.go",
        "service_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/service/main/rank/conf:go_default_library",
        "//app/service/main/rank/model:go_default_library",
        "//library/log:go_default_library",
        "//library/time:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "all.go",
        "api.go",
        "incr.go",
        "service.go",
    ],
    importpath = "go-common/app/service/main/rank/service",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/main/rank/conf:go_default_library",
        "//app/service/main/rank/dao:go_default_library",
        "//app/service/main/rank/model:go_default_library",
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/time:go_default_library",
    ],
)

# Source aggregation targets consumed by the repo-wide all-srcs tree.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,172 @@
package service
import (
"context"
"fmt"
"time"
"go-common/app/service/main/rank/model"
"go-common/library/log"
xtime "go-common/library/time"
)
// all rebuilds the in-memory rank map for aids in (minAid, maxAid]; a
// maxAid of 0 means "to the end of the table". It pages through archives
// RowsLimit rows at a time, joining each batch with its types, stats and tv
// states, and sleeps BatchSleep between batches. Always returns nil.
func (s *Service) all(c context.Context, minAid, maxAid int64) error {
	aid := minAid
	limit := s.c.Rank.RowsLimit
	for {
		log.Info("do all aid:%d", aid)
		if maxAid != 0 && aid > maxAid {
			break
		}
		arcs, err := s.dao.ArchiveMetas(c, aid, limit)
		if err != nil {
			log.Error("s.dao.ArchiveMetas(%d,%d) error(%v)", aid, limit, err)
			// back off before retrying so a persistent DB failure does not
			// spin a hot loop
			time.Sleep(time.Duration(s.c.Rank.BatchSleep))
			continue
		}
		if len(arcs) == 0 {
			break
		}
		var (
			typeids []int64
			aids    []int64
		)
		typeidsMap := make(map[int64]struct{})
		for _, v := range arcs {
			if _, ok := typeidsMap[v.Typeid]; !ok {
				typeids = append(typeids, v.Typeid)
				typeidsMap[v.Typeid] = struct{}{}
			}
			aids = append(aids, v.ID)
		}
		// ptypeid
		typesMap, err := s.dao.ArchiveTypes(c, typeids)
		if err != nil {
			log.Error("s.dao.ArchiveTypes(%+v) error(%v)", typeids, err)
			// aid is not advanced yet, so this retries the same batch
			// instead of silently skipping it
			time.Sleep(time.Duration(s.c.Rank.BatchSleep))
			continue
		}
		// view
		statsMap, err := s.dao.ArchiveStats(c, aids)
		if err != nil {
			log.Error("s.dao.ArchiveStats(%+v) error(%v)", aids, err)
			time.Sleep(time.Duration(s.c.Rank.BatchSleep))
			continue
		}
		// tv
		tvsMap, err := s.dao.ArchiveTVs(c, aids)
		if err != nil {
			log.Error("s.dao.ArchiveTVs(%+v) error(%v)", aids, err)
			time.Sleep(time.Duration(s.c.Rank.BatchSleep))
			continue
		}
		// data append
		for _, a := range arcs {
			f := new(model.Field)
			f.Flag = model.FlagExist
			f.Oid = a.ID
			f.Pubtime = a.SetPubtime()
			if v, ok := typesMap[a.Typeid]; ok {
				f.Pid = v.SetPid()
			}
			if v, ok := statsMap[a.ID]; ok {
				f.Click = v.SetClick()
			}
			if v, ok := tvsMap[a.ID]; ok {
				f.Result = v.Result
				f.Deleted = v.Deleted
				f.Valid = v.Valid
			}
			s.setField(a.ID, f) // write map
		}
		// advance the cursor only after the whole batch was applied
		aid = arcs[len(arcs)-1].ID
		time.Sleep(time.Duration(s.c.Rank.BatchSleep))
	}
	// TODO(review): stdout debug leftover; the log.Info below carries the
	// same information.
	fmt.Println("all map len:", minAid, maxAid, len(s.rmap))
	log.Info("do all(%d,%d) successful,map len(%d)", minAid, maxAid, len(s.rmap))
	return nil
}
// patch re-applies incremental changes whose mtime falls in [begin, end].
// The window is walked in BatchStep-sized slices; inside each slice the
// archive metas, tv states and the 100 sharded stat tables are paged through
// RowsLimit rows at a time. Always returns nil.
func (s *Service) patch(c context.Context, begin, end time.Time) error {
	step := int64(time.Duration(xtime.Time(s.c.Rank.BatchStep)) / time.Second)
	limit := s.c.Rank.RowsLimit
	for i := begin.Unix(); i <= end.Unix(); i += step {
		// archive meta and type
		var aid int64
		for {
			arcs, err := s.dao.ArchiveMetasIncrs(c, aid, xtime.Time(i), xtime.Time(i+step), limit)
			if err != nil {
				// log the function actually being called (was "ArchiveMetas")
				log.Error("s.dao.ArchiveMetasIncrs(%d,%d) error(%v)", aid, limit, err)
				// back off before retrying so a DB failure does not spin hot
				time.Sleep(time.Duration(s.c.Rank.BatchSleep))
				continue
			}
			if len(arcs) == 0 {
				break
			}
			var typeids []int64
			typeidsMap := make(map[int64]struct{})
			for _, v := range arcs {
				if _, ok := typeidsMap[v.Typeid]; !ok {
					typeids = append(typeids, v.Typeid)
					typeidsMap[v.Typeid] = struct{}{}
				}
			}
			// ptypeid
			typesMap, err := s.dao.ArchiveTypes(c, typeids)
			if err != nil {
				log.Error("s.dao.ArchiveTypes(%+v) error(%v)", typeids, err)
				// aid unchanged: retry this batch instead of skipping it
				time.Sleep(time.Duration(s.c.Rank.BatchSleep))
				continue
			}
			// data append
			for _, a := range arcs {
				f := new(model.Field)
				f.Flag = model.FlagExist
				f.Oid = a.ID
				f.Pubtime = a.SetPubtime()
				if v, ok := typesMap[a.Typeid]; ok {
					f.Pid = v.SetPid()
				}
				s.setField(a.ID, f) // write map
			}
			// advance the cursor only after the batch was applied
			aid = arcs[len(arcs)-1].ID
			time.Sleep(time.Duration(s.c.Rank.BatchSleep))
		}
		// archive tv
		var id int64
		for {
			tvs, err := s.dao.ArchiveTVsIncrs(c, id, xtime.Time(i), xtime.Time(i+step), limit)
			if err != nil {
				log.Error("s.dao.ArchiveTVsIncrs(%d,%s,%s,%d) error(%v)", id, xtime.Time(i), xtime.Time(i+step), limit, err)
				time.Sleep(time.Duration(s.c.Rank.BatchSleep))
				continue
			}
			if len(tvs) == 0 {
				break
			}
			id = tvs[len(tvs)-1].ID
			// data append
			for _, a := range tvs {
				s.field(a.Aid).Result = a.Result
				s.field(a.Aid).Deleted = a.Deleted
				s.field(a.Aid).Valid = a.Valid
			}
			time.Sleep(time.Duration(s.c.Rank.BatchSleep))
		}
		// archive stats are sharded over 100 tables
		for tbl := 0; tbl < 100; tbl++ {
			var id int64
			for {
				stats, err := s.dao.ArchiveStatsIncrs(c, tbl, id, xtime.Time(i), xtime.Time(i+step), limit)
				if err != nil {
					// log the function actually being called (was "ArchiveTVsIncrs")
					log.Error("s.dao.ArchiveStatsIncrs(%d,%s,%s,%d) error(%v)", id, xtime.Time(i), xtime.Time(i+step), limit, err)
					time.Sleep(time.Duration(s.c.Rank.BatchSleep))
					continue
				}
				if len(stats) == 0 {
					break
				}
				id = stats[len(stats)-1].ID
				// data append
				for _, a := range stats {
					s.field(a.Aid).Click = a.SetClick()
				}
				time.Sleep(time.Duration(s.c.Rank.BatchSleep))
			}
		}
	}
	log.Info("patch(%s,%s) successful,len(%d)", begin, end, len(s.rmap))
	return nil
}

View File

@@ -0,0 +1,25 @@
package service
import (
"context"
"testing"
"time"
"go-common/app/service/main/rank/model"
. "github.com/smartystreets/goconvey/convey"
)
// Test_patch replays incremental changes over a fixed window against the
// package-level service instance s and expects no error.
func Test_patch(t *testing.T) {
	Convey("patch", t, func() {
		var (
			begin = "2018-01-10 03:04:05"
			end   = "2018-08-25 03:04:05"
		)
		// NOTE(review): time.Parse uses UTC while the service's Do handler
		// parses in the local zone — confirm the mismatch is acceptable here.
		timeBegin, _ := time.Parse(model.TimeFormat, begin)
		timeEnd, _ := time.Parse(model.TimeFormat, end)
		err := s.patch(context.Background(), timeBegin, timeEnd)
		t.Logf("err:%+v", err)
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,165 @@
package service
import (
"context"
"fmt"
"sort"
"strconv"
"time"
"go-common/app/service/main/rank/model"
)
// Do .
func (s *Service) Do(c context.Context, arg *model.DoReq) error {
switch arg.Action {
case "all":
go s.all(context.Background(), 0, 0)
case "patchid":
go s.all(context.Background(), arg.MinID, arg.MaxID)
case "patchtime":
timeLayout := "2006-01-02 15:04:05"
loc, _ := time.LoadLocation("Local")
beginTime, err := time.ParseInLocation(timeLayout, arg.BeginTime, loc)
if err != nil {
return err
}
endTime, err := time.ParseInLocation(timeLayout, arg.EndTime, loc)
if err != nil {
return err
}
go s.patch(context.Background(), beginTime, endTime)
default:
return nil
}
return nil
}
// Mget .
func (s *Service) Mget(c context.Context, arg *model.MgetReq) (*model.MgetResp, error) {
res := new(model.MgetResp)
tmap := make(map[int64]*model.Field)
for _, id := range arg.Oids {
field := s.field(id)
tmap[id] = field
}
res.List = tmap
return res, nil
}
// Sort .
func (s *Service) Sort(c context.Context, arg *model.SortReq) (*model.SortResp, error) {
var isResult, isDeleted, isValid, isPid bool
filter := new(model.Field)
res := new(model.SortResp)
res.Page = new(model.Page)
res.Page.Pn = arg.Pn
res.Page.Ps = arg.Ps
for k, v := range arg.Filters {
if k == "result" && v != "" {
isResult = true
result, _ := strconv.ParseInt(v, 10, 8)
filter.Result = int8(result)
}
if k == "deleted" && v != "" {
isDeleted = true
deleted, _ := strconv.ParseInt(v, 10, 8)
filter.Deleted = int8(deleted)
}
if k == "valid" && v != "" {
isValid = true
valid, _ := strconv.ParseInt(v, 10, 8)
filter.Valid = int8(valid)
}
if k == "pid" && v != "" {
isPid = true
pid, _ := strconv.ParseInt(v, 10, 16)
filter.Pid = int16(pid)
}
}
fs := make([]*model.Field, 0)
for _, oid := range arg.Oids {
f := s.field(oid)
if isResult && filter.Result != f.Result {
continue
}
if isDeleted && filter.Deleted != f.Deleted {
continue
}
if isValid && filter.Valid != f.Valid {
continue
}
if isPid && filter.Pid != f.Pid {
continue
}
if !f.Flag {
continue
}
fs = append(fs, f)
}
if len(fs) == 0 {
return res, nil
}
// deep copy
fss := make([]*model.Field, 0)
for _, v := range fs {
cv := *v
fss = append(fss, &cv)
}
// sort
sort.Slice(fss, func(i, j int) bool {
if arg.Field == "click" {
if arg.Order == model.RankOrderByAsc {
return fss[i].Click < fss[j].Click
}
return fss[i].Click > fss[j].Click
}
if arg.Field == "pubtime" {
if arg.Order == model.RankOrderByAsc {
return fss[i].Pubtime < fss[j].Pubtime
}
return fss[i].Pubtime > fss[j].Pubtime
}
return true
})
for _, f := range fss {
res.Result = append(res.Result, f.Oid)
}
res.Page.Total = len(res.Result)
start := (arg.Pn - 1) * arg.Ps
end := arg.Pn * arg.Ps
if start > len(res.Result) {
res.Result = []int64{}
return res, nil
}
if end > len(res.Result) || end == 0 {
end = len(res.Result)
}
res.Result = res.Result[start:end]
return res, nil
}
// Group .
func (s *Service) Group(c context.Context, arg *model.GroupReq) (*model.GroupResp, error) {
res := new(model.GroupResp)
tmap := make(map[int16]int)
for _, oid := range arg.Oids {
f := s.field(oid)
if !f.Flag {
continue
}
if _, ok := tmap[f.Pid]; ok {
tmap[f.Pid]++
continue
}
tmap[f.Pid] = 1
}
for k, v := range tmap {
g := new(model.Group)
g.Key = fmt.Sprintf("%d", k)
g.Count = v
res.List = append(res.List, g)
}
return res, nil
}

View File

@@ -0,0 +1,67 @@
package service
import (
"context"
"fmt"
"testing"
"time"
"go-common/app/service/main/rank/model"
xtime "go-common/library/time"
. "github.com/smartystreets/goconvey/convey"
)
// Test_Sort seeds the package-level service s with 79 synthetic fields and
// requests the first 30 oids by descending click count.
func Test_Sort(t *testing.T) {
	Convey("Sort", t, func() {
		// reset the rank map before seeding
		s.rmap = make(map[int][]*model.Field)
		for i := 1; i < 80; i++ {
			f := new(model.Field)
			f.Flag = true
			f.Oid = int64(i)
			f.Pid = int16(i) % 10
			f.Click = i * i // strictly increasing, so ordering is deterministic
			f.Pubtime = xtime.Time(time.Now().Unix())
			s.setField(int64(i), f)
		}
		fmt.Println(s.rmap)
		arg := &model.SortReq{
			Business: "archive",
			Field:    "click",
			Order:    "desc",
			Pn:       1,
			Ps:       30,
		}
		for i := 1; i < 60; i++ {
			arg.Oids = append(arg.Oids, int64(i))
		}
		res, err := s.Sort(context.Background(), arg)
		fmt.Println(res.Page)
		t.Logf("res:%+v", res)
		So(res, ShouldNotBeNil)
		So(err, ShouldBeNil)
	})
}
// Test_Group seeds the package-level service s with synthetic fields and
// groups the first 15 oids by parent type id.
func Test_Group(t *testing.T) {
	Convey("Group", t, func() {
		// reset the rank map before seeding
		s.rmap = make(map[int][]*model.Field)
		for i := 1; i < 30; i++ {
			f := new(model.Field)
			f.Flag = true
			f.Oid = int64(i)
			f.Pid = int16(i) % 10
			f.Click = i * i
			f.Pubtime = xtime.Time(time.Now().Unix())
			s.setField(int64(i), f)
		}
		arg := &model.GroupReq{
			Business: "archive",
			Oids:     []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
		}
		res, err := s.Group(context.Background(), arg)
		t.Logf("res:%+v", res)
		So(res, ShouldNotBeNil)
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,168 @@
package service
import (
"context"
"encoding/json"
"go-common/app/service/main/rank/model"
"go-common/library/log"
)
// consumeStatView drains the stat-view databus subscription and forwards
// each decoded message to s.procChan for batchProc to apply. It runs until
// the subscription channel closes, then closes procChan and signals s.waiter.
func (s *Service) consumeStatView() {
	defer s.waiter.Done()
	for {
		msg, ok := <-s.statViewSub.Messages()
		if !ok {
			log.Info("consumeproc exit")
			close(s.procChan)
			return
		}
		// NOTE(review): the message is committed before it is processed, so
		// a crash between Commit and the channel send drops the update —
		// confirm at-most-once delivery is acceptable for view counts.
		msg.Commit()
		m := &model.StatViewMsg{}
		if err := json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal() error(%v)", err)
			continue
		}
		// log.Info("consumer topic:%s, Key:%s, Value:%s ", msg.Topic, msg.Key, msg.Value)
		s.procChan <- m
	}
}
// consumeArchive drains the archive binlog (canal) subscription and applies
// "archive" table rows to the rank map; rows from other tables are committed
// and dropped. Runs until the subscription channel closes.
func (s *Service) consumeArchive() {
	defer s.waiter.Done()
	var err error
	for {
		msg, ok := <-s.archiveSub.Messages()
		if !ok {
			log.Error("s.archiveSub.Messages channel closed")
			return
		}
		m := &model.CanalMsg{}
		if err = json.Unmarshal(msg.Value, &m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", msg, err)
			continue // malformed message is skipped without commit
		}
		log.Info("consumeArchive topic:%s, Key:%s, Value:%s, ", msg.Topic, msg.Key, msg.Value)
		switch m.Table {
		case "archive":
			s.setArchiveMeta(context.Background(), m)
		default:
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit msg(%v) error(%v)", msg, err)
		}
	}
}
// consumeArchiveTv drains the tv-archive binlog (canal) subscription and
// applies "ugc_archive" table rows to the rank map; rows from other tables
// are committed and dropped. Runs until the subscription channel closes.
func (s *Service) consumeArchiveTv() {
	defer s.waiter.Done()
	var err error
	for {
		msg, ok := <-s.archiveTvSub.Messages()
		if !ok {
			log.Error("s.archiveTvSub.Messages channel closed")
			return
		}
		m := &model.CanalMsg{}
		if err = json.Unmarshal(msg.Value, &m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", msg, err)
			continue // malformed message is skipped without commit
		}
		// log.Info("consumeArchiveTv topic:%s, Key:%s, Value:%s ", msg.Topic, msg.Key, msg.Value)
		switch m.Table {
		case "ugc_archive":
			s.setTv(context.Background(), m)
		default:
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit msg(%v) error(%v)", msg, err)
		}
	}
}
// batchProc applies stat-view messages from ch to the rank map (currently
// only "archive" type updates the click count). It exits, signalling
// s.waiter, when ch is closed by consumeStatView.
func (s *Service) batchProc(ch chan *model.StatViewMsg) {
	defer s.waiter.Done()
	for {
		m, ok := <-ch
		if !ok {
			log.Info("jobproc exit")
			return
		}
		switch m.Type {
		case "archive":
			s.field(m.ID).Click = m.Count
		default:
		}
	}
}
// setArchiveMeta applies an archive-table canal event to the in-memory
// rank fields: on insert it records existence, oid, pubtime and partition
// id; on update it refreshes only the attributes that actually changed.
func (s *Service) setArchiveMeta(c context.Context, m *model.CanalMsg) {
	o := &model.ArchiveMeta{}
	if err := json.Unmarshal(m.New, o); err != nil {
		log.Error("json.Unmarshal(%v) error(%v)", m, err)
		return
	}
	switch m.Action {
	case model.SyncInsert:
		s.field(o.Aid).Flag = model.FlagExist
		s.field(o.Aid).Oid = o.Aid
		s.field(o.Aid).Pubtime = o.SetPubtime()
		typeMap, err := s.dao.ArchiveTypes(c, []int64{o.Typeid})
		if err != nil {
			// FIX: format string was missing the error verb, so the
			// error value was never printed (go vet: extra argument).
			log.Error("s.dao.ArchiveTypes(%d) error(%v)", o.Typeid, err)
			return
		}
		if v, ok := typeMap[o.Typeid]; ok {
			s.field(o.Aid).Pid = v.SetPid()
		}
	case model.SyncUpdate:
		oo := &model.ArchiveMeta{}
		if err := json.Unmarshal(m.Old, oo); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", m, err)
			return
		}
		s.field(o.Aid).Flag = model.FlagExist
		// Re-resolve the partition id only when the type changed.
		if o.Typeid != oo.Typeid {
			typeMap, err := s.dao.ArchiveTypes(c, []int64{o.Typeid})
			if err != nil {
				// FIX: same missing %v verb as the insert branch.
				log.Error("s.dao.ArchiveTypes(%d) error(%v)", o.Typeid, err)
				return
			}
			if v, ok := typeMap[o.Typeid]; ok {
				s.field(o.Aid).Pid = v.SetPid()
			}
		}
		if o.Pubtime != oo.Pubtime {
			s.field(o.Aid).Pubtime = o.SetPubtime()
		}
	}
}
// setTv applies a ugc_archive (TV) canal event to the archive's
// result/deleted/valid flags in the rank map: on insert all three are
// set; on update only the flags that differ from the old row are written.
func (s *Service) setTv(c context.Context, m *model.CanalMsg) {
	cur := &model.ArchiveMeta{}
	if err := json.Unmarshal(m.New, cur); err != nil {
		log.Error("json.Unmarshal(%v) error(%v)", m, err)
		return
	}
	switch m.Action {
	case model.SyncInsert:
		f := s.field(cur.Aid)
		f.Result = cur.Result
		f.Deleted = cur.Deleted
		f.Valid = cur.Valid
	case model.SyncUpdate:
		prev := &model.ArchiveMeta{}
		if err := json.Unmarshal(m.Old, prev); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", m, err)
			return
		}
		if cur.Result != prev.Result {
			s.field(cur.Aid).Result = cur.Result
		}
		if cur.Deleted != prev.Deleted {
			s.field(cur.Aid).Deleted = cur.Deleted
		}
		if cur.Valid != prev.Valid {
			s.field(cur.Aid).Valid = cur.Valid
		}
	}
}

View File

@@ -0,0 +1,263 @@
package service
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"runtime"
"strconv"
"sync"
"time"
"go-common/app/service/main/rank/conf"
"go-common/app/service/main/rank/dao"
"go-common/app/service/main/rank/model"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
_bucketSize int64 = 100000 // 10w
)
// Service is the rank service: it keeps per-archive ranking fields in
// bucketed in-memory slices (rmap), periodically snapshots them to disk,
// and optionally tails databus topics for incremental updates.
type Service struct {
	c   *conf.Config
	dao *dao.Dao
	// rmap maps bucket index -> slice of _bucketSize fields; map growth
	// is guarded by mux.
	rmap   map[int][]*model.Field
	mux    *sync.RWMutex
	waiter *sync.WaitGroup // tracks consumer/worker goroutines for shutdown
	// databus subscriptions
	statViewSub  *databus.Databus // archive view-count stats
	archiveSub   *databus.Databus // archive table binlog
	archiveTvSub *databus.Databus // ugc_archive (TV) table binlog
	// procChan hands decoded stat messages from consumeStatView to the
	// batchProc workers.
	procChan chan *model.StatViewMsg
}
// New creates the rank Service: it restores the in-memory rank map from
// disk snapshots (or rebuilds it from the database via flush), optionally
// starts the incremental databus consumers, and launches the periodic
// dump goroutine. It panics if the snapshot directory cannot be created.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:      c,
		dao:    dao.New(c),
		mux:    new(sync.RWMutex),
		waiter: new(sync.WaitGroup),
		// rank map
		rmap: make(map[int][]*model.Field),
		// databus
		statViewSub:  databus.New(c.Databus.StatView),
		archiveSub:   databus.New(c.Databus.Archive),
		archiveTvSub: databus.New(c.Databus.UgcTvBinlog),
		procChan:     make(chan *model.StatViewMsg, 1024),
	}
	// FIX: was 0644 — a directory needs the execute bit to be traversed,
	// so snapshot files under a 0644 directory could never be opened.
	if err := os.MkdirAll(s.c.Rank.FilePath, 0755); err != nil {
		panic(err)
	}
	if s.c.Rank.SwitchIncr {
		s.incr()
	}
	s.flush()
	s.waiter.Add(1)
	go s.dump()
	// FIX: debug fmt.Println replaced with structured logging.
	log.Info("map len: %d", len(s.rmap))
	return
}
// Ping reports whether the service's underlying dao is healthy.
func (s *Service) Ping(c context.Context) error {
	return s.dao.Ping(c)
}
// Close shuts down the dao and every databus subscription, logging any
// close failure.
func (s *Service) Close() {
	s.dao.Close()
	if err := s.statViewSub.Close(); err != nil {
		log.Error("s.statViewSub.Close() error(%v)", err)
	}
	if err := s.archiveSub.Close(); err != nil {
		// FIX: this failure was logged as "s.statViewSub.Close()"
		// (copy-paste), misattributing the error.
		log.Error("s.archiveSub.Close() error(%v)", err)
	}
	if err := s.archiveTvSub.Close(); err != nil {
		log.Error("s.archiveTvSub.Close() error(%v)", err)
	}
}
// mod returns oid's slot index within its bucket.
func (s *Service) mod(oid int64) (slot int) {
	slot = int(oid % _bucketSize)
	return
}
// bucket returns the index of the _bucketSize-wide bucket holding oid.
func (s *Service) bucket(oid int64) (idx int) {
	idx = int(oid / _bucketSize)
	return
}
// incr starts the incremental pipeline: one batchProc worker per CPU
// plus the three databus consumer goroutines, all tracked by s.waiter.
func (s *Service) incr() {
	for n := runtime.NumCPU(); n > 0; n-- {
		s.waiter.Add(1)
		go s.batchProc(s.procChan)
	}
	for _, consume := range []func(){s.consumeStatView, s.consumeArchive, s.consumeArchiveTv} {
		s.waiter.Add(1)
		go consume()
	}
}
// flush warms the rank map at startup. With no usable snapshot on disk it
// rebuilds everything from the database (when SwitchAll is on); otherwise
// it loads each bucket snapshot, rebuilds any bucket that fails to load,
// and finally patches in the changes made since the snapshot timestamp.
func (s *Service) flush() error {
	c := context.Background()
	files, err := ioutil.ReadDir(s.c.Rank.FilePath)
	if err != nil {
		// Deliberate fall-through: an unreadable snapshot dir is treated
		// as "no snapshot", which triggers the full rebuild below.
		log.Error("ioutil.ReadDir() error(%v)", err)
	}
	if len(files) < 2 { // one snapshot .pb and a timestamp.txt at least
		if !s.c.Rank.SwitchAll {
			return nil
		}
		return s.all(c, 0, 0)
	}
	maxID, err := s.dao.MaxOid(c)
	if err != nil {
		log.Error("s.dao.MaxOid() error(%v)", err)
		return err
	}
	for i := 0; i <= int(maxID/_bucketSize); i++ {
		if s.load(i) != nil {
			// Missing/corrupt bucket snapshot: rebuild it from the DB.
			s.all(c, int64(i)*_bucketSize, int64(i+1)*_bucketSize)
		}
	}
	// FIX(idiom): os.Open + ReadAll collapsed into ioutil.ReadFile.
	tsPath := s.c.Rank.FilePath + "timestamp.txt"
	data, err := ioutil.ReadFile(tsPath)
	if err != nil {
		log.Error("ioutil.ReadFile(%s) error(%v)", tsPath, err)
		return err
	}
	begin, _ := strconv.ParseInt(string(data), 10, 64)
	if begin <= 1 {
		begin = time.Now().Unix() - 3600 // fall back to the last hour of data
	}
	return s.patch(c, time.Unix(begin, 0), time.Now())
}
// load restores the snapshot of bucket index from disk into s.rmap.
// A non-nil error (missing or corrupt snapshot) tells the caller to
// rebuild that bucket from the database.
func (s *Service) load(index int) error {
	// FIX(idiom): os.Open + ReadAll collapsed into ioutil.ReadFile.
	path := s.c.Rank.FilePath + fmt.Sprintf(s.c.Rank.FileName, index)
	fd, err := ioutil.ReadFile(path)
	if err != nil {
		log.Error("ioutil.ReadFile(%s) error(%v)", path, err)
		return err
	}
	fs := new(model.Fields)
	if err := fs.Unmarshal(fd); err != nil {
		log.Error("fs.Unmarshal() error(%v)", err)
		return err
	}
	s.rmap[index] = fs.Fields
	return nil
}
// dump periodically marshals every rank bucket to a temp file and copies
// it into the snapshot directory, then records the snapshot timestamp.
// It runs until process exit (the loop has no stop condition).
func (s *Service) dump() error {
	// FIX: New does s.waiter.Add(1) for this goroutine but dump never
	// signalled Done; register it like every other tracked goroutine.
	defer s.waiter.Done()
	for {
		time.Sleep(time.Duration(s.c.Rank.Ticker))
		now := time.Now().Unix()
		// FIX(race): take a shallow snapshot of the map under the read
		// lock — field()/setField() may grow s.rmap concurrently, and
		// ranging an unguarded map while it is written is a data race.
		s.mux.RLock()
		snapshot := make(map[int][]*model.Field, len(s.rmap))
		for k, sli := range s.rmap {
			snapshot[k] = sli
		}
		s.mux.RUnlock()
		for k, sli := range snapshot {
			// Deep-copy each field so later counter updates don't mutate
			// the data while it is being marshaled.
			csli := make([]*model.Field, 0, len(sli))
			for _, v := range sli {
				cv := *v
				csli = append(csli, &cv)
			}
			fs := new(model.Fields)
			fs.Fields = csli
			buf, err := fs.Marshal()
			if err != nil {
				log.Error("fs.Marshal() error(%v)", err)
			}
			// Write to /tmp first, then copy into place, so a crash
			// mid-write doesn't corrupt the live snapshot.
			path := fmt.Sprintf("/tmp/"+s.c.Rank.FileName, k)
			if err = ioutil.WriteFile(path, buf, 0644); err != nil {
				log.Error("ioutil.WriteFile(%s) error(%v)", path, err)
			}
			newPath := s.c.Rank.FilePath + fmt.Sprintf(s.c.Rank.FileName, k)
			s.fileCopy(path, newPath)
		}
		if err := ioutil.WriteFile(s.c.Rank.FilePath+"timestamp.txt", []byte(fmt.Sprintf("%d", now)), 0644); err != nil {
			log.Error("ioutil.WriteFile(%d) error(%v)", now, err)
		}
		log.Info("rmap dump successful now(%d)", now)
	}
}
// fileCopy copies path to newPath and fsyncs the destination. Failures
// are logged and abort the copy (best-effort: the previous snapshot at
// newPath is left as-is).
func (s *Service) fileCopy(path, newPath string) {
	src, err := os.Open(path)
	if err != nil {
		// FIX: previously fell through with a nil file handle and kept
		// going; abort on failure instead.
		log.Error("os.Open(%s) error(%v)", path, err)
		return
	}
	defer src.Close()
	dst, err := os.Create(newPath)
	if err != nil {
		// FIX: logged the source path instead of the destination, and
		// continued with a nil handle.
		log.Error("os.Create(%s) error(%v)", newPath, err)
		return
	}
	defer dst.Close()
	if _, err = io.Copy(dst, src); err != nil {
		// FIX: message said os.Copy; the call is io.Copy.
		log.Error("io.Copy() error(%v)", err)
		return
	}
	if err = dst.Sync(); err != nil {
		log.Error("newFile.Sync() error(%v)", err)
	}
}
// field returns the in-memory rank Field for oid, lazily allocating the
// whole bucket on first access. The returned pointer is shared mutable
// state: callers write counters (e.g. Click) through it directly.
func (s *Service) field(oid int64) (f *model.Field) {
	bk := s.bucket(oid)
	md := s.mod(oid)
	// Fast path: the bucket already exists; only the read lock is taken.
	s.mux.RLock()
	r, ok := s.rmap[bk]
	s.mux.RUnlock()
	if !ok {
		// Slow path: double-checked creation of a fully populated bucket
		// (every slot pre-filled with a zero Field).
		s.mux.Lock()
		r, ok = s.rmap[bk]
		if !ok {
			for i := 0; i < int(_bucketSize); i++ {
				r = append(r, new(model.Field))
			}
			s.rmap[bk] = r
		}
		s.mux.Unlock()
	}
	// Slots can still be nil for buckets restored from a disk snapshot.
	// NOTE(review): this slot write happens outside mux — presumably each
	// oid is only touched by a single goroutine at a time; confirm.
	if f = r[md]; f == nil {
		f = new(model.Field)
		r[md] = f
	}
	return
}
// setField stores f as the rank Field for oid, lazily allocating the
// whole bucket on first access (same double-checked pattern as field).
// The caller retains ownership of f; no copy is made.
func (s *Service) setField(oid int64, f *model.Field) {
	bk := s.bucket(oid)
	md := s.mod(oid)
	// Fast path: the bucket already exists; only the read lock is taken.
	s.mux.RLock()
	r, ok := s.rmap[bk]
	s.mux.RUnlock()
	if !ok {
		// Slow path: double-checked creation of a fully populated bucket.
		s.mux.Lock()
		r, ok = s.rmap[bk]
		if !ok {
			for i := 0; i < int(_bucketSize); i++ {
				r = append(r, new(model.Field))
			}
			s.rmap[bk] = r
		}
		s.mux.Unlock()
	}
	// NOTE(review): this slot write happens outside mux — presumably each
	// oid is only touched by a single goroutine at a time; confirm.
	r[md] = f
}

View File

@@ -0,0 +1,130 @@
package service
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
"time"
"go-common/app/service/main/rank/conf"
"go-common/app/service/main/rank/model"
"go-common/library/log"
xtime "go-common/library/time"
. "github.com/smartystreets/goconvey/convey"
)
var (
s *Service
)
// init points the conf flag at the test configuration and builds the
// shared Service used by every test in this package.
func init() {
	dir, _ := filepath.Abs("../cmd/test.toml")
	flag.Set("conf", dir)
	if err := conf.Init(); err != nil {
		fmt.Printf("conf.Init() error(%v)", err)
	}
	s = New(conf.Conf)
}
// Test_dump repopulates rmap with synthetic fields and invokes dump.
// NOTE(review): Service.dump runs an endless ticker loop and never
// returns, so this test blocks forever once it reaches s.dump() — it
// should run dump in a goroutine with a deadline or be skipped; confirm.
func Test_dump(t *testing.T) {
	Convey("dump", t, func() {
		// Reset the shared map so the dump only sees the fields set below.
		s.rmap = make(map[int][]*model.Field)
		for i := 1; i < 30; i++ {
			f := new(model.Field)
			f.Oid = int64(i)
			f.Pid = int16(i)
			f.Click = i * i
			f.Pubtime = xtime.Time(time.Now().Unix())
			s.setField(int64(i), f)
		}
		err := s.dump()
		t.Logf("err:%+v", err)
		So(err, ShouldBeNil)
	})
}
// Test_Marshal verifies that a populated Fields collection marshals to
// protobuf without error.
func Test_Marshal(t *testing.T) {
	Convey("Dump", t, func() {
		// FIX: was make([]*model.Field, 10), which left ten nil entries
		// at the front of the slice before the appended fields.
		slic := make([]*model.Field, 0, 29)
		for i := 1; i < 30; i++ {
			f := new(model.Field)
			f.Oid = int64(i)
			f.Pid = int16(i)
			f.Click = i * i
			f.Pubtime = xtime.Time(time.Now().Unix())
			slic = append(slic, f)
		}
		fields := new(model.Fields)
		fields.Fields = slic
		_, err := fields.Marshal()
		if err != nil {
			log.Error("fs.Marshal() error(%v)", err)
		}
		t.Logf("err:%+v", err)
		So(err, ShouldBeNil)
	})
}
// Test_field populates thirty slots and checks that field/bucket/mod
// resolve an oid to a non-nil entry.
func Test_field(t *testing.T) {
	Convey("field", t, func() {
		var oid int64 = 11
		for i := 0; i < 30; i++ {
			// FIX: the original reused a single *model.Field across all
			// iterations, so every slot aliased one struct whose Oid was
			// overwritten each loop; allocate a fresh Field per slot.
			f := &model.Field{
				Oid:     int64(i),
				Pid:     22,
				Click:   33,
				Pubtime: 1551231231,
			}
			s.setField(int64(i), f)
		}
		ff := s.field(oid)
		t.Logf("field:%+v", ff)
		So(s.rmap[s.bucket(oid)][s.mod(oid)], ShouldNotBeNil)
	})
}
// Test_setField stores thirty distinct fields and checks the slot for a
// sample oid is populated.
func Test_setField(t *testing.T) {
	Convey("setField", t, func() {
		var oid int64 = 11
		for i := 0; i < 30; i++ {
			// FIX: the original reused one *model.Field pointer for every
			// slot (all buckets ended up aliasing the same struct);
			// allocate a fresh Field per iteration instead.
			f := &model.Field{
				Oid:     int64(i),
				Pid:     22,
				Click:   33,
				Pubtime: 1551231231,
			}
			s.setField(int64(i), f)
		}
		t.Logf("field:%+v", s.rmap[s.bucket(oid)][s.mod(oid)])
		So(s.rmap[s.bucket(oid)][s.mod(oid)], ShouldNotBeNil)
	})
}
// Test_timestamp round-trips the dump timestamp file: writes the current
// unix time, reads it back, and asserts it parses to a non-zero value.
func Test_timestamp(t *testing.T) {
	Convey("timestamp", t, func() {
		now := time.Now().Unix()
		path := s.c.Rank.FilePath + "timestamp.txt"
		// FIX: I/O errors were only logged, so the test could "pass"
		// while reading nothing; assert on each step instead.
		err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", now)), 0644)
		So(err, ShouldBeNil)
		fi, err := os.Open(path)
		So(err, ShouldBeNil)
		defer fi.Close()
		data, err := ioutil.ReadAll(fi)
		So(err, ShouldBeNil)
		// FIX: redundant full-slice expressions data[:] removed.
		begin, _ := strconv.ParseInt(string(data), 10, 64)
		fmt.Println(string(data), begin)
		So(begin, ShouldNotEqual, 0)
	})
}