Initial commit

This commit is contained in:
Donny
2019-04-22 20:46:32 +08:00
commit 49ab8aadd1
25441 changed files with 4055000 additions and 0 deletions

17
build/BUILD Normal file
View File

@@ -0,0 +1,17 @@
# Auto-managed Bazel filegroups (tags = ["automanaged"]) used to aggregate
# source files for tree-wide targets.

# Every file in this package; private so only rules in this package may
# depend on it directly.
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

# This package's sources plus the sub-packages' all-srcs groups; public so
# repo-level source-tree targets can consume it.
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//build/linter:all-srcs",
"//build/visible_to:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

6
build/OWNERS Normal file
View File

@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- wangweizhen
labels:
- area/chore

30
build/check.sh Normal file
View File

@@ -0,0 +1,30 @@
#!/bin/bash
# check.sh verifies the local build toolchain: it warns when the installed
# bazel differs from the suggested version and ensures platform-specific
# prerequisites (bazel on Linux; homebrew and bazel on macOS).

set -o errexit
set -o nounset
set -o pipefail

# Resolve the repo root relative to this script, then load the shared helpers.
# Assignment is split from export so a failing command substitution is not
# masked by export's own exit status.
KRATOS_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
export KRATOS_ROOT
source "${KRATOS_ROOT}/build/lib/init.sh"

#kratos::util::ensure-gnu-sed

# Warn (but do not fail) when the installed bazel is not the suggested
# version. ${bazel_version} is exported by build/lib/init.sh; quote it and
# use -- so it cannot glob or be mistaken for a grep option.
if ! bazel version | grep -- "${bazel_version}" >/dev/null; then
kratos::log::info "We suggest you to use bazel ${bazel_version} for building quickly.
Mac: brew upgrade bazel
Ubuntu: sudo apt-get upgrade bazel
Centos/Redhat: sudo yum update bazel
Fedora: sudo dnf update bazel
For more information, please read this document https://docs.bazel.build/versions/master/install.html
" >&2
fi

# Platform-specific prerequisites; quote the command substitution so an
# unexpected uname output cannot word-split the test.
host_os="$(uname -s)"
if [ "${host_os}" = "Linux" ]; then
  kratos::util::ensure-bazel
fi
if [ "${host_os}" = "Darwin" ]; then
  kratos::util::ensure-homebrew
  kratos::util::ensure-homebrew-bazel
fi

0
build/config.yaml Normal file
View File

465
build/labels-temp.yaml Normal file
View File

@@ -0,0 +1,465 @@
default:
labels:
- color: 0ffa16
description: Indicates a PR has been approved by an approver from all required OWNERS files.
name: approved
target: prs
prowPlugin: approve
addedBy: approvers
- color: fef2c0
description: Indicates a cherrypick PR into a release branch has been approved by the release branch manager. # Consumed by the kubernetes/kubernetes cherrypick-queue.
name: cherrypick-approved
target: prs
addedBy: humans
- color: fef2c0
description: Denotes a PR to master as a candidate for cherry picking into a release branch.
name: cherrypick-candidate
target: prs
addedBy: humans
- color: d455d0
description: Indicates an issue is a duplicate of other open issue.
name: triage/duplicate
target: both
addedBy: humans
- color: d455d0
description: Indicates an issue needs more information in order to work on it.
name: triage/needs-information
target: both
addedBy: humans
- color: d455d0
description: Indicates an issue can not be reproduced as described.
name: triage/not-reproducible
target: both
addedBy: humans
- color: d455d0
description: Indicates an issue that is a support question.
name: triage/support
target: both
addedBy: humans
- color: d455d0
description: Indicates an issue that can not or will not be resolved.
name: triage/unresolved
target: both
addedBy: humans
- color: c0ff4a
description: Denotes an issue or PR intended to be handled by the code of conduct committee. # (as of yet non-existent)
name: committee/conduct
target: both
prowPlugin: label
addedBy: anyone
- color: c0ff4a
description: Denotes an issue or PR intended to be handled by the steering committee.
name: committee/steering
target: both
prowPlugin: label
addedBy: anyone
- color: e11d21
description: Indicates the PR's author has not signed the CNCF CLA.
name: 'cncf-cla: no'
target: prs
prowPlugin: cla
addedBy: prow
- color: bfe5bf
description: Indicates the PR's author has signed the CNCF CLA.
name: 'cncf-cla: yes'
target: prs
prowPlugin: cla
addedBy: prow
- color: e11d21
description: DEPRECATED. Indicates that a PR should not merge. Label can only be manually applied/removed.
name: do-not-merge
target: prs
addedBy: humans
- color: e11d21
description: Indicates that a PR should not merge because it touches files in blocked paths.
name: do-not-merge/blocked-paths
target: prs
prowPlugin: blockade
addedBy: prow
- color: e11d21
description: Indicates that a PR is not yet approved to merge into a release branch.
name: do-not-merge/cherry-pick-not-approved
target: prs
addedBy: mungegithub cherrypick-label-unapproved munger
- color: e11d21
description: Indicates that a PR should not merge because someone has issued a /hold command.
name: do-not-merge/hold
target: prs
prowPlugin: hold
addedBy: anyone
- color: e11d21
description: Indicates that a PR should not merge because it has an invalid OWNERS file in it.
name: do-not-merge/invalid-owners-file
target: prs
prowPlugin: verify-owners
addedBy: prow
- color: e11d21
description: Indicates that a PR should not merge because it's missing one of the release note labels.
name: do-not-merge/release-note-label-needed
target: prs
prowPlugin: releasenote
addedBy: prow
- color: e11d21
description: Indicates that a PR should not merge because it is a work in progress.
name: do-not-merge/work-in-progress
target: prs
prowPlugin: wip
addedBy: prow
- color: 7057ff
description: Denotes an issue ready for a new contributor, according to the "help wanted" guidelines.
name: 'good first issue'
target: issues
prowPlugin: help
addedBy: anyone
- color: 006b75
description: Denotes an issue that needs help from a contributor. Must meet "help wanted" guidelines.
name: 'help wanted'
target: issues
prowPlugin: help
addedBy: anyone
- color: e11d21
description: Categorizes issue or PR as related to a bug.
name: kind/bug
target: both
prowPlugin: label
addedBy: anyone
- color: c7def8
description: Categorizes issue or PR as related to cleaning up code, process, or technical debt.
name: kind/cleanup
target: both
prowPlugin: label
addedBy: anyone
- color: c7def8
description: Categorizes issue or PR as related to design.
name: kind/design
target: both
prowPlugin: label
addedBy: anyone
- color: c7def8
description: Categorizes issue or PR as related to documentation.
name: kind/documentation
target: both
prowPlugin: label
addedBy: anyone
- color: e11d21
description: Categorizes issue or PR as related to a consistently or frequently failing test.
name: kind/failing-test
target: both
prowPlugin: label
addedBy: anyone
- color: c7def8
description: Categorizes issue or PR as related to a new feature.
name: kind/feature
target: both
prowPlugin: label
addedBy: anyone
- color: f7c6c7
description: Categorizes issue or PR as related to a flaky test.
name: kind/flake
target: both
prowPlugin: label
addedBy: anyone
- color: 15dd18
description: Indicates that a PR is ready to be merged.
name: lgtm
target: prs
prowPlugin: lgtm
addedBy: reviewers or members
- color: d3e2f0
description: Indicates that an issue or PR should not be auto-closed due to staleness.
name: lifecycle/frozen
target: both
prowPlugin: lifecycle
addedBy: anyone
- color: 8fc951
description: Indicates that an issue or PR is actively being worked on by a contributor.
name: lifecycle/active
target: both
prowPlugin: lifecycle
addedBy: anyone
- color: "604460"
description: Denotes an issue or PR that has aged beyond stale and will be auto-closed.
name: lifecycle/rotten
target: both
prowPlugin: lifecycle
addedBy: anyone or [@fejta-bot](https://github.com/fejta-bot) via [periodic-test-infra-rotten prowjob](https://prow.k8s.io/?job=periodic-test-infra-rotten)
- color: "795548"
description: Denotes an issue or PR has remained open with no activity and has become stale.
name: lifecycle/stale
target: both
prowPlugin: lifecycle
addedBy: anyone or [@fejta-bot](https://github.com/fejta-bot) via [periodic-test-infra-stale prowjob](https://prow.k8s.io/?job=periodic-test-infra-stale)
- color: BDBDBD
description: Indicates a PR cannot be merged because it has merge conflicts with HEAD.
name: needs-rebase
target: prs
prowPlugin: needs-rebase
addedBy: prow
- color: fef2c0
description: Lowest priority. Possibly useful, but not yet enough support to actually get it done. # These are mostly place-holders for potentially good ideas, so that they don't get completely forgotten, and can be referenced /deduped every time they come up.
name: priority/awaiting-more-evidence
target: both
prowPlugin: label
addedBy: anyone
- color: fbca04
description: Higher priority than priority/awaiting-more-evidence. # There appears to be general agreement that this would be good to have, but we may not have anyone available to work on it right now or in the immediate future. Community contributions would be most welcome in the mean time (although it might take a while to get them reviewed if reviewers are fully occupied with higher priority issues, for example immediately before a release).
name: priority/backlog
target: both
prowPlugin: label
addedBy: anyone
- color: e11d21
description: Highest priority. Must be actively worked on as someone's top priority right now. # Stuff is burning. If it's not being actively worked on, someone is expected to drop what they're doing immediately to work on it. Team leaders are responsible for making sure that all the issues, labeled with this priority, in their area are being actively worked on. Examples include user-visible bugs in core features, broken builds or tests and critical security issues.
name: priority/critical-urgent
target: both
prowPlugin: label
addedBy: anyone
- color: eb6420
description: Important over the long term, but may not be staffed and/or may need multiple releases to complete.
name: priority/important-longterm
target: both
prowPlugin: label
addedBy: anyone
- color: eb6420
description: Must be staffed and worked on either currently, or very soon, ideally in time for the next release.
name: priority/important-soon
target: both
prowPlugin: label
addedBy: anyone
- color: ffaa00
description: DEPRECATED. # Bumps a PR up in priority in the submit-queue for kubernetes/kubernetes because that PR blocks others by being a dependency for other pending changes. This will stop having any effect once kubernetes/kubernetes migrates away from the submit queue.
name: queue/blocks-others
target: prs
addedBy: humans
- color: ffaa00
description: DEPRECATED. # Bumps a PR up in priority in the submit-queue for kubernetes/kubernetes because that PR that contains a critical fix, and will merge even if e2e tests are failing. This will stop having any effect once kubernetes/kubernetes migrates away from the submit queue.
name: queue/critical-fix
target: prs
addedBy: humans
- color: ffaa00
description: DEPRECATED. # Bumps a PR up in priority in the submit-queue for kubernetes/kubernetes because that PR that will fix or unblock the submit queue. This will stop having any effect once kubernetes/kubernetes migrates away from the submit queue.
name: queue/fix
target: prs
addedBy: humans
- color: ffaa00
description: DEPRECATED. # Bumps a PR up in priority in the submit-queue for kubernetes/kubernetes because that PR that has had difficulty getting merged because merges ahead of it have forced it to be rebased multiple times. This will stop having any effect once kubernetes/kubernetes migrates away from the submit queue.
name: queue/multiple-rebases
target: prs
addedBy: humans
- color: c2e0c6
description: Denotes a PR that will be considered when it comes time to generate release notes.
name: release-note
target: prs
prowPlugin: releasenote
addedBy: prow
- color: c2e0c6
description: Denotes a PR that introduces potentially breaking changes that require user action. # These actions will be specifically called out when it comes time to generate release notes.
name: release-note-action-required
target: prs
prowPlugin: releasenote
addedBy: prow
- color: c2e0c6
description: Denotes a PR that doesn't merit a release note. # will be ignored when it comes time to generate release notes.
name: release-note-none
target: prs
prowPlugin: releasenote
addedBy: prow or member or author
- color: eb6420
description: Indicates that a PR doesn't need to be retested prior to merge. # At present only submit-queue consumes this label. Unclear whether we're going to modify tide to respect this or not ([kubernetes/test-infra#5334](https://github.com/kubernetes/test-infra/issues/5334))
name: retest-not-required
target: prs
addedBy: humans
- color: fbca04
description: Indicates that a PR doesn't need to be retested prior to merge because it only changes docs. # At present only submit-queue consumes this label. Unclear whether we're going to modify tide to respect this or not ([kubernetes/test-infra#7195](https://github.com/kubernetes/test-infra/issues/7195))
name: retest-not-required-docs-only
target: prs
prowPlugin: docs-no-retest
addedBy: prow
- color: ee9900
description: Denotes a PR that changes 100-499 lines, ignoring generated files.
name: size/L
target: prs
prowPlugin: size
addedBy: prow
- color: eebb00
description: Denotes a PR that changes 30-99 lines, ignoring generated files.
name: size/M
target: prs
prowPlugin: size
addedBy: prow
- color: 77bb00
description: Denotes a PR that changes 10-29 lines, ignoring generated files.
name: size/S
target: prs
prowPlugin: size
addedBy: prow
- color: ee5500
description: Denotes a PR that changes 500-999 lines, ignoring generated files.
name: size/XL
target: prs
prowPlugin: size
addedBy: prow
- color: "009900"
description: Denotes a PR that changes 0-9 lines, ignoring generated files.
name: size/XS
target: prs
prowPlugin: size
addedBy: prow
- color: ee0000
description: Denotes a PR that changes 1000+ lines, ignoring generated files.
name: size/XXL
target: prs
prowPlugin: size
addedBy: prow
- color: fef2c0
description: Used during release burndown. Denotes an issue or PR is approved to be part of the release # A bot will warn then kick issues out of the milestone that lack this label.
name: status/approved-for-milestone
target: both
prowPlugin: milestonestatus
addedBy: "members of a configurable github team. default: [@kubernetes/kubernetes-milestone-maintainers](https://github.com/orgs/kubernetes/teams/kubernetes-milestone-maintainers/members)"
- color: fef2c0
description: Used during release burndown. Denotes that an issue is actively being worked.
name: status/in-progress
target: both
prowPlugin: milestonestatus
addedBy: "members of a configurable github team. default: [@kubernetes/kubernetes-milestone-maintainers](https://github.com/orgs/kubernetes/teams/kubernetes-milestone-maintainers/members)"
- color: fef2c0
description: Used during release burndown. Denotes that a fix for an issue is actively being reviewed.
name: status/in-review
target: both
prowPlugin: milestonestatus
addedBy: "members of a configurable github team. default: [@kubernetes/kubernetes-milestone-maintainers](https://github.com/orgs/kubernetes/teams/kubernetes-milestone-maintainers/members)"
- color: eb6420
description: library
name: library
target: both
prowPlugin: label
addedBy: anyone
- color: f1f442
description: Live Department
name: live
target: both
prowPlugin: label
addedBy: anyone
- color: f1f442
description: MTC
name: main
target: both
prowPlugin: label
addedBy: anyone
- color: f1f442
description: Ops Department
name: ops
target: both
prowPlugin: label
addedBy: anyone
- color: f1f442
description: EP Department
name: ep
target: both
prowPlugin: label
addedBy: anyone
- color: f1f442
description: Openplatform Department
name: openplatform
target: both
prowPlugin: label
addedBy: anyone
- color: f1f442
description: BBQ Department
name: bbq
target: both
prowPlugin: label
addedBy: anyone
- color: f1f442
description: Video Department
name: video
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: business application
name: business
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: admin
name: admin
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: common
name: common
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: infra
name: infra
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: interface
name: interface
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: job
name: job
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: service
name: service
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: tool
name: tool
target: both
prowPlugin: label
addedBy: anyone
- color: ffb3a7
description: vendor
name: vendor
target: both
prowPlugin: label
addedBy: anyone
- color: 4B0082
description: create a new project
name: new-project
target: both
prowPlugin: label
addedBy: anyone
- color: 4B0082
description: create a new project
name: new-main-job-project
target: both
prowPlugin: label
addedBy: anyone
- color: 4B0082
description: create a new project
name: new-main-service-project
target: both
prowPlugin: label
addedBy: anyone
- color: 4B0082
description: create a new project
name: new-main-interface-project
target: both
prowPlugin: label
addedBy: anyone
- color: 4B0082
description: create a new project
name: new-main-admin-project
target: both
prowPlugin: label
addedBy: anyone
- color: 0052cc
description: Issues or PRs related to config
name: area/chore
target: both
prowPlugin: label
addedBy: anyone

133
build/lib/init.sh Normal file
View File

@@ -0,0 +1,133 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Unset CDPATH so that path interpolation can work correctly
# https://github.com/kratosrnetes/kratosrnetes/issues/52255
unset CDPATH
# The root of the build/dist directory
KRATOS_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"
# Where build output lands; override KRATOS_OUTPUT_SUBPATH to relocate it.
KRATOS_OUTPUT_SUBPATH="${KRATOS_OUTPUT_SUBPATH:-_output/local}"
KRATOS_OUTPUT="${KRATOS_ROOT}/${KRATOS_OUTPUT_SUBPATH}"
KRATOS_OUTPUT_BINPATH="${KRATOS_OUTPUT}/bin"
# This controls rsync compression. Set to a value > 0 to enable rsync
# compression for build container
KRATOS_RSYNC_COMPRESS="${KRATOS_RSYNC_COMPRESS:-0}"
# Set no_proxy for localhost if behind a proxy, otherwise,
# the connections to localhost in scripts will time out
export no_proxy=127.0.0.1,localhost
# Suggested bazel version; consumed by build/check.sh for its version warning.
export bazel_version="0.20.0"
# This is a symlink to binaries for "this platform", e.g. build tools.
THIS_PLATFORM_BIN="${KRATOS_ROOT}/_output/bin"
# Shared helper libraries (logging helpers, temp-dir/trap utilities, etc.).
source "${KRATOS_ROOT}/build/lib/util.sh"
source "${KRATOS_ROOT}/build/lib/logging.sh"
#kratos::log::install_errexit
#source "${KRATOS_ROOT}/build/lib/version.sh"
#source "${KRATOS_ROOT}/build/lib/golang.sh"
#source "${KRATOS_ROOT}/build/lib/etcd.sh"
# This emulates "readlink -f" which is not available on MacOS X.
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
# X=$(readlink -f $1 2>&1)
# Y=$(kratos::readlinkdashf $1 2>&1)
# if [ "$X" != "$Y" ]; then
# echo readlinkdashf $1: expected "$X", got "$Y"
# fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistant
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
# Emulates "readlink -f", which is not available on MacOS X: resolve $1 to
# a physical path (directories via pwd -P, symlinked files via readlink).
function kratos::readlinkdashf {
  # Run in a subshell so the cd never leaks into the caller.
  (
    if [[ ! -d "$1" ]]; then
      cd "$(dirname "$1")"
      local name
      name=$(basename "$1")
      if [[ -L "$name" ]]; then
        readlink "$name"
      else
        echo "$(pwd -P)/${name}"
      fi
    else
      # Directories (including symlinks to directories) resolve via pwd -P.
      cd "$1"
      pwd -P
    fi
  )
}
# This emulates "realpath" which is not available on MacOS X
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
# X=$(realpath $1 2>&1)
# Y=$(kratos::realpath $1 2>&1)
# if [ "$X" != "$Y" ]; then
# echo realpath $1: expected "$X", got "$Y"
# fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistant
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
# Emulates "realpath", which is not available on MacOS X: print the resolved
# path of $1, or complain on stderr and return 1 if it does not exist.
kratos::realpath() {
  if [[ -e "$1" ]]; then
    kratos::readlinkdashf "$1"
  else
    echo "$1: No such file or directory" >&2
    return 1
  fi
}

13
build/lib/lib.sh Normal file
View File

@@ -0,0 +1,13 @@
set -o errexit
set -o nounset
set -o pipefail

# Unset CDPATH so that path interpolation can work correctly
# https://github.com/kratosrnetes/kratosrnetes/issues/52255
unset CDPATH

# The root of the build/dist directory.
# Use ${KRATOS_ROOT:-} so the probe is safe under `set -o nounset`, and close
# the conditional with `fi` (the original ended with a stray `if`, a syntax
# error that made this file unsourceable).
if [ -z "${KRATOS_ROOT:-}" ]; then
  KRATOS_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
fi

171
build/lib/logging.sh Normal file
View File

@@ -0,0 +1,171 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Controls verbosity of the script output and logging.
# Higher is chattier; callers may pre-set KRATOS_VERBOSE to override the
# default of 5. Compared against the per-call threshold V in
# kratos::log::info and kratos::log::status below.
KRATOS_VERBOSE="${KRATOS_VERBOSE:-5}"
# Handler for when we exit automatically on an error.
# Borrowed from https://gist.github.com/ahendrix/7030300
# Prints the call tree plus the failing command and status, then exits via
# kratos::log::error_exit. Installed as an ERR trap by
# kratos::log::install_errexit below.
kratos::log::errexit() {
# Exit status of each stage of the last pipeline, captured immediately
# before any other command can clobber PIPESTATUS.
local err="${PIPESTATUS[@]}"
# If the shell we are in doesn't have errexit set (common in subshells) then
# don't dump stacks.
set +o | grep -qe "-o errexit" || return
set +o xtrace
local code="${1:-1}"
# Print out the stack trace described by $function_stack
if [ ${#FUNCNAME[@]} -gt 2 ]
then
kratos::log::error "Call tree:"
# Frame 0 is this handler and the outermost frame is the script itself;
# print only the frames in between.
for ((i=1;i<${#FUNCNAME[@]}-1;i++))
do
kratos::log::error " $i: ${BASH_SOURCE[$i+1]}:${BASH_LINENO[$i]} ${FUNCNAME[$i]}(...)"
done
fi
# NOTE(review): $code above holds the same "${1:-1}" value passed here.
kratos::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status $err" "${1:-1}" 1
}
# Install kratos::log::errexit as the ERR trap: a more verbose equivalent of
# `set -o errexit` that dumps a call tree before exiting.
kratos::log::install_errexit() {
  # errtrace propagates the ERR trap into functions, command substitutions
  # and subshells, so the handler fires wherever the failure happens.
  set -o errtrace
  trap 'kratos::log::errexit' ERR
}
# Print out the stack trace
#
# Args:
#   $1 The number of stack frames to skip when printing.
# Output goes to stderr; frames come from FUNCNAME/BASH_SOURCE/BASH_LINENO.
kratos::log::stack() {
local stack_skip=${1:-0}
# Also skip this function's own frame.
stack_skip=$((stack_skip + 1))
if [[ ${#FUNCNAME[@]} -gt $stack_skip ]]; then
echo "Call stack:" >&2
local i
for ((i=1 ; i <= ${#FUNCNAME[@]} - $stack_skip ; i++))
do
# frame_no indexes the caller's frames, offset past the skipped ones.
local frame_no=$((i - 1 + stack_skip))
local source_file=${BASH_SOURCE[$frame_no]}
local source_lineno=${BASH_LINENO[$((frame_no - 1))]}
local funcname=${FUNCNAME[$frame_no]}
echo " $i: ${source_file}:${source_lineno} ${funcname}(...)" >&2
done
fi
}
# Log an error and exit.
# Args:
#   $1 Message to log with the error
#   $2 The error code to return
#   $3 The number of stack frames to skip when printing.
# When KRATOS_VERBOSE >= 4 this prints the failing source location, the
# message, and a stack trace before exiting; otherwise it exits silently
# with the given code.
kratos::log::error_exit() {
local message="${1:-}"
local code="${2:-1}"
local stack_skip="${3:-0}"
# Skip this function's own frame as well.
stack_skip=$((stack_skip + 1))
if [[ ${KRATOS_VERBOSE} -ge 4 ]]; then
local source_file=${BASH_SOURCE[$stack_skip]}
local source_line=${BASH_LINENO[$((stack_skip - 1))]}
echo "!!! Error in ${source_file}:${source_line}" >&2
# NOTE(review): $message is captured above but ${1} is echoed directly;
# they are the same value.
[[ -z ${1-} ]] || {
echo " ${1}" >&2
}
kratos::log::stack $stack_skip
echo "Exiting with status ${code}" >&2
fi
exit "${code}"
}
# Log an error to stderr but keep going; no stack dump, no exit.
# $1 gets a "!!!" + timestamp prefix; remaining args print as extra lines.
kratos::log::error() {
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "!!! $timestamp ${1-}" >&2
  shift
  for extra in "$@"; do
    echo " $extra" >&2
  done
}
# Print a usage message to stderr: a blank line, each argument on its own
# line, then another blank line.
kratos::log::usage() {
  echo >&2
  local msg
  for msg in "$@"; do
    echo "$msg" >&2
  done
  echo >&2
}
# Read usage text from stdin (one message per line) and forward every line
# to kratos::log::usage.
kratos::log::usage_from_stdin() {
  local -a buffered=()
  local line
  while read -r line; do
    buffered+=("$line")
  done
  kratos::log::usage "${buffered[@]}"
}
# Print out some info that isn't a top level status line.
# Each argument is echoed on its own line to stdout. Output is suppressed
# when the global KRATOS_VERBOSE is below the caller-supplied threshold V
# (default 0).
kratos::log::info() {
  local V="${V:-0}"
  # -lt forces a numeric comparison. The original used `<`, which inside
  # [[ ]] compares lexicographically, so e.g. verbosity 10 was wrongly
  # treated as lower than threshold 9.
  if [[ "${KRATOS_VERBOSE:-5}" -lt "${V}" ]]; then
    return
  fi
  local message
  for message in "$@"; do
    echo "$message"
  done
}
# Just like kratos::log::info, but with no trailing \n so the caller can
# render a progress bar; backslash escapes in the arguments are honored.
kratos::log::progress() {
  local chunk
  for chunk in "$@"; do
    echo -en "$chunk"
  done
}
# Read info text from stdin (one message per line) and forward every line
# to kratos::log::info.
kratos::log::info_from_stdin() {
  local -a buffered=()
  local line
  while read -r line; do
    buffered+=("$line")
  done
  kratos::log::info "${buffered[@]}"
}
# Print a status line. Formatted to show up in a stream of output:
# "+++ [timestamp] $1", then each remaining argument indented on its own
# line. Suppressed when the global KRATOS_VERBOSE is below the
# caller-supplied threshold V (default 0).
kratos::log::status() {
  local V="${V:-0}"
  # -lt forces a numeric comparison. The original used `<`, which inside
  # [[ ]] compares lexicographically (so verbosity 10 looked lower than 9).
  if [[ "${KRATOS_VERBOSE:-5}" -lt "${V}" ]]; then
    return
  fi
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "+++ $timestamp $1"
  shift
  local message
  for message in "$@"; do
    echo "    $message"
  done
}

825
build/lib/util.sh Normal file
View File

@@ -0,0 +1,825 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Emit the current time as YYYYMMDD-HHMMSS, which sorts chronologically.
kratos::util::sortable_date() {
  date '+%Y%m%d-%H%M%S'
}
# Poll a URL until it answers or the tries run out.
#   $1 - url to probe
#   $2 - log prefix (default empty)
#   $3 - seconds to sleep between tries (default 1)
#   $4 - number of tries (default 30)
# Returns 0 as soon as curl fetches the URL (logging the body via
# kratos::log::status), 1 after all tries have failed.
kratos::util::wait_for_url() {
  local url=$1
  local prefix=${2:-}
  local wait=${3:-1}
  local times=${4:-30}

  # command -v is the portable, builtin replacement for `which`.
  command -v curl >/dev/null || {
    kratos::log::usage "curl must be installed"
    exit 1
  }

  local i
  for i in $(seq 1 "$times"); do
    local out
    if out=$(curl --max-time 1 -gkfs "$url" 2>/dev/null); then
      kratos::log::status "On try ${i}, ${prefix}: ${out}"
      return 0
    fi
    sleep "${wait}"
  done
  kratos::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
  return 1
}
# Append a command to an existing trap without clobbering what is already
# installed.
# Example: kratos::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
#   $1   - command to run when the trap fires
#   $2.. - one or more trap names (signals) to attach it to
kratos::util::trap_add() {
  local trap_add_cmd
  trap_add_cmd=$1
  shift

  # local-ize the loop variable (the original leaked it into the caller).
  local trap_add_name
  for trap_add_name in "$@"; do
    local existing_cmd
    local new_cmd
    # Grab the currently defined trap commands for this trap; the command
    # text sits between the single quotes of `trap -p` output.
    # $(...) replaces the original backticks.
    existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')
    if [[ -z "${existing_cmd}" ]]; then
      new_cmd="${trap_add_cmd}"
    else
      # Prepend the new command so it runs before the existing ones.
      new_cmd="${trap_add_cmd};${existing_cmd}"
    fi
    # Install the combined trap.
    trap "${new_cmd}" "${trap_add_name}"
  done
}
# Opposite of kratos::util::ensure-temp-dir(): delete the session temp dir.
kratos::util::cleanup-temp-dir() {
  # `--` guards against a KRATOS_TEMP value that begins with a dash being
  # parsed as an rm option.
  rm -rf -- "${KRATOS_TEMP}"
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KRATOS_TEMP
kratos::util::ensure-temp-dir() {
  # Idempotent: only create the directory once per session.
  if [[ -n ${KRATOS_TEMP-} ]]; then
    return 0
  fi
  # The -t fallback covers mktemp variants that require a template.
  KRATOS_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kratosrnetes.XXXXXX)
  kratos::util::trap_add kratos::util::cleanup-temp-dir EXIT
}
# This figures out the host platform without relying on golang. We need this
# as we don't want a golang install to be a prerequisite to building yet we
# need this info to figure out where the final binaries are placed.
# Prints "<os>/<arch>" (e.g. "linux/amd64") on stdout; exits 1 on
# unsupported OS or architecture.
kratos::util::host_platform() {
  local host_os
  local host_arch

  case "$(uname -s)" in
    Darwin) host_os=darwin ;;
    Linux)  host_os=linux ;;
    *)
      kratos::log::error "Unsupported host OS. Must be Linux or Mac OS X."
      exit 1
      ;;
  esac

  # Arms are merged with | but keep the original match order.
  case "$(uname -m)" in
    x86_64* | i?86_64* | amd64*) host_arch=amd64 ;;
    aarch64* | arm64*)           host_arch=arm64 ;;
    arm*)                        host_arch=arm ;;
    i?86*)                       host_arch=x86 ;;
    s390x*)                      host_arch=s390x ;;
    ppc64le*)                    host_arch=ppc64le ;;
    *)
      kratos::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
      exit 1
      ;;
  esac

  echo "${host_os}/${host_arch}"
}
# Locate the most recently built copy of binary $1 for platform $2
# ("os/arch"). Searches the conventional _output trees, the per-platform
# tree and the bazel-bin tree; prints the newest match (no trailing
# newline), or nothing if no candidate exists.
kratos::util::find-binary-for-platform() {
local -r lookfor="$1"
local -r platform="$2"
local locations=(
"${KRATOS_ROOT}/_output/bin/${lookfor}"
"${KRATOS_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
"${KRATOS_ROOT}/_output/local/bin/${platform}/${lookfor}"
"${KRATOS_ROOT}/platforms/${platform}/${lookfor}"
)
# Also search for binary in bazel build tree.
# The bazel go rules place binaries in subtrees like
# "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
# the platform name is matched in the path.
# ${platform/\//_} turns "linux/amd64" into "linux_amd64" for the -path glob;
# the unquoted $(find ...) deliberately word-splits one path per array entry.
locations+=($(find "${KRATOS_ROOT}/bazel-bin/" -type f -executable \
-path "*/${platform/\//_}*/${lookfor}" 2>/dev/null || true) )
# List most recently-updated location.
local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
echo -n "${bin}"
}
# Locate binary $1 for the current host platform; thin wrapper around
# kratos::util::find-binary-for-platform.
kratos::util::find-binary() {
  local platform
  platform="$(kratos::util::host_platform)"
  kratos::util::find-binary-for-platform "$1" "${platform}"
}
# Run all known doc generators (today gendocs and genman for kratosctl)
# $1 is the directory to put those generated documents
# Each generator binary is located via kratos::util::find-binary. After
# generation, the list of produced files is recorded in
# $1/docs/.generated_docs (consumed by set-placeholder-gen-docs and
# remove-gen-docs below).
kratos::util::gen-docs() {
local dest="$1"
# Find binary
gendocs=$(kratos::util::find-binary "gendocs")
genkratosdocs=$(kratos::util::find-binary "genkratosdocs")
genman=$(kratos::util::find-binary "genman")
genyaml=$(kratos::util::find-binary "genyaml")
# NOTE(review): genfeddocs is located but never invoked below — confirm
# whether it is still needed.
genfeddocs=$(kratos::util::find-binary "genfeddocs")
mkdir -p "${dest}/docs/user-guide/kratosctl/"
"${gendocs}" "${dest}/docs/user-guide/kratosctl/"
mkdir -p "${dest}/docs/admin/"
"${genkratosdocs}" "${dest}/docs/admin/" "kratos-apiserver"
"${genkratosdocs}" "${dest}/docs/admin/" "kratos-controller-manager"
"${genkratosdocs}" "${dest}/docs/admin/" "cloud-controller-manager"
"${genkratosdocs}" "${dest}/docs/admin/" "kratos-proxy"
"${genkratosdocs}" "${dest}/docs/admin/" "kratos-scheduler"
"${genkratosdocs}" "${dest}/docs/admin/" "kratoslet"
"${genkratosdocs}" "${dest}/docs/admin/" "kratosadm"
mkdir -p "${dest}/docs/man/man1/"
"${genman}" "${dest}/docs/man/man1/" "kratos-apiserver"
"${genman}" "${dest}/docs/man/man1/" "kratos-controller-manager"
"${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager"
"${genman}" "${dest}/docs/man/man1/" "kratos-proxy"
"${genman}" "${dest}/docs/man/man1/" "kratos-scheduler"
"${genman}" "${dest}/docs/man/man1/" "kratoslet"
"${genman}" "${dest}/docs/man/man1/" "kratosctl"
"${genman}" "${dest}/docs/man/man1/" "kratosadm"
mkdir -p "${dest}/docs/yaml/kratosctl/"
"${genyaml}" "${dest}/docs/yaml/kratosctl/"
# create the list of generated files
pushd "${dest}" > /dev/null
touch docs/.generated_docs
find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
popd > /dev/null
}
# Puts a placeholder for every generated doc. This makes the link checker work.
# Reads ${KRATOS_ROOT}/docs/.generated_docs (one repo-relative path per line)
# and overwrites each listed file with the placeholder text; the list file
# itself is skipped.
kratos::util::set-placeholder-gen-docs() {
  local list_file="${KRATOS_ROOT}/docs/.generated_docs"
  if [[ -e "${list_file}" ]]; then
    # remove all of the old docs; we don't want to check them in.
    # IFS= and -r keep paths with leading spaces or backslashes intact
    # (the original `read file` mangled them).
    local file
    while IFS= read -r file; do
      if [[ "${list_file}" != "${KRATOS_ROOT}/${file}" ]]; then
        cp "${KRATOS_ROOT}/build/autogenerated_placeholder.txt" "${KRATOS_ROOT}/${file}"
      fi
    done <"${list_file}"
    # The docs/.generated_docs file lists itself, so we don't need to explicitly
    # delete it.
  fi
}
# Removes previously generated docs-- we don't want to check them in. $KRATOS_ROOT
# must be set.
# Deletes every file listed in docs/.generated_docs (paths are relative to
# ${KRATOS_ROOT}); missing files are ignored.
kratos::util::remove-gen-docs() {
  if [ -e "${KRATOS_ROOT}/docs/.generated_docs" ]; then
    # remove all of the old docs; we don't want to check them in.
    # IFS= and -r keep paths with leading spaces or backslashes intact
    # (the original `read file` mangled them).
    local file
    while IFS= read -r file; do
      rm "${KRATOS_ROOT}/${file}" 2>/dev/null || true
    done <"${KRATOS_ROOT}/docs/.generated_docs"
    # The docs/.generated_docs file lists itself, so we don't need to explicitly
    # delete it.
  fi
}
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
# * default behavior for only a group: experimental -> apis/experimental
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
# * Very special handling for when both group and version are "": / -> api
kratos::util::group-version-to-pkg-path() {
# Collect the group/versions that live under staging/src/k8s.io/api (one per
# directory that contains a types.go); these map to vendor/k8s.io/api paths.
staging_apis=(
$(
cd "${KRATOS_ROOT}/staging/src/k8s.io/api" &&
find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort
))
local group_version="$1"
# ${group_version/.*k8s.io/} strips a "<group>.k8s.io" prefix before probing
# the staging list; the padded-with-spaces substring match checks membership.
if [[ " ${staging_apis[@]} " =~ " ${group_version/.*k8s.io/} " ]]; then
echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
return
fi
# "v1" is the API GroupVersion
if [[ "${group_version}" == "v1" ]]; then
echo "vendor/k8s.io/api/core/v1"
return
fi
# Special cases first.
# TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
# moving the results to pkg/apis/api.
case "${group_version}" in
# both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
__internal)
echo "pkg/apis/core"
;;
meta/v1)
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
;;
meta/v1beta1)
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
;;
unversioned)
echo "pkg/api/unversioned"
;;
*.k8s.io)
echo "pkg/apis/${group_version%.*k8s.io}"
;;
*.k8s.io/*)
echo "pkg/apis/${group_version/.*k8s.io/}"
;;
*)
# Default: strip a trailing "__internal" (if any) and nest under pkg/apis.
echo "pkg/apis/${group_version%__internal}"
;;
esac
}
# Takes a group/version and returns the swagger-spec file name.
# default behavior: extensions/v1beta1 -> extensions_v1beta1
# special case for v1: v1 -> v1
kratos::util::gv-to-swagger-name() {
  local gv="$1"
  # The legacy core GroupVersion keeps its bare name; everything else is
  # "<group>_<version>".
  if [[ "${gv}" == "v1" ]]; then
    echo "v1"
  else
    echo "${gv%/*}_${gv#*/}"
  fi
}
# Fetches swagger spec from apiserver.
# Assumed vars:
# SWAGGER_API_PATH: Base path for swaggerapi on apiserver. Ex:
# http://localhost:8080/swaggerapi.
# SWAGGER_ROOT_DIR: Root dir where we want to to save the fetched spec.
# VERSIONS: Array of group versions to include in swagger spec.
kratos::util::fetch-swagger-spec() {
for ver in ${VERSIONS}; do
# Skip group versions that are not served by the apiserver.
if [[ " ${KRATOS_NONSERVER_GROUP_VERSIONS} " == *" ${ver} "* ]]; then
continue
fi
# fetch the swagger spec for each group version.
# Legacy "v1" lives under /api; everything else under /apis.
if [[ ${ver} == "v1" ]]; then
SUBPATH="api"
else
SUBPATH="apis"
fi
SUBPATH="${SUBPATH}/${ver}"
SWAGGER_JSON_NAME="$(kratos::util::gv-to-swagger-name ${ver}).json"
curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
# fetch the swagger spec for the discovery mechanism at group level.
# "v1" has no group-level discovery document, so skip it.
if [[ ${ver} == "v1" ]]; then
continue
fi
SUBPATH="apis/"${ver%/*}
SWAGGER_JSON_NAME="${ver%/*}.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
done
# fetch swagger specs for other discovery mechanism.
curl -w "\n" -fs "${SWAGGER_API_PATH}" > "${SWAGGER_ROOT_DIR}/resourceListing.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}version" > "${SWAGGER_ROOT_DIR}/version.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}api" > "${SWAGGER_ROOT_DIR}/api.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}apis" > "${SWAGGER_ROOT_DIR}/apis.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}logs" > "${SWAGGER_ROOT_DIR}/logs.json"
}
# Returns the name of the upstream remote repository name for the local git
# repo, e.g. "upstream" or "origin".
# NOTE(review): the grep pattern matches "kratosrnetes", which looks like a
# search-and-replace artifact of "kubernetes" — confirm it matches the actual
# remote URLs of this project. (It is a runtime pattern, so it is left
# unchanged here.)
kratos::util::git_upstream_remote_name() {
git remote -v | grep fetch |\
grep -E 'github.com[/:]kratosrnetes/kratosrnetes|k8s.io/kratosrnetes' |\
head -n 1 | awk '{print $1}'
}
# Ensures the current directory is a git tree for doing things like restoring or
# validating godeps
# Arguments: $1 - directory to initialize (defaults to the current directory).
# Side effects: creates a new git repo with one "Snapshot" commit of all files.
kratos::util::create-fake-git-tree() {
local -r target_dir=${1:-$(pwd)}
pushd "${target_dir}" >/dev/null
git init >/dev/null
# Local identity only; never touches the user's global git config.
git config --local user.email "nobody@k8s.io"
git config --local user.name "$0"
git add . >/dev/null
git commit -q -m "Snapshot" >/dev/null
# Only announce at verbosity >= 6 to keep normal runs quiet.
if (( ${KRATOS_VERBOSE:-5} >= 6 )); then
kratos::log::status "${target_dir} is now a git tree."
fi
popd >/dev/null
}
# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
# and are checked out to the referenced rev.
# Arguments: $1 - Godeps.json path (default Godeps/Godeps.json)
#            $2 - GOPATH entry to check (default: first entry of $GOPATH)
# Returns: 0 if every dep is present at the expected revision, 1 otherwise.
kratos::util::godep_restored() {
local -r godeps_json=${1:-Godeps/Godeps.json}
local -r gopath=${2:-${GOPATH%:*}}
if ! which jq &>/dev/null; then
echo "jq not found. Please install." 1>&2
return 1
fi
local root
local old_rev=""
# Reads "ImportPath Rev" pairs produced by jq from Godeps.json.
while read path rev; do
rev=$(echo "${rev}" | sed "s/['\"]//g") # remove quotes which are around revs sometimes
if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then
# avoid checking the same git/hg root again
continue
fi
root="${path}"
# Walk up the import path until we find the enclosing git or hg checkout.
while [ "${root}" != "." -a ! -d "${gopath}/src/${root}/.git" -a ! -d "${gopath}/src/${root}/.hg" ]; do
root=$(dirname "${root}")
done
if [ "${root}" == "." ]; then
echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2
return 1
fi
local head
# Resolve the checked-out revision for either VCS.
if [ -d "${gopath}/src/${root}/.git" ]; then
head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)"
else
head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')"
fi
if [ "${head}" != "${rev}" ]; then
echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2
return 1
fi
old_rev="${rev}"
done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}")
return 0
}
# Exits script if working directory is dirty. If it's run interactively in the terminal
# the user can commit changes in a second terminal. This script will wait.
kratos::util::ensure_clean_working_dir() {
# Loop until the tree is clean; without a tty the first iteration exits 1.
while ! git diff HEAD --exit-code &>/dev/null; do
echo -e "\nUnexpected dirty working directory:\n"
# The if/else feeds a single sed that indents whichever output was produced.
if tty -s; then
git status -s
else
git diff -a # be more verbose in log files without tty
exit 1
fi | sed 's/^/ /'
echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
read
done 1>&2
}
# Ensure that the given godep version is installed and in the path. Almost
# nobody should use any version but the default.
# Arguments: $1 - desired godep version string (default "v79").
# Side effects: may build the vendored godep and prepend $GOPATH/bin to PATH.
kratos::util::ensure_godep_version() {
GODEP_VERSION=${1:-"v79"} # this version is known to work
if [[ "$(godep version 2>/dev/null)" == *"godep ${GODEP_VERSION}"* ]]; then
return
fi
kratos::log::status "Installing godep version ${GODEP_VERSION}"
# Build the vendored copy so we get a known-good binary.
go install ./vendor/github.com/tools/godep/
GP="$(echo $GOPATH | cut -f1 -d:)"
hash -r # force bash to clear PATH cache
PATH="${GP}/bin:${PATH}"
# Verify the freshly installed binary really is the requested version.
if [[ "$(godep version 2>/dev/null)" != *"godep ${GODEP_VERSION}"* ]]; then
kratos::log::error "Expected godep ${GODEP_VERSION}, got $(godep version)"
return 1
fi
}
# Ensure that none of the staging repos is checked out in the GOPATH because this
# easily confused godep.
# Exits 1 if any staging repo under staging/src/k8s.io also exists in GOPATH.
kratos::util::ensure_no_staging_repos_in_gopath() {
kratos::util::ensure_single_dir_gopath
local error=0
for repo_file in "${KRATOS_ROOT}"/staging/src/k8s.io/*; do
if [[ ! -d "$repo_file" ]]; then
# not a directory or there were no files
continue;
fi
repo="$(basename "$repo_file")"
if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then
echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2
error=1
fi
done
# Report all offenders before failing, rather than stopping at the first.
if [ "${error}" = "1" ]; then
exit 1
fi
}
# Installs the specified go package at a particular commit.
# Arguments: $1 - go import path, $2 - commit-ish to check out.
# Side effects: builds into a temp GOPATH and prepends its bin dir to PATH.
kratos::util::go_install_from_commit() {
local -r pkg=$1
local -r commit=$2
kratos::util::ensure-temp-dir
mkdir -p "${KRATOS_TEMP}/go/src"
# -d fetches without building so we can pin the commit first.
GOPATH="${KRATOS_TEMP}/go" go get -d -u "${pkg}"
(
cd "${KRATOS_TEMP}/go/src/${pkg}"
git checkout -q "${commit}"
GOPATH="${KRATOS_TEMP}/go" go install "${pkg}"
)
PATH="${KRATOS_TEMP}/go/bin:${PATH}"
hash -r # force bash to clear PATH cache
}
# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
# Exits 1 (with a message on stderr) when GOPATH contains a colon.
kratos::util::ensure_single_dir_gopath() {
  case "${GOPATH}" in
    *:*)
      echo "GOPATH must consist of a single directory." 1>&2
      exit 1
      ;;
  esac
}
# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# Returns 1 (false) if there are no changes, 0 (true) if there are changes
# detected.
# Arguments: $1 - upstream branch, $2 - grep pattern to match,
#            $3 - optional grep -E pattern of paths to ignore.
kratos::util::has_changes_against_upstream_branch() {
local -r git_branch=$1
local -r pattern=$2
local -r not_pattern=${3:-totallyimpossiblepattern}
local full_branch
full_branch="$(kratos::util::git_upstream_remote_name)/${git_branch}"
echo "Checking for '${pattern}' changes against '${full_branch}'"
# make sure the branch is valid, otherwise the check will pass erroneously.
if ! git describe "${full_branch}" >/dev/null; then
# abort!
exit 1
fi
# notice this uses ... to find the first shared ancestor
if git diff --name-only "${full_branch}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
return 0
fi
# also check for pending changes
if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
echo "Detected '${pattern}' uncommitted changes."
return 0
fi
echo "No '${pattern}' changes detected."
return 1
}
# kratos::util::download_file <url> <destination_file>
# Downloads <url> to <destination_file>, retrying up to 5 times.
# Returns 0 on success, 1 after all retries are exhausted.
kratos::util::download_file() {
  local -r url=$1
  local -r destination_file=$2
  # Remove any stale partial download; ignore a missing file.
  # (The original used "rm ${destination_file} 2&> /dev/null", which parses as
  # an extra literal argument "2" plus "&>" — it never suppressed errors and
  # tried to delete a file named "2".)
  rm -f -- "${destination_file}" 2>/dev/null || true
  local i
  for i in $(seq 5)
  do
    if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
      echo "Downloading ${url} failed. $((5-i)) retries left."
      sleep 1
    else
      echo "Downloading ${url} succeed"
      return 0
    fi
  done
  return 1
}
# Test whether openssl is installed.
# Sets:
#  OPENSSL_BIN: The path to the openssl binary to use
function kratos::util::test_openssl_installed {
  # Test the command directly instead of inspecting $? on the next line,
  # which silently breaks if any statement is ever inserted in between.
  if ! openssl version >& /dev/null; then
    echo "Failed to run openssl. Please ensure openssl is installed"
    exit 1
  fi
  OPENSSL_BIN=$(command -v openssl)
}
# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
# '"client auth"'
# '"server auth"'
# '"client auth","server auth"'
# Globals: OPENSSL_BIN (read) — set by kratos::util::test_openssl_installed.
# Produces <id>-ca.crt, <id>-ca.key and a cfssl signing config in dest-dir.
function kratos::util::create_signing_certkey {
local sudo=$1
local dest_dir=$2
local id=$3
local purpose=$4
# Create client ca
# The heredoc is unquoted, so ${dest_dir}/${id}/${purpose}/${OPENSSL_BIN}
# expand in THIS shell before the (possibly sudo'd) bash runs the script.
${sudo} /bin/bash -e <<EOF
rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
# Globals: CFSSL_BIN, CFSSLJSON_BIN (read) — set by kratos::util::ensure-cfssl.
# Produces client-<id>.crt / client-<id>.key signed by <ca> in dest-dir.
function kratos::util::create_client_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local groups=""
local SEP=""
shift 5
# Remaining args become {"O":"<group>"} entries in the CSR's names list.
while [ -n "${1:-}" ]; do
groups+="${SEP}{\"O\":\"$1\"}"
SEP=","
shift 1
done
# Unquoted heredoc: all variables expand in this shell before bash -e runs.
${sudo} /bin/bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
mv "client-${id}-key.pem" "client-${id}.key"
mv "client-${id}.pem" "client-${id}.crt"
rm -f "client-${id}.csr"
EOF
}
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
# Globals: CFSSL_BIN, CFSSLJSON_BIN (read) — set by kratos::util::ensure-cfssl.
# Produces serving-<id>.crt / serving-<id>.key signed by <ca> in dest-dir.
function kratos::util::create_serving_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local hosts=""
local SEP=""
shift 5
# Remaining args become the SAN hosts list of the CSR.
while [ -n "${1:-}" ]; do
hosts+="${SEP}\"$1\""
SEP=","
shift 1
done
# Unquoted heredoc: all variables expand in this shell before bash -e runs.
${sudo} /bin/bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
mv "serving-${id}-key.pem" "serving-${id}.key"
mv "serving-${id}.pem" "serving-${id}.crt"
rm -f "serving-${id}.csr"
EOF
}
# creates a self-contained kratosconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
# Writes <dest_dir>/<client_id>.kratosconfig pointing at https://<host>:<port>/
# using the client-<client_id> cert pair, then flattens it so the cert data is
# embedded and the file is portable.
function kratos::util::write_client_kratosconfig {
local sudo=$1
local dest_dir=$2
local ca_file=$3
local api_host=$4
local api_port=$5
local client_id=$6
local token=${7:-}
cat <<EOF | ${sudo} tee "${dest_dir}"/${client_id}.kratosconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: ${ca_file}
server: https://${api_host}:${api_port}/
name: local-up-cluster
users:
- user:
token: ${token}
client-certificate: ${dest_dir}/client-${client_id}.crt
client-key: ${dest_dir}/client-${client_id}.key
name: local-up-cluster
contexts:
- context:
cluster: local-up-cluster
user: local-up-cluster
name: local-up-cluster
current-context: local-up-cluster
EOF
# flatten the kratosconfig files to make them self contained
username=$(whoami)
# chown back to the invoking user because tee may have run under sudo.
${sudo} /bin/bash -e <<EOF
$(kratos::util::find-binary kratosctl) --kratosconfig="${dest_dir}/${client_id}.kratosconfig" config view --minify --flatten > "/tmp/${client_id}.kratosconfig"
mv -f "/tmp/${client_id}.kratosconfig" "${dest_dir}/${client_id}.kratosconfig"
chown ${username} "${dest_dir}/${client_id}.kratosconfig"
EOF
}
# Determines if docker can be run, failures may simply require that the user be added to the docker group.
# Globals: DOCKER_OPTS (read) — extra args passed to the docker CLI.
# Returns 1 (after printing troubleshooting hints) when `docker info` fails.
function kratos::util::ensure_docker_daemon_connectivity {
DOCKER=(docker ${DOCKER_OPTS})
if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
# Quoted 'EOF' keeps the hint text literal ($ examples must not expand).
cat <<'EOF' >&2
Can't connect to 'docker' daemon. please fix and retry.
Possible causes:
- Docker Daemon not started
- Linux: confirm via your init system
- macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
- macOS w/ Docker for Mac: Check the menu bar and start the Docker application
- DOCKER_HOST hasn't been set or is set incorrectly
- Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
- macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
- macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
- Other things to check:
- Linux: User isn't in 'docker' group. Add and relogin.
- Something like 'sudo usermod -a -G docker ${USER}'
- RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
return 1
fi
}
# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
# The return value is the number of jobs that exited non-zero.
kratos::util::wait-for-jobs() {
  local failures=0
  local pid
  for pid in $(jobs -p); do
    if ! wait "${pid}"; then
      failures=$((failures + 1))
    fi
  done
  return ${failures}
}
# kratos::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kratos::util::join , a b c
# -> a,b,c
function kratos::util::join {
  local sep=$1
  shift
  # "$*" joins the remaining args with the first character of IFS.
  local IFS="${sep}"
  echo "$*"
}
# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
#   $1 (cfssl directory) (optional)
#
# Sets:
#   CFSSL_BIN: The path of the installed cfssl binary
#   CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kratos::util::ensure-cfssl {
# Prefer binaries already on PATH; nothing to download in that case.
if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
CFSSL_BIN=$(command -v cfssl)
CFSSLJSON_BIN=$(command -v cfssljson)
return 0
fi
# Create a temp dir for cfssl if no directory was given
local cfssldir=${1:-}
if [[ -z "${cfssldir}" ]]; then
kratos::util::ensure-temp-dir
cfssldir="${KRATOS_TEMP}/cfssl"
fi
mkdir -p "${cfssldir}"
pushd "${cfssldir}" > /dev/null
echo "Unable to successfully run 'cfssl' from $PATH; downloading instead..."
kernel=$(uname -s)
case "${kernel}" in
Linux)
curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
;;
Darwin)
curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
;;
*)
echo "Unknown, unsupported platform: ${kernel}." >&2
echo "Supported platforms: Linux, Darwin." >&2
exit 2
esac
# chmod may fail if the download failed; the -x check below catches that.
chmod +x cfssl || true
chmod +x cfssljson || true
CFSSL_BIN="${cfssldir}/cfssl"
CFSSLJSON_BIN="${cfssldir}/cfssljson"
if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
exit 1
fi
popd > /dev/null
}
# kratos::util::ensure_dockerized
# Confirms that the script is being run inside a kratos-build image
#
# The build image drops a marker file at /kratos-build-image; its presence is
# the only signal checked. Exits 1 with an error message otherwise.
function kratos::util::ensure_dockerized {
  if [[ ! -f /kratos-build-image ]]; then
    echo "ERROR: This script is designed to be run inside a kratos-build container"
    exit 1
  fi
  return 0
}
# kratos::util::ensure-gnu-sed
# Determines which sed binary is gnu-sed on linux/darwin
#
# Sets:
#   SED: The name of the gnu-sed binary
#
function kratos::util::ensure-gnu-sed {
  # LANG=C keeps --help output in English so the GNU marker is detectable.
  # Fall back to "gsed", the Homebrew name for GNU sed on macOS.
  if LANG=C sed --help 2>&1 | grep -q GNU; then
    SED="sed"
    return 0
  fi
  if which gsed &>/dev/null; then
    SED="gsed"
    return 0
  fi
  kratos::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
  return 1
}
# Installs Homebrew on macOS if the `brew` command is not available.
# Side effects: runs the upstream Homebrew install script via ruby/curl.
function kratos::util::ensure-homebrew {
if ! brew --version > /dev/null ; then
echo "install homebrew..."
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
}
# Installs bazel via Homebrew if no bazel formula is currently installed.
# Assumes Homebrew itself is present (see kratos::util::ensure-homebrew).
function kratos::util::ensure-homebrew-bazel {
if ! brew ls --versions bazel > /dev/null ; then
echo "install bazel..."
brew install bazel
fi
}
# Warns when bazel cannot be run (Linux path; macOS uses the homebrew helpers).
# NOTE(review): this only prints a message and still returns 0 when bazel is
# missing — confirm callers intend the check to be advisory, not fatal.
function kratos::util::ensure-bazel {
if ! bazel version > /dev/null ; then
echo "Please install bazel by being compiled from code."
fi
}
# Some useful colors.
# ANSI escape prefixes used by the log helpers; guarded so sourcing this file
# twice does not trip over the readonly declarations.
if [[ -z "${color_start-}" ]]; then
declare -r color_start="\033["
declare -r color_red="${color_start}0;31m"
declare -r color_yellow="${color_start}0;33m"
declare -r color_green="${color_start}0;32m"
declare -r color_norm="${color_start}0m"
fi
# ex: ts=2 sw=2 et filetype=sh

176
build/lib/version.sh Normal file
View File

@@ -0,0 +1,176 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Version management helpers. These functions help to set, save and load the
# following variables:
#
# KRATOS_GIT_COMMIT - The git commit id corresponding to this
# source code.
# KRATOS_GIT_TREE_STATE - "clean" indicates no changes since the git commit id
# "dirty" indicates source code changes after the git commit id
# "archive" indicates the tree was produced by 'git archive'
# KRATOS_GIT_VERSION - "vX.Y" used to indicate the last release version.
# KRATOS_GIT_MAJOR - The major part of the version
# KRATOS_GIT_MINOR - The minor component of the version
# Grovels through git to set a set of env variables.
#
# If KRATOS_GIT_VERSION_FILE, this function will load from that file instead of
# querying git.
# Sets: KRATOS_BUILD_TIME, KRATOS_GIT_COMMIT, KRATOS_GIT_TREE_STATE,
#       KRATOS_GIT_VERSION, KRATOS_GIT_MAJOR, KRATOS_GIT_MINOR.
kratos::version::get_version_vars() {
KRATOS_BUILD_TIME=`date +%Y.%m.%d-%H:%M:%S%Z`
if [[ -n ${KRATOS_GIT_VERSION_FILE-} ]]; then
kratos::version::load_version_vars "${KRATOS_GIT_VERSION_FILE}"
return
fi
# If the kratosrnetes source was exported through git archive, then
# we likely don't have a git tree, but these magic values may be filled in.
# (In a normal checkout the '$Format:...$' placeholders stay literal, so the
# comparison below is false and this branch is skipped.)
if [[ '$Format:%%$' == "%" ]]; then
KRATOS_GIT_COMMIT='$Format:%H$'
KRATOS_GIT_TREE_STATE="archive"
# When a 'git archive' is exported, the '$Format:%D$' below will look
# something like 'HEAD -> release-1.8, tag: v1.8.3' where then 'tag: '
# can be extracted from it.
if [[ '$Format:%D$' =~ tag:\ (v[^ ]+) ]]; then
KRATOS_GIT_VERSION="${BASH_REMATCH[1]}"
fi
fi
local git=(git --work-tree "${KRATOS_ROOT}")
if [[ -n ${KRATOS_GIT_COMMIT-} ]] || KRATOS_GIT_COMMIT=$("${git[@]}" rev-parse "HEAD^{commit}" 2>/dev/null); then
if [[ -z ${KRATOS_GIT_TREE_STATE-} ]]; then
# Check if the tree is dirty. default to dirty
if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
KRATOS_GIT_TREE_STATE="clean"
else
KRATOS_GIT_TREE_STATE="dirty"
fi
fi
# Use git describe to find the version based on tags.
if [[ -n ${KRATOS_GIT_VERSION-} ]] || KRATOS_GIT_VERSION=$("${git[@]}" describe --tags --abbrev=14 "${KRATOS_GIT_COMMIT}^{commit}" 2>/dev/null); then
# This translates the "git describe" to an actual semver.org
# compatible semantic version that looks something like this:
# v1.1.0-alpha.0.6+84c76d1142ea4d
#
# TODO: We continue calling this "git version" because so many
# downstream consumers are expecting it there.
DASHES_IN_VERSION=$(echo "${KRATOS_GIT_VERSION}" | sed "s/[^-]//g")
if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
# We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
KRATOS_GIT_VERSION=$(echo "${KRATOS_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/")
elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
# We have distance to base tag (v1.1.0-1-gCommitHash)
KRATOS_GIT_VERSION=$(echo "${KRATOS_GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/")
fi
if [[ "${KRATOS_GIT_TREE_STATE}" == "dirty" ]]; then
# git describe --dirty only considers changes to existing files, but
# that is problematic since new untracked .go files affect the build,
# so use our idea of "dirty" from git status instead.
KRATOS_GIT_VERSION+="-dirty"
fi
# Try to match the "git describe" output to a regex to try to extract
# the "major" and "minor" versions and whether this is the exact tagged
# version or whether the tree is between two tagged versions.
if [[ "${KRATOS_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?([+].*)?$ ]]; then
KRATOS_GIT_MAJOR=${BASH_REMATCH[1]}
KRATOS_GIT_MINOR=${BASH_REMATCH[2]}
if [[ -n "${BASH_REMATCH[4]}" ]]; then
KRATOS_GIT_MINOR+="+"
fi
fi
# If KRATOS_GIT_VERSION is not a valid Semantic Version, then refuse to build.
#if ! [[ "${KRATOS_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?(-[0-9A-Za-z.-]+)?(\+[0-9A-Za-z.-]+)?$ ]]; then
# echo "KRATOS_GIT_VERSION should be a valid Semantic Version"
# echo "Please see more details here: https://semver.org"
# exit 1
#fi
fi
fi
}
# Saves the environment flags to $1
# Writes the KRATOS_GIT_* variables as shell assignments so the file can later
# simply be sourced by kratos::version::load_version_vars.
kratos::version::save_version_vars() {
  local version_file=${1-}
  if [[ -z "${version_file}" ]]; then
    echo "!!! Internal error. No file specified in kratos::version::save_version_vars"
    return 1
  fi
  cat <<EOF >"${version_file}"
KRATOS_GIT_COMMIT='${KRATOS_GIT_COMMIT-}'
KRATOS_GIT_TREE_STATE='${KRATOS_GIT_TREE_STATE-}'
KRATOS_GIT_VERSION='${KRATOS_GIT_VERSION-}'
KRATOS_GIT_MAJOR='${KRATOS_GIT_MAJOR-}'
KRATOS_GIT_MINOR='${KRATOS_GIT_MINOR-}'
EOF
}
# Loads up the version variables from file $1
# The file contains plain VAR='value' lines (written by
# kratos::version::save_version_vars), so sourcing it restores them.
kratos::version::load_version_vars() {
  local version_file=${1-}
  if [[ -z "${version_file}" ]]; then
    echo "!!! Internal error. No file specified in kratos::version::load_version_vars"
    return 1
  fi
  source "${version_file}"
}
# Emits the go-linker -X flags that stamp <key>=<val> into both the main
# version package and the vendored client-go copy (one flag per line).
# If you update these, also update the list pkg/version/def.bzl.
kratos::version::ldflag() {
  local key=${1}
  local val=${2}
  local pkg
  for pkg in \
      "${KRATOS_GO_PACKAGE}/pkg/version" \
      "${KRATOS_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version"; do
    echo "-X '${pkg}.${key}=${val}'"
  done
}
# Prints the value that needs to be passed to the -ldflags parameter of go build
# in order to set the Kubernetes based on the git tree status.
# IMPORTANT: if you update any of these, also update the lists in
# pkg/version/def.bzl and .build/print-workspace-status.sh.
kratos::version::ldflags() {
kratos::version::get_version_vars
local buildDate=
# SOURCE_DATE_EPOCH enables reproducible builds by pinning the build date.
[[ -z ${SOURCE_DATE_EPOCH-} ]] || buildDate="--date=@${SOURCE_DATE_EPOCH}"
local -a ldflags=($(kratos::version::ldflag "buildDate" "$(date ${buildDate} -u +'%Y-%m-%dT%H:%M:%SZ')"))
if [[ -n ${KRATOS_GIT_COMMIT-} ]]; then
ldflags+=($(kratos::version::ldflag "gitCommit" "${KRATOS_GIT_COMMIT}"))
ldflags+=($(kratos::version::ldflag "gitTreeState" "${KRATOS_GIT_TREE_STATE}"))
fi
if [[ -n ${KRATOS_GIT_VERSION-} ]]; then
ldflags+=($(kratos::version::ldflag "gitVersion" "${KRATOS_GIT_VERSION}"))
fi
if [[ -n ${KRATOS_GIT_MAJOR-} && -n ${KRATOS_GIT_MINOR-} ]]; then
ldflags+=(
$(kratos::version::ldflag "gitMajor" "${KRATOS_GIT_MAJOR}")
$(kratos::version::ldflag "gitMinor" "${KRATOS_GIT_MINOR}")
)
fi
# The -ldflags parameter takes a single string, so join the output.
echo "${ldflags[*]-}"
}

16
build/linter/BUILD Normal file
View File

@@ -0,0 +1,16 @@
# All files in this package, for consumption by the tree-wide :all-srcs rollup.
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
# Recursive source rollup: this package plus its sub-packages.
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//build/linter/internal:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

76
build/linter/deps.bzl Normal file
View File

@@ -0,0 +1,76 @@
# ****************************************************************
# List of external dependencies
# ****************************************************************
# Go linter toolchain repositories consumed by //build/linter:rules.bzl
# (go_lint_repositories). Keys are Bazel workspace names; each entry names
# the repository rule to instantiate plus its attributes. Only gometalinter
# is pinned to a tag; the other tools track their default branch.
DEPS = {
"com_github_alecthomas_gometalinter": {
"rule": "go_repository",
"importpath": "github.com/alecthomas/gometalinter",
"tag": "v2.0.5",
},
"com_github_tsenart_deadcode": {
"rule": "go_repository",
"importpath": "github.com/tsenart/deadcode",
},
"com_github_mdempsky_maligned": {
"rule": "go_repository",
"importpath": "github.com/mdempsky/maligned",
},
"com_github_mibk_dupl": {
"rule": "go_repository",
"importpath": "github.com/mibk/dupl",
},
"com_github_kisielk_errcheck": {
"rule": "go_repository",
"importpath": "github.com/kisielk/errcheck",
},
"com_github_goastscanner_gas": {
"rule": "go_repository",
"importpath": "github.com/GoASTScanner/gas",
},
"com_github_jgautheron_goconst": {
"rule": "go_repository",
"importpath": "github.com/jgautheron/goconst/cmd/goconst",
},
"com_github_alecthomas_gocyclo": {
"rule": "go_repository",
"importpath": "github.com/alecthomas/gocyclo",
},
"org_golang_x_goimports": {
"rule": "go_repository",
"importpath": "golang.org/x/tools/cmd/goimports",
},
"com_github_golang_lint": {
"rule": "go_repository",
"importpath": "github.com/golang/lint/golint",
},
"co_honnef_tools_gosimple": {
"rule": "go_repository",
"importpath": "honnef.co/go/tools/cmd/gosimple",
},
"org_golang_x_gotype": {
"rule": "go_repository",
"importpath": "golang.org/x/tools/cmd/gotype",
},
"com_github_gordonklaus_ineffassign": {
"rule": "go_repository",
"importpath": "github.com/gordonklaus/ineffassign",
},
}

View File

@@ -0,0 +1,13 @@
# All files in this package, for consumption by the :all-srcs rollup.
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
# Leaf package: the rollup is just this package's own sources.
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,17 @@
load("//build/linter:internal/require.bzl", "require")
load("//build/linter:deps.bzl", "DEPS")
# Thin wrapper around require(): resolves the requested workspace keys from
# lang_deps and instantiates any repository rules that are not already
# defined, returning the entries that still need special handling.
# NOTE(review): the DEPS load above appears unused in this file — confirm
# before removing.
def linter_repositories(excludes = [],
lang_deps = {},
lang_requires = [],
overrides = {},
strict = False,
verbose = 0):
return require(
keys = lang_requires,
deps = lang_deps,
excludes = excludes,
overrides = overrides,
verbose = verbose,
strict = strict,
)

View File

@@ -0,0 +1,89 @@
# Decides whether workspace rule `name` still needs to be instantiated.
# Returns False when a rule of that name already exists and either matches the
# requested pin (sha256/sha1/tag) or no pin was specified; fails (only with
# strict=True) on a genuine pin mismatch.
def _needs_install(name, dep, hkeys=["sha256", "sha1", "tag"], verbose=0, strict=False):
# Does it already exist?
existing_rule = native.existing_rule(name)
if not existing_rule:
return True
# If it has already been defined and our dependency lists a
# hash, do these match? If a hash mismatch is encountered, has
# the user specifically granted permission to continue?
for hkey in hkeys:
expected = dep.get(hkey)
actual = existing_rule.get(hkey)
if expected:
# By guarding the check of expected vs actual with a
# pre-check for actual=None (or empty string), we're
# basically saying "If the user did not bother to set a
# sha256 or sha1 hash for the rule, they probably don't
# care about overriding a dependency either, so don't
# complain about it." In particular, rules_go does not a
# set a sha256 for their com_google_protobuf http_archive
# rule, so this gives us a convenient loophole to prevent
# collisions on this dependency. The "strict" flag can be
# used as as master switch to disable blowing up the
# loading phase due to dependency collisions.
if actual and expected != actual and strict:
msg = """
An existing {0} rule '{1}' was already loaded with a {2} value of '{3}'. Refusing to overwrite this with the requested value ('{4}').
Either remove the pre-existing rule from your WORKSPACE or exclude it from loading by rules_protobuf (strict={5}.
""".format(existing_rule["kind"], name, hkey, actual, expected, strict)
fail(msg)
else:
if verbose > 1: print("Skip reload %s: %s = %s" % (name, hkey, actual))
return False
# No kheys for this rule - in this case no reload; first one loaded wins.
if verbose > 1: print("Skipping reload of existing target %s" % name)
return False
def _install(deps, verbose, strict):
"""Install a list of dependencies for matching native rules.
Each dep dict must carry a "rule" key naming the repository rule to run;
rules available on `native` are invoked immediately, others are returned.
Return:
list of deps that have no matching native rule.
"""
todo = []
for d in deps:
name = d.get("name")
# pop() removes "rule" so the remaining dict is pure rule kwargs.
rule = d.pop("rule", None)
if not rule:
fail("Missing attribute 'rule': %s" % name)
if hasattr(native, rule):
rule = getattr(native, rule)
if verbose: print("Loading %s)" % name)
rule(**d)
else:
# Not a native rule (e.g. go_repository): restore the key and defer
# to the caller, which knows how to load it.
d["rule"] = rule
todo.append(d)
return todo
# Resolves `keys` against the `deps` catalog (applying per-key `overrides`,
# skipping `excludes`), installs every entry whose rule exists on `native`,
# and returns the rest for the caller to instantiate.
def require(keys,
deps = {},
overrides = {},
excludes = [],
verbose = 0,
strict = False):
#
# Make a list of non-excluded required deps with merged data.
#
required = []
for key in keys:
dep = deps.get(key)
if not dep:
fail("Unknown workspace dependency: %s" % key)
d = dict(**dep) # copy the 'frozen' object.
if not key in excludes:
over = overrides.get(key)
# NOTE(review): dict "+" was removed in modern Starlark — confirm the
# toolchain in use still supports it before upgrading Bazel.
data = d + over if over else d
if _needs_install(key, data, verbose=verbose, strict=strict):
data["name"] = key
required.append(data)
return _install(required, verbose, strict)

33
build/linter/rules.bzl Normal file
View File

@@ -0,0 +1,33 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_repository")
load("//build/linter:deps.bzl", "DEPS")
load("//build/linter:internal/linter_repositories.bzl", "linter_repositories")
# Declares the workspace repositories for the Go lint toolchain (gometalinter
# and the individual linters it drives). Entries that require go_repository —
# a non-native rule — come back from linter_repositories() and are loaded here.
def go_lint_repositories(
lang_deps = DEPS,
lang_requires = [
"com_github_alecthomas_gometalinter",
"com_github_tsenart_deadcode",
"com_github_mdempsky_maligned",
"com_github_mibk_dupl",
"com_github_kisielk_errcheck",
"com_github_goastscanner_gas",
"com_github_jgautheron_goconst",
"com_github_alecthomas_gocyclo",
"org_golang_x_goimports",
"com_github_golang_lint",
"co_honnef_tools_gosimple",
"org_golang_x_gotype",
"com_github_gordonklaus_ineffassign",
], **kwargs):
rem = linter_repositories(lang_deps = lang_deps,
lang_requires = lang_requires,
**kwargs)
# Load remaining (special) deps
for dep in rem:
rule = dep.pop("rule")
if "go_repository" == rule:
go_repository(**dep)
else:
fail("Unknown loading rule %s for %s" % (rule, dep))

171
build/make-rules/verify.sh Normal file
View File

@@ -0,0 +1,171 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KRATOS_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KRATOS_ROOT}/hack/lib/util.sh"
# include shell2junit library
# source "${KRATOS_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
# Excluded check patterns are always skipped.
EXCLUDED_PATTERNS=(
"verify-all.sh" # this script calls the make rule and would cause a loop
"verify-linkcheck.sh" # runs in separate Jenkins job once per day due to high network usage
"verify-test-owners.sh" # TODO(rmmh): figure out how to avoid endless conflicts
"verify-*-dockerized.sh" # Don't run any scripts that intended to be run dockerized
"verify-typecheck.sh" # runs in separate typecheck job
)
# Only run whitelisted fast checks in quick mode.
# These run in <10s each on enisoc's workstation, assuming that
# `make` and `hack/godep-restore.sh` had already been run.
# NOTE(review): QUICK_PATTERNS is appended to without being initialized first;
# under `set -o nounset` this errors on some bash versions — confirm the
# minimum supported bash, or initialize QUICK_PATTERNS=() above.
QUICK_PATTERNS+=(
"verify-api-groups.sh"
"verify-bazel.sh"
"verify-boilerplate.sh"
"verify-generated-files-remake"
"verify-godep-licenses.sh"
"verify-gofmt.sh"
"verify-imports.sh"
"verify-pkg-names.sh"
"verify-readonly-packages.sh"
"verify-spelling.sh"
"verify-staging-client-go.sh"
"verify-test-images.sh"
"verify-test-owners.sh"
)
# Expand the patterns to the script paths that actually exist; both results
# are newline-separated strings, intentionally re-split by the for-loops below.
EXCLUDED_CHECKS=$(ls ${EXCLUDED_PATTERNS[@]/#/${KRATOS_ROOT}\/hack\/} 2>/dev/null || true)
QUICK_CHECKS=$(ls ${QUICK_PATTERNS[@]/#/${KRATOS_ROOT}\/hack\/} 2>/dev/null || true)
# Return 0 if the path in $1 refers to one of the always-skipped checks.
# EXCLUDED_CHECKS is a whitespace-separated list of paths; the expansion
# is intentionally unquoted so it word-splits into individual entries.
function is-excluded {
  local candidate
  for candidate in ${EXCLUDED_CHECKS[@]}; do
    # -ef: same file (follows symlinks), robust against path spelling.
    [[ $1 -ef "${candidate}" ]] && return 0
  done
  return 1
}
# Return 0 if the path in $1 is on the quick-mode whitelist.
# QUICK_CHECKS is a whitespace-separated list; unquoted expansion
# word-splits it into individual paths on purpose.
function is-quick {
  local fast
  for fast in ${QUICK_CHECKS[@]}; do
    if [[ $1 -ef "${fast}" ]]; then
      return 0
    fi
  done
  return 1
}
# Return 0 if check filename $1 (e.g. "verify-gofmt.sh") was requested via
# the space-separated WHAT variable (e.g. WHAT="gofmt bazel").
# The "verify-" prefix and the extension are stripped before comparing.
function is-explicitly-chosen {
  local wanted
  local stripped="${1#verify-}"
  stripped="${stripped%.*}"
  for wanted in ${WHAT}; do
    [[ "${wanted}" == "${stripped}" ]] && return 0
  done
  return 1
}
# Run one verify check. $1 is the runner (bash/python), $2 the script path;
# the whole "$@" is the command to execute. When the shell2junit juLog
# helper is available, wrap the command so a junit report is written to
# KRATOS_JUNIT_REPORT_DIR (default /tmp/junit-results); SILENT=true
# discards all output. Returns the command's exit status.
function run-cmd {
  local filename="${2##*/verify-}"
  local testname="${filename%%.*}"
  local output="${KRATOS_JUNIT_REPORT_DIR:-/tmp/junit-results}"
  local tr
  # FIX: juLog comes from third_party sh2ju.sh, whose `source` above is
  # commented out; previously this crashed on an undefined function.
  # Fall back to running the command directly when juLog is absent.
  if command -v juLog >/dev/null 2>&1; then
    if ${SILENT}; then
      juLog -output="${output}" -class="verify" -name="${testname}" "$@" &> /dev/null
      tr=$?
    else
      juLog -output="${output}" -class="verify" -name="${testname}" "$@"
      tr=$?
    fi
  else
    if ${SILENT}; then
      "$@" &> /dev/null
      tr=$?
    else
      "$@"
      tr=$?
    fi
  fi
  return ${tr}
}
# Collect Failed tests in this Array , initialize it to nil
FAILED_TESTS=()
function print-failed-tests {
echo -e "========================"
echo -e "${color_red}FAILED TESTS${color_norm}"
echo -e "========================"
for t in ${FAILED_TESTS[@]}; do
echo -e "${color_red}${t}${color_norm}"
done
}
# Discover every check matching glob $1 and run each with runner $2
# (bash or python). Filtering: if WHAT is set, only explicitly chosen
# checks run; otherwise excluded checks are skipped, and in QUICK mode
# non-whitelisted checks are skipped too. Each check is timed; failures
# set the global `ret` to 1 and append to FAILED_TESTS.
function run-checks {
local -r pattern=$1
local -r runner=$2
local t
for t in $(ls ${pattern})
do
local check_name="$(basename "${t}")"
# WHAT bypasses both the exclusion list and quick-mode filtering.
if [[ ! -z ${WHAT:-} ]]; then
if ! is-explicitly-chosen "${check_name}"; then
continue
fi
else
if is-excluded "${t}" ; then
echo "Skipping ${check_name}"
continue
fi
if ${QUICK} && ! is-quick "${t}" ; then
echo "Skipping ${check_name} in quick mode"
continue
fi
fi
echo -e "Verifying ${check_name}"
local start=$(date +%s)
# '&& tr=$? || tr=$?' captures the exit code without tripping errexit.
run-cmd "${runner}" "${t}" && tr=$? || tr=$?
local elapsed=$(($(date +%s) - ${start}))
if [[ ${tr} -eq 0 ]]; then
echo -e "${color_green}SUCCESS${color_norm} ${check_name}\t${elapsed}s"
else
echo -e "${color_red}FAILED${color_norm} ${check_name}\t${elapsed}s"
ret=1
FAILED_TESTS+=(${t})
fi
done
}
# SILENT / QUICK may be preset in the environment; default both to false.
SILENT=${SILENT:-false}
QUICK=${QUICK:-false}
if ${SILENT} ; then
echo "Running in silent mode, run with SILENT=false if you want to see script logs."
fi
if ${QUICK} ; then
echo "Running in quick mode (QUICK=true). Only fast checks will run."
fi
# Overall exit status; run-checks sets it to 1 on any failure.
ret=0
run-checks "${KRATOS_ROOT}/hack/verify-*.sh" bash
run-checks "${KRATOS_ROOT}/hack/verify-*.py" python
if [[ ${ret} -eq 1 ]]; then
print-failed-tests
fi
exit ${ret}
# ex: ts=2 sw=2 et filetype=sh

View File

@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This command is used by bazel as the workspace_status_command
# to implement build stamping with git information.
set -o errexit
set -o nounset
set -o pipefail

# FIX: split declaration from command substitution so a failing dirname
# is not masked by `export` always succeeding.
KRATOS_ROOT=$(dirname "${BASH_SOURCE}")/..
export KRATOS_ROOT
source "${KRATOS_ROOT}/build/lib/version.sh"

# Populates the KRATOS_GIT_* / KRATOS_BUILD_TIME variables used below.
kratos::version::get_version_vars

# FIX: a bare ${KRATOS_GIT_VERSION/+/_} aborts under `set -u` when the
# variable is unset, unlike every other stamp which defaults via '-'.
# Default first, then replace '+' with '_' to make a docker-legal tag.
KRATOS_DOCKER_TAG="${KRATOS_GIT_VERSION-}"
KRATOS_DOCKER_TAG="${KRATOS_DOCKER_TAG/+/_}"

# Prefix with STABLE_ so that these values are saved to stable-status.txt
# instead of volatile-status.txt.
# Stamped rules will be retriggered by changes to stable-status.txt, but not by
# changes to volatile-status.txt.
# IMPORTANT: the camelCase vars should match the lists in hack/lib/version.sh
# and pkg/version/def.bzl.
cat <<EOF
STABLE_BUILD_GIT_COMMIT ${KRATOS_GIT_COMMIT-}
STABLE_BUILD_SCM_STATUS ${KRATOS_GIT_TREE_STATE-}
STABLE_BUILD_SCM_REVISION ${KRATOS_GIT_VERSION-}
STABLE_BUILD_MAJOR_VERSION ${KRATOS_GIT_MAJOR-}
STABLE_BUILD_MINOR_VERSION ${KRATOS_GIT_MINOR-}
STABLE_BUILD_TIME ${KRATOS_BUILD_TIME-}
STABLE_DOCKER_TAG ${KRATOS_DOCKER_TAG}
gitCommit ${KRATOS_GIT_COMMIT-}
gitTreeState ${KRATOS_GIT_TREE_STATE-}
gitVersion ${KRATOS_GIT_VERSION-}
gitMajor ${KRATOS_GIT_MAJOR-}
gitMinor ${KRATOS_GIT_MINOR-}
buildDate $(date \
  ${SOURCE_DATE_EPOCH:+"--date=@${SOURCE_DATE_EPOCH}"} \
  -u +'%Y-%m-%dT%H:%M:%SZ')
EOF

View File

@@ -0,0 +1,4 @@
{
"GoPrefix": "go-common",
"AddSourcesRules": true
}

View File

@@ -0,0 +1,20 @@
{
"Vendor": true,
"Deadline": "5m",
"Cyclo": 50,
"Enable": [
"gofmt",
"vet",
"golint",
"vetshadow",
"gocyclo",
"megacheck",
"deadcode",
"gosimple",
"unused"
],
"Exclude": [
".+\\.pb\\.go",
".+\\.pb\\.gw\\.go"
]
}

22
build/root/.kazelcfg.json Normal file
View File

@@ -0,0 +1,22 @@
{
"GoPrefix": "go-common",
"SkippedPaths": [
"^release",
"^//release",
"^//app/tool/protoc-gen-bm/third_party",
"^app/tool/protoc-gen-bm/third_party",
"^//vendor/github.com/gogo",
"^vendor/github.com/gogo",
"^//vendor/github.com/golang/protobuf",
"^vendor/github.com/golang/protobuf",
"^vendor/google.golang.org/grpc",
"^//vendor/google.golang.org/grpc",
"^vendor/github.com/tsuna/gohbase/pb",
"^//vendor/github.com/tsuna/gohbase/pb",
"^//vendor/golang.org/x/net",
"^vendor/golang.org/x/net"
],
"AddSourcesRules": true,
"VendorMultipleBuildFiles": true,
"ManageGoRules": true
}

38
build/root/BUILD.root Normal file
View File

@@ -0,0 +1,38 @@
# Go rule macros from rules_go.
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)
# gazelle:prefix go-common
package(default_visibility = ["//visibility:public"])
# Gazelle target: regenerates BUILD files for Go packages in this workspace.
load("@bazel_gazelle//:def.bzl", "gazelle")
gazelle(name = "gazelle")
# Pin the Xcode version used for darwin builds.
xcode_version(
    name = "xcode_version",
    version = "10.0",
)
# All files at the workspace root, minus bazel outputs and editor/VCS dirs.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"], exclude=["bazel-*/**", ".git/**", ".idea/**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
# Aggregated source tree: root files plus the per-tree :all-srcs groups.
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//app:all-srcs",
        "//build:all-srcs",
        "//library:all-srcs",
        "//vendor:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

156
build/root/Makefile Normal file
View File

@@ -0,0 +1,156 @@
# Don't allow an implicit 'all' rule. This is not a user-facing file.
ifeq ($(MAKECMDGOALS),)
$(error This Makefile requires an explicit rule to be specified)
endif

ifeq ($(DBG_MAKEFILE),1)
# NOTE(review): the message mentions Makefile.generated_files but this is
# the root Makefile — looks copy/pasted; confirm before changing the text.
$(warning ***** starting Makefile.generated_files for goal(s) "$(MAKECMDGOALS)")
$(warning ***** $(shell date))
endif

# Where install-kratos copies the built binary.
# FIX: the original `$(go env GOBIN)` made make expand a variable literally
# named "go env GOBIN" (always empty, so the fallback always won);
# $(shell ...) actually runs the command.
GOBIN := $(shell go env GOBIN)
ifeq ($(GOBIN),)
GOBIN := ~/go/bin
endif

# It's necessary to set this because some environments don't link sh -> bash.
SHELL := /bin/bash

# ARCH keeps literal backticks; they are executed by the recipe shell when
# the variable is expanded inside a recipe (yielding e.g. Linux or Darwin).
ARCH := "`uname -s`"
LINUX := "Linux"
MAC := "Darwin"

# We don't need make's built-in rules.
MAKEFLAGS += --no-builtin-rules
.SUFFIXES:

# Constants used throughout.
.EXPORT_ALL_VARIABLES:
OUT_DIR ?= _output
BIN_DIR := $(OUT_DIR)/bin
# NOTE(review): only these three targets are declared .PHONY although most
# targets below are phony in spirit; confirm whether more should be listed.
.PHONY: build update clean
# Default entry point: toolchain check, then refresh bazel BUILD files.
all: check bazel-update
build: init check bazel-build
# Coverage run for one hard-coded package tree (account dao).
test-coverage:
	bazel coverage --test_env=DEPLOY_ENV=uat --test_timeout=60 --test_env=APP_ID=bazel.test --test_output=all --cache_test_results=no //app/service/main/account/dao/...
simple-build:
	bazel build --watchfs -- //tools/... -//vendor/...
# WHAT=<dir> narrows the build to //<dir>/... with the ci config;
# without WHAT the whole tree builds with the office config.
ifeq ($(WHAT),)
bazel-build:
	bazel build --config=office --watchfs //app/... //build/... //library/...
else
bazel-build:
	bazel build --config=ci -- //$(WHAT)/...
endif
# -k: keep going past individual target failures.
build-keep-going:
	bazel build --config=ci -k //app/... //build/... //library/...
	cat bazel-out/stable-status.txt
clean:
	bazel clean --expunge
	rm -rf _output
update: init bazel-update
bazel-update:
	./build/update-bazel.sh
prow-update:
	./build/update-prow.sh
# Run `make` inside the subdirectory named by WHAT.
test:
	@if [ "$(WHAT)" != "" ]; \
	then \
		cd $(WHAT) && make ; \
	else \
		echo "Please input the WHAT" ;\
	fi
bazel-test:
	@if [ "$(WHAT)" != "" ]; \
	then \
		bazel test --watchfs -- //$(WHAT)/... ; \
	else \
		echo "Please input the WHAT" ;\
	fi
check:
	@./build/check.sh
# Install a git pre-commit hook that runs `make all` before each commit.
init:
	@if [ ! -f .git/hooks/pre-commit ] ; \
	then \
		echo "make all" >> .git/hooks/pre-commit; \
		sudo chmod +x .git/hooks/pre-commit; \
	fi
# Cross-compile the kratos tool for every supported platform.
build-all-kratos:
	bazel build --platforms=@io_bazel_rules_go//go/toolchain:linux_386 //app/tool/kratos:kratos
	bazel build --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //app/tool/kratos:kratos
	bazel build --platforms=@io_bazel_rules_go//go/toolchain:darwin_amd64 //app/tool/kratos:kratos
# ARCH's embedded backticks run `uname -s` in the recipe shell here.
install-kratos: init check build-kratos
	@if [[ "$(ARCH)" == "Linux" ]]; then \
		cp bazel-bin/app/tool/kratos/linux_amd64_pure_stripped/kratos $(GOBIN); \
	fi; \
	if [[ "$(ARCH)" == "Darwin" ]]; then \
		cp bazel-bin/app/tool/kratos/darwin_amd64_stripped/kratos $(GOBIN); \
	fi
build-kratos:
	bazel build //app/tool/kratos:kratos
# CI sharding: each target builds one slice of the repo with --config=ci.
ci-bazel-build:
	bazel build --config=ci -- //app/...
ci-bazel-build-a:
	bazel build --config=ci -- //app/admin/...
ci-bazel-build-b:
	bazel build --config=ci -- //app/interface/...
ci-bazel-build-c:
	bazel build --config=ci -- //app/job/... //app/tool/... //app/common/... //app/infra/...
ci-bazel-build-d:
	bazel build --config=ci -- //app/service/... //library/...
ci-bazel-build-common:
	bazel build --config=ci -- //app/common/...
ci-bazel-build-infra:
	bazel build --config=ci -- //app/infra/...
ci-bazel-build-tool:
	bazel build --config=ci -- //app/tool/...
ci-bazel-build-main:
	bazel build --config=ci -- //app/admin/main/... //app/interface/main/... //app/job/main/... //app/service/main/...
ci-bazel-build-live:
	bazel build --config=ci -- //app/admin/live/... //app/interface/live/... //app/job/live/... //app/job/live-userexp/... //app/service/live/...
ci-bazel-build-ep:
	bazel build --config=ci -- //app/admin/ep/... //app/service/ep/...
ci-bazel-build-openplatform:
	bazel build --config=ci -- //app/admin/openplatform/... //app/interface/openplatform/... //app/job/openplatform/... //app/service/openplatform/...
ci-bazel-build-bbq:
	bazel build --config=ci -- //app/interface/bbq/... //app/job/bbq/... //app/service/bbq/...
ci-bazel-build-video:
	bazel build --config=ci -- //app/interface/video/... //app/service/video/...
ci-bazel-build-ops:
	bazel build --config=ci -- //app/service/ops/...
ci-bazel-build-library:
	bazel build --config=ci -- //library/...
ci-bazel-build-admin-main:
	bazel build --config=ci -- //app/admin/main/...
ci-bazel-build-interface-main:
	bazel build --config=ci -- //app/interface/main/...
ci-bazel-build-job-main:
	bazel build --config=ci -- //app/job/main/...
ci-bazel-build-service-main:
	bazel build --config=ci -- //app/service/main/...

View File

@@ -0,0 +1,807 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Don't allow users to call this directly. There are too many variables this
# assumes to inherit from the main Makefile. This is not a user-facing file.
# Guard: this file inherits many variables from the main Makefile and must
# not be invoked directly.
ifeq ($(CALLED_FROM_MAIN_MAKEFILE),)
$(error Please use the main Makefile, e.g. `make generated_files`)
endif
# Don't allow an implicit 'all' rule. This is not a user-facing file.
ifeq ($(MAKECMDGOALS),)
$(error This Makefile requires an explicit rule to be specified)
endif
# DBG_MAKEFILE=1 traces which goals this file is entered with and when.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** starting Makefile.generated_files for goal(s) "$(MAKECMDGOALS)")
$(warning ***** $(shell date))
endif
# It's necessary to set this because some environments don't link sh -> bash.
SHELL := /bin/bash
# This rule collects all the generated file sets into a single rule. Other
# rules should depend on this to ensure generated files are rebuilt.
.PHONY: generated_files
generated_files: gen_deepcopy gen_defaulter gen_conversion gen_openapi
# NOTE(review): there is no verify_gen_openapi counterpart even though
# gen_openapi exists above — confirm whether that is intentional.
.PHONY: verify_generated_files
verify_generated_files: verify_gen_deepcopy \
	verify_gen_defaulter \
	verify_gen_conversion
# Code-generation logic.
#
# This stuff can be pretty tricky, and there's probably some corner cases that
# we don't handle well. That said, here's a straightforward test to prove that
# the most common cases work. Sadly, it is manual.
#
# make clean
# find . -name .make\* | xargs rm -f
# find . -name zz_generated\* | xargs rm -f
# # verify `find . -name zz_generated.deepcopy.go | wc -l` is 0
# # verify `find . -name .make | wc -l` is 0
#
# make nonexistent
# # expect "No rule to make target"
# # verify `find .make/ -type f | wc -l` has many files
#
# make gen_deepcopy
# # expect deepcopy-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.deepcopy.go | wc -l` has files
# make gen_deepcopy
# # expect nothing to be rebuilt, finish in O(seconds)
# touch pkg/api/types.go
# make gen_deepcopy
# # expect one file to be regenerated
# make gen_deepcopy
# # expect nothing to be rebuilt, finish in O(seconds)
# touch vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go
# make gen_deepcopy
# # expect deepcopy-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.deepcopy.go | wc -l` has files
# make gen_deepcopy
# # expect nothing to be rebuilt, finish in O(seconds)
#
# make gen_conversion
# # expect conversion-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.conversion.go | wc -l` has files
# make gen_conversion
# # expect nothing to be rebuilt, finish in O(seconds)
# touch pkg/api/types.go
# make gen_conversion
# # expect one file to be regenerated
# make gen_conversion
# # expect nothing to be rebuilt, finish in O(seconds)
# touch vendor/k8s.io/code-generator/cmd/conversion-gen/main.go
# make gen_conversion
# # expect conversion-gen is built exactly once
# # expect many files to be regenerated
# # verify `find . -name zz_generated.conversion.go | wc -l` has files
# make gen_conversion
# # expect nothing to be rebuilt, finish in O(seconds)
#
# make all
# # expect it to build
#
# make test
# # expect it to pass
#
# make clean
# # verify `find . -name zz_generated.deepcopy.go | wc -l` is 0
# # verify `find . -name .make | wc -l` is 0
#
# make all WHAT=cmd/kube-proxy
# # expect it to build
#
# make clean
# make test WHAT=cmd/kube-proxy
# # expect it to pass
# This variable holds a list of every directory that contains Go files in this
# project. Other rules and variables can use this as a starting point to
# reduce filesystem accesses.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** finding all *.go dirs)
endif
ALL_GO_DIRS := $(shell \
hack/make-rules/helpers/cache_go_dirs.sh $(META_DIR)/all_go_dirs.mk \
)
# The name of the metadata file which lists *.go files in each pkg.
GOFILES_META := gofiles.mk
# Establish a dependency between the deps file and the dir. Whenever a dir
# changes (files added or removed) the deps file will be considered stale.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# This is looser than we really need (e.g. we don't really care about non *.go
# files or even *_test.go files), but this is much easier to represent.
#
# Because we 'sinclude' the deps file, it is considered for rebuilding, as part
# of make's normal evaluation. If it gets rebuilt, make will restart.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
$(foreach dir, $(ALL_GO_DIRS), $(eval \
$(META_DIR)/$(dir)/$(GOFILES_META): $(dir) \
))
# How to rebuild a deps file. When make determines that the deps file is stale
# (see above), it executes this rule, and then re-loads the deps file.
#
# This is looser than we really need (e.g. we don't really care about test
# files), but this is MUCH faster than calling `go list`.
#
# We regenerate the output file in order to satisfy make's "newer than" rules,
# but we only need to rebuild targets if the contents actually changed. That
# is what the .stamp file represents.
$(foreach dir, $(ALL_GO_DIRS), \
$(META_DIR)/$(dir)/$(GOFILES_META)):
FILES=$$(ls $</*.go | grep --color=never -v $(GENERATED_FILE_PREFIX)); \
mkdir -p $(@D); \
echo "gofiles__$< := $$(echo $${FILES})" >$@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: gofiles changed for $@"; \
fi; \
touch $@.stamp; \
fi; \
mv $@.tmp $@
# This is required to fill in the DAG, since some cases (e.g. 'make clean all')
# will reference the .stamp file when it doesn't exist. We don't need to
# rebuild it in that case, just keep make happy.
$(foreach dir, $(ALL_GO_DIRS), \
$(META_DIR)/$(dir)/$(GOFILES_META).stamp):
# Include any deps files as additional Makefile rules. This triggers make to
# consider the deps files for rebuild, which makes the whole
# dependency-management logic work. 'sinclude' is "silent include" which does
# not fail if the file does not exist.
$(foreach dir, $(ALL_GO_DIRS), $(eval \
sinclude $(META_DIR)/$(dir)/$(GOFILES_META) \
))
# Generate a list of all files that have a `+k8s:` comment-tag. This will be
# used to derive lists of files/dirs for generation tools.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** finding all +k8s: tags)
endif
ALL_K8S_TAG_FILES := $(shell \
find $(ALL_GO_DIRS) -maxdepth 1 -type f -name \*.go \
| xargs grep --color=never -l '^// *+k8s:' \
)
#
# Deep-copy generation
#
# Any package that wants deep-copy functions generated must include a
# comment-tag in column 0 of one file of the form:
# // +k8s:deepcopy-gen=<VALUE>
#
# The <VALUE> may be one of:
# generate: generate deep-copy functions into the package
# register: generate deep-copy functions and register them with a
# scheme
# The result file, in each pkg, of deep-copy generation.
DEEPCOPY_BASENAME := $(GENERATED_FILE_PREFIX)deepcopy
DEEPCOPY_FILENAME := $(DEEPCOPY_BASENAME).go
# The tool used to generate deep copies.
DEEPCOPY_GEN := $(BIN_DIR)/deepcopy-gen
# Find all the directories that request deep-copy generation.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** finding all +k8s:deepcopy-gen tags)
endif
DEEPCOPY_DIRS := $(shell \
grep --color=never -l '+k8s:deepcopy-gen=' $(ALL_K8S_TAG_FILES) \
| xargs -n1 dirname \
| LC_ALL=C sort -u \
)
DEEPCOPY_FILES := $(addsuffix /$(DEEPCOPY_FILENAME), $(DEEPCOPY_DIRS))
# Shell function for reuse in rules.
RUN_GEN_DEEPCOPY = \
function run_gen_deepcopy() { \
if [[ -f $(META_DIR)/$(DEEPCOPY_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(DEEPCOPY_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(DEEPCOPY_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(DEEPCOPY_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
--bounding-dirs $(PRJ_SRC_PATH),"k8s.io/api" \
-O $(DEEPCOPY_BASENAME) \
"$$@"; \
fi \
}; \
run_gen_deepcopy
# This rule aggregates the set of files to generate and then generates them all
# in a single run of the tool.
.PHONY: gen_deepcopy
gen_deepcopy: $(DEEPCOPY_FILES) $(DEEPCOPY_GEN)
$(RUN_GEN_DEEPCOPY)
.PHONY: verify_gen_deepcopy
verify_gen_deepcopy: $(DEEPCOPY_GEN)
$(RUN_GEN_DEEPCOPY) --verify-only
# For each dir in DEEPCOPY_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(DEEPCOPY_DIRS), $(eval \
$(dir)/$(DEEPCOPY_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
$(gofiles__$(dir)) \
))
# Unilaterally remove any leftovers from previous runs.
$(shell rm -f $(META_DIR)/$(DEEPCOPY_GEN)*.todo)
# How to regenerate deep-copy code. This is a little slow to run, so we batch
# it up and trigger the batch from the 'generated_files' target.
$(DEEPCOPY_FILES): $(DEEPCOPY_GEN)
mkdir -p $$(dirname $(META_DIR)/$(DEEPCOPY_GEN))
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: deepcopy needed $(@D): $?"; \
ls -lf --full-time $@ $? || true; \
fi
echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(DEEPCOPY_GEN).todo
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(DEEPCOPY_GEN).mk
$(META_DIR)/$(DEEPCOPY_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(DEEPCOPY_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/deepcopy-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(DEEPCOPY_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(DEEPCOPY_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(DEEPCOPY_GEN).mk, above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(DEEPCOPY_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/deepcopy-gen
touch $@
#
# Defaulter generation
#
# Any package that wants defaulter functions generated must include a
# comment-tag in column 0 of one file of the form:
# // +k8s:defaulter-gen=<VALUE>
#
# The <VALUE> depends on context:
# on types:
# true: always generate a defaulter for this type
# false: never generate a defaulter for this type
# on functions:
# covers: if the function name matches SetDefault_NAME, instructs
# the generator not to recurse
# on packages:
# FIELDNAME: any object with a field of this name is a candidate
# for having a defaulter generated
# The result file, in each pkg, of defaulter generation.
DEFAULTER_BASENAME := $(GENERATED_FILE_PREFIX)defaults
DEFAULTER_FILENAME := $(DEFAULTER_BASENAME).go
# The tool used to generate defaulters.
DEFAULTER_GEN := $(BIN_DIR)/defaulter-gen
# All directories that request any form of defaulter generation.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** finding all +k8s:defaulter-gen tags)
endif
DEFAULTER_DIRS := $(shell \
grep --color=never -l '+k8s:defaulter-gen=' $(ALL_K8S_TAG_FILES) \
| xargs -n1 dirname \
| LC_ALL=C sort -u \
)
DEFAULTER_FILES := $(addsuffix /$(DEFAULTER_FILENAME), $(DEFAULTER_DIRS))
RUN_GEN_DEFAULTER := \
function run_gen_defaulter() { \
if [[ -f $(META_DIR)/$(DEFAULTER_GEN).todo ]]; then \
pkgs=$$(cat $(META_DIR)/$(DEFAULTER_GEN).todo | paste -sd, -); \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: running $(DEFAULTER_GEN) for $$pkgs"; \
fi; \
./hack/run-in-gopath.sh $(DEFAULTER_GEN) \
--v $(KUBE_VERBOSE) \
--logtostderr \
-i "$$pkgs" \
--extra-peer-dirs $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(DEFAULTER_DIRS)) | sed 's/ /,/g') \
-O $(DEFAULTER_BASENAME) \
"$$@"; \
fi \
}; \
run_gen_defaulter
# This rule aggregates the set of files to generate and then generates them all
# in a single run of the tool.
.PHONY: gen_defaulter
gen_defaulter: $(DEFAULTER_FILES) $(DEFAULTER_GEN)
	$(RUN_GEN_DEFAULTER)
# FIX: this previously declared `.PHONY: verify_gen_deepcopy` a second time
# (copy/paste from the deepcopy section), leaving verify_gen_defaulter
# undeclared as phony.
.PHONY: verify_gen_defaulter
verify_gen_defaulter: $(DEFAULTER_GEN)
	$(RUN_GEN_DEFAULTER) --verify-only
# For each dir in DEFAULTER_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(DEFAULTER_DIRS), $(eval \
$(dir)/$(DEFAULTER_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
$(gofiles__$(dir)) \
))
# For each dir in DEFAULTER_DIRS, for each target in $(defaulters__$(dir)),
# this establishes a dependency between the output file and the input files
# that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(DEFAULTER_DIRS), \
$(foreach tgt, $(defaulters__$(dir)), $(eval \
$(dir)/$(DEFAULTER_FILENAME): $(META_DIR)/$(tgt)/$(GOFILES_META).stamp \
$(gofiles__$(tgt)) \
)) \
)
# Unilaterally remove any leftovers from previous runs.
$(shell rm -f $(META_DIR)/$(DEFAULTER_GEN)*.todo)
# How to regenerate defaulter code. This is a little slow to run, so we batch
# it up and trigger the batch from the 'generated_files' target.
$(DEFAULTER_FILES): $(DEFAULTER_GEN)
mkdir -p $$(dirname $(META_DIR)/$(DEFAULTER_GEN))
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: defaulter needed $(@D): $?"; \
ls -lf --full-time $@ $? || true; \
fi
echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(DEFAULTER_GEN).todo
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(DEFAULTER_GEN).mk
$(META_DIR)/$(DEFAULTER_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(DEFAULTER_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/defaulter-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(DEFAULTER_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(DEFAULTER_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(DEFAULTER_GEN).mk, above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(DEFAULTER_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/defaulter-gen
touch $@
#
# Open-api generation
#
# Any package that wants open-api functions generated must include a
# comment-tag in column 0 of one file of the form:
# // +k8s:openapi-gen=true
#
# The result file, in each pkg, of open-api generation.
OPENAPI_BASENAME := $(GENERATED_FILE_PREFIX)openapi
OPENAPI_FILENAME := $(OPENAPI_BASENAME).go
OPENAPI_OUTPUT_PKG := pkg/generated/openapi
# The tool used to generate open apis.
OPENAPI_GEN := $(BIN_DIR)/openapi-gen
# Find all the directories that request open-api generation.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** finding all +k8s:openapi-gen tags)
endif
OPENAPI_DIRS := $(shell \
grep --color=never -l '+k8s:openapi-gen=' $(ALL_K8S_TAG_FILES) \
| xargs -n1 dirname \
| LC_ALL=C sort -u \
)
OPENAPI_OUTFILE := $(OPENAPI_OUTPUT_PKG)/$(OPENAPI_FILENAME)
# This rule is the user-friendly entrypoint for openapi generation.
.PHONY: gen_openapi
gen_openapi: $(OPENAPI_OUTFILE) $(OPENAPI_GEN)
# For each dir in OPENAPI_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(OPENAPI_DIRS), $(eval \
$(OPENAPI_OUTFILE): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
$(gofiles__$(dir)) \
))
# How to regenerate open-api code. This emits a single file for all results.
# FIX: the prerequisite list previously repeated $(OPENAPI_GEN) twice;
# duplicate prerequisites are redundant in make — one occurrence suffices.
$(OPENAPI_OUTFILE): $(OPENAPI_GEN)
	function run_gen_openapi() { \
	    ./hack/run-in-gopath.sh $(OPENAPI_GEN) \
	        --v $(KUBE_VERBOSE) \
	        --logtostderr \
	        -i $$(echo $(addprefix $(PRJ_SRC_PATH)/, $(OPENAPI_DIRS)) | sed 's/ /,/g') \
	        -p $(PRJ_SRC_PATH)/$(OPENAPI_OUTPUT_PKG) \
	        -O $(OPENAPI_BASENAME) \
	        "$$@"; \
	}; \
	run_gen_openapi
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
.PHONY: $(META_DIR)/$(OPENAPI_GEN).mk
$(META_DIR)/$(OPENAPI_GEN).mk:
mkdir -p $(@D); \
(echo -n "$(OPENAPI_GEN): "; \
./hack/run-in-gopath.sh go list \
-f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
./vendor/k8s.io/code-generator/cmd/openapi-gen \
| grep --color=never "^$(PRJ_SRC_PATH)/" \
| xargs ./hack/run-in-gopath.sh go list \
-f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
| paste -sd' ' - \
| sed 's/ / \\=,/g' \
| tr '=,' '\n\t' \
| sed "s|$$(pwd -P)/||"; \
) > $@.tmp; \
if ! cmp -s $@.tmp $@; then \
if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
echo "DBG: $(OPENAPI_GEN).mk changed"; \
fi; \
cat $@.tmp > $@; \
rm -f $@.tmp; \
fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(OPENAPI_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(OPENAPI_GEN).mk, above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
$(OPENAPI_GEN):
hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/openapi-gen
touch $@
#
# Conversion generation
#
# Any package that wants conversion functions generated must include one or
# more comment-tags in any .go file, in column 0, of the form:
# // +k8s:conversion-gen=<CONVERSION_TARGET_DIR>
#
# The CONVERSION_TARGET_DIR is a project-local path to another directory which
# should be considered when evaluating peer types for conversions. Types which
# are found in the source package (where conversions are being generated)
# but do not have a peer in one of the target directories will not have
# conversions generated.
#
# TODO: it might be better in the long term to make peer-types explicit in the
# IDL.
# The result file, in each pkg, of conversion generation.
CONVERSION_BASENAME := $(GENERATED_FILE_PREFIX)conversion
CONVERSION_FILENAME := $(CONVERSION_BASENAME).go
# The tool used to generate conversions.
CONVERSION_GEN := $(BIN_DIR)/conversion-gen
# The name of the metadata file listing conversion peers for each pkg.
CONVERSIONS_META := conversions.mk
# All directories that request any form of conversion generation.
ifeq ($(DBG_MAKEFILE),1)
$(warning ***** finding all +k8s:conversion-gen tags)
endif
CONVERSION_DIRS := $(shell \
grep --color=never '^// *+k8s:conversion-gen=' $(ALL_K8S_TAG_FILES) \
| cut -f1 -d: \
| xargs -n1 dirname \
| LC_ALL=C sort -u \
)
CONVERSION_FILES := $(addsuffix /$(CONVERSION_FILENAME), $(CONVERSION_DIRS))
CONVERSION_EXTRA_PEER_DIRS := k8s.io/kubernetes/pkg/apis/core,k8s.io/kubernetes/pkg/apis/core/v1,k8s.io/api/core/v1
# Shell function for reuse in rules.
# RUN_GEN_CONVERSION expands to an inline shell function that runs
# conversion-gen once over every package queued in the .todo file
# (comma-joined via paste), forwarding any extra flags such as
# --verify-only through "$@".
RUN_GEN_CONVERSION = \
    function run_gen_conversion() { \
        if [[ -f $(META_DIR)/$(CONVERSION_GEN).todo ]]; then \
            pkgs=$$(cat $(META_DIR)/$(CONVERSION_GEN).todo | paste -sd, -); \
            if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
                echo "DBG: running $(CONVERSION_GEN) for $$pkgs"; \
            fi; \
            ./hack/run-in-gopath.sh $(CONVERSION_GEN) \
                --extra-peer-dirs $(CONVERSION_EXTRA_PEER_DIRS) \
                --v $(KUBE_VERBOSE) \
                --logtostderr \
                -i "$$pkgs" \
                -O $(CONVERSION_BASENAME) \
                "$$@"; \
        fi \
    }; \
    run_gen_conversion
# This rule aggregates the set of files to generate and then generates them all
# in a single run of the tool.
# gen_conversion: run the batched generator over every package queued in
# the .todo file (populated by the $(CONVERSION_FILES) rule below).
.PHONY: gen_conversion
gen_conversion: $(CONVERSION_FILES) $(CONVERSION_GEN)
	$(RUN_GEN_CONVERSION)

# verify_gen_conversion: same invocation in --verify-only mode; fails when
# the committed generated files are stale.
.PHONY: verify_gen_conversion
verify_gen_conversion: $(CONVERSION_GEN)
	$(RUN_GEN_CONVERSION) --verify-only
# Establish a dependency between the deps file and the dir. Whenever a dir
# changes (files added or removed) the deps file will be considered stale.
#
# This is looser than we really need (e.g. we don't really care about non *.go
# files or even *_test.go files), but this is much easier to represent.
#
# Because we 'sinclude' the deps file, it is considered for rebuilding, as part
# of make's normal evaluation. If it gets rebuilt, make will restart.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
$(foreach dir, $(CONVERSION_DIRS), $(eval \
$(META_DIR)/$(dir)/$(CONVERSIONS_META): $(dir) \
))
# How to rebuild a deps file. When make determines that the deps file is stale
# (see above), it executes this rule, and then re-loads the deps file.
#
# This is looser than we really need (e.g. we don't really care about test
# files), but this is MUCH faster than calling `go list`.
#
# We regenerate the output file in order to satisfy make's "newer than" rules,
# but we only need to rebuild targets if the contents actually changed. That
# is what the .stamp file represents.
# Rebuild a per-dir conversions metadata file: harvest the
# +k8s:conversion-gen tag values from the dir's .go files, write them into
# a temp file, and only bump the .stamp (the real rebuild trigger) when the
# content actually changed; the output file itself is always refreshed to
# satisfy make's "newer than" check.
$(foreach dir, $(CONVERSION_DIRS), \
    $(META_DIR)/$(dir)/$(CONVERSIONS_META)):
	TAGS=$$(grep --color=never -h '^// *+k8s:conversion-gen=' $</*.go \
	    | cut -f2- -d= \
	    | sed 's|$(PRJ_SRC_PATH)/||' \
	    | sed 's|^k8s.io/|vendor/k8s.io/|'); \
	mkdir -p $(@D); \
	echo "conversions__$< := $$(echo $${TAGS})" >$@.tmp; \
	if ! cmp -s $@.tmp $@; then \
	    if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
	        echo "DBG: conversions changed for $@"; \
	    fi; \
	    touch $@.stamp; \
	fi; \
	mv $@.tmp $@
# Include any deps files as additional Makefile rules. This triggers make to
# consider the deps files for rebuild, which makes the whole
# dependency-management logic work. 'sinclude' is "silent include" which does
# not fail if the file does not exist.
$(foreach dir, $(CONVERSION_DIRS), $(eval \
sinclude $(META_DIR)/$(dir)/$(CONVERSIONS_META) \
))
# For each dir in CONVERSION_DIRS, this establishes a dependency between the
# output file and the input files that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(CONVERSION_DIRS), $(eval \
$(dir)/$(CONVERSION_FILENAME): $(META_DIR)/$(dir)/$(GOFILES_META).stamp \
$(gofiles__$(dir)) \
))
# For each dir in CONVERSION_DIRS, for each target in $(conversions__$(dir)),
# this establishes a dependency between the output file and the input files
# that should trigger a rebuild.
#
# The variable value was set in $(GOFILES_META) and included as part of the
# dependency management logic.
#
# Note that this is a deps-only statement, not a full rule (see below). This
# has to be done in a distinct step because wildcards don't work in static
# pattern rules.
#
# The '$(eval)' is needed because this has a different RHS for each LHS, and
# would otherwise produce results that make can't parse.
#
# We depend on the $(GOFILES_META).stamp to detect when the set of input files
# has changed. This allows us to detect deleted input files.
$(foreach dir, $(CONVERSION_DIRS), \
$(foreach tgt, $(conversions__$(dir)), $(eval \
$(dir)/$(CONVERSION_FILENAME): $(META_DIR)/$(tgt)/$(GOFILES_META).stamp \
$(gofiles__$(tgt)) \
)) \
)
# Unilaterally remove any leftovers from previous runs.
$(shell rm -f $(META_DIR)/$(CONVERSION_GEN)*.todo)
# How to regenerate conversion code. This is a little slow to run, so we batch
# it up and trigger the batch from the 'generated_files' target.
# Queue each stale conversion output's package into the .todo batch file;
# the actual generation happens once, in the gen_conversion target.
# NOTE(review): 'ls -lf' — on GNU ls '-f' disables sorting; confirm the
# flag combination is intended (it is only debug output and errors are
# swallowed with '|| true').
$(CONVERSION_FILES): $(CONVERSION_GEN)
	mkdir -p $$(dirname $(META_DIR)/$(CONVERSION_GEN))
	if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
	    echo "DBG: conversion needed $(@D): $?"; \
	    ls -lf --full-time $@ $? || true; \
	fi
	echo $(PRJ_SRC_PATH)/$(@D) >> $(META_DIR)/$(CONVERSION_GEN).todo
# This calculates the dependencies for the generator tool, so we only rebuild
# it when needed. It is PHONY so that it always runs, but it only updates the
# file if the contents have actually changed. We 'sinclude' this later.
# Compute the Go-file dependency list of conversion-gen itself and write it
# as a make rule ("$(CONVERSION_GEN): file file ..."). The target is PHONY
# so it is always re-evaluated, but the file is only rewritten when its
# content changed — keeping make restarts (triggered by the sinclude below)
# to a minimum. The sed/tr dance turns the space-joined file list into
# backslash-continued, tab-indented make syntax with absolute paths
# rewritten relative to the working tree.
.PHONY: $(META_DIR)/$(CONVERSION_GEN).mk
$(META_DIR)/$(CONVERSION_GEN).mk:
	mkdir -p $(@D); \
	(echo -n "$(CONVERSION_GEN): "; \
	    ./hack/run-in-gopath.sh go list \
	        -f '{{.ImportPath}}{{"\n"}}{{range .Deps}}{{.}}{{"\n"}}{{end}}' \
	        ./vendor/k8s.io/code-generator/cmd/conversion-gen \
	        | grep --color=never "^$(PRJ_SRC_PATH)/" \
	        | xargs ./hack/run-in-gopath.sh go list \
	            -f '{{$$d := .Dir}}{{$$d}}{{"\n"}}{{range .GoFiles}}{{$$d}}/{{.}}{{"\n"}}{{end}}' \
	        | paste -sd' ' - \
	        | sed 's/ / \\=,/g' \
	        | tr '=,' '\n\t' \
	        | sed "s|$$(pwd -P)/||"; \
	) > $@.tmp; \
	if ! cmp -s $@.tmp $@; then \
	    if [[ "$(DBG_CODEGEN)" == 1 ]]; then \
	        echo "DBG: $(CONVERSION_GEN).mk changed"; \
	    fi; \
	    cat $@.tmp > $@; \
	    rm -f $@.tmp; \
	fi
# Include dependency info for the generator tool. This will cause the rule of
# the same name to be considered and if it is updated, make will restart.
sinclude $(META_DIR)/$(CONVERSION_GEN).mk
# How to build the generator tool. The deps for this are defined in
# the $(CONVERSION_GEN).mk, above.
#
# A word on the need to touch: This rule might trigger if, for example, a
# non-Go file was added or deleted from a directory on which this depends.
# This target needs to be reconsidered, but Go realizes it doesn't actually
# have to be rebuilt. In that case, make will forever see the dependency as
# newer than the binary, and try to rebuild it over and over. So we touch it,
# and make is happy.
# Build conversion-gen, then touch the binary so make's mtime check
# converges even when the Go toolchain decides no rebuild was needed
# (see the "word on the need to touch" comment above).
$(CONVERSION_GEN):
	hack/make-rules/build.sh ./vendor/k8s.io/code-generator/cmd/conversion-gen
	touch $@

4
build/root/OWNERS Normal file
View File

@@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
labels:
- area/chore

152
build/root/WORKSPACE Normal file
View File

@@ -0,0 +1,152 @@
# Bazel workspace bootstrap. All archives are fetched from the internal
# bazel-cabin mirror over plain http; the sha256 pins below provide the
# integrity guarantee.
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

# Patched rules_go build ("-hack" suffix) hosted on the internal mirror.
http_archive(
    name = "io_bazel_rules_go",
    url = "http://bazel-cabin.bilibili.co/go/rules_go/rules_go-0.16.4-hack.tar.gz",
    sha256 = "891470f5ea5026891f76f66122c7a7d13255c1905cb5ce4ee01c4b46278f9201",
)
http_archive(
    name = "bazel_skylib",
    sha256 = "b5f6abe419da897b7901f90cbab08af958b97a8f3575b0d3dd062ac7ce78541f",
    strip_prefix = "bazel-skylib-0.5.0",
    urls = ["http://bazel-cabin.bilibili.co/bazel-skylib/0.5.0.tar.gz"],
)
http_archive(
    name = "bazel_gazelle",
    urls = ["http://bazel-cabin.bilibili.co/go/gazelle/bazel-gazelle-0.15.0.tar.gz"],
    sha256 = "6e875ab4b6bf64a38c352887760f21203ab054676d9c1b274963907e0768740d",
)

load("@bazel_skylib//:lib.bzl", "versions")

versions.check(minimum_bazel_version = "0.15.0")

load("@io_bazel_rules_go//go:def.bzl", "go_download_sdk")

# Pin the Go SDK to 1.11.4 per host platform; "{}" in urls is substituted
# with each sdk archive filename listed below.
go_download_sdk(
    name = "go_sdk",
    urls = ["http://bazel-cabin.bilibili.co/go/{}"],
    sdks = {
        "linux_amd64": ("go1.11.4.linux-amd64.tar.gz",
                        "fb26c30e6a04ad937bbc657a1b5bba92f80096af1e8ee6da6430c045a8db3a5b"),
        "darwin_amd64": ("go1.11.4.darwin-amd64.tar.gz",
                         "48ea987fb610894b3108ecf42e7a4fd1c1e3eabcaeb570e388c75af1f1375f80"),
        "windows_amd64": ("go1.11.4.windows-amd64.zip",
                          "eeb20e21702f2b9469d9381df5de85e2f731b64a1f54effe196d0f7d0227fe14"),
    },
)
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
#go_repository(
# name = "org_golang_x_tools",
# # release-branch.go1.9, as of 2017-08-25
# urls = ["http://bazel-cabin.bilibili.co/golang-lib/tools/3e7aa9e59977626dc60433e9aeadf1bb63d28295"],
# importpath = "golang.org/x/tools",
# strip_prefix = "tools-3e7aa9e59977626dc60433e9aeadf1bb63d28295",
# type = "zip",
# patches = [
# "@io_bazel_rules_go//third_party:org_golang_x_tools-gazelle.patch",
# "@io_bazel_rules_go//third_party:org_golang_x_tools-extras.patch",
# ],
# patch_args = ["-p1"],
#)#
go_repository(
name = "com_google_protobuf",
build_file_proto_mode = "disable_global",
importpath = "github.com/google/protobuf",
urls = ["http://bazel-cabin.bilibili.co/google/protobuf/48cb18e5c419ddd23d9badcfe4e9df7bde1979b2"],
strip_prefix = "protobuf-48cb18e5c419ddd23d9badcfe4e9df7bde1979b2",
type = "zip",
)
go_repository(
name = "org_golang_x_net",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/golang/net/4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de"],
importpath = "golang.org/x/net",
strip_prefix = "net-4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de",
type = "zip",
)
go_repository(
name = "org_golang_google_genproto",
build_file_proto_mode = "disable_global",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/go-genproto/c7e5094acea1ca1b899e2259d80a6b0f882f81f8"],
strip_prefix = "go-genproto-c7e5094acea1ca1b899e2259d80a6b0f882f81f8",
type = "zip",
importpath = "github.com/google/go-genproto",
)
go_repository(
name = "com_github_gogo_protobuf",
build_file_proto_mode = "disable_global",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/gogo/636bf0302bc95575d69441b25a2603156ffdddf1"],
strip_prefix = "protobuf-636bf0302bc95575d69441b25a2603156ffdddf1",
importpath = "github.com/gogo/protobuf",
type = "zip",
)
go_repository(
name = "org_golang_x_text",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/golang/text/f21a4dfb5e38f5895301dc265a8def02365cc3d0"],
strip_prefix = "text-f21a4dfb5e38f5895301dc265a8def02365cc3d0",
type = "zip",
importpath = "golang.org/x/text",
)#
go_repository(
name = "org_golang_google_grpc",
#build_file_proto_mode = "disable_global",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/grpc-go/df014850f6dee74ba2fc94874043a9f3f75fbfd8"],
strip_prefix = "grpc-go-df014850f6dee74ba2fc94874043a9f3f75fbfd8",
type = "zip",
importpath = "google.golang.org/grpc",
)
go_repository(
name = "go_googleapis",
build_file_proto_mode = "disable_global",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/googleapis/b71d0c74de0b84f2f10a2c61cd66fbb48873709f"],
strip_prefix = "googleapis-b71d0c74de0b84f2f10a2c61cd66fbb48873709f",
type = "zip",
importpath = "google.golang.org/api",
patches = [
"@io_bazel_rules_go//third_party:go_googleapis-directives.patch",
"@io_bazel_rules_go//third_party:go_googleapis-gazelle.patch",
"@io_bazel_rules_go//third_party:go_googleapis-fix.patch",
],
patch_args = ["-p1"],
)
go_repository(
name = "com_github_golang_protobuf",
build_file_proto_mode = "disable_global",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/golang/protobuf/aa810b61a9c79d51363740d207bb46cf8e620ed5"],
strip_prefix = "protobuf-aa810b61a9c79d51363740d207bb46cf8e620ed5",
type = "zip",
importpath = "github.com/golang/protobuf",
patch_args = ["-p1"],
patches = ["@io_bazel_rules_go//third_party:com_github_golang_protobuf-extras.patch"],
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
urls = ["http://bazel-cabin.bilibili.co/golang-lib/sys/e4b3c5e9061176387e7cea65e4dc5853801f3fb7"],
strip_prefix = "sys-e4b3c5e9061176387e7cea65e4dc5853801f3fb7",
type = "zip",
)
gazelle_dependencies()
load("@io_bazel_rules_go//go:def.bzl", "go_rules_dependencies", "go_register_toolchains")
go_rules_dependencies()
go_register_toolchains()
load("//build:workspace.bzl", "bili_workspace")
bili_workspace()

25969
build/root/go_common_job.yaml Normal file

File diff suppressed because it is too large Load Diff

2446
build/root/labels.yaml Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,2 @@
presubmits:
platform/go-common:

View File

@@ -0,0 +1,18 @@
presubmits:
platform/go-common:
- name: go-common-make-update
always_run: true
run_pr_pushed: true
namespace: default
spec:
containers:
- image: go_lint
name: go-common-make-update
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- ./build/update-bazel.sh

View File

@@ -0,0 +1,282 @@
presubmits:
platform/go-common:
- name: go-common-build-common
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-common
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-common
- name: go-common-build-infra
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-infra
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-infra
- name: go-common-build-tool
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-tool
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-tool
- name: go-common-build-live
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-live
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-live
- name: go-common-build-ep
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-ep
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-ep
- name: go-common-build-openplatform
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-openplatform
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-openplatform
- name: go-common-build-bbq
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-bbq
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-bbq
- name: go-common-build-video
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-video
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-video
- name: go-common-build-ops
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-ops
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-ops
- name: go-common-build-library
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
name: go-common-build-library
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-library
- name: go-common-build-service-main
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-service-main-project
spec:
containers:
- image: bazel_build
name: go-common-build-service-main
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-service-main
- name: go-common-build-interface-main
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-interface-main-project
spec:
containers:
- image: bazel_build
name: go-common-build-interface-main
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-interface-main
- name: go-common-build-job-main
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-job-main-project
spec:
containers:
- image: bazel_build
name: go-common-build-job-main
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-job-main
- name: go-common-build-admin-main
always_run: true
namespace: default
run_pr_pushed: true
trusted_labels:
- library
- new-admin-main-project
spec:
containers:
- image: bazel_build
name: go-common-build-admin-main
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- ci-bazel-build-admin-main

View File

@@ -0,0 +1,5 @@
image:
- name: bazel_build
image: hub.bilibili.co/k8s-prow/bazelbuild:v20190123123430-77f54bb27
- name: go_lint
image: hub.bilibili.co/k8s-prow/golint:v20190123123017-77f54bb27

View File

@@ -0,0 +1,22 @@
presubmits:
platform/go-common:
- name: __bazel_build_job_name__
always_run: true
namespace: default
untrusted_labels:
- library
- new-project
spec:
containers:
- image: bazel_build
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- make
- bazel-build
- WHAT=<<bazel_dir_param>>

View File

@@ -0,0 +1,18 @@
presubmits:
platform/go-common:
- name: __bazel_test_job_name__
always_run: true
namespace: default
optional: true
spec:
containers:
- image: bazel_build
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- ./build/unit_test.sh
- <<bazel_dir_param>>

View File

@@ -0,0 +1,18 @@
presubmits:
platform/go-common:
- name: __go_linter_job_name__
always_run: true
namespace: default
optional: true
spec:
containers:
- image: go_lint
imagePullPolicy: IfNotPresent
args:
- --id=$(BUILD_ID)
- --job=$(JOB_NAME)
- --root=/root/go/src
- --repo=git.bilibili.co/$(REPO_OWNER)/$(REPO_NAME)=$(PULL_REFS)
- --command
- ./build/verify-lint.sh
- ./<<bazel_dir_param>>/...

249
build/unit_test.sh Normal file
View File

@@ -0,0 +1,249 @@
#!/bin/bash
# Unit-test gate for go-common: runs bazel coverage per changed package,
# uploads the results to the APM service and enforces coverage standards.
CI_SERVER_URL="http://git.bilibili.co"
CI_UATSVEN_URL="http://uat-sven.bilibili.co"
# NOTE(review): hard-coded private token committed to the repo — should be
# injected via a CI secret/env var instead.
CI_PRIVATE_TOKEN="WVYk-ezyXKq-C82v-1Bi"
CI_PROJECT_ID="682"
# Commit under test, provided by the prow environment.
CI_COMMIT_SHA=${PULL_PULL_SHA}
exitCode=0
# get packages
# dirs: package sub-tree names (relative) that require unit tests.
dirs=(dao)
declare packages
# GetPackages collects changed Go package directories under $1.
# Globals:
#   dirs     (read)  - subdirectory names (e.g. "dao") to include
#   packages (write) - appended with "go-common/<dir> " for each match
# Arguments:
#   $1 - repository-relative root to scan (e.g. "app/service/foo")
function GetPackages(){
    local reg="library"
    local length=${#dirs[@]}
    local i
    for ((i = 0; i < length; i++)); do
        reg+="|$1/${dirs[i]}(/.*)*"
    done
    local value len
    for value in $(find "$1" -type d | grep -E "${reg}"); do
        # Count real Go sources. The previous 'ls | grep .go' used an
        # unescaped regex dot, so any entry merely containing "Xgo"
        # (e.g. "cargo") was miscounted as a Go file.
        len=$(find "${value}" -maxdepth 1 -name '*.go' | wc -l)
        if [[ ${len} -gt 0 ]]; then
            packages+="go-common/${value} "
        fi
    done
}
# upload data to apm
# $1: SvenURL
# $2: directory holding result.out / cover.html / coverage.dat
# Uploads the three artifacts as a multipart form and returns the
# service's numeric "code" field (0 on success). Exits 1 when the
# artifacts are missing or the response is empty/unparseable.
function Upload () {
    if [[ ! -f "$2/result.out" ]] || [[ ! -f "$2/cover.html" ]] || [[ ! -f "$2/coverage.dat" ]]; then
        echo "==================================WARNING!======================================"
        echo "No test found!~ 请完善如下路径测试用例: ${pkg} "
        exit 1
    fi
    local json msg data code
    json=$(curl "$1" -H "Content-type: multipart/form-data" -F "html_file=@$2/cover.html" -F "report_file=@$2/result.out" -F "data_file=@$2/coverage.dat")
    if [[ "${json}" = "" ]]; then
        echo "shell.Upload curl $1 fail"
        exit 1
    fi
    # ${json} must be quoted: unquoted expansion collapses whitespace and
    # can corrupt the JSON payload before jq parses it.
    msg=$(echo "${json}" | jq -r '.message')
    data=$(echo "${json}" | jq -r '.data')
    code=$(echo "${json}" | jq -r '.code')
    if [[ "${data}" = "" ]]; then
        echo "shell.Upload curl $1 fail,data return null"
        exit 1
    fi
    echo "=============================================================================="
    if [[ ${code} -ne 0 ]]; then
        echo -e "返回 message(${msg})"
        echo -e "返回 data(${data})\n\n"
    fi
    return ${code}
}
# GoTest execute go test and go tool
# $1: pkg
# Runs the package's tests with coverage and renders an HTML report.
# Outputs: result.out (test log), cover.out / cover.html (coverage).
function GoTest(){
    # "$1" is quoted so package paths survive word-splitting.
    go test -v "$1" -coverprofile=cover.out -covermode=set -convey-json -timeout=60s > result.out
    go tool cover -html=cover.out -o cover.html
}
# BazelTest execute bazel coverage and go tool
# $1: pkg (prefixed with "go-common/")
function BazelTest(){
    # "go-common/app/x" -> "/app/x:go_default_test" (test label) and
    # "app/x" (workspace-relative path used to locate testlogs).
    # NOTE(review): the label lacks the leading "//"; confirm bazel
    # resolves it as intended.
    pkg=${1//go-common//}":go_default_test"
    path=${1//go-common\//}
    bazel coverage --instrumentation_filter="//${path}[:]" --test_env=DEPLOY_ENV=uat --test_timeout=60 --test_env=APP_ID=bazel.test --test_output=all --cache_test_results=no --test_arg=-convey-json ${pkg} > result.out
    if [[ ! -s result.out ]]; then
        echo "==================================WARNING!======================================"
        echo "No test case found,请完善如下路径测试用例: ${pkg} "
        exit 1
    else
        # NOTE(review): this prints the status of the [[ ]] test above,
        # not of bazel — looks like leftover debug output.
        echo $?
        # Drop entries for monkey.go files from the coverage profile, then
        # render the HTML report.
        cat bazel-out/k8-fastbuild/testlogs/${path}/go_default_test/coverage.dat | grep -v "/monkey.go" > coverage.dat
        go tool cover -html=coverage.dat -o cover.html
    fi
}
# UTLint verifies that a package ships real unit tests: at least one
# *_test.go file containing at least one Test function and one goconvey
# So(...) assertion.
# Arguments:
#   $1 - package path prefixed with "go-common/"
# Exits 1 when the package has no test files, no cases or no assertions.
function UTLint()
{
    local path=${1//go-common\//}
    declare -i numCase=0
    declare -i numAssertion=0
    local files file
    # Quote "${path}" so package paths survive word-splitting.
    files=$(ls "${path}" | grep -E "(.*)_test\.go")
    if [[ ${#files} -eq 0 ]]; then
        echo "shell.UTLint no *_test.go files in pkg:$1"
        exit 1
    fi
    for file in ${files}
    do
        numCase+=$(grep -c -E "^func Test(.+)\(t \*testing\.T\) \{$" "${path}/${file}")
        numAssertion+=$(grep -c -E "^(.*)So\((.+)\)$" "${path}/${file}")
    done
    if [[ ${numCase} -eq 0 || ${numAssertion} -eq 0 ]]; then
        echo -e "shell.UTLint no test case or assertion in pkg:$1"
        exit 1
    fi
    echo "shell.UTLint pkg:$1 succeeded"
}
# upload path to apm
# $1: SvenURL
# $2: directory containing the path.out file produced by ReadDir
function UpPath() {
    local target=$1
    local result_dir=$2
    curl ${target} -H "Content-type: multipart/form-data" -F "path_file=@${result_dir}/path.out"
}
# ReadDir scans app/ (up to depth 3) for CONTRIBUTORS.md files, extracts
# the "# Owner" section of each one, and writes the result to ./path.out
# as a JSON array of {"path": ..., "owner": ...} objects.
function ReadDir(){
    local all_dirs dir contrib line owners suffix
    local entries=""
    # All directories under app/, at most three levels deep.
    all_dirs=$(find app -maxdepth 3 -type d)
    for dir in ${all_dirs}; do
        [[ -d "$dir" ]] || continue
        for contrib in $(find ${dir} -maxdepth 1 -type f | grep "CONTRIBUTORS.md"); do
            owners=""
            # Strip everything up to and including "go-common"; if the path
            # does not contain it, the path is kept as-is.
            suffix=${dir#*"go-common"}
            # Owner names follow the "# Owner" heading, one per line, until
            # a blank line or the next heading.
            while read line; do
                if [[ "${line}" = "# Owner" ]]; then
                    continue
                elif [[ "${line}" = "" ]] || [[ "${line}" = "#"* ]]; then
                    break
                else
                    owners+="${line},"
                fi
            done < "${contrib}"
            entries+="{\"path\":\"go-common${suffix}\",\"owner\":\"${owners%,}\"},"
        done
    done
    # Trim the trailing comma before wrapping in brackets.
    entries=${entries%,}
    echo "[${entries}]" > path.out
}
# start work
# Start drives the whole pipeline for scan root $1:
#   1. collect changed packages (GetPackages),
#   2. resolve the merge request and authors from the GitLab API,
#   3. honor the "+skiput" escape hatch (Magic),
#   4. per package: UTLint -> BazelTest -> Upload,
#   5. upload the directory/owner map (ReadDir + UpPath).
# Globals written: packages, merge_id, username, authorname, exitCode.
function Start(){
    GetPackages $1
    if [[ ${packages} = "" ]]; then
        echo "shell.Start no change packages"
        exit 0
    fi
    #Get gitlab result
    gitMergeRequestUrl="${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/repository/commits/${CI_COMMIT_SHA}/merge_requests?private_token=${CI_PRIVATE_TOKEN}"
    gitCommitUrl="${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/repository/commits/${CI_COMMIT_SHA}/statuses?private_token=${CI_PRIVATE_TOKEN}"
    mergeJson=$(curl -s ${gitMergeRequestUrl})
    commitJson=$(curl -s ${gitCommitUrl})
    if [[ "${mergeJson}" != "[]" ]] && [[ "${commitJson}" != "[]" ]]; then
        merge_id=$(echo ${mergeJson} | jq -r '.[0].iid')
        # NOTE(review): $? here is jq's status (last pipeline stage), so
        # curl failures are not caught by this check.
        exitCode=$?
        if [[ ${exitCode} -ne 0 ]]; then
            echo "shell.Start curl ${gitMergeRequestUrl%=*}=*** error .return(${mergeJson})"
            exit 1
        fi
        username=$(echo ${mergeJson} | jq -r '.[0].author.username')
        authorname=$(echo ${commitJson} | jq -r '.[0].author.username')
    else
        echo "Test not run, maybe you should try create a merge request first!"
        exit 0
    fi
    #Magic time
    Magic
    #Normal process
    for pkg in ${packages}
    do
        svenUrl="${CI_UATSVEN_URL}/x/admin/apm/ut/upload?merge_id=${merge_id}&username=${username}&author=${authorname}&commit_id=${CI_COMMIT_SHA}&pkg=${pkg}"
        echo "shell.Start ut lint pkg:${pkg}"
        UTLint "${pkg}"
        echo "shell.Start Go bazel test pkg:${pkg}"
        BazelTest "${pkg}"
        Upload ${svenUrl} $(pwd)
        exitCode=$?
        if [[ ${exitCode} -ne 0 ]]; then
            echo "shell.Start upload fail, status(${exitCode})"
            exit 1
        fi
    done
    # upload all dirs
    ReadDir
    pathUrl="${CI_UATSVEN_URL}/x/admin/apm/ut/upload/app"
    UpPath ${pathUrl} $(pwd)
    echo "UpPath has finshed...... $(pwd)"
    return 0
}
# Check determine whether the standard is up to standard
# $1: commit_id
# Pushes the aggregated report for this commit, then queries the pass/fail
# verdict and prints a summary table. Sets the global exitCode=1 when the
# coverage gate fails.
function Check(){
    curl "${CI_UATSVEN_URL}/x/admin/apm/ut/git/report?project_id=${CI_PROJECT_ID}&merge_id=${merge_id}&commit_id=$1"
    checkURL="${CI_UATSVEN_URL}/x/admin/apm/ut/check?commit_id=$1"
    json=$(curl -s ${checkURL})
    code=$(echo ${json} | jq -r '.code')
    if [[ ${code} -ne 0 ]]; then
        echo -e "curl ${checkURL} response(${json})"
        exit 1
    fi
    package=$(echo ${json} | jq -r '.data.package')
    coverage=$(echo ${json} | jq -r '.data.coverage')
    passRate=$(echo ${json} | jq -r '.data.pass_rate')
    standard=$(echo ${json} | jq -r '.data.standard')
    increase=$(echo ${json} | jq -r '.data.increase')
    # tyrant is a JSON boolean rendered by jq as the string true/false.
    tyrant=$(echo ${json} | jq -r '.data.tyrant')
    # NOTE(review): lastCID is assigned but never read afterwards.
    lastCID=$(echo ${json} | jq -r '.data.last_cid')
    # Runs the literal string "true" or "false" as a command to branch.
    if ${tyrant}; then
        echo -e "\t续命失败!\n\t大佬本次执行结果未达标哦(灬ꈍ ꈍ灬)请再次优化ut重新提交🆙"
        echo -e "\t---------------------------------------------------------------------"
        printf "\t%-14s %-14s %-14s %-14s\n" "本次覆盖率(%)" "本次通过率(%)" "本次增长量(%)" 执行pkg
        printf "\t%-13.2f %-13.2f %-13.2f %-12s\n" ${coverage} ${passRate} ${increase} ${package}
        echo -e "\t(达标标准:覆盖率>=${standard} && 通过率=100% && 同比当前package历史最高覆盖率的增长率>=0)"
        echo -e "\t---------------------------------------------------------------------"
        exitCode=1
    else
        echo -e "\t恭喜你续命成功可以请求MR了"
    fi
}
# Magic ignore method Check()
# Scans the merge request's discussion notes; a "+skiput" comment anywhere
# short-circuits the whole gate with success (exit 0).
function Magic(){
    local notes_url="http://git.bilibili.co/api/v4/projects/${CI_PROJECT_ID}/merge_requests/${merge_id}/notes?private_token=${CI_PRIVATE_TOKEN}"
    json=$(curl -s ${notes_url})
    local word
    # Note bodies are whitespace-split, so each word is inspected.
    for word in $(echo ${json} | jq -r '.[].body'); do
        [[ ${word} == "+skiput" ]] && exit 0
    done
}
# run
# Entry point: $1 is the scan root passed through to GetPackages. Start
# exits early (status 0) when there is nothing to test or no MR exists.
Start $1
echo -e "【我们万众一心】:"
Check ${CI_COMMIT_SHA}
echo -e "本次执行详细结果查询地址请访问http://sven.bilibili.co/#/ut?merge_id=${merge_id}&&pn=1&ps=20"
# exitCode is set by Check (1 = coverage gate failed).
if [[ ${exitCode} -ne 0 ]]; then
    echo -e "执行失败请解决问题后再次提交。具体请参考http://info.bilibili.co/pages/viewpage.action?pageId=9841745"
    exit 1
else
    echo -e "执行成功."
    exit 0
fi

43
build/update-bazel.sh Normal file
View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
# NOTE(review): unlike build/check.sh this script does not set 'nounset';
# confirm whether that is intentional.
export KRATOS_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KRATOS_ROOT}/build/lib/init.sh"
#kratos::util::ensure-gnu-sed
# Remove generated files prior to running kazel.
# TODO(spxtr): Remove this line once Bazel is the only way to build.
# rm -f "${KRATOS_ROOT}/pkg/generated/openapi/zz_generated.openapi.go"
# Ensure that we find the binaries we build before anything else.
export GOBIN="${KRATOS_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"
# Install tools we need, but only from vendor/...
go install ./vendor/github.com/hawkingrei/kazel
# gazelle gets confused by our staging/ directory, prepending an extra
# "k8s.io/kubernetes/staging/src" to the import path.
# gazelle won't follow the symlinks in vendor/, so we can't just exclude
# staging/. Instead we just fix the bad paths with sed.
# Regenerate BUILD files; a non-zero kazel status means they are stale.
if ! kazel; then
    kratos::log::info "Please remember to run the 'make update' in the root directory of go-common, or run 'kratos update' in any position of go-common.
For more information.Please read this document http://info.bilibili.co/pages/viewpage.action?pageId=8466415" >&2
    exit 1
fi

47
build/update-prow.sh Normal file
View File

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
# NOTE(review): no 'set -o nounset' here, unlike build/check.sh — confirm
# whether that is intentional.
export KRATOS_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KRATOS_ROOT}/build/lib/init.sh"
#kratos::util::ensure-gnu-sed
# Remove generated files prior to running kazel.
# TODO(spxtr): Remove this line once Bazel is the only way to build.
# rm -f "${KRATOS_ROOT}/pkg/generated/openapi/zz_generated.openapi.go"
# Ensure that we find the binaries we build before anything else.
export GOBIN="${KRATOS_OUTPUT_BINPATH}"
PATH="${GOBIN}:${PATH}"
# Install tools we need, but only from vendor/...
go install ./vendor/github.com/hawkingrei/kazel
go install ./app/tool/owner
go install ./app/tool/mkprow
# gazelle gets confused by our staging/ directory, prepending an extra
# "k8s.io/kubernetes/staging/src" to the import path.
# gazelle won't follow the symlinks in vendor/, so we can't just exclude
# staging/. Instead we just fix the bad paths with sed.
# Run the freshly installed tools (found via the GOBIN entry on PATH):
# regenerate OWNERS metadata and prow job configs, then BUILD files.
owner
mkprow
if ! kazel; then
    kratos::log::info "Please remember to run the 'make update' in the root directory of go-common, or run 'kratos update' in any position of go-common.
For more information.Please read this document http://info.bilibili.co/pages/viewpage.action?pageId=8466415" >&2
    exit 1
fi

View File

@@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail

# Run the lint suite over the package patterns given on the command line.
# "$@" is quoted so each argument reaches gometalinter as a single word
# (the unquoted $@ re-split arguments on whitespace).
gometalinter --deadline=50s --vendor \
    --cyclo-over=50 --dupl-threshold=100 \
    --exclude=".*should not use dot imports \(golint\)$" \
    --disable-all \
    --enable=vet \
    --enable=deadcode \
    --enable=golint \
    --enable=vetshadow \
    --enable=gocyclo \
    --enable=unused \
    --enable=gofmt \
    --skip=.git \
    --skip=.tool \
    --skip=vendor \
    --tests \
    "$@"

38
build/verify-lint.sh Normal file
View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Verify step: run the project-local bgr checker (see app/tool/bgr) and then
# gometalinter over the packages passed as arguments (tests included).

set -o errexit
set -o nounset
set -o pipefail

# Fix: quote "$@" in both invocations so arguments containing spaces or
# shell metacharacters are forwarded verbatim instead of being word-split.
bgr -type=dir -script=./app/tool/bgr -hit=main "$@"

gometalinter --deadline=50s --vendor \
  --cyclo-over=50 --dupl-threshold=100 \
  --exclude=".*should not use dot imports \(golint\)$" \
  --disable-all \
  --enable=vet \
  --enable=deadcode \
  --enable=golint \
  --enable=vetshadow \
  --enable=gocyclo \
  --enable=gofmt \
  --enable=ineffassign \
  --enable=structcheck \
  --skip=.git \
  --skip=.tool \
  --skip=vendor \
  --tests \
  "$@"

349
build/visible_to/BUILD Normal file
View File

@@ -0,0 +1,349 @@
# Package groups defined for use in kubernetes visibility rules.
#
# See associated README.md for explanation.
#
# Style suggestions:
#
# - Sort package group definitions by name.
#
# - Prefer obvious package group names.
#
#   E.g "pkg_kubectl_cmd_util_CONSUMERS" names a group
#   of packages allowed to depend on (consume) the
#   //pkg/kubectl/cmd/util package.
#
# - A group name ending in _BAD wants to be deleted.
#
#   Such a group wants to contract, rather than expand.
#   It likely exists to permit a legacy unintentional
#   dependency that requires more work to remove.
#
# - Prefer defining new groups to expanding groups.
#
#   The former permits tight targeting, the latter can
#   allow unnecessary visibility and thus bad deps.

# Shared groups, pulled into several *_CONSUMERS groups below via
# `includes`.

package_group(
    name = "COMMON_generators",
    packages = [
        "//cmd/gendocs",
        "//cmd/genman",
        "//cmd/genyaml",
    ],
)

package_group(
    name = "COMMON_testing",
    packages = [
        "//hack",
        "//hack/lib",
        "//hack/make-rules",
        "//test/e2e",
        "//test/e2e/framework",
        "//test/e2e/kubectl",
        "//test/e2e/workload",
        "//test/integration/etcd",
        "//test/integration/framework",
        "//test/integration/kubectl",
    ],
)

# "..." grants the whole subtree below //cluster.
package_group(
    name = "cluster",
    packages = [
        "//cluster/...",
    ],
)

# _BAD group: legacy dependency from kubeadm — shrink, don't grow.
package_group(
    name = "KUBEADM_BAD",
    packages = [
        "//cmd/kubeadm/app/cmd",
    ],
)

package_group(
    name = "cmd_kubectl_CONSUMERS",
    packages = [
        "//cmd",
    ],
)

package_group(
    name = "cmd_kubectl_app_CONSUMERS",
    packages = [
        "//cmd/kubectl",
    ],
)

package_group(
    name = "pkg_kubectl_CONSUMERS_BAD",
    includes = [
        ":KUBEADM_BAD",
    ],
    packages = [
        "//cmd/clicheck",
        "//cmd/hyperkube",
        "//pkg",
    ],
)

package_group(
    name = "pkg_kubectl_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":pkg_kubectl_CONSUMERS_BAD",
    ],
    packages = [
        "//cmd/kubectl",
        "//cmd/kubectl/app",
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/config",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/testing",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/cmd/util/editor",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_CONSUMERS_BAD",
    packages = [
        "//cmd/clicheck",
        "//cmd/hyperkube",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":pkg_kubectl_cmd_CONSUMERS_BAD",
    ],
    packages = [
        "//cmd/kubectl",
        "//cmd/kubectl/app",
        "//pkg/kubectl",
        "//pkg/kubectl/cmd",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_auth_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/rollout",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_config_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_rollout_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_set_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/rollout",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_templates_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":COMMON_testing",
    ],
    packages = [
        "//cmd/kubectl",
        "//cmd/kubectl/app",
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/config",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/templates",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/cmd/util/sanity",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_testdata_edit_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_testing_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/explain",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_util_CONSUMERS_BAD",
    includes = [
        ":KUBEADM_BAD",
    ],
    packages = [
        "//cmd/clicheck",
        "//cmd/hyperkube",
        "//cmd/kube-proxy/app",
        "//cmd/kube-scheduler/app",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_util_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":COMMON_testing",
        ":pkg_kubectl_cmd_util_CONSUMERS_BAD",
    ],
    packages = [
        "//cmd/kubectl",
        "//cmd/kubectl/app",
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/config",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/testing",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/cmd/util/editor",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_util_editor_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/util",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_util_jsonmerge_CONSUMERS",
    packages = [
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/util",
    ],
)

package_group(
    name = "pkg_kubectl_cmd_util_sanity_CONSUMERS",
    packages = [
        "//cmd/clicheck",
        "//pkg/kubectl/cmd/util",
    ],
)

package_group(
    name = "pkg_kubectl_metricsutil_CONSUMERS_BAD",
    packages = [
        "//cmd/clicheck",
        "//cmd/hyperkube",
    ],
)

package_group(
    name = "pkg_kubectl_metricsutil_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":pkg_kubectl_metricsutil_CONSUMERS_BAD",
    ],
    packages = [
        "//cmd/kubectl",
        "//cmd/kubectl/app",
        "//pkg/kubectl",
        "//pkg/kubectl/cmd",
    ],
)

package_group(
    name = "pkg_kubectl_resource_CONSUMERS",
    includes = [
        ":COMMON_generators",
        ":COMMON_testing",
    ],
    packages = [
        "//cmd/kubectl",
        "//cmd/kubectl/app",
        "//pkg/kubectl",
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/cmd/auth",
        "//pkg/kubectl/cmd/config",
        "//pkg/kubectl/cmd/resource",
        "//pkg/kubectl/cmd/rollout",
        "//pkg/kubectl/cmd/set",
        "//pkg/kubectl/cmd/testing",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/cmd/util/editor",
    ],
)

package_group(
    name = "pkg_kubectl_testing_CONSUMERS",
    packages = [
        "//pkg/kubectl",
        "//pkg/printers/internalversion",
    ],
)

package_group(
    name = "pkg_kubectl_util_CONSUMERS",
    packages = [
        "//pkg/kubectl",
        "//pkg/kubectl/cmd",
        "//pkg/kubectl/proxy",
    ],
)

package_group(
    name = "pkg_kubectl_validation_CONSUMERS",
    packages = [
        "//pkg/kubectl",
        "//pkg/kubectl/cmd/testing",
        "//pkg/kubectl/cmd/util",
        "//pkg/kubectl/resource",
    ],
)

# Added by ./hack/verify-bazel.sh; should be excluded from
# that script since it makes no sense here.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
)

# Added by ./hack/verify-bazel.sh; should be excluded from
# that script since it makes no sense here.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

184
build/visible_to/README.md Normal file
View File

@@ -0,0 +1,184 @@
# Package Groups Used in Kubernetes Visibility Rules
## Background
`BUILD` rules define dependencies, answering the question:
on what packages does _foo_ depend?
The `BUILD` file in this package allows one to define
_allowed_ reverse dependencies, answering the question:
given a package _foo_, what other specific packages are
allowed to depend on it?
This is done via visibility rules.
Visibility rules discourage unintended, spurious
dependencies that blur code boundaries, slow CI/CD queues and
generally inhibit progress.
#### Facts
* A package is any directory that contains a `BUILD` file.
* A `package_group` is a `BUILD` file rule that defines a named
set of packages for use in other rules, e.g., given
```
package_group(
name = "database_CONSUMERS",
packages = [
"//foo/dbinitializer",
"//foo/backend/...", # `backend` and everything below it
],
)
```
one can specify the following visibility rule in any `BUILD` rule:
```
visibility = [ "//build/visible_to:database_CONSUMERS" ],
```
* A visibility rule takes a list of package groups as its
argument - or one of the pre-defined groups
`//visibility:private` or `//visibility:public`.
* If no visibility is explicitly defined, a package is
_private_ by default.
* Violations in visibility cause `make bazel-build` to fail,
which in turn causes the submit queue to fail - that's the
enforcement.
#### Why define all package groups meant for visibility here (in one file)?
* Ease discovery of appropriate groups for use in a rule.
* Ease reuse (inclusions) of commonly used groups.
* Consistent style:
* easy to read `//build/visible_to:math_library_CONSUMERS` rules,
* call out bad dependencies for eventual removal.
* Make it more obvious in code reviews when visibility is being
modified.
* One set of `OWNERS` to manage visibility.
The alternative is to use special [package literals] directly
in visibility rules, e.g.
```
visibility = [
"//foo/dbinitializer:__pkg__",
"//foo/backend:__subpackages__",
],
```
The difference in style is similar to the difference between
using a named static constant like `MAX_NODES` rather than a
literal like `12`. Names are preferable to literals for intent
documentation, search, changing one place rather than _n_,
associating usage in distant code blocks, etc.
## Rule Examples
#### Nobody outside this package can depend on me.
```
visibility = ["//visibility:private"],
```
Since this is the default, there's no reason to use this
rule except as a means to override, for some specific
target, some broader, whole-package visibility rule.
#### Anyone can depend on me (eschew this).
```
visibility = ["//visibility:public"],
```
#### Only some servers can depend on me.
Appropriate for, say, backend storage utilities.
```
visibility = ["//build/visible_to:server_foo", "//build/visible_to:server_bar"],
```
#### Both some client and some server can see me.
Appropriate for shared API definition files and generated code:
```
visibility = ["//build/visible_to:client_foo", "//build/visible_to:server_foo"],
```
## Handy commands
#### Quickly check for visibility violations
```
bazel build --check_visibility --nobuild \
//cmd/... //pkg/... //plugin/... \
//third_party/... //examples/... //test/... //vendor/k8s.io/...
```
#### Who depends on target _q_?
To create a seed set for a visibility group, one can ask what
packages currently depend on (must currently be able to see) a
given Go library target? It's a time consuming query.
```
q=//pkg/kubectl/cmd:go_default_library
bazel query "rdeps(...,${q})" | \
grep go_default_library | \
sed 's/\(.*\):go_default_library/ "\1",/'
```
#### What targets below _p_ are visible to anyone?
A means to look for things one missed when locking down _p_.
```
p=//pkg/kubectl/cmd
bazel query "visible(...,${p}/...)"
```
#### What packages below _p_ may target _q_ depend on without violating visibility rules?
A means to pinpoint unexpected visibility.
```
p=//pkg/kubectl
q=//cmd/kubelet:kubelet
bazel query "visible(${q},${p}/...)" | more
```
#### What packages does target _q_ need?
```
q=//cmd/kubectl:kubectl
bazel query "buildfiles(deps($q))" | \
grep -v @bazel_tools | \
grep -v @io_bazel_rules | \
grep -v @io_kubernetes_build | \
grep -v @local_config | \
grep -v @local_jdk | \
grep -v //visible_to: | \
sed 's/:BUILD//' | \
sort | uniq > ~/KUBECTL_BUILD.txt
```
or try
```
bazel query --nohost_deps --noimplicit_deps \
  "kind('source file', deps($q))" | wc -l
```
#### How does kubectl depend on pkg/util/parsers?
```
bazel query "somepath(cmd/kubectl:kubectl, pkg/util/parsers:go_default_library)"
```
[package literals]: https://bazel.build/versions/master/docs/be/common-definitions.html#common.visibility

24
build/workspace.bzl Normal file
View File

@@ -0,0 +1,24 @@
# TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//vendor:repo.bzl", "bili_http_archive")
# Sanitize a dependency so that it works correctly from code that includes
# TensorFlow as a submodule.
# NOTE(review): comment inherited from TensorFlow's workspace.bzl; here the
# "submodule" in question is presumably this repository itself — confirm.
def clean_dep(dep):
    """Return `dep` as an absolute label string anchored to this repository.

    Wrapping the label in Label() before stringifying resolves it relative
    to the repository that defines this .bzl file, so the resulting path
    stays correct even when this workspace is loaded from another WORKSPACE.
    """
    return str(Label(dep))
# If TensorFlow is linked as a submodule.
# path_prefix is no longer used.
# tf_repo_name is thought to be under consideration.
def bili_workspace(path_prefix="", tf_repo_name=""):
# Note that we check the minimum bazel version in WORKSPACE.
bili_http_archive(
name = "pcre",
sha256 = "84c3c4d2eb9166aaed44e39b89e4b6a49eac6fed273bdb844c94fb6c8bdda1b5",
urls = [
"http://bazel-cabin.bilibili.co/clib/pcre-8.42.zip",
],
strip_prefix = "pcre-8.42",
build_file = clean_dep("//vendor:pcre.BUILD"),
)