Update buildroot to 1.16 (#5154)

* Update buildroot to 1.16

* Update go.mod to use go 1.16

* Ran go mod vendor for failing test

* Update 1.15 references in more places
Author: Dharmit Shah
Date: 2021-10-20 17:33:14 +05:30
Committed by: GitHub
Parent: 8a6084e126
Commit: fd029225e6
17 changed files with 4 additions and 1598 deletions

View File

@@ -1,7 +1,7 @@
# This Dockerfile builds an image containing the Linux, Mac and Windows version of odo
# layered on top of the ubi7/ubi image.
-FROM registry.svc.ci.openshift.org/openshift/release:golang-1.15 AS builder
+FROM registry.svc.ci.openshift.org/openshift/release:golang-1.16 AS builder
COPY . /go/src/github.com/openshift/odo
WORKDIR /go/src/github.com/openshift/odo

View File

@@ -7,7 +7,7 @@ toc::[]
== Setting up
-Requires *Go 1.15*
+Requires *Go 1.16*
Testing and release builds happen with the above version. Developers are advised to stick to this version if they can but it is not compulsory.
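A quick way to confirm the local toolchain matches this requirement is to print the runtime version (a minimal sketch, not part of this change):

package main

import (
	"fmt"
	"runtime"
	"strings"
)

func main() {
	// runtime.Version reports the toolchain that built the binary, e.g. "go1.16.9".
	v := runtime.Version()
	fmt.Println(v)
	if !strings.HasPrefix(v, "go1.16") {
		fmt.Println("warning: expected a Go 1.16.x toolchain")
	}
}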

go.mod (3 changed lines)
View File

@@ -1,6 +1,6 @@
module github.com/openshift/odo
-go 1.15
+go 1.16
require (
github.com/Netflix/go-expect v0.0.0-20201125194554-85d881c3777e
@@ -31,7 +31,6 @@ require (
github.com/onsi/gomega v1.14.0
github.com/openshift/api v0.0.0-20201216151826-78a19e96f9eb
github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49
-github.com/openshift/library-go v0.0.0-20210106214821-c4d0b9c8d55f
github.com/openshift/oc v0.0.0-alpha.0.0.20210325095525-2513fdbb36e2
github.com/operator-framework/api v0.3.20
github.com/operator-framework/operator-lifecycle-manager v0.17.0

go.sum (13 changed lines)
View File

@@ -60,7 +60,6 @@ github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
@@ -142,7 +141,6 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
@@ -248,9 +246,6 @@ github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8l
github.com/devfile/api/v2 v2.0.0-20210910153124-da620cd1a7a1/go.mod h1:kLX/nW93gigOHXK3NLeJL2fSS/sgEe+OHu8bo3aoOi4=
github.com/devfile/api/v2 v2.0.0-20210917193329-089a48011460 h1:cmd+3poyUwevcWchYdvE02YT1nQU4SJpA5/wrdLrpWE=
github.com/devfile/api/v2 v2.0.0-20210917193329-089a48011460/go.mod h1:kLX/nW93gigOHXK3NLeJL2fSS/sgEe+OHu8bo3aoOi4=
github.com/devfile/api/v2 v2.1.0 h1:hodMaQpa9onnx8uG5NBxIuzBjC7AUkLTGCcuvZyED7g=
github.com/devfile/api/v2 v2.1.0/go.mod h1:Cot4snybn3qhIh48oIFi9McocnIx7zY5fFbjfrIpPvg=
github.com/devfile/library v1.1.1-0.20210910214722-7c5ff63711ec h1:UtJiFJfnC7fhup2MbGOzt6DCKMFKJTw47aHHETLfcZA=
github.com/devfile/library v1.1.1-0.20210910214722-7c5ff63711ec/go.mod h1:svPWwWb+BP15SXCHl0dyOeE4Sohrjl5a2BaOzc/riLc=
github.com/devfile/library v1.2.0 h1:OT1Irwg5EZhlCpsAFkjIzd3bqyzbLG0JmFPMHeE1e7M=
github.com/devfile/library v1.2.0/go.mod h1:gyiQS+ZImnM4/d+wFUl3gJmIozOSXMenl0WX8cx4zu4=
@@ -296,7 +291,6 @@ github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik=
github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
github.com/emicklei/dot v0.15.0 h1:XDBW0Xco1QNyRb33cqLe10cT04yMWL1XpCZfa98Q6Og=
github.com/emicklei/dot v0.15.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
@@ -322,7 +316,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -1165,7 +1158,6 @@ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -1659,7 +1651,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.20.1 h1:ud1c3W3YNzGd6ABJlbFfKXBKXO+1KdGfcgGGNgFR03E=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
@@ -1670,7 +1661,6 @@ k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdB
k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk=
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
k8s.io/apiextensions-apiserver v0.18.9/go.mod h1:JagmAhU0TVENzgUZqHJsjCSDh7YuV5o6g01G1Fwh7zI=
k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg=
k8s.io/apiextensions-apiserver v0.20.0/go.mod h1:ZH+C33L2Bh1LY1+HphoRmN1IQVLTShVcTojivK3N9xg=
@@ -1683,7 +1673,6 @@ k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
k8s.io/apiserver v0.18.9/go.mod h1:vXQzMtUCLsGg1Bh+7Jo2mZKHpHZFCZn8eTNSepcIA1M=
k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA=
k8s.io/apiserver v0.20.0/go.mod h1:6gRIWiOkvGvQt12WTYmsiYoUyYW0FXSiMdNl4m+sxY8=
@@ -1697,7 +1686,6 @@ k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+
k8s.io/code-generator v0.18.0-beta.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.18.9/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
@@ -1711,7 +1699,6 @@ k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1a
k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks=
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
k8s.io/component-base v0.18.9/go.mod h1:tUo4qZtV8m7t/U+0DgY+fcnn4BFZ480fZdzxOkWH4zk=
k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo=
k8s.io/component-base v0.20.0/go.mod h1:wKPj+RHnAr8LW2EIBIK7AxOHPde4gme2lzXwVSoRXeA=

View File

@@ -1,6 +1,5 @@
# Dockerfile to bootstrap build and test in openshift-ci
-FROM registry.ci.openshift.org/openshift/release:golang-1.15
+FROM registry.ci.openshift.org/openshift/release:golang-1.16
RUN yum -y install make wget gcc git httpd-tools

View File

@@ -1,30 +0,0 @@
package appsserialization
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
appsv1 "github.com/openshift/api/apps/v1"
)
var (
// for decoding, we want to be tolerant of groupified and non-groupified
annotationDecodingScheme = runtime.NewScheme()
annotationDecoder runtime.Decoder
// for encoding, we want to be strict on groupified
annotationEncodingScheme = runtime.NewScheme()
annotationEncoder runtime.Encoder
)
func init() {
utilruntime.Must(appsv1.Install(annotationDecodingScheme))
utilruntime.Must(appsv1.DeprecatedInstallWithoutGroup(annotationDecodingScheme))
annotationDecoderCodecFactory := serializer.NewCodecFactory(annotationDecodingScheme)
annotationDecoder = annotationDecoderCodecFactory.UniversalDecoder(appsv1.GroupVersion)
utilruntime.Must(appsv1.Install(annotationEncodingScheme))
annotationEncoderCodecFactory := serializer.NewCodecFactory(annotationEncodingScheme)
annotationEncoder = annotationEncoderCodecFactory.LegacyCodec(appsv1.GroupVersion)
}

View File

@@ -1,31 +0,0 @@
package appsserialization
import (
"fmt"
appsv1 "github.com/openshift/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DecodeDeploymentConfig decodes a DeploymentConfig from the controller using the annotation codec.
// An error is returned if the controller doesn't contain an encoded config or if decoding fails.
func DecodeDeploymentConfig(controller metav1.ObjectMetaAccessor) (*appsv1.DeploymentConfig, error) {
encodedConfig, exists := controller.GetObjectMeta().GetAnnotations()[appsv1.DeploymentEncodedConfigAnnotation]
if !exists {
return nil, fmt.Errorf("object %s does not have encoded deployment config annotation", controller.GetObjectMeta().GetName())
}
config, err := runtime.Decode(annotationDecoder, []byte(encodedConfig))
if err != nil {
return nil, err
}
externalConfig, ok := config.(*appsv1.DeploymentConfig)
if !ok {
return nil, fmt.Errorf("object %+v is not v1.DeploymentConfig", config)
}
return externalConfig, nil
}
func EncodeDeploymentConfig(config *appsv1.DeploymentConfig) ([]byte, error) {
return runtime.Encode(annotationEncoder, config)
}
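For context on how the two functions above fit together, here is a hypothetical round trip (not part of this commit; the "frontend" names are invented): a config encoded by EncodeDeploymentConfig is stored under the annotation that DecodeDeploymentConfig later reads back.

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/library-go/pkg/apps/appsserialization"
)

func main() {
	config := &appsv1.DeploymentConfig{ObjectMeta: metav1.ObjectMeta{Name: "frontend"}}
	encoded, err := appsserialization.EncodeDeploymentConfig(config)
	if err != nil {
		panic(err)
	}
	// Stash the encoded config on a replication controller the way the deployer does.
	rc := &corev1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: "frontend-1",
			Annotations: map[string]string{
				appsv1.DeploymentEncodedConfigAnnotation: string(encoded),
			},
		},
	}
	decoded, err := appsserialization.DecodeDeploymentConfig(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Name) // "frontend"
}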

View File

@@ -1,60 +0,0 @@
package appsutil
const (
// FailedRcCreateReason is added in a deployment config when it cannot create a new replication
// controller.
FailedRcCreateReason = "ReplicationControllerCreateError"
// NewReplicationControllerReason is added in a deployment config when it creates a new replication
// controller.
NewReplicationControllerReason = "NewReplicationControllerCreated"
// NewRcAvailableReason is added in a deployment config when its newest replication controller is made
// available, i.e. the number of new pods that have passed readiness checks and run for at least
// minReadySeconds is at least the minimum number of available pods required for the deployment config.
NewRcAvailableReason = "NewReplicationControllerAvailable"
// TimedOutReason is added in a deployment config when its newest replication controller fails to show
// any progress within the given deadline (progressDeadlineSeconds).
TimedOutReason = "ProgressDeadlineExceeded"
// PausedConfigReason is added in a deployment config when it is paused. Lack of progress shouldn't be
// estimated once a deployment config is paused.
PausedConfigReason = "DeploymentConfigPaused"
// CancelledRolloutReason is added in a deployment config when its newest rollout was
// interrupted by cancellation.
CancelledRolloutReason = "RolloutCancelled"
// DeploymentConfigLabel is the name of a label used to correlate a deployment with the
// DeploymentConfig on which the deployment is based.
DeploymentConfigLabel = "deploymentconfig"
// DeploymentLabel is the name of a label used to correlate a deployment with the Pod created
// to execute the deployment logic.
DeploymentLabel = "deployment"
// MaxDeploymentDurationSeconds represents the maximum duration that a deployment is allowed to run.
// This is set as the default value for ActiveDeadlineSeconds for the deployer pod.
// Currently set to 6 hours.
MaxDeploymentDurationSeconds int64 = 21600
// DefaultRecreateTimeoutSeconds is the default TimeoutSeconds for RecreateDeploymentStrategyParams.
// Used by strategies:
DefaultRecreateTimeoutSeconds int64 = 10 * 60
DefaultRollingTimeoutSeconds int64 = 10 * 60
// PreHookPodSuffix is the suffix added to all pre hook pods
PreHookPodSuffix = "hook-pre"
// MidHookPodSuffix is the suffix added to all mid hook pods
MidHookPodSuffix = "hook-mid"
// PostHookPodSuffix is the suffix added to all post hook pods
PostHookPodSuffix = "hook-post"
// Used only internally by utils:
// DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state.
// Used for specifying the reason for cancellation or failure of a deployment.
DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
DeploymentIgnorePodAnnotation = "deploy.openshift.io/deployer-pod.ignore"
DeploymentReplicasAnnotation = "openshift.io/deployment.replicas"
DeploymentFailedUnrelatedDeploymentExists = "unrelated pod with the same name as this deployment is already running"
DeploymentFailedUnableToCreateDeployerPod = "unable to create deployer pod"
DeploymentFailedDeployerPodNoLongerExists = "deployer pod no longer exists"
deploymentCancelledByUser = "cancelled by the user"
deploymentCancelledNewerDeploymentExists = "newer deployment was found running"
)

View File

@@ -1,34 +0,0 @@
package appsutil
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/scale/scheme/autoscalingv1"
)
// rcMapper pins preferred version to v1 and scale kind to autoscaling/v1 Scale
// this avoids putting complete server discovery (including extension APIs) in the critical path for deployments
type rcMapper struct{}
func (rcMapper) ResourceFor(gvr schema.GroupVersionResource) (schema.GroupVersionResource, error) {
if gvr.Group == "" && gvr.Resource == "replicationcontrollers" {
return schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}, nil
}
return schema.GroupVersionResource{}, fmt.Errorf("unknown replication controller resource: %#v", gvr)
}
func (rcMapper) ScaleForResource(gvr schema.GroupVersionResource) (schema.GroupVersionKind, error) {
rcGvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}
if gvr == rcGvr {
return autoscalingv1.SchemeGroupVersion.WithKind("Scale"), nil
}
return schema.GroupVersionKind{}, fmt.Errorf("unknown replication controller resource: %#v", gvr)
}
func NewReplicationControllerScaleClient(client kubernetes.Interface) scaleclient.ScalesGetter {
return scaleclient.New(client.CoreV1().RESTClient(), rcMapper{}, dynamic.LegacyAPIPathResolverFunc, rcMapper{})
}
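As a usage sketch (hypothetical, not part of this commit; getRCScale and its arguments are invented), the returned scale client can read a replication controller's scale subresource without full API discovery:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/apps/appsutil"
)

// getRCScale reads the scale of a replication controller via the pinned mapper.
func getRCScale(clientset kubernetes.Interface, namespace, name string) error {
	scales := appsutil.NewReplicationControllerScaleClient(clientset)
	gr := schema.GroupResource{Group: "", Resource: "replicationcontrollers"}
	scale, err := scales.Scales(namespace).Get(context.TODO(), gr, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%s/%s is scaled to %d replicas\n", namespace, name, scale.Spec.Replicas)
	return nil
}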

View File

@@ -1,629 +0,0 @@
package appsutil
import (
"context"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
intstrutil "k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/watch"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
appsv1 "github.com/openshift/api/apps/v1"
"github.com/openshift/library-go/pkg/apps/appsserialization"
"github.com/openshift/library-go/pkg/build/naming"
)
// DeployerPodNameForDeployment returns the name of a pod for a given deployment
func DeployerPodNameForDeployment(deployment string) string {
return naming.GetPodName(deployment, "deploy")
}
// WaitForRunningDeployerPod waits, up to the given timeout, until the deployer pod
// for the given replication controller reaches a phase whose logs can be retrieved
// (Running, Succeeded, or Failed).
func WaitForRunningDeployerPod(podClient corev1client.PodsGetter, rc *corev1.ReplicationController, timeout time.Duration) error {
podName := DeployerPodNameForDeployment(rc.Name)
canGetLogs := func(p *corev1.Pod) bool {
return corev1.PodSucceeded == p.Status.Phase || corev1.PodFailed == p.Status.Phase || corev1.PodRunning == p.Status.Phase
}
fieldSelector := fields.OneTermEqualSelector("metadata.name", podName).String()
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fieldSelector
return podClient.Pods(rc.Namespace).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fieldSelector
return podClient.Pods(rc.Namespace).Watch(context.TODO(), options)
},
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
_, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, func(e watch.Event) (bool, error) {
switch e.Type {
case watch.Added, watch.Modified:
newPod, ok := e.Object.(*corev1.Pod)
if !ok {
return true, fmt.Errorf("unknown event object %#v", e.Object)
}
return canGetLogs(newPod), nil
case watch.Deleted:
return true, fmt.Errorf("pod got deleted %#v", e.Object)
case watch.Error:
return true, fmt.Errorf("encountered error while watching for pod: %v", e.Object)
default:
return true, fmt.Errorf("unexpected event type: %T", e.Type)
}
})
return err
}
func newControllerRef(config *appsv1.DeploymentConfig) *metav1.OwnerReference {
deploymentConfigControllerRefKind := appsv1.GroupVersion.WithKind("DeploymentConfig")
blockOwnerDeletion := true
isController := true
return &metav1.OwnerReference{
APIVersion: deploymentConfigControllerRefKind.GroupVersion().String(),
Kind: deploymentConfigControllerRefKind.Kind,
Name: config.Name,
UID: config.UID,
BlockOwnerDeletion: &blockOwnerDeletion,
Controller: &isController,
}
}
// MakeDeployment creates a deployment represented as a ReplicationController and based on the given DeploymentConfig.
// The controller replica count will be zero.
func MakeDeployment(config *appsv1.DeploymentConfig) (*v1.ReplicationController, error) {
// EncodeDeploymentConfig encodes config as a string using codec.
encodedConfig, err := appsserialization.EncodeDeploymentConfig(config)
if err != nil {
return nil, err
}
deploymentName := LatestDeploymentNameForConfig(config)
podSpec := config.Spec.Template.Spec.DeepCopy()
// Fix trailing and leading whitespace in the image field
// This is needed to sanitize old deployment configs where spaces were permitted but
// kubernetes 1.7 (#47491) tightened the validation of container image fields.
for i := range podSpec.Containers {
podSpec.Containers[i].Image = strings.TrimSpace(podSpec.Containers[i].Image)
}
controllerLabels := make(labels.Set)
for k, v := range config.Labels {
controllerLabels[k] = v
}
// Correlate the deployment with the config.
// TODO: Using the annotation constant for now since the value is correct
// but we could consider adding a new constant to the public types.
controllerLabels[appsv1.DeploymentConfigAnnotation] = config.Name
// Ensure that pods created by this deployment controller can be safely associated back
// to the controller, and that multiple deployment controllers for the same config don't
// manipulate each others' pods.
selector := map[string]string{}
for k, v := range config.Spec.Selector {
selector[k] = v
}
selector[DeploymentConfigLabel] = config.Name
selector[DeploymentLabel] = deploymentName
podLabels := make(labels.Set)
for k, v := range config.Spec.Template.Labels {
podLabels[k] = v
}
podLabels[DeploymentConfigLabel] = config.Name
podLabels[DeploymentLabel] = deploymentName
podAnnotations := make(labels.Set)
for k, v := range config.Spec.Template.Annotations {
podAnnotations[k] = v
}
podAnnotations[appsv1.DeploymentAnnotation] = deploymentName
podAnnotations[appsv1.DeploymentConfigAnnotation] = config.Name
podAnnotations[appsv1.DeploymentVersionAnnotation] = strconv.FormatInt(config.Status.LatestVersion, 10)
controllerRef := newControllerRef(config)
zero := int32(0)
deployment := &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentName,
Namespace: config.Namespace,
Annotations: map[string]string{
appsv1.DeploymentConfigAnnotation: config.Name,
appsv1.DeploymentEncodedConfigAnnotation: string(encodedConfig),
appsv1.DeploymentStatusAnnotation: string(appsv1.DeploymentStatusNew),
appsv1.DeploymentVersionAnnotation: strconv.FormatInt(config.Status.LatestVersion, 10),
// This is the target replica count for the new deployment.
appsv1.DesiredReplicasAnnotation: strconv.Itoa(int(config.Spec.Replicas)),
DeploymentReplicasAnnotation: strconv.Itoa(0),
},
Labels: controllerLabels,
OwnerReferences: []metav1.OwnerReference{*controllerRef},
},
Spec: v1.ReplicationControllerSpec{
// The deployment should be inactive initially
Replicas: &zero,
Selector: selector,
MinReadySeconds: config.Spec.MinReadySeconds,
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
Annotations: podAnnotations,
},
Spec: *podSpec,
},
},
}
if config.Status.Details != nil && len(config.Status.Details.Message) > 0 {
deployment.Annotations[appsv1.DeploymentStatusReasonAnnotation] = config.Status.Details.Message
}
if value, ok := config.Annotations[DeploymentIgnorePodAnnotation]; ok {
deployment.Annotations[DeploymentIgnorePodAnnotation] = value
}
return deployment, nil
}
// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
// we are about to add already exists and has the same status and reason then we are not going to update.
func SetDeploymentCondition(status *appsv1.DeploymentConfigStatus, condition appsv1.DeploymentCondition) {
currentCond := GetDeploymentCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Preserve lastTransitionTime if we are not switching between statuses of a condition.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
// RemoveDeploymentCondition removes the deployment condition with the provided type.
func RemoveDeploymentCondition(status *appsv1.DeploymentConfigStatus, condType appsv1.DeploymentConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
func filterOutCondition(conditions []appsv1.DeploymentCondition, condType appsv1.DeploymentConditionType) []appsv1.DeploymentCondition {
var newConditions []appsv1.DeploymentCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}
// IsOwnedByConfig checks whether the provided replication controller is part of a
// deployment configuration.
// TODO: Switch to use owner references once we got those working.
func IsOwnedByConfig(obj metav1.Object) bool {
_, ok := obj.GetAnnotations()[appsv1.DeploymentConfigAnnotation]
return ok
}
// DeploymentsForCleanup determines which deployments for a configuration are relevant for the
// revision history limit quota
func DeploymentsForCleanup(configuration *appsv1.DeploymentConfig, deployments []*v1.ReplicationController) []v1.ReplicationController {
// if the past deployment quota has been exceeded, we need to prune the oldest deployments
// until we are not exceeding the quota any longer, so we sort oldest first
sort.Sort(sort.Reverse(ByLatestVersionDesc(deployments)))
relevantDeployments := []v1.ReplicationController{}
activeDeployment := ActiveDeployment(deployments)
if activeDeployment == nil {
// if cleanup policy is set but no successful deployments have happened, there will be
// no active deployment. We can consider all of the deployments in this case except for
// the latest one
for i := range deployments {
deployment := deployments[i]
if deploymentVersionFor(deployment) != configuration.Status.LatestVersion {
relevantDeployments = append(relevantDeployments, *deployment)
}
}
} else {
// if there is an active deployment, we need to filter out any deployments that we don't
// care about, namely the active deployment and any newer deployments
for i := range deployments {
deployment := deployments[i]
if deployment != activeDeployment && deploymentVersionFor(deployment) < deploymentVersionFor(activeDeployment) {
relevantDeployments = append(relevantDeployments, *deployment)
}
}
}
return relevantDeployments
}
// LabelForDeployment builds a string identifier for a Deployment.
func LabelForDeployment(deployment *v1.ReplicationController) string {
return fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name)
}
// LabelForDeploymentConfig builds a string identifier for a DeploymentConfig.
func LabelForDeploymentConfig(config runtime.Object) string {
accessor, _ := meta.Accessor(config)
return fmt.Sprintf("%s/%s", accessor.GetNamespace(), accessor.GetName())
}
// LatestDeploymentNameForConfig returns a stable identifier for deployment config
func LatestDeploymentNameForConfig(config *appsv1.DeploymentConfig) string {
return LatestDeploymentNameForConfigAndVersion(config.Name, config.Status.LatestVersion)
}
// DeploymentNameForConfigVersion returns the name of the version-th deployment
// for the config that has the provided name
func DeploymentNameForConfigVersion(name string, version int64) string {
return fmt.Sprintf("%s-%d", name, version)
}
// LatestDeploymentNameForConfigAndVersion returns a stable identifier for config based on its version.
func LatestDeploymentNameForConfigAndVersion(name string, version int64) string {
return fmt.Sprintf("%s-%d", name, version)
}
func DeployerPodNameFor(obj runtime.Object) string {
return AnnotationFor(obj, appsv1.DeploymentPodAnnotation)
}
func DeploymentConfigNameFor(obj runtime.Object) string {
return AnnotationFor(obj, appsv1.DeploymentConfigAnnotation)
}
func DeploymentStatusReasonFor(obj runtime.Object) string {
return AnnotationFor(obj, appsv1.DeploymentStatusReasonAnnotation)
}
func DeleteStatusReasons(rc *v1.ReplicationController) {
delete(rc.Annotations, appsv1.DeploymentStatusReasonAnnotation)
delete(rc.Annotations, appsv1.DeploymentCancelledAnnotation)
}
func SetCancelledByUserReason(rc *v1.ReplicationController) {
rc.Annotations[appsv1.DeploymentCancelledAnnotation] = "true"
rc.Annotations[appsv1.DeploymentStatusReasonAnnotation] = deploymentCancelledByUser
}
func SetCancelledByNewerDeployment(rc *v1.ReplicationController) {
rc.Annotations[appsv1.DeploymentCancelledAnnotation] = "true"
rc.Annotations[appsv1.DeploymentStatusReasonAnnotation] = deploymentCancelledNewerDeploymentExists
}
// HasSynced checks if the provided deployment config has been noticed by the deployment
// config controller.
func HasSynced(dc *appsv1.DeploymentConfig, generation int64) bool {
return dc.Status.ObservedGeneration >= generation
}
// HasChangeTrigger returns whether the provided deployment configuration has
// a config change trigger or not
func HasChangeTrigger(config *appsv1.DeploymentConfig) bool {
for _, trigger := range config.Spec.Triggers {
if trigger.Type == appsv1.DeploymentTriggerOnConfigChange {
return true
}
}
return false
}
// HasTrigger returns whether the provided deployment configuration has any trigger
// defined or not.
func HasTrigger(config *appsv1.DeploymentConfig) bool {
return HasChangeTrigger(config) || HasImageChangeTrigger(config)
}
// HasLastTriggeredImage returns whether all image change triggers in the provided deployment
// configuration have the lastTriggeredImage field set (in other words, all images were updated for
// them). Returns false if the deployment configuration has no image change trigger defined.
func HasLastTriggeredImage(config *appsv1.DeploymentConfig) bool {
hasImageTrigger := false
for _, trigger := range config.Spec.Triggers {
if trigger.Type == appsv1.DeploymentTriggerOnImageChange {
hasImageTrigger = true
if len(trigger.ImageChangeParams.LastTriggeredImage) == 0 {
return false
}
}
}
return hasImageTrigger
}
// IsInitialDeployment returns whether the deployment configuration is the first version
// of this configuration.
func IsInitialDeployment(config *appsv1.DeploymentConfig) bool {
return config.Status.LatestVersion == 0
}
// IsRollingConfig returns true if the strategy type is a rolling update.
func IsRollingConfig(config *appsv1.DeploymentConfig) bool {
return config.Spec.Strategy.Type == appsv1.DeploymentStrategyTypeRolling
}
// ResolveFenceposts is a copy from k8s deployment_util to avoid unnecessary imports
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true)
if err != nil {
return 0, 0, err
}
unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false)
if err != nil {
return 0, 0, err
}
if surge == 0 && unavailable == 0 {
// Validation should never allow the user to explicitly use zero values for both maxSurge
// and maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
// theory that surge might not work due to quota.
unavailable = 1
}
return int32(surge), int32(unavailable), nil
}
// MaxUnavailable returns the maximum unavailable pods a rolling deployment config can take.
func MaxUnavailable(config *appsv1.DeploymentConfig) int32 {
if !IsRollingConfig(config) {
return int32(0)
}
// Error caught by validation
_, maxUnavailable, _ := ResolveFenceposts(config.Spec.Strategy.RollingParams.MaxSurge, config.Spec.Strategy.RollingParams.MaxUnavailable, config.Spec.Replicas)
return maxUnavailable
}
// MaxSurge returns the maximum surge pods a rolling deployment config can take.
func MaxSurge(config appsv1.DeploymentConfig) int32 {
if !IsRollingConfig(&config) {
return int32(0)
}
// Error caught by validation
maxSurge, _, _ := ResolveFenceposts(config.Spec.Strategy.RollingParams.MaxSurge, config.Spec.Strategy.RollingParams.MaxUnavailable, config.Spec.Replicas)
return maxSurge
}
// AnnotationFor returns the annotation with key for obj.
func AnnotationFor(obj runtime.Object, key string) string {
objectMeta, err := meta.Accessor(obj)
if err != nil {
return ""
}
if objectMeta == nil || reflect.ValueOf(objectMeta).IsNil() {
return ""
}
return objectMeta.GetAnnotations()[key]
}
// ActiveDeployment returns the latest complete deployment, or nil if there is
// no such deployment. The active deployment is not always the same as the
// latest deployment.
func ActiveDeployment(input []*v1.ReplicationController) *v1.ReplicationController {
var activeDeployment *v1.ReplicationController
var lastCompleteDeploymentVersion int64 = 0
for i := range input {
deployment := input[i]
deploymentVersion := DeploymentVersionFor(deployment)
if IsCompleteDeployment(deployment) && deploymentVersion > lastCompleteDeploymentVersion {
activeDeployment = deployment
lastCompleteDeploymentVersion = deploymentVersion
}
}
return activeDeployment
}
// ConfigSelector returns a label Selector which can be used to find all
// deployments for a DeploymentConfig.
//
// TODO: Using the annotation constant for now since the value is correct
// but we could consider adding a new constant to the public types.
func ConfigSelector(name string) labels.Selector {
return labels.SelectorFromValidatedSet(labels.Set{appsv1.DeploymentConfigAnnotation: name})
}
// IsCompleteDeployment returns true if the passed deployment is in state complete.
func IsCompleteDeployment(deployment runtime.Object) bool {
return DeploymentStatusFor(deployment) == appsv1.DeploymentStatusComplete
}
// IsFailedDeployment returns true if the passed deployment failed.
func IsFailedDeployment(deployment runtime.Object) bool {
return DeploymentStatusFor(deployment) == appsv1.DeploymentStatusFailed
}
// IsTerminatedDeployment returns true if the passed deployment has terminated (either
// complete or failed).
func IsTerminatedDeployment(deployment runtime.Object) bool {
return IsCompleteDeployment(deployment) || IsFailedDeployment(deployment)
}
func IsDeploymentCancelled(deployment runtime.Object) bool {
value := AnnotationFor(deployment, appsv1.DeploymentCancelledAnnotation)
return strings.EqualFold(value, "true")
}
// DeployerPodSelector returns a label Selector which can be used to find all
// deployer pods associated with a deployment with name.
func DeployerPodSelector(name string) labels.Selector {
return labels.SelectorFromValidatedSet(labels.Set{appsv1.DeployerPodForDeploymentLabel: name})
}
func DeploymentStatusFor(deployment runtime.Object) appsv1.DeploymentStatus {
return appsv1.DeploymentStatus(AnnotationFor(deployment, appsv1.DeploymentStatusAnnotation))
}
func SetDeploymentLatestVersionAnnotation(rc *v1.ReplicationController, version string) {
if rc.Annotations == nil {
rc.Annotations = map[string]string{}
}
rc.Annotations[appsv1.DeploymentVersionAnnotation] = version
}
func DeploymentVersionFor(obj runtime.Object) int64 {
v, err := strconv.ParseInt(AnnotationFor(obj, appsv1.DeploymentVersionAnnotation), 10, 64)
if err != nil {
return -1
}
return v
}
func DeploymentNameFor(obj runtime.Object) string {
return AnnotationFor(obj, appsv1.DeploymentAnnotation)
}
func deploymentVersionFor(obj runtime.Object) int64 {
v, err := strconv.ParseInt(AnnotationFor(obj, appsv1.DeploymentVersionAnnotation), 10, 64)
if err != nil {
return -1
}
return v
}
// LatestDeploymentInfo returns info about the latest deployment for a config,
// or nil if there is no latest deployment. The latest deployment is not
// always the same as the active deployment.
func LatestDeploymentInfo(config *appsv1.DeploymentConfig, deployments []*v1.ReplicationController) (bool, *v1.ReplicationController) {
if config.Status.LatestVersion == 0 || len(deployments) == 0 {
return false, nil
}
sort.Sort(ByLatestVersionDesc(deployments))
candidate := deployments[0]
return deploymentVersionFor(candidate) == config.Status.LatestVersion, candidate
}
// GetDeploymentCondition returns the condition with the provided type.
func GetDeploymentCondition(status appsv1.DeploymentConfigStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// GetReplicaCountForDeployments returns the sum of all replicas for the
// given deployments.
func GetReplicaCountForDeployments(deployments []*v1.ReplicationController) int32 {
totalReplicaCount := int32(0)
for _, deployment := range deployments {
count := deployment.Spec.Replicas
if count == nil {
continue
}
totalReplicaCount += *count
}
return totalReplicaCount
}
// GetStatusReplicaCountForDeployments returns the sum of the replicas reported in the
// status of the given deployments.
func GetStatusReplicaCountForDeployments(deployments []*v1.ReplicationController) int32 {
totalReplicaCount := int32(0)
for _, deployment := range deployments {
totalReplicaCount += deployment.Status.Replicas
}
return totalReplicaCount
}
// GetReadyReplicaCountForReplicationControllers returns the number of ready pods corresponding to
// the given replication controllers.
func GetReadyReplicaCountForReplicationControllers(replicationControllers []*v1.ReplicationController) int32 {
totalReadyReplicas := int32(0)
for _, rc := range replicationControllers {
if rc != nil {
totalReadyReplicas += rc.Status.ReadyReplicas
}
}
return totalReadyReplicas
}
// GetAvailableReplicaCountForReplicationControllers returns the number of available pods corresponding to
// the given replication controllers.
func GetAvailableReplicaCountForReplicationControllers(replicationControllers []*v1.ReplicationController) int32 {
totalAvailableReplicas := int32(0)
for _, rc := range replicationControllers {
if rc != nil {
totalAvailableReplicas += rc.Status.AvailableReplicas
}
}
return totalAvailableReplicas
}
// HasImageChangeTrigger returns whether the provided deployment configuration has
// an image change trigger or not.
func HasImageChangeTrigger(config *appsv1.DeploymentConfig) bool {
for _, trigger := range config.Spec.Triggers {
if trigger.Type == appsv1.DeploymentTriggerOnImageChange {
return true
}
}
return false
}
// CanTransitionPhase returns whether it is allowed to go from the current to the next phase.
func CanTransitionPhase(current, next appsv1.DeploymentStatus) bool {
switch current {
case appsv1.DeploymentStatusNew:
switch next {
case appsv1.DeploymentStatusPending,
appsv1.DeploymentStatusRunning,
appsv1.DeploymentStatusFailed,
appsv1.DeploymentStatusComplete:
return true
}
case appsv1.DeploymentStatusPending:
switch next {
case appsv1.DeploymentStatusRunning,
appsv1.DeploymentStatusFailed,
appsv1.DeploymentStatusComplete:
return true
}
case appsv1.DeploymentStatusRunning:
switch next {
case appsv1.DeploymentStatusFailed, appsv1.DeploymentStatusComplete:
return true
}
}
return false
}
type ByLatestVersionAsc []*v1.ReplicationController
func (d ByLatestVersionAsc) Len() int { return len(d) }
func (d ByLatestVersionAsc) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByLatestVersionAsc) Less(i, j int) bool {
return DeploymentVersionFor(d[i]) < DeploymentVersionFor(d[j])
}
// ByLatestVersionDesc sorts deployments by LatestVersion descending.
type ByLatestVersionDesc []*v1.ReplicationController
func (d ByLatestVersionDesc) Len() int { return len(d) }
func (d ByLatestVersionDesc) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByLatestVersionDesc) Less(i, j int) bool {
return DeploymentVersionFor(d[j]) < DeploymentVersionFor(d[i])
}
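To make the fencepost arithmetic in ResolveFenceposts concrete, a hypothetical worked example (not part of this commit):

package main

import (
	"fmt"

	intstrutil "k8s.io/apimachinery/pkg/util/intstr"

	"github.com/openshift/library-go/pkg/apps/appsutil"
)

func main() {
	// 25% of 4 desired replicas: maxSurge rounds up to 1, maxUnavailable rounds down to 1.
	surgeParam := intstrutil.FromString("25%")
	unavailParam := intstrutil.FromString("25%")
	surge, unavail, err := appsutil.ResolveFenceposts(&surgeParam, &unavailParam, 4)
	fmt.Println(surge, unavail, err) // 1 1 <nil>

	// When both fenceposts round down to zero the rollout would stall, so
	// maxUnavailable is bumped to 1 (surge might be blocked by quota).
	zero := intstrutil.FromInt(0)
	tenPercent := intstrutil.FromString("10%")
	surge, unavail, _ = appsutil.ResolveFenceposts(&zero, &tenPercent, 4)
	fmt.Println(surge, unavail) // 0 1
}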

View File

@@ -1,73 +0,0 @@
package naming
import (
"fmt"
"hash/fnv"
kvalidation "k8s.io/apimachinery/pkg/util/validation"
)
// GetName returns a name given a base ("deployment-5") and a suffix ("deploy")
// It will first attempt to join them with a dash. If the resulting name is longer
// than maxLength: if the suffix is too long, it will truncate the base name and add
// an 8-character hash of the [base]-[suffix] string. If the suffix is not too long,
// it will truncate the base, add the hash of the base and return [base]-[hash]-[suffix]
func GetName(base, suffix string, maxLength int) string {
if maxLength <= 0 {
return ""
}
name := fmt.Sprintf("%s-%s", base, suffix)
if len(name) <= maxLength {
return name
}
baseLength := maxLength - 10 /*length of -hash-*/ - len(suffix)
// if the suffix is too long, ignore it
if baseLength < 0 {
prefix := base[0:min(len(base), max(0, maxLength-9))]
// Calculate hash on initial base-suffix string
shortName := fmt.Sprintf("%s-%s", prefix, hash(name))
return shortName[:min(maxLength, len(shortName))]
}
prefix := base[0:baseLength]
// Calculate hash on the base string alone; the suffix is appended verbatim
return fmt.Sprintf("%s-%s-%s", prefix, hash(base), suffix)
}
// GetPodName calls GetName with the length restriction for pods
func GetPodName(base, suffix string) string {
return GetName(base, suffix, kvalidation.DNS1123SubdomainMaxLength)
}
// GetConfigMapName calls GetName with the length restriction for ConfigMaps
func GetConfigMapName(base, suffix string) string {
return GetName(base, suffix, kvalidation.DNS1123SubdomainMaxLength)
}
// max returns the greater of its 2 inputs
func max(a, b int) int {
if b > a {
return b
}
return a
}
// min returns the lesser of its 2 inputs
func min(a, b int) int {
if b < a {
return b
}
return a
}
// hash calculates the 8-character hexadecimal representation
// of the hash of the passed-in string using the FNV-1a algorithm
func hash(s string) string {
hash := fnv.New32a()
hash.Write([]byte(s))
intHash := hash.Sum32()
result := fmt.Sprintf("%08x", intHash)
return result
}
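A hypothetical usage sketch (not part of this commit; the base and suffix strings are invented) showing both branches of GetName:

package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/build/naming"
)

func main() {
	// Fits within the limit: the parts are simply joined with a dash.
	fmt.Println(naming.GetName("frontend", "deploy", 63)) // "frontend-deploy"

	// Too long for maxLength 30: the base is truncated to 14 characters and an
	// 8-character FNV-1a hash of the base is inserted, keeping the suffix intact:
	// "<14-char prefix>-<8-char hash>-deploy" (exactly 30 characters).
	fmt.Println(naming.GetName("averyverylongdeploymentname", "deploy", 30))
}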

View File

@@ -1,150 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"sync"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
)
func newEventProcessor(out chan<- watch.Event) *eventProcessor {
return &eventProcessor{
out: out,
cond: sync.NewCond(&sync.Mutex{}),
done: make(chan struct{}),
}
}
// eventProcessor buffers events and writes them to an out chan when a reader
// is waiting. Because of the requirement to buffer events, it synchronizes
// input with a condition variable and output with a channel. It needs to
// be able to yield while both waiting on an input condition and while blocked
// on writing to the output channel.
type eventProcessor struct {
out chan<- watch.Event
cond *sync.Cond
buff []watch.Event
done chan struct{}
}
func (e *eventProcessor) run() {
for {
batch := e.takeBatch()
e.writeBatch(batch)
if e.stopped() {
return
}
}
}
func (e *eventProcessor) takeBatch() []watch.Event {
e.cond.L.Lock()
defer e.cond.L.Unlock()
for len(e.buff) == 0 && !e.stopped() {
e.cond.Wait()
}
batch := e.buff
e.buff = nil
return batch
}
func (e *eventProcessor) writeBatch(events []watch.Event) {
for _, event := range events {
select {
case e.out <- event:
case <-e.done:
return
}
}
}
func (e *eventProcessor) push(event watch.Event) {
e.cond.L.Lock()
defer e.cond.L.Unlock()
defer e.cond.Signal()
e.buff = append(e.buff, event)
}
func (e *eventProcessor) stopped() bool {
select {
case <-e.done:
return true
default:
return false
}
}
func (e *eventProcessor) stop() {
close(e.done)
e.cond.Signal()
}
// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
// so you can use it anywhere you'd have used a regular Watcher returned from a Watch method.
// It also returns a channel you can use to wait for the informers to fully shut down.
func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface, <-chan struct{}) {
ch := make(chan watch.Event)
w := watch.NewProxyWatcher(ch)
e := newEventProcessor(ch)
indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
e.push(watch.Event{
Type: watch.Added,
Object: obj.(runtime.Object),
})
},
UpdateFunc: func(old, new interface{}) {
e.push(watch.Event{
Type: watch.Modified,
Object: new.(runtime.Object),
})
},
DeleteFunc: func(obj interface{}) {
staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
if stale {
// We have no means of passing the additional information down using
// watch API based on watch.Event but the caller can filter such
// objects by checking if metadata.deletionTimestamp is set
obj = staleObj
}
e.push(watch.Event{
Type: watch.Deleted,
Object: obj.(runtime.Object),
})
},
}, cache.Indexers{})
go e.run()
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
defer e.stop()
informer.Run(w.StopChan())
}()
return indexer, informer, w, doneCh
}
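For context, a hypothetical sketch of wiring NewIndexerInformerWatcher to a typed client (watchPodsViaInformer and its arguments are invented):

package example

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// watchPodsViaInformer lets the informer handle re-lists and reconnects
// internally, while the returned watch.Interface exposes the familiar
// event-channel API.
func watchPodsViaInformer(client kubernetes.Interface, namespace string) {
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", namespace, fields.Everything())
	_, _, w, done := watchtools.NewIndexerInformerWatcher(lw, &corev1.Pod{})
	defer func() { <-done }() // wait for the informer to fully shut down
	defer w.Stop()
	for event := range w.ResultChan() {
		fmt.Println("event:", event.Type)
	}
}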

View File

@@ -1,290 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/davecgh/go-spew/spew"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
)
// resourceVersionGetter is an interface used to get resource version from events.
// We can't reuse an interface from meta, since that would be a cyclic dependency, and we need just this one method
type resourceVersionGetter interface {
GetResourceVersion() string
}
// RetryWatcher will make sure that in case the underlying watcher is closed (e.g. due to API timeout or etcd timeout)
// it will get restarted from the last point without the consumer even knowing about it.
// RetryWatcher does that by inspecting events and keeping track of resourceVersion.
// Especially useful when using watch.UntilWithoutRetry where premature termination is causing issues and flakes.
// Please note that this is not resilient to etcd cache not having the resource version anymore - you would need to
// use Informers for that.
type RetryWatcher struct {
lastResourceVersion string
watcherClient cache.Watcher
resultChan chan watch.Event
stopChan chan struct{}
doneChan chan struct{}
minRestartDelay time.Duration
}
// NewRetryWatcher creates a new RetryWatcher.
// It will make sure that watches gets restarted in case of recoverable errors.
// The initialResourceVersion will be given to watch method when first called.
func NewRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher) (*RetryWatcher, error) {
return newRetryWatcher(initialResourceVersion, watcherClient, 1*time.Second)
}
func newRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher, minRestartDelay time.Duration) (*RetryWatcher, error) {
switch initialResourceVersion {
case "", "0":
// TODO: revisit this if we ever get WATCH v2 where it means start "now"
// without doing the synthetic list of objects at the beginning (see #74022)
return nil, fmt.Errorf("initial RV %q is not supported due to issues with underlying WATCH", initialResourceVersion)
default:
break
}
rw := &RetryWatcher{
lastResourceVersion: initialResourceVersion,
watcherClient: watcherClient,
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
resultChan: make(chan watch.Event, 0),
minRestartDelay: minRestartDelay,
}
go rw.receive()
return rw, nil
}
func (rw *RetryWatcher) send(event watch.Event) bool {
// Writing to an unbuffered channel is a blocking operation
// and we need to check if stop wasn't requested while doing so.
select {
case rw.resultChan <- event:
return true
case <-rw.stopChan:
return false
}
}
// doReceive returns true when it is done, false otherwise.
// If it is not done the second return value holds the time to wait before calling it again.
func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
watcher, err := rw.watcherClient.Watch(metav1.ListOptions{
ResourceVersion: rw.lastResourceVersion,
AllowWatchBookmarks: true,
})
// We are very unlikely to hit EOF here since we are just establishing the call,
// but it may happen that the apiserver is just shutting down (e.g. being restarted).
// This is consistent with how it is handled for informers.
switch err {
case nil:
break
case io.EOF:
// watch closed normally
return false, 0
case io.ErrUnexpectedEOF:
klog.V(1).Infof("Watch closed with unexpected EOF: %v", err)
return false, 0
default:
msg := "Watch failed: %v"
if net.IsProbableEOF(err) || net.IsTimeout(err) {
klog.V(5).Infof(msg, err)
// Retry
return false, 0
}
klog.Errorf(msg, err)
// Retry
return false, 0
}
if watcher == nil {
klog.Error("Watch returned nil watcher")
// Retry
return false, 0
}
ch := watcher.ResultChan()
defer watcher.Stop()
for {
select {
case <-rw.stopChan:
klog.V(4).Info("Stopping RetryWatcher.")
return true, 0
case event, ok := <-ch:
if !ok {
klog.V(4).Infof("Failed to get event! Re-creating the watcher. Last RV: %s", rw.lastResourceVersion)
return false, 0
}
// We need to inspect the event and get ResourceVersion out of it
switch event.Type {
case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark:
metaObject, ok := event.Object.(resourceVersionGetter)
if !ok {
_ = rw.send(watch.Event{
Type: watch.Error,
Object: &apierrors.NewInternalError(errors.New("retryWatcher: doesn't support resourceVersion")).ErrStatus,
})
// We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data!
return true, 0
}
resourceVersion := metaObject.GetResourceVersion()
if resourceVersion == "" {
_ = rw.send(watch.Event{
Type: watch.Error,
Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher: object %#v doesn't support resourceVersion", event.Object)).ErrStatus,
})
// We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data!
return true, 0
}
// All is fine; send the non-bookmark events and update resource version.
if event.Type != watch.Bookmark {
ok = rw.send(event)
if !ok {
return true, 0
}
}
rw.lastResourceVersion = resourceVersion
continue
case watch.Error:
// This round trip allows us to handle unstructured status
errObject := apierrors.FromObject(event.Object)
statusErr, ok := errObject.(*apierrors.StatusError)
if !ok {
klog.Error(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object))
// Retry unknown errors
return false, 0
}
status := statusErr.ErrStatus
statusDelay := time.Duration(0)
if status.Details != nil {
statusDelay = time.Duration(status.Details.RetryAfterSeconds) * time.Second
}
switch status.Code {
case http.StatusGone:
// Never retry RV too old errors
_ = rw.send(event)
return true, 0
case http.StatusGatewayTimeout, http.StatusInternalServerError:
// Retry
return false, statusDelay
default:
// We retry by default. RetryWatcher is meant to proceed unless it is certain
// that it can't. If we are not certain, we proceed with retry and leave it
// up to the user to timeout if needed.
// Log here so we have a record of hitting the unexpected error
// and we can whitelist some error codes if we missed any that are expected.
klog.V(5).Info(spew.Sprintf("Retrying after unexpected error: %#+v", event.Object))
// Retry
return false, statusDelay
}
default:
klog.Errorf("Failed to recognize Event type %q", event.Type)
_ = rw.send(watch.Event{
Type: watch.Error,
Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher failed to recognize Event type %q", event.Type)).ErrStatus,
})
// We are unable to restart the watch and have to stop the loop or this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data!
return true, 0
}
}
}
}
// receive reads the result from a watcher, restarting it if necessary.
func (rw *RetryWatcher) receive() {
defer close(rw.doneChan)
defer close(rw.resultChan)
klog.V(4).Info("Starting RetryWatcher.")
defer klog.V(4).Info("Stopping RetryWatcher.")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
select {
case <-rw.stopChan:
cancel()
return
case <-ctx.Done():
return
}
}()
// We use non-sliding until so we don't introduce delays on the happy path when the WATCH call
// times out or gets closed and we need to reestablish it, while also avoiding hot loops.
wait.NonSlidingUntilWithContext(ctx, func(ctx context.Context) {
done, retryAfter := rw.doReceive()
if done {
cancel()
return
}
time.Sleep(retryAfter)
klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion)
}, rw.minRestartDelay)
}
// ResultChan implements Interface.
func (rw *RetryWatcher) ResultChan() <-chan watch.Event {
return rw.resultChan
}
// Stop implements Interface.
func (rw *RetryWatcher) Stop() {
close(rw.stopChan)
}
// Done allows the caller to be notified when Retry watcher stops.
func (rw *RetryWatcher) Done() <-chan struct{} {
return rw.doneChan
}
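For context, a hypothetical sketch of driving RetryWatcher from a typed client (watchPods and its arguments are invented); the initial resource version comes from a LIST, since "" and "0" are rejected above:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// watchPods lists once to obtain a resource version, then lets RetryWatcher
// re-establish the WATCH on recoverable errors while preserving event order.
func watchPods(client kubernetes.Interface, namespace string) error {
	list, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	lw := &cache.ListWatch{
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(namespace).Watch(context.TODO(), options)
		},
	}
	rw, err := watchtools.NewRetryWatcher(list.ResourceVersion, lw)
	if err != nil {
		return err
	}
	defer rw.Stop()
	for event := range rw.ResultChan() {
		fmt.Println("event:", event.Type)
	}
	return nil
}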

View File

@@ -1,167 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package watch
import (
"context"
"errors"
"fmt"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
)
// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition failed or detected an error state.
type PreconditionFunc func(store cache.Store) (bool, error)
// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition cannot be checked and should terminate. In general, it is better to define
// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
// from false to true).
type ConditionFunc func(event watch.Event) (bool, error)
// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")
// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch
// event encountered. The first condition that returns an error terminates the watch (and the event is also returned).
// If no event has been received, the returned event will be nil.
// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
// Waits until context deadline or until context is canceled.
//
// Warning: Unless you have a very specific use case (probably a special Watcher) don't use this function!!!
// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error.
// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below,
// Warning: solving such issues.
// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
ch := watcher.ResultChan()
defer watcher.Stop()
var lastEvent *watch.Event
for _, condition := range conditions {
// check the next condition against the previous event and short circuit waiting for the next watch
if lastEvent != nil {
done, err := condition(*lastEvent)
if err != nil {
return lastEvent, err
}
if done {
continue
}
}
ConditionSucceeded:
for {
select {
case event, ok := <-ch:
if !ok {
return lastEvent, ErrWatchClosed
}
lastEvent = &event
done, err := condition(event)
if err != nil {
return lastEvent, err
}
if done {
break ConditionSucceeded
}
case <-ctx.Done():
return lastEvent, wait.ErrWaitTimeout
}
}
}
return lastEvent, nil
}
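// Example (editor's sketch): bounding UntilWithoutRetry with a context timeout. The
// watcher `w` is an assumption; the condition fires on the first Deleted event.
//
// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
// defer cancel()
// lastEvent, err := UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
//     return event.Type == watch.Deleted, nil
// })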
// Until wraps the watcherClient's watch function with a RetryWatcher, making sure that the watcher gets restarted in case of errors.
// The initialResourceVersion will be given to the watch method when first called. It shall not be "" or "0"
// given the underlying WATCH call issues (#74022). If you want the initial list ("", "0") done for you, use ListWatchUntil instead.
// The remaining behaviour is identical to the function UntilWithoutRetry. (See above.)
// Until can deal with API timeouts and lost connections.
// It guarantees you to see all events and in the order they happened.
// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case.
// (See `UntilWithSync` if you'd prefer to recover from all the errors, including RV too old, by re-listing
// those items. In normal code you should care about being level driven, so you wouldn't care about missing some edges.)
// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges").
func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) {
w, err := NewRetryWatcher(initialResourceVersion, watcherClient)
if err != nil {
return nil, err
}
return UntilWithoutRetry(ctx, w, conditions...)
}
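// Example (editor's sketch): waiting for a pod to reach Running while surviving API
// timeouts and dropped connections. `lw` (e.g. a *cache.ListWatch), `rv` (from a prior
// LIST) and the corev1 import are assumptions.
//
// _, err := Until(ctx, rv, lw, func(event watch.Event) (bool, error) {
//     pod, ok := event.Object.(*corev1.Pod)
//     return ok && pod.Status.Phase == corev1.PodRunning, nil
// })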
// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced,
// and watches the output until each provided condition succeeds, in a way that is identical
// to function UntilWithoutRetry. (See above.)
// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'.
// It is the only function that can recover from 'Resource version too old'; Until and UntilWithoutRetry will
// just fail in that case. On the other hand, it can't provide you with guarantees as strong as using the simple
// Watch method with Until. It can skip some intermediate events if the watch function fails, but it will
// re-list to recover, and after recovery you always get an event if there has been a change.
// Also, with the current implementation based on DeltaFIFO, the order of the events you receive is guaranteed only
// for a particular object, not across objects, even if they belong to the same resource.
// The most frequent usage would be a command that needs to watch the "state of the world" and shouldn't fail, like:
// waiting for an object to reach a state, "small" controllers, ...
func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
indexer, informer, watcher, done := NewIndexerInformerWatcher(lw, objType)
// We need to wait for the internal informers to fully stop so the behavior is easier to reason about
// and it works with non-thread-safe clients.
defer func() { <-done }()
// The proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative branches
// and to let UntilWithoutRetry stop it.
defer watcher.Stop()
if precondition != nil {
if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
}
done, err := precondition(indexer)
if err != nil {
return nil, err
}
if done {
return nil, nil
}
}
return UntilWithoutRetry(ctx, watcher, conditions...)
}
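// Example (editor's sketch): a precondition that short-circuits the watch when the
// synced store already holds the object. The lister-watcher `lw` and the
// "default/mypod" key are assumptions.
//
// precondition := func(store cache.Store) (bool, error) {
//     _, exists, err := store.GetByKey("default/mypod")
//     return exists, err
// }
// ev, err := UntilWithSync(ctx, lw, &corev1.Pod{}, precondition,
//     func(event watch.Event) (bool, error) { return event.Type == watch.Deleted, nil })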
// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
if timeout < 0 {
// This should be handled in validation
klog.Errorf("Timeout for context shall not be negative!")
timeout = 0
}
if timeout == 0 {
return context.WithCancel(parent)
}
return context.WithTimeout(parent, timeout)
}
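// Example (editor's sketch): a zero timeout yields a cancel-only context with no
// deadline; anything positive behaves exactly like context.WithTimeout.
//
// ctx, cancel := ContextWithOptionalTimeout(context.Background(), 0)
// defer cancel() // never expires on its own; the caller must cancel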

View File

@@ -1,4 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- caesarxuchao

View File

@@ -1,105 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package retry
import (
"time"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
)
// DefaultRetry is the recommended retry for a conflict where multiple clients
// are making changes to the same resource.
var DefaultRetry = wait.Backoff{
Steps: 5,
Duration: 10 * time.Millisecond,
Factor: 1.0,
Jitter: 0.1,
}
// DefaultBackoff is the recommended backoff for a conflict where a client
// may be attempting to make an unrelated modification to a resource under
// active management by one or more controllers.
var DefaultBackoff = wait.Backoff{
Steps: 4,
Duration: 10 * time.Millisecond,
Factor: 5.0,
Jitter: 0.1,
}
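// Editor's note (not in the original source): with these values DefaultRetry sleeps a
// flat ~10ms (plus up to 10% jitter) between up to 5 attempts, while DefaultBackoff
// sleeps roughly 10ms, 50ms and 250ms between its 4 attempts, since Duration is
// multiplied by Factor after each step.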
// OnError allows the caller to retry fn in case the error returned by fn is retriable
// according to the provided function. backoff defines the maximum retries and the wait
// interval between two retries.
func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error {
var lastErr error
err := wait.ExponentialBackoff(backoff, func() (bool, error) {
err := fn()
switch {
case err == nil:
return true, nil
case retriable(err):
lastErr = err
return false, nil
default:
return false, err
}
})
if err == wait.ErrWaitTimeout {
err = lastErr
}
return err
}
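// Example (editor's sketch): retrying a create only while the apiserver reports a
// server timeout. The clientset, namespace and `pod` object are assumptions;
// errors.IsServerTimeout comes from the apimachinery errors package imported above.
//
// err := OnError(DefaultBackoff, errors.IsServerTimeout, func() error {
//     _, err := clientset.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{})
//     return err
// })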
// RetryOnConflict is used to make an update to a resource when you have to worry about
// conflicts caused by other code making unrelated updates to the resource at the same
// time. fn should fetch the resource to be modified, make appropriate changes to it, try
// to update it, and return (unmodified) the error from the update function. On a
// successful update, RetryOnConflict will return nil. If the update function returns a
// "Conflict" error, RetryOnConflict will wait some amount of time as described by
// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times
// and gives up, RetryOnConflict will return an error to the caller.
//
// err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
// // Fetch the resource here; you need to refetch it on every try, since
// // if you got a conflict on the last update attempt then you need to get
// // the current version before making your own changes.
// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{})
// if err != nil {
// return err
// }
//
// // Make whatever updates to the resource are needed
// pod.Status.Phase = v1.PodFailed
//
// // Try to update
// _, err = c.Pods("mynamespace").UpdateStatus(pod)
// // You have to return err itself here (not wrapped inside another error)
// // so that RetryOnConflict can identify it correctly.
// return err
// })
// if err != nil {
// // May be conflict if max retries were hit, or may be something unrelated
// // like permissions or a network error
// return err
// }
// ...
//
// TODO: Make Backoff an interface?
func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
return OnError(backoff, errors.IsConflict, fn)
}

6
vendor/modules.txt vendored
View File

@@ -494,10 +494,6 @@ github.com/openshift/client-go/route/clientset/versioned/typed/route/v1/fake
github.com/openshift/client-go/user/clientset/versioned/scheme
github.com/openshift/client-go/user/clientset/versioned/typed/user/v1
# github.com/openshift/library-go v0.0.0-20210106214821-c4d0b9c8d55f
## explicit
github.com/openshift/library-go/pkg/apps/appsserialization
github.com/openshift/library-go/pkg/apps/appsutil
github.com/openshift/library-go/pkg/build/naming
github.com/openshift/library-go/pkg/oauth/oauthdiscovery
# github.com/openshift/oc v0.0.0-alpha.0.0.20210325095525-2513fdbb36e2
## explicit
@@ -1071,7 +1067,6 @@ k8s.io/client-go/tools/record
k8s.io/client-go/tools/record/util
k8s.io/client-go/tools/reference
k8s.io/client-go/tools/remotecommand
k8s.io/client-go/tools/watch
k8s.io/client-go/transport
k8s.io/client-go/transport/spdy
k8s.io/client-go/util/cert
@@ -1081,7 +1076,6 @@ k8s.io/client-go/util/flowcontrol
k8s.io/client-go/util/homedir
k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
# k8s.io/component-base v0.21.3
k8s.io/component-base/config