Update godeps

parent 1c8773fc98
commit 1bc383f9c5

1723 changed files with 287976 additions and 411028 deletions
vendor/k8s.io/kubernetes/pkg/controller/.import-restrictions (generated, vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
+{
+  "Rules": [
+    {
+      "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned$",
+      "ForbiddenPrefixes": [
+        "k8s.io/kubernetes/pkg/client/unversioned"
+      ]
+    },
+    {
+      "SelectorRegexp": "k8s[.]io/kubernetes/pkg/client/unversioned/testclient$",
+      "ForbiddenPrefixes": [
+        "k8s.io/kubernetes/pkg/client/unversioned/testclient"
+      ]
+    }
+
+  ]
+}
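Each rule above pairs a SelectorRegexp with ForbiddenPrefixes: imports that match the selector must not fall under the forbidden prefixes, which keeps the deprecated unversioned client from creeping back into pkg/controller. A minimal sketch of that interpretation (the Rule type and checkImports helper here are illustrative assumptions, not the actual verify script):

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// Rule mirrors one entry of the .import-restrictions JSON above.
type Rule struct {
    SelectorRegexp    string
    ForbiddenPrefixes []string
}

// checkImports flags any import that a rule both selects and forbids.
func checkImports(imports []string, rules []Rule) []error {
    var errs []error
    for _, r := range rules {
        sel := regexp.MustCompile(r.SelectorRegexp)
        for _, imp := range imports {
            if !sel.MatchString(imp) {
                continue
            }
            for _, prefix := range r.ForbiddenPrefixes {
                if strings.HasPrefix(imp, prefix) {
                    errs = append(errs, fmt.Errorf("import %q is forbidden by prefix %q", imp, prefix))
                }
            }
        }
    }
    return errs
}

func main() {
    rules := []Rule{{
        SelectorRegexp:    "k8s[.]io/kubernetes/pkg/client/unversioned$",
        ForbiddenPrefixes: []string{"k8s.io/kubernetes/pkg/client/unversioned"},
    }}
    fmt.Println(checkImports([]string{"k8s.io/kubernetes/pkg/client/unversioned"}, rules))
}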
vendor/k8s.io/kubernetes/pkg/controller/BUILD (generated, vendored, new file, 70 lines)

@@ -0,0 +1,70 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_binary",
+    "go_library",
+    "go_test",
+    "cgo_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "client_builder.go",
+        "controller_ref_manager.go",
+        "controller_utils.go",
+        "doc.go",
+        "lookup_cache.go",
+    ],
+    tags = ["automanaged"],
+    deps = [
+        "//pkg/api:go_default_library",
+        "//pkg/api/errors:go_default_library",
+        "//pkg/api/meta:go_default_library",
+        "//pkg/api/unversioned:go_default_library",
+        "//pkg/api/validation:go_default_library",
+        "//pkg/apis/extensions:go_default_library",
+        "//pkg/client/cache:go_default_library",
+        "//pkg/client/clientset_generated/internalclientset:go_default_library",
+        "//pkg/client/clientset_generated/internalclientset/typed/core/internalversion:go_default_library",
+        "//pkg/client/record:go_default_library",
+        "//pkg/client/restclient:go_default_library",
+        "//pkg/fields:go_default_library",
+        "//pkg/labels:go_default_library",
+        "//pkg/runtime:go_default_library",
+        "//pkg/serviceaccount:go_default_library",
+        "//pkg/util/clock:go_default_library",
+        "//pkg/util/hash:go_default_library",
+        "//pkg/util/integer:go_default_library",
+        "//pkg/util/sets:go_default_library",
+        "//pkg/watch:go_default_library",
+        "//vendor:github.com/golang/glog",
+        "//vendor:github.com/golang/groupcache/lru",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["controller_utils_test.go"],
+    library = "go_default_library",
+    tags = ["automanaged"],
+    deps = [
+        "//pkg/api:go_default_library",
+        "//pkg/api/testapi:go_default_library",
+        "//pkg/api/unversioned:go_default_library",
+        "//pkg/apimachinery/registered:go_default_library",
+        "//pkg/client/cache:go_default_library",
+        "//pkg/client/clientset_generated/internalclientset:go_default_library",
+        "//pkg/client/record:go_default_library",
+        "//pkg/client/restclient:go_default_library",
+        "//pkg/runtime:go_default_library",
+        "//pkg/securitycontext:go_default_library",
+        "//pkg/util/clock:go_default_library",
+        "//pkg/util/sets:go_default_library",
+        "//pkg/util/testing:go_default_library",
+        "//pkg/util/uuid:go_default_library",
+    ],
+)
vendor/k8s.io/kubernetes/pkg/controller/client_builder.go (generated, vendored, new file, 166 lines)

@@ -0,0 +1,166 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+    "fmt"
+    "time"
+
+    "k8s.io/kubernetes/pkg/api"
+    apierrors "k8s.io/kubernetes/pkg/api/errors"
+    "k8s.io/kubernetes/pkg/client/cache"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+    "k8s.io/kubernetes/pkg/client/restclient"
+    "k8s.io/kubernetes/pkg/fields"
+    "k8s.io/kubernetes/pkg/runtime"
+    "k8s.io/kubernetes/pkg/serviceaccount"
+    "k8s.io/kubernetes/pkg/watch"
+
+    "github.com/golang/glog"
+)
+
+// ControllerClientBuilder allows you to get clients and configs for controllers
+type ControllerClientBuilder interface {
+    Config(name string) (*restclient.Config, error)
+    Client(name string) (clientset.Interface, error)
+    ClientOrDie(name string) clientset.Interface
+}
+
+// SimpleControllerClientBuilder returns a fixed client with different user agents
+type SimpleControllerClientBuilder struct {
+    // ClientConfig is a skeleton config to clone and use as the basis for each controller client
+    ClientConfig *restclient.Config
+}
+
+func (b SimpleControllerClientBuilder) Config(name string) (*restclient.Config, error) {
+    clientConfig := *b.ClientConfig
+    return &clientConfig, nil
+}
+
+func (b SimpleControllerClientBuilder) Client(name string) (clientset.Interface, error) {
+    clientConfig, err := b.Config(name)
+    if err != nil {
+        return nil, err
+    }
+    return clientset.NewForConfig(restclient.AddUserAgent(clientConfig, name))
+}
+
+func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
+    client, err := b.Client(name)
+    if err != nil {
+        glog.Fatal(err)
+    }
+    return client
+}
+
+// SAControllerClientBuilder is a ControllerClientBuilder that returns clients identifying as
+// service accounts
+type SAControllerClientBuilder struct {
+    // ClientConfig is a skeleton config to clone and use as the basis for each controller client
+    ClientConfig *restclient.Config
+
+    // CoreClient is used to provision service accounts if needed and watch for their associated tokens
+    // to construct a controller client
+    CoreClient unversionedcore.CoreInterface
+
+    // Namespace is the namespace used to host the service accounts that will back the
+    // controllers. It must be a highly privileged namespace which normal users cannot inspect.
+    Namespace string
+}
+
+// Config returns a complete clientConfig for constructing clients. This is separate in anticipation of composition
+// which means that not all clientsets are known here
+func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, error) {
+    clientConfig := restclient.AnonymousClientConfig(b.ClientConfig)
+
+    // we need the SA UID to find a matching SA token
+    sa, err := b.CoreClient.ServiceAccounts(b.Namespace).Get(name)
+    if err != nil && !apierrors.IsNotFound(err) {
+        return nil, err
+    } else if apierrors.IsNotFound(err) {
+        // check to see if the namespace exists. If it isn't a NotFound, just try to create the SA.
+        // It'll probably fail, but perhaps that will have a better message.
+        if _, err := b.CoreClient.Namespaces().Get(b.Namespace); apierrors.IsNotFound(err) {
+            _, err = b.CoreClient.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: b.Namespace}})
+            if err != nil && !apierrors.IsAlreadyExists(err) {
+                return nil, err
+            }
+        }
+
+        sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(
+            &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: b.Namespace, Name: name}})
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    lw := &cache.ListWatch{
+        ListFunc: func(options api.ListOptions) (runtime.Object, error) {
+            options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
+            return b.CoreClient.Secrets(b.Namespace).List(options)
+        },
+        WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+            options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
+            return b.CoreClient.Secrets(b.Namespace).Watch(options)
+        },
+    }
+    _, err = cache.ListWatchUntil(30*time.Second, lw,
+        func(event watch.Event) (bool, error) {
+            switch event.Type {
+            case watch.Deleted:
+                return false, nil
+            case watch.Error:
+                return false, fmt.Errorf("error watching")
+
+            case watch.Added, watch.Modified:
+                secret := event.Object.(*api.Secret)
+                if !serviceaccount.IsServiceAccountToken(secret, sa) ||
+                    len(secret.Data[api.ServiceAccountTokenKey]) == 0 {
+                    return false, nil
+                }
+                // TODO maybe verify the token is valid
+                clientConfig.BearerToken = string(secret.Data[api.ServiceAccountTokenKey])
+                restclient.AddUserAgent(clientConfig, serviceaccount.MakeUsername(b.Namespace, name))
+                return true, nil
+
+            default:
+                return false, fmt.Errorf("unexpected event type: %v", event.Type)
+            }
+        })
+    if err != nil {
+        return nil, fmt.Errorf("unable to get token for service account: %v", err)
+    }
+
+    return clientConfig, nil
+}
+
+func (b SAControllerClientBuilder) Client(name string) (clientset.Interface, error) {
+    clientConfig, err := b.Config(name)
+    if err != nil {
+        return nil, err
+    }
+    return clientset.NewForConfig(clientConfig)
+}
+
+func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface {
+    client, err := b.Client(name)
+    if err != nil {
+        glog.Fatal(err)
+    }
+    return client
+}
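The point of the builder indirection above is that every controller gets a client stamped with its own user agent (and, for SAControllerClientBuilder, its own per-controller service-account token), so API-server logs can attribute traffic. A hedged usage sketch against the simple builder (the host value and controller name are placeholders, not from this diff):

package main

import (
    "k8s.io/kubernetes/pkg/client/restclient"
    "k8s.io/kubernetes/pkg/controller"
)

func main() {
    // In a real controller manager this skeleton config would come from
    // kubeconfig parsing or the in-cluster environment.
    cfg := &restclient.Config{Host: "https://127.0.0.1:6443"}

    builder := controller.SimpleControllerClientBuilder{ClientConfig: cfg}

    // Each controller requests a client under its own name, so its requests
    // carry a user agent ending in ".../replication-controller".
    client := builder.ClientOrDie("replication-controller")
    _ = client
}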
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go (generated, vendored, 39 lines changed)

@@ -31,7 +31,6 @@ import (
     "k8s.io/kubernetes/pkg/client/cache"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/client/record"
-    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/util/clock"
@@ -54,7 +53,7 @@ const (
 )
 
 var (
-    KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
+    KeyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc
 )
 
 type ResyncPeriodFunc func() time.Duration
@@ -220,6 +219,8 @@ type Expectations interface {
 
 // ControlleeExpectations track controllee creates/deletes.
 type ControlleeExpectations struct {
+    // Important: Since these two int64 fields are using sync/atomic, they have to be at the top of the struct due to a bug on 32-bit platforms
+    // See: https://golang.org/pkg/sync/atomic/ for more information
     add int64
     del int64
     key string
@@ -342,12 +343,28 @@ func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *U
     return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)}
 }
 
+// Reasons for pod events
+const (
+    // FailedCreatePodReason is added in an event and in a replica set condition
+    // when a pod for a replica set is failed to be created.
+    FailedCreatePodReason = "FailedCreate"
+    // SuccessfulCreatePodReason is added in an event when a pod for a replica set
+    // is successfully created.
+    SuccessfulCreatePodReason = "SuccessfulCreate"
+    // FailedDeletePodReason is added in an event and in a replica set condition
+    // when a pod for a replica set is failed to be deleted.
+    FailedDeletePodReason = "FailedDelete"
+    // SuccessfulDeletePodReason is added in an event when a pod for a replica set
+    // is successfully deleted.
+    SuccessfulDeletePodReason = "SuccessfulDelete"
+)
+
 // PodControlInterface is an interface that knows how to add or delete pods
 // created as an interface to allow testing.
 type PodControlInterface interface {
     // CreatePods creates new pods according to the spec.
     CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error
-    // CreatePodsOnNode creates a new pod accorting to the spec on the specified node.
+    // CreatePodsOnNode creates a new pod according to the spec on the specified node.
     CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error
     // CreatePodsWithControllerRef creates new pods according to the spec, and sets object as the pod's controller.
     CreatePodsWithControllerRef(namespace string, template *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error
@@ -373,6 +390,12 @@ func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set {
     return desiredLabels
 }
 
+func getPodsFinalizers(template *api.PodTemplateSpec) []string {
+    desiredFinalizers := make([]string, len(template.Finalizers))
+    copy(desiredFinalizers, template.Finalizers)
+    return desiredFinalizers
+}
+
 func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
     desiredAnnotations := make(labels.Set)
     for k, v := range template.Annotations {
@@ -438,6 +461,7 @@ func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
 
 func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object, controllerRef *api.OwnerReference) (*api.Pod, error) {
     desiredLabels := getPodsLabelSet(template)
+    desiredFinalizers := getPodsFinalizers(template)
     desiredAnnotations, err := getPodsAnnotationSet(template, parentObject)
     if err != nil {
         return nil, err
@@ -453,6 +477,7 @@ func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Obje
             Labels:       desiredLabels,
             Annotations:  desiredAnnotations,
             GenerateName: prefix,
+            Finalizers:   desiredFinalizers,
         },
     }
     if controllerRef != nil {
@@ -476,7 +501,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
         return fmt.Errorf("unable to create pods, no labels")
     }
     if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
-        r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err)
+        r.Recorder.Eventf(object, api.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
         return fmt.Errorf("unable to create pods: %v", err)
     } else {
         accessor, err := meta.Accessor(object)
@@ -485,7 +510,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
             return nil
         }
         glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name)
-        r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name)
+        r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name)
     }
     return nil
 }
@@ -496,11 +521,11 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
         return fmt.Errorf("object does not have ObjectMeta, %v", err)
     }
     if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
-        r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err)
+        r.Recorder.Eventf(object, api.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
         return fmt.Errorf("unable to delete pods: %v", err)
     } else {
         glog.V(4).Infof("Controller %v deleted pod %v", accessor.GetName(), podID)
-        r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulDelete", "Deleted pod: %v", podID)
+        r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID)
     }
     return nil
 }
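The alignment comment added to ControlleeExpectations encodes a general Go rule: on 32-bit platforms only the first word of an allocated struct is guaranteed 64-bit alignment, so int64 fields touched by sync/atomic must sit at the top of the struct. A standalone illustration of the same layout discipline (the counter type is hypothetical, not from this diff):

package main

import (
    "fmt"
    "sync/atomic"
)

// counter keeps its atomically-updated int64 fields at the top of the
// struct so atomic.AddInt64 is safe on 32-bit platforms too.
type counter struct {
    add int64 // must stay first: sync/atomic requires 64-bit alignment
    del int64
    key string
}

func main() {
    c := &counter{key: "ns/name"}
    atomic.AddInt64(&c.add, 1)
    atomic.AddInt64(&c.del, 2)
    fmt.Println(atomic.LoadInt64(&c.add), atomic.LoadInt64(&c.del))
}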
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/BUILD (generated, vendored, new file, 52 lines)

@@ -0,0 +1,52 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_binary",
+    "go_library",
+    "go_test",
+    "cgo_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["deployment_util.go"],
+    tags = ["automanaged"],
+    deps = [
+        "//pkg/api:go_default_library",
+        "//pkg/api/annotations:go_default_library",
+        "//pkg/api/meta:go_default_library",
+        "//pkg/api/unversioned:go_default_library",
+        "//pkg/apis/extensions:go_default_library",
+        "//pkg/client/clientset_generated/internalclientset:go_default_library",
+        "//pkg/controller:go_default_library",
+        "//pkg/labels:go_default_library",
+        "//pkg/runtime:go_default_library",
+        "//pkg/util/errors:go_default_library",
+        "//pkg/util/integer:go_default_library",
+        "//pkg/util/intstr:go_default_library",
+        "//pkg/util/labels:go_default_library",
+        "//pkg/util/pod:go_default_library",
+        "//pkg/util/wait:go_default_library",
+        "//vendor:github.com/golang/glog",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["deployment_util_test.go"],
+    library = "go_default_library",
+    tags = ["automanaged"],
+    deps = [
+        "//pkg/api:go_default_library",
+        "//pkg/api/unversioned:go_default_library",
+        "//pkg/apis/extensions:go_default_library",
+        "//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
+        "//pkg/client/testing/core:go_default_library",
+        "//pkg/runtime:go_default_library",
+        "//pkg/util/intstr:go_default_library",
+        "//vendor:github.com/stretchr/testify/assert",
+    ],
+)
vendor/k8s.io/kubernetes/pkg/controller/deployment/util/deployment_util.go (generated, vendored, 355 lines changed)

@@ -20,29 +20,33 @@ import (
     "fmt"
     "sort"
     "strconv"
     "strings"
     "time"
 
     "github.com/golang/glog"
 
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/annotations"
+    "k8s.io/kubernetes/pkg/api/meta"
     "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/apis/extensions"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/labels"
+    "k8s.io/kubernetes/pkg/runtime"
+    "k8s.io/kubernetes/pkg/util/errors"
     "k8s.io/kubernetes/pkg/util/integer"
     intstrutil "k8s.io/kubernetes/pkg/util/intstr"
     labelsutil "k8s.io/kubernetes/pkg/util/labels"
     podutil "k8s.io/kubernetes/pkg/util/pod"
-    rsutil "k8s.io/kubernetes/pkg/util/replicaset"
     "k8s.io/kubernetes/pkg/util/wait"
 )
 
 const (
     // RevisionAnnotation is the revision annotation of a deployment's replica sets which records its rollout sequence
     RevisionAnnotation = "deployment.kubernetes.io/revision"
+    // RevisionHistoryAnnotation maintains the history of all old revisions that a replica set has served for a deployment.
+    RevisionHistoryAnnotation = "deployment.kubernetes.io/revision-history"
     // DesiredReplicasAnnotation is the desired replicas for a deployment recorded as an annotation
     // in its replica sets. Helps in separating scaling events from the rollout process and for
     // determining if the new replica set for a deployment is really saturated.
@@ -59,13 +63,133 @@ const (
     // RollbackDone is the done rollback event reason
     RollbackDone = "DeploymentRollback"
     // OverlapAnnotation marks deployments with overlapping selector with other deployments
-    // TODO: Delete this annotation when we gracefully handle overlapping selectors. See https://github.com/kubernetes/kubernetes/issues/2210
+    // TODO: Delete this annotation when we gracefully handle overlapping selectors.
+    // See https://github.com/kubernetes/kubernetes/issues/2210
     OverlapAnnotation = "deployment.kubernetes.io/error-selector-overlapping-with"
     // SelectorUpdateAnnotation marks the last time deployment selector update
-    // TODO: Delete this annotation when we gracefully handle overlapping selectors. See https://github.com/kubernetes/kubernetes/issues/2210
+    // TODO: Delete this annotation when we gracefully handle overlapping selectors.
+    // See https://github.com/kubernetes/kubernetes/issues/2210
     SelectorUpdateAnnotation = "deployment.kubernetes.io/selector-updated-at"
+
+    // Reasons for deployment conditions
+    //
+    // Progressing:
+    //
+    // ReplicaSetUpdatedReason is added in a deployment when one of its replica sets is updated as part
+    // of the rollout process.
+    ReplicaSetUpdatedReason = "ReplicaSetUpdated"
+    // FailedRSCreateReason is added in a deployment when it cannot create a new replica set.
+    FailedRSCreateReason = "ReplicaSetCreateError"
+    // NewReplicaSetReason is added in a deployment when it creates a new replica set.
+    NewReplicaSetReason = "NewReplicaSetCreated"
+    // FoundNewRSReason is added in a deployment when it adopts an existing replica set.
+    FoundNewRSReason = "FoundNewReplicaSet"
+    // NewRSAvailableReason is added in a deployment when its newest replica set is made available
+    // ie. the number of new pods that have passed readiness checks and run for at least minReadySeconds
+    // is at least the minimum available pods that need to run for the deployment.
+    NewRSAvailableReason = "NewReplicaSetAvailable"
+    // TimedOutReason is added in a deployment when its newest replica set fails to show any progress
+    // within the given deadline (progressDeadlineSeconds).
+    TimedOutReason = "ProgressDeadlineExceeded"
+    // PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be
+    // estimated once a deployment is paused.
+    PausedDeployReason = "DeploymentPaused"
+    // ResumedDeployReason is added in a deployment when it is resumed. Useful for not failing accidentally
+    // deployments that paused amidst a rollout and are bounded by a deadline.
+    ResumedDeployReason = "DeploymentResumed"
+    //
+    // Available:
+    //
+    // MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available.
+    MinimumReplicasAvailable = "MinimumReplicasAvailable"
+    // MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas
+    // available.
+    MinimumReplicasUnavailable = "MinimumReplicasUnavailable"
 )
 
+// NewDeploymentCondition creates a new deployment condition.
+func NewDeploymentCondition(condType extensions.DeploymentConditionType, status api.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
+    return &extensions.DeploymentCondition{
+        Type:               condType,
+        Status:             status,
+        LastUpdateTime:     unversioned.Now(),
+        LastTransitionTime: unversioned.Now(),
+        Reason:             reason,
+        Message:            message,
+    }
+}
+
+// GetDeploymentCondition returns the condition with the provided type.
+func GetDeploymentCondition(status extensions.DeploymentStatus, condType extensions.DeploymentConditionType) *extensions.DeploymentCondition {
+    for i := range status.Conditions {
+        c := status.Conditions[i]
+        if c.Type == condType {
+            return &c
+        }
+    }
+    return nil
+}
+
+// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that
+// we are about to add already exists and has the same status and reason then we are not going to update.
+func SetDeploymentCondition(status *extensions.DeploymentStatus, condition extensions.DeploymentCondition) {
+    currentCond := GetDeploymentCondition(*status, condition.Type)
+    if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
+        return
+    }
+    // Do not update lastTransitionTime if the status of the condition doesn't change.
+    if currentCond != nil && currentCond.Status == condition.Status {
+        condition.LastTransitionTime = currentCond.LastTransitionTime
+    }
+    newConditions := filterOutCondition(status.Conditions, condition.Type)
+    status.Conditions = append(newConditions, condition)
+}
+
+// RemoveDeploymentCondition removes the deployment condition with the provided type.
+func RemoveDeploymentCondition(status *extensions.DeploymentStatus, condType extensions.DeploymentConditionType) {
+    status.Conditions = filterOutCondition(status.Conditions, condType)
+}
+
+// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type.
+func filterOutCondition(conditions []extensions.DeploymentCondition, condType extensions.DeploymentConditionType) []extensions.DeploymentCondition {
+    var newConditions []extensions.DeploymentCondition
+    for _, c := range conditions {
+        if c.Type == condType {
+            continue
+        }
+        newConditions = append(newConditions, c)
+    }
+    return newConditions
+}
+
+// ReplicaSetToDeploymentCondition converts a replica set condition into a deployment condition.
+// Useful for promoting replica set failure conditions into deployments.
+func ReplicaSetToDeploymentCondition(cond extensions.ReplicaSetCondition) extensions.DeploymentCondition {
+    return extensions.DeploymentCondition{
+        Type:               extensions.DeploymentConditionType(cond.Type),
+        Status:             cond.Status,
+        LastTransitionTime: cond.LastTransitionTime,
+        LastUpdateTime:     cond.LastTransitionTime,
+        Reason:             cond.Reason,
+        Message:            cond.Message,
+    }
+}
+
+// SetDeploymentRevision updates the revision for a deployment.
+func SetDeploymentRevision(deployment *extensions.Deployment, revision string) bool {
+    updated := false
+
+    if deployment.Annotations == nil {
+        deployment.Annotations = make(map[string]string)
+    }
+    if deployment.Annotations[RevisionAnnotation] != revision {
+        deployment.Annotations[RevisionAnnotation] = revision
+        updated = true
+    }
+
+    return updated
+}
+
 // MaxRevision finds the highest revision in the replica sets
 func MaxRevision(allRSs []*extensions.ReplicaSet) int64 {
     max := int64(0)
@@ -97,6 +221,19 @@ func LastRevision(allRSs []*extensions.ReplicaSet) int64 {
     return secMax
 }
 
+// Revision returns the revision number of the input object.
+func Revision(obj runtime.Object) (int64, error) {
+    acc, err := meta.Accessor(obj)
+    if err != nil {
+        return 0, err
+    }
+    v, ok := acc.GetAnnotations()[RevisionAnnotation]
+    if !ok {
+        return 0, nil
+    }
+    return strconv.ParseInt(v, 10, 64)
+}
+
 // SetNewReplicaSetAnnotations sets new replica set's annotations appropriately by updating its revision and
 // copying required deployment annotations to it; it returns true if replica set's annotation is changed.
 func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, newRevision string, exists bool) bool {
@@ -106,14 +243,29 @@ func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *exten
     if newRS.Annotations == nil {
         newRS.Annotations = make(map[string]string)
     }
+    oldRevision, ok := newRS.Annotations[RevisionAnnotation]
     // The newRS's revision should be the greatest among all RSes. Usually, its revision number is newRevision (the max revision number
     // of all old RSes + 1). However, it's possible that some of the old RSes are deleted after the newRS revision being updated, and
     // newRevision becomes smaller than newRS's revision. We should only update newRS revision when it's smaller than newRevision.
-    if newRS.Annotations[RevisionAnnotation] < newRevision {
+    if oldRevision < newRevision {
         newRS.Annotations[RevisionAnnotation] = newRevision
         annotationChanged = true
         glog.V(4).Infof("Updating replica set %q revision to %s", newRS.Name, newRevision)
     }
+    // If a revision annotation already existed and this replica set was updated with a new revision
+    // then that means we are rolling back to this replica set. We need to preserve the old revisions
+    // for historical information.
+    if ok && annotationChanged {
+        revisionHistoryAnnotation := newRS.Annotations[RevisionHistoryAnnotation]
+        oldRevisions := strings.Split(revisionHistoryAnnotation, ",")
+        if len(oldRevisions[0]) == 0 {
+            newRS.Annotations[RevisionHistoryAnnotation] = oldRevision
+        } else {
+            oldRevisions = append(oldRevisions, oldRevision)
+            newRS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
+        }
+    }
+    // If the new replica set is about to be created, we need to add replica annotations to it.
     if !exists && SetReplicasAnnotations(newRS, deployment.Spec.Replicas, deployment.Spec.Replicas+MaxSurge(*deployment)) {
         annotationChanged = true
     }
@@ -123,6 +275,7 @@ func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *exten
 var annotationsToSkip = map[string]bool{
     annotations.LastAppliedConfigAnnotation: true,
     RevisionAnnotation:                      true,
+    RevisionHistoryAnnotation:               true,
     DesiredReplicasAnnotation:               true,
     MaxReplicasAnnotation:                   true,
     OverlapAnnotation:                       true,
@@ -368,11 +521,18 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface)
 }
 
 // listReplicaSets lists all RSes the given deployment targets with the given client interface.
-func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]extensions.ReplicaSet, error) {
+func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) {
     return ListReplicaSets(deployment,
-        func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) {
+        func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) {
             rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
-            return rsList.Items, err
+            if err != nil {
+                return nil, err
+            }
+            ret := []*extensions.ReplicaSet{}
+            for i := range rsList.Items {
+                ret = append(ret, &rsList.Items[i])
+            }
+            return ret, err
         })
 }
 
@@ -385,11 +545,11 @@ func listPods(deployment *extensions.Deployment, c clientset.Interface) (*api.Po
 }
 
 // TODO: switch this to full namespacers
-type rsListFunc func(string, api.ListOptions) ([]extensions.ReplicaSet, error)
+type rsListFunc func(string, api.ListOptions) ([]*extensions.ReplicaSet, error)
 type podListFunc func(string, api.ListOptions) (*api.PodList, error)
 
 // ListReplicaSets returns a slice of RSes the given deployment targets.
-func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]extensions.ReplicaSet, error) {
+func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) {
     // TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
     // should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830;
     // or use controllerRef, see https://github.com/kubernetes/kubernetes/issues/2210
@@ -442,7 +602,7 @@ func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) {
 }
 
 // FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
-func FindNewReplicaSet(deployment *extensions.Deployment, rsList []extensions.ReplicaSet) (*extensions.ReplicaSet, error) {
+func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) (*extensions.ReplicaSet, error) {
     newRSTemplate := GetNewReplicaSetTemplate(deployment)
     for i := range rsList {
         equal, err := equalIgnoreHash(rsList[i].Spec.Template, newRSTemplate)
@@ -451,7 +611,7 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []extensions.Re
         }
         if equal {
             // This is the new ReplicaSet.
-            return &rsList[i], nil
+            return rsList[i], nil
         }
     }
     // new ReplicaSet does not exist.
@@ -460,11 +620,11 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []extensions.Re
 
 // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes.
 // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
-func FindOldReplicaSets(deployment *extensions.Deployment, rsList []extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
+func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
     // Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList.
     // All pods and replica sets are labeled with pod-template-hash to prevent overlapping
-    oldRSs := map[string]extensions.ReplicaSet{}
-    allOldRSs := map[string]extensions.ReplicaSet{}
+    oldRSs := map[string]*extensions.ReplicaSet{}
+    allOldRSs := map[string]*extensions.ReplicaSet{}
     newRSTemplate := GetNewReplicaSetTemplate(deployment)
     for _, pod := range podList.Items {
         podLabelsSelector := labels.Set(pod.ObjectMeta.Labels)
@@ -490,12 +650,12 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []extensions.R
     requiredRSs := []*extensions.ReplicaSet{}
     for key := range oldRSs {
         value := oldRSs[key]
-        requiredRSs = append(requiredRSs, &value)
+        requiredRSs = append(requiredRSs, value)
     }
     allRSs := []*extensions.ReplicaSet{}
     for key := range allOldRSs {
         value := allOldRSs[key]
-        allRSs = append(allRSs, &value)
+        allRSs = append(allRSs, value)
     }
     return requiredRSs, allRSs, nil
 }
@@ -578,68 +738,40 @@ func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.P
 
 // GetReplicaCountForReplicaSets returns the sum of Replicas of the given replica sets.
 func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
-    totalReplicaCount := int32(0)
+    totalReplicas := int32(0)
     for _, rs := range replicaSets {
         if rs != nil {
-            totalReplicaCount += rs.Spec.Replicas
+            totalReplicas += rs.Spec.Replicas
         }
     }
-    return totalReplicaCount
+    return totalReplicas
 }
 
 // GetActualReplicaCountForReplicaSets returns the sum of actual replicas of the given replica sets.
 func GetActualReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
-    totalReplicaCount := int32(0)
+    totalActualReplicas := int32(0)
     for _, rs := range replicaSets {
         if rs != nil {
-            totalReplicaCount += rs.Status.Replicas
+            totalActualReplicas += rs.Status.Replicas
         }
     }
-    return totalReplicaCount
+    return totalActualReplicas
 }
 
-// GetAvailablePodsForReplicaSets returns the number of available pods (listed from clientset) corresponding to the given replica sets.
-func GetAvailablePodsForReplicaSets(c clientset.Interface, deployment *extensions.Deployment, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) {
-    podList, err := listPods(deployment, c)
-    if err != nil {
-        return 0, err
-    }
-    return CountAvailablePodsForReplicaSets(podList, rss, minReadySeconds)
-}
-
-// CountAvailablePodsForReplicaSets returns the number of available pods corresponding to the given pod list and replica sets.
-// Note that the input pod list should be the pods targeted by the deployment of input replica sets.
-func CountAvailablePodsForReplicaSets(podList *api.PodList, rss []*extensions.ReplicaSet, minReadySeconds int32) (int32, error) {
-    rsPods, err := filterPodsMatchingReplicaSets(rss, podList, minReadySeconds)
-    if err != nil {
-        return 0, err
-    }
-    return countAvailablePods(rsPods, minReadySeconds), nil
-}
-
-// GetAvailablePodsForDeployment returns the number of available pods (listed from clientset) corresponding to the given deployment.
-func GetAvailablePodsForDeployment(c clientset.Interface, deployment *extensions.Deployment) (int32, error) {
-    podList, err := listPods(deployment, c)
-    if err != nil {
-        return 0, err
-    }
-    return countAvailablePods(podList.Items, deployment.Spec.MinReadySeconds), nil
-}
-
-func countAvailablePods(pods []api.Pod, minReadySeconds int32) int32 {
-    availablePodCount := int32(0)
-    for _, pod := range pods {
-        // TODO: Make the time.Now() as argument to allow unit test this.
-        // FIXME: avoid using time.Now
-        if IsPodAvailable(&pod, minReadySeconds, time.Now()) {
-            glog.V(4).Infof("Pod %s/%s is available.", pod.Namespace, pod.Name)
-            availablePodCount++
+// GetAvailableReplicaCountForReplicaSets returns the number of available pods corresponding to the given replica sets.
+func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
+    totalAvailableReplicas := int32(0)
+    for _, rs := range replicaSets {
+        if rs != nil {
+            totalAvailableReplicas += rs.Status.AvailableReplicas
+        }
+    }
-    return availablePodCount
+    return totalAvailableReplicas
 }
 
 // IsPodAvailable return true if the pod is available.
 // TODO: Remove this once we start using replica set status for calculating available pods
 // for a deployment.
 func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool {
     if !controller.IsPodActive(pod) {
         return false
@@ -662,43 +794,61 @@ func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool {
     return false
 }
 
-// filterPodsMatchingReplicaSets filters the given pod list and only return the ones targeted by the input replicasets
-func filterPodsMatchingReplicaSets(replicaSets []*extensions.ReplicaSet, podList *api.PodList, minReadySeconds int32) ([]api.Pod, error) {
-    allRSPods := []api.Pod{}
-    for _, rs := range replicaSets {
-        matchingFunc, err := rsutil.MatchingPodsFunc(rs)
-        if err != nil {
-            return nil, err
-        }
-        if matchingFunc == nil {
-            continue
-        }
-        rsPods := podutil.Filter(podList, matchingFunc)
-        avaPodsCount := countAvailablePods(rsPods, minReadySeconds)
-        if avaPodsCount > rs.Spec.Replicas {
-            msg := fmt.Sprintf("Found %s/%s with %d available pods, more than its spec replicas %d", rs.Namespace, rs.Name, avaPodsCount, rs.Spec.Replicas)
-            glog.Errorf("ERROR: %s", msg)
-            return nil, fmt.Errorf(msg)
-        }
-        allRSPods = append(allRSPods, podutil.Filter(podList, matchingFunc)...)
-    }
-    return allRSPods, nil
-}
-
-// Revision returns the revision number of the input replica set
-func Revision(rs *extensions.ReplicaSet) (int64, error) {
-    v, ok := rs.Annotations[RevisionAnnotation]
-    if !ok {
-        return 0, nil
-    }
-    return strconv.ParseInt(v, 10, 64)
-}
-
 // IsRollingUpdate returns true if the strategy type is a rolling update.
 func IsRollingUpdate(deployment *extensions.Deployment) bool {
     return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType
 }
 
+// DeploymentComplete considers a deployment to be complete once its desired replicas equals its
+// updatedReplicas and it doesn't violate minimum availability.
+func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
+    return newStatus.UpdatedReplicas == deployment.Spec.Replicas &&
+        newStatus.AvailableReplicas >= deployment.Spec.Replicas-MaxUnavailable(*deployment)
+}
+
+// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
+// current with the new status of the deployment that the controller is observing. The following
+// algorithm is already used in the kubectl rolling updater to report lack of progress.
+func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
+    oldStatus := deployment.Status
+
+    // Old replicas that need to be scaled down
+    oldStatusOldReplicas := oldStatus.Replicas - oldStatus.UpdatedReplicas
+    newStatusOldReplicas := newStatus.Replicas - newStatus.UpdatedReplicas
+
+    return (newStatus.UpdatedReplicas > oldStatus.UpdatedReplicas) || (newStatusOldReplicas < oldStatusOldReplicas)
+}
+
+// used for unit testing
+var nowFn = func() time.Time { return time.Now() }
+
+// DeploymentTimedOut considers a deployment to have timed out once its condition that reports progress
+// is older than progressDeadlineSeconds or a Progressing condition with a TimedOutReason reason already
+// exists.
+func DeploymentTimedOut(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
+    if deployment.Spec.ProgressDeadlineSeconds == nil {
+        return false
+    }
+
+    // Look for the Progressing condition. If it doesn't exist, we have no base to estimate progress.
+    // If it's already set with a TimedOutReason reason, we have already timed out, no need to check
+    // again.
+    condition := GetDeploymentCondition(*newStatus, extensions.DeploymentProgressing)
+    if condition == nil {
+        return false
+    }
+    if condition.Reason == TimedOutReason {
+        return true
+    }
+
+    // Look at the difference in seconds between now and the last time we reported any
+    // progress or tried to create a replica set, or resumed a paused deployment and
+    // compare against progressDeadlineSeconds.
+    from := condition.LastUpdateTime
+    delta := time.Duration(*deployment.Spec.ProgressDeadlineSeconds) * time.Second
+    return from.Add(delta).Before(nowFn())
+}
+
 // NewRSNewReplicas calculates the number of replicas a deployment's new RS should have.
 // When one of the followings is true, we're rolling out the deployment; otherwise, we're scaling it.
 // 1) The new RS is saturated: newRS's replicas == deployment's replicas
@@ -824,12 +974,12 @@ func LastSelectorUpdate(d *extensions.Deployment) unversioned.Time {
 
 // BySelectorLastUpdateTime sorts a list of deployments by the last update time of their selector,
 // first using their creation timestamp and then their names as a tie breaker.
-type BySelectorLastUpdateTime []extensions.Deployment
+type BySelectorLastUpdateTime []*extensions.Deployment
 
 func (o BySelectorLastUpdateTime) Len() int      { return len(o) }
 func (o BySelectorLastUpdateTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
 func (o BySelectorLastUpdateTime) Less(i, j int) bool {
-    ti, tj := LastSelectorUpdate(&o[i]), LastSelectorUpdate(&o[j])
+    ti, tj := LastSelectorUpdate(o[i]), LastSelectorUpdate(o[j])
     if ti.Equal(tj) {
         if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
             return o[i].Name < o[j].Name
@@ -838,3 +988,22 @@ func (o BySelectorLastUpdateTime) Less(i, j int) bool {
     }
     return ti.Before(tj)
 }
+
+// OverlapsWith returns true when two given deployments are different and overlap with each other
+func OverlapsWith(current, other *extensions.Deployment) (bool, error) {
+    if current.UID == other.UID {
+        return false, nil
+    }
+    currentSelector, err := unversioned.LabelSelectorAsSelector(current.Spec.Selector)
+    if err != nil {
+        return false, fmt.Errorf("deployment %s/%s has invalid label selector: %v", current.Namespace, current.Name, err)
+    }
+    otherSelector, err := unversioned.LabelSelectorAsSelector(other.Spec.Selector)
+    if err != nil {
+        // Broken selectors from other deployments shouldn't block current deployment. Just log the error and continue.
+        glog.V(2).Infof("Skip overlapping check: deployment %s/%s has invalid label selector: %v", other.Namespace, other.Name, err)
+        return false, nil
+    }
+    return (!currentSelector.Empty() && currentSelector.Matches(labels.Set(other.Spec.Template.Labels))) ||
+        (!otherSelector.Empty() && otherSelector.Matches(labels.Set(current.Spec.Template.Labels))), nil
+}
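The new condition helpers above are meant to compose: NewDeploymentCondition stamps both timestamps, and SetDeploymentCondition dedupes on status plus reason while preserving LastTransitionTime when only the message changes. A small sketch of how a controller might use them (the message text is illustrative):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/apis/extensions"
    deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

func main() {
    status := &extensions.DeploymentStatus{}

    // Record that a rollout step happened.
    cond := deploymentutil.NewDeploymentCondition(
        extensions.DeploymentProgressing,
        api.ConditionTrue,
        deploymentutil.ReplicaSetUpdatedReason,
        "ReplicaSet is progressing.",
    )
    deploymentutil.SetDeploymentCondition(status, *cond)

    // Read it back; DeploymentTimedOut consults this same condition
    // against Spec.ProgressDeadlineSeconds.
    got := deploymentutil.GetDeploymentCondition(*status, extensions.DeploymentProgressing)
    fmt.Println(got.Reason, got.Message)
}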
326
vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go
generated
vendored
326
vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go
generated
vendored
|
|
@ -1,326 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
)
|
||||
|
||||
// Config contains all the settings for a Controller.
|
||||
type Config struct {
|
||||
// The queue for your objects; either a cache.FIFO or
|
||||
// a cache.DeltaFIFO. Your Process() function should accept
|
||||
// the output of this Oueue's Pop() method.
|
||||
cache.Queue
|
||||
|
||||
// Something that can list and watch your objects.
|
||||
cache.ListerWatcher
|
||||
|
||||
// Something that can process your objects.
|
||||
Process ProcessFunc
|
||||
|
||||
// The type of your objects.
|
||||
ObjectType runtime.Object
|
||||
|
||||
// Reprocess everything at least this often.
|
||||
// Note that if it takes longer for you to clear the queue than this
|
||||
// period, you will end up processing items in the order determined
|
||||
// by cache.FIFO.Replace(). Currently, this is random. If this is a
|
||||
// problem, we can change that replacement policy to append new
|
||||
// things to the end of the queue instead of replacing the entire
|
||||
// queue.
|
||||
FullResyncPeriod time.Duration
|
||||
|
||||
// If true, when Process() returns an error, re-enqueue the object.
|
||||
// TODO: add interface to let you inject a delay/backoff or drop
|
||||
// the object completely if desired. Pass the object in
|
||||
// question to this interface as a parameter.
|
||||
RetryOnError bool
|
||||
}
|
||||
|
||||
// ProcessFunc processes a single object.
|
||||
type ProcessFunc func(obj interface{}) error
|
||||
|
||||
// Controller is a generic controller framework.
|
||||
type Controller struct {
|
||||
config Config
|
||||
reflector *cache.Reflector
|
||||
reflectorMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// TODO make the "Controller" private, and convert all references to use ControllerInterface instead
|
||||
type ControllerInterface interface {
|
||||
Run(stopCh <-chan struct{})
|
||||
HasSynced() bool
|
||||
}
|
||||
|
||||
// New makes a new Controller from the given Config.
|
||||
func New(c *Config) *Controller {
|
||||
ctlr := &Controller{
|
||||
config: *c,
|
||||
}
|
||||
return ctlr
|
||||
}
|
||||
|
||||
// Run begins processing items, and will continue until a value is sent down stopCh.
|
||||
// It's an error to call Run more than once.
|
||||
// Run blocks; call via go.
|
||||
func (c *Controller) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
r := cache.NewReflector(
|
||||
c.config.ListerWatcher,
|
||||
c.config.ObjectType,
|
||||
c.config.Queue,
|
||||
c.config.FullResyncPeriod,
|
||||
)
|
||||
|
||||
c.reflectorMutex.Lock()
|
||||
c.reflector = r
|
||||
c.reflectorMutex.Unlock()
|
||||
|
||||
r.RunUntil(stopCh)
|
||||
|
||||
wait.Until(c.processLoop, time.Second, stopCh)
|
||||
}
|
||||
|
||||
// Returns true once this controller has completed an initial resource listing
|
||||
func (c *Controller) HasSynced() bool {
|
||||
return c.config.Queue.HasSynced()
|
||||
}
|
||||
|
||||
// Requeue adds the provided object back into the queue if it does not already exist.
|
||||
func (c *Controller) Requeue(obj interface{}) error {
|
||||
return c.config.Queue.AddIfNotPresent(cache.Deltas{
|
||||
cache.Delta{
|
||||
Type: cache.Sync,
|
||||
Object: obj,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// processLoop drains the work queue.
|
||||
// TODO: Consider doing the processing in parallel. This will require a little thought
|
||||
// to make sure that we don't end up processing the same object multiple times
|
||||
// concurrently.
|
||||
func (c *Controller) processLoop() {
|
||||
for {
|
||||
obj, err := c.config.Queue.Pop(cache.PopProcessFunc(c.config.Process))
|
||||
if err != nil {
|
||||
if c.config.RetryOnError {
|
||||
// This is the safe way to re-enqueue.
|
||||
c.config.Queue.AddIfNotPresent(obj)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ResourceEventHandler can handle notifications for events that happen to a
|
||||
// resource. The events are informational only, so you can't return an
|
||||
// error.
|
||||
// * OnAdd is called when an object is added.
|
||||
// * OnUpdate is called when an object is modified. Note that oldObj is the
|
||||
// last known state of the object-- it is possible that several changes
|
||||
// were combined together, so you can't use this to see every single
|
||||
// change. OnUpdate is also called when a re-list happens, and it will
|
||||
// get called even if nothing changed. This is useful for periodically
|
||||
// evaluating or syncing something.
|
||||
// * OnDelete will get the final state of the item if it is known, otherwise
|
||||
// it will get an object of type cache.DeletedFinalStateUnknown. This can
|
||||
// happen if the watch is closed and misses the delete event and we don't
|
||||
// notice the deletion until the subsequent re-list.
|
||||
type ResourceEventHandler interface {
|
||||
OnAdd(obj interface{})
|
||||
OnUpdate(oldObj, newObj interface{})
|
||||
OnDelete(obj interface{})
|
||||
}
|
||||
|
||||
// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
|
||||
// as few of the notification functions as you want while still implementing
|
||||
// ResourceEventHandler.
|
||||
type ResourceEventHandlerFuncs struct {
|
||||
AddFunc func(obj interface{})
|
||||
UpdateFunc func(oldObj, newObj interface{})
|
||||
DeleteFunc func(obj interface{})
|
||||
}
|
||||
|
||||
// OnAdd calls AddFunc if it's not nil.
|
||||
func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
|
||||
if r.AddFunc != nil {
|
||||
r.AddFunc(obj)
|
||||
}
|
||||
}
|
||||
|
||||
// OnUpdate calls UpdateFunc if it's not nil.
|
||||
func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
|
||||
if r.UpdateFunc != nil {
|
||||
r.UpdateFunc(oldObj, newObj)
|
||||
}
|
||||
}
|
||||
|
||||
// OnDelete calls DeleteFunc if it's not nil.
|
||||
func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
|
||||
if r.DeleteFunc != nil {
|
||||
r.DeleteFunc(obj)
|
||||
}
|
||||
}
|
||||
|
||||
// DeletionHandlingMetaNamespaceKeyFunc checks for
|
||||
// cache.DeletedFinalStateUnknown objects before calling
|
||||
// cache.MetaNamespaceKeyFunc.
|
||||
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
|
||||
if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
|
||||
return d.Key, nil
|
||||
}
|
||||
return cache.MetaNamespaceKeyFunc(obj)
|
||||
}
|
||||
|
||||
// NewInformer returns a cache.Store and a controller for populating the store
|
||||
// while also providing event notifications. You should only used the returned
|
||||
// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
|
||||
// notifications to be faulty.
|
||||
//
|
||||
// Parameters:
|
||||
// * lw is list and watch functions for the source of the resource you want to
|
||||
// be informed of.
|
||||
// * objType is an object of the type that you expect to receive.
|
||||
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
|
||||
// calls, even if nothing changed). Otherwise, re-list will be delayed as
|
||||
// long as possible (until the upstream source closes the watch or times out,
|
||||
// or you stop the controller).
|
||||
// * h is the object you want notifications sent to.
|
||||
//
|
||||
func NewInformer(
	lw cache.ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
) (cache.Store, *Controller) {
	// This will hold the client state, as we know it.
	clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		RetryOnError:     false,

		Process: func(obj interface{}) error {
			// from oldest to newest
			for _, d := range obj.(cache.Deltas) {
				switch d.Type {
				case cache.Sync, cache.Added, cache.Updated:
					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
						if err := clientState.Update(d.Object); err != nil {
							return err
						}
						h.OnUpdate(old, d.Object)
					} else {
						if err := clientState.Add(d.Object); err != nil {
							return err
						}
						h.OnAdd(d.Object)
					}
				case cache.Deleted:
					if err := clientState.Delete(d.Object); err != nil {
						return err
					}
					h.OnDelete(d.Object)
				}
			}
			return nil
		},
	}
	return clientState, New(cfg)
}
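
A minimal wiring sketch (editorial, not part of this commit), assuming the surrounding package's imports plus `k8s.io/kubernetes/pkg/api` and `k8s.io/kubernetes/pkg/fields`; `examplePodStore`, `podClient`, and `stopCh` are hypothetical names.

func examplePodStore(podClient cache.Getter, stopCh <-chan struct{}) cache.Store {
	lw := cache.NewListWatchFromClient(podClient, "pods", api.NamespaceAll, fields.Everything())
	store, controller := NewInformer(lw, &api.Pod{}, 30*time.Second,
		ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				// React to changes here; read-only access to store is safe.
			},
		})
	go controller.Run(stopCh) // Run blocks until stopCh is closed.
	return store
}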

// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
// while also providing event notifications. You should only use the returned
// cache.Indexer for Get/List operations; Add/Modify/Delete calls will cause the
// event notifications to be faulty.
//
// Parameters:
// * lw is list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
//
func NewIndexerInformer(
	lw cache.ListerWatcher,
	objType runtime.Object,
	resyncPeriod time.Duration,
	h ResourceEventHandler,
	indexers cache.Indexers,
) (cache.Indexer, *Controller) {
	// This will hold the client state, as we know it.
	clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)

	// This will hold incoming changes. Note how we pass clientState in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    lw,
		ObjectType:       objType,
		FullResyncPeriod: resyncPeriod,
		RetryOnError:     false,

		Process: func(obj interface{}) error {
			// from oldest to newest
			for _, d := range obj.(cache.Deltas) {
				switch d.Type {
				case cache.Sync, cache.Added, cache.Updated:
					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
						if err := clientState.Update(d.Object); err != nil {
							return err
						}
						h.OnUpdate(old, d.Object)
					} else {
						if err := clientState.Add(d.Object); err != nil {
							return err
						}
						h.OnAdd(d.Object)
					}
				case cache.Deleted:
					if err := clientState.Delete(d.Object); err != nil {
						return err
					}
					h.OnDelete(d.Object)
				}
			}
			return nil
		},
	}
	return clientState, New(cfg)
}
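
A short sketch (editorial, not part of this commit) of the indexer variant, using what I believe is the cache package's stock namespace index in this vintage; `exampleIndexedStore` is a hypothetical name and assumes an `api` import as above.

func exampleIndexedStore(lw cache.ListerWatcher, stopCh <-chan struct{}) cache.Indexer {
	indexer, controller := NewIndexerInformer(lw, &api.Pod{}, 0, ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	go controller.Run(stopCh)
	// After HasSynced, indexer.ByIndex(cache.NamespaceIndex, "kube-system")
	// returns the cached objects in that namespace.
	return indexer
}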

18
vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go
generated
vendored
@@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package framework implements all the grunt work involved in running a simple controller.
package framework

262
vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go
generated
vendored
@@ -1,262 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"errors"
	"math/rand"
	"strconv"
	"sync"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/meta"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/watch"
)

func NewFakeControllerSource() *FakeControllerSource {
	return &FakeControllerSource{
		Items:       map[nnu]runtime.Object{},
		Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
	}
}

func NewFakePVControllerSource() *FakePVControllerSource {
	return &FakePVControllerSource{
		FakeControllerSource{
			Items:       map[nnu]runtime.Object{},
			Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
		}}
}

func NewFakePVCControllerSource() *FakePVCControllerSource {
	return &FakePVCControllerSource{
		FakeControllerSource{
			Items:       map[nnu]runtime.Object{},
			Broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
		}}
}

// FakeControllerSource implements listing/watching for testing.
type FakeControllerSource struct {
	lock        sync.RWMutex
	Items       map[nnu]runtime.Object
	changes     []watch.Event // one change per resourceVersion
	Broadcaster *watch.Broadcaster
}

type FakePVControllerSource struct {
	FakeControllerSource
}

type FakePVCControllerSource struct {
	FakeControllerSource
}

// namespace, name, uid to be used as a key.
type nnu struct {
	namespace, name string
	uid             types.UID
}

// Add adds an object to the set and sends an add event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Add(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
}

// Modify updates an object in the set and sends a modified event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Modify(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
}

// Delete deletes an object from the set and sends a delete event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
}

// AddDropWatch adds an object to the set but forgets to send an add event to
// watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
}

// ModifyDropWatch updates an object in the set but forgets to send a modify
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
	f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
}

// DeleteDropWatch deletes an object from the set but forgets to send a delete
// event to watchers.
// obj's ResourceVersion is set.
func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
	f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
}

func (f *FakeControllerSource) key(accessor meta.Object) nnu {
	return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
}

// Change records the given event (setting the object's resource version) and
// sends a watch event with the specified probability.
func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
	f.lock.Lock()
	defer f.lock.Unlock()

	accessor, err := meta.Accessor(e.Object)
	if err != nil {
		panic(err) // this is test code only
	}

	resourceVersion := len(f.changes) + 1
	accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
	f.changes = append(f.changes, e)
	key := f.key(accessor)
	switch e.Type {
	case watch.Added, watch.Modified:
		f.Items[key] = e.Object
	case watch.Deleted:
		delete(f.Items, key)
	}

	if rand.Float64() < watchProbability {
		f.Broadcaster.Action(e.Type, e.Object)
	}
}
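
// Note (editorial, not in the original source): resource versions in this
// fake are 1-based indices into f.changes, so the version List reports
// (len(f.changes)) and the replay window Watch uses (f.changes[rc:]) line
// up exactly; a client that lists and then watches from the returned
// version sees each subsequent event exactly once.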

func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
	list := make([]runtime.Object, 0, len(f.Items))
	for _, obj := range f.Items {
		// Must make a copy to allow clients to modify the object.
		// Otherwise, if they make a change and write it back, they
		// will inadvertently change our canonical copy (in
		// addition to racing with other clients).
		objCopy, err := api.Scheme.DeepCopy(obj)
		if err != nil {
			return nil, err
		}
		list = append(list, objCopy.(runtime.Object))
	}
	return list, nil
}

// List returns a list object, with its resource version set.
func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	list, err := f.getListItemsLocked()
	if err != nil {
		return nil, err
	}
	listObj := &api.List{}
	if err := meta.SetList(listObj, list); err != nil {
		return nil, err
	}
	objMeta, err := api.ListMetaFor(listObj)
	if err != nil {
		return nil, err
	}
	resourceVersion := len(f.changes)
	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
	return listObj, nil
}

// List returns a list object, with its resource version set.
func (f *FakePVControllerSource) List(options api.ListOptions) (runtime.Object, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	list, err := f.FakeControllerSource.getListItemsLocked()
	if err != nil {
		return nil, err
	}
	listObj := &api.PersistentVolumeList{}
	if err := meta.SetList(listObj, list); err != nil {
		return nil, err
	}
	objMeta, err := api.ListMetaFor(listObj)
	if err != nil {
		return nil, err
	}
	resourceVersion := len(f.changes)
	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
	return listObj, nil
}

// List returns a list object, with its resource version set.
func (f *FakePVCControllerSource) List(options api.ListOptions) (runtime.Object, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	list, err := f.FakeControllerSource.getListItemsLocked()
	if err != nil {
		return nil, err
	}
	listObj := &api.PersistentVolumeClaimList{}
	if err := meta.SetList(listObj, list); err != nil {
		return nil, err
	}
	objMeta, err := api.ListMetaFor(listObj)
	if err != nil {
		return nil, err
	}
	resourceVersion := len(f.changes)
	objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
	return listObj, nil
}

// Watch returns a watch, which will be pre-populated with all changes
// after resourceVersion.
func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) {
	f.lock.RLock()
	defer f.lock.RUnlock()
	rc, err := strconv.Atoi(options.ResourceVersion)
	if err != nil {
		return nil, err
	}
	if rc < len(f.changes) {
		changes := []watch.Event{}
		for _, c := range f.changes[rc:] {
			// Must make a copy to allow clients to modify the
			// object. Otherwise, if they make a change and write
			// it back, they will inadvertently change our
			// canonical copy (in addition to racing with other
			// clients).
			objCopy, err := api.Scheme.DeepCopy(c.Object)
			if err != nil {
				return nil, err
			}
			changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
		}
		return f.Broadcaster.WatchWithPrefix(changes), nil
	} else if rc > len(f.changes) {
		return nil, errors.New("resource version in the future not supported by this fake")
	}
	return f.Broadcaster.Watch(), nil
}

// Shutdown closes the underlying broadcaster, waiting for events to be
// delivered. It's an error to call any method after calling shutdown. This is
// enforced by Shutdown() leaving f locked.
func (f *FakeControllerSource) Shutdown() {
	f.lock.Lock() // Purposely no unlock.
	f.Broadcaster.Shutdown()
}
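
A minimal test sketch (editorial, not part of this commit): seed the fake, then watch from version "0" to replay history. `exampleFakeSourceUsage` is a hypothetical name and assumes `testing` is imported.

func exampleFakeSourceUsage(t *testing.T) {
	source := NewFakeControllerSource()
	defer source.Shutdown()

	source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})

	// rc=0 is behind len(f.changes), so the Add above is replayed as a prefix.
	w, err := source.Watch(api.ListOptions{ResourceVersion: "0"})
	if err != nil {
		t.Fatal(err)
	}
	defer w.Stop()

	e := <-w.ResultChan()
	if e.Type != watch.Added {
		t.Errorf("expected Added, got %v", e.Type)
	}
}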

383
vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go
generated
vendored
@@ -1,383 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/runtime"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
)

// If you use this, there is one behavior change compared to a standard Informer:
// when you receive a notification, the cache will be AT LEAST as fresh as the
// notification, but it MAY be more fresh. You should NOT depend on the contents
// of the cache exactly matching the notification you've received in handler
// functions. If there was a create, followed by a delete, the cache may NOT
// have your item. This has advantages over the broadcaster since it allows us
// to share a common cache across many controllers. Extending the broadcaster
// would have required us to keep duplicate caches for each watch.
type SharedInformer interface {
	// Events to a single handler are delivered sequentially, but there is no
	// coordination between different handlers.
	// You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned.
	// TODO we should try to remove this restriction eventually.
	AddEventHandler(handler ResourceEventHandler) error
	GetStore() cache.Store
	// GetController gives back a synthetic interface that "votes" to start the informer
	GetController() ControllerInterface
	Run(stopCh <-chan struct{})
	HasSynced() bool
	LastSyncResourceVersion() string
}

type SharedIndexInformer interface {
	SharedInformer
	// AddIndexers adds indexers to the informer before it starts.
	AddIndexers(indexers cache.Indexers) error
	GetIndexer() cache.Indexer
}

// NewSharedInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
	return NewSharedIndexInformer(lw, objType, resyncPeriod, cache.Indexers{})
}

// NewSharedIndexInformer creates a new instance for the listwatcher.
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
// be shared amongst all consumers.
func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
	sharedIndexInformer := &sharedIndexInformer{
		processor:        &sharedProcessor{},
		indexer:          cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
		listerWatcher:    lw,
		objectType:       objType,
		fullResyncPeriod: resyncPeriod,
	}
	return sharedIndexInformer
}
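
A minimal usage sketch (editorial, not part of this commit); `exampleSharedUsage` is a hypothetical name and assumes an `api` import for the object type.

func exampleSharedUsage(lw cache.ListerWatcher, stopCh <-chan struct{}) {
	informer := NewSharedIndexInformer(lw, &api.Pod{}, 30*time.Second, cache.Indexers{})

	// Registering before Run just queues the listener; the late-join path in
	// AddEventHandler also handles registration after Run has started.
	informer.AddEventHandler(ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Printf("add: %v\n", obj) },
	})

	go informer.Run(stopCh)
	for !informer.HasSynced() {
		time.Sleep(10 * time.Millisecond)
	}
	// informer.GetStore() is now at least as fresh as any delivered event.
}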

type sharedIndexInformer struct {
	indexer    cache.Indexer
	controller *Controller

	processor *sharedProcessor

	// This block is tracked to handle late initialization of the controller
	listerWatcher    cache.ListerWatcher
	objectType       runtime.Object
	fullResyncPeriod time.Duration

	started     bool
	startedLock sync.Mutex

	// blockDeltas gives a way to stop all event distribution so that a late event handler
	// can safely join the shared informer.
	blockDeltas sync.Mutex
	// stopCh is the channel used to stop the main Run process. We have to track it so that
	// late joiners can have a proper stop
	stopCh <-chan struct{}
}

// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
	informer *sharedIndexInformer
}

func (v *dummyController) Run(stopCh <-chan struct{}) {
}

func (v *dummyController) HasSynced() bool {
	return v.informer.HasSynced()
}

type updateNotification struct {
	oldObj interface{}
	newObj interface{}
}

type addNotification struct {
	newObj interface{}
}

type deleteNotification struct {
	oldObj interface{}
}

func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.fullResyncPeriod,
		RetryOnError:     false,

		Process: s.HandleDeltas,
	}

	func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()

		s.controller = New(cfg)
		s.started = true
	}()

	s.stopCh = stopCh
	s.processor.run(stopCh)
	s.controller.Run(stopCh)
}

func (s *sharedIndexInformer) isStarted() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	return s.started
}

func (s *sharedIndexInformer) HasSynced() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.controller == nil {
		return false
	}
	return s.controller.HasSynced()
}

func (s *sharedIndexInformer) LastSyncResourceVersion() string {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.controller == nil {
		return ""
	}
	return s.controller.reflector.LastSyncResourceVersion()
}

func (s *sharedIndexInformer) GetStore() cache.Store {
	return s.indexer
}

func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
	return s.indexer
}

func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.started {
		return fmt.Errorf("informer has already started")
	}

	return s.indexer.AddIndexers(indexers)
}

func (s *sharedIndexInformer) GetController() ControllerInterface {
	return &dummyController{informer: s}
}

func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if !s.started {
		listener := newProcessListener(handler)
		s.processor.listeners = append(s.processor.listeners, listener)
		return nil
	}

	// in order to safely join, we have to
	// 1. stop sending add/update/delete notifications
	// 2. do a list against the store
	// 3. send synthetic "Add" events to the new handler
	// 4. unblock
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	listener := newProcessListener(handler)
	s.processor.listeners = append(s.processor.listeners, listener)

	go listener.run(s.stopCh)
	go listener.pop(s.stopCh)

	items := s.indexer.List()
	for i := range items {
		listener.add(addNotification{newObj: items[i]})
	}

	return nil
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	// from oldest to newest
	for _, d := range obj.(cache.Deltas) {
		switch d.Type {
		case cache.Sync, cache.Added, cache.Updated:
			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
				if err := s.indexer.Update(d.Object); err != nil {
					return err
				}
				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object})
			} else {
				if err := s.indexer.Add(d.Object); err != nil {
					return err
				}
				s.processor.distribute(addNotification{newObj: d.Object})
			}
		case cache.Deleted:
			if err := s.indexer.Delete(d.Object); err != nil {
				return err
			}
			s.processor.distribute(deleteNotification{oldObj: d.Object})
		}
	}
	return nil
}

type sharedProcessor struct {
	listeners []*processorListener
}

func (p *sharedProcessor) distribute(obj interface{}) {
	for _, listener := range p.listeners {
		listener.add(obj)
	}
}

func (p *sharedProcessor) run(stopCh <-chan struct{}) {
	for _, listener := range p.listeners {
		go listener.run(stopCh)
		go listener.pop(stopCh)
	}
}

type processorListener struct {
	// lock/cond protects access to 'pendingNotifications'.
	lock sync.RWMutex
	cond sync.Cond

	// pendingNotifications is an unbounded slice that holds all notifications not yet distributed
	// there is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better
	pendingNotifications []interface{}

	nextCh chan interface{}

	handler ResourceEventHandler
}

func newProcessListener(handler ResourceEventHandler) *processorListener {
	ret := &processorListener{
		pendingNotifications: []interface{}{},
		nextCh:               make(chan interface{}),
		handler:              handler,
	}

	ret.cond.L = &ret.lock
	return ret
}

func (p *processorListener) add(notification interface{}) {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.pendingNotifications = append(p.pendingNotifications, notification)
	p.cond.Broadcast()
}

func (p *processorListener) pop(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	for {
		blockingGet := func() (interface{}, bool) {
			p.lock.Lock()
			defer p.lock.Unlock()

			for len(p.pendingNotifications) == 0 {
				// check if we're shutdown
				select {
				case <-stopCh:
					return nil, true
				default:
				}
				p.cond.Wait()
			}

			nt := p.pendingNotifications[0]
			p.pendingNotifications = p.pendingNotifications[1:]
			return nt, false
		}

		notification, stopped := blockingGet()
		if stopped {
			return
		}

		select {
		case <-stopCh:
			return
		case p.nextCh <- notification:
		}
	}
}
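
// Note (editorial, not in the original source): add, pop, and run form a
// per-listener pipeline. add appends under the lock and broadcasts the
// condition variable; pop waits until work exists, then feeds one
// notification at a time into nextCh; run drains nextCh and dispatches to
// the handler. The unbounded slice is what keeps a slow handler from
// blocking sharedProcessor.distribute for every other listener.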

func (p *processorListener) run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	for {
		var next interface{}
		select {
		case <-stopCh:
			func() {
				p.lock.Lock()
				defer p.lock.Unlock()
				p.cond.Broadcast()
			}()
			return
		case next = <-p.nextCh:
		}

		switch notification := next.(type) {
		case updateNotification:
			p.handler.OnUpdate(notification.oldObj, notification.newObj)
		case addNotification:
			p.handler.OnAdd(notification.newObj)
		case deleteNotification:
			p.handler.OnDelete(notification.oldObj)
		default:
			utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
		}
	}
}