Move Ingress godeps to vendor/
This commit is contained in:
parent
0d4f49e50e
commit
ca620e4074
2059 changed files with 3706 additions and 213845 deletions
5
vendor/k8s.io/kubernetes/pkg/controller/OWNERS
generated
vendored
Normal file
5
vendor/k8s.io/kubernetes/pkg/controller/OWNERS
generated
vendored
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
assignees:
|
||||
- bprashanth
|
||||
- davidopp
|
||||
- derekwaynecarr
|
||||
- mikedanese
|
||||
661
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
generated
vendored
Normal file
661
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
generated
vendored
Normal file
|
|
@ -0,0 +1,661 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/pkg/util/integer"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
const (
|
||||
CreatedByAnnotation = "kubernetes.io/created-by"
|
||||
|
||||
// If a watch drops a delete event for a pod, it'll take this long
|
||||
// before a dormant controller waiting for those packets is woken up anyway. It is
|
||||
// specifically targeted at the case where some problem prevents an update
|
||||
// of expectations, without it the controller could stay asleep forever. This should
|
||||
// be set based on the expected latency of watch events.
|
||||
//
|
||||
// Currently a controller can service (create *and* observe the watch events for said
|
||||
// creation) about 10 pods a second, so it takes about 1 min to service
|
||||
// 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s
|
||||
// latency/pod at the scale of 3000 pods over 100 nodes.
|
||||
ExpectationsTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
|
||||
)
|
||||
|
||||
type ResyncPeriodFunc func() time.Duration
|
||||
|
||||
// Returns 0 for resyncPeriod in case resyncing is not needed.
|
||||
func NoResyncPeriodFunc() time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
// StaticResyncPeriodFunc returns the resync period specified
|
||||
func StaticResyncPeriodFunc(resyncPeriod time.Duration) ResyncPeriodFunc {
|
||||
return func() time.Duration {
|
||||
return resyncPeriod
|
||||
}
|
||||
}
|
||||
|
||||
// Expectations are a way for controllers to tell the controller manager what they expect. eg:
|
||||
// ControllerExpectations: {
|
||||
// controller1: expects 2 adds in 2 minutes
|
||||
// controller2: expects 2 dels in 2 minutes
|
||||
// controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met
|
||||
// }
|
||||
//
|
||||
// Implementation:
|
||||
// ControlleeExpectation = pair of atomic counters to track controllee's creation/deletion
|
||||
// ControllerExpectationsStore = TTLStore + a ControlleeExpectation per controller
|
||||
//
|
||||
// * Once set expectations can only be lowered
|
||||
// * A controller isn't synced till its expectations are either fulfilled, or expire
|
||||
// * Controllers that don't set expectations will get woken up for every matching controllee
|
||||
|
||||
// ExpKeyFunc to parse out the key from a ControlleeExpectation
|
||||
var ExpKeyFunc = func(obj interface{}) (string, error) {
|
||||
if e, ok := obj.(*ControlleeExpectations); ok {
|
||||
return e.key, nil
|
||||
}
|
||||
return "", fmt.Errorf("Could not find key for obj %#v", obj)
|
||||
}
|
||||
|
||||
// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations.
|
||||
// Only abstracted out for testing.
|
||||
// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different
|
||||
// types of controllers, because the keys might conflict across types.
|
||||
type ControllerExpectationsInterface interface {
|
||||
GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error)
|
||||
SatisfiedExpectations(controllerKey string) bool
|
||||
DeleteExpectations(controllerKey string)
|
||||
SetExpectations(controllerKey string, add, del int) error
|
||||
ExpectCreations(controllerKey string, adds int) error
|
||||
ExpectDeletions(controllerKey string, dels int) error
|
||||
CreationObserved(controllerKey string)
|
||||
DeletionObserved(controllerKey string)
|
||||
RaiseExpectations(controllerKey string, add, del int)
|
||||
LowerExpectations(controllerKey string, add, del int)
|
||||
}
|
||||
|
||||
// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync.
|
||||
type ControllerExpectations struct {
|
||||
cache.Store
|
||||
}
|
||||
|
||||
// GetExpectations returns the ControlleeExpectations of the given controller.
|
||||
func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) {
|
||||
if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
|
||||
return exp.(*ControlleeExpectations), true, nil
|
||||
} else {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteExpectations deletes the expectations of the given controller from the TTLStore.
|
||||
func (r *ControllerExpectations) DeleteExpectations(controllerKey string) {
|
||||
if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
|
||||
if err := r.Delete(exp); err != nil {
|
||||
glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed.
|
||||
// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller
|
||||
// manager.
|
||||
func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool {
|
||||
if exp, exists, err := r.GetExpectations(controllerKey); exists {
|
||||
if exp.Fulfilled() {
|
||||
return true
|
||||
} else if exp.isExpired() {
|
||||
glog.V(4).Infof("Controller expectations expired %#v", exp)
|
||||
return true
|
||||
} else {
|
||||
glog.V(4).Infof("Controller still waiting on expectations %#v", exp)
|
||||
return false
|
||||
}
|
||||
} else if err != nil {
|
||||
glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err)
|
||||
} else {
|
||||
// When a new controller is created, it doesn't have expectations.
|
||||
// When it doesn't see expected watch events for > TTL, the expectations expire.
|
||||
// - In this case it wakes up, creates/deletes controllees, and sets expectations again.
|
||||
// When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire.
|
||||
// - In this case it continues without setting expectations till it needs to create/delete controllees.
|
||||
glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey)
|
||||
}
|
||||
// Trigger a sync if we either encountered and error (which shouldn't happen since we're
|
||||
// getting from local store) or this controller hasn't established expectations.
|
||||
return true
|
||||
}
|
||||
|
||||
// TODO: Extend ExpirationCache to support explicit expiration.
|
||||
// TODO: Make this possible to disable in tests.
|
||||
// TODO: Support injection of clock.
|
||||
func (exp *ControlleeExpectations) isExpired() bool {
|
||||
return util.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout
|
||||
}
|
||||
|
||||
// SetExpectations registers new expectations for the given controller. Forgets existing expectations.
|
||||
func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error {
|
||||
exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: util.RealClock{}.Now()}
|
||||
glog.V(4).Infof("Setting expectations %+v", exp)
|
||||
return r.Add(exp)
|
||||
}
|
||||
|
||||
func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) error {
|
||||
return r.SetExpectations(controllerKey, adds, 0)
|
||||
}
|
||||
|
||||
func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) error {
|
||||
return r.SetExpectations(controllerKey, 0, dels)
|
||||
}
|
||||
|
||||
// Decrements the expectation counts of the given controller.
|
||||
func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) {
|
||||
if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
|
||||
exp.Add(int64(-add), int64(-del))
|
||||
// The expectations might've been modified since the update on the previous line.
|
||||
glog.V(4).Infof("Lowered expectations %+v", exp)
|
||||
}
|
||||
}
|
||||
|
||||
// Increments the expectation counts of the given controller.
|
||||
func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) {
|
||||
if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
|
||||
exp.Add(int64(add), int64(del))
|
||||
// The expectations might've been modified since the update on the previous line.
|
||||
glog.V(4).Infof("Raised expectations %+v", exp)
|
||||
}
|
||||
}
|
||||
|
||||
// CreationObserved atomically decrements the `add` expecation count of the given controller.
|
||||
func (r *ControllerExpectations) CreationObserved(controllerKey string) {
|
||||
r.LowerExpectations(controllerKey, 1, 0)
|
||||
}
|
||||
|
||||
// DeletionObserved atomically decrements the `del` expectation count of the given controller.
|
||||
func (r *ControllerExpectations) DeletionObserved(controllerKey string) {
|
||||
r.LowerExpectations(controllerKey, 0, 1)
|
||||
}
|
||||
|
||||
// Expectations are either fulfilled, or expire naturally.
|
||||
type Expectations interface {
|
||||
Fulfilled() bool
|
||||
}
|
||||
|
||||
// ControlleeExpectations track controllee creates/deletes.
|
||||
type ControlleeExpectations struct {
|
||||
add int64
|
||||
del int64
|
||||
key string
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
// Add increments the add and del counters.
|
||||
func (e *ControlleeExpectations) Add(add, del int64) {
|
||||
atomic.AddInt64(&e.add, add)
|
||||
atomic.AddInt64(&e.del, del)
|
||||
}
|
||||
|
||||
// Fulfilled returns true if this expectation has been fulfilled.
|
||||
func (e *ControlleeExpectations) Fulfilled() bool {
|
||||
// TODO: think about why this line being atomic doesn't matter
|
||||
return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0
|
||||
}
|
||||
|
||||
// GetExpectations returns the add and del expectations of the controllee.
|
||||
func (e *ControlleeExpectations) GetExpectations() (int64, int64) {
|
||||
return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del)
|
||||
}
|
||||
|
||||
// NewControllerExpectations returns a store for ControllerExpectations.
|
||||
func NewControllerExpectations() *ControllerExpectations {
|
||||
return &ControllerExpectations{cache.NewStore(ExpKeyFunc)}
|
||||
}
|
||||
|
||||
// UIDSetKeyFunc to parse out the key from a UIDSet.
|
||||
var UIDSetKeyFunc = func(obj interface{}) (string, error) {
|
||||
if u, ok := obj.(*UIDSet); ok {
|
||||
return u.key, nil
|
||||
}
|
||||
return "", fmt.Errorf("Could not find key for obj %#v", obj)
|
||||
}
|
||||
|
||||
// UIDSet holds a key and a set of UIDs. Used by the
|
||||
// UIDTrackingControllerExpectations to remember which UID it has seen/still
|
||||
// waiting for.
|
||||
type UIDSet struct {
|
||||
sets.String
|
||||
key string
|
||||
}
|
||||
|
||||
// UIDTrackingControllerExpectations tracks the UID of the pods it deletes.
|
||||
// This cache is needed over plain old expectations to safely handle graceful
|
||||
// deletion. The desired behavior is to treat an update that sets the
|
||||
// DeletionTimestamp on an object as a delete. To do so consistenly, one needs
|
||||
// to remember the expected deletes so they aren't double counted.
|
||||
// TODO: Track creates as well (#22599)
|
||||
type UIDTrackingControllerExpectations struct {
|
||||
ControllerExpectationsInterface
|
||||
// TODO: There is a much nicer way to do this that involves a single store,
|
||||
// a lock per entry, and a ControlleeExpectationsInterface type.
|
||||
uidStoreLock sync.Mutex
|
||||
// Store used for the UIDs associated with any expectation tracked via the
|
||||
// ControllerExpectationsInterface.
|
||||
uidStore cache.Store
|
||||
}
|
||||
|
||||
// GetUIDs is a convenience method to avoid exposing the set of expected uids.
|
||||
// The returned set is not thread safe, all modifications must be made holding
|
||||
// the uidStoreLock.
|
||||
func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String {
|
||||
if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists {
|
||||
return uid.(*UIDSet).String
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExpectDeletions records expectations for the given deleteKeys, against the given controller.
|
||||
func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) error {
|
||||
u.uidStoreLock.Lock()
|
||||
defer u.uidStoreLock.Unlock()
|
||||
|
||||
if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
|
||||
glog.Errorf("Clobbering existing delete keys: %+v", existing)
|
||||
}
|
||||
expectedUIDs := sets.NewString()
|
||||
for _, k := range deletedKeys {
|
||||
expectedUIDs.Insert(k)
|
||||
}
|
||||
glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys)
|
||||
if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
|
||||
return err
|
||||
}
|
||||
return u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len())
|
||||
}
|
||||
|
||||
// DeletionObserved records the given deleteKey as a deletion, for the given rc.
|
||||
func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) {
|
||||
u.uidStoreLock.Lock()
|
||||
defer u.uidStoreLock.Unlock()
|
||||
|
||||
uids := u.GetUIDs(rcKey)
|
||||
if uids != nil && uids.Has(deleteKey) {
|
||||
glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey)
|
||||
u.ControllerExpectationsInterface.DeletionObserved(rcKey)
|
||||
uids.Delete(deleteKey)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the
|
||||
// underlying ControllerExpectationsInterface.
|
||||
func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) {
|
||||
u.uidStoreLock.Lock()
|
||||
defer u.uidStoreLock.Unlock()
|
||||
|
||||
u.ControllerExpectationsInterface.DeleteExpectations(rcKey)
|
||||
if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists {
|
||||
if err := u.uidStore.Delete(uidExp); err != nil {
|
||||
glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewUIDTrackingControllerExpectations returns a wrapper around
|
||||
// ControllerExpectations that is aware of deleteKeys.
|
||||
func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations {
|
||||
return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)}
|
||||
}
|
||||
|
||||
// PodControlInterface is an interface that knows how to add or delete pods
|
||||
// created as an interface to allow testing.
|
||||
type PodControlInterface interface {
|
||||
// CreatePods creates new pods according to the spec.
|
||||
CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error
|
||||
// CreatePodsOnNode creates a new pod accorting to the spec on the specified node.
|
||||
CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error
|
||||
// DeletePod deletes the pod identified by podID.
|
||||
DeletePod(namespace string, podID string, object runtime.Object) error
|
||||
}
|
||||
|
||||
// RealPodControl is the default implementation of PodControlInterface.
|
||||
type RealPodControl struct {
|
||||
KubeClient clientset.Interface
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
var _ PodControlInterface = &RealPodControl{}
|
||||
|
||||
func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set {
|
||||
desiredLabels := make(labels.Set)
|
||||
for k, v := range template.Labels {
|
||||
desiredLabels[k] = v
|
||||
}
|
||||
return desiredLabels
|
||||
}
|
||||
|
||||
func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
|
||||
desiredAnnotations := make(labels.Set)
|
||||
for k, v := range template.Annotations {
|
||||
desiredAnnotations[k] = v
|
||||
}
|
||||
createdByRef, err := api.GetReference(object)
|
||||
if err != nil {
|
||||
return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
|
||||
}
|
||||
|
||||
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
|
||||
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
|
||||
// We need to consistently handle this case of annotation versioning.
|
||||
codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"})
|
||||
|
||||
createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{
|
||||
Reference: *createdByRef,
|
||||
})
|
||||
if err != nil {
|
||||
return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
|
||||
}
|
||||
desiredAnnotations[CreatedByAnnotation] = string(createdByRefJson)
|
||||
return desiredAnnotations, nil
|
||||
}
|
||||
|
||||
func getPodsPrefix(controllerName string) string {
|
||||
// use the dash (if the name isn't too long) to make the pod name a bit prettier
|
||||
prefix := fmt.Sprintf("%s-", controllerName)
|
||||
if ok, _ := validation.ValidatePodName(prefix, true); !ok {
|
||||
prefix = controllerName
|
||||
}
|
||||
return prefix
|
||||
}
|
||||
|
||||
func (r RealPodControl) CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
|
||||
return r.createPods("", namespace, template, object)
|
||||
}
|
||||
|
||||
func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
|
||||
return r.createPods(nodeName, namespace, template, object)
|
||||
}
|
||||
|
||||
func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object) (*api.Pod, error) {
|
||||
desiredLabels := getPodsLabelSet(template)
|
||||
desiredAnnotations, err := getPodsAnnotationSet(template, parentObject)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
accessor, err := meta.Accessor(parentObject)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parentObject does not have ObjectMeta, %v", err)
|
||||
}
|
||||
prefix := getPodsPrefix(accessor.GetName())
|
||||
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Labels: desiredLabels,
|
||||
Annotations: desiredAnnotations,
|
||||
GenerateName: prefix,
|
||||
},
|
||||
}
|
||||
if err := api.Scheme.Convert(&template.Spec, &pod.Spec); err != nil {
|
||||
return nil, fmt.Errorf("unable to convert pod template: %v", err)
|
||||
}
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
|
||||
pod, err := GetPodFromTemplate(template, object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(nodeName) != 0 {
|
||||
pod.Spec.NodeName = nodeName
|
||||
}
|
||||
if labels.Set(pod.Labels).AsSelector().Empty() {
|
||||
return fmt.Errorf("unable to create pods, no labels")
|
||||
}
|
||||
if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
|
||||
r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err)
|
||||
return fmt.Errorf("unable to create pods: %v", err)
|
||||
} else {
|
||||
accessor, err := meta.Accessor(object)
|
||||
if err != nil {
|
||||
glog.Errorf("parentObject does not have ObjectMeta, %v", err)
|
||||
return nil
|
||||
}
|
||||
glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name)
|
||||
r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
|
||||
accessor, err := meta.Accessor(object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("object does not have ObjectMeta, %v", err)
|
||||
}
|
||||
if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
|
||||
r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err)
|
||||
return fmt.Errorf("unable to delete pods: %v", err)
|
||||
} else {
|
||||
glog.V(4).Infof("Controller %v deleted pod %v", accessor.GetName(), podID)
|
||||
r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulDelete", "Deleted pod: %v", podID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FakePodControl struct {
|
||||
sync.Mutex
|
||||
Templates []api.PodTemplateSpec
|
||||
DeletePodName []string
|
||||
Err error
|
||||
}
|
||||
|
||||
var _ PodControlInterface = &FakePodControl{}
|
||||
|
||||
func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec, object runtime.Object) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if f.Err != nil {
|
||||
return f.Err
|
||||
}
|
||||
f.Templates = append(f.Templates, *spec)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if f.Err != nil {
|
||||
return f.Err
|
||||
}
|
||||
f.Templates = append(f.Templates, *template)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if f.Err != nil {
|
||||
return f.Err
|
||||
}
|
||||
f.DeletePodName = append(f.DeletePodName, podID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakePodControl) Clear() {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.DeletePodName = []string{}
|
||||
f.Templates = []api.PodTemplateSpec{}
|
||||
}
|
||||
|
||||
// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
|
||||
type ActivePods []*api.Pod
|
||||
|
||||
func (s ActivePods) Len() int { return len(s) }
|
||||
func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
func (s ActivePods) Less(i, j int) bool {
|
||||
// 1. Unassigned < assigned
|
||||
// If only one of the pods is unassigned, the unassigned one is smaller
|
||||
if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
|
||||
return len(s[i].Spec.NodeName) == 0
|
||||
}
|
||||
// 2. PodPending < PodUnknown < PodRunning
|
||||
m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2}
|
||||
if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
|
||||
return m[s[i].Status.Phase] < m[s[j].Status.Phase]
|
||||
}
|
||||
// 3. Not ready < ready
|
||||
// If only one of the pods is not ready, the not ready one is smaller
|
||||
if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) {
|
||||
return !api.IsPodReady(s[i])
|
||||
}
|
||||
// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
|
||||
// see https://github.com/kubernetes/kubernetes/issues/22065
|
||||
// 4. Been ready for empty time < less time < more time
|
||||
// If both pods are ready, the latest ready one is smaller
|
||||
if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
|
||||
return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
|
||||
}
|
||||
// 5. Pods with containers with higher restart counts < lower restart counts
|
||||
if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
|
||||
return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
|
||||
}
|
||||
// 6. Empty creation time pods < newer pods < older pods
|
||||
if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
|
||||
return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// afterOrZero checks if time t1 is after time t2; if one of them
|
||||
// is zero, the zero time is seen as after non-zero time.
|
||||
func afterOrZero(t1, t2 unversioned.Time) bool {
|
||||
if t1.Time.IsZero() || t2.Time.IsZero() {
|
||||
return t1.Time.IsZero()
|
||||
}
|
||||
return t1.After(t2.Time)
|
||||
}
|
||||
|
||||
func podReadyTime(pod *api.Pod) unversioned.Time {
|
||||
if api.IsPodReady(pod) {
|
||||
for _, c := range pod.Status.Conditions {
|
||||
// we only care about pod ready conditions
|
||||
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
|
||||
return c.LastTransitionTime
|
||||
}
|
||||
}
|
||||
}
|
||||
return unversioned.Time{}
|
||||
}
|
||||
|
||||
func maxContainerRestarts(pod *api.Pod) int {
|
||||
maxRestarts := 0
|
||||
for _, c := range pod.Status.ContainerStatuses {
|
||||
maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
|
||||
}
|
||||
return maxRestarts
|
||||
}
|
||||
|
||||
// FilterActivePods returns pods that have not terminated.
|
||||
func FilterActivePods(pods []api.Pod) []*api.Pod {
|
||||
var result []*api.Pod
|
||||
for i := range pods {
|
||||
p := pods[i]
|
||||
if IsPodActive(p) {
|
||||
result = append(result, &p)
|
||||
} else {
|
||||
glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
|
||||
p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func IsPodActive(p api.Pod) bool {
|
||||
return api.PodSucceeded != p.Status.Phase &&
|
||||
api.PodFailed != p.Status.Phase &&
|
||||
p.DeletionTimestamp == nil
|
||||
}
|
||||
|
||||
// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods.
|
||||
func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet {
|
||||
active := []*extensions.ReplicaSet{}
|
||||
for i := range replicaSets {
|
||||
if replicaSets[i].Spec.Replicas > 0 {
|
||||
active = append(active, replicaSets[i])
|
||||
}
|
||||
}
|
||||
return active
|
||||
}
|
||||
|
||||
// PodKey returns a key unique to the given pod within a cluster.
|
||||
// It's used so we consistently use the same key scheme in this module.
|
||||
// It does exactly what cache.MetaNamespaceKeyFunc would have done
|
||||
// expcept there's not possibility for error since we know the exact type.
|
||||
func PodKey(pod *api.Pod) string {
|
||||
return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker.
|
||||
type ControllersByCreationTimestamp []*api.ReplicationController
|
||||
|
||||
func (o ControllersByCreationTimestamp) Len() int { return len(o) }
|
||||
func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
||||
func (o ControllersByCreationTimestamp) Less(i, j int) bool {
|
||||
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
|
||||
return o[i].Name < o[j].Name
|
||||
}
|
||||
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
|
||||
}
|
||||
|
||||
// ReplicaSetsByCreationTimestamp sorts a list of ReplicationSets by creation timestamp, using their names as a tie breaker.
|
||||
type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet
|
||||
|
||||
func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
|
||||
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
|
||||
|
||||
func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
|
||||
if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
|
||||
return o[i].Name < o[j].Name
|
||||
}
|
||||
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
|
||||
}
|
||||
19
vendor/k8s.io/kubernetes/pkg/controller/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/kubernetes/pkg/controller/doc.go
generated
vendored
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package controller contains code for controllers (like the replication
|
||||
// controller).
|
||||
package controller
|
||||
327
vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go
generated
vendored
Normal file
327
vendor/k8s.io/kubernetes/pkg/controller/framework/controller.go
generated
vendored
Normal file
|
|
@ -0,0 +1,327 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
)
|
||||
|
||||
// Config contains all the settings for a Controller.
|
||||
type Config struct {
|
||||
// The queue for your objects; either a cache.FIFO or
|
||||
// a cache.DeltaFIFO. Your Process() function should accept
|
||||
// the output of this Oueue's Pop() method.
|
||||
cache.Queue
|
||||
|
||||
// Something that can list and watch your objects.
|
||||
cache.ListerWatcher
|
||||
|
||||
// Something that can process your objects.
|
||||
Process ProcessFunc
|
||||
|
||||
// The type of your objects.
|
||||
ObjectType runtime.Object
|
||||
|
||||
// Reprocess everything at least this often.
|
||||
// Note that if it takes longer for you to clear the queue than this
|
||||
// period, you will end up processing items in the order determined
|
||||
// by cache.FIFO.Replace(). Currently, this is random. If this is a
|
||||
// problem, we can change that replacement policy to append new
|
||||
// things to the end of the queue instead of replacing the entire
|
||||
// queue.
|
||||
FullResyncPeriod time.Duration
|
||||
|
||||
// If true, when Process() returns an error, re-enqueue the object.
|
||||
// TODO: add interface to let you inject a delay/backoff or drop
|
||||
// the object completely if desired. Pass the object in
|
||||
// question to this interface as a parameter.
|
||||
RetryOnError bool
|
||||
}
|
||||
|
||||
// ProcessFunc processes a single object.
|
||||
type ProcessFunc func(obj interface{}) error
|
||||
|
||||
// Controller is a generic controller framework.
|
||||
type Controller struct {
|
||||
config Config
|
||||
reflector *cache.Reflector
|
||||
reflectorMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// TODO make the "Controller" private, and convert all references to use ControllerInterface instead
|
||||
type ControllerInterface interface {
|
||||
Run(stopCh <-chan struct{})
|
||||
HasSynced() bool
|
||||
}
|
||||
|
||||
// New makes a new Controller from the given Config.
|
||||
func New(c *Config) *Controller {
|
||||
ctlr := &Controller{
|
||||
config: *c,
|
||||
}
|
||||
return ctlr
|
||||
}
|
||||
|
||||
// Run begins processing items, and will continue until a value is sent down stopCh.
|
||||
// It's an error to call Run more than once.
|
||||
// Run blocks; call via go.
|
||||
func (c *Controller) Run(stopCh <-chan struct{}) {
|
||||
defer utilruntime.HandleCrash()
|
||||
r := cache.NewReflector(
|
||||
c.config.ListerWatcher,
|
||||
c.config.ObjectType,
|
||||
c.config.Queue,
|
||||
c.config.FullResyncPeriod,
|
||||
)
|
||||
|
||||
c.reflectorMutex.Lock()
|
||||
c.reflector = r
|
||||
c.reflectorMutex.Unlock()
|
||||
|
||||
r.RunUntil(stopCh)
|
||||
|
||||
wait.Until(c.processLoop, time.Second, stopCh)
|
||||
}
|
||||
|
||||
// Returns true once this controller has completed an initial resource listing
|
||||
func (c *Controller) HasSynced() bool {
|
||||
return c.config.Queue.HasSynced()
|
||||
}
|
||||
|
||||
// Requeue adds the provided object back into the queue if it does not already exist.
|
||||
func (c *Controller) Requeue(obj interface{}) error {
|
||||
return c.config.Queue.AddIfNotPresent(cache.Deltas{
|
||||
cache.Delta{
|
||||
Type: cache.Sync,
|
||||
Object: obj,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// processLoop drains the work queue.
|
||||
// TODO: Consider doing the processing in parallel. This will require a little thought
|
||||
// to make sure that we don't end up processing the same object multiple times
|
||||
// concurrently.
|
||||
func (c *Controller) processLoop() {
|
||||
for {
|
||||
obj := c.config.Queue.Pop()
|
||||
err := c.config.Process(obj)
|
||||
if err != nil {
|
||||
if c.config.RetryOnError {
|
||||
// This is the safe way to re-enqueue.
|
||||
c.config.Queue.AddIfNotPresent(obj)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ResourceEventHandler can handle notifications for events that happen to a
|
||||
// resource. The events are informational only, so you can't return an
|
||||
// error.
|
||||
// * OnAdd is called when an object is added.
|
||||
// * OnUpdate is called when an object is modified. Note that oldObj is the
|
||||
// last known state of the object-- it is possible that several changes
|
||||
// were combined together, so you can't use this to see every single
|
||||
// change. OnUpdate is also called when a re-list happens, and it will
|
||||
// get called even if nothing changed. This is useful for periodically
|
||||
// evaluating or syncing something.
|
||||
// * OnDelete will get the final state of the item if it is known, otherwise
|
||||
// it will get an object of type cache.DeletedFinalStateUnknown. This can
|
||||
// happen if the watch is closed and misses the delete event and we don't
|
||||
// notice the deletion until the subsequent re-list.
|
||||
type ResourceEventHandler interface {
|
||||
OnAdd(obj interface{})
|
||||
OnUpdate(oldObj, newObj interface{})
|
||||
OnDelete(obj interface{})
|
||||
}
|
||||
|
||||
// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or
|
||||
// as few of the notification functions as you want while still implementing
|
||||
// ResourceEventHandler.
|
||||
type ResourceEventHandlerFuncs struct {
|
||||
AddFunc func(obj interface{})
|
||||
UpdateFunc func(oldObj, newObj interface{})
|
||||
DeleteFunc func(obj interface{})
|
||||
}
|
||||
|
||||
// OnAdd calls AddFunc if it's not nil.
|
||||
func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
|
||||
if r.AddFunc != nil {
|
||||
r.AddFunc(obj)
|
||||
}
|
||||
}
|
||||
|
||||
// OnUpdate calls UpdateFunc if it's not nil.
|
||||
func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
|
||||
if r.UpdateFunc != nil {
|
||||
r.UpdateFunc(oldObj, newObj)
|
||||
}
|
||||
}
|
||||
|
||||
// OnDelete calls DeleteFunc if it's not nil.
|
||||
func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
|
||||
if r.DeleteFunc != nil {
|
||||
r.DeleteFunc(obj)
|
||||
}
|
||||
}
|
||||
|
||||
// DeletionHandlingMetaNamespaceKeyFunc checks for
|
||||
// cache.DeletedFinalStateUnknown objects before calling
|
||||
// cache.MetaNamespaceKeyFunc.
|
||||
func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
|
||||
if d, ok := obj.(cache.DeletedFinalStateUnknown); ok {
|
||||
return d.Key, nil
|
||||
}
|
||||
return cache.MetaNamespaceKeyFunc(obj)
|
||||
}
|
||||
|
||||
// NewInformer returns a cache.Store and a controller for populating the store
|
||||
// while also providing event notifications. You should only used the returned
|
||||
// cache.Store for Get/List operations; Add/Modify/Deletes will cause the event
|
||||
// notifications to be faulty.
|
||||
//
|
||||
// Parameters:
|
||||
// * lw is list and watch functions for the source of the resource you want to
|
||||
// be informed of.
|
||||
// * objType is an object of the type that you expect to receive.
|
||||
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
|
||||
// calls, even if nothing changed). Otherwise, re-list will be delayed as
|
||||
// long as possible (until the upstream source closes the watch or times out,
|
||||
// or you stop the controller).
|
||||
// * h is the object you want notifications sent to.
|
||||
//
|
||||
func NewInformer(
|
||||
lw cache.ListerWatcher,
|
||||
objType runtime.Object,
|
||||
resyncPeriod time.Duration,
|
||||
h ResourceEventHandler,
|
||||
) (cache.Store, *Controller) {
|
||||
// This will hold the client state, as we know it.
|
||||
clientState := cache.NewStore(DeletionHandlingMetaNamespaceKeyFunc)
|
||||
|
||||
// This will hold incoming changes. Note how we pass clientState in as a
|
||||
// KeyLister, that way resync operations will result in the correct set
|
||||
// of update/delete deltas.
|
||||
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
|
||||
|
||||
cfg := &Config{
|
||||
Queue: fifo,
|
||||
ListerWatcher: lw,
|
||||
ObjectType: objType,
|
||||
FullResyncPeriod: resyncPeriod,
|
||||
RetryOnError: false,
|
||||
|
||||
Process: func(obj interface{}) error {
|
||||
// from oldest to newest
|
||||
for _, d := range obj.(cache.Deltas) {
|
||||
switch d.Type {
|
||||
case cache.Sync, cache.Added, cache.Updated:
|
||||
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
|
||||
if err := clientState.Update(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnUpdate(old, d.Object)
|
||||
} else {
|
||||
if err := clientState.Add(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnAdd(d.Object)
|
||||
}
|
||||
case cache.Deleted:
|
||||
if err := clientState.Delete(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnDelete(d.Object)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return clientState, New(cfg)
|
||||
}
|
||||
|
||||
// NewIndexerInformer returns a cache.Indexer and a controller for populating the index
|
||||
// while also providing event notifications. You should only used the returned
|
||||
// cache.Index for Get/List operations; Add/Modify/Deletes will cause the event
|
||||
// notifications to be faulty.
|
||||
//
|
||||
// Parameters:
|
||||
// * lw is list and watch functions for the source of the resource you want to
|
||||
// be informed of.
|
||||
// * objType is an object of the type that you expect to receive.
|
||||
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
|
||||
// calls, even if nothing changed). Otherwise, re-list will be delayed as
|
||||
// long as possible (until the upstream source closes the watch or times out,
|
||||
// or you stop the controller).
|
||||
// * h is the object you want notifications sent to.
|
||||
//
|
||||
func NewIndexerInformer(
|
||||
lw cache.ListerWatcher,
|
||||
objType runtime.Object,
|
||||
resyncPeriod time.Duration,
|
||||
h ResourceEventHandler,
|
||||
indexers cache.Indexers,
|
||||
) (cache.Indexer, *Controller) {
|
||||
// This will hold the client state, as we know it.
|
||||
clientState := cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
|
||||
|
||||
// This will hold incoming changes. Note how we pass clientState in as a
|
||||
// KeyLister, that way resync operations will result in the correct set
|
||||
// of update/delete deltas.
|
||||
fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, clientState)
|
||||
|
||||
cfg := &Config{
|
||||
Queue: fifo,
|
||||
ListerWatcher: lw,
|
||||
ObjectType: objType,
|
||||
FullResyncPeriod: resyncPeriod,
|
||||
RetryOnError: false,
|
||||
|
||||
Process: func(obj interface{}) error {
|
||||
// from oldest to newest
|
||||
for _, d := range obj.(cache.Deltas) {
|
||||
switch d.Type {
|
||||
case cache.Sync, cache.Added, cache.Updated:
|
||||
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
|
||||
if err := clientState.Update(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnUpdate(old, d.Object)
|
||||
} else {
|
||||
if err := clientState.Add(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnAdd(d.Object)
|
||||
}
|
||||
case cache.Deleted:
|
||||
if err := clientState.Delete(d.Object); err != nil {
|
||||
return err
|
||||
}
|
||||
h.OnDelete(d.Object)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return clientState, New(cfg)
|
||||
}
|
||||
18
vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go
generated
vendored
Normal file
18
vendor/k8s.io/kubernetes/pkg/controller/framework/doc.go
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package framework implements all the grunt work involved in running a simple controller.
|
||||
package framework
|
||||
188
vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go
generated
vendored
Normal file
188
vendor/k8s.io/kubernetes/pkg/controller/framework/fake_controller_source.go
generated
vendored
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
func NewFakeControllerSource() *FakeControllerSource {
|
||||
return &FakeControllerSource{
|
||||
items: map[nnu]runtime.Object{},
|
||||
broadcaster: watch.NewBroadcaster(100, watch.WaitIfChannelFull),
|
||||
}
|
||||
}
|
||||
|
||||
// FakeControllerSource implements listing/watching for testing.
|
||||
type FakeControllerSource struct {
|
||||
lock sync.RWMutex
|
||||
items map[nnu]runtime.Object
|
||||
changes []watch.Event // one change per resourceVersion
|
||||
broadcaster *watch.Broadcaster
|
||||
}
|
||||
|
||||
// namespace, name, uid to be used as a key.
|
||||
type nnu struct {
|
||||
namespace, name string
|
||||
uid types.UID
|
||||
}
|
||||
|
||||
// Add adds an object to the set and sends an add event to watchers.
|
||||
// obj's ResourceVersion is set.
|
||||
func (f *FakeControllerSource) Add(obj runtime.Object) {
|
||||
f.Change(watch.Event{Type: watch.Added, Object: obj}, 1)
|
||||
}
|
||||
|
||||
// Modify updates an object in the set and sends a modified event to watchers.
|
||||
// obj's ResourceVersion is set.
|
||||
func (f *FakeControllerSource) Modify(obj runtime.Object) {
|
||||
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 1)
|
||||
}
|
||||
|
||||
// Delete deletes an object from the set and sends a delete event to watchers.
|
||||
// obj's ResourceVersion is set.
|
||||
func (f *FakeControllerSource) Delete(lastValue runtime.Object) {
|
||||
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 1)
|
||||
}
|
||||
|
||||
// AddDropWatch adds an object to the set but forgets to send an add event to
|
||||
// watchers.
|
||||
// obj's ResourceVersion is set.
|
||||
func (f *FakeControllerSource) AddDropWatch(obj runtime.Object) {
|
||||
f.Change(watch.Event{Type: watch.Added, Object: obj}, 0)
|
||||
}
|
||||
|
||||
// ModifyDropWatch updates an object in the set but forgets to send a modify
|
||||
// event to watchers.
|
||||
// obj's ResourceVersion is set.
|
||||
func (f *FakeControllerSource) ModifyDropWatch(obj runtime.Object) {
|
||||
f.Change(watch.Event{Type: watch.Modified, Object: obj}, 0)
|
||||
}
|
||||
|
||||
// DeleteDropWatch deletes an object from the set but forgets to send a delete
|
||||
// event to watchers.
|
||||
// obj's ResourceVersion is set.
|
||||
func (f *FakeControllerSource) DeleteDropWatch(lastValue runtime.Object) {
|
||||
f.Change(watch.Event{Type: watch.Deleted, Object: lastValue}, 0)
|
||||
}
|
||||
|
||||
func (f *FakeControllerSource) key(accessor meta.Object) nnu {
|
||||
return nnu{accessor.GetNamespace(), accessor.GetName(), accessor.GetUID()}
|
||||
}
|
||||
|
||||
// Change records the given event (setting the object's resource version) and
|
||||
// sends a watch event with the specified probability.
|
||||
func (f *FakeControllerSource) Change(e watch.Event, watchProbability float64) {
|
||||
f.lock.Lock()
|
||||
defer f.lock.Unlock()
|
||||
|
||||
accessor, err := meta.Accessor(e.Object)
|
||||
if err != nil {
|
||||
panic(err) // this is test code only
|
||||
}
|
||||
|
||||
resourceVersion := len(f.changes) + 1
|
||||
accessor.SetResourceVersion(strconv.Itoa(resourceVersion))
|
||||
f.changes = append(f.changes, e)
|
||||
key := f.key(accessor)
|
||||
switch e.Type {
|
||||
case watch.Added, watch.Modified:
|
||||
f.items[key] = e.Object
|
||||
case watch.Deleted:
|
||||
delete(f.items, key)
|
||||
}
|
||||
|
||||
if rand.Float64() < watchProbability {
|
||||
f.broadcaster.Action(e.Type, e.Object)
|
||||
}
|
||||
}
|
||||
|
||||
// List returns a list object, with its resource version set.
|
||||
func (f *FakeControllerSource) List(options api.ListOptions) (runtime.Object, error) {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
list := make([]runtime.Object, 0, len(f.items))
|
||||
for _, obj := range f.items {
|
||||
// Must make a copy to allow clients to modify the object.
|
||||
// Otherwise, if they make a change and write it back, they
|
||||
// will inadvertently change our canonical copy (in
|
||||
// addition to racing with other clients).
|
||||
objCopy, err := api.Scheme.DeepCopy(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list = append(list, objCopy.(runtime.Object))
|
||||
}
|
||||
listObj := &api.List{}
|
||||
if err := meta.SetList(listObj, list); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objMeta, err := api.ListMetaFor(listObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resourceVersion := len(f.changes)
|
||||
objMeta.ResourceVersion = strconv.Itoa(resourceVersion)
|
||||
return listObj, nil
|
||||
}
|
||||
|
||||
// Watch returns a watch, which will be pre-populated with all changes
|
||||
// after resourceVersion.
|
||||
func (f *FakeControllerSource) Watch(options api.ListOptions) (watch.Interface, error) {
|
||||
f.lock.RLock()
|
||||
defer f.lock.RUnlock()
|
||||
rc, err := strconv.Atoi(options.ResourceVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rc < len(f.changes) {
|
||||
changes := []watch.Event{}
|
||||
for _, c := range f.changes[rc:] {
|
||||
// Must make a copy to allow clients to modify the
|
||||
// object. Otherwise, if they make a change and write
|
||||
// it back, they will inadvertently change the our
|
||||
// canonical copy (in addition to racing with other
|
||||
// clients).
|
||||
objCopy, err := api.Scheme.DeepCopy(c.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
|
||||
}
|
||||
return f.broadcaster.WatchWithPrefix(changes), nil
|
||||
} else if rc > len(f.changes) {
|
||||
return nil, errors.New("resource version in the future not supported by this fake")
|
||||
}
|
||||
return f.broadcaster.Watch(), nil
|
||||
}
|
||||
|
||||
// Shutdown closes the underlying broadcaster, waiting for events to be
|
||||
// delivered. It's an error to call any method after calling shutdown. This is
|
||||
// enforced by Shutdown() leaving f locked.
|
||||
func (f *FakeControllerSource) Shutdown() {
|
||||
f.lock.Lock() // Purposely no unlock.
|
||||
f.broadcaster.Shutdown()
|
||||
}
|
||||
343
vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go
generated
vendored
Normal file
343
vendor/k8s.io/kubernetes/pkg/controller/framework/shared_informer.go
generated
vendored
Normal file
|
|
@ -0,0 +1,343 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
|
||||
)
|
||||
|
||||
// if you use this, there is one behavior change compared to a standard Informer.
|
||||
// When you receive a notification, the cache will be AT LEAST as fresh as the
|
||||
// notification, but it MAY be more fresh. You should NOT depend on the contents
|
||||
// of the cache exactly matching the notification you've received in handler
|
||||
// functions. If there was a create, followed by a delete, the cache may NOT
|
||||
// have your item. This has advantages over the broadcaster since it allows us
|
||||
// to share a common cache across many controllers. Extending the broadcaster
|
||||
// would have required us keep duplicate caches for each watch.
|
||||
type SharedInformer interface {
|
||||
// events to a single handler are delivered sequentially, but there is no coordination between different handlers
|
||||
// You may NOT add a handler *after* the SharedInformer is running. That will result in an error being returned.
|
||||
// TODO we should try to remove this restriction eventually.
|
||||
AddEventHandler(handler ResourceEventHandler) error
|
||||
GetStore() cache.Store
|
||||
// GetController gives back a synthetic interface that "votes" to start the informer
|
||||
GetController() ControllerInterface
|
||||
Run(stopCh <-chan struct{})
|
||||
HasSynced() bool
|
||||
}
|
||||
|
||||
type SharedIndexInformer interface {
|
||||
SharedInformer
|
||||
// AddIndexers add indexers to the informer before it starts.
|
||||
AddIndexers(indexers cache.Indexers) error
|
||||
GetIndexer() cache.Indexer
|
||||
}
|
||||
|
||||
// NewSharedInformer creates a new instance for the listwatcher.
|
||||
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
|
||||
// be shared amongst all consumers.
|
||||
func NewSharedInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
|
||||
sharedInformer := &sharedIndexInformer{
|
||||
processor: &sharedProcessor{},
|
||||
indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{}),
|
||||
listerWatcher: lw,
|
||||
objectType: objType,
|
||||
fullResyncPeriod: resyncPeriod,
|
||||
}
|
||||
return sharedInformer
|
||||
}
|
||||
|
||||
// NewSharedIndexInformer creates a new instance for the listwatcher.
|
||||
// TODO: create a cache/factory of these at a higher level for the list all, watch all of a given resource that can
|
||||
// be shared amongst all consumers.
|
||||
func NewSharedIndexInformer(lw cache.ListerWatcher, objType runtime.Object, resyncPeriod time.Duration, indexers cache.Indexers) SharedIndexInformer {
|
||||
sharedIndexInformer := &sharedIndexInformer{
|
||||
processor: &sharedProcessor{},
|
||||
indexer: cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
|
||||
listerWatcher: lw,
|
||||
objectType: objType,
|
||||
fullResyncPeriod: resyncPeriod,
|
||||
}
|
||||
return sharedIndexInformer
|
||||
}
|
||||
|
||||
type sharedIndexInformer struct {
	indexer    cache.Indexer
	controller *Controller

	processor *sharedProcessor

	// This block is tracked to handle late initialization of the controller
	listerWatcher    cache.ListerWatcher
	objectType       runtime.Object
	fullResyncPeriod time.Duration

	started     bool
	startedLock sync.Mutex
}

// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
	informer *sharedIndexInformer
}

func (v *dummyController) Run(stopCh <-chan struct{}) {
}

func (v *dummyController) HasSynced() bool {
	return v.informer.HasSynced()
}

type updateNotification struct {
	oldObj interface{}
	newObj interface{}
}

type addNotification struct {
	newObj interface{}
}

type deleteNotification struct {
	oldObj interface{}
}

func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.indexer)

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.fullResyncPeriod,
		RetryOnError:     false,

		Process: s.HandleDeltas,
	}
	s.controller = New(cfg)

	func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()
		s.started = true
	}()

	s.processor.run(stopCh)
	s.controller.Run(stopCh)
}

func (s *sharedIndexInformer) isStarted() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	return s.started
}

func (s *sharedIndexInformer) HasSynced() bool {
	return s.controller.HasSynced()
}

func (s *sharedIndexInformer) GetStore() cache.Store {
	return s.indexer
}

func (s *sharedIndexInformer) GetIndexer() cache.Indexer {
	return s.indexer
}

func (s *sharedIndexInformer) AddIndexers(indexers cache.Indexers) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.started {
		return fmt.Errorf("informer has already started")
	}

	oldIndexers := s.indexer.GetIndexers()

	for name, indexFunc := range oldIndexers {
		if _, exist := indexers[name]; exist {
			return fmt.Errorf("an index named %s already exists", name)
		}
		indexers[name] = indexFunc
	}

	s.indexer = cache.NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
	return nil
}

func (s *sharedIndexInformer) GetController() ControllerInterface {
	return &dummyController{informer: s}
}

func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.started {
		return fmt.Errorf("informer has already started")
	}

	listener := newProcessListener(handler)
	s.processor.listeners = append(s.processor.listeners, listener)
	return nil
}

func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
	// from oldest to newest
	for _, d := range obj.(cache.Deltas) {
		switch d.Type {
		case cache.Sync, cache.Added, cache.Updated:
			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
				if err := s.indexer.Update(d.Object); err != nil {
					return err
				}
				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object})
			} else {
				if err := s.indexer.Add(d.Object); err != nil {
					return err
				}
				s.processor.distribute(addNotification{newObj: d.Object})
			}
		case cache.Deleted:
			if err := s.indexer.Delete(d.Object); err != nil {
				return err
			}
			s.processor.distribute(deleteNotification{oldObj: d.Object})
		}
	}
	return nil
}

type sharedProcessor struct {
	listeners []*processorListener
}

func (p *sharedProcessor) distribute(obj interface{}) {
	for _, listener := range p.listeners {
		listener.add(obj)
	}
}

func (p *sharedProcessor) run(stopCh <-chan struct{}) {
	for _, listener := range p.listeners {
		go listener.run(stopCh)
		go listener.pop(stopCh)
	}
}

type processorListener struct {
	// lock/cond protects access to 'pendingNotifications'.
	lock sync.RWMutex
	cond sync.Cond

	// pendingNotifications is an unbounded slice that holds all notifications not yet distributed.
	// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better.
	pendingNotifications []interface{}

	nextCh chan interface{}

	handler ResourceEventHandler
}

func newProcessListener(handler ResourceEventHandler) *processorListener {
	ret := &processorListener{
		pendingNotifications: []interface{}{},
		nextCh:               make(chan interface{}),
		handler:              handler,
	}

	ret.cond.L = &ret.lock
	return ret
}

func (p *processorListener) add(notification interface{}) {
	p.lock.Lock()
	defer p.lock.Unlock()

	p.pendingNotifications = append(p.pendingNotifications, notification)
	p.cond.Broadcast()
}

func (p *processorListener) pop(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	p.lock.Lock()
	defer p.lock.Unlock()
	for {
		for len(p.pendingNotifications) == 0 {
			// check if we're shutdown
			select {
			case <-stopCh:
				return
			default:
			}

			p.cond.Wait()
		}
		notification := p.pendingNotifications[0]
		p.pendingNotifications = p.pendingNotifications[1:]

		select {
		case <-stopCh:
			return
		case p.nextCh <- notification:
		}
	}
}

func (p *processorListener) run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	for {
		var next interface{}
		select {
		case <-stopCh:
			func() {
				p.lock.Lock()
				defer p.lock.Unlock()
				p.cond.Broadcast()
			}()
			return
		case next = <-p.nextCh:
		}

		switch notification := next.(type) {
		case updateNotification:
			p.handler.OnUpdate(notification.oldObj, notification.newObj)
		case addNotification:
			p.handler.OnAdd(notification.newObj)
		case deleteNotification:
			p.handler.OnDelete(notification.oldObj)
		default:
			utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
		}
	}
}

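A minimal sketch (not part of the vendored file) of a handler implementing ResourceEventHandler directly, written as if it lived in the same package: HandleDeltas above turns DeltaFIFO entries into add/update/delete notifications, and each registered handler receives them sequentially on its own processorListener goroutine. The use of glog for output is an assumption for the example.

// Illustrative only: logs every notification delivered by the shared informer.
type loggingHandler struct{}

func (loggingHandler) OnAdd(obj interface{}) {
	glog.V(4).Infof("add: %#v", obj)
}

func (loggingHandler) OnUpdate(oldObj, newObj interface{}) {
	glog.V(4).Infof("update: %#v -> %#v", oldObj, newObj)
}

func (loggingHandler) OnDelete(obj interface{}) {
	glog.V(4).Infof("delete: %#v", obj)
}
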
90
vendor/k8s.io/kubernetes/pkg/controller/lookup_cache.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"hash/adler32"
	"sync"

	"github.com/golang/groupcache/lru"
	"k8s.io/kubernetes/pkg/api/meta"
	hashutil "k8s.io/kubernetes/pkg/util/hash"
)

type objectWithMeta interface {
	meta.Object
}

// keyFunc returns the key of an object, which is used to look up its matching object in the cache.
// Since we match objects by namespace and labels/selector, two objects with the same namespace and labels
// will have the same key.
func keyFunc(obj objectWithMeta) uint64 {
	hash := adler32.New()
	hashutil.DeepHashObject(hash, &equivalenceLabelObj{
		namespace: obj.GetNamespace(),
		labels:    obj.GetLabels(),
	})
	return uint64(hash.Sum32())
}

type equivalenceLabelObj struct {
	namespace string
	labels    map[string]string
}

// MatchingCache saves the label and selector matching relationship.
type MatchingCache struct {
	mutex sync.RWMutex
	cache *lru.Cache
}

// NewMatchingCache returns a new MatchingCache, which saves the label and selector matching relationship.
func NewMatchingCache(maxCacheEntries int) *MatchingCache {
	return &MatchingCache{
		cache: lru.New(maxCacheEntries),
	}
}

// Add will add matching information to the cache.
func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) {
	key := keyFunc(labelObj)
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.cache.Add(key, selectorObj)
}

// GetMatchingObject looks up the matching object for a given object.
// Note: the cached information may be invalid since the controller may have been deleted or updated;
// callers must check in the external request to ensure the cached data is not dirty.
func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) {
	key := keyFunc(labelObj)
	c.mutex.Lock()
	defer c.mutex.Unlock()
	return c.cache.Get(key)
}

// Update updates the cached matching information.
func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) {
	c.Add(labelObj, selectorObj)
}

// InvalidateAll invalidates the whole cache.
func (c *MatchingCache) InvalidateAll() {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.cache = lru.New(c.cache.MaxEntries)
}

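A minimal usage sketch (not part of the vendored file) of the MatchingCache above, written as if it lived in the same package. The pod and replication controller arguments are assumptions for illustration: the pod is the "label object" and the controller whose selector matched it is the "selector object". Because keyFunc hashes only namespace and labels, a returned entry may be stale and must be re-validated by the caller.

// Illustrative only: record a pod/controller match and read it back.
func rememberMatch(mc *MatchingCache, pod *api.Pod, rc *api.ReplicationController) *api.ReplicationController {
	mc.Add(pod, rc) // remember that rc's selector matched pod's labels
	if obj, ok := mc.GetMatchingObject(pod); ok {
		// The entry may be dirty (rc could have been updated or deleted since),
		// so callers re-check rc's selector against pod's labels before using it.
		if cached, isRC := obj.(*api.ReplicationController); isRC {
			return cached
		}
	}
	return nil
}
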
19
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/doc.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package podautoscaler contains logic for autoscaling the number of
// pods based on observed metrics.
package podautoscaler

403
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/horizontal.go
generated
vendored
Normal file
@ -0,0 +1,403 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podautoscaler

import (
	"encoding/json"
	"fmt"
	"math"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/cache"
	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
	unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/unversioned"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
	"k8s.io/kubernetes/pkg/runtime"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

const (
	// Usage should exceed the tolerance before we start to downscale or upscale the pods.
	// TODO: make it a flag or HPA spec element.
	tolerance = 0.1

	defaultTargetCPUUtilizationPercentage = 80

	HpaCustomMetricsTargetAnnotationName = "alpha/target.custom-metrics.podautoscaler.kubernetes.io"
	HpaCustomMetricsStatusAnnotationName = "alpha/status.custom-metrics.podautoscaler.kubernetes.io"
)

type HorizontalController struct {
	scaleNamespacer unversionedextensions.ScalesGetter
	hpaNamespacer   unversionedextensions.HorizontalPodAutoscalersGetter

	metricsClient metrics.MetricsClient
	eventRecorder record.EventRecorder

	// A store of HPA objects, populated by the controller.
	store cache.Store
	// Watches changes to all HPA objects.
	controller *framework.Controller
}

var downscaleForbiddenWindow = 5 * time.Minute
var upscaleForbiddenWindow = 3 * time.Minute

func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *framework.Controller) {
	return framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).Watch(options)
			},
		},
		&extensions.HorizontalPodAutoscaler{},
		resyncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				hpa := obj.(*extensions.HorizontalPodAutoscaler)
				hasCPUPolicy := hpa.Spec.CPUUtilization != nil
				_, hasCustomMetricsPolicy := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
				if !hasCPUPolicy && !hasCustomMetricsPolicy {
					controller.eventRecorder.Event(hpa, api.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details")
				}
				err := controller.reconcileAutoscaler(hpa)
				if err != nil {
					glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
				}
			},
			UpdateFunc: func(old, cur interface{}) {
				hpa := cur.(*extensions.HorizontalPodAutoscaler)
				err := controller.reconcileAutoscaler(hpa)
				if err != nil {
					glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
				}
			},
			// We are not interested in deletions.
		},
	)
}

func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: evtNamespacer.Events("")})
	recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})

	controller := &HorizontalController{
		metricsClient:   metricsClient,
		eventRecorder:   recorder,
		scaleNamespacer: scaleNamespacer,
		hpaNamespacer:   hpaNamespacer,
	}
	store, frameworkController := newInformer(controller, resyncPeriod)
	controller.store = store
	controller.controller = frameworkController

	return controller
}

func (a *HorizontalController) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	glog.Infof("Starting HPA Controller")
	go a.controller.Run(stopCh)
	<-stopCh
	glog.Infof("Shutting down HPA Controller")
}

func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int32, *int32, time.Time, error) {
	targetUtilization := int32(defaultTargetCPUUtilizationPercentage)
	if hpa.Spec.CPUUtilization != nil {
		targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage
	}
	currentReplicas := scale.Status.Replicas

	if scale.Status.Selector == nil {
		errMsg := "selector is required"
		a.eventRecorder.Event(hpa, api.EventTypeWarning, "SelectorRequired", errMsg)
		return 0, nil, time.Time{}, fmt.Errorf(errMsg)
	}

	selector, err := unversioned.LabelSelectorAsSelector(scale.Status.Selector)
	if err != nil {
		errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
		a.eventRecorder.Event(hpa, api.EventTypeWarning, "InvalidSelector", errMsg)
		return 0, nil, time.Time{}, fmt.Errorf(errMsg)
	}
	currentUtilization, timestamp, err := a.metricsClient.GetCPUUtilization(hpa.Namespace, selector)

	// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
	if err != nil {
		a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
		return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err)
	}

	utilization := int32(*currentUtilization)

	usageRatio := float64(utilization) / float64(targetUtilization)
	if math.Abs(1.0-usageRatio) > tolerance {
		return int32(math.Ceil(usageRatio * float64(currentReplicas))), &utilization, timestamp, nil
	} else {
		return currentReplicas, &utilization, timestamp, nil
	}
}

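The function above proposes ceil(usageRatio * currentReplicas) whenever the usage ratio strays more than the 10% tolerance from the target, and leaves the replica count alone otherwise. A self-contained sketch (not part of the vendored file) of that arithmetic with concrete example numbers:

// Illustrative only: the replica proposal math used above, standalone.
package main

import (
	"fmt"
	"math"
)

func proposeReplicas(currentReplicas, currentUtilization, targetUtilization int32) int32 {
	const tolerance = 0.1
	usageRatio := float64(currentUtilization) / float64(targetUtilization)
	if math.Abs(1.0-usageRatio) <= tolerance {
		return currentReplicas // within tolerance: keep the current size
	}
	return int32(math.Ceil(usageRatio * float64(currentReplicas)))
}

func main() {
	// 4 replicas at 90% average CPU against an 80% target:
	// usageRatio = 1.125, proposal = ceil(1.125 * 4) = 5.
	fmt.Println(proposeReplicas(4, 90, 80)) // 5
	// 4 replicas at 82%: |1 - 1.025| <= 0.1, so no change.
	fmt.Println(proposeReplicas(4, 82, 80)) // 4
}
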
// computeReplicasForCustomMetrics computes the desired number of replicas based on the CustomMetrics passed in
// cmAnnotation as a json-serialized extensions.CustomMetricsTargetList.
// Returns the number of replicas, the metric which required the highest number of replicas,
// a status string (also a json-serialized extensions.CustomMetricsCurrentStatusList),
// the last timestamp of the metrics involved in the computations, or an error if one occurred.
func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
	cmAnnotation string) (replicas int32, metric string, status string, timestamp time.Time, err error) {

	currentReplicas := scale.Status.Replicas
	replicas = 0
	metric = ""
	status = ""
	timestamp = time.Time{}
	err = nil

	if cmAnnotation == "" {
		return
	}

	var targetList extensions.CustomMetricTargetList
	if err := json.Unmarshal([]byte(cmAnnotation), &targetList); err != nil {
		return 0, "", "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err)
	}
	if len(targetList.Items) == 0 {
		return 0, "", "", time.Time{}, fmt.Errorf("no custom metrics in annotation")
	}

	statusList := extensions.CustomMetricCurrentStatusList{
		Items: make([]extensions.CustomMetricCurrentStatus, 0),
	}

	for _, customMetricTarget := range targetList.Items {
		if scale.Status.Selector == nil {
			errMsg := "selector is required"
			a.eventRecorder.Event(hpa, api.EventTypeWarning, "SelectorRequired", errMsg)
			return 0, "", "", time.Time{}, fmt.Errorf("selector is required")
		}

		selector, err := unversioned.LabelSelectorAsSelector(scale.Status.Selector)
		if err != nil {
			errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
			a.eventRecorder.Event(hpa, api.EventTypeWarning, "InvalidSelector", errMsg)
			return 0, "", "", time.Time{}, fmt.Errorf("couldn't convert selector string to a corresponding selector object: %v", err)
		}
		value, currentTimestamp, err := a.metricsClient.GetCustomMetric(customMetricTarget.Name, hpa.Namespace, selector)
		// TODO: what to do on partial errors (like metrics obtained for 75% of pods).
		if err != nil {
			a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
			return 0, "", "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
		}
		floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
		usageRatio := *value / floatTarget

		replicaCountProposal := int32(0)
		if math.Abs(1.0-usageRatio) > tolerance {
			replicaCountProposal = int32(math.Ceil(usageRatio * float64(currentReplicas)))
		} else {
			replicaCountProposal = currentReplicas
		}
		if replicaCountProposal > replicas {
			timestamp = currentTimestamp
			replicas = replicaCountProposal
			metric = fmt.Sprintf("Custom metric %s", customMetricTarget.Name)
		}
		quantity, err := resource.ParseQuantity(fmt.Sprintf("%.3f", *value))
		if err != nil {
			return 0, "", "", time.Time{}, fmt.Errorf("failed to set custom metric value: %v", err)
		}
		statusList.Items = append(statusList.Items, extensions.CustomMetricCurrentStatus{
			Name:         customMetricTarget.Name,
			CurrentValue: *quantity,
		})
	}
	byteStatusList, err := json.Marshal(statusList)
	if err != nil {
		return 0, "", "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err)
	}

	return replicas, metric, string(byteStatusList), timestamp, nil
}

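A minimal sketch (not part of the vendored file) of how the annotation parsed above could be produced, written as if it lived in the same package and using the same types the function unmarshals into. The metric name "qps" and the target of 10 are made-up example values.

// Illustrative only: serialize a custom-metric target into the HPA annotation.
func setCustomMetricTarget(hpa *extensions.HorizontalPodAutoscaler) error {
	target := extensions.CustomMetricTargetList{
		Items: []extensions.CustomMetricTarget{
			{Name: "qps", TargetValue: resource.MustParse("10")},
		},
	}
	raw, err := json.Marshal(&target)
	if err != nil {
		return err
	}
	if hpa.Annotations == nil {
		hpa.Annotations = map[string]string{}
	}
	hpa.Annotations[HpaCustomMetricsTargetAnnotationName] = string(raw)
	return nil
}
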
func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPodAutoscaler) error {
	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Namespace, hpa.Spec.ScaleRef.Name)

	scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
	if err != nil {
		a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
	}
	currentReplicas := scale.Status.Replicas

	cpuDesiredReplicas := int32(0)
	var cpuCurrentUtilization *int32 = nil
	cpuTimestamp := time.Time{}

	cmDesiredReplicas := int32(0)
	cmMetric := ""
	cmStatus := ""
	cmTimestamp := time.Time{}

	desiredReplicas := int32(0)
	rescaleReason := ""
	timestamp := time.Now()

	if currentReplicas > hpa.Spec.MaxReplicas {
		rescaleReason = "Current number of replicas above Spec.MaxReplicas"
		desiredReplicas = hpa.Spec.MaxReplicas
	} else if hpa.Spec.MinReplicas != nil && currentReplicas < *hpa.Spec.MinReplicas {
		rescaleReason = "Current number of replicas below Spec.MinReplicas"
		desiredReplicas = *hpa.Spec.MinReplicas
	} else if currentReplicas == 0 {
		rescaleReason = "Current number of replicas must be greater than 0"
		desiredReplicas = 1
	} else {
		// All basic scenarios covered, the state should be sane, let's use metrics.
		cmAnnotation, cmAnnotationFound := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]

		if hpa.Spec.CPUUtilization != nil || !cmAnnotationFound {
			cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale)
			if err != nil {
				a.updateCurrentReplicasInStatus(hpa, currentReplicas)
				a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
				return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
			}
		}

		if cmAnnotationFound {
			cmDesiredReplicas, cmMetric, cmStatus, cmTimestamp, err = a.computeReplicasForCustomMetrics(hpa, scale, cmAnnotation)
			if err != nil {
				a.updateCurrentReplicasInStatus(hpa, currentReplicas)
				a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedComputeCMReplicas", err.Error())
				return fmt.Errorf("failed to compute desired number of replicas based on Custom Metrics for %s: %v", reference, err)
			}
		}

		rescaleMetric := ""
		if cpuDesiredReplicas > desiredReplicas {
			desiredReplicas = cpuDesiredReplicas
			timestamp = cpuTimestamp
			rescaleMetric = "CPU utilization"
		}
		if cmDesiredReplicas > desiredReplicas {
			desiredReplicas = cmDesiredReplicas
			timestamp = cmTimestamp
			rescaleMetric = cmMetric
		}
		if desiredReplicas > currentReplicas {
			rescaleReason = fmt.Sprintf("%s above target", rescaleMetric)
		} else if desiredReplicas < currentReplicas {
			rescaleReason = "All metrics below target"
		}

		if hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas {
			desiredReplicas = *hpa.Spec.MinReplicas
		}

		// TODO: remove when pod idling is done.
		if desiredReplicas == 0 {
			desiredReplicas = 1
		}

		if desiredReplicas > hpa.Spec.MaxReplicas {
			desiredReplicas = hpa.Spec.MaxReplicas
		}
	}

	rescale := shouldScale(hpa, currentReplicas, desiredReplicas, timestamp)
	if rescale {
		scale.Spec.Replicas = desiredReplicas
		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
		if err != nil {
			a.eventRecorder.Eventf(hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
			return fmt.Errorf("failed to rescale %s: %v", reference, err)
		}
		a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
		glog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
			hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
	} else {
		desiredReplicas = currentReplicas
	}

	return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
}

func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool {
	if desiredReplicas != currentReplicas {
		// Going down only if the usageRatio dropped significantly below the target
		// and there was no rescaling in the last downscaleForbiddenWindow.
		if desiredReplicas < currentReplicas &&
			(hpa.Status.LastScaleTime == nil ||
				hpa.Status.LastScaleTime.Add(downscaleForbiddenWindow).Before(timestamp)) {
			return true
		}

		// Going up only if the usage ratio increased significantly above the target
		// and there was no rescaling in the last upscaleForbiddenWindow.
		if desiredReplicas > currentReplicas &&
			(hpa.Status.LastScaleTime == nil ||
				hpa.Status.LastScaleTime.Add(upscaleForbiddenWindow).Before(timestamp)) {
			return true
		}
	}
	return false
}

func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int32) {
	err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
	if err != nil {
		glog.Errorf("%v", err)
	}
}

func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, cpuCurrentUtilization *int32, cmStatus string, rescale bool) error {
	hpa.Status = extensions.HorizontalPodAutoscalerStatus{
		CurrentReplicas:                 currentReplicas,
		DesiredReplicas:                 desiredReplicas,
		CurrentCPUUtilizationPercentage: cpuCurrentUtilization,
		LastScaleTime:                   hpa.Status.LastScaleTime,
	}
	if cmStatus != "" {
		hpa.Annotations[HpaCustomMetricsStatusAnnotationName] = cmStatus
	}

	if rescale {
		now := unversioned.NewTime(time.Now())
		hpa.Status.LastScaleTime = &now
	}

	_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa)
	if err != nil {
		a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
	}
	glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
	return nil
}

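A minimal wiring sketch (not part of the vendored file) for the controller above, assuming a generated clientset `kubeClient` whose Core() and Extensions() groups expose the events, scales, and horizontalpodautoscalers getters, and a metrics client built with NewHeapsterMetricsClient from the metrics package below. The resync period is arbitrary for the example.

// Illustrative only: construct and start the HPA controller.
func startHPAController(kubeClient clientset.Interface, metricsClient metrics.MetricsClient, stopCh <-chan struct{}) {
	hpaController := podautoscaler.NewHorizontalController(
		kubeClient.Core(),       // events
		kubeClient.Extensions(), // scales subresource
		kubeClient.Extensions(), // horizontalpodautoscalers
		metricsClient,
		30*time.Second,
	)
	go hpaController.Run(stopCh)
}
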
268
vendor/k8s.io/kubernetes/pkg/controller/podautoscaler/metrics/metrics_client.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/labels"

	heapster "k8s.io/heapster/metrics/api/v1/types"
)

const (
	DefaultHeapsterNamespace = "kube-system"
	DefaultHeapsterScheme    = "http"
	DefaultHeapsterService   = "heapster"
	DefaultHeapsterPort      = "" // use the first exposed port on the service
)

var heapsterQueryStart = -5 * time.Minute

// MetricsClient is an interface for getting metrics for pods.
type MetricsClient interface {
	// GetCPUUtilization returns the average utilization over all pods represented as a percent of requested CPU
	// (e.g. 70 means that an average pod uses 70% of the requested CPU)
	// and the time of generation of the oldest of the utilization reports for pods.
	GetCPUUtilization(namespace string, selector labels.Selector) (*int, time.Time, error)

	// GetCustomMetric returns the average value of the given custom metrics from the
	// pods picked using the namespace and selector passed as arguments.
	GetCustomMetric(customMetricName string, namespace string, selector labels.Selector) (*float64, time.Time, error)
}

type intAndFloat struct {
	intValue   int64
	floatValue float64
}

// metricAggregator aggregates results into ResourceConsumption. It also returns the number of pods included in the aggregation.
type metricAggregator func(heapster.MetricResultList) (intAndFloat, int, time.Time)

type metricDefinition struct {
	name       string
	aggregator metricAggregator
}

// HeapsterMetricsClient is a Heapster-based implementation of MetricsClient.
type HeapsterMetricsClient struct {
	client            clientset.Interface
	heapsterNamespace string
	heapsterScheme    string
	heapsterService   string
	heapsterPort      string
}

var averageFunction = func(metrics heapster.MetricResultList) (intAndFloat, int, time.Time) {
	sum, count, timestamp := calculateSumFromTimeSample(metrics, time.Minute)
	result := intAndFloat{0, 0}
	if count > 0 {
		result.intValue = sum.intValue / int64(count)
		result.floatValue = sum.floatValue / float64(count)
	}
	return result, count, timestamp
}

var heapsterCpuUsageMetricDefinition = metricDefinition{"cpu-usage", averageFunction}

func getHeapsterCustomMetricDefinition(metricName string) metricDefinition {
	return metricDefinition{"custom/" + metricName, averageFunction}
}

// NewHeapsterMetricsClient returns a new instance of the Heapster-based implementation of the MetricsClient interface.
func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) *HeapsterMetricsClient {
	return &HeapsterMetricsClient{
		client:            client,
		heapsterNamespace: namespace,
		heapsterScheme:    scheme,
		heapsterService:   service,
		heapsterPort:      port,
	}
}

func (h *HeapsterMetricsClient) GetCPUUtilization(namespace string, selector labels.Selector) (*int, time.Time, error) {
	avgConsumption, avgRequest, timestamp, err := h.GetCpuConsumptionAndRequestInMillis(namespace, selector)
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get CPU consumption and request: %v", err)
	}
	utilization := int((avgConsumption * 100) / avgRequest)
	return &utilization, timestamp, nil
}

func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace string, selector labels.Selector) (avgConsumption int64,
	avgRequest int64, timestamp time.Time, err error) {

	podList, err := h.client.Core().Pods(namespace).
		List(api.ListOptions{LabelSelector: selector})

	if err != nil {
		return 0, 0, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	requestSum := int64(0)
	missing := false
	for _, pod := range podList.Items {
		if pod.Status.Phase == api.PodPending {
			// Skip pending pods.
			continue
		}

		podNames = append(podNames, pod.Name)
		for _, container := range pod.Spec.Containers {
			containerRequest := container.Resources.Requests[api.ResourceCPU]
			if containerRequest.Amount != nil {
				requestSum += containerRequest.MilliValue()
			} else {
				missing = true
			}
		}
	}
	if len(podNames) == 0 && len(podList.Items) > 0 {
		return 0, 0, time.Time{}, fmt.Errorf("no running pods")
	}
	if missing || requestSum == 0 {
		return 0, 0, time.Time{}, fmt.Errorf("some pods do not have request for cpu")
	}
	glog.V(4).Infof("%s %s - sum of CPU requested: %d", namespace, selector, requestSum)
	requestAvg := requestSum / int64(len(podList.Items))
	// Consumption is already averaged and in millis.
	consumption, timestamp, err := h.getForPods(heapsterCpuUsageMetricDefinition, namespace, podNames)
	if err != nil {
		return 0, 0, time.Time{}, err
	}
	return consumption.intValue, requestAvg, timestamp, nil
}

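GetCPUUtilization above turns the two averages returned by GetCpuConsumptionAndRequestInMillis into a percentage with integer arithmetic: utilization = (avgConsumption * 100) / avgRequest. A tiny sketch (not part of the vendored file) with concrete numbers, both inputs in millicores:

// Illustrative only: the percentage math used by GetCPUUtilization.
func utilizationPercent(avgConsumptionMillis, avgRequestMillis int64) int {
	return int((avgConsumptionMillis * 100) / avgRequestMillis)
}

// utilizationPercent(200, 500) == 40  (pods use 40% of what they request)
// utilizationPercent(750, 500) == 150 (pods use 1.5x what they request)
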
// GetCustomMetric returns the average value of the given custom metric from the
// pods picked using the namespace and selector passed as arguments.
func (h *HeapsterMetricsClient) GetCustomMetric(customMetricName string, namespace string, selector labels.Selector) (*float64, time.Time, error) {
	metricSpec := getHeapsterCustomMetricDefinition(customMetricName)

	podList, err := h.client.Core().Pods(namespace).List(api.ListOptions{LabelSelector: selector})

	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get pod list: %v", err)
	}
	podNames := []string{}
	for _, pod := range podList.Items {
		if pod.Status.Phase == api.PodPending {
			// Skip pending pods.
			continue
		}
		podNames = append(podNames, pod.Name)
	}
	if len(podNames) == 0 && len(podList.Items) > 0 {
		return nil, time.Time{}, fmt.Errorf("no running pods")
	}

	value, timestamp, err := h.getForPods(metricSpec, namespace, podNames)
	if err != nil {
		return nil, time.Time{}, err
	}
	return &value.floatValue, timestamp, nil
}

func (h *HeapsterMetricsClient) getForPods(metricSpec metricDefinition, namespace string, podNames []string) (*intAndFloat, time.Time, error) {

	now := time.Now()

	startTime := now.Add(heapsterQueryStart)
	metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
		namespace,
		strings.Join(podNames, ","),
		metricSpec.name)

	resultRaw, err := h.client.Core().Services(h.heapsterNamespace).
		ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
		DoRaw()

	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to get pods metrics: %v", err)
	}

	var metrics heapster.MetricResultList
	err = json.Unmarshal(resultRaw, &metrics)
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
	}

	glog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

	sum, count, timestamp := metricSpec.aggregator(metrics)
	if count != len(podNames) {
		return nil, time.Time{}, fmt.Errorf("metrics obtained for %d/%d of pods", count, len(podNames))
	}

	return &sum, timestamp, nil
}

func calculateSumFromTimeSample(metrics heapster.MetricResultList, duration time.Duration) (sum intAndFloat, count int, timestamp time.Time) {
	sum = intAndFloat{0, 0}
	count = 0
	timestamp = time.Time{}
	var oldest *time.Time // creation time of the oldest of the used samples across pods
	oldest = nil
	for _, metrics := range metrics.Items {
		var newest *heapster.MetricPoint // creation time of the newest sample for the pod
		newest = nil
		for i, metricPoint := range metrics.Metrics {
			if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
				newest = &metrics.Metrics[i]
			}
		}
		if newest != nil {
			if oldest == nil || newest.Timestamp.Before(*oldest) {
				oldest = &newest.Timestamp
			}
			intervalSum := intAndFloat{0, 0}
			intSumCount := 0
			floatSumCount := 0
			for _, metricPoint := range metrics.Metrics {
				if metricPoint.Timestamp.Add(duration).After(newest.Timestamp) {
					intervalSum.intValue += int64(metricPoint.Value)
					intSumCount++
					if metricPoint.FloatValue != nil {
						intervalSum.floatValue += *metricPoint.FloatValue
						floatSumCount++
					}
				}
			}
			if newest.FloatValue == nil {
				if intSumCount > 0 {
					sum.intValue += int64(intervalSum.intValue / int64(intSumCount))
					sum.floatValue += float64(intervalSum.intValue / int64(intSumCount))
				}
			} else {
				if floatSumCount > 0 {
					sum.intValue += int64(intervalSum.floatValue / float64(floatSumCount))
					sum.floatValue += intervalSum.floatValue / float64(floatSumCount)
				}
			}
			count++
		}
	}
	if oldest != nil {
		timestamp = *oldest
	}
	return sum, count, timestamp
}

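getForPods above asks Heapster through the apiserver's service proxy, using the metric path it formats from the namespace, the comma-joined pod names, and the metric name. A tiny sketch (not part of the vendored file) of the resulting path for a hypothetical namespace "web" and two pods, using the built-in CPU usage metric and the default Heapster location constants:

// Illustrative only: the proxied Heapster path built by getForPods.
func exampleHeapsterPath() string {
	path := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
		"web", "frontend-1,frontend-2", "cpu-usage")
	// path == "/api/v1/model/namespaces/web/pod-list/frontend-1,frontend-2/metrics/cpu-usage"
	// It is then requested via:
	// Services("kube-system").ProxyGet("http", "heapster", "", path, map[string]string{"start": "<now minus 5m>"})
	return path
}
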