Update godeps
Parent: ffe6baa14c
Commit: 9b142b56f8
1137 changed files with 22773 additions and 189176 deletions
5
Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/OWNERS
generated
vendored
Normal file
@@ -0,0 +1,5 @@
assignees:
- bprashanth
- davidopp
- derekwaynecarr
- mikedanese
647
Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/controller_utils.go
generated
vendored
Normal file
@@ -0,0 +1,647 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/validation"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/integer"
	"k8s.io/kubernetes/pkg/util/sets"
)

const (
	CreatedByAnnotation = "kubernetes.io/created-by"

	// If a watch drops a delete event for a pod, it'll take this long
	// before a dormant controller waiting for those packets is woken up anyway. It is
	// specifically targeted at the case where some problem prevents an update
	// of expectations, without it the controller could stay asleep forever. This should
	// be set based on the expected latency of watch events.
	//
	// Currently a controller can service (create *and* observe the watch events for said
	// creation) about 10 pods a second, so it takes about 1 min to service
	// 500 pods. Just creation is limited to 20qps, and watching happens with ~10-30s
	// latency/pod at the scale of 3000 pods over 100 nodes.
	ExpectationsTimeout = 5 * time.Minute
)

var (
	KeyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
)

type ResyncPeriodFunc func() time.Duration

// Returns 0 for resyncPeriod in case resyncing is not needed.
func NoResyncPeriodFunc() time.Duration {
	return 0
}

// StaticResyncPeriodFunc returns the resync period specified
func StaticResyncPeriodFunc(resyncPeriod time.Duration) ResyncPeriodFunc {
	return func() time.Duration {
		return resyncPeriod
	}
}

// Expectations are a way for controllers to tell the controller manager what they expect. eg:
//	ControllerExpectations: {
//		controller1: expects 2 adds in 2 minutes
//		controller2: expects 2 dels in 2 minutes
//		controller3: expects -1 adds in 2 minutes => controller3's expectations have already been met
//	}
//
// Implementation:
//	ControlleeExpectation = pair of atomic counters to track controllee's creation/deletion
//	ControllerExpectationsStore = TTLStore + a ControlleeExpectation per controller
//
// * Once set expectations can only be lowered
// * A controller isn't synced till its expectations are either fulfilled, or expire
// * Controllers that don't set expectations will get woken up for every matching controllee

// ExpKeyFunc to parse out the key from a ControlleeExpectation
var ExpKeyFunc = func(obj interface{}) (string, error) {
	if e, ok := obj.(*ControlleeExpectations); ok {
		return e.key, nil
	}
	return "", fmt.Errorf("Could not find key for obj %#v", obj)
}

// ControllerExpectationsInterface is an interface that allows users to set and wait on expectations.
// Only abstracted out for testing.
// Warning: if using KeyFunc it is not safe to use a single ControllerExpectationsInterface with different
// types of controllers, because the keys might conflict across types.
type ControllerExpectationsInterface interface {
	GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error)
	SatisfiedExpectations(controllerKey string) bool
	DeleteExpectations(controllerKey string)
	SetExpectations(controllerKey string, add, del int) error
	ExpectCreations(controllerKey string, adds int) error
	ExpectDeletions(controllerKey string, dels int) error
	CreationObserved(controllerKey string)
	DeletionObserved(controllerKey string)
	RaiseExpectations(controllerKey string, add, del int)
	LowerExpectations(controllerKey string, add, del int)
}

// ControllerExpectations is a cache mapping controllers to what they expect to see before being woken up for a sync.
type ControllerExpectations struct {
	cache.Store
}

// GetExpectations returns the ControlleeExpectations of the given controller.
func (r *ControllerExpectations) GetExpectations(controllerKey string) (*ControlleeExpectations, bool, error) {
	if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
		return exp.(*ControlleeExpectations), true, nil
	} else {
		return nil, false, err
	}
}

// DeleteExpectations deletes the expectations of the given controller from the TTLStore.
func (r *ControllerExpectations) DeleteExpectations(controllerKey string) {
	if exp, exists, err := r.GetByKey(controllerKey); err == nil && exists {
		if err := r.Delete(exp); err != nil {
			glog.V(2).Infof("Error deleting expectations for controller %v: %v", controllerKey, err)
		}
	}
}

// SatisfiedExpectations returns true if the required adds/dels for the given controller have been observed.
// Add/del counts are established by the controller at sync time, and updated as controllees are observed by the controller
// manager.
func (r *ControllerExpectations) SatisfiedExpectations(controllerKey string) bool {
	if exp, exists, err := r.GetExpectations(controllerKey); exists {
		if exp.Fulfilled() {
			return true
		} else if exp.isExpired() {
			glog.V(4).Infof("Controller expectations expired %#v", exp)
			return true
		} else {
			glog.V(4).Infof("Controller still waiting on expectations %#v", exp)
			return false
		}
	} else if err != nil {
		glog.V(2).Infof("Error encountered while checking expectations %#v, forcing sync", err)
	} else {
		// When a new controller is created, it doesn't have expectations.
		// When it doesn't see expected watch events for > TTL, the expectations expire.
		//	- In this case it wakes up, creates/deletes controllees, and sets expectations again.
		// When it has satisfied expectations and no controllees need to be created/destroyed > TTL, the expectations expire.
		//	- In this case it continues without setting expectations till it needs to create/delete controllees.
		glog.V(4).Infof("Controller %v either never recorded expectations, or the ttl expired.", controllerKey)
	}
	// Trigger a sync if we either encountered an error (which shouldn't happen since we're
	// getting from local store) or this controller hasn't established expectations.
	return true
}

// TODO: Extend ExpirationCache to support explicit expiration.
// TODO: Make this possible to disable in tests.
// TODO: Support injection of clock.
func (exp *ControlleeExpectations) isExpired() bool {
	return util.RealClock{}.Since(exp.timestamp) > ExpectationsTimeout
}

// SetExpectations registers new expectations for the given controller. Forgets existing expectations.
func (r *ControllerExpectations) SetExpectations(controllerKey string, add, del int) error {
	exp := &ControlleeExpectations{add: int64(add), del: int64(del), key: controllerKey, timestamp: util.RealClock{}.Now()}
	glog.V(4).Infof("Setting expectations %+v", exp)
	return r.Add(exp)
}

func (r *ControllerExpectations) ExpectCreations(controllerKey string, adds int) error {
	return r.SetExpectations(controllerKey, adds, 0)
}

func (r *ControllerExpectations) ExpectDeletions(controllerKey string, dels int) error {
	return r.SetExpectations(controllerKey, 0, dels)
}

// Decrements the expectation counts of the given controller.
func (r *ControllerExpectations) LowerExpectations(controllerKey string, add, del int) {
	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
		exp.Add(int64(-add), int64(-del))
		// The expectations might've been modified since the update on the previous line.
		glog.V(4).Infof("Lowered expectations %+v", exp)
	}
}

// Increments the expectation counts of the given controller.
func (r *ControllerExpectations) RaiseExpectations(controllerKey string, add, del int) {
	if exp, exists, err := r.GetExpectations(controllerKey); err == nil && exists {
		exp.Add(int64(add), int64(del))
		// The expectations might've been modified since the update on the previous line.
		glog.V(4).Infof("Raised expectations %+v", exp)
	}
}

// CreationObserved atomically decrements the `add` expectation count of the given controller.
func (r *ControllerExpectations) CreationObserved(controllerKey string) {
	r.LowerExpectations(controllerKey, 1, 0)
}

// DeletionObserved atomically decrements the `del` expectation count of the given controller.
func (r *ControllerExpectations) DeletionObserved(controllerKey string) {
	r.LowerExpectations(controllerKey, 0, 1)
}

// Expectations are either fulfilled, or expire naturally.
type Expectations interface {
	Fulfilled() bool
}

// ControlleeExpectations track controllee creates/deletes.
type ControlleeExpectations struct {
	add       int64
	del       int64
	key       string
	timestamp time.Time
}

// Add increments the add and del counters.
func (e *ControlleeExpectations) Add(add, del int64) {
	atomic.AddInt64(&e.add, add)
	atomic.AddInt64(&e.del, del)
}

// Fulfilled returns true if this expectation has been fulfilled.
func (e *ControlleeExpectations) Fulfilled() bool {
	// TODO: think about why this line being atomic doesn't matter
	return atomic.LoadInt64(&e.add) <= 0 && atomic.LoadInt64(&e.del) <= 0
}

// GetExpectations returns the add and del expectations of the controllee.
func (e *ControlleeExpectations) GetExpectations() (int64, int64) {
	return atomic.LoadInt64(&e.add), atomic.LoadInt64(&e.del)
}

// NewControllerExpectations returns a store for ControllerExpectations.
func NewControllerExpectations() *ControllerExpectations {
	return &ControllerExpectations{cache.NewStore(ExpKeyFunc)}
}

// UIDSetKeyFunc to parse out the key from a UIDSet.
var UIDSetKeyFunc = func(obj interface{}) (string, error) {
	if u, ok := obj.(*UIDSet); ok {
		return u.key, nil
	}
	return "", fmt.Errorf("Could not find key for obj %#v", obj)
}

// UIDSet holds a key and a set of UIDs. Used by the
// UIDTrackingControllerExpectations to remember which UID it has seen/still
// waiting for.
type UIDSet struct {
	sets.String
	key string
}

// UIDTrackingControllerExpectations tracks the UID of the pods it deletes.
// This cache is needed over plain old expectations to safely handle graceful
// deletion. The desired behavior is to treat an update that sets the
// DeletionTimestamp on an object as a delete. To do so consistently, one needs
// to remember the expected deletes so they aren't double counted.
// TODO: Track creates as well (#22599)
type UIDTrackingControllerExpectations struct {
	ControllerExpectationsInterface
	// TODO: There is a much nicer way to do this that involves a single store,
	// a lock per entry, and a ControlleeExpectationsInterface type.
	uidStoreLock sync.Mutex
	// Store used for the UIDs associated with any expectation tracked via the
	// ControllerExpectationsInterface.
	uidStore cache.Store
}

// GetUIDs is a convenience method to avoid exposing the set of expected uids.
// The returned set is not thread safe, all modifications must be made holding
// the uidStoreLock.
func (u *UIDTrackingControllerExpectations) GetUIDs(controllerKey string) sets.String {
	if uid, exists, err := u.uidStore.GetByKey(controllerKey); err == nil && exists {
		return uid.(*UIDSet).String
	}
	return nil
}

// ExpectDeletions records expectations for the given deleteKeys, against the given controller.
func (u *UIDTrackingControllerExpectations) ExpectDeletions(rcKey string, deletedKeys []string) error {
	u.uidStoreLock.Lock()
	defer u.uidStoreLock.Unlock()

	if existing := u.GetUIDs(rcKey); existing != nil && existing.Len() != 0 {
		glog.Errorf("Clobbering existing delete keys: %+v", existing)
	}
	expectedUIDs := sets.NewString()
	for _, k := range deletedKeys {
		expectedUIDs.Insert(k)
	}
	glog.V(4).Infof("Controller %v waiting on deletions for: %+v", rcKey, deletedKeys)
	if err := u.uidStore.Add(&UIDSet{expectedUIDs, rcKey}); err != nil {
		return err
	}
	return u.ControllerExpectationsInterface.ExpectDeletions(rcKey, expectedUIDs.Len())
}

// DeletionObserved records the given deleteKey as a deletion, for the given rc.
func (u *UIDTrackingControllerExpectations) DeletionObserved(rcKey, deleteKey string) {
	u.uidStoreLock.Lock()
	defer u.uidStoreLock.Unlock()

	uids := u.GetUIDs(rcKey)
	if uids != nil && uids.Has(deleteKey) {
		glog.V(4).Infof("Controller %v received delete for pod %v", rcKey, deleteKey)
		u.ControllerExpectationsInterface.DeletionObserved(rcKey)
		uids.Delete(deleteKey)
	}
}

// DeleteExpectations deletes the UID set and invokes DeleteExpectations on the
// underlying ControllerExpectationsInterface.
func (u *UIDTrackingControllerExpectations) DeleteExpectations(rcKey string) {
	u.uidStoreLock.Lock()
	defer u.uidStoreLock.Unlock()

	u.ControllerExpectationsInterface.DeleteExpectations(rcKey)
	if uidExp, exists, err := u.uidStore.GetByKey(rcKey); err == nil && exists {
		if err := u.uidStore.Delete(uidExp); err != nil {
			glog.V(2).Infof("Error deleting uid expectations for controller %v: %v", rcKey, err)
		}
	}
}

// NewUIDTrackingControllerExpectations returns a wrapper around
// ControllerExpectations that is aware of deleteKeys.
func NewUIDTrackingControllerExpectations(ce ControllerExpectationsInterface) *UIDTrackingControllerExpectations {
	return &UIDTrackingControllerExpectations{ControllerExpectationsInterface: ce, uidStore: cache.NewStore(UIDSetKeyFunc)}
}

// PodControlInterface is an interface that knows how to add or delete pods
// created as an interface to allow testing.
type PodControlInterface interface {
	// CreatePods creates new pods according to the spec.
	CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error
	// CreatePodsOnNode creates a new pod according to the spec on the specified node.
	CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error
	// DeletePod deletes the pod identified by podID.
	DeletePod(namespace string, podID string, object runtime.Object) error
}

// RealPodControl is the default implementation of PodControlInterface.
type RealPodControl struct {
	KubeClient clientset.Interface
	Recorder   record.EventRecorder
}

var _ PodControlInterface = &RealPodControl{}

func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set {
	desiredLabels := make(labels.Set)
	for k, v := range template.Labels {
		desiredLabels[k] = v
	}
	return desiredLabels
}

func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
	desiredAnnotations := make(labels.Set)
	for k, v := range template.Annotations {
		desiredAnnotations[k] = v
	}
	createdByRef, err := api.GetReference(object)
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
	}

	// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
	// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
	// We need to consistently handle this case of annotation versioning.
	codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"})

	createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{
		Reference: *createdByRef,
	})
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
	}
	desiredAnnotations[CreatedByAnnotation] = string(createdByRefJson)
	return desiredAnnotations, nil
}

func getPodsPrefix(controllerName string) string {
	// use the dash (if the name isn't too long) to make the pod name a bit prettier
	prefix := fmt.Sprintf("%s-", controllerName)
	if ok, _ := validation.ValidatePodName(prefix, true); !ok {
		prefix = controllerName
	}
	return prefix
}

func (r RealPodControl) CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
	return r.createPods("", namespace, template, object)
}

func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
	return r.createPods(nodeName, namespace, template, object)
}

func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
	desiredLabels := getPodsLabelSet(template)
	desiredAnnotations, err := getPodsAnnotationSet(template, object)
	if err != nil {
		return err
	}
	meta, err := api.ObjectMetaFor(object)
	if err != nil {
		return fmt.Errorf("object does not have ObjectMeta, %v", err)
	}
	prefix := getPodsPrefix(meta.Name)

	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Labels:       desiredLabels,
			Annotations:  desiredAnnotations,
			GenerateName: prefix,
		},
	}
	if err := api.Scheme.Convert(&template.Spec, &pod.Spec); err != nil {
		return fmt.Errorf("unable to convert pod template: %v", err)
	}
	if len(nodeName) != 0 {
		pod.Spec.NodeName = nodeName
	}
	if labels.Set(pod.Labels).AsSelector().Empty() {
		return fmt.Errorf("unable to create pods, no labels")
	}
	if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
		r.Recorder.Eventf(object, api.EventTypeWarning, "FailedCreate", "Error creating: %v", err)
		return fmt.Errorf("unable to create pods: %v", err)
	} else {
		glog.V(4).Infof("Controller %v created pod %v", meta.Name, newPod.Name)
		r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulCreate", "Created pod: %v", newPod.Name)
	}
	return nil
}

func (r RealPodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
	meta, err := api.ObjectMetaFor(object)
	if err != nil {
		return fmt.Errorf("object does not have ObjectMeta, %v", err)
	}
	if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
		r.Recorder.Eventf(object, api.EventTypeWarning, "FailedDelete", "Error deleting: %v", err)
		return fmt.Errorf("unable to delete pods: %v", err)
	} else {
		glog.V(4).Infof("Controller %v deleted pod %v", meta.Name, podID)
		r.Recorder.Eventf(object, api.EventTypeNormal, "SuccessfulDelete", "Deleted pod: %v", podID)
	}
	return nil
}

type FakePodControl struct {
	sync.Mutex
	Templates     []api.PodTemplateSpec
	DeletePodName []string
	Err           error
}

var _ PodControlInterface = &FakePodControl{}

func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec, object runtime.Object) error {
	f.Lock()
	defer f.Unlock()
	if f.Err != nil {
		return f.Err
	}
	f.Templates = append(f.Templates, *spec)
	return nil
}

func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
	f.Lock()
	defer f.Unlock()
	if f.Err != nil {
		return f.Err
	}
	f.Templates = append(f.Templates, *template)
	return nil
}

func (f *FakePodControl) DeletePod(namespace string, podID string, object runtime.Object) error {
	f.Lock()
	defer f.Unlock()
	if f.Err != nil {
		return f.Err
	}
	f.DeletePodName = append(f.DeletePodName, podID)
	return nil
}

func (f *FakePodControl) Clear() {
	f.Lock()
	defer f.Unlock()
	f.DeletePodName = []string{}
	f.Templates = []api.PodTemplateSpec{}
}

// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
type ActivePods []*api.Pod

func (s ActivePods) Len() int      { return len(s) }
func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func (s ActivePods) Less(i, j int) bool {
	// 1. Unassigned < assigned
	// If only one of the pods is unassigned, the unassigned one is smaller
	if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
		return len(s[i].Spec.NodeName) == 0
	}
	// 2. PodPending < PodUnknown < PodRunning
	m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2}
	if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
		return m[s[i].Status.Phase] < m[s[j].Status.Phase]
	}
	// 3. Not ready < ready
	// If only one of the pods is not ready, the not ready one is smaller
	if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) {
		return !api.IsPodReady(s[i])
	}
	// TODO: take availability into account when we push minReadySeconds information from deployment into pods,
	// see https://github.com/kubernetes/kubernetes/issues/22065
	// 4. Been ready for empty time < less time < more time
	// If both pods are ready, the latest ready one is smaller
	if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
		return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
	}
	// 5. Pods with containers with higher restart counts < lower restart counts
	if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
		return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
	}
	// 6. Empty creation time pods < newer pods < older pods
	if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
		return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp)
	}
	return false
}

// afterOrZero checks if time t1 is after time t2; if one of them
// is zero, the zero time is seen as after non-zero time.
func afterOrZero(t1, t2 unversioned.Time) bool {
	if t1.Time.IsZero() || t2.Time.IsZero() {
		return t1.Time.IsZero()
	}
	return t1.After(t2.Time)
}

func podReadyTime(pod *api.Pod) unversioned.Time {
	if api.IsPodReady(pod) {
		for _, c := range pod.Status.Conditions {
			// we only care about pod ready conditions
			if c.Type == api.PodReady && c.Status == api.ConditionTrue {
				return c.LastTransitionTime
			}
		}
	}
	return unversioned.Time{}
}

func maxContainerRestarts(pod *api.Pod) int {
	maxRestarts := 0
	for _, c := range pod.Status.ContainerStatuses {
		maxRestarts = integer.IntMax(maxRestarts, c.RestartCount)
	}
	return maxRestarts
}

// FilterActivePods returns pods that have not terminated.
func FilterActivePods(pods []api.Pod) []*api.Pod {
	var result []*api.Pod
	for i := range pods {
		p := pods[i]
		if IsPodActive(p) {
			result = append(result, &p)
		} else {
			glog.V(4).Infof("Ignoring inactive pod %v/%v in state %v, deletion time %v",
				p.Namespace, p.Name, p.Status.Phase, p.DeletionTimestamp)
		}
	}
	return result
}

func IsPodActive(p api.Pod) bool {
	return api.PodSucceeded != p.Status.Phase &&
		api.PodFailed != p.Status.Phase &&
		p.DeletionTimestamp == nil
}

// FilterActiveReplicaSets returns replica sets that have (or at least ought to have) pods.
func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions.ReplicaSet {
	active := []*extensions.ReplicaSet{}
	for i := range replicaSets {
		if replicaSets[i].Spec.Replicas > 0 {
			active = append(active, replicaSets[i])
		}
	}
	return active
}

// PodKey returns a key unique to the given pod within a cluster.
// It's used so we consistently use the same key scheme in this module.
// It does exactly what cache.MetaNamespaceKeyFunc would have done
// except there's no possibility for error since we know the exact type.
func PodKey(pod *api.Pod) string {
	return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
}

// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker.
type ControllersByCreationTimestamp []*api.ReplicationController

func (o ControllersByCreationTimestamp) Len() int      { return len(o) }
func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o ControllersByCreationTimestamp) Less(i, j int) bool {
	if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSets by creation timestamp, using their names as a tie breaker.
type ReplicaSetsByCreationTimestamp []*extensions.ReplicaSet

func (o ReplicaSetsByCreationTimestamp) Len() int      { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
	if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
		return o[i].Name < o[j].Name
	}
	return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
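For orientation, the expectations machinery vendored above is driven from a controller's sync loop: the controller records how many creates or deletes it is about to issue, and the watch event handlers count them back down. The sketch below is illustrative only and is not part of this commit; syncSomeController, rcKey, desiredReplicas, existingPods and podControl are hypothetical names, and it assumes the vendored controller package above.

// Illustrative sketch, not part of this commit: how a sync loop might use
// ControllerExpectations and PodControlInterface from controller_utils.go.
func syncSomeController(exp *ControllerExpectations, podControl PodControlInterface,
	rcKey, namespace string, desiredReplicas, existingPods int,
	template *api.PodTemplateSpec, obj runtime.Object) error {
	// Skip the sync while previously issued creates/deletes are still expected
	// to show up as watch events (or until the expectations expire).
	if !exp.SatisfiedExpectations(rcKey) {
		return nil
	}
	if diff := desiredReplicas - existingPods; diff > 0 {
		// Record the creations we are about to issue before issuing them.
		if err := exp.ExpectCreations(rcKey, diff); err != nil {
			return err
		}
		for i := 0; i < diff; i++ {
			if err := podControl.CreatePods(namespace, template, obj); err != nil {
				// The create failed, so its watch event will never arrive;
				// decrement the expectation so the controller is not stuck.
				exp.CreationObserved(rcKey)
				return err
			}
		}
	}
	return nil
}

The pod informer's Add handler would then call exp.CreationObserved(rcKey) for each created pod it observes, which is what lets SatisfiedExpectations return true on the next sync.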
19
Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package controller contains code for controllers (like the replication
// controller).
package controller
404
Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/framework/controller_test.go
generated
vendored
@@ -1,404 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework_test

import (
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/controller/framework"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/util/sets"

	"github.com/google/gofuzz"
)

func Example() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// This will hold the downstream state, as we know it.
	downstream := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)

	// This will hold incoming changes. Note how we pass downstream in as a
	// KeyLister, that way resync operations will result in the correct set
	// of update/delete deltas.
	fifo := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, downstream)

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	cfg := &framework.Config{
		Queue:            fifo,
		ListerWatcher:    source,
		ObjectType:       &api.Pod{},
		FullResyncPeriod: time.Millisecond * 100,
		RetryOnError:     false,

		// Let's implement a simple controller that just deletes
		// everything that comes in.
		Process: func(obj interface{}) error {
			// Obj is from the Pop method of the Queue we make above.
			newest := obj.(cache.Deltas).Newest()

			if newest.Type != cache.Deleted {
				// Update our downstream store.
				err := downstream.Add(newest.Object)
				if err != nil {
					return err
				}

				// Delete this object.
				source.Delete(newest.Object.(runtime.Object))
			} else {
				// Update our downstream store.
				err := downstream.Delete(newest.Object)
				if err != nil {
					return err
				}

				// fifo's KeyOf is easiest, because it handles
				// DeletedFinalStateUnknown markers.
				key, err := fifo.KeyOf(newest.Object)
				if err != nil {
					return err
				}

				// Report this deletion.
				deletionCounter <- key
			}
			return nil
		},
	}

	// Create the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go framework.New(cfg).Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}

func ExampleInformer() {
	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	deletionCounter := make(chan string, 1000)

	// Make a controller that immediately deletes anything added to it, and
	// logs anything deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				source.Delete(obj.(runtime.Object))
			},
			DeleteFunc: func(obj interface{}) {
				key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
				if err != nil {
					key = "oops something went wrong with the key"
				}

				// Report this deletion.
				deletionCounter <- key
			},
		},
	)

	// Run the controller and run it until we close stop.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)

	// Let's add a few objects to the source.
	testIDs := []string{"a-hello", "b-controller", "c-framework"}
	for _, name := range testIDs {
		// Note that these pods are not valid-- the fake source doesn't
		// call validation or anything.
		source.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: name}})
	}

	// Let's wait for the controller to process the things we just added.
	outputSet := sets.String{}
	for i := 0; i < len(testIDs); i++ {
		outputSet.Insert(<-deletionCounter)
	}

	for _, key := range outputSet.List() {
		fmt.Println(key)
	}
	// Output:
	// a-hello
	// b-controller
	// c-framework
}

func TestHammerController(t *testing.T) {
	// This test executes a bunch of requests through the fake source and
	// controller framework to make sure there's no locking/threading
	// errors. If an error happens, it should hang forever or trigger the
	// race detector.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	// Let's do threadsafe output to get predictable test results.
	outputSetLock := sync.Mutex{}
	// map of key to operations done on the key
	outputSet := map[string][]string{}

	recordFunc := func(eventType string, obj interface{}) {
		key, err := framework.DeletionHandlingMetaNamespaceKeyFunc(obj)
		if err != nil {
			t.Errorf("something wrong with key: %v", err)
			key = "oops something went wrong with the key"
		}

		// Record some output when items are deleted.
		outputSetLock.Lock()
		defer outputSetLock.Unlock()
		outputSet[key] = append(outputSet[key], eventType)
	}

	// Make a controller which just logs all the changes it gets.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*100,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    func(obj interface{}) { recordFunc("add", obj) },
			UpdateFunc: func(oldObj, newObj interface{}) { recordFunc("update", newObj) },
			DeleteFunc: func(obj interface{}) { recordFunc("delete", obj) },
		},
	)

	if controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return false before we started the controller")
	}

	// Run the controller and run it until we close stop.
	stop := make(chan struct{})
	go controller.Run(stop)

	// Let's wait for the controller to do its initial sync
	time.Sleep(100 * time.Millisecond)
	if !controller.HasSynced() {
		t.Errorf("Expected HasSynced() to return true after the initial sync")
	}

	wg := sync.WaitGroup{}
	const threads = 3
	wg.Add(threads)
	for i := 0; i < threads; i++ {
		go func() {
			defer wg.Done()
			// Let's add a few objects to the source.
			currentNames := sets.String{}
			rs := rand.NewSource(rand.Int63())
			f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs)
			r := rand.New(rs) // Mustn't use r and f concurrently!
			for i := 0; i < 100; i++ {
				var name string
				var isNew bool
				if currentNames.Len() == 0 || r.Intn(3) == 1 {
					f.Fuzz(&name)
					isNew = true
				} else {
					l := currentNames.List()
					name = l[r.Intn(len(l))]
				}

				pod := &api.Pod{}
				f.Fuzz(pod)
				pod.ObjectMeta.Name = name
				pod.ObjectMeta.Namespace = "default"
				// Add, update, or delete randomly.
				// Note that these pods are not valid-- the fake source doesn't
				// call validation or perform any other checking.
				if isNew {
					currentNames.Insert(name)
					source.Add(pod)
					continue
				}
				switch r.Intn(2) {
				case 0:
					currentNames.Insert(name)
					source.Modify(pod)
				case 1:
					currentNames.Delete(name)
					source.Delete(pod)
				}
			}
		}()
	}
	wg.Wait()

	// Let's wait for the controller to finish processing the things we just added.
	time.Sleep(100 * time.Millisecond)
	close(stop)

	outputSetLock.Lock()
	t.Logf("got: %#v", outputSet)
}

func TestUpdate(t *testing.T) {
	// This test is going to exercise the various paths that result in a
	// call to update.

	// source simulates an apiserver object endpoint.
	source := framework.NewFakeControllerSource()

	const (
		FROM       = "from"
		ADD_MISSED = "missed the add event"
		TO         = "to"
	)

	// These are the transitions we expect to see; because this is
	// asynchronous, there are a lot of valid possibilities.
	type pair struct{ from, to string }
	allowedTransitions := map[pair]bool{
		pair{FROM, TO}:         true,
		pair{FROM, ADD_MISSED}: true,
		pair{ADD_MISSED, TO}:   true,

		// Because a resync can happen when we've already observed one
		// of the above but before the item is deleted.
		pair{TO, TO}: true,
		// Because a resync could happen before we observe an update.
		pair{FROM, FROM}: true,
	}

	pod := func(name, check string, final bool) *api.Pod {
		p := &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name:   name,
				Labels: map[string]string{"check": check},
			},
		}
		if final {
			p.Labels["final"] = "true"
		}
		return p
	}
	deletePod := func(p *api.Pod) bool {
		return p.Labels["final"] == "true"
	}

	tests := []func(string){
		func(name string) {
			name = "a-" + name
			source.Add(pod(name, FROM, false))
			source.Modify(pod(name, TO, true))
		},
		func(name string) {
			name = "b-" + name
			source.Add(pod(name, FROM, false))
			source.ModifyDropWatch(pod(name, TO, true))
		},
		func(name string) {
			name = "c-" + name
			source.AddDropWatch(pod(name, FROM, false))
			source.Modify(pod(name, ADD_MISSED, false))
			source.Modify(pod(name, TO, true))
		},
		func(name string) {
			name = "d-" + name
			source.Add(pod(name, FROM, true))
		},
	}

	const threads = 3

	var testDoneWG sync.WaitGroup
	testDoneWG.Add(threads * len(tests))

	// Make a controller that deletes things once it observes an update.
	// It calls Done() on the wait group on deletions so we can tell when
	// everything we've added has been deleted.
	_, controller := framework.NewInformer(
		source,
		&api.Pod{},
		time.Millisecond*1,
		framework.ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				o, n := oldObj.(*api.Pod), newObj.(*api.Pod)
				from, to := o.Labels["check"], n.Labels["check"]
				if !allowedTransitions[pair{from, to}] {
					t.Errorf("observed transition %q -> %q for %v", from, to, n.Name)
				}
				if deletePod(n) {
					source.Delete(n)
				}
			},
			DeleteFunc: func(obj interface{}) {
				testDoneWG.Done()
			},
		},
	)

	// Run the controller and run it until we close stop.
	// Once Run() is called, calls to testDoneWG.Done() might start, so
	// all testDoneWG.Add() calls must happen before this point
	stop := make(chan struct{})
	go controller.Run(stop)

	// run every test a few times, in parallel
	var wg sync.WaitGroup
	wg.Add(threads * len(tests))
	for i := 0; i < threads; i++ {
		for j, f := range tests {
			go func(name string, f func(string)) {
				defer wg.Done()
				f(name)
			}(fmt.Sprintf("%v-%v", i, j), f)
		}
	}
	wg.Wait()

	// Let's wait for the controller to process the things we just added.
	testDoneWG.Wait()
	close(stop)
}
@@ -1,94 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"sync"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/watch"
)

// ensure the watch delivers the requested and only the requested items.
func consume(t *testing.T, w watch.Interface, rvs []string, done *sync.WaitGroup) {
	defer done.Done()
	for _, rv := range rvs {
		got, ok := <-w.ResultChan()
		if !ok {
			t.Errorf("%#v: unexpected channel close, wanted %v", rvs, rv)
			return
		}
		gotRV := got.Object.(*api.Pod).ObjectMeta.ResourceVersion
		if e, a := rv, gotRV; e != a {
			t.Errorf("wanted %v, got %v", e, a)
		} else {
			t.Logf("Got %v as expected", gotRV)
		}
	}
	// We should not get anything else.
	got, open := <-w.ResultChan()
	if open {
		t.Errorf("%#v: unwanted object %#v", rvs, got)
	}
}

func TestRCNumber(t *testing.T) {
	pod := func(name string) *api.Pod {
		return &api.Pod{
			ObjectMeta: api.ObjectMeta{
				Name: name,
			},
		}
	}

	wg := &sync.WaitGroup{}
	wg.Add(3)

	source := NewFakeControllerSource()
	source.Add(pod("foo"))
	source.Modify(pod("foo"))
	source.Modify(pod("foo"))

	w, err := source.Watch(api.ListOptions{ResourceVersion: "1"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	go consume(t, w, []string{"2", "3"}, wg)

	list, err := source.List(api.ListOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if e, a := "3", list.(*api.List).ResourceVersion; e != a {
		t.Errorf("wanted %v, got %v", e, a)
	}

	w2, err := source.Watch(api.ListOptions{ResourceVersion: "2"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	go consume(t, w2, []string{"3"}, wg)

	w3, err := source.Watch(api.ListOptions{ResourceVersion: "3"})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	go consume(t, w3, []string{}, wg)
	source.Shutdown()
	wg.Wait()
}
90
Godeps/_workspace/src/k8s.io/kubernetes/pkg/controller/lookup_cache.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"hash/adler32"
	"sync"

	"github.com/golang/groupcache/lru"
	"k8s.io/kubernetes/pkg/api/meta"
	hashutil "k8s.io/kubernetes/pkg/util/hash"
)

type objectWithMeta interface {
	meta.Object
}

// keyFunc returns the key of an object, which is used to look up its matching object in the cache.
// Since we match objects by namespace and labels/selector, two objects with the same namespace and
// labels will have the same key.
func keyFunc(obj objectWithMeta) uint64 {
	hash := adler32.New()
	hashutil.DeepHashObject(hash, &equivalenceLabelObj{
		namespace: obj.GetNamespace(),
		labels:    obj.GetLabels(),
	})
	return uint64(hash.Sum32())
}

type equivalenceLabelObj struct {
	namespace string
	labels    map[string]string
}

// MatchingCache saves the label and selector matching relationship.
type MatchingCache struct {
	mutex sync.RWMutex
	cache *lru.Cache
}

// NewMatchingCache returns a MatchingCache, which saves the label and selector matching relationship.
func NewMatchingCache(maxCacheEntries int) *MatchingCache {
	return &MatchingCache{
		cache: lru.New(maxCacheEntries),
	}
}

// Add adds matching information to the cache.
func (c *MatchingCache) Add(labelObj objectWithMeta, selectorObj objectWithMeta) {
	key := keyFunc(labelObj)
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.cache.Add(key, selectorObj)
}

// GetMatchingObject looks up the matching object for a given object.
// Note: the cached information may be stale since the controller may have been deleted or updated;
// callers must verify in the external request that the cached data is not dirty.
func (c *MatchingCache) GetMatchingObject(labelObj objectWithMeta) (controller interface{}, exists bool) {
	key := keyFunc(labelObj)
	c.mutex.Lock()
	defer c.mutex.Unlock()
	return c.cache.Get(key)
}

// Update updates the cached matching information.
func (c *MatchingCache) Update(labelObj objectWithMeta, selectorObj objectWithMeta) {
	c.Add(labelObj, selectorObj)
}

// InvalidateAll invalidates the whole cache.
func (c *MatchingCache) InvalidateAll() {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.cache = lru.New(c.cache.MaxEntries)
}
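As a usage note (again illustrative, not part of this commit): a controller would consult the MatchingCache before doing a full selector scan, and it must re-validate any hit because the cache only matches on namespace and labels and can go stale. getPodController, lookupCache and findControllerBySelector below are hypothetical names, and the sketch assumes the api and labels packages vendored in this commit.

// Illustrative sketch, not part of this commit: consulting MatchingCache
// before falling back to an expensive label-selector scan.
func getPodController(lookupCache *MatchingCache, pod *api.Pod,
	findControllerBySelector func(*api.Pod) *api.ReplicationController) *api.ReplicationController {
	if obj, ok := lookupCache.GetMatchingObject(pod); ok {
		if rc, ok := obj.(*api.ReplicationController); ok &&
			rc.Namespace == pod.Namespace &&
			labels.Set(rc.Spec.Selector).AsSelector().Matches(labels.Set(pod.Labels)) {
			// Cache hit and the selector still matches, so trust it.
			return rc
		}
	}
	// Cache miss or stale entry: do the full scan and remember the result.
	rc := findControllerBySelector(pod)
	if rc != nil {
		lookupCache.Add(pod, rc)
	}
	return rc
}

The re-validation step is the important part of the design: because keyFunc hashes only namespace and labels, a hit tells you where to look first, never what to trust.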