Update go dependencies

Manuel de Brito Fontes 2017-11-12 14:14:23 -03:00
parent a858c549d9
commit f3bde94d68
643 changed files with 14296 additions and 19354 deletions

View file

@@ -22,8 +22,6 @@ go_test(
"store_test.go",
"undelta_store_test.go",
],
features = ["-race"],
importpath = "k8s.io/client-go/tools/cache",
library = ":go_default_library",
deps = [
"//vendor/github.com/google/gofuzz:go_default_library",
@@ -35,6 +33,7 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/tools/cache/testing:go_default_library",
],
)
@@ -62,7 +61,6 @@ go_library(
"thread_safe_store.go",
"undelta_store.go",
],
importpath = "k8s.io/client-go/tools/cache",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
@@ -80,9 +78,9 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/pager:go_default_library",
"//vendor/k8s.io/client-go/util/buffer:go_default_library",
],
)

View file

@@ -539,10 +539,6 @@ func (f *DeltaFIFO) Resync() error {
f.lock.Lock()
defer f.lock.Unlock()
if f.knownObjects == nil {
return nil
}
keys := f.knownObjects.ListKeys()
for _, k := range keys {
if err := f.syncKeyLocked(k); err != nil {
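Context for this hunk: a DeltaFIFO can be constructed without a knownObjects store, and the guard touched above is what makes Resync a safe no-op in that case. A minimal, self-contained illustration of the guard pattern (names are simplified; this is a sketch, not the vendored type):

```go
package main

import "fmt"

// KeyLister stands in for the KeyListerGetter a DeltaFIFO may or may not have.
type KeyLister interface{ ListKeys() []string }

type fifo struct{ knownObjects KeyLister }

func (f *fifo) Resync() error {
	if f.knownObjects == nil {
		return nil // no backing store: resync is a no-op rather than a nil dereference
	}
	for _, k := range f.knownObjects.ListKeys() {
		fmt.Println("resyncing", k)
	}
	return nil
}

func main() {
	f := &fifo{}            // knownObjects deliberately left nil
	fmt.Println(f.Resync()) // prints <nil>: the guard made this safe
}
```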

View file

@@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
)
func testIndexFunc(obj interface{}) ([]string, error) {
@@ -77,11 +78,6 @@ func TestMultiIndexKeys(t *testing.T) {
if len(erniePods) != 2 {
t.Errorf("Expected 2 pods but got %v", len(erniePods))
}
for _, erniePod := range erniePods {
if erniePod.(*v1.Pod).Name != "one" && erniePod.(*v1.Pod).Name != "tre" {
t.Errorf("Expected only 'one' or 'tre' but got %s", erniePod.(*v1.Pod).Name)
}
}
bertPods, err := index.ByIndex("byUser", "bert")
if err != nil {
@@ -90,11 +86,6 @@ func TestMultiIndexKeys(t *testing.T) {
if len(bertPods) != 2 {
t.Errorf("Expected 2 pods but got %v", len(bertPods))
}
for _, bertPod := range bertPods {
if bertPod.(*v1.Pod).Name != "one" && bertPod.(*v1.Pod).Name != "two" {
t.Errorf("Expected only 'one' or 'two' but got %s", bertPod.(*v1.Pod).Name)
}
}
oscarPods, err := index.ByIndex("byUser", "oscar")
if err != nil {
@@ -103,11 +94,6 @@ func TestMultiIndexKeys(t *testing.T) {
if len(oscarPods) != 1 {
t.Errorf("Expected 1 pods but got %v", len(erniePods))
}
for _, oscarPod := range oscarPods {
if oscarPod.(*v1.Pod).Name != "two" {
t.Errorf("Expected only 'two' but got %s", oscarPod.(*v1.Pod).Name)
}
}
ernieAndBertKeys, err := index.Index("byUser", pod1)
if err != nil {
@@ -116,11 +102,6 @@ func TestMultiIndexKeys(t *testing.T) {
if len(ernieAndBertKeys) != 3 {
t.Errorf("Expected 3 pods but got %v", len(ernieAndBertKeys))
}
for _, ernieAndBertKey := range ernieAndBertKeys {
if ernieAndBertKey.(*v1.Pod).Name != "one" && ernieAndBertKey.(*v1.Pod).Name != "two" && ernieAndBertKey.(*v1.Pod).Name != "tre" {
t.Errorf("Expected only 'one', 'two' or 'tre' but got %s", ernieAndBertKey.(*v1.Pod).Name)
}
}
index.Delete(pod3)
erniePods, err = index.ByIndex("byUser", "ernie")
@@ -130,12 +111,6 @@ func TestMultiIndexKeys(t *testing.T) {
if len(erniePods) != 1 {
t.Errorf("Expected 1 pods but got %v", len(erniePods))
}
for _, erniePod := range erniePods {
if erniePod.(*v1.Pod).Name != "one" {
t.Errorf("Expected only 'one' but got %s", erniePod.(*v1.Pod).Name)
}
}
elmoPods, err := index.ByIndex("byUser", "elmo")
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -144,7 +119,11 @@ func TestMultiIndexKeys(t *testing.T) {
t.Errorf("Expected 0 pods but got %v", len(elmoPods))
}
copyOfPod2 := pod2.DeepCopy()
obj, err := scheme.Scheme.DeepCopy(pod2)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
copyOfPod2 := obj.(*v1.Pod)
copyOfPod2.Annotations["users"] = "oscar"
index.Update(copyOfPod2)
bertPods, err = index.ByIndex("byUser", "bert")
@@ -154,10 +133,5 @@ func TestMultiIndexKeys(t *testing.T) {
if len(bertPods) != 1 {
t.Errorf("Expected 1 pods but got %v", len(bertPods))
}
for _, bertPod := range bertPods {
if bertPod.(*v1.Pod).Name != "one" {
t.Errorf("Expected only 'one' but got %s", bertPod.(*v1.Pod).Name)
}
}
}
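The substantive change in this test is the deep-copy idiom: the generated pod2.DeepCopy() of newer client-go is swapped for the older reflection-based scheme.Scheme.DeepCopy, which returns (interface{}, error) and needs a type assertion back to *v1.Pod. A hedged sketch of the older style in isolation (imports as this test file already uses them):

```go
import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
)

// copyPod sketches the copy style this commit vendors: reflective, fallible,
// and untyped, unlike the generated DeepCopy method it replaces.
func copyPod(pod *v1.Pod) (*v1.Pod, error) {
	obj, err := scheme.Scheme.DeepCopy(pod)
	if err != nil {
		return nil, err // reflection-based copying can fail at runtime
	}
	return obj.(*v1.Pod), nil // caller must assert back to the concrete type
}
```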

View file

@@ -51,7 +51,8 @@ type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
type ListWatch struct {
ListFunc ListFunc
WatchFunc WatchFunc
// DisableChunking requests no chunking for this list watcher.
// DisableChunking requests no chunking for this list watcher. It has no effect in Kubernetes 1.8, but in
// 1.9 will allow a controller to opt out of chunking.
DisableChunking bool
}
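For callers, DisableChunking is a plain opt-out knob on the struct. A hedged usage sketch, assuming a configured Clientset and the pre-1.9 typed List/Watch signatures:

```go
import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// podListWatch builds a pod ListWatch that opts out of chunked LIST requests.
// Per the comment above, the flag is inert in 1.8 and only matters once
// chunking becomes the default.
func podListWatch(client kubernetes.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().Pods(metav1.NamespaceAll).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(metav1.NamespaceAll).Watch(options)
		},
		DisableChunking: true,
	}
}
```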
@@ -92,7 +93,9 @@ func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
// List a set of apiserver resources
func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
if !lw.DisableChunking {
// chunking will become the default for list watchers starting in Kubernetes 1.9, unless
// otherwise disabled.
if false && !lw.DisableChunking {
return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
}
return lw.ListFunc(options)
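The disabled branch still shows the intended wiring: the pager adapts a plain ListFunc into a sequence of limited pages and stitches them back into one list object. A hedged sketch of that call in isolation (pager and context imports as this file already uses them):

```go
import (
	"golang.org/x/net/context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/pager"
)

// chunkedList sketches the code path compiled out above: the pager drives
// lw.ListFunc repeatedly, setting Limit and Continue on each page request.
func chunkedList(lw *cache.ListWatch, options metav1.ListOptions) (runtime.Object, error) {
	return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
}
```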

View file

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/client-go/kubernetes/scheme"
)
var mutationDetectionEnabled = false
@@ -95,13 +96,18 @@ func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
if _, ok := obj.(DeletedFinalStateUnknown); ok {
return
}
if obj, ok := obj.(runtime.Object); ok {
copiedObj := obj.DeepCopyObject()
d.lock.Lock()
defer d.lock.Unlock()
d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
if _, ok := obj.(runtime.Object); !ok {
return
}
copiedObj, err := scheme.Scheme.Copy(obj.(runtime.Object))
if err != nil {
return
}
d.lock.Lock()
defer d.lock.Unlock()
d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
}
func (d *defaultCacheMutationDetector) CompareObjects() {
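The copy-API swap does not change what the detector is for: snapshot every object as it enters the cache, then periodically compare the snapshot against the live object. A minimal, self-contained re-creation of that idea (illustration only, not the vendored implementation):

```go
package main

import (
	"fmt"
	"reflect"
)

// cacheObj mirrors the pair stored above: the live object handed to the
// cache plus a deep copy taken at Add time.
type cacheObj struct {
	cached interface{}
	copied interface{}
}

func main() {
	live := map[string]string{"app": "web"}
	snap := cacheObj{cached: live, copied: map[string]string{"app": "web"}}

	live["app"] = "mutated" // in-place mutation of a cached object: a client bug

	if !reflect.DeepEqual(snap.cached, snap.copied) {
		fmt.Println("cache mutation detected") // what CompareObjects panics about
	}
}
```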

View file

@@ -34,12 +34,11 @@ func BenchmarkListener(b *testing.B) {
var swg sync.WaitGroup
swg.Add(b.N)
b.SetParallelism(concurrencyLevel)
// Preallocate enough space so that benchmark does not run out of it
pl := newProcessListener(&ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
swg.Done()
},
}, 0, 0, time.Now(), 1024*1024)
}, 0, 0, time.Now())
var wg wait.Group
defer wg.Wait() // Wait for .run and .pop to stop
defer close(pl.addCh) // Tell .run and .pop to stop

View file

@@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/buffer"
"github.com/golang/glog"
)
@@ -93,13 +92,8 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve
// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool
const (
// syncedPollPeriod controls how often you look at the status of your sync funcs
syncedPollPeriod = 100 * time.Millisecond
// initialBufferSize is the initial number of event notifications that can be buffered.
initialBufferSize = 1024
)
// syncedPollPeriod controls how often you look at the status of your sync funcs
const syncedPollPeriod = 100 * time.Millisecond
// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shutdown
@@ -319,7 +313,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
}
}
listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now())
if !s.started {
s.processor.addListener(listener)
@@ -471,13 +465,6 @@ type processorListener struct {
handler ResourceEventHandler
// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
// added until we OOM.
// TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
// we should try to do something better.
pendingNotifications buffer.RingGrowing
// requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
requestedResyncPeriod time.Duration
// resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
@@ -490,12 +477,11 @@ type processorListener struct {
resyncLock sync.Mutex
}
func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time) *processorListener {
ret := &processorListener{
nextCh: make(chan interface{}),
addCh: make(chan interface{}),
handler: handler,
pendingNotifications: *buffer.NewRingGrowing(bufferSize),
requestedResyncPeriod: requestedResyncPeriod,
resyncPeriod: resyncPeriod,
}
@@ -513,16 +499,25 @@ func (p *processorListener) pop() {
defer utilruntime.HandleCrash()
defer close(p.nextCh) // Tell .run() to stop
// pendingNotifications is an unbounded slice that holds all notifications not yet distributed
// there is one per listener, but a failing/stalled listener will have infinite pendingNotifications
// added until we OOM.
// TODO This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
// we should try to do something better
var pendingNotifications []interface{}
var nextCh chan<- interface{}
var notification interface{}
for {
select {
case nextCh <- notification:
// Notification dispatched
var ok bool
notification, ok = p.pendingNotifications.ReadOne()
if !ok { // Nothing to pop
if len(pendingNotifications) == 0 { // Nothing to pop
nextCh = nil // Disable this select case
notification = nil
} else {
notification = pendingNotifications[0]
pendingNotifications[0] = nil
pendingNotifications = pendingNotifications[1:]
}
case notificationToAdd, ok := <-p.addCh:
if !ok {
@@ -533,7 +528,7 @@ func (p *processorListener) pop() {
notification = notificationToAdd
nextCh = p.nextCh
} else { // There is already a notification waiting to be dispatched
p.pendingNotifications.WriteOne(notificationToAdd)
pendingNotifications = append(pendingNotifications, notificationToAdd)
}
}
}
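The type dropped here, buffer.RingGrowing, is a FIFO ring that grows on demand instead of blocking; the plain slice reinstated above gives the same unbounded queue at the cost of reallocating and shifting. A hedged, minimal sketch of such a growing ring, matching the NewRingGrowing/ReadOne/WriteOne calls removed in this file (illustration, not the vendored code):

```go
// RingGrowing is a FIFO of interface{} values that doubles its capacity
// instead of blocking or dropping when full.
type RingGrowing struct {
	data     []interface{}
	n        int // len(data)
	beg      int // index of the oldest element
	readable int // number of stored elements
}

func NewRingGrowing(initialSize int) *RingGrowing {
	return &RingGrowing{data: make([]interface{}, initialSize), n: initialSize}
}

// ReadOne pops the oldest element, reporting ok=false when empty.
func (r *RingGrowing) ReadOne() (data interface{}, ok bool) {
	if r.readable == 0 {
		return nil, false
	}
	r.readable--
	data = r.data[r.beg]
	r.data[r.beg] = nil // allow GC of the slot
	r.beg = (r.beg + 1) % r.n
	return data, true
}

// WriteOne appends an element, doubling the ring first if it is full.
func (r *RingGrowing) WriteOne(data interface{}) {
	if r.readable == r.n {
		newData := make([]interface{}, r.n*2)
		to := r.beg + r.readable
		if to <= r.n {
			copy(newData, r.data[r.beg:to])
		} else {
			copied := copy(newData, r.data[r.beg:])
			copy(newData[copied:], r.data[:to%r.n])
		}
		r.beg = 0
		r.data = newData
		r.n *= 2
	}
	r.data[(r.beg+r.readable)%r.n] = data
	r.readable++
}
```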

View file

@@ -241,7 +241,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
// updateIndices modifies the object's location in the managed indexes; if this is an update, you must provide an oldObj
// updateIndices must be called from a function that already has a lock on the cache
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error {
// if we got an old object, we need to remove it before we add it again
if oldObj != nil {
c.deleteFromIndices(oldObj, key)
@@ -249,7 +249,7 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke
for name, indexFunc := range c.indexers {
indexValues, err := indexFunc(newObj)
if err != nil {
panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
return err
}
index := c.indices[name]
if index == nil {
@@ -266,15 +266,16 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke
set.Insert(key)
}
}
return nil
}
// deleteFromIndices removes the object from each of the managed indexes
// it is intended to be called from a function that already has a lock on the cache
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error {
for name, indexFunc := range c.indexers {
indexValues, err := indexFunc(obj)
if err != nil {
panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
return err
}
index := c.indices[name]
@@ -288,6 +289,7 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
}
}
}
return nil
}
func (c *threadSafeMap) Resync() error {
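With updateIndices and deleteFromIndices now returning errors instead of panicking, their callers inside threadSafeMap must decide what to do with a failing IndexFunc, since Store interface methods such as Update cannot grow an error return. A hedged sketch of one plausible caller, assuming utilruntime from k8s.io/apimachinery/pkg/util/runtime (the real body may differ):

```go
// Update's signature is fixed by the Store interface, so an index error can
// only be surfaced out-of-band.
func (c *threadSafeMap) Update(key string, obj interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	oldObject := c.items[key]
	c.items[key] = obj
	if err := c.updateIndices(oldObject, obj, key); err != nil {
		utilruntime.HandleError(err) // report the bad IndexFunc without crashing
	}
}
```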