Update godeps

commit 73e22a50d2
parent a965f44f84

453 changed files with 84778 additions and 70308 deletions
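Among the vendored changes, this bump picks up several upstream kubernetes/pkg/storage improvements visible in the hunks below: the Cacher moves watch-event dispatch behind a buffered channel drained by a dedicated goroutine, cacheWatcher.add switches to pointer events with pooled timers, the etcd2/etcd3 watchers gain high-water-mark and saturation instrumentation, and the storage backend factory starts returning a DestroyFunc so callers can tear down clients and background goroutines.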
vendor/k8s.io/kubernetes/pkg/storage/cacher.go (generated, vendored): 40 lines changed
```diff
@@ -161,6 +161,9 @@ type Cacher struct {
 	watcherIdx int
 	watchers   indexedWatchers
 
+	// Incoming events that should be dispatched to watchers.
+	incoming chan watchCacheEvent
+
 	// Handling graceful termination.
 	stopLock sync.RWMutex
 	stopped  bool
@@ -197,6 +200,8 @@ func NewCacherFromConfig(config CacherConfig) *Cacher {
 			allWatchers:   make(map[int]*cacheWatcher),
 			valueWatchers: make(map[string]watchersMap),
 		},
+		// TODO: Figure out the correct value for the buffer size.
+		incoming: make(chan watchCacheEvent, 100),
 		// We need to (potentially) stop both:
 		// - wait.Until go-routine
 		// - reflector.ListAndWatch
@@ -205,6 +210,7 @@ func NewCacherFromConfig(config CacherConfig) *Cacher {
 		stopCh: make(chan struct{}),
 	}
 	watchCache.SetOnEvent(cacher.processEvent)
+	go cacher.dispatchEvents()
 
 	stopCh := cacher.stopCh
 	cacher.stopWg.Add(1)
@@ -403,8 +409,32 @@ func (c *Cacher) triggerValues(event *watchCacheEvent) ([]string, bool) {
 	return result, len(result) > 0
 }
 
+// TODO: Most probably splitting this method to a separate thread will visibily
+// improve throughput of our watch machinery. So what we should do is to:
+// - OnEvent handler simply put an element to channel
+// - processEvent be another goroutine processing events from that channel
+// Additionally, if we make this channel buffered, cacher will be more resistant
+// to single watchers being slow - see cacheWatcher::add method.
 func (c *Cacher) processEvent(event watchCacheEvent) {
-	triggerValues, supported := c.triggerValues(&event)
+	c.incoming <- event
+}
+
+func (c *Cacher) dispatchEvents() {
+	for {
+		select {
+		case event, ok := <-c.incoming:
+			if !ok {
+				return
+			}
+			c.dispatchEvent(&event)
+		case <-c.stopCh:
+			return
+		}
+	}
+}
+
+func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
+	triggerValues, supported := c.triggerValues(event)
 
 	c.Lock()
 	defer c.Unlock()
```
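The hunks above are a producer/consumer split: processEvent now only enqueues onto the new buffered incoming channel, and the dispatchEvents goroutine (started in NewCacherFromConfig) does the actual fan-out. A minimal, self-contained sketch of the same pattern, with illustrative names and a toy event type rather than the vendored API:

```go
package main

import (
	"fmt"
	"time"
)

type event struct{ key string }

// dispatcher mirrors the shape of the Cacher change: producers enqueue
// onto a buffered channel; one goroutine owns all dispatching.
type dispatcher struct {
	incoming chan event    // buffered, so OnEvent-style producers rarely block
	stopCh   chan struct{} // closed to request shutdown
}

func newDispatcher() *dispatcher {
	d := &dispatcher{
		incoming: make(chan event, 100), // the buffer size is a tunable, as the TODO in the diff notes
		stopCh:   make(chan struct{}),
	}
	go d.run() // analogue of `go cacher.dispatchEvents()`
	return d
}

// enqueue is the analogue of Cacher.processEvent after the change:
// it only hands the event over and returns immediately.
func (d *dispatcher) enqueue(e event) { d.incoming <- e }

// run is the analogue of Cacher.dispatchEvents: drain the channel
// until it is closed or a stop is requested.
func (d *dispatcher) run() {
	for {
		select {
		case e, ok := <-d.incoming:
			if !ok {
				return
			}
			fmt.Println("dispatching", e.key) // stand-in for dispatchEvent fan-out
		case <-d.stopCh:
			return
		}
	}
}

func main() {
	d := newDispatcher()
	d.enqueue(event{key: "pods/default/nginx"})
	time.Sleep(50 * time.Millisecond) // crude: let the goroutine drain before stopping
	close(d.stopCh)
}
```

The buffered channel is what decouples the watch cache's event path from slow watchers; the separate stopCh gives the goroutine a termination signal that does not depend on the channel being closed.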
```diff
@@ -608,10 +638,10 @@ func (c *cacheWatcher) stop() {
 
 var timerPool sync.Pool
 
-func (c *cacheWatcher) add(event watchCacheEvent) {
+func (c *cacheWatcher) add(event *watchCacheEvent) {
 	// Try to send the event immediately, without blocking.
 	select {
-	case c.input <- event:
+	case c.input <- *event:
 		return
 	default:
 	}
@@ -619,6 +649,7 @@ func (c *cacheWatcher) add(event watchCacheEvent) {
 	// OK, block sending, but only for up to 5 seconds.
 	// cacheWatcher.add is called very often, so arrange
 	// to reuse timers instead of constantly allocating.
+	startTime := time.Now()
 	const timeout = 5 * time.Second
 	t, ok := timerPool.Get().(*time.Timer)
 	if ok {
@@ -629,7 +660,7 @@ func (c *cacheWatcher) add(event watchCacheEvent) {
 	defer timerPool.Put(t)
 
 	select {
-	case c.input <- event:
+	case c.input <- *event:
 		stopped := t.Stop()
 		if !stopped {
 			// Consume triggered (but not yet received) timer event
@@ -643,6 +674,7 @@ func (c *cacheWatcher) add(event watchCacheEvent) {
 		c.forget(false)
 		c.stop()
 	}
+	glog.V(2).Infof("cacheWatcher add function blocked processing for %v", time.Since(startTime))
 }
 
 func (c *cacheWatcher) sendWatchCacheEvent(event watchCacheEvent) {
```
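Two things change in cacheWatcher.add: the event is passed by pointer (one copy into the channel instead of a copy per call), and the blocking path keeps its timers in a sync.Pool so a timer is not allocated for every slow send. A standalone sketch of that pooled-timer send under those assumptions (types and names are illustrative, not the vendored API):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

var timerPool sync.Pool

type event struct{ key string }

// send tries a non-blocking send first, then blocks for at most timeout,
// reusing pooled timers the way cacheWatcher.add does.
func send(input chan<- event, e *event, timeout time.Duration) bool {
	// Fast path: succeed immediately if the buffer has room.
	select {
	case input <- *e:
		return true
	default:
	}

	// Slow path: reuse a timer from the pool instead of allocating one.
	t, ok := timerPool.Get().(*time.Timer)
	if ok {
		t.Reset(timeout)
	} else {
		t = time.NewTimer(timeout)
	}
	defer timerPool.Put(t)

	select {
	case input <- *e:
		if !t.Stop() {
			// Consume the already-fired tick so the pooled timer is clean.
			<-t.C
		}
		return true
	case <-t.C:
		return false // receiver too slow; the real caller drops the watcher here
	}
}

func main() {
	ch := make(chan event, 1)
	fmt.Println(send(ch, &event{key: "a"}, time.Second))          // true: buffer has room
	fmt.Println(send(ch, &event{key: "b"}, 10*time.Millisecond)) // false: nobody is receiving
}
```

Draining t.C when Stop reports the timer already fired is what keeps a pooled timer safe to Reset on its next use; the new glog line then records how long the blocking path actually stalled.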
vendor/k8s.io/kubernetes/pkg/storage/etcd/etcd_watcher.go (generated, vendored): 17 lines changed
```diff
@@ -19,6 +19,7 @@ package etcd
 import (
 	"fmt"
 	"net/http"
+	"reflect"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -107,6 +108,10 @@ type etcdWatcher struct {
 	// Injectable for testing. Send the event down the outgoing channel.
 	emit func(watch.Event)
 
+	// HighWaterMarks for performance debugging.
+	incomingHWM HighWaterMark
+	outgoingHWM HighWaterMark
+
 	cache etcdCache
 }
 
@@ -150,6 +155,10 @@ func newEtcdWatcher(
 		cancel:  nil,
 	}
 	w.emit = func(e watch.Event) {
+		if curLen := int64(len(w.outgoing)); w.outgoingHWM.Update(curLen) {
+			// Monitor if this gets backed up, and how much.
+			glog.V(1).Infof("watch (%v): %v objects queued in outgoing channel.", reflect.TypeOf(e.Object).String(), curLen)
+		}
 		// Give up on user stop, without this we leak a lot of goroutines in tests.
 		select {
 		case w.outgoing <- e:
@@ -262,10 +271,6 @@ func convertRecursiveResponse(node *etcd.Node, response *etcd.Response, incoming
 	incoming <- &copied
 }
 
-var (
-	watchChannelHWM HighWaterMark
-)
-
 // translate pulls stuff from etcd, converts, and pushes out the outgoing channel. Meant to be
 // called as a goroutine.
 func (w *etcdWatcher) translate() {
@@ -308,9 +313,9 @@ func (w *etcdWatcher) translate() {
 			return
 		case res, ok := <-w.etcdIncoming:
 			if ok {
-				if curLen := int64(len(w.etcdIncoming)); watchChannelHWM.Update(curLen) {
+				if curLen := int64(len(w.etcdIncoming)); w.incomingHWM.Update(curLen) {
 					// Monitor if this gets backed up, and how much.
-					glog.V(2).Infof("watch: %v objects queued in channel.", curLen)
+					glog.V(1).Infof("watch: %v objects queued in incoming channel.", curLen)
 				}
 				w.sendResult(res)
 			}
```
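The change replaces one package-global high-water mark with per-watcher incomingHWM/outgoingHWM fields, so each watcher logs only when one of its queues reaches a new maximum depth rather than on every event. The vendored tree defines its own HighWaterMark helper; a sketch of a compatible implementation as a lock-free atomic max (this is an assumed implementation, not copied from the diff):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// HighWaterMark tracks the maximum value ever passed to Update.
type HighWaterMark int64

// Update returns true only when current sets a new maximum, so callers
// can log "queue depth reached N" once per new peak instead of per event.
func (hwm *HighWaterMark) Update(current int64) bool {
	for {
		old := atomic.LoadInt64((*int64)(hwm))
		if current <= old {
			return false
		}
		if atomic.CompareAndSwapInt64((*int64)(hwm), old, current) {
			return true
		}
	}
}

func main() {
	var hwm HighWaterMark
	for _, depth := range []int64{3, 1, 5, 5, 9} {
		if hwm.Update(depth) {
			fmt.Printf("watch: %d objects queued in channel (new peak)\n", depth)
		}
	}
	// Only the peaks 3, 5, and 9 are reported.
}
```

The compare-and-swap loop keeps Update cheap enough to call on every event from concurrent goroutines, which is exactly how the emit closure and translate loop use it.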
vendor/k8s.io/kubernetes/pkg/storage/etcd3/watcher.go (generated, vendored): 6 lines changed
```diff
@@ -190,6 +190,10 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
 			if res == nil {
 				continue
 			}
+			if len(wc.resultChan) == outgoingBufSize {
+				glog.Warningf("Fast watcher, slow processing. Number of buffered events: %d."+
+					"Probably caused by slow dispatching events to watchers", outgoingBufSize)
+			}
 			// If user couldn't receive results fast enough, we also block incoming events from watcher.
 			// Because storing events in local will cause more memory usage.
 			// The worst case would be closing the fast watcher.
@@ -300,7 +304,7 @@ func (wc *watchChan) sendError(err error) {
 
 func (wc *watchChan) sendEvent(e *event) {
 	if len(wc.incomingEventChan) == incomingBufSize {
-		glog.V(2).Infof("Fast watcher, slow processing. Number of buffered events: %d."+
+		glog.Warningf("Fast watcher, slow processing. Number of buffered events: %d."+
 			"Probably caused by slow decoding, user not receiving fast, or other processing logic",
 			incomingBufSize)
 	}
```
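Both call sites rely on the same cheap check: for a buffered channel, len reports the number of queued elements, so len(ch) equal to the buffer size means the next send is about to block. A minimal illustration with a hypothetical channel and a deliberately slow consumer:

```go
package main

import (
	"log"
	"time"
)

const incomingBufSize = 100

// sendWithSaturationWarning mirrors watchChan.sendEvent: warn when the
// buffer is already full, because the send below is about to block.
func sendWithSaturationWarning(ch chan int, v int) {
	if len(ch) == incomingBufSize {
		log.Printf("Fast watcher, slow processing. Number of buffered events: %d.", incomingBufSize)
	}
	ch <- v
}

func main() {
	ch := make(chan int, incomingBufSize)
	go func() {
		for range ch {
			time.Sleep(time.Millisecond) // deliberately slow consumer
		}
	}()
	for i := 0; i < 2*incomingBufSize; i++ {
		sendWithSaturationWarning(ch, i)
	}
}
```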
vendor/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd2.go (generated, vendored): 9 lines changed
```diff
@@ -30,16 +30,17 @@ import (
 	utilnet "k8s.io/kubernetes/pkg/util/net"
 )
 
-func newETCD2Storage(c storagebackend.Config) (storage.Interface, error) {
+func newETCD2Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
 	tr, err := newTransportForETCD2(c.CertFile, c.KeyFile, c.CAFile)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	client, err := newETCD2Client(tr, c.ServerList)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
-	return etcd.NewEtcdStorage(client, c.Codec, c.Prefix, c.Quorum, c.DeserializationCacheSize), nil
+	s := etcd.NewEtcdStorage(client, c.Codec, c.Prefix, c.Quorum, c.DeserializationCacheSize)
+	return s, tr.CloseIdleConnections, nil
 }
 
 func newETCD2Client(tr *http.Transport, serverList []string) (etcd2client.Client, error) {
```
vendor/k8s.io/kubernetes/pkg/storage/storagebackend/factory/etcd3.go (generated, vendored): 15 lines changed
```diff
@@ -26,7 +26,7 @@ import (
 	"golang.org/x/net/context"
 )
 
-func newETCD3Storage(c storagebackend.Config) (storage.Interface, error) {
+func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
 	tlsInfo := transport.TLSInfo{
 		CertFile: c.CertFile,
 		KeyFile:  c.KeyFile,
@@ -34,7 +34,7 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, error) {
 	}
 	tlsConfig, err := tlsInfo.ClientConfig()
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	cfg := clientv3.Config{
@@ -43,8 +43,13 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, error) {
 	}
 	client, err := clientv3.New(cfg)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
-	etcd3.StartCompactor(context.Background(), client)
-	return etcd3.New(client, c.Codec, c.Prefix), nil
+	ctx, cancel := context.WithCancel(context.Background())
+	etcd3.StartCompactor(ctx, client)
+	destroyFunc := func() {
+		cancel()
+		client.Close()
+	}
+	return etcd3.New(client, c.Codec, c.Prefix), destroyFunc, nil
 }
```
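The etcd3 backend previously started its compactor with context.Background(), leaving no way to stop it. Deriving a cancellable context and folding cancel() and client.Close() into one destroyFunc gives the background goroutine an owner. A sketch of the same lifecycle with a stand-in worker instead of etcd3.StartCompactor:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// DestroyFunc matches the new factory type: one function that tears
// everything down.
type DestroyFunc func()

// startWorker stands in for etcd3.StartCompactor: a background goroutine
// that runs until its context is cancelled.
func startWorker(ctx context.Context) {
	go func() {
		ticker := time.NewTicker(10 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// periodic work (compaction, in the real code)
			case <-ctx.Done():
				fmt.Println("worker stopped")
				return
			}
		}
	}()
}

// newStorage mirrors the new newETCD3Storage shape: derive a cancellable
// context, hand it to the worker, and fold cancel into the DestroyFunc.
func newStorage() DestroyFunc {
	ctx, cancel := context.WithCancel(context.Background())
	startWorker(ctx)
	return func() {
		cancel()
		// client.Close() would also go here, as in the real code
	}
}

func main() {
	destroy := newStorage()
	time.Sleep(30 * time.Millisecond)
	destroy()
	time.Sleep(20 * time.Millisecond) // let the worker observe cancellation
}
```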
vendor/k8s.io/kubernetes/pkg/storage/storagebackend/factory/factory.go (generated, vendored): 7 lines changed
```diff
@@ -23,8 +23,11 @@ import (
 	"k8s.io/kubernetes/pkg/storage/storagebackend"
 )
 
+// DestroyFunc is to destroy any resources used by the storage returned in Create() together.
+type DestroyFunc func()
+
 // Create creates a storage backend based on given config.
-func Create(c storagebackend.Config) (storage.Interface, error) {
+func Create(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
 	switch c.Type {
 	case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD2:
 		return newETCD2Storage(c)
@@ -35,6 +38,6 @@ func Create(c storagebackend.Config) (storage.Interface, error) {
 		// - Support non-quorum read.
 		return newETCD3Storage(c)
 	default:
-		return nil, fmt.Errorf("unknown storage type: %s", c.Type)
+		return nil, nil, fmt.Errorf("unknown storage type: %s", c.Type)
 	}
 }
```
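For consumers of the factory, the visible change is Create's new second return value. A hypothetical call site against the new signature (the config values are illustrative, and a real caller would also set c.Codec and the rest of storagebackend.Config):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/storage/storagebackend"
	"k8s.io/kubernetes/pkg/storage/storagebackend/factory"
)

// startStorage is a hypothetical caller adapted to the new signature
// Create(c) (storage.Interface, DestroyFunc, error).
func startStorage(cfg storagebackend.Config) error {
	s, destroy, err := factory.Create(cfg)
	if err != nil {
		return err
	}
	// destroy cancels the compactor context and closes the etcd3 client,
	// or closes idle connections for etcd2.
	defer destroy()

	_ = s // use the storage.Interface here
	return nil
}

func main() {
	cfg := storagebackend.Config{
		Type:       storagebackend.StorageTypeETCD3, // illustrative
		ServerList: []string{"http://127.0.0.1:2379"},
	}
	if err := startStorage(cfg); err != nil {
		fmt.Println("storage setup failed:", err)
	}
}
```

Bundling teardown into a single returned closure keeps the per-backend cleanup details (transport vs. client vs. context) out of every caller.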