Update go dependencies

Manuel Alejandro de Brito Fontes 2019-03-28 20:43:46 -03:00
parent 14a9e9f3fa
commit 14f4a7b8e8
GPG key ID: 786136016A8BA02A
1349 changed files with 128369 additions and 32627 deletions


@@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sig-auth-authenticators-approvers
reviewers:

vendor/k8s.io/client-go/tools/cache/OWNERS generated vendored Executable file → Normal file

@@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- thockin
- lavalamp
@@ -22,7 +24,6 @@ reviewers:
- erictune
- davidopp
- pmorie
- kargakis
- janetkuo
- justinsb
- eparis


@@ -28,9 +28,9 @@ import (
// Config contains all the settings for a Controller.
type Config struct {
// The queue for your objects; either a FIFO or
// a DeltaFIFO. Your Process() function should accept
// the output of this Queue's Pop() method.
// The queue for your objects - has to be a DeltaFIFO due to
// assumptions in the implementation. Your Process() function
// should accept the output of this Queue's Pop() method.
Queue
// Something that can list and watch your objects.
@@ -285,45 +285,7 @@ func NewInformer(
// This will hold the client state, as we know it.
clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
cfg := &Config{
Queue: fifo,
ListerWatcher: lw,
ObjectType: objType,
FullResyncPeriod: resyncPeriod,
RetryOnError: false,
Process: func(obj interface{}) error {
// from oldest to newest
for _, d := range obj.(Deltas) {
switch d.Type {
case Sync, Added, Updated:
if old, exists, err := clientState.Get(d.Object); err == nil && exists {
if err := clientState.Update(d.Object); err != nil {
return err
}
h.OnUpdate(old, d.Object)
} else {
if err := clientState.Add(d.Object); err != nil {
return err
}
h.OnAdd(d.Object)
}
case Deleted:
if err := clientState.Delete(d.Object); err != nil {
return err
}
h.OnDelete(d.Object)
}
}
return nil
},
}
return clientState, New(cfg)
return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
}
// NewIndexerInformer returns an Indexer and a controller for populating the index
@@ -352,6 +314,30 @@ func NewIndexerInformer(
// This will hold the client state, as we know it.
clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
}
// newInformer returns a controller for populating the store while also
// providing event notifications.
//
// Parameters
// * lw is the list and watch functions for the source of the resource you want to
// be informed of.
// * objType is an object of the type that you expect to receive.
// * resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
// calls, even if nothing changed). Otherwise, re-list will be delayed as
// long as possible (until the upstream source closes the watch or times out,
// or you stop the controller).
// * h is the object you want notifications sent to.
// * clientState is the store you want to populate
//
func newInformer(
lw ListerWatcher,
objType runtime.Object,
resyncPeriod time.Duration,
h ResourceEventHandler,
clientState Store,
) Controller {
// This will hold incoming changes. Note how we pass clientState in as a
// KeyLister, that way resync operations will result in the correct set
// of update/delete deltas.
@@ -390,5 +376,5 @@ func NewIndexerInformer(
return nil
},
}
return clientState, New(cfg)
return New(cfg)
}
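A minimal consumption sketch for the exported NewInformer wrapper above (the Pod type, clientset, and handler bodies are illustrative assumptions, not part of this diff; client-go of this vintage, so List/Watch take no context):

package example

import (
    "time"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

// runPodInformer wires NewInformer to a Pod ListerWatcher and runs the
// controller until stop is closed; the returned Store tracks cluster state.
func runPodInformer(client kubernetes.Interface, stop <-chan struct{}) cache.Store {
    store, controller := cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
                return client.CoreV1().Pods(metav1.NamespaceAll).List(options)
            },
            WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
                return client.CoreV1().Pods(metav1.NamespaceAll).Watch(options)
            },
        },
        &v1.Pod{},
        30*time.Second, // non-zero resyncPeriod: periodic OnUpdate calls even without changes
        cache.ResourceEventHandlerFuncs{
            AddFunc:    func(obj interface{}) { /* handle add */ },
            UpdateFunc: func(oldObj, newObj interface{}) { /* handle update */ },
            DeleteFunc: func(obj interface{}) { /* handle delete */ },
        },
    )
    go controller.Run(stop)
    return store
}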


@@ -466,6 +466,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
if f.knownObjects == nil {
// Do deletion detection against our own list.
queuedDeletions := 0
for k, oldItem := range f.items {
if keys.Has(k) {
continue
@@ -474,6 +475,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
if n := oldItem.Newest(); n != nil {
deletedObj = n.Object
}
queuedDeletions++
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
return err
}
@@ -481,7 +483,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
if !f.populated {
f.populated = true
f.initialPopulationCount = len(list)
// While there shouldn't be any queued deletions in the initial
// population of the queue, it's better to be on the safe side.
f.initialPopulationCount = len(list) + queuedDeletions
}
return nil
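Context for counting queuedDeletions above, paraphrased from elsewhere in delta_fifo.go (not part of this hunk): HasSynced reports true only after everything queued by the first Replace has been popped, so deletions queued during that Replace must be included in the count.

// Paraphrase of DeltaFIFO.HasSynced for context; Pop decrements
// initialPopulationCount, which now starts at len(list) + queuedDeletions.
func (f *DeltaFIFO) HasSynced() bool {
    f.lock.Lock()
    defer f.lock.Unlock()
    return f.populated && f.initialPopulationCount == 0
}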


@@ -144,13 +144,13 @@ func (c *ExpirationCache) ListKeys() []string {
// Add timestamps an item and inserts it into the cache, overwriting entries
// that might exist under the same key.
func (c *ExpirationCache) Add(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
return nil
}
@@ -163,12 +163,12 @@ func (c *ExpirationCache) Update(obj interface{}) error {
// Delete removes an item from the cache.
func (c *ExpirationCache) Delete(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
key, err := c.keyFunc(obj)
if err != nil {
return KeyError{obj, err}
}
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
c.cacheStorage.Delete(key)
return nil
}
@@ -177,8 +177,6 @@ func (c *ExpirationCache) Delete(obj interface{}) error {
// before attempting the replace operation. The replace operation will
// delete the contents of the ExpirationCache `c`.
func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
items := make(map[string]interface{}, len(list))
ts := c.clock.Now()
for _, item := range list {
@@ -188,6 +186,8 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er
}
items[key] = &timestampedEntry{item, ts}
}
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
c.cacheStorage.Replace(items, resourceVersion)
return nil
}


@@ -40,7 +40,7 @@ func (f *FakeCustomStore) Add(obj interface{}) error {
// Update calls the custom Update function if defined
func (f *FakeCustomStore) Update(obj interface{}) error {
if f.UpdateFunc != nil {
return f.Update(obj)
return f.UpdateFunc(obj)
}
return nil
}
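The one-character fix above removes unbounded recursion: Update previously called itself rather than the injected hook. A test-double sketch under that reading:

func ExampleFakeCustomStoreUpdate() {
    store := &cache.FakeCustomStore{
        UpdateFunc: func(obj interface{}) error {
            return nil // a real test would record obj for assertions
        },
    }
    _ = store.Update(struct{}{}) // previously recursed forever; now dispatches to UpdateFunc
}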


@@ -31,7 +31,14 @@ import (
type AppendFunc func(interface{})
func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
selectAll := selector.Empty()
for _, m := range store.List() {
if selectAll {
// Avoid computing labels of the objects to speed up common flows
// of listing all objects.
appendFn(m)
continue
}
metadata, err := meta.Accessor(m)
if err != nil {
return err
@@ -44,8 +51,15 @@ func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
}
func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
selectAll := selector.Empty()
if namespace == metav1.NamespaceAll {
for _, m := range indexer.List() {
if selectAll {
// Avoid computing labels of the objects to speed up common flows
// of listing all objects.
appendFn(m)
continue
}
metadata, err := meta.Accessor(m)
if err != nil {
return err
@@ -74,6 +88,12 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
return nil
}
for _, m := range items {
if selectAll {
// Avoid computing labels of the objects to speed up common flows
// of listing all objects.
appendFn(m)
continue
}
metadata, err := meta.Accessor(m)
if err != nil {
return err
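A usage sketch of the fast path introduced above: labels.Everything() yields a selector whose Empty() is true, so ListAll appends every object without touching its labels (the Pod cast and store variable are assumptions):

var pods []*v1.Pod
err := cache.ListAll(store, labels.Everything(), func(m interface{}) {
    pods = append(pods, m.(*v1.Pod)) // selectAll path: labels never computed
})
if err != nil {
    // handle error
}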


@@ -27,15 +27,25 @@ import (
"k8s.io/client-go/tools/pager"
)
// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
type ListerWatcher interface {
// Lister is any object that knows how to perform an initial list.
type Lister interface {
// List should return a list type object; the Items field will be extracted, and the
// ResourceVersion field will be used to start the watch in the right place.
List(options metav1.ListOptions) (runtime.Object, error)
}
// Watcher is any object that knows how to start a watch on a resource.
type Watcher interface {
// Watch should begin a watch at the specified version.
Watch(options metav1.ListOptions) (watch.Interface, error)
}
// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource.
type ListerWatcher interface {
Lister
Watcher
}
// ListFunc knows how to list resources
type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
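The split lets helpers demand only the capability they use. An illustrative helper (not part of client-go) that needs a watch but no list:

// waitForFirstEvent accepts cache.Watcher instead of the full ListerWatcher.
func waitForFirstEvent(w cache.Watcher) (watch.Event, error) {
    wi, err := w.Watch(metav1.ListOptions{})
    if err != nil {
        return watch.Event{}, err
    }
    defer wi.Stop()
    // Blocks until the first event; yields a zero Event if the watch closes.
    return <-wi.ResultChan(), nil
}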


@@ -24,10 +24,8 @@ import (
"net"
"net/url"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
@@ -41,6 +39,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog"
"k8s.io/utils/trace"
)
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
@@ -95,17 +94,10 @@ func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyn
return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
}
// reflectorDisambiguator is used to disambiguate started reflectors.
// initialized to an unstable value to ensure meaning isn't attributed to the suffix.
var reflectorDisambiguator = int64(time.Now().UnixNano() % 12345)
// NewNamedReflector same as NewReflector, but with a specified name for logging
func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
reflectorSuffix := atomic.AddInt64(&reflectorDisambiguator, 1)
r := &Reflector{
name: name,
// we need this to be unique per process (some names are still the same) but obvious who it belongs to
metrics: newReflectorMetrics(makeValidPrometheusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
name: name,
listerWatcher: lw,
store: store,
expectedType: reflect.TypeOf(expectedType),
@@ -173,27 +165,55 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
// to be served from cache and potentially be delayed relative to
// etcd contents. Reflector framework will catch up via Watch() eventually.
options := metav1.ListOptions{ResourceVersion: "0"}
r.metrics.numberOfLists.Inc()
start := r.clock.Now()
list, err := r.listerWatcher.List(options)
if err != nil {
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
if err := func() error {
initTrace := trace.New("Reflector " + r.name + " ListAndWatch")
defer initTrace.LogIfLong(10 * time.Second)
var list runtime.Object
var err error
listCh := make(chan struct{}, 1)
panicCh := make(chan interface{}, 1)
go func() {
defer func() {
if r := recover(); r != nil {
panicCh <- r
}
}()
list, err = r.listerWatcher.List(options)
close(listCh)
}()
select {
case <-stopCh:
return nil
case r := <-panicCh:
panic(r)
case <-listCh:
}
if err != nil {
return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedType, err)
}
initTrace.Step("Objects listed")
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
}
resourceVersion = listMetaInterface.GetResourceVersion()
initTrace.Step("Resource version extracted")
items, err := meta.ExtractList(list)
if err != nil {
return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
}
initTrace.Step("Objects extracted")
if err := r.syncWith(items, resourceVersion); err != nil {
return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
}
initTrace.Step("SyncWith done")
r.setLastSyncResourceVersion(resourceVersion)
initTrace.Step("Resource version updated")
return nil
}(); err != nil {
return err
}
r.metrics.listDuration.Observe(time.Since(start).Seconds())
listMetaInterface, err := meta.ListAccessor(list)
if err != nil {
return fmt.Errorf("%s: Unable to understand list result %#v: %v", r.name, list, err)
}
resourceVersion = listMetaInterface.GetResourceVersion()
items, err := meta.ExtractList(list)
if err != nil {
return fmt.Errorf("%s: Unable to understand list result %#v (%v)", r.name, list, err)
}
r.metrics.numberOfItemsInList.Observe(float64(len(items)))
if err := r.syncWith(items, resourceVersion); err != nil {
return fmt.Errorf("%s: Unable to sync list result: %v", r.name, err)
}
r.setLastSyncResourceVersion(resourceVersion)
resyncerrc := make(chan error, 1)
cancelCh := make(chan struct{})
@@ -239,7 +259,6 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
TimeoutSeconds: &timeoutSeconds,
}
r.metrics.numberOfWatches.Inc()
w, err := r.listerWatcher.Watch(options)
if err != nil {
switch err {
@@ -291,11 +310,6 @@ func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, err
// Stopping the watcher should be idempotent and if we return from this function there's no way
// we're coming back in with the same watch interface.
defer w.Stop()
// update metrics
defer func() {
r.metrics.numberOfItemsInWatch.Observe(float64(eventCount))
r.metrics.watchDuration.Observe(time.Since(start).Seconds())
}()
loop:
for {
@@ -351,7 +365,6 @@ loop:
watchDuration := r.clock.Now().Sub(start)
if watchDuration < 1*time.Second && eventCount == 0 {
r.metrics.numberOfShortWatches.Inc()
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
}
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
@@ -370,9 +383,4 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
r.lastSyncResourceVersionMutex.Lock()
defer r.lastSyncResourceVersionMutex.Unlock()
r.lastSyncResourceVersion = v
rv, err := strconv.Atoi(v)
if err == nil {
r.metrics.lastResourceVersion.Set(float64(rv))
}
}
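The relisting above now runs the blocking List in its own goroutine so a stuck apiserver call cannot block shutdown via stopCh, while panics are re-raised on the calling goroutine. The same pattern in isolation (an illustrative helper, not part of client-go):

func callInterruptibly(stopCh <-chan struct{}, fn func() error) error {
    var err error
    done := make(chan struct{})
    panicCh := make(chan interface{}, 1)
    go func() {
        defer func() {
            if r := recover(); r != nil {
                panicCh <- r
            }
        }()
        err = fn()
        close(done)
    }()
    select {
    case <-stopCh:
        return nil // stop waiting; the worker goroutine is abandoned
    case p := <-panicCh:
        panic(p) // propagate the worker's panic to the caller
    case <-done:
        return err // safe: close(done) happens after err is written
    }
}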


@@ -25,8 +25,8 @@ import (
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/buffer"
"k8s.io/client-go/util/retry"
"k8s.io/utils/buffer"
"k8s.io/klog"
)


@@ -148,12 +148,19 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
}
index := c.indices[indexName]
// need to de-dupe the return list. Since multiple keys are allowed, this can happen.
returnKeySet := sets.String{}
for _, indexKey := range indexKeys {
set := index[indexKey]
for _, key := range set.UnsortedList() {
returnKeySet.Insert(key)
var returnKeySet sets.String
if len(indexKeys) == 1 {
// In the majority of cases, there is exactly one value matching.
// Optimize the most common path - deduping is not needed here.
returnKeySet = index[indexKeys[0]]
} else {
// Need to de-dupe the return list.
// Since multiple keys are allowed, this can happen.
returnKeySet = sets.String{}
for _, indexKey := range indexKeys {
for key := range index[indexKey] {
returnKeySet.Insert(key)
}
}
}
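A lookup sketch for the fast path above; somePod is an assumed *v1.Pod already added to the indexer:

indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
    cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
})
// A pod yields exactly one namespace index key, so this lookup takes the
// new single-key path and skips the de-duplication set copy.
objs, err := indexer.Index(cache.NamespaceIndex, somePod)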


@@ -17,6 +17,8 @@ limitations under the License.
package api
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -150,6 +152,25 @@ type AuthProviderConfig struct {
Config map[string]string `json:"config,omitempty"`
}
var _ fmt.Stringer = new(AuthProviderConfig)
var _ fmt.GoStringer = new(AuthProviderConfig)
// GoString implements fmt.GoStringer and sanitizes sensitive fields of
// AuthProviderConfig to prevent accidental leaking via logs.
func (c AuthProviderConfig) GoString() string {
return c.String()
}
// String implements fmt.Stringer and sanitizes sensitive fields of
// AuthProviderConfig to prevent accidental leaking via logs.
func (c AuthProviderConfig) String() string {
cfg := "<nil>"
if c.Config != nil {
cfg = "--- REDACTED ---"
}
return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg)
}
// ExecConfig specifies a command to provide client credentials. The command is exec'd
// and outputs structured stdout holding credentials.
//
@@ -172,6 +193,29 @@ type ExecConfig struct {
APIVersion string `json:"apiVersion,omitempty"`
}
var _ fmt.Stringer = new(ExecConfig)
var _ fmt.GoStringer = new(ExecConfig)
// GoString implements fmt.GoStringer and sanitizes sensitive fields of
// ExecConfig to prevent accidental leaking via logs.
func (c ExecConfig) GoString() string {
return c.String()
}
// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig
// to prevent accidental leaking via logs.
func (c ExecConfig) String() string {
var args []string
if len(c.Args) > 0 {
args = []string{"--- REDACTED ---"}
}
env := "[]ExecEnvVar(nil)"
if len(c.Env) > 0 {
env = "[]ExecEnvVar{--- REDACTED ---}"
}
return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion)
}
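A sketch of the effect (values illustrative; assumes fmt and the clientcmd api package are imported): both %v and %#v route through the sanitizers, so argument and env values never reach a log line:

c := api.ExecConfig{
    Command: "example-credential-helper",
    Args:    []string{"--token", "s3cr3t"},
}
fmt.Printf("%v\n", c)  // Args rendered as ["--- REDACTED ---"]
fmt.Printf("%#v\n", c) // GoString delegates to String: same redaction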
// ExecEnvVar is used for setting environment variables when executing an exec-based
// credential plugin.
type ExecEnvVar struct {


@@ -150,7 +150,12 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
// if we got a default namespace, determine whether it was explicit or implicit
if raw, err := mergedKubeConfig.RawConfig(); err == nil {
if context := raw.Contexts[raw.CurrentContext]; context != nil && len(context.Namespace) > 0 {
// determine the current context
currentContext := raw.CurrentContext
if config.overrides != nil && len(config.overrides.CurrentContext) > 0 {
currentContext = config.overrides.CurrentContext
}
if context := raw.Contexts[currentContext]; context != nil && len(context.Namespace) > 0 {
return ns, false, nil
}
}
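A sketch of the path this fixes (names illustrative): with a context override, the overridden context's namespace, not the kubeconfig's current-context, now decides whether the namespace counts as explicit:

loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
overrides := &clientcmd.ConfigOverrides{CurrentContext: "staging"}
cc := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
ns, explicit, err := cc.Namespace() // consults the "staging" context after this fix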


@@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- mikedanese
- timothysc


@@ -89,10 +89,13 @@ func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
if lec.Lock == nil {
return nil, fmt.Errorf("Lock must not be nil.")
}
return &LeaderElector{
config: lec,
clock: clock.RealClock{},
}, nil
le := LeaderElector{
config: lec,
clock: clock.RealClock{},
metrics: globalMetricsFactory.newLeaderMetrics(),
}
le.metrics.leaderOff(le.config.Name)
return &le, nil
}
type LeaderElectionConfig struct {
@@ -118,6 +121,13 @@ type LeaderElectionConfig struct {
// WatchDog may be null if its not needed/configured.
WatchDog *HealthzAdaptor
// ReleaseOnCancel should be set true if the lock should be released
// when the run context is cancelled. If you set this to true, you must
// ensure all code guarded by this lease has successfully completed
// prior to cancelling the context, or you may have two processes
// simultaneously acting on the critical path.
ReleaseOnCancel bool
// Name is the name of the resource lock for debugging
Name string
}
@@ -152,6 +162,8 @@ type LeaderElector struct {
// clock is a wrapper around time to allow for less flaky testing
clock clock.Clock
metrics leaderMetricsAdapter
// name is the name of the resource lock for debugging
name string
}
@@ -211,6 +223,7 @@ func (le *LeaderElector) acquire(ctx context.Context) bool {
return
}
le.config.Lock.RecordEvent("became leader")
le.metrics.leaderOn(le.config.Name)
klog.Infof("successfully acquired lease %v", desc)
cancel()
}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
@@ -246,9 +259,32 @@ func (le *LeaderElector) renew(ctx context.Context) {
return
}
le.config.Lock.RecordEvent("stopped leading")
le.metrics.leaderOff(le.config.Name)
klog.Infof("failed to renew lease %v: %v", desc, err)
cancel()
}, le.config.RetryPeriod, ctx.Done())
// if we hold the lease, give it up
if le.config.ReleaseOnCancel {
le.release()
}
}
// release attempts to release the leader lease if we have acquired it.
func (le *LeaderElector) release() bool {
if !le.IsLeader() {
return true
}
leaderElectionRecord := rl.LeaderElectionRecord{
LeaderTransitions: le.observedRecord.LeaderTransitions,
}
if err := le.config.Lock.Update(leaderElectionRecord); err != nil {
klog.Errorf("Failed to release lock: %v", err)
return false
}
le.observedRecord = leaderElectionRecord
le.observedTime = le.clock.Now()
return true
}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
@@ -284,7 +320,8 @@ func (le *LeaderElector) tryAcquireOrRenew() bool {
le.observedRecord = *oldLeaderElectionRecord
le.observedTime = le.clock.Now()
}
if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
!le.IsLeader() {
klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
return false
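A configuration sketch for the new ReleaseOnCancel knob (the lock, ctx, and names are assumptions; callbacks are required by RunOrDie):

lec := leaderelection.LeaderElectionConfig{
    Lock:            lock, // a resourcelock.Interface
    LeaseDuration:   15 * time.Second,
    RenewDeadline:   10 * time.Second,
    RetryPeriod:     2 * time.Second,
    ReleaseOnCancel: true, // step down (blank HolderIdentity) when ctx ends
    Name:            "example-controller",
    Callbacks: leaderelection.LeaderCallbacks{
        OnStartedLeading: func(ctx context.Context) { /* do leader work */ },
        OnStoppedLeading: func() { /* tear down */ },
    },
}
leaderelection.RunOrDie(ctx, lec)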

vendor/k8s.io/client-go/tools/leaderelection/metrics.go generated vendored Normal file

@@ -0,0 +1,109 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"sync"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
type leaderMetricsAdapter interface {
leaderOn(name string)
leaderOff(name string)
}
// SwitchMetric represents an on/off state that can be flipped for a named
// lease.
type SwitchMetric interface {
On(name string)
Off(name string)
}
type noopMetric struct{}
func (noopMetric) On(name string) {}
func (noopMetric) Off(name string) {}
// defaultLeaderMetrics expects the caller to lock before setting any metrics.
type defaultLeaderMetrics struct {
// leader's value indicates if the current process is the owner of the named lease
leader SwitchMetric
}
func (m *defaultLeaderMetrics) leaderOn(name string) {
if m == nil {
return
}
m.leader.On(name)
}
func (m *defaultLeaderMetrics) leaderOff(name string) {
if m == nil {
return
}
m.leader.Off(name)
}
type noMetrics struct{}
func (noMetrics) leaderOn(name string) {}
func (noMetrics) leaderOff(name string) {}
// MetricsProvider generates various metrics used by the leader election.
type MetricsProvider interface {
NewLeaderMetric() SwitchMetric
}
type noopMetricsProvider struct{}
func (_ noopMetricsProvider) NewLeaderMetric() SwitchMetric {
return noopMetric{}
}
var globalMetricsFactory = leaderMetricsFactory{
metricsProvider: noopMetricsProvider{},
}
type leaderMetricsFactory struct {
metricsProvider MetricsProvider
onlyOnce sync.Once
}
func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) {
f.onlyOnce.Do(func() {
f.metricsProvider = mp
})
}
func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter {
mp := f.metricsProvider
if mp == (noopMetricsProvider{}) {
return noMetrics{}
}
return &defaultLeaderMetrics{
leader: mp.NewLeaderMetric(),
}
}
// SetProvider sets the metrics provider for all subsequently created leader
// electors. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
globalMetricsFactory.setProvider(metricsProvider)
}
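A provider sketch (a trivial stdout-backed implementation; a real one would register, e.g., Prometheus gauges):

type stdoutSwitchMetric struct{}

func (stdoutSwitchMetric) On(name string)  { fmt.Printf("leading %q\n", name) }
func (stdoutSwitchMetric) Off(name string) { fmt.Printf("not leading %q\n", name) }

type stdoutProvider struct{}

func (stdoutProvider) NewLeaderMetric() leaderelection.SwitchMetric {
    return stdoutSwitchMetric{}
}

func init() {
    // Only the first SetProvider call in the process takes effect.
    leaderelection.SetProvider(stdoutProvider{})
}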


@@ -93,6 +93,9 @@ func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error {
// RecordEvent in leader election while adding meta-data
func (cml *ConfigMapLock) RecordEvent(s string) {
if cml.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}


@@ -88,6 +88,9 @@ func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
// RecordEvent in leader election while adding meta-data
func (el *EndpointsLock) RecordEvent(s string) {
if el.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
}


@@ -20,14 +20,16 @@ import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
)
const (
LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
EndpointsResourceLock = "endpoints"
ConfigMapsResourceLock = "configmaps"
LeasesResourceLock = "leases"
)
// LeaderElectionRecord is the record that is stored in the leader election annotation.
@@ -35,6 +37,11 @@ const (
// with a random string (e.g. UUID) with only slight modification of this code.
// TODO(mikedanese): this should potentially be versioned
type LeaderElectionRecord struct {
// HolderIdentity is the ID that owns the lease. If empty, no one owns this lease and
// all callers may acquire. Versions of this library prior to Kubernetes 1.14 will not
// attempt to acquire leases with empty identities and will wait for the full lease
// interval to expire before attempting to reacquire. This value is set to empty when
// a client voluntarily steps down.
HolderIdentity string `json:"holderIdentity"`
LeaseDurationSeconds int `json:"leaseDurationSeconds"`
AcquireTime metav1.Time `json:"acquireTime"`
@@ -42,11 +49,19 @@ type LeaderElectionRecord struct {
LeaderTransitions int `json:"leaderTransitions"`
}
// EventRecorder records a change in the ResourceLock.
type EventRecorder interface {
Eventf(obj runtime.Object, eventType, reason, message string, args ...interface{})
}
// ResourceLockConfig common data that exists across different
// resource locks
type ResourceLockConfig struct {
Identity string
EventRecorder record.EventRecorder
// Identity is the unique string identifying a lease holder across
// all participants in an election.
Identity string
// EventRecorder is optional.
EventRecorder EventRecorder
}
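With the narrowed interface above, record.EventRecorder from tools/record still satisfies EventRecorder, and a silent recorder for tests becomes a one-liner (illustrative):

type noopRecorder struct{}

func (noopRecorder) Eventf(obj runtime.Object, eventType, reason, message string, args ...interface{}) {
}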
// Interface offers a common interface for locking on arbitrary
@@ -76,7 +91,7 @@ type Interface interface {
}
// Manufacture will create a lock of a given type according to the input parameters
func New(lockType string, ns string, name string, client corev1.CoreV1Interface, rlc ResourceLockConfig) (Interface, error) {
func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
switch lockType {
case EndpointsResourceLock:
return &EndpointsLock{
@@ -84,7 +99,7 @@ func New(lockType string, ns string, name string, client corev1.CoreV1Interface,
Namespace: ns,
Name: name,
},
Client: client,
Client: coreClient,
LockConfig: rlc,
}, nil
case ConfigMapsResourceLock:
@@ -93,7 +108,16 @@ func New(lockType string, ns string, name string, client corev1.CoreV1Interface,
Namespace: ns,
Name: name,
},
Client: client,
Client: coreClient,
LockConfig: rlc,
}, nil
case LeasesResourceLock:
return &LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
},
Client: coordinationClient,
LockConfig: rlc,
}, nil
default:


@@ -0,0 +1,124 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcelock
import (
"errors"
"fmt"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1"
)
type LeaseLock struct {
// LeaseMeta should contain a Name and a Namespace of a
// Lease object that the LeaderElector will attempt to lead.
LeaseMeta metav1.ObjectMeta
Client coordinationv1client.LeasesGetter
LockConfig ResourceLockConfig
lease *coordinationv1.Lease
}
// Get returns the election record from a Lease spec
func (ll *LeaseLock) Get() (*LeaderElectionRecord, error) {
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ll.LeaseMeta.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return LeaseSpecToLeaderElectionRecord(&ll.lease.Spec), nil
}
// Create attempts to create a Lease
func (ll *LeaseLock) Create(ler LeaderElectionRecord) error {
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(&coordinationv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: ll.LeaseMeta.Name,
Namespace: ll.LeaseMeta.Namespace,
},
Spec: LeaderElectionRecordToLeaseSpec(&ler),
})
return err
}
// Update will update an existing Lease spec.
func (ll *LeaseLock) Update(ler LeaderElectionRecord) error {
if ll.lease == nil {
return errors.New("lease not initialized, call get or create first")
}
ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
var err error
ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ll.lease)
return err
}
// RecordEvent in leader election while adding meta-data
func (ll *LeaseLock) RecordEvent(s string) {
if ll.LockConfig.EventRecorder == nil {
return
}
events := fmt.Sprintf("%v %v", ll.LockConfig.Identity, s)
ll.LockConfig.EventRecorder.Eventf(&coordinationv1.Lease{ObjectMeta: ll.lease.ObjectMeta}, corev1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string
func (ll *LeaseLock) Describe() string {
return fmt.Sprintf("%v/%v", ll.LeaseMeta.Namespace, ll.LeaseMeta.Name)
}
// Identity returns the Identity of the lock
func (ll *LeaseLock) Identity() string {
return ll.LockConfig.Identity
}
func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord {
holderIdentity := ""
if spec.HolderIdentity != nil {
holderIdentity = *spec.HolderIdentity
}
leaseDurationSeconds := 0
if spec.LeaseDurationSeconds != nil {
leaseDurationSeconds = int(*spec.LeaseDurationSeconds)
}
leaseTransitions := 0
if spec.LeaseTransitions != nil {
leaseTransitions = int(*spec.LeaseTransitions)
}
return &LeaderElectionRecord{
HolderIdentity: holderIdentity,
LeaseDurationSeconds: leaseDurationSeconds,
AcquireTime: metav1.Time{spec.AcquireTime.Time},
RenewTime: metav1.Time{spec.RenewTime.Time},
LeaderTransitions: leaseTransitions,
}
}
func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec {
leaseDurationSeconds := int32(ler.LeaseDurationSeconds)
leaseTransitions := int32(ler.LeaderTransitions)
return coordinationv1.LeaseSpec{
HolderIdentity: &ler.HolderIdentity,
LeaseDurationSeconds: &leaseDurationSeconds,
AcquireTime: &metav1.MicroTime{ler.AcquireTime.Time},
RenewTime: &metav1.MicroTime{ler.RenewTime.Time},
LeaseTransitions: &leaseTransitions,
}
}
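A construction sketch for the new Lease-based lock via the updated New signature (the clientset, namespace, and names are assumptions):

id, _ := os.Hostname() // a common identity choice; any unique string works
lock, err := resourcelock.New(
    resourcelock.LeasesResourceLock,
    "kube-system",        // namespace
    "example-controller", // lock name
    clientset.CoreV1(),
    clientset.CoordinationV1(),
    resourcelock.ResourceLockConfig{Identity: id},
)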

vendor/k8s.io/client-go/tools/metrics/OWNERS generated vendored Executable file → Normal file

@@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- wojtek-t
- eparis

vendor/k8s.io/client-go/tools/record/OWNERS generated vendored Executable file → Normal file

@@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- lavalamp
- smarterclayton


@@ -21,7 +21,7 @@ import (
"math/rand"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -29,10 +29,8 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record/util"
ref "k8s.io/client-go/tools/reference"
"net/http"
"k8s.io/klog"
)
@@ -157,16 +155,6 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela
}
}
func isKeyNotFoundError(err error) bool {
statusErr, _ := err.(*errors.StatusError)
if statusErr != nil && statusErr.Status().Code == http.StatusNotFound {
return true
}
return false
}
// recordEvent attempts to write event to a sink. It returns true if the event
// was successfully recorded or discarded, false if it should be retried.
// If updateExistingEvent is false, it creates a new event, otherwise it updates
@@ -178,7 +166,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
newEvent, err = sink.Patch(event, patch)
}
// Update can fail because the event may have been removed and it no longer exists.
if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) {
if !updateExistingEvent || (updateExistingEvent && util.IsKeyNotFoundError(err)) {
// Making sure that ResourceVersion is empty on creation
event.ResourceVersion = ""
newEvent, err = sink.Create(event)
@@ -260,7 +248,7 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
return
}
if !validateEventType(eventtype) {
if !util.ValidateEventType(eventtype) {
klog.Errorf("Unsupported event type: '%v'", eventtype)
return
}
@@ -275,14 +263,6 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m
}()
}
func validateEventType(eventtype string) bool {
switch eventtype {
case v1.EventTypeNormal, v1.EventTypeWarning:
return true
}
return false
}
func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) {
recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message)
}

vendor/k8s.io/client-go/tools/record/util/util.go generated vendored Normal file

@@ -0,0 +1,44 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"net/http"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
)
// ValidateEventType checks that eventtype is an expected type of event
func ValidateEventType(eventtype string) bool {
switch eventtype {
case v1.EventTypeNormal, v1.EventTypeWarning:
return true
}
return false
}
// IsKeyNotFoundError is a utility function that checks whether an error is a NotFound status error
func IsKeyNotFoundError(err error) bool {
statusErr, _ := err.(*errors.StatusError)
if statusErr != nil && statusErr.Status().Code == http.StatusNotFound {
return true
}
return false
}
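A usage sketch for the extracted helpers (the sink, event, patch, and eventType values are hypothetical):

if !util.ValidateEventType(eventType) {
    // only v1.EventTypeNormal and v1.EventTypeWarning are accepted
}
if _, err := sink.Patch(event, patch); util.IsKeyNotFoundError(err) {
    // the event no longer exists server-side; fall back to sink.Create
}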