Update go dependencies
This commit is contained in: parent 060e449056, commit 4fb61c73d1
1192 changed files with 185874 additions and 302749 deletions
8  vendor/k8s.io/client-go/tools/cache/BUILD (generated, vendored)
@@ -27,16 +27,16 @@ go_test(
     tags = ["automanaged"],
     deps = [
         "//vendor/github.com/google/gofuzz:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache/testing:go_default_library",
-        "//vendor/k8s.io/client-go/util/clock:go_default_library",
     ],
 )
 
@@ -64,7 +64,6 @@ go_library(
     tags = ["automanaged"],
     deps = [
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/github.com/hashicorp/golang-lru:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -72,6 +71,8 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/cache:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -79,6 +80,5 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/util/clock:go_default_library",
     ],
 )
1  vendor/k8s.io/client-go/tools/cache/OWNERS (generated, vendored)
@@ -10,7 +10,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
 - bprashanth
 - erictune
 - davidopp
 - pmorie
43  vendor/k8s.io/client-go/tools/cache/controller.go (generated, vendored)
@@ -21,9 +21,9 @@ import (
     "time"
 
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/clock"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
-    "k8s.io/client-go/util/clock"
 )
 
 // Config contains all the settings for a Controller.
@@ -207,6 +207,47 @@ func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
     }
 }
 
+// FilteringResourceEventHandler applies the provided filter to all events coming
+// in, ensuring the appropriate nested handler method is invoked. An object
+// that starts passing the filter after an update is considered an add, and an
+// object that stops passing the filter after an update is considered a delete.
+type FilteringResourceEventHandler struct {
+    FilterFunc func(obj interface{}) bool
+    Handler    ResourceEventHandler
+}
+
+// OnAdd calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
+    if !r.FilterFunc(obj) {
+        return
+    }
+    r.Handler.OnAdd(obj)
+}
+
+// OnUpdate ensures the proper handler is called depending on whether the filter matches
+func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
+    newer := r.FilterFunc(newObj)
+    older := r.FilterFunc(oldObj)
+    switch {
+    case newer && older:
+        r.Handler.OnUpdate(oldObj, newObj)
+    case newer && !older:
+        r.Handler.OnAdd(newObj)
+    case !newer && older:
+        r.Handler.OnDelete(oldObj)
+    default:
+        // do nothing
+    }
+}
+
+// OnDelete calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
+    if !r.FilterFunc(obj) {
+        return
+    }
+    r.Handler.OnDelete(obj)
+}
+
 // DeletionHandlingMetaNamespaceKeyFunc checks for
 // DeletedFinalStateUnknown objects before calling
 // MetaNamespaceKeyFunc.
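Usage sketch (not part of the diff): the new FilteringResourceEventHandler wraps any existing handler, and the update-to-add/delete semantics above fall out of its OnUpdate switch. This is a minimal sketch assuming a typical informer setup of this client-go vintage; the namespace value and helper name are illustrative:

```go
package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// newFilteredHandler routes only pods in the given namespace to inner.
// An update that moves a pod into the namespace surfaces as an Add;
// one that moves it out surfaces as a Delete.
func newFilteredHandler(ns string, inner cache.ResourceEventHandler) cache.ResourceEventHandler {
	return cache.FilteringResourceEventHandler{
		FilterFunc: func(obj interface{}) bool {
			pod, ok := obj.(*v1.Pod)
			return ok && pod.Namespace == ns
		},
		Handler: inner,
	}
}
```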
2  vendor/k8s.io/client-go/tools/cache/expiration_cache.go (generated, vendored)
@@ -21,7 +21,7 @@ import (
     "time"
 
     "github.com/golang/glog"
-    "k8s.io/client-go/util/clock"
+    "k8s.io/apimachinery/pkg/util/clock"
 )
 
 // ExpirationCache implements the store interface
2  vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go (generated, vendored)
@@ -17,8 +17,8 @@ limitations under the License.
 package cache
 
 import (
+    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/sets"
-    "k8s.io/client-go/util/clock"
 )
 
 type fakeThreadSafeMap struct {
73  vendor/k8s.io/client-go/tools/cache/mutation_cache.go (generated, vendored)
@@ -20,16 +20,19 @@ import (
     "fmt"
     "strconv"
     "sync"
     "time"
 
-    lru "github.com/hashicorp/golang-lru"
+    "github.com/golang/glog"
 
     "k8s.io/apimachinery/pkg/api/meta"
     "k8s.io/apimachinery/pkg/runtime"
+    utilcache "k8s.io/apimachinery/pkg/util/cache"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/apimachinery/pkg/util/sets"
 )
 
 // MutationCache is able to take the result of update operations and stores them in an LRU
-// that can be used to provide a more current view of a requested object. It requires interpretting
+// that can be used to provide a more current view of a requested object. It requires interpreting
 // resourceVersions for comparisons.
+// Implementations must be thread-safe.
 // TODO find a way to layer this into an informer/lister
@@ -50,19 +53,20 @@ type ResourceVersionComparator interface {
 // - increases when updated
 // - is comparable across the same resource in a namespace
 //
-// Most backends will have these semantics. Indexer may be nil.
-func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer) MutationCache {
-    lru, err := lru.New(100)
-    if err != nil {
-        // errors only happen on invalid sizes, this would be programmer error
-        panic(err)
-    }
-
+// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
+// remains in the mutation cache before it is removed.
+//
+// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
+// in the underlying store. This is only safe if your use of the cache can handle mutation entries
+// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
+func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
     return &mutationCache{
         backingCache: backingCache,
         indexer:      indexer,
-        mutationCache: lru,
+        mutationCache: utilcache.NewLRUExpireCache(100),
         comparator:   etcdObjectVersioner{},
+        ttl:          ttl,
+        includeAdds:  includeAdds,
     }
 }
 
@@ -73,7 +77,9 @@ type mutationCache struct {
     lock          sync.Mutex
     backingCache  Store
     indexer       Indexer
-    mutationCache *lru.Cache
+    mutationCache *utilcache.LRUExpireCache
+    includeAdds   bool
+    ttl           time.Duration
 
     comparator ResourceVersionComparator
 }
@@ -90,9 +96,15 @@ func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
         return nil, false, err
     }
     if !exists {
-        // we can't distinguish between, "didn't observe create" and "was deleted after create", so
-        // if the key is missing, we always return it as missing
-        return nil, false, nil
+        if !c.includeAdds {
+            // we can't distinguish between, "didn't observe create" and "was deleted after create", so
+            // if the key is missing, we always return it as missing
+            return nil, false, nil
+        }
+        obj, exists = c.mutationCache.Get(key)
+        if !exists {
+            return nil, false, nil
+        }
     }
     objRuntime, ok := obj.(runtime.Object)
     if !ok {
@@ -114,7 +126,9 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er
         return nil, err
     }
     var items []interface{}
+    keySet := sets.NewString()
     for _, key := range keys {
+        keySet.Insert(key)
         obj, exists, err := c.indexer.GetByKey(key)
         if err != nil {
             return nil, err
@@ -128,6 +142,33 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er
             items = append(items, obj)
         }
     }
+
+    if c.includeAdds {
+        fn := c.indexer.GetIndexers()[name]
+        // Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior
+        for _, key := range c.mutationCache.Keys() {
+            updated, ok := c.mutationCache.Get(key)
+            if !ok {
+                continue
+            }
+            if keySet.Has(key.(string)) {
+                continue
+            }
+            elements, err := fn(updated)
+            if err != nil {
+                glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
+                continue
+            }
+            for _, inIndex := range elements {
+                if inIndex != indexKey {
+                    continue
+                }
+                items = append(items, updated)
+                break
+            }
+        }
+    }
+
     return items, nil
 }
 
@@ -175,7 +216,7 @@ func (c *mutationCache) Mutation(obj interface{}) {
             }
         }
     }
-    c.mutationCache.Add(key, obj)
+    c.mutationCache.Add(key, obj, c.ttl)
 }
 
 // etcdObjectVersioner implements versioning and extracting etcd node information
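Usage sketch (not part of the diff): the constructor now takes a TTL and an includeAdds flag, and the backing LRU is the expiring utilcache.LRUExpireCache. A minimal sketch of the updated call; the one-minute TTL and helper name are illustrative:

```go
package example

import (
	"time"

	"k8s.io/client-go/tools/cache"
)

// newMutationCache shows the new signature: entries now expire after ttl,
// and includeAdds=true lets Get/ByIndex return objects that exist only in
// the mutation cache and not yet in the backing store.
func newMutationCache(store cache.Store, indexer cache.Indexer) cache.MutationCache {
	// Illustrative ttl; tune it to how long a stale read is tolerable.
	return cache.NewIntegerResourceVersionMutationCache(store, indexer, time.Minute, true)
}
```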
2  vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored)
@@ -38,10 +38,10 @@ import (
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/clock"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/util/clock"
 )
 
 // Reflector watches a specified resource and causes all changes to be reflected in the given store.
2  vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored)
@@ -22,9 +22,9 @@ import (
     "time"
 
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/clock"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
-    "k8s.io/client-go/util/clock"
 
     "github.com/golang/glog"
 )
2  vendor/k8s.io/client-go/tools/clientcmd/BUILD (generated, vendored)
@@ -48,11 +48,11 @@ go_library(
         "//vendor/github.com/howeyc/gopass:go_default_library",
         "//vendor/github.com/imdario/mergo:go_default_library",
         "//vendor/github.com/spf13/pflag:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
-        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
         "//vendor/k8s.io/client-go/tools/auth:go_default_library",
         "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
18  vendor/k8s.io/client-go/tools/clientcmd/client_config.go (generated, vendored)
@@ -27,7 +27,7 @@ import (
     "github.com/golang/glog"
     "github.com/imdario/mergo"
 
-    "k8s.io/client-go/pkg/api/v1"
+    "k8s.io/api/core/v1"
     restclient "k8s.io/client-go/rest"
     clientauth "k8s.io/client-go/tools/auth"
     clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@@ -205,7 +205,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo,
 // we want this order of precedence for user identifcation
 // 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
 // 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. if there is not enough information to idenfity the user, load try the ~/.kubernetes_auth file
+// 3. if there is not enough information to identify the user, load try the ~/.kubernetes_auth file
 // 4. if there is not enough information to identify the user, prompt if possible
 func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) {
     mergedConfig := &restclient.Config{}
@@ -296,6 +296,14 @@ func canIdentifyUser(config restclient.Config) bool {
 
 // Namespace implements ClientConfig
 func (config *DirectClientConfig) Namespace() (string, bool, error) {
+    if config.overrides != nil && config.overrides.Context.Namespace != "" {
+        // In the event we have an empty config but we do have a namespace override, we should return
+        // the namespace override instead of having config.ConfirmUsable() return an error. This allows
+        // things like in-cluster clients to execute `kubectl get pods --namespace=foo` and have the
+        // --namespace flag honored instead of being ignored.
+        return config.overrides.Context.Namespace, true, nil
+    }
+
     if err := config.ConfirmUsable(); err != nil {
         return "", false, err
     }
@@ -309,11 +317,7 @@ func (config *DirectClientConfig) Namespace() (string, bool, error) {
         return v1.NamespaceDefault, false, nil
     }
 
-    overridden := false
-    if config.overrides != nil && config.overrides.Context.Namespace != "" {
-        overridden = true
-    }
-    return configContext.Namespace, overridden, nil
+    return configContext.Namespace, false, nil
 }
 
 // ConfigAccess implements ClientConfig
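Usage sketch (not part of the diff): with this change a namespace override now short-circuits before ConfirmUsable(), so an override wins even when the kubeconfig itself is empty or unusable. A minimal sketch using the public clientcmd API; the helper name and override value are illustrative:

```go
package example

import (
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// namespaceFor returns the effective namespace for a raw kubeconfig plus an
// explicit override (e.g. the value of a --namespace flag).
func namespaceFor(raw clientcmdapi.Config, override string) (string, bool, error) {
	cc := clientcmd.NewDefaultClientConfig(raw, &clientcmd.ConfigOverrides{
		Context: clientcmdapi.Context{Namespace: override},
	})
	// Returns ("foo", true, nil) when override is "foo", even for an empty raw config.
	return cc.Namespace()
}
```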
2  vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go (generated, vendored)
@@ -22,7 +22,7 @@ import (
 
     "github.com/golang/glog"
 
-    "k8s.io/client-go/pkg/api/v1"
+    "k8s.io/api/core/v1"
     restclient "k8s.io/client-go/rest"
     clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 )
40  vendor/k8s.io/client-go/tools/leaderelection/BUILD (generated, vendored, new file)
@@ -0,0 +1,40 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["leaderelection.go"],
+    tags = ["automanaged"],
+    deps = [
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["leaderelection_test.go"],
+    library = ":go_default_library",
+    tags = ["automanaged"],
+    deps = [
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1/fake:go_default_library",
+        "//vendor/k8s.io/client-go/testing:go_default_library",
+        "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
+        "//vendor/k8s.io/client-go/tools/record:go_default_library",
+    ],
+)
13  vendor/k8s.io/client-go/tools/leaderelection/OWNERS (generated, vendored, new file)
@@ -0,0 +1,13 @@
+approvers:
+- mikedanese
+- timothysc
+reviewers:
+- wojtek-t
+- deads2k
+- mikedanese
+- gmarek
+- eparis
+- timothysc
+- ingvagabund
+- resouer
+- goltermann
274  vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go (generated, vendored, new file)
@@ -0,0 +1,274 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package leaderelection implements leader election of a set of endpoints.
+// It uses an annotation in the endpoints object to store the record of the
+// election state.
+//
+// This implementation does not guarantee that only one client is acting as a
+// leader (a.k.a. fencing). A client observes timestamps captured locally to
+// infer the state of the leader election. Thus the implementation is tolerant
+// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate.
+//
+// However the level of tolerance to skew rate can be configured by setting
+// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
+// maximum tolerated ratio of time passed on the fastest node to time passed on
+// the slowest node can be approximately achieved with a configuration that sets
+// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted
+// to tolerate some nodes progressing forward in time twice as fast as other nodes,
+// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds.
+//
+// While not required, some method of clock synchronization between nodes in the
+// cluster is highly recommended. It's important to keep in mind when configuring
+// this client that the tolerance to skew rate varies inversely to master
+// availability.
+//
+// Larger clusters often have a more lenient SLA for API latency. This should be
+// taken into account when configuring the client. The rate of leader transitions
+// should be monitored and RetryPeriod and LeaseDuration should be increased
+// until the rate is stable and acceptably low. It's important to keep in mind
+// when configuring this client that the tolerance to API latency varies inversely
+// to master availability.
+//
+// DISCLAIMER: this is an alpha API. This library will likely change significantly
+// or even be removed entirely in subsequent releases. Depend on this API at
+// your own risk.
+package leaderelection
+
+import (
+    "fmt"
+    "reflect"
+    "time"
+
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/apimachinery/pkg/util/wait"
+    rl "k8s.io/client-go/tools/leaderelection/resourcelock"
+
+    "github.com/golang/glog"
+)
+
+const (
+    JitterFactor = 1.2
+)
+
+// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig
+func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
+    if lec.LeaseDuration <= lec.RenewDeadline {
+        return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
+    }
+    if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
+        return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
+    }
+    if lec.Lock == nil {
+        return nil, fmt.Errorf("Lock must not be nil.")
+    }
+    return &LeaderElector{
+        config: lec,
+    }, nil
+}
+
+type LeaderElectionConfig struct {
+    // Lock is the resource that will be used for locking
+    Lock rl.Interface
+
+    // LeaseDuration is the duration that non-leader candidates will
+    // wait to force acquire leadership. This is measured against time of
+    // last observed ack.
+    LeaseDuration time.Duration
+    // RenewDeadline is the duration that the acting master will retry
+    // refreshing leadership before giving up.
+    RenewDeadline time.Duration
+    // RetryPeriod is the duration the LeaderElector clients should wait
+    // between tries of actions.
+    RetryPeriod time.Duration
+
+    // Callbacks are callbacks that are triggered during certain lifecycle
+    // events of the LeaderElector
+    Callbacks LeaderCallbacks
+}
+
+// LeaderCallbacks are callbacks that are triggered during certain
+// lifecycle events of the LeaderElector. These are invoked asynchronously.
+//
+// possible future callbacks:
+//  * OnChallenge()
+type LeaderCallbacks struct {
+    // OnStartedLeading is called when a LeaderElector client starts leading
+    OnStartedLeading func(stop <-chan struct{})
+    // OnStoppedLeading is called when a LeaderElector client stops leading
+    OnStoppedLeading func()
+    // OnNewLeader is called when the client observes a leader that is
+    // not the previously observed leader. This includes the first observed
+    // leader when the client starts.
+    OnNewLeader func(identity string)
+}
+
+// LeaderElector is a leader election client.
+//
+// possible future methods:
+//  * (le *LeaderElector) IsLeader()
+//  * (le *LeaderElector) GetLeader()
+type LeaderElector struct {
+    config LeaderElectionConfig
+    // internal bookkeeping
+    observedRecord rl.LeaderElectionRecord
+    observedTime   time.Time
+    // used to implement OnNewLeader(), may lag slightly from the
+    // value observedRecord.HolderIdentity if the transition has
+    // not yet been reported.
+    reportedLeader string
+}
+
+// Run starts the leader election loop
+func (le *LeaderElector) Run() {
+    defer func() {
+        runtime.HandleCrash()
+        le.config.Callbacks.OnStoppedLeading()
+    }()
+    le.acquire()
+    stop := make(chan struct{})
+    go le.config.Callbacks.OnStartedLeading(stop)
+    le.renew()
+    close(stop)
+}
+
+// RunOrDie starts a client with the provided config or panics if the config
+// fails to validate.
+func RunOrDie(lec LeaderElectionConfig) {
+    le, err := NewLeaderElector(lec)
+    if err != nil {
+        panic(err)
+    }
+    le.Run()
+}
+
+// GetLeader returns the identity of the last observed leader or returns the empty string if
+// no leader has yet been observed.
+func (le *LeaderElector) GetLeader() string {
+    return le.observedRecord.HolderIdentity
+}
+
+// IsLeader returns true if the last observed leader was this client else returns false.
+func (le *LeaderElector) IsLeader() bool {
+    return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
+}
+
+// acquire loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew succeeds.
+func (le *LeaderElector) acquire() {
+    stop := make(chan struct{})
+    glog.Infof("attempting to acquire leader lease...")
+    wait.JitterUntil(func() {
+        succeeded := le.tryAcquireOrRenew()
+        le.maybeReportTransition()
+        desc := le.config.Lock.Describe()
+        if !succeeded {
+            glog.V(4).Infof("failed to acquire lease %v", desc)
+            return
+        }
+        le.config.Lock.RecordEvent("became leader")
+        glog.Infof("successfully acquired lease %v", desc)
+        close(stop)
+    }, le.config.RetryPeriod, JitterFactor, true, stop)
+}
+
+// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails.
+func (le *LeaderElector) renew() {
+    stop := make(chan struct{})
+    wait.Until(func() {
+        err := wait.Poll(le.config.RetryPeriod, le.config.RenewDeadline, func() (bool, error) {
+            return le.tryAcquireOrRenew(), nil
+        })
+        le.maybeReportTransition()
+        desc := le.config.Lock.Describe()
+        if err == nil {
+            glog.V(4).Infof("successfully renewed lease %v", desc)
+            return
+        }
+        le.config.Lock.RecordEvent("stopped leading")
+        glog.Infof("failed to renew lease %v", desc)
+        close(stop)
+    }, 0, stop)
+}
+
+// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
+// else it tries to renew the lease if it has already been acquired. Returns true
+// on success else returns false.
+func (le *LeaderElector) tryAcquireOrRenew() bool {
+    now := metav1.Now()
+    leaderElectionRecord := rl.LeaderElectionRecord{
+        HolderIdentity:       le.config.Lock.Identity(),
+        LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
+        RenewTime:            now,
+        AcquireTime:          now,
+    }
+
+    // 1. obtain or create the ElectionRecord
+    oldLeaderElectionRecord, err := le.config.Lock.Get()
+    if err != nil {
+        if !errors.IsNotFound(err) {
+            glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
+            return false
+        }
+        if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
+            glog.Errorf("error initially creating leader election record: %v", err)
+            return false
+        }
+        le.observedRecord = leaderElectionRecord
+        le.observedTime = time.Now()
+        return true
+    }
+
+    // 2. Record obtained, check the Identity & Time
+    if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
+        le.observedRecord = *oldLeaderElectionRecord
+        le.observedTime = time.Now()
+    }
+    if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
+        oldLeaderElectionRecord.HolderIdentity != le.config.Lock.Identity() {
+        glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
+        return false
+    }
+
+    // 3. We're going to try to update. The leaderElectionRecord is set to it's default
+    // here. Let's correct it before updating.
+    if oldLeaderElectionRecord.HolderIdentity == le.config.Lock.Identity() {
+        leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
+        leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
+    } else {
+        leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
+    }
+
+    // update the lock itself
+    if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
+        glog.Errorf("Failed to update lock: %v", err)
+        return false
+    }
+    le.observedRecord = leaderElectionRecord
+    le.observedTime = time.Now()
+    return true
+}
+
+func (l *LeaderElector) maybeReportTransition() {
+    if l.observedRecord.HolderIdentity == l.reportedLeader {
+        return
+    }
+    l.reportedLeader = l.observedRecord.HolderIdentity
+    if l.config.Callbacks.OnNewLeader != nil {
+        go l.config.Callbacks.OnNewLeader(l.reportedLeader)
+    }
+}
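Usage sketch (not part of the diff): a minimal driver for the new package, assuming a resource lock built via the resourcelock package below. The 15s/10s/2s durations are illustrative and chosen to pass the NewLeaderElector checks (LeaseDuration > RenewDeadline, and RenewDeadline > JitterFactor*RetryPeriod, i.e. 10s > 1.2*2s):

```go
package example

import (
	"time"

	"github.com/golang/glog"
	"k8s.io/client-go/tools/leaderelection"
	rl "k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runElection blocks, invoking run for as long as this process holds the lock.
func runElection(lock rl.Interface, run func(stop <-chan struct{})) {
	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() { glog.Info("lost leadership") },
		},
	})
}
```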
25  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/BUILD (generated, vendored, new file)
@@ -0,0 +1,25 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "configmaplock.go",
+        "endpointslock.go",
+        "interface.go",
+    ],
+    tags = ["automanaged"],
+    deps = [
+        "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
+        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
+        "//vendor/k8s.io/client-go/tools/record:go_default_library",
+    ],
+)
109  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go (generated, vendored, new file)
@@ -0,0 +1,109 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resourcelock
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+
+    "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// TODO: This is almost a exact replica of Endpoints lock.
+// going forwards as we self host more and more components
+// and use ConfigMaps as the means to pass that configuration
+// data we will likely move to deprecate the Endpoints lock.
+
+type ConfigMapLock struct {
+    // ConfigMapMeta should contain a Name and a Namespace of an
+    // ConfigMapMeta object that the Leadercmlector will attempt to lead.
+    ConfigMapMeta metav1.ObjectMeta
+    Client        corev1client.ConfigMapsGetter
+    LockConfig    ResourceLockConfig
+    cm            *v1.ConfigMap
+}
+
+// Get returns the cmlection record from a ConfigMap Annotation
+func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, error) {
+    var record LeaderElectionRecord
+    var err error
+    cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{})
+    if err != nil {
+        return nil, err
+    }
+    if cml.cm.Annotations == nil {
+        cml.cm.Annotations = make(map[string]string)
+    }
+    if recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]; found {
+        if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
+            return nil, err
+        }
+    }
+    return &record, nil
+}
+
+// Create attempts to create a LeadercmlectionRecord annotation
+func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error {
+    recordBytes, err := json.Marshal(ler)
+    if err != nil {
+        return err
+    }
+    cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(&v1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      cml.ConfigMapMeta.Name,
+            Namespace: cml.ConfigMapMeta.Namespace,
+            Annotations: map[string]string{
+                LeaderElectionRecordAnnotationKey: string(recordBytes),
+            },
+        },
+    })
+    return err
+}
+
+// Update will update and existing annotation on a given resource.
+func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error {
+    if cml.cm == nil {
+        return errors.New("endpoint not initialized, call get or create first")
+    }
+    recordBytes, err := json.Marshal(ler)
+    if err != nil {
+        return err
+    }
+    cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
+    cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(cml.cm)
+    return err
+}
+
+// RecordEvent in leader cmlection while adding meta-data
+func (cml *ConfigMapLock) RecordEvent(s string) {
+    events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
+    cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
+}
+
+// Describe is used to convert details on current resource lock
+// into a string
+func (cml *ConfigMapLock) Describe() string {
+    return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
+}
+
+// returns the Identity of the lock
+func (cml *ConfigMapLock) Identity() string {
+    return cml.LockConfig.Identity
+}
104  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go (generated, vendored, new file)
@@ -0,0 +1,104 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resourcelock
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+
+    "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+type EndpointsLock struct {
+    // EndpointsMeta should contain a Name and a Namespace of an
+    // Endpoints object that the LeaderElector will attempt to lead.
+    EndpointsMeta metav1.ObjectMeta
+    Client        corev1client.EndpointsGetter
+    LockConfig    ResourceLockConfig
+    e             *v1.Endpoints
+}
+
+// Get returns the election record from a Endpoints Annotation
+func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) {
+    var record LeaderElectionRecord
+    var err error
+    el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{})
+    if err != nil {
+        return nil, err
+    }
+    if el.e.Annotations == nil {
+        el.e.Annotations = make(map[string]string)
+    }
+    if recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]; found {
+        if err := json.Unmarshal([]byte(recordBytes), &record); err != nil {
+            return nil, err
+        }
+    }
+    return &record, nil
+}
+
+// Create attempts to create a LeaderElectionRecord annotation
+func (el *EndpointsLock) Create(ler LeaderElectionRecord) error {
+    recordBytes, err := json.Marshal(ler)
+    if err != nil {
+        return err
+    }
+    el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      el.EndpointsMeta.Name,
+            Namespace: el.EndpointsMeta.Namespace,
+            Annotations: map[string]string{
+                LeaderElectionRecordAnnotationKey: string(recordBytes),
+            },
+        },
+    })
+    return err
+}
+
+// Update will update and existing annotation on a given resource.
+func (el *EndpointsLock) Update(ler LeaderElectionRecord) error {
+    if el.e == nil {
+        return errors.New("endpoint not initialized, call get or create first")
+    }
+    recordBytes, err := json.Marshal(ler)
+    if err != nil {
+        return err
+    }
+    el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
+    el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e)
+    return err
+}
+
+// RecordEvent in leader election while adding meta-data
+func (el *EndpointsLock) RecordEvent(s string) {
+    events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
+    el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events)
+}
+
+// Describe is used to convert details on current resource lock
+// into a string
+func (el *EndpointsLock) Describe() string {
+    return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
+}
+
+// returns the Identity of the lock
+func (el *EndpointsLock) Identity() string {
+    return el.LockConfig.Identity
+}
102  vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go (generated, vendored, new file)
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resourcelock
+
+import (
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    cs "k8s.io/client-go/kubernetes"
+    "k8s.io/client-go/tools/record"
+)
+
+const (
+    LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
+    EndpointsResourceLock             = "endpoints"
+    ConfigMapsResourceLock            = "configmaps"
+)
+
+// LeaderElectionRecord is the record that is stored in the leader election annotation.
+// This information should be used for observational purposes only and could be replaced
+// with a random string (e.g. UUID) with only slight modification of this code.
+// TODO(mikedanese): this should potentially be versioned
+type LeaderElectionRecord struct {
+    HolderIdentity       string      `json:"holderIdentity"`
+    LeaseDurationSeconds int         `json:"leaseDurationSeconds"`
+    AcquireTime          metav1.Time `json:"acquireTime"`
+    RenewTime            metav1.Time `json:"renewTime"`
+    LeaderTransitions    int         `json:"leaderTransitions"`
+}
+
+// ResourceLockConfig common data that exists across different
+// resource locks
+type ResourceLockConfig struct {
+    Identity      string
+    EventRecorder record.EventRecorder
+}
+
+// Interface offers a common interface for locking on arbitrary
+// resources used in leader election. The Interface is used
+// to hide the details on specific implementations in order to allow
+// them to change over time. This interface is strictly for use
+// by the leaderelection code.
+type Interface interface {
+    // Get returns the LeaderElectionRecord
+    Get() (*LeaderElectionRecord, error)
+
+    // Create attempts to create a LeaderElectionRecord
+    Create(ler LeaderElectionRecord) error
+
+    // Update will update and existing LeaderElectionRecord
+    Update(ler LeaderElectionRecord) error
+
+    // RecordEvent is used to record events
+    RecordEvent(string)
+
+    // Identity will return the locks Identity
+    Identity() string
+
+    // Describe is used to convert details on current resource lock
+    // into a string
+    Describe() string
+}
+
+// Manufacture will create a lock of a given type according to the input parameters
+func New(lockType string, ns string, name string, client *cs.Clientset, rlc ResourceLockConfig) (Interface, error) {
+    switch lockType {
+    case EndpointsResourceLock:
+        return &EndpointsLock{
+            EndpointsMeta: metav1.ObjectMeta{
+                Namespace: ns,
+                Name:      name,
+            },
+            Client:     client,
+            LockConfig: rlc,
+        }, nil
+    case ConfigMapsResourceLock:
+        return &ConfigMapLock{
+            ConfigMapMeta: metav1.ObjectMeta{
+                Namespace: ns,
+                Name:      name,
+            },
+            Client:     client,
+            LockConfig: rlc,
+        }, nil
+    default:
+        return nil, fmt.Errorf("Invalid lock-type %s", lockType)
+    }
+}
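Usage sketch (not part of the diff): building a lock through the New factory above. The "kube-system"/"my-controller" names are illustrative; the identity is typically hostname plus a UUID:

```go
package example

import (
	"k8s.io/client-go/kubernetes"
	rl "k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
)

// newLock builds an Endpoints-backed resource lock for leader election.
func newLock(client *kubernetes.Clientset, recorder record.EventRecorder, id string) (rl.Interface, error) {
	return rl.New(rl.EndpointsResourceLock, "kube-system", "my-controller", client,
		rl.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		})
}
```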
8  vendor/k8s.io/client-go/tools/record/BUILD (generated, vendored)
@@ -17,16 +17,16 @@ go_test(
     library = ":go_default_library",
     tags = ["automanaged"],
     deps = [
+        "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
-        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
         "//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/util/clock:go_default_library",
     ],
 )
 
@@ -42,16 +42,16 @@ go_library(
     deps = [
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/golang/groupcache/lru:go_default_library",
+        "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
         "//vendor/k8s.io/client-go/pkg/api/v1/ref:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/util/clock:go_default_library",
     ],
 )
4  vendor/k8s.io/client-go/tools/record/event.go (generated, vendored)
@@ -21,15 +21,15 @@ import (
     "math/rand"
     "time"
 
+    "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/clock"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/pkg/api/v1"
     "k8s.io/client-go/pkg/api/v1/ref"
     restclient "k8s.io/client-go/rest"
-    "k8s.io/client-go/util/clock"
 
     "net/http"
 
57  vendor/k8s.io/client-go/tools/record/events_cache.go (generated, vendored)
@@ -25,11 +25,11 @@ import (
 
     "github.com/golang/groupcache/lru"
 
+    "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/strategicpatch"
-    "k8s.io/client-go/pkg/api/v1"
-    "k8s.io/client-go/util/clock"
 )
 
 const (
@@ -49,6 +49,7 @@ func getEventKey(event *v1.Event) string {
         event.InvolvedObject.Kind,
         event.InvolvedObject.Namespace,
         event.InvolvedObject.Name,
+        event.InvolvedObject.FieldPath,
         string(event.InvolvedObject.UID),
         event.InvolvedObject.APIVersion,
         event.Type,
@@ -93,7 +94,7 @@ type EventAggregatorMessageFunc func(event *v1.Event) string
 
 // EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
 func EventAggregatorByReasonMessageFunc(event *v1.Event) string {
-    return "(events with common reason combined)"
+    return "(combined from similar events): " + event.Message
 }
 
 // EventAggregator identifies similar events and aggregates them into a single event
@@ -141,11 +142,22 @@ type aggregateRecord struct {
     lastTimestamp metav1.Time
 }
 
-// EventAggregate identifies similar events and groups into a common event if required
-func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, error) {
-    aggregateKey, localKey := e.keyFunc(newEvent)
+// EventAggregate checks if a similar event has been seen according to the
+// aggregation configuration (max events, max interval, etc) and returns:
+//
+// - The (potentially modified) event that should be created
+// - The cache key for the event, for correlation purposes. This will be set to
+//   the full key for normal events, and to the result of
+//   EventAggregatorMessageFunc for aggregate events.
+func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) {
     now := metav1.NewTime(e.clock.Now())
-    record := aggregateRecord{localKeys: sets.NewString(), lastTimestamp: now}
+    var record aggregateRecord
+    // eventKey is the full cache key for this event
+    eventKey := getEventKey(newEvent)
+    // aggregateKey is for the aggregate event, if one is needed.
+    aggregateKey, localKey := e.keyFunc(newEvent)
+
+    // Do we have a record of similar events in our cache?
     e.Lock()
     defer e.Unlock()
     value, found := e.cache.Get(aggregateKey)
@@ -153,24 +165,30 @@ func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, error)
         record = value.(aggregateRecord)
     }
 
-    // if the last event was far enough in the past, it is not aggregated, and we must reset state
+    // Is the previous record too old? If so, make a fresh one. Note: if we didn't
+    // find a similar record, its lastTimestamp will be the zero value, so we
+    // create a new one in that case.
     maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second
     interval := now.Time.Sub(record.lastTimestamp.Time)
     if interval > maxInterval {
         record = aggregateRecord{localKeys: sets.NewString()}
     }
+
+    // Write the new event into the aggregation record and put it on the cache
     record.localKeys.Insert(localKey)
     record.lastTimestamp = now
     e.cache.Add(aggregateKey, record)
 
+    // If we are not yet over the threshold for unique events, don't correlate them
     if uint(record.localKeys.Len()) < e.maxEvents {
-        return newEvent, nil
+        return newEvent, eventKey
     }
 
     // do not grow our local key set any larger than max
     record.localKeys.PopAny()
 
-    // create a new aggregate event
+    // create a new aggregate event, and return the aggregateKey as the cache key
+    // (so that it can be overwritten.)
     eventCopy := &v1.Event{
         ObjectMeta: metav1.ObjectMeta{
             Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()),
@@ -185,7 +203,7 @@ func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, error)
         Reason: newEvent.Reason,
         Source: newEvent.Source,
     }
-    return eventCopy, nil
+    return eventCopy, aggregateKey
 }
 
 // eventLog records data about when an event was observed
@@ -215,22 +233,22 @@ func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger {
     return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock}
 }
 
-// eventObserve records the event, and determines if its frequency should update
-func (e *eventLogger) eventObserve(newEvent *v1.Event) (*v1.Event, []byte, error) {
+// eventObserve records an event, or updates an existing one if key is a cache hit
+func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) {
     var (
         patch []byte
         err   error
     )
-    key := getEventKey(newEvent)
     eventCopy := *newEvent
     event := &eventCopy
 
     e.Lock()
     defer e.Unlock()
 
+    // Check if there is an existing event we should update
     lastObservation := e.lastEventObservationFromCache(key)
 
-    // we have seen this event before, so we must prepare a patch
+    // If we found a result, prepare a patch
     if lastObservation.count > 0 {
         // update the event based on the last observation so patch will work as desired
         event.Name = lastObservation.name
@@ -241,6 +259,7 @@ func (e *eventLogger) eventObserve(newEvent *v1.Event) (*v1.Event, []byte, error
         eventCopy2 := *event
         eventCopy2.Count = 0
         eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0))
+        eventCopy2.Message = ""
 
         newData, _ := json.Marshal(event)
         oldData, _ := json.Marshal(eventCopy2)
@@ -337,6 +356,7 @@ func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
             defaultAggregateMaxEvents,
             defaultAggregateIntervalInSeconds,
             clock),
+
         logger: newEventLogger(cacheSize, clock),
     }
 }
@@ -346,11 +366,8 @@ func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateRes
     if c.filterFunc(newEvent) {
         return &EventCorrelateResult{Skip: true}, nil
     }
-    aggregateEvent, err := c.aggregator.EventAggregate(newEvent)
-    if err != nil {
-        return &EventCorrelateResult{}, err
-    }
-    observedEvent, patch, err := c.logger.eventObserve(aggregateEvent)
+    aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent)
+    observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey)
     return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err
 }
 
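Usage sketch (not part of the diff): EventAggregate now returns a cache key instead of an error, which eventObserve uses to update aggregated events in place; callers only touch the exported EventCorrelator, whose surface is unchanged apart from the internals above. A minimal sketch; the helper name is illustrative:

```go
package example

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/client-go/tools/record"
)

// correlate runs an event through the correlator; similar events past the
// aggregation threshold come back as a single "(combined from similar
// events): ..." event plus a patch for the existing record.
func correlate(event *v1.Event) (*record.EventCorrelateResult, error) {
	c := record.NewEventCorrelator(clock.RealClock{})
	return c.EventCorrelate(event)
}
```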