Update godeps

Manuel de Brito Fontes 2017-01-05 18:10:28 -03:00
parent 597a0e691a
commit 9085e24a29
50 changed files with 5284 additions and 4676 deletions

View file

@@ -80,7 +80,15 @@ func BeforeDelete(strategy RESTDeleteStrategy, ctx api.Context, obj runtime.Obje
// if we are already being deleted, we may only shorten the deletion grace period
// this means the object was gracefully deleted previously but deletionGracePeriodSeconds was not set,
// so we force deletion immediately
if objectMeta.DeletionGracePeriodSeconds == nil {
// IMPORTANT:
// The deletion operation happens in two phases.
// 1. Update to set DeletionGracePeriodSeconds and DeletionTimestamp
// 2. Delete the object from storage.
// If the update succeeds, but the delete fails (network error, internal storage error, etc.),
// a resource was previously left in a state that was non-recoverable. We
// check if the existing stored resource has a grace period of 0 and if so
// attempt to delete immediately in order to recover from this scenario.
if objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds == 0 {
return false, false, nil
}
// only a shorter grace period may be provided by a user
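The nil-or-zero check above is the crux of the recovery path. A minimal standalone sketch of the same pattern, with a hypothetical `meta` struct standing in for the real API object metadata:

```go
package main

import "fmt"

// meta is a stand-in for the real ObjectMeta; only the field that
// matters to the check is modeled here.
type meta struct {
	DeletionGracePeriodSeconds *int64
}

// shouldDeleteImmediately mirrors the updated condition: recover by
// deleting right away when the grace period was never set, or when a
// previous graceful deletion already drove it to zero (phase 1
// succeeded, phase 2 failed).
func shouldDeleteImmediately(m meta) bool {
	return m.DeletionGracePeriodSeconds == nil || *m.DeletionGracePeriodSeconds == 0
}

func main() {
	zero, thirty := int64(0), int64(30)
	fmt.Println(shouldDeleteImmediately(meta{nil}))     // true: never set
	fmt.Println(shouldDeleteImmediately(meta{&zero}))   // true: stuck at 0 after a failed delete
	fmt.Println(shouldDeleteImmediately(meta{&thirty})) // false: still draining gracefully
}
```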

View file

@@ -289,6 +289,10 @@ type StorageMetadata interface {
// ProducesMIMETypes returns a list of the MIME types the specified HTTP verb (GET, POST, DELETE,
// PATCH) can respond with.
ProducesMIMETypes(verb string) []string
// ProducesObject returns an object the specified HTTP verb responds with. It will overwrite the storage object if
// it is not nil. Only the type of the return object matters, the value will be ignored.
ProducesObject(verb string) interface{}
}
// ConnectRequest is an object passed to admission control for Connect operations
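A hedged sketch of how a storage type might satisfy the extended interface — the `podStorage` type and its return values are illustrative, not taken from the commit:

```go
package main

import "fmt"

// StorageMetadata matches the interface shown above.
type StorageMetadata interface {
	ProducesMIMETypes(verb string) []string
	// ProducesObject: only the *type* of the returned value matters.
	ProducesObject(verb string) interface{}
}

// Pod is a placeholder object type for illustration.
type Pod struct{}

// podStorage is a hypothetical implementation.
type podStorage struct{}

func (podStorage) ProducesMIMETypes(verb string) []string {
	return []string{"application/json"}
}

func (podStorage) ProducesObject(verb string) interface{} {
	// Returning a zero value is fine: the value is ignored, the type
	// is used as a serialization/documentation hint.
	return Pod{}
}

func main() {
	var s StorageMetadata = podStorage{}
	fmt.Printf("%T\n", s.ProducesObject("GET")) // main.Pod
}
```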

View file

@@ -2665,15 +2665,6 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
}
}
// TODO(freehan): allow user to update loadbalancerSourceRanges
// Only allow removing LoadBalancerSourceRanges when change service type from LoadBalancer
// to non-LoadBalancer or adding LoadBalancerSourceRanges when change service type from
// non-LoadBalancer to LoadBalancer.
if service.Spec.Type != api.ServiceTypeLoadBalancer && oldService.Spec.Type != api.ServiceTypeLoadBalancer ||
service.Spec.Type == api.ServiceTypeLoadBalancer && oldService.Spec.Type == api.ServiceTypeLoadBalancer {
allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
}
allErrs = append(allErrs, validateServiceFields(service)...)
allErrs = append(allErrs, validateServiceAnnotations(service, oldService)...)
return allErrs
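The removed condition is easier to read once the two clauses are named: the field was treated as immutable exactly when the service type did not cross the LoadBalancer boundary. A small truth-table sketch, with the types reduced to booleans for illustration:

```go
package main

import "fmt"

// sourceRangesImmutable reproduces the removed condition with the
// service types reduced to "is it a LoadBalancer" booleans.
func sourceRangesImmutable(newIsLB, oldIsLB bool) bool {
	return (!newIsLB && !oldIsLB) || (newIsLB && oldIsLB) // i.e. newIsLB == oldIsLB
}

func main() {
	for _, c := range []struct{ new, old bool }{
		{false, false}, {false, true}, {true, false}, {true, true},
	} {
		fmt.Printf("new LB=%v old LB=%v -> immutable=%v\n",
			c.new, c.old, sourceRangesImmutable(c.new, c.old))
	}
	// Only transitions to or from type LoadBalancer allowed the
	// field to change; the deletion lifts that restriction.
}
```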

File diff suppressed because it is too large

View file

@@ -424,6 +424,9 @@ type KubeletConfiguration struct {
// Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.
// +optional
EvictionMinimumReclaim string `json:"evictionMinimumReclaim,omitempty"`
// If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.
// +optional
ExperimentalKernelMemcgNotification bool `json:"experimentalKernelMemcgNotification"`
// Maximum number of pods per core. Cannot exceed MaxPods
PodsPerCore int32 `json:"podsPerCore"`
// enableControllerAttachDetach enables the Attach/Detach controller to

View file

@@ -374,6 +374,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
if obj.EvictionPressureTransitionPeriod == zeroDuration {
obj.EvictionPressureTransitionPeriod = unversioned.Duration{Duration: 5 * time.Minute}
}
if obj.ExperimentalKernelMemcgNotification == nil {
obj.ExperimentalKernelMemcgNotification = boolVar(false)
}
if obj.SystemReserved == nil {
obj.SystemReserved = make(map[string]string)
}
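The `== nil` check only works because the versioned field is a `*bool` (see the `*bool` hunk below); `boolVar` is assumed to be a small helper returning a pointer to its argument. A self-contained sketch of the defaulting pattern:

```go
package main

import "fmt"

// boolVar is assumed to be a tiny pointer-making helper, as used above.
func boolVar(b bool) *bool { return &b }

type kubeletConfig struct {
	ExperimentalKernelMemcgNotification *bool
}

// setDefaults fills in a field only if the user left it unset; a plain
// bool could not distinguish "unset" from an explicit false.
func setDefaults(obj *kubeletConfig) {
	if obj.ExperimentalKernelMemcgNotification == nil {
		obj.ExperimentalKernelMemcgNotification = boolVar(false)
	}
}

func main() {
	var cfg kubeletConfig
	setDefaults(&cfg)
	fmt.Println(*cfg.ExperimentalKernelMemcgNotification) // false, but explicitly set
}
```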

View file

@@ -462,6 +462,8 @@ type KubeletConfiguration struct {
EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod"`
// Comma-delimited list of minimum reclaims (e.g. imagefs.available=2Gi) that describes the minimum amount of resource the kubelet will reclaim when performing a pod eviction if that resource is under pressure.
EvictionMinimumReclaim string `json:"evictionMinimumReclaim"`
// If enabled, the kubelet will integrate with the kernel memcg notification to determine if memory eviction thresholds are crossed rather than polling.
ExperimentalKernelMemcgNotification *bool `json:"experimentalKernelMemcgNotification"`
// Maximum number of pods per core. Cannot exceed MaxPods
PodsPerCore int32 `json:"podsPerCore"`
// enableControllerAttachDetach enables the Attach/Detach controller to

View file

@@ -387,6 +387,9 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification, s); err != nil {
return err
}
out.PodsPerCore = in.PodsPerCore
if err := api.Convert_Pointer_bool_To_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil {
return err
@@ -556,6 +559,9 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification, s); err != nil {
return err
}
out.PodsPerCore = in.PodsPerCore
if err := api.Convert_bool_To_Pointer_bool(&in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach, s); err != nil {
return err
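The generated converters bridge the versioned `*bool` and the internal `bool`. A minimal sketch of what the two helpers plausibly do — the real generated functions also thread a conversion scope, elided here:

```go
package main

import "fmt"

// convertPointerBoolToBool treats a nil pointer as the zero value,
// mirroring what a Convert_Pointer_bool_To_bool helper must do.
func convertPointerBoolToBool(in *bool, out *bool) error {
	if in != nil {
		*out = *in
	} else {
		*out = false
	}
	return nil
}

// convertBoolToPointerBool always produces a non-nil pointer so the
// round trip internal -> versioned is lossless.
func convertBoolToPointerBool(in bool, out **bool) error {
	*out = &in
	return nil
}

func main() {
	var internal bool
	if err := convertPointerBoolToBool(nil, &internal); err != nil {
		panic(err)
	}
	fmt.Println(internal) // false: nil collapses to the zero value

	var versioned *bool
	if err := convertBoolToPointerBool(true, &versioned); err != nil {
		panic(err)
	}
	fmt.Println(*versioned) // true
}
```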

View file

@@ -403,6 +403,13 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
if in.ExperimentalKernelMemcgNotification != nil {
in, out := &in.ExperimentalKernelMemcgNotification, &out.ExperimentalKernelMemcgNotification
*out = new(bool)
**out = **in
} else {
out.ExperimentalKernelMemcgNotification = nil
}
out.PodsPerCore = in.PodsPerCore
if in.EnableControllerAttachDetach != nil {
in, out := &in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach
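The generated deep copy must allocate a fresh `*bool` rather than alias the source pointer. The same allocate-and-dereference dance, isolated:

```go
package main

import "fmt"

type config struct {
	Flag *bool
}

// deepCopy clones the pointer's target so mutating the copy can never
// affect the original.
func deepCopy(in *config, out *config) {
	if in.Flag != nil {
		out.Flag = new(bool)
		*out.Flag = *in.Flag
	} else {
		out.Flag = nil
	}
}

func main() {
	t := true
	src := config{Flag: &t}
	var dst config
	deepCopy(&src, &dst)
	*dst.Flag = false
	fmt.Println(*src.Flag, *dst.Flag) // true false: no aliasing
}
```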

View file

@@ -358,6 +358,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
out.ExperimentalKernelMemcgNotification = in.ExperimentalKernelMemcgNotification
out.PodsPerCore = in.PodsPerCore
out.EnableControllerAttachDetach = in.EnableControllerAttachDetach
if in.SystemReserved != nil {

View file

@@ -259,12 +259,16 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
r.setLastSyncResourceVersion(resourceVersion)
resyncerrc := make(chan error, 1)
cancelCh := make(chan struct{})
defer close(cancelCh)
go func() {
for {
select {
case <-resyncCh:
case <-stopCh:
return
case <-cancelCh:
return
}
glog.V(4).Infof("%s: forcing resync", r.name)
if err := r.store.Resync(); err != nil {
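The added `cancelCh`, closed by the `defer`, gives ListAndWatch a way to stop its resync goroutine even when the shared `stopCh` stays open. A minimal reproduction of the pattern, with the resync work stubbed out:

```go
package main

import (
	"fmt"
	"time"
)

func listAndWatch(stopCh <-chan struct{}) {
	resyncTick := time.NewTicker(10 * time.Millisecond)
	defer resyncTick.Stop()

	cancelCh := make(chan struct{})
	defer close(cancelCh) // stops the helper when this call returns

	go func() {
		for {
			select {
			case <-resyncTick.C:
			case <-stopCh:
				return
			case <-cancelCh:
				return // exit even if stopCh is still open
			}
			fmt.Println("forcing resync") // stand-in for store.Resync()
		}
	}()

	time.Sleep(35 * time.Millisecond) // pretend to watch, then return
}

func main() {
	stopCh := make(chan struct{})
	listAndWatch(stopCh) // returns; its goroutine exits via cancelCh
	time.Sleep(20 * time.Millisecond)
	fmt.Println("no goroutine leak despite stopCh never closing")
}
```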

View file

@@ -531,17 +531,23 @@ func (c *Cacher) dispatchEvents() {
func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
triggerValues, supported := c.triggerValues(event)
// TODO: For now we assume we have a given <timeout> budget for dispatching
// a single event. We should consider changing to the approach with:
// - budget has upper bound at <max_timeout>
// - we add <portion> to current timeout every second
timeout := time.Duration(250) * time.Millisecond
c.Lock()
defer c.Unlock()
// Iterate over "allWatchers" no matter what the trigger function is.
for _, watcher := range c.watchers.allWatchers {
watcher.add(event)
watcher.add(event, &timeout)
}
if supported {
// Iterate over watchers interested in the given values of the trigger.
for _, triggerValue := range triggerValues {
for _, watcher := range c.watchers.valueWatchers[triggerValue] {
watcher.add(event)
watcher.add(event, &timeout)
}
}
} else {
@@ -554,7 +560,7 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
// Iterate over watchers interested in exact values for all values.
for _, watchers := range c.watchers.valueWatchers {
for _, watcher := range watchers {
watcher.add(event)
watcher.add(event, &timeout)
}
}
}
@@ -728,7 +734,7 @@ func (c *cacheWatcher) stop() {
var timerPool sync.Pool
func (c *cacheWatcher) add(event *watchCacheEvent) {
func (c *cacheWatcher) add(event *watchCacheEvent, timeout *time.Duration) {
// Try to send the event immediately, without blocking.
select {
case c.input <- *event:
@@ -736,20 +742,16 @@ func (c *cacheWatcher) add(event *watchCacheEvent) {
default:
}
// OK, block sending, but only for up to 5 seconds.
// OK, block sending, but only for up to <timeout>.
// cacheWatcher.add is called very often, so arrange
// to reuse timers instead of constantly allocating.
trace := util.NewTrace(
fmt.Sprintf("cacheWatcher %v: waiting for add (initial result size %v)",
reflect.TypeOf(event.Object).String(), len(c.result)))
defer trace.LogIfLong(50 * time.Millisecond)
startTime := time.Now()
const timeout = 5 * time.Second
t, ok := timerPool.Get().(*time.Timer)
if ok {
t.Reset(timeout)
t.Reset(*timeout)
} else {
t = time.NewTimer(timeout)
t = time.NewTimer(*timeout)
}
defer timerPool.Put(t)
@@ -768,6 +770,10 @@ func (c *cacheWatcher) add(event *watchCacheEvent) {
c.forget(false)
c.stop()
}
if *timeout = *timeout - time.Since(startTime); *timeout < 0 {
*timeout = 0
}
}
// NOTE: sendWatchCacheEvent is assumed to not modify <event> !!!
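Two things happen in this hunk: the per-event 250 ms budget is shared across all watchers (each `add` subtracts its elapsed time from `*timeout`, so the budget only shrinks), and expired timers are recycled through a `sync.Pool`. A compressed sketch of both ideas, with the watcher reduced to a bare channel and the numbers illustrative:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

var timerPool sync.Pool

// send tries a non-blocking send first, then blocks for at most
// *timeout, and finally shrinks the shared budget by the time spent.
func send(ch chan int, v int, timeout *time.Duration) bool {
	select {
	case ch <- v:
		return true
	default:
	}

	start := time.Now()
	t, ok := timerPool.Get().(*time.Timer)
	if ok {
		t.Reset(*timeout)
	} else {
		t = time.NewTimer(*timeout)
	}
	defer timerPool.Put(t)

	ok = false
	select {
	case ch <- v:
		ok = true
		if !t.Stop() {
			<-t.C // drain so the pooled timer is reusable
		}
	case <-t.C:
	}
	if *timeout = *timeout - time.Since(start); *timeout < 0 {
		*timeout = 0
	}
	return ok
}

func main() {
	slow := make(chan int) // nobody reading: every send eats budget
	budget := 50 * time.Millisecond
	for i := 0; i < 3; i++ {
		fmt.Println(send(slow, i, &budget), budget)
	}
	// The budget only shrinks, so one slow watcher cannot stall
	// dispatch for the full timeout per watcher.
}
```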

View file

@@ -20,7 +20,7 @@ go_library(
deps = [
"//pkg/util/clock:go_default_library",
"//pkg/util/integer:go_default_library",
"//pkg/util/ratelimit:go_default_library",
"//vendor:github.com/juju/ratelimit",
],
)

View file

@@ -19,7 +19,7 @@ package flowcontrol
import (
"sync"
"k8s.io/kubernetes/pkg/util/ratelimit"
"github.com/juju/ratelimit"
)
type RateLimiter interface {

View file

@@ -74,9 +74,9 @@ func (mounter *Mounter) Mount(source string, target string, fstype string, optio
}
return doMount(mounterPath, source, target, fstype, bindRemountOpts)
}
// These filesystem types are expected to be supported by the mount utility on the host across all Linux distros.
var defaultMounterFsTypes = sets.NewString("tmpfs", "ext4", "ext3", "ext2")
if !defaultMounterFsTypes.Has(fstype) {
// The list of filesystems that require containerized mounter on GCI image cluster
fsTypesNeedMounter := sets.NewString("nfs", "glusterfs")
if fsTypesNeedMounter.Has(fstype) {
mounterPath = mounter.mounterPath
}
return doMount(mounterPath, source, target, fstype, options)
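The change inverts the selection: instead of routing every filesystem outside a known-safe set through the containerized mounter, only the two network filesystems that actually need it are routed there. A sketch of the new decision; the mounter path is illustrative:

```go
package main

import "fmt"

// fsTypesNeedMounter mirrors the allowlist in the new code: only these
// network filesystems go through the containerized mounter.
var fsTypesNeedMounter = map[string]bool{"nfs": true, "glusterfs": true}

// pickMounterPath returns the containerized mounter path for the few
// filesystems that need it and the default (empty) path otherwise.
func pickMounterPath(fstype, containerizedMounter string) string {
	if fsTypesNeedMounter[fstype] {
		return containerizedMounter
	}
	return ""
}

func main() {
	for _, fs := range []string{"ext4", "nfs", "xfs", "glusterfs"} {
		fmt.Printf("%-9s -> mounterPath=%q\n",
			fs, pickMounterPath(fs, "/containerized_mounter/mounter"))
	}
	// ext4/xfs now use the host mount binary even though they were not
	// in the old default set; nfs and glusterfs do not, because the
	// host image lacks their mount helpers.
}
```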

View file

@@ -23,8 +23,6 @@ import (
"time"
)
var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789")
var numLetters = len(letters)
var rng = struct {
sync.Mutex
rand *rand.Rand
@@ -72,12 +70,16 @@ func Perm(n int) []int {
return rng.rand.Perm(n)
}
// String generates a random alphanumeric string n characters long. This will
// panic if n is less than zero.
// We omit vowels from the set of available characters to reduce the chances
// of "bad words" being formed.
var alphanums = []rune("bcdfghjklmnpqrstvwxz0123456789")
// String generates a random alphanumeric string, without vowels, which is n
// characters long. This will panic if n is less than zero.
func String(length int) string {
b := make([]rune, length)
for i := range b {
b[i] = letters[Intn(numLetters)]
b[i] = alphanums[Intn(len(alphanums))]
}
return string(b)
}
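A quick usage sketch of the new alphabet — note the standard library's `rand.Intn` stands in for the package's mutex-guarded `Intn` shown above:

```go
package main

import (
	"fmt"
	"math/rand"
)

// alphanums matches the new vowel-free alphabet: 20 consonants plus
// 10 digits, so no accidental words can be spelled.
var alphanums = []rune("bcdfghjklmnpqrstvwxz0123456789")

func randString(length int) string {
	b := make([]rune, length)
	for i := range b {
		b[i] = alphanums[rand.Intn(len(alphanums))]
	}
	return string(b)
}

func main() {
	// Typical use in Kubernetes: a random suffix for generated names.
	fmt.Println("mypod-" + randString(5))
}
```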

View file

@@ -1,25 +0,0 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["bucket.go"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["bucket_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [],
)

View file

@@ -1,170 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ratelimit
import (
"math"
"sync"
"time"
)
// Bucket models a token bucket
type Bucket struct {
unitsPerNano float64
nanosPerUnit float64
capacity int64
mutex sync.Mutex
available int64
lastRefill int64
// fractionalAvailable "buffers" any amounts that flowed into the bucket smaller than one unit
// This lets us retain precision even with pathological refill rates like (1E9 + 1) per second
fractionalAvailable float64
}
// NewBucketWithRate creates a new token bucket, with maximum capacity = initial capacity, and a refill rate of qps
// We use floats for refill calculations, which introduces the possibility of truncation and rounding errors.
// For "sensible" qps values though, is is acceptable: jbeda did some tests here https://play.golang.org/p/LSKUOGz2LG
func NewBucketWithRate(qps float64, capacity int64) *Bucket {
unitsPerNano := qps / 1E9
nanosPerUnit := 1E9 / qps
b := &Bucket{
unitsPerNano: unitsPerNano,
nanosPerUnit: nanosPerUnit,
capacity: capacity,
available: capacity,
lastRefill: time.Now().UnixNano(),
}
return b
}
// Take takes n units from the bucket, reducing the available quantity even below zero,
// but then returns the amount of time we should wait
func (b *Bucket) Take(n int64) time.Duration {
b.mutex.Lock()
defer b.mutex.Unlock()
var d time.Duration
if b.available >= n {
// Fast path when bucket has sufficient availability before refilling
} else {
b.refill()
if b.available < n {
deficit := n - b.available
d = time.Duration(int64(float64(deficit) * b.nanosPerUnit))
}
}
b.available -= n
return d
}
// TakeAvailable immediately takes whatever quantity is available, up to max
func (b *Bucket) TakeAvailable(max int64) int64 {
b.mutex.Lock()
defer b.mutex.Unlock()
var took int64
if b.available >= max {
// Fast path when bucket has sufficient availability before refilling
took = max
} else {
b.refill()
took = b.available
if took < 0 {
took = 0
} else if took > max {
took = max
}
}
if took > 0 {
b.available -= took
}
return took
}
// Wait combines a call to Take with a sleep call
func (b *Bucket) Wait(n int64) {
d := b.Take(n)
if d != 0 {
time.Sleep(d)
}
}
// Capacity returns the maximum capacity of the bucket
func (b *Bucket) Capacity() int64 {
return b.capacity
}
// Available returns the quantity available in the bucket (which may be negative), but does not take it.
// This function is for diagnostic / informational purposes only - the returned capacity may immediately
// be inaccurate if another thread is operating on the bucket concurrently.
func (b *Bucket) Available() int64 {
b.mutex.Lock()
defer b.mutex.Unlock()
b.refill()
return b.available
}
// refill replenishes the bucket based on elapsed time; mutex must be held
func (b *Bucket) refill() {
// Note that we really want a monotonic clock here, but go says no:
// https://github.com/golang/go/issues/12914
now := time.Now().UnixNano()
b.refillAtTimestamp(now)
}
// refillAtTimestamp is the logic of the refill function, for testing
func (b *Bucket) refillAtTimestamp(now int64) {
nanosSinceLastRefill := now - b.lastRefill
if nanosSinceLastRefill <= 0 {
// we really want monotonic
return
}
// Compute units that have flowed into bucket
refillFloat := (float64(nanosSinceLastRefill) * b.unitsPerNano) + b.fractionalAvailable
if refillFloat > float64(b.capacity) {
// float64 > MaxInt64 can be converted to negative int64; side step this
b.available = b.capacity
// Don't worry about the fractional units with huge refill rates
} else {
whole, fraction := math.Modf(refillFloat)
refill := int64(whole)
b.fractionalAvailable = fraction
if refill != 0 {
// Refill with overflow
b.available += refill
if b.available >= b.capacity {
b.available = b.capacity
b.fractionalAvailable = 0
}
}
}
b.lastRefill = now
}
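For reference before the file disappears: the subtle part of the deleted bucket is the fractional carry in refillAtTimestamp, which keeps precision at pathological rates. A self-contained sketch of just that arithmetic, with a contrived rate chosen so every value is a dyadic fraction and the results are exact:

```go
package main

import (
	"fmt"
	"math"
)

// refill isolates the arithmetic of the deleted refillAtTimestamp:
// elapsed nanoseconds are converted to units, the sub-unit remainder
// is carried forward, and the total is clamped at capacity.
func refill(available, capacity int64, fractional, unitsPerNano float64, elapsedNanos int64) (int64, float64) {
	refillFloat := float64(elapsedNanos)*unitsPerNano + fractional
	if refillFloat > float64(capacity) {
		// float64 larger than MaxInt64 would convert to a negative
		// int64, so clamp before converting.
		return capacity, 0
	}
	whole, fraction := math.Modf(refillFloat)
	available += int64(whole)
	if available >= capacity {
		return capacity, 0
	}
	return available, fraction
}

func main() {
	// Contrived rate of 0.5 units/ns with 5 ns steps: 2.5 units flow
	// in per step, and the 0.5 remainder is never truncated away.
	avail, frac := int64(0), 0.0
	for i := 0; i < 5; i++ {
		avail, frac = refill(avail, 100, frac, 0.5, 5)
		fmt.Println(avail, frac) // 2 0.5, 5 0, 7 0.5, 10 0, 12 0.5
	}
}
```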

View file

@@ -25,8 +25,8 @@ go_library(
tags = ["automanaged"],
deps = [
"//pkg/util/clock:go_default_library",
"//pkg/util/ratelimit:go_default_library",
"//pkg/util/runtime:go_default_library",
"//vendor:github.com/juju/ratelimit",
],
)

View file

@@ -21,7 +21,7 @@ import (
"sync"
"time"
"k8s.io/kubernetes/pkg/util/ratelimit"
"github.com/juju/ratelimit"
)
type RateLimiter interface {
@@ -35,7 +35,7 @@ type RateLimiter interface {
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential
func DefaultControllerRateLimiter() RateLimiter {
return NewMaxOfRateLimiter(
NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
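With the revert, the overall limiter is again backed by `github.com/juju/ratelimit`. A sketch of how the two halves plausibly combine, using that library's real `NewBucketWithRate`; the per-item exponential-failure limiter is reduced to a fixed-delay stub for brevity:

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

// RateLimiter matches the workqueue interface's core method.
type RateLimiter interface {
	When(item interface{}) time.Duration
}

// bucketLimiter adapts a juju token bucket: Take(1) returns how long
// the caller must wait for its token.
type bucketLimiter struct{ *ratelimit.Bucket }

func (b bucketLimiter) When(item interface{}) time.Duration { return b.Take(1) }

// maxOf returns the most conservative answer, like NewMaxOfRateLimiter.
type maxOf []RateLimiter

func (m maxOf) When(item interface{}) time.Duration {
	var worst time.Duration
	for _, l := range m {
		if d := l.When(item); d > worst {
			worst = d
		}
	}
	return worst
}

// fixedDelay stands in for the per-item exponential-failure limiter.
type fixedDelay time.Duration

func (f fixedDelay) When(item interface{}) time.Duration { return time.Duration(f) }

func main() {
	// 10 qps overall with a burst of 100 — the shape of the default.
	rl := maxOf{
		bucketLimiter{ratelimit.NewBucketWithRate(10, 100)},
		fixedDelay(5 * time.Millisecond),
	}
	fmt.Println(rl.When("some-key"))
}
```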

View file

@@ -39,8 +39,8 @@ var (
// them irrelevant. (Next we'll take it out, which may muck with
// scripts consuming the kubectl version output - but most of
// these should be looking at gitVersion already anyways.)
gitMajor string = "1" // major version, always numeric
gitMinor string = "5+" // minor version, numeric possibly followed by "+"
gitMajor string = "1" // major version, always numeric
gitMinor string = "5" // minor version, numeric possibly followed by "+"
// semantic version, derived by build scripts (see
// https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md
@@ -51,7 +51,7 @@ var (
// semantic version is a git hash, but the version itself is no
// longer the direct output of "git describe", but a slight
// translation to be semver compliant.
gitVersion string = "v1.5.0-beta.2+$Format:%h$"
gitVersion string = "v1.5.1+$Format:%h$"
gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"

View file

@@ -94,7 +94,7 @@ func UnmountPath(mountPath string, mounter mount.Interface) error {
return err
}
if notMnt {
glog.V(4).Info("%q is unmounted, deleting the directory", mountPath)
glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return nil

View file

@@ -51,6 +51,17 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
return err
}
// chown and chmod pass through to the underlying file for symlinks.
// Symlinks have a mode of 777 but this really doesn't mean anything.
// The permissions of the underlying file are what matter.
// However, if one reads the mode of a symlink then chmods the symlink
// with that mode, it changes the mode of the underlying file, overriding
// the defaultMode and permissions initialized by the volume plugin, which
// is not what we want; thus, we skip chown/chmod for symlinks.
if info.Mode()&os.ModeSymlink != 0 {
return nil
}
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return nil
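A minimal sketch of the guard in context — walking a volume directory, chowning/chmodding regular entries, and skipping symlinks so the chmod cannot leak through to the target. Assumes a Unix-like OS for `syscall.Stat_t`; the path and group are illustrative:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// setOwnership is a reduced model of SetVolumeOwnership: fsGroup is
// applied to every real file, but symlinks are left alone because
// chmod on a symlink follows through to the underlying file.
func setOwnership(root string, fsGroup int64) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.Mode()&os.ModeSymlink != 0 {
			return nil // the guard added in this commit
		}
		stat, ok := info.Sys().(*syscall.Stat_t)
		if !ok {
			return nil
		}
		if err := os.Chown(path, int(stat.Uid), int(fsGroup)); err != nil {
			return err
		}
		return os.Chmod(path, info.Mode().Perm()|0060) // add group rw
	})
}

func main() {
	if err := setOwnership("/tmp/example-volume", 1000); err != nil {
		fmt.Println(err)
	}
}
```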