Update godeps
Parent: 86dbf979cb
Commit: f7011d22f8
108 changed files with 7093 additions and 4947 deletions
vendor/k8s.io/kubernetes/pkg/api/v1/generated.proto (generated, vendored, 2 changes)

@@ -1514,7 +1514,7 @@ message NodeSpec {
   optional string providerID = 3;

   // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
-  // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"`
+  // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"
   // +optional
   optional bool unschedulable = 4;
 }
vendor/k8s.io/kubernetes/pkg/api/v1/types.go (generated, vendored, 2 changes)

@@ -2818,7 +2818,7 @@ type NodeSpec struct {
   // +optional
   ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
   // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
-  // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"`
+  // More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration"
   // +optional
   Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
 }
vendor/k8s.io/kubernetes/pkg/api/v1/types_swagger_doc_generated.go (generated, vendored, 2 changes)

@@ -901,7 +901,7 @@ var map_NodeSpec = map[string]string{
   "podCIDR":       "PodCIDR represents the pod IP range assigned to the node.",
   "externalID":    "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated.",
   "providerID":    "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
-  "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"`",
+  "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#manual-node-administration\"",
 }

 func (NodeSpec) SwaggerDoc() map[string]string {
vendor/k8s.io/kubernetes/pkg/api/validation/validation.go (generated, vendored, 8 changes)

@@ -2666,7 +2666,13 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList {
   }

   // TODO(freehan): allow user to update loadbalancerSourceRanges
-  allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
+  // Only allow removing LoadBalancerSourceRanges when change service type from LoadBalancer
+  // to non-LoadBalancer or adding LoadBalancerSourceRanges when change service type from
+  // non-LoadBalancer to LoadBalancer.
+  if service.Spec.Type != api.ServiceTypeLoadBalancer && oldService.Spec.Type != api.ServiceTypeLoadBalancer ||
+    service.Spec.Type == api.ServiceTypeLoadBalancer && oldService.Spec.Type == api.ServiceTypeLoadBalancer {
+    allErrs = append(allErrs, ValidateImmutableField(service.Spec.LoadBalancerSourceRanges, oldService.Spec.LoadBalancerSourceRanges, field.NewPath("spec", "loadBalancerSourceRanges"))...)
+  }

   allErrs = append(allErrs, validateServiceFields(service)...)
   allErrs = append(allErrs, validateServiceAnnotations(service, oldService)...)
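The hunk above makes spec.loadBalancerSourceRanges immutable only while the service type is not transitioning to or from LoadBalancer. A minimal standalone sketch of that rule, using hypothetical stand-in types rather than the vendored API:

package main

import "fmt"

type ServiceType string

const ServiceTypeLoadBalancer ServiceType = "LoadBalancer"

type ServiceSpec struct {
	Type                     ServiceType
	LoadBalancerSourceRanges []string
}

// sourceRangesMustBeImmutable mirrors the condition added in the hunk: the
// field may only change while the service type changes to or from LoadBalancer.
func sourceRangesMustBeImmutable(newSpec, oldSpec ServiceSpec) bool {
	return (newSpec.Type != ServiceTypeLoadBalancer && oldSpec.Type != ServiceTypeLoadBalancer) ||
		(newSpec.Type == ServiceTypeLoadBalancer && oldSpec.Type == ServiceTypeLoadBalancer)
}

func main() {
	old := ServiceSpec{Type: ServiceTypeLoadBalancer, LoadBalancerSourceRanges: []string{"10.0.0.0/8"}}
	upd := ServiceSpec{Type: "ClusterIP"} // dropping the ranges while leaving LoadBalancer
	fmt.Println(sourceRangesMustBeImmutable(upd, old)) // false: type changed, so the field may change
}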
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go (generated, vendored, 1 change)

@@ -50,6 +50,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
   &HorizontalPodAutoscaler{},
   &HorizontalPodAutoscalerList{},
   &api.ListOptions{},
+  &api.DeleteOptions{},
  )
  return nil
 }
vendor/k8s.io/kubernetes/pkg/apis/autoscaling/v1/BUILD (generated, vendored, 13 changes)

@@ -37,3 +37,16 @@ go_library(
     "//vendor:github.com/ugorji/go/codec",
   ],
 )
+
+go_test(
+  name = "go_default_xtest",
+  srcs = ["defaults_test.go"],
+  tags = ["automanaged"],
+  deps = [
+    "//pkg/api:go_default_library",
+    "//pkg/api/install:go_default_library",
+    "//pkg/apis/autoscaling/install:go_default_library",
+    "//pkg/apis/autoscaling/v1:go_default_library",
+    "//pkg/runtime:go_default_library",
+  ],
+)
vendor/k8s.io/kubernetes/pkg/apis/batch/register.go (generated, vendored, 1 change)

@@ -52,6 +52,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
   &CronJob{},
   &CronJobList{},
   &api.ListOptions{},
+  &api.DeleteOptions{},
  )
  scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJob"), &CronJob{})
  scheme.AddKnownTypeWithName(SchemeGroupVersion.WithKind("ScheduledJobList"), &CronJobList{})
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.generated.go (generated, vendored, 5972 changes)
(File diff suppressed because it is too large.)
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/types.go (generated, vendored, 8 changes)

@@ -294,7 +294,7 @@ type KubeletConfiguration struct {
   // And all Burstable and BestEffort pods are brought up under their
   // specific top level QoS cgroup.
   // +optional
-  CgroupsPerQOS bool `json:"cgroupsPerQOS,omitempty"`
+  ExperimentalCgroupsPerQOS bool `json:"experimentalCgroupsPerQOS,omitempty"`
   // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
   // +optional
   CgroupDriver string `json:"cgroupDriver,omitempty"`

@@ -307,7 +307,7 @@ type KubeletConfiguration struct {
   // +optional
   SystemCgroups string `json:"systemCgroups,omitempty"`
   // CgroupRoot is the root cgroup to use for pods.
-  // If CgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
+  // If ExperimentalCgroupsPerQOS is enabled, this is the root of the QoS cgroup hierarchy.
   // +optional
   CgroupRoot string `json:"cgroupRoot,omitempty"`
   // containerRuntime is the container runtime to use.

@@ -466,6 +466,10 @@ type KubeletConfiguration struct {
   // TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled.
   // Tells the Kubelet to fail to start if swap is enabled on the node.
   ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"`
+  // This flag, if set, enables a check prior to mount operations to verify that the required components
+  // (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled
+  // and fails the mount operation fails.
+  ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"`
 }

 type KubeletAuthorizationMode string
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/defaults.go (generated, vendored, 12 changes)

@@ -204,8 +204,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
   if obj.CertDirectory == "" {
     obj.CertDirectory = "/var/run/kubernetes"
   }
-  if obj.CgroupsPerQOS == nil {
-    obj.CgroupsPerQOS = boolVar(false)
+  if obj.ExperimentalCgroupsPerQOS == nil {
+    obj.ExperimentalCgroupsPerQOS = boolVar(false)
   }
   if obj.ContainerRuntime == "" {
     obj.ContainerRuntime = "docker"

@@ -391,9 +391,9 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
     temp := int32(defaultIPTablesDropBit)
     obj.IPTablesDropBit = &temp
   }
-  if obj.CgroupsPerQOS == nil {
+  if obj.ExperimentalCgroupsPerQOS == nil {
     temp := false
-    obj.CgroupsPerQOS = &temp
+    obj.ExperimentalCgroupsPerQOS = &temp
   }
   if obj.CgroupDriver == "" {
     obj.CgroupDriver = "cgroupfs"

@@ -401,8 +401,8 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
   // NOTE: this is for backwards compatibility with earlier releases where cgroup-root was optional.
   // if cgroups per qos is not enabled, and cgroup-root is not specified, we need to default to the
   // container runtime default and not default to the root cgroup.
-  if obj.CgroupsPerQOS != nil {
-    if *obj.CgroupsPerQOS {
+  if obj.ExperimentalCgroupsPerQOS != nil {
+    if *obj.ExperimentalCgroupsPerQOS {
       if obj.CgroupRoot == "" {
         obj.CgroupRoot = "/"
       }
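The defaulting hunks above only touch fields that are pointers in the v1alpha1 API, so a nil pointer means "unset" and receives a default. A small sketch of that pattern, independent of the kubelet types (the Config struct here is illustrative only):

package main

import "fmt"

type Config struct {
	// Pointer so that "false" and "unset" can be told apart, as with
	// ExperimentalCgroupsPerQOS in the v1alpha1 KubeletConfiguration.
	ExperimentalCgroupsPerQOS *bool
}

func boolVar(b bool) *bool { return &b }

// setDefaults fills in unset (nil) fields, mirroring the shape of
// SetDefaults_KubeletConfiguration in the hunk above.
func setDefaults(c *Config) {
	if c.ExperimentalCgroupsPerQOS == nil {
		c.ExperimentalCgroupsPerQOS = boolVar(false)
	}
}

func main() {
	var c Config
	setDefaults(&c)
	fmt.Println(*c.ExperimentalCgroupsPerQOS) // false
}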
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/types.go (generated, vendored, 6 changes)

@@ -355,7 +355,7 @@ type KubeletConfiguration struct {
   // And all Burstable and BestEffort pods are brought up under their
   // specific top level QoS cgroup.
   // +optional
-  CgroupsPerQOS *bool `json:"cgroupsPerQOS,omitempty"`
+  ExperimentalCgroupsPerQOS *bool `json:"experimentalCgroupsPerQOS,omitempty"`
   // driver that the kubelet uses to manipulate cgroups on the host (cgroupfs or systemd)
   // +optional
   CgroupDriver string `json:"cgroupDriver,omitempty"`

@@ -505,6 +505,10 @@ type KubeletConfiguration struct {
   // TODO(#34726:1.8.0): Remove the opt-in for failing when swap is enabled.
   // Tells the Kubelet to fail to start if swap is enabled on the node.
   ExperimentalFailSwapOn bool `json:"experimentalFailSwapOn,omitempty"`
+  // This flag, if set, enables a check prior to mount operations to verify that the required components
+  // (binaries, etc.) to mount the volume are available on the underlying node. If the check is enabled
+  // and fails the mount operation fails.
+  ExperimentalCheckNodeCapabilitiesBeforeMount bool `json:"ExperimentalCheckNodeCapabilitiesBeforeMount,omitempty"`
 }

 type KubeletAuthorizationMode string
@@ -330,7 +330,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
   out.RuntimeCgroups = in.RuntimeCgroups
   out.SystemCgroups = in.SystemCgroups
   out.CgroupRoot = in.CgroupRoot
-  if err := api.Convert_Pointer_bool_To_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
+  if err := api.Convert_Pointer_bool_To_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
     return err
   }
   out.CgroupDriver = in.CgroupDriver

@@ -407,6 +407,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_componentconfig_KubeletConfigu
   out.FeatureGates = in.FeatureGates
   out.EnableCRI = in.EnableCRI
   out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
+  out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
   return nil
 }

@@ -495,7 +496,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
   out.CloudProvider = in.CloudProvider
   out.CloudConfigFile = in.CloudConfigFile
   out.KubeletCgroups = in.KubeletCgroups
-  if err := api.Convert_bool_To_Pointer_bool(&in.CgroupsPerQOS, &out.CgroupsPerQOS, s); err != nil {
+  if err := api.Convert_bool_To_Pointer_bool(&in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS, s); err != nil {
     return err
   }
   out.CgroupDriver = in.CgroupDriver

@@ -575,6 +576,7 @@ func autoConvert_componentconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigu
   out.FeatureGates = in.FeatureGates
   out.EnableCRI = in.EnableCRI
   out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
+  out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
   return nil
 }
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1/zz_generated.deepcopy.go (generated, vendored, 7 changes)

@@ -302,12 +302,12 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
   out.RuntimeCgroups = in.RuntimeCgroups
   out.SystemCgroups = in.SystemCgroups
   out.CgroupRoot = in.CgroupRoot
-  if in.CgroupsPerQOS != nil {
-    in, out := &in.CgroupsPerQOS, &out.CgroupsPerQOS
+  if in.ExperimentalCgroupsPerQOS != nil {
+    in, out := &in.ExperimentalCgroupsPerQOS, &out.ExperimentalCgroupsPerQOS
     *out = new(bool)
     **out = **in
   } else {
-    out.CgroupsPerQOS = nil
+    out.ExperimentalCgroupsPerQOS = nil
   }
   out.CgroupDriver = in.CgroupDriver
   out.ContainerRuntime = in.ContainerRuntime

@@ -461,6 +461,7 @@ func DeepCopy_v1alpha1_KubeletConfiguration(in interface{}, out interface{}, c *
   out.FeatureGates = in.FeatureGates
   out.EnableCRI = in.EnableCRI
   out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
+  out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
   return nil
  }
 }
vendor/k8s.io/kubernetes/pkg/apis/componentconfig/zz_generated.deepcopy.go (generated, vendored, 3 changes)

@@ -308,7 +308,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
   out.CloudProvider = in.CloudProvider
   out.CloudConfigFile = in.CloudConfigFile
   out.KubeletCgroups = in.KubeletCgroups
-  out.CgroupsPerQOS = in.CgroupsPerQOS
+  out.ExperimentalCgroupsPerQOS = in.ExperimentalCgroupsPerQOS
   out.CgroupDriver = in.CgroupDriver
   out.RuntimeCgroups = in.RuntimeCgroups
   out.SystemCgroups = in.SystemCgroups

@@ -392,6 +392,7 @@ func DeepCopy_componentconfig_KubeletConfiguration(in interface{}, out interface
   out.FeatureGates = in.FeatureGates
   out.EnableCRI = in.EnableCRI
   out.ExperimentalFailSwapOn = in.ExperimentalFailSwapOn
+  out.ExperimentalCheckNodeCapabilitiesBeforeMount = in.ExperimentalCheckNodeCapabilitiesBeforeMount
   return nil
  }
 }
vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go (generated, vendored, 2 changes)

@@ -124,7 +124,6 @@ type ThirdPartyResource struct {
   Description string `json:"description,omitempty"`

   // Versions are versions for this third party object
-  // +optional
   Versions []APIVersion `json:"versions,omitempty"`
 }

@@ -143,7 +142,6 @@ type ThirdPartyResourceList struct {
 // TODO: we should consider merge this struct with GroupVersion in unversioned.go
 type APIVersion struct {
   // Name of this version (e.g. 'v1').
-  // +optional
   Name string `json:"name,omitempty"`
 }
vendor/k8s.io/kubernetes/pkg/apis/extensions/validation/validation.go (generated, vendored, 3 changes)

@@ -68,6 +68,9 @@ func ValidateThirdPartyResource(obj *extensions.ThirdPartyResource) field.ErrorL
   allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&obj.ObjectMeta, false, ValidateThirdPartyResourceName, field.NewPath("metadata"))...)

   versions := sets.String{}
+  if len(obj.Versions) == 0 {
+    allErrs = append(allErrs, field.Required(field.NewPath("versions"), "must specify at least one version"))
+  }
   for ix := range obj.Versions {
     version := &obj.Versions[ix]
     if len(version.Name) == 0 {
vendor/k8s.io/kubernetes/pkg/auth/user/user.go (generated, vendored, 1 change)

@@ -69,6 +69,7 @@ func (i *DefaultInfo) GetExtra() map[string][]string {
 // well-known user and group names
 const (
   SystemPrivilegedGroup = "system:masters"
+  NodesGroup            = "system:nodes"
   AllUnauthenticated    = "system:unauthenticated"
   AllAuthenticated      = "system:authenticated"
vendor/k8s.io/kubernetes/pkg/client/cache/controller.go (generated, vendored, 9 changes)

@@ -29,7 +29,7 @@ import (
 type Config struct {
   // The queue for your objects; either a FIFO or
   // a DeltaFIFO. Your Process() function should accept
-  // the output of this Oueue's Pop() method.
+  // the output of this Queue's Pop() method.
   Queue

   // Something that can list and watch your objects.

@@ -121,6 +121,11 @@ func (c *Controller) Requeue(obj interface{}) error {
 // TODO: Consider doing the processing in parallel. This will require a little thought
 // to make sure that we don't end up processing the same object multiple times
 // concurrently.
+//
+// TODO: Plumb through the stopCh here (and down to the queue) so that this can
+// actually exit when the controller is stopped. Or just give up on this stuff
+// ever being stoppable. Converting this whole package to use Context would
+// also be helpful.
 func (c *Controller) processLoop() {
   for {
     obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process))

@@ -134,7 +139,7 @@ func (c *Controller) processLoop() {
 }

 // ResourceEventHandler can handle notifications for events that happen to a
-// resource. The events are informational only, so you can't return an
+// resource. The events are informational only, so you can't return an
 // error.
 //  * OnAdd is called when an object is added.
 //  * OnUpdate is called when an object is modified. Note that oldObj is the
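processLoop above just pops items off the configured Queue and hands each one to the Process function; the new TODO notes that the loop cannot currently be stopped. A rough, self-contained sketch of that pop-and-process shape (a toy queue, not the cache package's actual Queue interface):

package main

import (
	"fmt"
	"time"
)

// queue is a stand-in for the cache package's Queue; Pop blocks until an item
// is available, as FIFO/DeltaFIFO do in the real package.
type queue struct{ items chan string }

func (q *queue) Pop() string { return <-q.items }

// processLoop mirrors the shape of Controller.processLoop: pop an item, hand
// it to the process function, repeat forever. The TODO added in the hunk above
// is about making a loop like this stoppable via a stop channel.
func processLoop(q *queue, process func(obj string) error) {
	for {
		if err := process(q.Pop()); err != nil {
			fmt.Println("process error:", err)
		}
	}
}

func main() {
	q := &queue{items: make(chan string, 2)}
	q.items <- "pod/a"
	q.items <- "pod/b"
	go processLoop(q, func(obj string) error { fmt.Println("processed", obj); return nil })
	time.Sleep(100 * time.Millisecond) // give the loop time to drain the queue
}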
vendor/k8s.io/kubernetes/pkg/client/cache/reflector.go (generated, vendored, 10 changes)

@@ -45,7 +45,7 @@ import (

 // Reflector watches a specified resource and causes all changes to be reflected in the given store.
 type Reflector struct {
-  // name identifies this reflector. By default it will be a file:line if possible.
+  // name identifies this reflector. By default it will be a file:line if possible.
   name string

   // The type of object we expect to place in the store.

@@ -74,12 +74,6 @@ var (
   // However, it can be modified to avoid periodic resync to break the
   // TCP connection.
   minWatchTimeout = 5 * time.Minute
-  // If we are within 'forceResyncThreshold' from the next planned resync
-  // and are just before issuing Watch(), resync will be forced now.
-  forceResyncThreshold = 3 * time.Second
-  // We try to set timeouts for Watch() so that we will finish about
-  // than 'timeoutThreshold' from next planned periodic resync.
-  timeoutThreshold = 1 * time.Second
 )

 // NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector

@@ -114,7 +108,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
   return r
 }

-// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
+// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
 // call chains to NewReflector, so they'd be low entropy names for reflectors
 var internalPackages = []string{"kubernetes/pkg/client/cache/", "/runtime/asm_"}
@@ -148,14 +148,14 @@ type EventSinkImpl struct {
   Interface EventInterface
 }

-func (e EventSinkImpl) Create(event *api.Event) (*api.Event, error) {
+func (e *EventSinkImpl) Create(event *api.Event) (*api.Event, error) {
   return e.Interface.CreateWithEventNamespace(event)
 }

-func (e EventSinkImpl) Update(event *api.Event) (*api.Event, error) {
+func (e *EventSinkImpl) Update(event *api.Event) (*api.Event, error) {
   return e.Interface.UpdateWithEventNamespace(event)
 }

-func (e EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) {
+func (e *EventSinkImpl) Patch(event *api.Event, data []byte) (*api.Event, error) {
   return e.Interface.PatchWithEventNamespace(event, data)
 }
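The hunk above switches EventSinkImpl's methods from value receivers to pointer receivers, which means only *EventSinkImpl (not the value type) has these methods in its method set for interface satisfaction. A tiny illustration of that Go rule, unrelated to the Kubernetes types:

package main

import "fmt"

type Sink interface{ Create(msg string) string }

type impl struct{ prefix string }

// With a pointer receiver, Create is in *impl's method set but not impl's.
func (i *impl) Create(msg string) string { return i.prefix + msg }

func main() {
	var s Sink = &impl{prefix: "event: "} // ok: the pointer satisfies the interface
	// var s2 Sink = impl{}               // would not compile: the value lacks Create in its method set
	fmt.Println(s.Create("created pod"))
}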
vendor/k8s.io/kubernetes/pkg/client/typed/discovery/discovery_client.go (generated, vendored, 49 changes)

@@ -213,9 +213,11 @@ func (d *DiscoveryClient) serverPreferredResources(namespaced bool) ([]unversion
   const maxRetries = 2
   var failedGroups map[unversioned.GroupVersion]error
   var results []unversioned.GroupVersionResource
+  var resources map[unversioned.GroupResource]string
 RetrieveGroups:
   for i := 0; i < maxRetries; i++ {
     results = []unversioned.GroupVersionResource{}
+    resources = map[unversioned.GroupResource]string{}
     failedGroups = make(map[unversioned.GroupVersion]error)
     serverGroupList, err := d.ServerGroups()
     if err != nil {

@@ -223,25 +225,40 @@ RetrieveGroups:
   }

   for _, apiGroup := range serverGroupList.Groups {
-    preferredVersion := apiGroup.PreferredVersion
-    groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: preferredVersion.Version}
-    apiResourceList, err := d.ServerResourcesForGroupVersion(preferredVersion.GroupVersion)
-    if err != nil {
-      if i < maxRetries-1 {
-        continue RetrieveGroups
-      }
-      failedGroups[groupVersion] = err
-      continue
-    }
-    for _, apiResource := range apiResourceList.APIResources {
-      // ignore the root scoped resources if "namespaced" is true.
-      if namespaced && !apiResource.Namespaced {
-        continue
-      }
-      if strings.Contains(apiResource.Name, "/") {
-        continue
-      }
-      results = append(results, groupVersion.WithResource(apiResource.Name))
-    }
+    versions := apiGroup.Versions
+    for _, version := range versions {
+      groupVersion := unversioned.GroupVersion{Group: apiGroup.Name, Version: version.Version}
+      apiResourceList, err := d.ServerResourcesForGroupVersion(version.GroupVersion)
+      if err != nil {
+        if i < maxRetries-1 {
+          continue RetrieveGroups
+        }
+        failedGroups[groupVersion] = err
+        continue
+      }
+      for _, apiResource := range apiResourceList.APIResources {
+        // ignore the root scoped resources if "namespaced" is true.
+        if namespaced && !apiResource.Namespaced {
+          continue
+        }
+        if strings.Contains(apiResource.Name, "/") {
+          continue
+        }
+        gvr := groupVersion.WithResource(apiResource.Name)
+        if _, ok := resources[gvr.GroupResource()]; ok {
+          if gvr.Version != apiGroup.PreferredVersion.Version {
+            continue
+          }
+          // remove previous entry, because it will be replaced with a preferred one
+          for i := range results {
+            if results[i].GroupResource() == gvr.GroupResource() {
+              results = append(results[:i], results[i+1:]...)
+            }
+          }
+        }
+        resources[gvr.GroupResource()] = gvr.Version
+        results = append(results, gvr)
+      }
+    }
   }
   if len(failedGroups) == 0 {
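serverPreferredResources now walks every advertised version of each group instead of only the preferred one, and when the same group/resource shows up under several versions the entry from the group's preferred version wins. A simplified, self-contained sketch of that "preferred version wins" bookkeeping (hypothetical types, not the discovery client's):

package main

import "fmt"

type groupResource struct{ group, resource string }

// pickVersions keeps one version per group/resource, preferring preferredVersion
// when a resource is served by several versions, much like the resources map in
// the reworked serverPreferredResources.
func pickVersions(preferredVersion string, seen []struct{ group, version, resource string }) map[groupResource]string {
	resources := map[groupResource]string{}
	for _, s := range seen {
		gr := groupResource{s.group, s.resource}
		if _, ok := resources[gr]; ok && s.version != preferredVersion {
			continue // keep the earlier entry; this version is not preferred
		}
		resources[gr] = s.version
	}
	return resources
}

func main() {
	got := pickVersions("v1", []struct{ group, version, resource string }{
		{"autoscaling", "v2alpha1", "horizontalpodautoscalers"},
		{"autoscaling", "v1", "horizontalpodautoscalers"},
	})
	fmt.Println(got[groupResource{"autoscaling", "horizontalpodautoscalers"}]) // v1
}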
vendor/k8s.io/kubernetes/pkg/cloudprovider/providers/gce/gce.go (generated, vendored, 15 changes)

@@ -608,6 +608,8 @@ func (gce *GCECloud) EnsureLoadBalancer(clusterName string, apiService *api.Serv
   // an IP, we assume they are managing it themselves. Otherwise, we will
   // release the IP in case of early-terminating failure or upon successful
   // creating of the LB.
+  // TODO(#36535): boil this logic down into a set of component functions
+  // and key the flag values off of errors returned.
   isUserOwnedIP := false // if this is set, we never release the IP
   isSafeToReleaseIP := false
   defer func() {

@@ -735,7 +737,7 @@ func (gce *GCECloud) EnsureLoadBalancer(clusterName string, apiService *api.Serv
     return nil, fmt.Errorf("Error checking HTTP health check %s: %v", loadBalancerName, err)
   }
   if path, healthCheckNodePort := apiservice.GetServiceHealthCheckPathPort(apiService); path != "" {
-    glog.V(4).Infof("service %v needs health checks on :%d/%s)", apiService.Name, healthCheckNodePort, path)
+    glog.V(4).Infof("service %v (%v) needs health checks on :%d%s)", apiService.Name, loadBalancerName, healthCheckNodePort, path)
     if err != nil {
       // This logic exists to detect a transition for a pre-existing service and turn on
       // the tpNeedsUpdate flag to delete/recreate fwdrule/tpool adding the health check

@@ -1080,13 +1082,18 @@ func (gce *GCECloud) createTargetPool(name, serviceName, region string, hosts []
   for _, host := range hosts {
     instances = append(instances, makeHostURL(gce.projectID, host.Zone, host.Name))
   }
+  // health check management is coupled with targetPools to prevent leaks. A
+  // target pool is the only thing that requires a health check, so we delete
+  // associated checks on teardown, and ensure checks on setup.
   hcLinks := []string{}
   if hc != nil {
+    var err error
+    if hc, err = gce.ensureHttpHealthCheck(name, hc.RequestPath, int32(hc.Port)); err != nil || hc == nil {
+      return fmt.Errorf("Failed to ensure health check for %v port %d path %v: %v", name, hc.Port, hc.RequestPath, err)
+    }
     hcLinks = append(hcLinks, hc.SelfLink)
   }
-  if len(hcLinks) > 0 {
-    glog.Infof("Creating targetpool %v with healthchecking", name)
-  }
+  glog.Infof("Creating targetpool %v with %d healthchecks", name, len(hcLinks))
   pool := &compute.TargetPool{
     Name:        name,
     Description: fmt.Sprintf(`{"kubernetes.io/service-name":"%s"}`, serviceName),
vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go (generated, vendored, 2 changes)

@@ -520,11 +520,11 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
   if err != nil {
     return fmt.Errorf("object does not have ObjectMeta, %v", err)
   }
   glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
   if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
     r.Recorder.Eventf(object, api.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
     return fmt.Errorf("unable to delete pods: %v", err)
   } else {
     glog.V(4).Infof("Controller %v deleted pod %v", accessor.GetName(), podID)
     r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID)
   }
   return nil
vendor/k8s.io/kubernetes/pkg/kubectl/BUILD (generated, vendored, 4 changes)

@@ -46,7 +46,6 @@ go_library(
     "sorted_resource_name_list.go",
     "sorting_printer.go",
     "stop.go",
-    "version.go",
   ],
   tags = ["automanaged"],
   deps = [

@@ -69,6 +68,7 @@ go_library(
     "//pkg/apis/batch/v2alpha1:go_default_library",
     "//pkg/apis/certificates:go_default_library",
     "//pkg/apis/extensions:go_default_library",
+    "//pkg/apis/policy:go_default_library",
     "//pkg/apis/rbac:go_default_library",
     "//pkg/apis/storage:go_default_library",
     "//pkg/apis/storage/util:go_default_library",

@@ -101,7 +101,6 @@ go_library(
     "//pkg/util/uuid:go_default_library",
     "//pkg/util/validation:go_default_library",
     "//pkg/util/wait:go_default_library",
-    "//pkg/version:go_default_library",
     "//pkg/watch:go_default_library",
     "//vendor:github.com/emicklei/go-restful/swagger",
     "//vendor:github.com/ghodss/yaml",

@@ -155,6 +154,7 @@ go_test(
     "//pkg/apimachinery/registered:go_default_library",
     "//pkg/apis/batch:go_default_library",
     "//pkg/apis/extensions:go_default_library",
+    "//pkg/apis/policy:go_default_library",
     "//pkg/apis/storage:go_default_library",
     "//pkg/client/clientset_generated/internalclientset:go_default_library",
     "//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
vendor/k8s.io/kubernetes/pkg/kubectl/cmd/util/factory.go (generated, vendored, 17 changes)

@@ -386,18 +386,6 @@ func (f *factory) UnstructuredObject() (meta.RESTMapper, runtime.ObjectTyper, er
     return nil, nil, err
   }

-  // Register unknown APIs as third party for now to make
-  // validation happy. TODO perhaps make a dynamic schema
-  // validator to avoid this.
-  for _, group := range groupResources {
-    for _, version := range group.Group.Versions {
-      gv := unversioned.GroupVersion{Group: group.Group.Name, Version: version.Version}
-      if !registered.IsRegisteredVersion(gv) {
-        registered.AddThirdPartyAPIGroupVersions(gv)
-      }
-    }
-  }
-
   mapper := discovery.NewDeferredDiscoveryRESTMapper(discoveryClient, meta.InterfacesForUnstructured)
   typer := discovery.NewUnstructuredObjectTyper(groupResources)
   return NewShortcutExpander(mapper, discoveryClient), typer, nil

@@ -1148,10 +1136,7 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
     return err
   }
   if ok := registered.IsEnabledVersion(gvk.GroupVersion()); !ok {
-    return fmt.Errorf("API version %q isn't supported, only supports API versions %q", gvk.GroupVersion().String(), registered.EnabledVersions())
-  }
-  if registered.IsThirdPartyAPIGroupVersion(gvk.GroupVersion()) {
-    // Don't attempt to validate third party objects
+    // if we don't have this in our scheme, just skip validation because its an object we don't recognize
     return nil
   }
vendor/k8s.io/kubernetes/pkg/kubectl/describe.go (generated, vendored, 38 changes)

@@ -40,6 +40,7 @@ import (
   "k8s.io/kubernetes/pkg/apis/batch"
   "k8s.io/kubernetes/pkg/apis/certificates"
   "k8s.io/kubernetes/pkg/apis/extensions"
+  "k8s.io/kubernetes/pkg/apis/policy"
   "k8s.io/kubernetes/pkg/apis/storage"
   storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

@@ -120,6 +121,7 @@ func describerMap(c clientset.Interface) map[unversioned.GroupKind]Describer {
   apps.Kind("StatefulSet"):                       &StatefulSetDescriber{c},
   certificates.Kind("CertificateSigningRequest"): &CertificateSigningRequestDescriber{c},
   storage.Kind("StorageClass"):                   &StorageClassDescriber{c},
+  policy.Kind("PodDisruptionBudget"):             &PodDisruptionBudgetDescriber{c},
  }

  return m

@@ -1801,6 +1803,7 @@ func (d *NodeDescriber) Describe(namespace, name string, describerSettings Descr
 func describeNode(node *api.Node, nodeNonTerminatedPodsList *api.PodList, events *api.EventList, canViewPods bool) (string, error) {
   return tabbedString(func(out io.Writer) error {
     fmt.Fprintf(out, "Name:\t%s\n", node.Name)
+    fmt.Fprintf(out, "Role:\t%s\n", findNodeRole(node))
     printLabelsMultiline(out, "Labels", node.Labels)
     printTaintsInAnnotationMultiline(out, "Taints", node.Annotations)
     fmt.Fprintf(out, "CreationTimestamp:\t%s\n", node.CreationTimestamp.Time.Format(time.RFC1123Z))

@@ -2407,6 +2410,41 @@ func (s *StorageClassDescriber) Describe(namespace, name string, describerSettin
   })
 }

+type PodDisruptionBudgetDescriber struct {
+  clientset.Interface
+}
+
+func (p *PodDisruptionBudgetDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
+  pdb, err := p.Policy().PodDisruptionBudgets(namespace).Get(name)
+  if err != nil {
+    return "", err
+  }
+  return tabbedString(func(out io.Writer) error {
+    fmt.Fprintf(out, "Name:\t%s\n", pdb.Name)
+    fmt.Fprintf(out, "Min available:\t%s\n", pdb.Spec.MinAvailable.String())
+    if pdb.Spec.Selector != nil {
+      fmt.Fprintf(out, "Selector:\t%s\n", unversioned.FormatLabelSelector(pdb.Spec.Selector))
+    } else {
+      fmt.Fprintf(out, "Selector:\t<unset>\n")
+    }
+    fmt.Fprintf(out, "Status:\n")
+    fmt.Fprintf(out, "  Allowed disruptions:\t%d\n", pdb.Status.PodDisruptionsAllowed)
+    fmt.Fprintf(out, "  Current:\t%d\n", pdb.Status.CurrentHealthy)
+    fmt.Fprintf(out, "  Desired:\t%d\n", pdb.Status.DesiredHealthy)
+    fmt.Fprintf(out, "  Total:\t%d\n", pdb.Status.ExpectedPods)
+    if describerSettings.ShowEvents {
+      events, err := p.Core().Events(namespace).Search(pdb)
+      if err != nil {
+        return err
+      }
+      if events != nil {
+        DescribeEvents(events, out)
+      }
+    }
+    return nil
+  })
+}
+
 // newErrNoDescriber creates a new ErrNoDescriber with the names of the provided types.
 func newErrNoDescriber(types ...reflect.Type) error {
   names := make([]string, 0, len(types))
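The new PodDisruptionBudgetDescriber writes tab-separated "Field:\tvalue" lines that the surrounding tabbedString helper feeds through a tab writer for aligned output. A stdlib-only sketch of that formatting approach, with illustrative values rather than a real PodDisruptionBudget:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// tabbedString in kubectl wraps its output in a tabwriter much like this one.
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintf(w, "Name:\t%s\n", "my-pdb")
	fmt.Fprintf(w, "Min available:\t%s\n", "2")
	fmt.Fprintf(w, "Selector:\t%s\n", "app=web")
	fmt.Fprintf(w, "Status:\n")
	fmt.Fprintf(w, "  Allowed disruptions:\t%d\n", 1)
	fmt.Fprintf(w, "  Current:\t%d\n", 3)
	fmt.Fprintf(w, "  Desired:\t%d\n", 3)
	fmt.Fprintf(w, "  Total:\t%d\n", 4)
	w.Flush() // columns are aligned only after Flush
}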
vendor/k8s.io/kubernetes/pkg/kubectl/resource_filter.go (generated, vendored, 11 changes)

@@ -58,6 +58,17 @@ func filterPods(obj runtime.Object, options PrintOptions) bool {

 // Filter loops through a collection of FilterFuncs until it finds one that can filter the given resource
 func (f Filters) Filter(obj runtime.Object, opts *PrintOptions) (bool, error) {
+  // check if the object is unstructured. If so, let's attempt to convert it to a type we can understand
+  // before apply filter func.
+  switch obj.(type) {
+  case *runtime.UnstructuredList, *runtime.Unstructured, *runtime.Unknown:
+    if objBytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj); err == nil {
+      if decodedObj, err := runtime.Decode(api.Codecs.UniversalDecoder(), objBytes); err == nil {
+        obj = decodedObj
+      }
+    }
+  }
+
   for _, filter := range f {
     if ok := filter(obj, *opts); ok {
       return true, nil
vendor/k8s.io/kubernetes/pkg/kubectl/resource_printer.go (generated, vendored, 57 changes)

@@ -40,6 +40,7 @@ import (
   "k8s.io/kubernetes/pkg/apis/batch"
   "k8s.io/kubernetes/pkg/apis/certificates"
   "k8s.io/kubernetes/pkg/apis/extensions"
+  "k8s.io/kubernetes/pkg/apis/policy"
   "k8s.io/kubernetes/pkg/apis/rbac"
   "k8s.io/kubernetes/pkg/apis/storage"
   storageutil "k8s.io/kubernetes/pkg/apis/storage/util"

@@ -474,6 +475,7 @@ func (h *HumanReadablePrinter) AfterPrint(output io.Writer, res string) error {
 var (
   podColumns                   = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"}
   podTemplateColumns           = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"}
+  podDisruptionBudgetColumns   = []string{"NAME", "MIN-AVAILABLE", "ALLOWED-DISRUPTIONS", "AGE"}
   replicationControllerColumns = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"}
   replicaSetColumns            = []string{"NAME", "DESIRED", "CURRENT", "READY", "AGE"}
   jobColumns                   = []string{"NAME", "DESIRED", "SUCCESSFUL", "AGE"}

@@ -536,6 +538,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() {
   h.Handler(podColumns, h.printPod)
   h.Handler(podTemplateColumns, printPodTemplate)
   h.Handler(podTemplateColumns, printPodTemplateList)
+  h.Handler(podDisruptionBudgetColumns, printPodDisruptionBudget)
+  h.Handler(podDisruptionBudgetColumns, printPodDisruptionBudgetList)
   h.Handler(replicationControllerColumns, printReplicationController)
   h.Handler(replicationControllerColumns, printReplicationControllerList)
   h.Handler(replicaSetColumns, printReplicaSet)

@@ -828,6 +832,37 @@ func printPodTemplateList(podList *api.PodTemplateList, w io.Writer, options Pri
   return nil
 }

+func printPodDisruptionBudget(pdb *policy.PodDisruptionBudget, w io.Writer, options PrintOptions) error {
+  // name, minavailable, selector
+  name := formatResourceName(options.Kind, pdb.Name, options.WithKind)
+  namespace := pdb.Namespace
+
+  if options.WithNamespace {
+    if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
+      return err
+    }
+  }
+  if _, err := fmt.Fprintf(w, "%s\t%s\t%d\t%s\n",
+    name,
+    pdb.Spec.MinAvailable.String(),
+    pdb.Status.PodDisruptionsAllowed,
+    translateTimestamp(pdb.CreationTimestamp),
+  ); err != nil {
+    return err
+  }
+
+  return nil
+}
+
+func printPodDisruptionBudgetList(pdbList *policy.PodDisruptionBudgetList, w io.Writer, options PrintOptions) error {
+  for _, pdb := range pdbList.Items {
+    if err := printPodDisruptionBudget(&pdb, w, options); err != nil {
+      return err
+    }
+  }
+  return nil
+}
+
 // TODO(AdoHe): try to put wide output in a single method
 func printReplicationController(controller *api.ReplicationController, w io.Writer, options PrintOptions) error {
   name := formatResourceName(options.Kind, controller.Name, options.WithKind)

@@ -1491,6 +1526,10 @@ func printNode(node *api.Node, w io.Writer, options PrintOptions) error {
   if node.Spec.Unschedulable {
     status = append(status, "SchedulingDisabled")
   }
+  role := findNodeRole(node)
+  if role != "" {
+    status = append(status, role)
+  }

   if _, err := fmt.Fprintf(w, "%s\t%s\t%s", name, strings.Join(status, ","), translateTimestamp(node.CreationTimestamp)); err != nil {
     return err

@@ -1520,6 +1559,22 @@ func getNodeExternalIP(node *api.Node) string {
   return "<none>"
 }

+// findNodeRole returns the role of a given node, or "" if none found.
+// The role is determined by looking in order for:
+// * a kubernetes.io/role label
+// * a kubeadm.alpha.kubernetes.io/role label
+// If no role is found, ("", nil) is returned
+func findNodeRole(node *api.Node) string {
+  if role := node.Labels[unversioned.NodeLabelRole]; role != "" {
+    return role
+  }
+  if role := node.Labels[unversioned.NodeLabelKubeadmAlphaRole]; role != "" {
+    return role
+  }
+  // No role found
+  return ""
+}
+
 func printNodeList(list *api.NodeList, w io.Writer, options PrintOptions) error {
   for _, node := range list.Items {
     if err := printNode(&node, w, options); err != nil {

@@ -2263,7 +2318,7 @@ func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) er
   // check if the object is unstructured. If so, let's attempt to convert it to a type we can understand before
   // trying to print, since the printers are keyed by type. This is extremely expensive.
   switch obj.(type) {
-  case *runtime.Unstructured, *runtime.Unknown:
+  case *runtime.UnstructuredList, *runtime.Unstructured, *runtime.Unknown:
     if objBytes, err := runtime.Encode(api.Codecs.LegacyCodec(), obj); err == nil {
       if decodedObj, err := runtime.Decode(api.Codecs.UniversalDecoder(), objBytes); err == nil {
         obj = decodedObj
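findNodeRole checks two well-known labels in order and falls back to the empty string; the label keys are the ones named in its comment. The same lookup written against a plain map so it runs standalone (the unversioned.NodeLabel* constants themselves are not repeated here):

package main

import "fmt"

// findNodeRole mirrors the helper added in the hunk above, using the label
// keys listed in its comment.
func findNodeRole(labels map[string]string) string {
	if role := labels["kubernetes.io/role"]; role != "" {
		return role
	}
	if role := labels["kubeadm.alpha.kubernetes.io/role"]; role != "" {
		return role
	}
	return "" // no role found
}

func main() {
	fmt.Println(findNodeRole(map[string]string{"kubeadm.alpha.kubernetes.io/role": "master"})) // master
	fmt.Println(findNodeRole(map[string]string{}) == "")                                       // true
}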
vendor/k8s.io/kubernetes/pkg/kubectl/version.go (generated, vendored, 28 changes; file deleted)

@@ -1,28 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubectl
-
-import (
-  "fmt"
-  "io"
-
-  "k8s.io/kubernetes/pkg/version"
-)
-
-func GetClientVersion(w io.Writer) {
-  fmt.Fprintf(w, "Client Version: %#v\n", version.Get())
-}
vendor/k8s.io/kubernetes/pkg/storage/BUILD (generated, vendored, 1 change)

@@ -87,6 +87,7 @@ go_test(
     "//pkg/storage/etcd:go_default_library",
     "//pkg/storage/etcd/etcdtest:go_default_library",
     "//pkg/storage/etcd/testing:go_default_library",
+    "//pkg/storage/etcd3:go_default_library",
     "//pkg/util/sets:go_default_library",
     "//pkg/util/wait:go_default_library",
     "//pkg/watch:go_default_library",
vendor/k8s.io/kubernetes/pkg/util/config/feature_gate.go (generated, vendored, 29 changes)

@@ -43,18 +43,26 @@ const (
   dynamicKubeletConfig      = "DynamicKubeletConfig"
   dynamicVolumeProvisioning = "DynamicVolumeProvisioning"
   streamingProxyRedirects   = "StreamingProxyRedirects"
+
+  // experimentalHostUserNamespaceDefaulting Default userns=host for containers
+  // that are using other host namespaces, host mounts, the pod contains a privileged container,
+  // or specific non-namespaced capabilities
+  // (MKNOD, SYS_MODULE, SYS_TIME). This should only be enabled if user namespace remapping is enabled
+  // in the docker daemon.
+  experimentalHostUserNamespaceDefaultingGate = "ExperimentalHostUserNamespaceDefaulting"
 )

 var (
   // Default values for recorded features. Every new feature gate should be
   // represented here.
   knownFeatures = map[string]featureSpec{
-    allAlphaGate:              {false, alpha},
-    externalTrafficLocalOnly:  {true, beta},
-    appArmor:                  {true, beta},
-    dynamicKubeletConfig:      {false, alpha},
-    dynamicVolumeProvisioning: {true, alpha},
-    streamingProxyRedirects:   {false, alpha},
+    allAlphaGate:               {false, alpha},
+    externalTrafficLocalOnly:   {true, beta},
+    appArmor:                   {true, beta},
+    dynamicKubeletConfig:       {false, alpha},
+    dynamicVolumeProvisioning:  {true, alpha},
+    streamingProxyRedirects:    {false, alpha},
+    experimentalHostUserNamespaceDefaultingGate: {false, alpha},
   }

   // Special handling for a few gates.

@@ -115,6 +123,10 @@ type FeatureGate interface {
   // owner: timstclair
   // alpha: v1.5
   StreamingProxyRedirects() bool
+
+  // owner: @pweil-
+  // alpha: v1.5
+  ExperimentalHostUserNamespaceDefaulting() bool
 }

 // featureGate implements FeatureGate as well as pflag.Value for flag parsing.

@@ -209,6 +221,11 @@ func (f *featureGate) StreamingProxyRedirects() bool {
   return f.lookup(streamingProxyRedirects)
 }

+// ExperimentalHostUserNamespaceDefaulting returns value for experimentalHostUserNamespaceDefaulting
+func (f *featureGate) ExperimentalHostUserNamespaceDefaulting() bool {
+  return f.lookup(experimentalHostUserNamespaceDefaultingGate)
+}
+
 func (f *featureGate) lookup(key string) bool {
   defaultValue := f.known[key].enabled
   if f.enabled != nil {
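The new gate is just another entry in the knownFeatures map, defaulting to false (alpha), and is read back by a lookup on the gate name with any explicit override taking precedence. A minimal sketch of that default-plus-override lookup (not the real featureGate type):

package main

import "fmt"

const experimentalHostUserNamespaceDefaultingGate = "ExperimentalHostUserNamespaceDefaulting"

// defaults mirrors the role of knownFeatures: the new gate starts disabled.
var defaults = map[string]bool{
	experimentalHostUserNamespaceDefaultingGate: false,
}

// lookup returns the override if one was set (for example via --feature-gates=...),
// otherwise the recorded default, like featureGate.lookup in the hunk above.
func lookup(enabled map[string]bool, key string) bool {
	if v, ok := enabled[key]; ok {
		return v
	}
	return defaults[key]
}

func main() {
	fmt.Println(lookup(nil, experimentalHostUserNamespaceDefaultingGate)) // false (default)
	overrides := map[string]bool{experimentalHostUserNamespaceDefaultingGate: true}
	fmt.Println(lookup(overrides, experimentalHostUserNamespaceDefaultingGate)) // true (overridden)
}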
vendor/k8s.io/kubernetes/pkg/util/mount/mount.go (generated, vendored, 13 changes)

@@ -30,7 +30,8 @@ import (

 const (
   // Default mount command if mounter path is not specified
-  defaultMountCommand = "mount"
+  defaultMountCommand  = "mount"
+  MountsInGlobalPDPath = "mounts"
 )

 type Interface interface {

@@ -189,9 +190,15 @@ func getDeviceNameFromMount(mounter Interface, mountPath, pluginDir string) (str
     glog.V(4).Infof("Directory %s is not mounted", mountPath)
     return "", fmt.Errorf("directory %s is not mounted", mountPath)
   }
+  basemountPath := path.Join(pluginDir, MountsInGlobalPDPath)
   for _, ref := range refs {
-    if strings.HasPrefix(ref, pluginDir) {
-      return path.Base(ref), nil
+    if strings.HasPrefix(ref, basemountPath) {
+      volumeID, err := filepath.Rel(basemountPath, ref)
+      if err != nil {
+        glog.Errorf("Failed to get volume id from mount %s - %v", mountPath, err)
+        return "", err
+      }
+      return volumeID, nil
     }
   }
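getDeviceNameFromMount now derives the volume ID as the path of the mount reference relative to <pluginDir>/mounts rather than just its last element, so IDs that themselves contain slashes survive. A stdlib-only sketch of the difference (the paths are hypothetical):

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	pluginDir := "/var/lib/kubelet/plugins/kubernetes.io~example" // hypothetical plugin dir
	ref := path.Join(pluginDir, "mounts", "bucket/volume-1")      // a mount reference for the volume

	basemountPath := path.Join(pluginDir, "mounts")

	// Old behaviour: only the last path element, which drops "bucket/".
	fmt.Println(path.Base(ref)) // volume-1

	// New behaviour: everything below <pluginDir>/mounts is kept as the volume ID.
	volumeID, err := filepath.Rel(basemountPath, ref)
	fmt.Println(volumeID, err) // bucket/volume-1 <nil>
}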
143
vendor/k8s.io/kubernetes/pkg/util/wait/wait.go
generated
vendored
143
vendor/k8s.io/kubernetes/pkg/util/wait/wait.go
generated
vendored
|
|
@ -36,33 +36,42 @@ var ForeverTestTimeout = time.Second * 30
|
|||
// NeverStop may be passed to Until to make it never stop.
|
||||
var NeverStop <-chan struct{} = make(chan struct{})
|
||||
|
||||
// Forever is syntactic sugar on top of Until
|
||||
// Forever calls f every period for ever.
|
||||
//
|
||||
// Forever is syntactic sugar on top of Until.
|
||||
func Forever(f func(), period time.Duration) {
|
||||
Until(f, period, NeverStop)
|
||||
}
|
||||
|
||||
// Until loops until stop channel is closed, running f every period.
|
||||
// Until is syntactic sugar on top of JitterUntil with zero jitter
|
||||
// factor, with sliding = true (which means the timer for period
|
||||
// starts after the f completes).
|
||||
//
|
||||
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
|
||||
// with sliding = true (which means the timer for period starts after the f
|
||||
// completes).
|
||||
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
|
||||
JitterUntil(f, period, 0.0, true, stopCh)
|
||||
}
|
||||
|
||||
// NonSlidingUntil loops until stop channel is closed, running f every
|
||||
// period. NonSlidingUntil is syntactic sugar on top of JitterUntil
|
||||
// with zero jitter factor, with sliding = false (meaning the timer for
|
||||
// period starts at the same time as the function starts).
|
||||
// period.
|
||||
//
|
||||
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
|
||||
// factor, with sliding = false (meaning the timer for period starts at the same
|
||||
// time as the function starts).
|
||||
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
|
||||
JitterUntil(f, period, 0.0, false, stopCh)
|
||||
}
|
||||
|
||||
// JitterUntil loops until stop channel is closed, running f every period.
|
||||
//
|
||||
// If jitterFactor is positive, the period is jittered before every run of f.
|
||||
// If jitterFactor is not positive, the period is unchanged.
|
||||
// Catches any panics, and keeps going. f may not be invoked if
|
||||
// stop channel is already closed. Pass NeverStop to Until if you
|
||||
// don't want it stop.
|
||||
// If jitterFactor is not positive, the period is unchanged and not jitterd.
|
||||
//
|
||||
// If slidingis true, the period is computed after f runs. If it is false then
|
||||
// period includes the runtime for f.
|
||||
//
|
||||
// Close stopCh to stop. f may not be invoked if stop channel is already
|
||||
// closed. Pass NeverStop to if you don't want it stop.
|
||||
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
|
||||
for {
|
||||
|
||||
|
|
@ -104,9 +113,11 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
|
|||
}
|
||||
}
|
||||
|
||||
// Jitter returns a time.Duration between duration and duration + maxFactor * duration,
|
||||
// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a
|
||||
// suggested default value will be chosen.
|
||||
// Jitter returns a time.Duration between duration and duration + maxFactor *
|
||||
// duration.
|
||||
//
|
||||
// This allows clients to avoid converging on periodic behavior. If maxFactor
|
||||
// is 0.0, a suggested default value will be chosen.
|
||||
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
|
||||
if maxFactor <= 0.0 {
|
||||
maxFactor = 1.0
|
||||
|
|
@ -115,26 +126,31 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
|
|||
return wait
|
||||
}
|
||||
|
||||
// ErrWaitTimeout is returned when the condition exited without success
|
||||
// ErrWaitTimeout is returned when the condition exited without success.
|
||||
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
|
||||
|
||||
// ConditionFunc returns true if the condition is satisfied, or an error
|
||||
// if the loop should be aborted.
|
||||
type ConditionFunc func() (done bool, err error)
|
||||
|
||||
// Backoff is parameters applied to a Backoff function.
|
||||
// Backoff holds parameters applied to a Backoff function.
|
||||
type Backoff struct {
|
||||
Duration time.Duration
|
||||
Factor float64
|
||||
Jitter float64
|
||||
Steps int
|
||||
Duration time.Duration // the base duration
|
||||
Factor float64 // Duration is multipled by factor each iteration
|
||||
Jitter float64 // The amount of jitter applied each iteration
|
||||
Steps int // Exit with error after this many steps
|
||||
}
|
||||
|
||||
// ExponentialBackoff repeats a condition check up to steps times, increasing the wait
|
||||
// by multipling the previous duration by factor. If jitter is greater than zero,
|
||||
// a random amount of each duration is added (between duration and duration*(1+jitter)).
|
||||
// If the condition never returns true, ErrWaitTimeout is returned. All other errors
|
||||
// terminate immediately.
|
||||
// ExponentialBackoff repeats a condition check with exponential backoff.
|
||||
//
|
||||
// It checks the condition up to Steps times, increasing the wait by multipling
|
||||
// the previous duration by Factor.
|
||||
//
|
||||
// If Jitter is greater than zero, a random amount of each duration is added
|
||||
// (between duration and duration*(1+jitter)).
|
||||
//
|
||||
// If the condition never returns true, ErrWaitTimeout is returned. All other
|
||||
// errors terminate immediately.
|
||||
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
|
||||
duration := backoff.Duration
|
||||
for i := 0; i < backoff.Steps; i++ {
|
||||
|
|
@ -154,22 +170,33 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
|
|||
}
|
||||
|
||||
// Poll tries a condition func until it returns true, an error, or the timeout
|
||||
// is reached. condition will always be invoked at least once but some intervals
|
||||
// may be missed if the condition takes too long or the time window is too short.
|
||||
// is reached.
|
||||
//
|
||||
// Poll always waits the interval before the run of 'condition'.
|
||||
// 'condition' will always be invoked at least once.
|
||||
//
|
||||
// Some intervals may be missed if the condition takes too long or the time
|
||||
// window is too short.
|
||||
//
|
||||
// If you want to Poll something forever, see PollInfinite.
|
||||
// Poll always waits the interval before the first check of the condition.
|
||||
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
|
||||
return pollInternal(poller(interval, timeout), condition)
|
||||
}
|
||||
|
||||
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
return WaitFor(wait, condition, done)
|
||||
return WaitFor(wait, condition, NeverStop)
|
||||
}
|
||||
|
||||
// PollImmediate is identical to Poll, except that it performs the first check
|
||||
// immediately, not waiting interval beforehand.
|
||||
// PollImmediate tries a condition func until it returns true, an error, or the timeout
|
||||
// is reached.
|
||||
//
|
||||
// Poll always checks 'condition' before waiting for the interval. 'condition'
|
||||
// will always be invoked at least once.
|
||||
//
|
||||
// Some intervals may be missed if the condition takes too long or the time
|
||||
// window is too short.
|
||||
//
|
||||
// If you want to Poll something forever, see PollInfinite.
|
||||
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
|
||||
return pollImmediateInternal(poller(interval, timeout), condition)
|
||||
}
|
||||
|
|
@ -185,16 +212,24 @@ func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
|
|||
return pollInternal(wait, condition)
|
||||
}
|
||||
|
||||
// PollInfinite polls forever.
|
||||
// PollInfinite tries a condition func until it returns true or an error
|
||||
//
|
||||
// PollInfinite always waits the interval before the run of 'condition'.
|
||||
//
|
||||
// Some intervals may be missed if the condition takes too long or the time
|
||||
// window is too short.
|
||||
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
return PollUntil(interval, condition, done)
|
||||
}
|
||||
|
||||
// PollImmediateInfinite is identical to PollInfinite, except that it
|
||||
// performs the first check immediately, not waiting interval
|
||||
// beforehand.
|
||||
// PollImmediateInfinite tries a condition func until it returns true or an error
|
||||
//
|
||||
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
|
||||
//
|
||||
// Some intervals may be missed if the condition takes too long or the time
|
||||
// window is too short.
|
||||
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
|
||||
done, err := condition()
|
||||
if err != nil {
|
||||
|
|
@ -206,7 +241,11 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro
|
|||
return PollInfinite(interval, condition)
|
||||
}
|
||||
|
||||
// PollUntil is like Poll, but it takes a stop change instead of total duration
|
||||
// PollUntil tries a condition func until it returns true, an error or stopCh is
|
||||
// closed.
|
||||
//
|
||||
// PolUntil always waits interval before the first run of 'condition'.
|
||||
// 'condition' will always be invoked at least once.
|
||||
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
|
||||
return WaitFor(poller(interval, 0), condition, stopCh)
|
||||
}
|
||||
|
|
@@ -215,11 +254,16 @@ func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan st
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}

// WaitFor gets a channel from wait(), and then invokes fn once for every value
// placed on the channel and once more when the channel is closed. If fn
// returns an error the loop ends and that error is returned, and if fn returns
// true the loop ends and nil is returned. ErrWaitTimeout will be returned if
// the channel is closed without fn ever returning true.
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed.
//
// If 'fn' returns an error the loop ends and that error is returned, and if
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the channel is closed without fn ever
// returning true.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
	c := wait(done)
	for {

@@ -238,11 +282,14 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
	return ErrWaitTimeout
}
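
To make the WaitFor contract concrete, here is a hand-rolled WaitFunc backed by a time.Ticker; it is an illustration of the interface described above, not code from this package:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

// tickerWait emits a value every interval until 'done' is closed, then closes
// its output channel, matching the WaitFunc contract. Ticks are dropped when
// the consumer is not ready, mirroring poller's unbuffered behaviour.
func tickerWait(interval time.Duration) wait.WaitFunc {
	return func(done <-chan struct{}) <-chan struct{} {
		ch := make(chan struct{})
		go func() {
			defer close(ch)
			t := time.NewTicker(interval)
			defer t.Stop()
			for {
				select {
				case <-t.C:
					select {
					case ch <- struct{}{}:
					default: // slow consumer simply misses this tick
					}
				case <-done:
					return
				}
			}
		}()
		return ch
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(5 * time.Second)
		close(done)
	}()

	tries := 0
	err := wait.WaitFor(tickerWait(time.Second), func() (bool, error) {
		tries++
		return tries >= 3, nil // succeed on the third invocation
	}, done)
	fmt.Println(err) // nil when the condition succeeded before 'done' closed
}
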
// poller returns a WaitFunc that will send to the channel every
// interval until timeout has elapsed and then close the channel.
// Over very short intervals you may receive no ticks before
// the channel is closed. If timeout is 0, the channel
// will never be closed.
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as an infinity.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitFunc {
	return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
		ch := make(chan struct{})

10
vendor/k8s.io/kubernetes/pkg/util/workqueue/delaying_queue.go
generated
vendored
@@ -24,7 +24,7 @@ import (
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
)

// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type DelayingInterface interface {
	Interface
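
A sketch of the requeue-after-failure pattern DelayingInterface is meant for; the NewDelayingQueue constructor and the processing logic are assumptions for illustration:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	q.Add("object-key")

	key, quit := q.Get()
	if quit {
		return
	}
	if err := process(key); err != nil {
		// Instead of re-adding immediately (a hot loop), retry after a delay.
		q.AddAfter(key, 5*time.Second)
	}
	q.Done(key)
}

func process(key interface{}) error {
	fmt.Println("processing", key)
	return nil
}
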
@@ -68,6 +68,9 @@ type delayingType struct {
	stopCh chan struct{}

	// heartbeat ensures we wait no more than maxWait before firing
	//
	// TODO: replace with Ticker (and add to clock) so this can be cleaned up.
	// clock.Tick will leak.
	heartbeat <-chan time.Time

	// waitingForAdd is an ordered slice of items to be added to the contained work queue
@@ -115,7 +118,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
	}
}

// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second
@@ -192,6 +195,9 @@ func (q *delayingType) waitingLoop() {
// inserts the given entry into the sorted entries list
// same semantics as append()... the given slice may be modified,
// and the returned value should be used
//
// TODO: This should probably be converted to use container/heap to improve
// running time for a large number of items.
func insert(entries []waitFor, knownEntries map[t]time.Time, entry waitFor) []waitFor {
	// if the entry is already in our retry list and the existing time is before the new one, just skip it
	existingTime, exists := knownEntries[entry.data]
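
The comment above describes append()-like semantics for the internal insert helper; a generic illustration of that pattern with a simplified element type (not the vendored implementation) is:

package main

import (
	"fmt"
	"sort"
	"time"
)

type entry struct {
	key     string
	readyAt time.Time
}

// insertSorted keeps 'entries' ordered by readyAt. Like append(), it may
// modify the given slice and the caller must use the returned value.
func insertSorted(entries []entry, e entry) []entry {
	i := sort.Search(len(entries), func(i int) bool {
		return entries[i].readyAt.After(e.readyAt)
	})
	entries = append(entries, entry{}) // grow by one, possibly reallocating
	copy(entries[i+1:], entries[i:])   // shift the tail one slot right
	entries[i] = e                     // place the new element
	return entries
}

func main() {
	now := time.Now()
	list := []entry{{"a", now.Add(1 * time.Second)}, {"c", now.Add(3 * time.Second)}}
	list = insertSorted(list, entry{"b", now.Add(2 * time.Second)})
	fmt.Println(list[0].key, list[1].key, list[2].key) // a b c
}
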

4
vendor/k8s.io/kubernetes/pkg/util/workqueue/parallelizer.go
generated
vendored
@@ -33,6 +33,10 @@ func Parallelize(workers, pieces int, doWorkPiece DoWorkPieceFunc) {
	}
	close(toProcess)

	if pieces < workers {
		workers = pieces
	}

	wg := sync.WaitGroup{}
	wg.Add(workers)
	for i := 0; i < workers; i++ {
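
A brief usage sketch for Parallelize; the work items are made up. With the clamp added above, asking for more workers than pieces avoids starting goroutines that would have nothing to process:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

func main() {
	items := []string{"a", "b", "c", "d", "e"}

	// 16 workers requested, but only len(items) goroutines are started.
	workqueue.Parallelize(16, len(items), func(i int) {
		fmt.Println("processing", items[i])
	})
}
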

2
vendor/k8s.io/kubernetes/pkg/util/workqueue/queue.go
generated
vendored
@@ -154,7 +154,7 @@ func (q *Type) Done(item interface{}) {
	}
}

// Shutdown will cause q to ignore all new items added to it. As soon as the
// ShutDown will cause q to ignore all new items added to it. As soon as the
// worker goroutines have drained the existing items in the queue, they will be
// instructed to exit.
func (q *Type) ShutDown() {
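
A sketch of the Get/Done/ShutDown lifecycle the comment describes, with two hypothetical workers:

package main

import (
	"fmt"
	"sync"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

func main() {
	q := workqueue.New()

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				item, shutdown := q.Get()
				if shutdown {
					return // queue drained after ShutDown()
				}
				fmt.Println("worked on", item)
				q.Done(item)
			}
		}()
	}

	q.Add("x")
	q.Add("y")
	q.ShutDown() // new Adds are ignored; workers exit once the queue drains
	wg.Wait()
}
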

5
vendor/k8s.io/kubernetes/pkg/util/workqueue/rate_limitting_queue.go
generated
vendored
@@ -16,10 +16,10 @@ limitations under the License.

package workqueue

// RateLimitingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
// RateLimitingInterface is an interface that rate limits items being added to the queue.
type RateLimitingInterface interface {
	DelayingInterface

	// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
	AddRateLimited(item interface{})

@@ -27,6 +27,7 @@ type RateLimitingInterface interface {
	// or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you
	// still have to call `Done` on the queue.
	Forget(item interface{})

	// NumRequeues returns back how many times the item was requeued
	NumRequeues(item interface{}) int
}
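
A sketch of how AddRateLimited, NumRequeues and Forget fit together in a retry loop; the work item and reconcile function are invented, while NewRateLimitingQueue and DefaultControllerRateLimiter are the constructors this vendored package exposes:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/workqueue"
)

func reconcile(item interface{}) error {
	// Placeholder for real work.
	return nil
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	defer q.ShutDown()

	q.AddRateLimited("pod/default/example") // queued after a rate-limited delay

	item, quit := q.Get()
	if quit {
		return
	}
	if err := reconcile(item); err != nil {
		fmt.Println("retry", q.NumRequeues(item), "failed:", err)
		q.AddRateLimited(item) // back off further on repeated failures
	} else {
		q.Forget(item) // success: stop tracking backoff for this item
	}
	q.Done(item)
}
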

6
vendor/k8s.io/kubernetes/pkg/version/base.go
generated
vendored
@@ -39,8 +39,8 @@ var (
	// them irrelevant. (Next we'll take it out, which may muck with
	// scripts consuming the kubectl version output - but most of
	// these should be looking at gitVersion already anyways.)
	gitMajor string = "" // major version, always numeric
	gitMinor string = "" // minor version, numeric possibly followed by "+"
	gitMajor string = "1" // major version, always numeric
	gitMinor string = "5+" // minor version, numeric possibly followed by "+"

	// semantic version, derived by build scripts (see
	// https://github.com/kubernetes/kubernetes/blob/master/docs/design/versioning.md

@@ -51,7 +51,7 @@ var (
	// semantic version is a git hash, but the version itself is no
	// longer the direct output of "git describe", but a slight
	// translation to be semver compliant.
	gitVersion string = "v0.0.0-master+$Format:%h$"
	gitVersion string = "v1.5.0-beta.2+$Format:%h$"
	gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
	gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"

7
vendor/k8s.io/kubernetes/pkg/volume/util.go
generated
vendored
@@ -83,7 +83,12 @@ func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *api.P
			return fmt.Errorf("unexpected error creating recycler pod: %+v\n", err)
		}
	}
	defer recyclerClient.DeletePod(pod.Name, pod.Namespace)
	defer func(pod *api.Pod) {
		glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
		if err := recyclerClient.DeletePod(pod.Name, pod.Namespace); err != nil {
			glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
		}
	}(pod)

	// Now only the old pod or the new pod run. Watch it until it finishes
	// and send all events on the pod to the PV

12
vendor/k8s.io/kubernetes/pkg/volume/volume.go
generated
vendored
@@ -94,6 +94,18 @@ type Attributes struct {
type Mounter interface {
	// Uses Interface to provide the path for Docker binds.
	Volume

	// CanMount is called immediately prior to Setup to check if
	// the required components (binaries, etc.) are available on
	// the underlying node to complete the subsequent SetUp (mount)
	// operation. If CanMount returns error, the mount operation is
	// aborted and an event is generated indicating that the node
	// does not have the required binaries to complete the mount.
	// If CanMount succeeds, the mount operation continues
	// normally. The CanMount check can be enabled or disabled
	// using the experimental-check-mount-binaries binary flag
	CanMount() error

	// SetUp prepares and mounts/unpacks the volume to a
	// self-determined directory path. The mount point and its
	// content should be owned by 'fsGroup' so that it can be
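
To make the CanMount contract concrete, a sketch of what a plugin-side implementation might look like, checking that the mount helper it shells out to exists on the node; the package, struct and binary name are assumptions and the remaining Mounter methods are omitted:

package examplevolume

import (
	"fmt"
	"os/exec"
)

type exampleMounter struct{} // fields elided

// CanMount fails fast, before SetUp, when the required helper binary is not
// installed on the node, so the resulting event names the missing component.
func (m *exampleMounter) CanMount() error {
	if _, err := exec.LookPath("mount.example"); err != nil {
		return fmt.Errorf("required binary mount.example is missing: %v", err)
	}
	return nil
}
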

6
vendor/k8s.io/kubernetes/pkg/volume/volume_linux.go
generated
vendored
@@ -71,7 +71,11 @@ func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
			mask = roMask
		}

		err = chmodRunner.Chmod(path, info.Mode()|mask|os.ModeSetgid)
		if info.IsDir() {
			mask |= os.ModeSetgid
		}

		err = chmodRunner.Chmod(path, info.Mode()|mask)
		if err != nil {
			glog.Errorf("Chmod failed on %v: %v", path, err)
		}