Update go dependencies

commit f4a4daed84 (parent 432f534383)
1299 changed files with 71186 additions and 91183 deletions
19  vendor/k8s.io/kubernetes/pkg/kubelet/BUILD  (generated, vendored)
@@ -11,6 +11,7 @@ go_library(
    srcs = [
        "active_deadline.go",
        "doc.go",
        "errors.go",
        "kubelet.go",
        "kubelet_getters.go",
        "kubelet_network.go",
@@ -39,14 +40,14 @@ go_library(
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/apis/core/v1/helper/qos:go_default_library",
        "//pkg/capabilities:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/fieldpath:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/kubelet/apis/config:go_default_library",
        "//pkg/kubelet/apis/cri:go_default_library",
        "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
        "//pkg/kubelet/apis/pluginregistration/v1alpha1:go_default_library",
        "//pkg/kubelet/apis/pluginregistration/v1:go_default_library",
        "//pkg/kubelet/apis/podresources:go_default_library",
        "//pkg/kubelet/cadvisor:go_default_library",
        "//pkg/kubelet/certificate:go_default_library",
        "//pkg/kubelet/checkpointmanager:go_default_library",
@@ -96,8 +97,8 @@ go_library(
        "//pkg/kubelet/util/queue:go_default_library",
        "//pkg/kubelet/util/sliceutils:go_default_library",
        "//pkg/kubelet/volumemanager:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/security/apparmor:go_default_library",
        "//pkg/security/podsecuritypolicy/sysctl:go_default_library",
        "//pkg/securitycontext:go_default_library",
@@ -117,6 +118,7 @@ go_library(
        "//pkg/volume/validation:go_default_library",
        "//staging/src/k8s.io/api/authentication/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -139,13 +141,14 @@ go_library(
        "//staging/src/k8s.io/client-go/util/certificate:go_default_library",
        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
        "//staging/src/k8s.io/client-go/util/integer:go_default_library",
        "//staging/src/k8s.io/cloud-provider:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
        "//third_party/forked/golang/expansion:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/golang/groupcache/lru:go_default_library",
        "//vendor/github.com/google/cadvisor/events:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)
@@ -163,6 +166,7 @@ go_test(
        "kubelet_resources_test.go",
        "kubelet_test.go",
        "kubelet_volumes_test.go",
        "main_test.go",
        "oom_watcher_test.go",
        "pod_container_deletor_test.go",
        "pod_workers_test.go",
@@ -204,15 +208,15 @@ go_test(
        "//pkg/kubelet/util/queue:go_default_library",
        "//pkg/kubelet/util/sliceutils:go_default_library",
        "//pkg/kubelet/volumemanager:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/taints:go_default_library",
        "//pkg/version:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/aws_ebs:go_default_library",
        "//pkg/volume/awsebs:go_default_library",
        "//pkg/volume/azure_dd:go_default_library",
        "//pkg/volume/gce_pd:go_default_library",
        "//pkg/volume/gcepd:go_default_library",
        "//pkg/volume/host_path:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
@@ -233,6 +237,7 @@ go_test(
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
6  vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD  (generated, vendored)
@@ -13,7 +13,9 @@ go_library(
        "well_known_labels.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/apis",
    deps = select({
    deps = [
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:windows": [
            "//pkg/features:go_default_library",
            "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
@@ -37,8 +39,10 @@ filegroup(
        "//pkg/kubelet/apis/cri:all-srcs",
        "//pkg/kubelet/apis/deviceplugin/v1alpha:all-srcs",
        "//pkg/kubelet/apis/deviceplugin/v1beta1:all-srcs",
        "//pkg/kubelet/apis/pluginregistration/v1:all-srcs",
        "//pkg/kubelet/apis/pluginregistration/v1alpha1:all-srcs",
        "//pkg/kubelet/apis/pluginregistration/v1beta1:all-srcs",
        "//pkg/kubelet/apis/podresources:all-srcs",
        "//pkg/kubelet/apis/stats/v1alpha1:all-srcs",
    ],
    tags = ["automanaged"],
3013  vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go  (generated, vendored)
File diff suppressed because it is too large
76  vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go  (generated, vendored)
@@ -16,6 +16,12 @@ limitations under the License.

package apis

import (
    "strings"

    "k8s.io/apimachinery/pkg/util/sets"
)

const (
    LabelHostname = "kubernetes.io/hostname"
    LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
@@ -26,8 +32,78 @@ const (

    LabelOS = "beta.kubernetes.io/os"
    LabelArch = "beta.kubernetes.io/arch"

    // GA versions of the legacy beta labels.
    // TODO: update kubelet and controllers to set both beta and GA labels, then export these constants
    labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone"
    labelZoneRegionGA = "failure-domain.kubernetes.io/region"
    labelInstanceTypeGA = "kubernetes.io/instance-type"
    labelOSGA = "kubernetes.io/os"
    labelArchGA = "kubernetes.io/arch"

    // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*)
    LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io"
    // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*)
    LabelNamespaceSuffixNode = "node.kubernetes.io"

    // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled
    LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io"
)

// When the --failure-domains scheduler flag is not specified,
// DefaultFailureDomains defines the set of label keys used when TopologyKey is empty in PreferredDuringScheduling anti-affinity.
var DefaultFailureDomains string = LabelHostname + "," + LabelZoneFailureDomain + "," + LabelZoneRegion

var kubeletLabels = sets.NewString(
    LabelHostname,
    LabelZoneFailureDomain,
    LabelZoneRegion,
    LabelInstanceType,
    LabelOS,
    LabelArch,

    labelZoneFailureDomainGA,
    labelZoneRegionGA,
    labelInstanceTypeGA,
    labelOSGA,
    labelArchGA,
)

var kubeletLabelNamespaces = sets.NewString(
    LabelNamespaceSuffixKubelet,
    LabelNamespaceSuffixNode,
)

// KubeletLabels returns the list of label keys kubelets are allowed to set on their own Node objects
func KubeletLabels() []string {
    return kubeletLabels.List()
}

// KubeletLabelNamespaces returns the list of label key namespaces kubelets are allowed to set on their own Node objects
func KubeletLabelNamespaces() []string {
    return kubeletLabelNamespaces.List()
}

// IsKubeletLabel returns true if the label key is one that kubelets are allowed to set on their own Node object.
// This checks if the key is in the KubeletLabels() list, or has a namespace in the KubeletLabelNamespaces() list.
func IsKubeletLabel(key string) bool {
    if kubeletLabels.Has(key) {
        return true
    }

    namespace := getLabelNamespace(key)
    for allowedNamespace := range kubeletLabelNamespaces {
        if namespace == allowedNamespace || strings.HasSuffix(namespace, "."+allowedNamespace) {
            return true
        }
    }

    return false
}

func getLabelNamespace(key string) string {
    if parts := strings.SplitN(key, "/", 2); len(parts) == 2 {
        return parts[0]
    }
    return ""
}
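The helpers added above define which label keys a kubelet may set on its own Node object. A minimal sketch of how a caller could exercise IsKubeletLabel, assuming the vendored import path shown in this diff; the three label keys below are illustrative examples, not values taken from this commit:

package main

import (
    "fmt"

    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

func main() {
    // One key from the allowed set, one under an allowed namespace suffix
    // (*.kubelet.kubernetes.io), and one under the node-restriction.kubernetes.io
    // namespace that kubelets may not self-set.
    for _, key := range []string{
        "kubernetes.io/hostname",
        "example.kubelet.kubernetes.io/role",
        "node-restriction.kubernetes.io/untrusted",
    } {
        fmt.Printf("%s allowed=%v\n", key, kubeletapis.IsKubeletLabel(key))
    }
}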
2  vendor/k8s.io/kubernetes/pkg/kubelet/container/BUILD  (generated, vendored)
@@ -34,7 +34,7 @@ go_library(
        "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
        "//third_party/forked/golang/expansion:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

4  vendor/k8s.io/kubernetes/pkg/kubelet/container/container_gc.go  (generated, vendored)
@@ -20,7 +20,7 @@ import (
    "fmt"
    "time"

    "github.com/golang/glog"
    "k8s.io/klog"
)

// Specified a policy for garbage collecting containers.
@@ -82,6 +82,6 @@ func (cgc *realContainerGC) GarbageCollect() error {
}

func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
    glog.Infof("attempting to delete unused containers")
    klog.Infof("attempting to delete unused containers")
    return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
}
8  vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go  (generated, vendored)
@@ -21,7 +21,7 @@ import (
    "hash/fnv"
    "strings"

    "github.com/golang/glog"
    "k8s.io/klog"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -75,13 +75,13 @@ func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus
    }
    // Check RestartPolicy for dead container
    if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
        glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
        klog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
        return false
    }
    if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
        // Check the exit code.
        if status.ExitCode == 0 {
            glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
            klog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
            return false
        }
    }
@@ -311,7 +311,7 @@ func MakePortMappings(container *v1.Container) (ports []PortMapping) {

        // Protect against exposing the same protocol-port more than once in a container.
        if _, ok := names[pm.Name]; ok {
            glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
            klog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
            continue
        }
        ports = append(ports, pm)
4  vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go  (generated, vendored)
@@ -25,11 +25,11 @@ import (
    "strings"
    "time"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/remotecommand"
    "k8s.io/client-go/util/flowcontrol"
    "k8s.io/klog"
    runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
    "k8s.io/kubernetes/pkg/volume"
)
@@ -203,7 +203,7 @@ func BuildContainerID(typ, ID string) ContainerID {
func ParseContainerID(containerID string) ContainerID {
    var id ContainerID
    if err := id.ParseString(containerID); err != nil {
        glog.Error(err)
        klog.Error(err)
    }
    return id
}
@@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,5 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Common types in the Kubelet.
package types // import "k8s.io/kubernetes/pkg/kubelet/types"
package kubelet

const (
    NetworkNotReadyErrorMsg = "network is not ready"
)
268  vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go  (generated, vendored)
@ -31,8 +31,6 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
|
||||
"k8s.io/api/core/v1"
|
||||
|
|
@ -54,13 +52,15 @@ import (
|
|||
"k8s.io/client-go/util/certificate"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/client-go/util/integer"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
|
||||
"k8s.io/klog"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletconfiginternal "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1alpha1"
|
||||
pluginwatcherapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
kubeletcertificate "k8s.io/kubernetes/pkg/kubelet/certificate"
|
||||
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
|
||||
|
|
@ -98,6 +98,7 @@ import (
|
|||
"k8s.io/kubernetes/pkg/kubelet/sysctl"
|
||||
"k8s.io/kubernetes/pkg/kubelet/token"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/manager"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/pluginwatcher"
|
||||
|
|
@ -193,6 +194,7 @@ type Bootstrap interface {
|
|||
StartGarbageCollection()
|
||||
ListenAndServe(address net.IP, port uint, tlsOptions *server.TLSOptions, auth server.AuthInterface, enableDebuggingHandlers, enableContentionProfiling bool)
|
||||
ListenAndServeReadOnly(address net.IP, port uint)
|
||||
ListenAndServePodResources()
|
||||
Run(<-chan kubetypes.PodUpdate)
|
||||
RunOnce(<-chan kubetypes.PodUpdate) ([]RunPodResult, error)
|
||||
}
|
||||
|
|
@ -276,13 +278,13 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
|
|||
|
||||
// define file config source
|
||||
if kubeCfg.StaticPodPath != "" {
|
||||
glog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath)
|
||||
klog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath)
|
||||
config.NewSourceFile(kubeCfg.StaticPodPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource))
|
||||
}
|
||||
|
||||
// define url config source
|
||||
if kubeCfg.StaticPodURL != "" {
|
||||
glog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader)
|
||||
klog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader)
|
||||
config.NewSourceURL(kubeCfg.StaticPodURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource))
|
||||
}
|
||||
|
||||
|
|
@ -292,7 +294,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
|
|||
|
||||
var updatechannel chan<- interface{}
|
||||
if bootstrapCheckpointPath != "" {
|
||||
glog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath)
|
||||
klog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath)
|
||||
updatechannel = cfg.Channel(kubetypes.ApiserverSource)
|
||||
err := cfg.Restore(bootstrapCheckpointPath, updatechannel)
|
||||
if err != nil {
|
||||
|
|
@ -301,7 +303,7 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
|
|||
}
|
||||
|
||||
if kubeDeps.KubeClient != nil {
|
||||
glog.Infof("Watching apiserver")
|
||||
klog.Infof("Watching apiserver")
|
||||
if updatechannel == nil {
|
||||
updatechannel = cfg.Channel(kubetypes.ApiserverSource)
|
||||
}
|
||||
|
|
@ -392,7 +394,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
return nil, fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
|
||||
}
|
||||
|
||||
glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
|
||||
klog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
|
||||
}
|
||||
|
||||
if kubeDeps.PodConfig == nil {
|
||||
|
|
@ -471,7 +473,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
for _, ipEntry := range kubeCfg.ClusterDNS {
|
||||
ip := net.ParseIP(ipEntry)
|
||||
if ip == nil {
|
||||
glog.Warningf("Invalid clusterDNS ip '%q'", ipEntry)
|
||||
klog.Warningf("Invalid clusterDNS ip '%q'", ipEntry)
|
||||
} else {
|
||||
clusterDNS = append(clusterDNS, ip)
|
||||
}
|
||||
|
|
@ -480,52 +482,53 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
parsedNodeIP := net.ParseIP(nodeIP)
|
||||
protocol := utilipt.ProtocolIpv4
|
||||
if parsedNodeIP != nil && parsedNodeIP.To4() == nil {
|
||||
glog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP)
|
||||
klog.V(0).Infof("IPv6 node IP (%s), assume IPv6 operation", nodeIP)
|
||||
protocol = utilipt.ProtocolIpv6
|
||||
}
|
||||
|
||||
klet := &Kubelet{
|
||||
hostname: hostname,
|
||||
hostnameOverridden: len(hostnameOverride) > 0,
|
||||
nodeName: nodeName,
|
||||
kubeClient: kubeDeps.KubeClient,
|
||||
csiClient: kubeDeps.CSIClient,
|
||||
heartbeatClient: kubeDeps.HeartbeatClient,
|
||||
onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure,
|
||||
rootDirectory: rootDirectory,
|
||||
resyncInterval: kubeCfg.SyncFrequency.Duration,
|
||||
sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources),
|
||||
registerNode: registerNode,
|
||||
registerWithTaints: registerWithTaints,
|
||||
registerSchedulable: registerSchedulable,
|
||||
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
|
||||
serviceLister: serviceLister,
|
||||
nodeInfo: nodeInfo,
|
||||
masterServiceNamespace: masterServiceNamespace,
|
||||
streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
|
||||
recorder: kubeDeps.Recorder,
|
||||
cadvisor: kubeDeps.CAdvisorInterface,
|
||||
cloud: kubeDeps.Cloud,
|
||||
externalCloudProvider: cloudprovider.IsExternal(cloudProvider),
|
||||
providerID: providerID,
|
||||
nodeRef: nodeRef,
|
||||
nodeLabels: nodeLabels,
|
||||
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
|
||||
os: kubeDeps.OSInterface,
|
||||
oomWatcher: oomWatcher,
|
||||
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
|
||||
cgroupRoot: kubeCfg.CgroupRoot,
|
||||
mounter: kubeDeps.Mounter,
|
||||
maxPods: int(kubeCfg.MaxPods),
|
||||
podsPerCore: int(kubeCfg.PodsPerCore),
|
||||
syncLoopMonitor: atomic.Value{},
|
||||
daemonEndpoints: daemonEndpoints,
|
||||
containerManager: kubeDeps.ContainerManager,
|
||||
containerRuntimeName: containerRuntime,
|
||||
redirectContainerStreaming: crOptions.RedirectContainerStreaming,
|
||||
nodeIP: parsedNodeIP,
|
||||
nodeIPValidator: validateNodeIP,
|
||||
clock: clock.RealClock{},
|
||||
hostname: hostname,
|
||||
hostnameOverridden: len(hostnameOverride) > 0,
|
||||
nodeName: nodeName,
|
||||
kubeClient: kubeDeps.KubeClient,
|
||||
csiClient: kubeDeps.CSIClient,
|
||||
heartbeatClient: kubeDeps.HeartbeatClient,
|
||||
onRepeatedHeartbeatFailure: kubeDeps.OnHeartbeatFailure,
|
||||
rootDirectory: rootDirectory,
|
||||
resyncInterval: kubeCfg.SyncFrequency.Duration,
|
||||
sourcesReady: config.NewSourcesReady(kubeDeps.PodConfig.SeenAllSources),
|
||||
registerNode: registerNode,
|
||||
registerWithTaints: registerWithTaints,
|
||||
registerSchedulable: registerSchedulable,
|
||||
dnsConfigurer: dns.NewConfigurer(kubeDeps.Recorder, nodeRef, parsedNodeIP, clusterDNS, kubeCfg.ClusterDomain, kubeCfg.ResolverConfig),
|
||||
serviceLister: serviceLister,
|
||||
nodeInfo: nodeInfo,
|
||||
masterServiceNamespace: masterServiceNamespace,
|
||||
streamingConnectionIdleTimeout: kubeCfg.StreamingConnectionIdleTimeout.Duration,
|
||||
recorder: kubeDeps.Recorder,
|
||||
cadvisor: kubeDeps.CAdvisorInterface,
|
||||
cloud: kubeDeps.Cloud,
|
||||
externalCloudProvider: cloudprovider.IsExternal(cloudProvider),
|
||||
providerID: providerID,
|
||||
nodeRef: nodeRef,
|
||||
nodeLabels: nodeLabels,
|
||||
nodeStatusUpdateFrequency: kubeCfg.NodeStatusUpdateFrequency.Duration,
|
||||
nodeStatusReportFrequency: kubeCfg.NodeStatusReportFrequency.Duration,
|
||||
os: kubeDeps.OSInterface,
|
||||
oomWatcher: oomWatcher,
|
||||
cgroupsPerQOS: kubeCfg.CgroupsPerQOS,
|
||||
cgroupRoot: kubeCfg.CgroupRoot,
|
||||
mounter: kubeDeps.Mounter,
|
||||
maxPods: int(kubeCfg.MaxPods),
|
||||
podsPerCore: int(kubeCfg.PodsPerCore),
|
||||
syncLoopMonitor: atomic.Value{},
|
||||
daemonEndpoints: daemonEndpoints,
|
||||
containerManager: kubeDeps.ContainerManager,
|
||||
containerRuntimeName: containerRuntime,
|
||||
redirectContainerStreaming: crOptions.RedirectContainerStreaming,
|
||||
nodeIP: parsedNodeIP,
|
||||
nodeIPValidator: validateNodeIP,
|
||||
clock: clock.RealClock{},
|
||||
enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
|
||||
iptClient: utilipt.New(utilexec.New(), utildbus.New(), protocol),
|
||||
makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,
|
||||
|
|
@ -563,7 +566,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
klet.configMapManager = configMapManager
|
||||
|
||||
if klet.experimentalHostUserNamespaceDefaulting {
|
||||
glog.Infof("Experimental host user namespace defaulting is enabled.")
|
||||
klog.Infof("Experimental host user namespace defaulting is enabled.")
|
||||
}
|
||||
|
||||
machineInfo, err := klet.cadvisor.MachineInfo()
|
||||
|
|
@ -607,7 +610,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
klet.resourceAnalyzer = serverstats.NewResourceAnalyzer(klet, kubeCfg.VolumeStatsAggPeriod.Duration)
|
||||
|
||||
if containerRuntime == "rkt" {
|
||||
glog.Fatalln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.")
|
||||
klog.Fatalln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.")
|
||||
}
|
||||
|
||||
// if left at nil, that means it is unneeded
|
||||
|
|
@ -627,10 +630,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
}
|
||||
|
||||
// The unix socket for kubelet <-> dockershim communication.
|
||||
glog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q",
|
||||
klog.V(5).Infof("RemoteRuntimeEndpoint: %q, RemoteImageEndpoint: %q",
|
||||
remoteRuntimeEndpoint,
|
||||
remoteImageEndpoint)
|
||||
glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.")
|
||||
klog.V(2).Infof("Starting the GRPC server for the docker CRI shim.")
|
||||
server := dockerremote.NewDockerServer(remoteRuntimeEndpoint, ds)
|
||||
if err := server.Start(); err != nil {
|
||||
return nil, err
|
||||
|
|
@ -717,8 +720,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{})
|
||||
klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime)
|
||||
klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy)
|
||||
if err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil {
|
||||
glog.Errorf("Pod CIDR update failed %v", err)
|
||||
if _, err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil {
|
||||
klog.Errorf("Pod CIDR update failed %v", err)
|
||||
}
|
||||
|
||||
// setup containerGC
|
||||
|
|
@ -777,7 +780,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
tokenManager := token.NewManager(kubeDeps.KubeClient)
|
||||
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) {
|
||||
glog.Warning("Mount propagation feature gate has been deprecated and will be removed in the next release")
|
||||
return nil, fmt.Errorf("mount propagation feature gate has been deprecated and will be removed in 1.14")
|
||||
}
|
||||
|
||||
klet.volumePluginMgr, err =
|
||||
|
|
@ -786,7 +789,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
|
|||
return nil, err
|
||||
}
|
||||
if klet.enablePluginsWatcher {
|
||||
klet.pluginWatcher = pluginwatcher.NewWatcher(klet.getPluginsDir())
|
||||
klet.pluginWatcher = pluginwatcher.NewWatcher(
|
||||
klet.getPluginsRegistrationDir(), /* sockDir */
|
||||
klet.getPluginsDir(), /* deprecatedSockDir */
|
||||
)
|
||||
}
|
||||
|
||||
// If the experimentalMounterPathFlag is set, we do not want to
|
||||
|
|
@ -1036,8 +1042,9 @@ type Kubelet struct {
|
|||
// used for generating ContainerStatus.
|
||||
reasonCache *ReasonCache
|
||||
|
||||
// nodeStatusUpdateFrequency specifies how often kubelet posts node status to master.
|
||||
// Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod
|
||||
// nodeStatusUpdateFrequency specifies how often kubelet computes node status. If node lease
|
||||
// feature is not enabled, it is also the frequency that kubelet posts node status to master.
|
||||
// In that case, be cautious when changing the constant, it must work with nodeMonitorGracePeriod
|
||||
// in nodecontroller. There are several constraints:
|
||||
// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
|
||||
// N means number of retries allowed for kubelet to post node status. It is pointless
|
||||
|
|
@ -1049,6 +1056,13 @@ type Kubelet struct {
|
|||
// as it takes time to gather all necessary node information.
|
||||
nodeStatusUpdateFrequency time.Duration
|
||||
|
||||
// nodeStatusUpdateFrequency is the frequency that kubelet posts node
|
||||
// status to master. It is only used when node lease feature is enabled.
|
||||
nodeStatusReportFrequency time.Duration
|
||||
|
||||
// lastStatusReportTime is the time when node status was last reported.
|
||||
lastStatusReportTime time.Time
|
||||
|
||||
// syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe.
|
||||
// This lock is used by Kublet.syncNodeStatus function and shouldn't be used anywhere else.
|
||||
syncNodeStatusMux sync.Mutex
|
||||
|
|
@ -1234,6 +1248,7 @@ func allGlobalUnicastIPs() ([]net.IP, error) {
|
|||
// 1. the root directory
|
||||
// 2. the pods directory
|
||||
// 3. the plugins directory
|
||||
// 4. the pod-resources directory
|
||||
func (kl *Kubelet) setupDataDirs() error {
|
||||
kl.rootDirectory = path.Clean(kl.rootDirectory)
|
||||
if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil {
|
||||
|
|
@ -1248,6 +1263,12 @@ func (kl *Kubelet) setupDataDirs() error {
|
|||
if err := os.MkdirAll(kl.getPluginsDir(), 0750); err != nil {
|
||||
return fmt.Errorf("error creating plugins directory: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(kl.getPluginsRegistrationDir(), 0750); err != nil {
|
||||
return fmt.Errorf("error creating plugins registry directory: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(kl.getPodResourcesDir(), 0750); err != nil {
|
||||
return fmt.Errorf("error creating podresources directory: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -1256,23 +1277,23 @@ func (kl *Kubelet) StartGarbageCollection() {
|
|||
loggedContainerGCFailure := false
|
||||
go wait.Until(func() {
|
||||
if err := kl.containerGC.GarbageCollect(); err != nil {
|
||||
glog.Errorf("Container garbage collection failed: %v", err)
|
||||
klog.Errorf("Container garbage collection failed: %v", err)
|
||||
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error())
|
||||
loggedContainerGCFailure = true
|
||||
} else {
|
||||
var vLevel glog.Level = 4
|
||||
var vLevel klog.Level = 4
|
||||
if loggedContainerGCFailure {
|
||||
vLevel = 1
|
||||
loggedContainerGCFailure = false
|
||||
}
|
||||
|
||||
glog.V(vLevel).Infof("Container garbage collection succeeded")
|
||||
klog.V(vLevel).Infof("Container garbage collection succeeded")
|
||||
}
|
||||
}, ContainerGCPeriod, wait.NeverStop)
|
||||
|
||||
// when the high threshold is set to 100, stub the image GC manager
|
||||
if kl.kubeletConfiguration.ImageGCHighThresholdPercent == 100 {
|
||||
glog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC")
|
||||
klog.V(2).Infof("ImageGCHighThresholdPercent is set 100, Disable image GC")
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -1280,21 +1301,21 @@ func (kl *Kubelet) StartGarbageCollection() {
|
|||
go wait.Until(func() {
|
||||
if err := kl.imageManager.GarbageCollect(); err != nil {
|
||||
if prevImageGCFailed {
|
||||
glog.Errorf("Image garbage collection failed multiple times in a row: %v", err)
|
||||
klog.Errorf("Image garbage collection failed multiple times in a row: %v", err)
|
||||
// Only create an event for repeated failures
|
||||
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ImageGCFailed, err.Error())
|
||||
} else {
|
||||
glog.Errorf("Image garbage collection failed once. Stats initialization may not have completed yet: %v", err)
|
||||
klog.Errorf("Image garbage collection failed once. Stats initialization may not have completed yet: %v", err)
|
||||
}
|
||||
prevImageGCFailed = true
|
||||
} else {
|
||||
var vLevel glog.Level = 4
|
||||
var vLevel klog.Level = 4
|
||||
if prevImageGCFailed {
|
||||
vLevel = 1
|
||||
prevImageGCFailed = false
|
||||
}
|
||||
|
||||
glog.V(vLevel).Infof("Image garbage collection succeeded")
|
||||
klog.V(vLevel).Infof("Image garbage collection succeeded")
|
||||
}
|
||||
}, ImageGCPeriod, wait.NeverStop)
|
||||
}
|
||||
|
|
@ -1303,7 +1324,11 @@ func (kl *Kubelet) StartGarbageCollection() {
|
|||
// Note that the modules here must not depend on modules that are not initialized here.
|
||||
func (kl *Kubelet) initializeModules() error {
|
||||
// Prometheus metrics.
|
||||
metrics.Register(kl.runtimeCache, collectors.NewVolumeStatsCollector(kl))
|
||||
metrics.Register(
|
||||
kl.runtimeCache,
|
||||
collectors.NewVolumeStatsCollector(kl),
|
||||
collectors.NewLogMetricsCollector(kl.StatsProvider.ListPodStats),
|
||||
)
|
||||
|
||||
// Setup filesystem directories.
|
||||
if err := kl.setupDataDirs(); err != nil {
|
||||
|
|
@ -1313,7 +1338,7 @@ func (kl *Kubelet) initializeModules() error {
|
|||
// If the container logs directory does not exist, create it.
|
||||
if _, err := os.Stat(ContainerLogsDir); err != nil {
|
||||
if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil {
|
||||
glog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err)
|
||||
klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1341,7 +1366,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
|
|||
if err := kl.cadvisor.Start(); err != nil {
|
||||
// Fail kubelet and rely on the babysitter to retry starting kubelet.
|
||||
// TODO(random-liu): Add backoff logic in the babysitter
|
||||
glog.Fatalf("Failed to start cAdvisor %v", err)
|
||||
klog.Fatalf("Failed to start cAdvisor %v", err)
|
||||
}
|
||||
|
||||
// trigger on-demand stats collection once so that we have capacity information for ephemeral storage.
|
||||
|
|
@ -1351,12 +1376,12 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
|
|||
node, err := kl.getNodeAnyWay()
|
||||
if err != nil {
|
||||
// Fail kubelet and rely on the babysitter to retry starting kubelet.
|
||||
glog.Fatalf("Kubelet failed to get node info: %v", err)
|
||||
klog.Fatalf("Kubelet failed to get node info: %v", err)
|
||||
}
|
||||
// containerManager must start after cAdvisor because it needs filesystem capacity information
|
||||
if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil {
|
||||
// Fail kubelet and rely on the babysitter to retry starting kubelet.
|
||||
glog.Fatalf("Failed to start ContainerManager %v", err)
|
||||
klog.Fatalf("Failed to start ContainerManager %v", err)
|
||||
}
|
||||
// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
|
||||
kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod)
|
||||
|
|
@ -1366,14 +1391,14 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
|
|||
kl.containerLogManager.Start()
|
||||
if kl.enablePluginsWatcher {
|
||||
// Adding Registration Callback function for CSI Driver
|
||||
kl.pluginWatcher.AddHandler("CSIPlugin", pluginwatcher.PluginHandler(csi.PluginHandler))
|
||||
kl.pluginWatcher.AddHandler(pluginwatcherapi.CSIPlugin, pluginwatcher.PluginHandler(csi.PluginHandler))
|
||||
// Adding Registration Callback function for Device Manager
|
||||
kl.pluginWatcher.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandler())
|
||||
// Start the plugin watcher
|
||||
glog.V(4).Infof("starting watcher")
|
||||
klog.V(4).Infof("starting watcher")
|
||||
if err := kl.pluginWatcher.Start(); err != nil {
|
||||
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
|
||||
glog.Fatalf("failed to start Plugin Watcher. err: %v", err)
|
||||
klog.Fatalf("failed to start Plugin Watcher. err: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1384,7 +1409,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
|
|||
kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/")))
|
||||
}
|
||||
if kl.kubeClient == nil {
|
||||
glog.Warning("No api server defined - no node status update will be sent.")
|
||||
klog.Warning("No api server defined - no node status update will be sent.")
|
||||
}
|
||||
|
||||
// Start the cloud provider sync manager
|
||||
|
|
@ -1394,7 +1419,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
|
|||
|
||||
if err := kl.initializeModules(); err != nil {
|
||||
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
|
||||
glog.Fatal(err)
|
||||
klog.Fatal(err)
|
||||
}
|
||||
|
||||
// Start volume manager
|
||||
|
|
@ -1502,7 +1527,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||
// since kubelet first saw the pod if firstSeenTime is set.
|
||||
metrics.PodWorkerStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
|
||||
} else {
|
||||
glog.V(3).Infof("First seen time not recorded for pod %q", pod.UID)
|
||||
klog.V(3).Infof("First seen time not recorded for pod %q", pod.UID)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1561,8 +1586,8 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||
|
||||
// If the network plugin is not ready, only start the pod if it uses the host network
|
||||
if rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) {
|
||||
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "network is not ready: %v", rs)
|
||||
return fmt.Errorf("network is not ready: %v", rs)
|
||||
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.NetworkNotReady, "%s: %v", NetworkNotReadyErrorMsg, rs)
|
||||
return fmt.Errorf("%s: %v", NetworkNotReadyErrorMsg, rs)
|
||||
}
|
||||
|
||||
// Create Cgroups for the pod and apply resource parameters
|
||||
|
|
@ -1601,7 +1626,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||
if !(podKilled && pod.Spec.RestartPolicy == v1.RestartPolicyNever) {
|
||||
if !pcm.Exists(pod) {
|
||||
if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
|
||||
glog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err)
|
||||
klog.V(2).Infof("Failed to update QoS cgroups while syncing pod: %v", err)
|
||||
}
|
||||
if err := pcm.EnsureExists(pod); err != nil {
|
||||
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToCreatePodContainer, "unable to ensure pod container exists: %v", err)
|
||||
|
|
@ -1619,9 +1644,9 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||
if mirrorPod.DeletionTimestamp != nil || !kl.podManager.IsMirrorPodOf(mirrorPod, pod) {
|
||||
// The mirror pod is semantically different from the static pod. Remove
|
||||
// it. The mirror pod will get recreated later.
|
||||
glog.Warningf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod))
|
||||
klog.Warningf("Deleting mirror pod %q because it is outdated", format.Pod(mirrorPod))
|
||||
if err := kl.podManager.DeleteMirrorPod(podFullName); err != nil {
|
||||
glog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err)
|
||||
klog.Errorf("Failed deleting mirror pod %q: %v", format.Pod(mirrorPod), err)
|
||||
} else {
|
||||
deleted = true
|
||||
}
|
||||
|
|
@ -1630,11 +1655,11 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||
if mirrorPod == nil || deleted {
|
||||
node, err := kl.GetNode()
|
||||
if err != nil || node.DeletionTimestamp != nil {
|
||||
glog.V(4).Infof("No need to create a mirror pod, since node %q has been removed from the cluster", kl.nodeName)
|
||||
klog.V(4).Infof("No need to create a mirror pod, since node %q has been removed from the cluster", kl.nodeName)
|
||||
} else {
|
||||
glog.V(4).Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
|
||||
klog.V(4).Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
|
||||
if err := kl.podManager.CreateMirrorPod(pod); err != nil {
|
||||
glog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err)
|
||||
klog.Errorf("Failed creating a mirror pod for %q: %v", format.Pod(pod), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1643,7 +1668,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||
// Make data directories for the pod
|
||||
if err := kl.makePodDataDirs(pod); err != nil {
|
||||
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToMakePodDataDirectories, "error making pod data directories: %v", err)
|
||||
glog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err)
|
||||
klog.Errorf("Unable to make pod data directories for pod %q: %v", format.Pod(pod), err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -1652,7 +1677,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
|
|||
// Wait for volumes to attach/mount
|
||||
if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil {
|
||||
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
|
||||
glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
|
||||
klog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
@ -1801,8 +1826,8 @@ func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult {
|
|||
// no changes are seen to the configuration, will synchronize the last known desired
|
||||
// state every sync-frequency seconds. Never returns.
|
||||
func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) {
|
||||
glog.Info("Starting kubelet main sync loop.")
|
||||
// The resyncTicker wakes up kubelet to checks if there are any pod workers
|
||||
klog.Info("Starting kubelet main sync loop.")
|
||||
// The syncTicker wakes up kubelet to checks if there are any pod workers
|
||||
// that need to be sync'd. A one-second period is sufficient because the
|
||||
// sync interval is defaulted to 10s.
|
||||
syncTicker := time.NewTicker(time.Second)
|
||||
|
|
@ -1818,7 +1843,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
|
|||
duration := base
|
||||
for {
|
||||
if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
|
||||
glog.Infof("skipping pod synchronization - %v", rs)
|
||||
klog.Infof("skipping pod synchronization - %v", rs)
|
||||
// exponential backoff
|
||||
time.Sleep(duration)
|
||||
duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
|
||||
|
|
@ -1874,39 +1899,39 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
|
|||
// Update from a config source; dispatch it to the right handler
|
||||
// callback.
|
||||
if !open {
|
||||
glog.Errorf("Update channel is closed. Exiting the sync loop.")
|
||||
klog.Errorf("Update channel is closed. Exiting the sync loop.")
|
||||
return false
|
||||
}
|
||||
|
||||
switch u.Op {
|
||||
case kubetypes.ADD:
|
||||
glog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
klog.V(2).Infof("SyncLoop (ADD, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
// After restarting, kubelet will get all existing pods through
|
||||
// ADD as if they are new pods. These pods will then go through the
|
||||
// admission process and *may* be rejected. This can be resolved
|
||||
// once we have checkpointing.
|
||||
handler.HandlePodAdditions(u.Pods)
|
||||
case kubetypes.UPDATE:
|
||||
glog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletionTimestamps(u.Pods))
|
||||
klog.V(2).Infof("SyncLoop (UPDATE, %q): %q", u.Source, format.PodsWithDeletionTimestamps(u.Pods))
|
||||
handler.HandlePodUpdates(u.Pods)
|
||||
case kubetypes.REMOVE:
|
||||
glog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
klog.V(2).Infof("SyncLoop (REMOVE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
handler.HandlePodRemoves(u.Pods)
|
||||
case kubetypes.RECONCILE:
|
||||
glog.V(4).Infof("SyncLoop (RECONCILE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
klog.V(4).Infof("SyncLoop (RECONCILE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
handler.HandlePodReconcile(u.Pods)
|
||||
case kubetypes.DELETE:
|
||||
glog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
klog.V(2).Infof("SyncLoop (DELETE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
// DELETE is treated as a UPDATE because of graceful deletion.
|
||||
handler.HandlePodUpdates(u.Pods)
|
||||
case kubetypes.RESTORE:
|
||||
glog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
klog.V(2).Infof("SyncLoop (RESTORE, %q): %q", u.Source, format.Pods(u.Pods))
|
||||
// These are pods restored from the checkpoint. Treat them as new
|
||||
// pods.
|
||||
handler.HandlePodAdditions(u.Pods)
|
||||
case kubetypes.SET:
|
||||
// TODO: Do we want to support this?
|
||||
glog.Errorf("Kubelet does not support snapshot update")
|
||||
klog.Errorf("Kubelet does not support snapshot update")
|
||||
}
|
||||
|
||||
if u.Op != kubetypes.RESTORE {
|
||||
|
|
@ -1925,11 +1950,11 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
|
|||
if isSyncPodWorthy(e) {
|
||||
// PLEG event for a pod; sync it.
|
||||
if pod, ok := kl.podManager.GetPodByUID(e.ID); ok {
|
||||
glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
|
||||
klog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
|
||||
handler.HandlePodSyncs([]*v1.Pod{pod})
|
||||
} else {
|
||||
// If the pod no longer exists, ignore the event.
|
||||
glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
|
||||
klog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1944,7 +1969,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
|
|||
if len(podsToSync) == 0 {
|
||||
break
|
||||
}
|
||||
glog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync))
|
||||
klog.V(4).Infof("SyncLoop (SYNC): %d pods; %s", len(podsToSync), format.Pods(podsToSync))
|
||||
handler.HandlePodSyncs(podsToSync)
|
||||
case update := <-kl.livenessManager.Updates():
|
||||
if update.Result == proberesults.Failure {
|
||||
|
|
@ -1955,21 +1980,21 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
|
|||
pod, ok := kl.podManager.GetPodByUID(update.PodUID)
|
||||
if !ok {
|
||||
// If the pod no longer exists, ignore the update.
|
||||
glog.V(4).Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update)
|
||||
klog.V(4).Infof("SyncLoop (container unhealthy): ignore irrelevant update: %#v", update)
|
||||
break
|
||||
}
|
||||
glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod))
|
||||
klog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod))
|
||||
handler.HandlePodSyncs([]*v1.Pod{pod})
|
||||
}
|
||||
case <-housekeepingCh:
|
||||
if !kl.sourcesReady.AllReady() {
|
||||
// If the sources aren't ready or volume manager has not yet synced the states,
|
||||
// skip housekeeping, as we may accidentally delete pods from unready sources.
|
||||
glog.V(4).Infof("SyncLoop (housekeeping, skipped): sources aren't ready yet.")
|
||||
klog.V(4).Infof("SyncLoop (housekeeping, skipped): sources aren't ready yet.")
|
||||
} else {
|
||||
glog.V(4).Infof("SyncLoop (housekeeping)")
|
||||
klog.V(4).Infof("SyncLoop (housekeeping)")
|
||||
if err := handler.HandlePodCleanups(); err != nil {
|
||||
glog.Errorf("Failed cleaning pods: %v", err)
|
||||
klog.Errorf("Failed cleaning pods: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -2092,7 +2117,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
|
|||
// Deletion is allowed to fail because the periodic cleanup routine
|
||||
// will trigger deletion again.
|
||||
if err := kl.deletePod(pod); err != nil {
|
||||
glog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err)
|
||||
klog.V(2).Infof("Failed to delete pod %q, err: %v", format.Pod(pod), err)
|
||||
}
|
||||
kl.probeManager.RemovePod(pod)
|
||||
}
|
||||
|
|
@ -2151,20 +2176,20 @@ func (kl *Kubelet) updateRuntimeUp() {
|
|||
|
||||
s, err := kl.containerRuntime.Status()
|
||||
if err != nil {
|
||||
glog.Errorf("Container runtime sanity check failed: %v", err)
|
||||
klog.Errorf("Container runtime sanity check failed: %v", err)
|
||||
return
|
||||
}
|
||||
if s == nil {
|
||||
glog.Errorf("Container runtime status is nil")
|
||||
klog.Errorf("Container runtime status is nil")
|
||||
return
|
||||
}
|
||||
// Periodically log the whole runtime status for debugging.
|
||||
// TODO(random-liu): Consider to send node event when optional
|
||||
// condition is unmet.
|
||||
glog.V(4).Infof("Container runtime status: %v", s)
|
||||
klog.V(4).Infof("Container runtime status: %v", s)
|
||||
networkReady := s.GetRuntimeCondition(kubecontainer.NetworkReady)
|
||||
if networkReady == nil || !networkReady.Status {
|
||||
glog.Errorf("Container runtime network not ready: %v", networkReady)
|
||||
klog.Errorf("Container runtime network not ready: %v", networkReady)
|
||||
kl.runtimeState.setNetworkState(fmt.Errorf("runtime network not ready: %v", networkReady))
|
||||
} else {
|
||||
// Set nil if the container runtime network is ready.
|
||||
|
|
@ -2176,7 +2201,7 @@ func (kl *Kubelet) updateRuntimeUp() {
|
|||
runtimeReady := s.GetRuntimeCondition(kubecontainer.RuntimeReady)
|
||||
// If RuntimeReady is not set or is false, report an error.
|
||||
if runtimeReady == nil || !runtimeReady.Status {
|
||||
glog.Errorf("Container runtime not ready: %v", runtimeReady)
|
||||
klog.Errorf("Container runtime not ready: %v", runtimeReady)
|
||||
return
|
||||
}
|
||||
kl.oneTimeInitializer.Do(kl.initializeRuntimeDependentModules)
|
||||
|
|
@ -2209,6 +2234,11 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) {
|
|||
server.ListenAndServeKubeletReadOnlyServer(kl, kl.resourceAnalyzer, address, port)
|
||||
}
|
||||
|
||||
// ListenAndServePodResources runs the kubelet podresources grpc service
|
||||
func (kl *Kubelet) ListenAndServePodResources() {
|
||||
server.ListenAndServePodResources(util.LocalEndpoint(kl.getPodResourcesDir(), podresources.Socket), kl.podManager, kl.containerManager)
|
||||
}
|
||||
|
||||
// Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
|
||||
func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID string) {
|
||||
if podStatus, err := kl.podCache.Get(podID); err == nil {
|
||||
|
|
@ -2233,12 +2263,12 @@ func (kl *Kubelet) fastStatusUpdateOnce() {
|
|||
time.Sleep(100 * time.Millisecond)
|
||||
node, err := kl.GetNode()
|
||||
if err != nil {
|
||||
glog.Errorf(err.Error())
|
||||
klog.Errorf(err.Error())
|
||||
continue
|
||||
}
|
||||
if node.Spec.PodCIDR != "" {
|
||||
if err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil {
|
||||
glog.Errorf("Pod CIDR update failed %v", err)
|
||||
if _, err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil {
|
||||
klog.Errorf("Pod CIDR update failed %v", err)
|
||||
continue
|
||||
}
|
||||
kl.updateRuntimeUp()
|
||||
|
|
|
|||
19  vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go  (generated, vendored)
@@ -22,8 +22,8 @@ import (
    "net"
    "path/filepath"

    "github.com/golang/glog"
    cadvisorapiv1 "github.com/google/cadvisor/info/v1"
    "k8s.io/klog"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
@@ -57,6 +57,14 @@ func (kl *Kubelet) getPluginsDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName)
}

// getPluginsRegistrationDir returns the full path to the directory under which
// plugins socket should be placed to be registered.
// More information is available about plugin registration in the pluginwatcher
// module
func (kl *Kubelet) getPluginsRegistrationDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsRegistrationDirName)
}

// getPluginDir returns a data directory name for a given plugin name.
// Plugins can use these directories to store data that they need to persist.
// For per-pod plugin data, see getPodPluginDir.
@@ -139,6 +147,11 @@ func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string {
    return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletContainersDirName, ctrName)
}

// getPodResourcesSocket returns the full path to the directory containing the pod resources socket
func (kl *Kubelet) getPodResourcesDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodResourcesDirName)
}

// GetPods returns all pods bound to the kubelet and their spec, and the mirror
// pods.
func (kl *Kubelet) GetPods() []*v1.Pod {
@@ -261,13 +274,13 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err
    if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {
        return volumes, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr)
    } else if !pathExists {
        glog.Warningf("Path %q does not exist", podVolDir)
        klog.Warningf("Path %q does not exist", podVolDir)
        return volumes, nil
    }

    volumePluginDirs, err := ioutil.ReadDir(podVolDir)
    if err != nil {
        glog.Errorf("Could not read directory %s: %v", podVolDir, err)
        klog.Errorf("Could not read directory %s: %v", podVolDir, err)
        return volumes, err
    }
    for _, volumePluginDir := range volumePluginDirs {
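The two getters added above simply join fixed subdirectory names onto the kubelet root directory. A small sketch of the resulting layout, assuming the conventional default root directory and the usual directory names ("plugins_registry", "pod-resources"); both values are assumptions here, the real ones come from the config constants referenced in the code above:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    // Assumed defaults; the real names come from the kubelet config constants.
    rootDir := "/var/lib/kubelet"
    fmt.Println(filepath.Join(rootDir, "plugins_registry")) // getPluginsRegistrationDir()
    fmt.Println(filepath.Join(rootDir, "pod-resources"))    // getPodResourcesDir()
}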
16  vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go  (generated, vendored)
@@ -19,8 +19,8 @@ package kubelet
import (
    "fmt"

    "github.com/golang/glog"
    "k8s.io/api/core/v1"
    "k8s.io/klog"
    runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
    utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
@@ -55,26 +55,28 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
}

// updatePodCIDR updates the pod CIDR in the runtime state if it is different
// from the current CIDR.
func (kl *Kubelet) updatePodCIDR(cidr string) error {
// from the current CIDR. Return true if pod CIDR is actually changed.
func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) {
    kl.updatePodCIDRMux.Lock()
    defer kl.updatePodCIDRMux.Unlock()

    podCIDR := kl.runtimeState.podCIDR()

    if podCIDR == cidr {
        return nil
        return false, nil
    }

    // kubelet -> generic runtime -> runtime shim -> network plugin
    // docker/non-cri implementations have a passthrough UpdatePodCIDR
    if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil {
        return fmt.Errorf("failed to update pod CIDR: %v", err)
        // If updatePodCIDR would fail, theoretically pod CIDR could not change.
        // But it is better to be on the safe side to still return true here.
        return true, fmt.Errorf("failed to update pod CIDR: %v", err)
    }

    glog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr)
    klog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr)
    kl.runtimeState.setPodCIDR(cidr)
    return nil
    return true, nil
}

// GetPodDNS returns DNS settings for the pod.
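With the new (bool, error) signature, callers of updatePodCIDR can tell whether the CIDR actually changed and skip follow-up work when it did not. A hedged, package-internal sketch of that calling pattern; the syncPodCIDR helper and the onChanged callback are illustrative and not part of this commit:

// syncPodCIDR is an illustrative wrapper, not code from this diff.
func syncPodCIDR(kl *Kubelet, cidr string, onChanged func()) error {
    changed, err := kl.updatePodCIDR(cidr)
    if err != nil {
        return err
    }
    if changed {
        // React (for example, by refreshing node status) only when the pod CIDR really moved.
        onChanged()
    }
    return nil
}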
30  vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go  (generated, vendored)
@@ -21,7 +21,7 @@ package kubelet
import (
    "fmt"

    "github.com/golang/glog"
    "k8s.io/klog"
    utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)

@ -33,73 +33,73 @@ import (
|
|||
// Marked connection will get SNAT on POSTROUTING Chain in nat table
|
||||
func (kl *Kubelet) syncNetworkUtil() {
|
||||
if kl.iptablesMasqueradeBit < 0 || kl.iptablesMasqueradeBit > 31 {
|
||||
glog.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", kl.iptablesMasqueradeBit)
|
||||
klog.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", kl.iptablesMasqueradeBit)
|
||||
return
|
||||
}
|
||||
|
||||
if kl.iptablesDropBit < 0 || kl.iptablesDropBit > 31 {
|
||||
glog.Errorf("invalid iptables-drop-bit %v not in [0, 31]", kl.iptablesDropBit)
|
||||
klog.Errorf("invalid iptables-drop-bit %v not in [0, 31]", kl.iptablesDropBit)
|
||||
return
|
||||
}
|
||||
|
||||
if kl.iptablesDropBit == kl.iptablesMasqueradeBit {
|
||||
glog.Errorf("iptables-masquerade-bit %v and iptables-drop-bit %v must be different", kl.iptablesMasqueradeBit, kl.iptablesDropBit)
|
||||
klog.Errorf("iptables-masquerade-bit %v and iptables-drop-bit %v must be different", kl.iptablesMasqueradeBit, kl.iptablesDropBit)
|
||||
return
|
||||
}
|
||||
|
||||
// Setup KUBE-MARK-DROP rules
|
||||
dropMark := getIPTablesMark(kl.iptablesDropBit)
|
||||
if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkDropChain); err != nil {
|
||||
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err)
|
||||
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--set-xmark", dropMark); err != nil {
|
||||
glog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err)
|
||||
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureChain(utiliptables.TableFilter, KubeFirewallChain); err != nil {
|
||||
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err)
|
||||
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain,
|
||||
"-m", "comment", "--comment", "kubernetes firewall for dropping marked packets",
|
||||
"-m", "mark", "--mark", dropMark,
|
||||
"-j", "DROP"); err != nil {
|
||||
glog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err)
|
||||
klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil {
|
||||
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err)
|
||||
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainInput, "-j", string(KubeFirewallChain)); err != nil {
|
||||
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err)
|
||||
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Setup KUBE-MARK-MASQ rules
|
||||
masqueradeMark := getIPTablesMark(kl.iptablesMasqueradeBit)
|
||||
if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkMasqChain); err != nil {
|
||||
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err)
|
||||
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubePostroutingChain); err != nil {
|
||||
glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err)
|
||||
klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--set-xmark", masqueradeMark); err != nil {
|
||||
glog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err)
|
||||
klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting,
|
||||
"-m", "comment", "--comment", "kubernetes postrouting rules", "-j", string(KubePostroutingChain)); err != nil {
|
||||
glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err)
|
||||
klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err)
|
||||
return
|
||||
}
|
||||
if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
|
||||
"-m", "comment", "--comment", "kubernetes service traffic requiring SNAT",
|
||||
"-m", "mark", "--mark", masqueradeMark, "-j", "MASQUERADE"); err != nil {
|
||||
glog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err)
|
||||
klog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
|
|||
140
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go
generated
vendored
140
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go
generated
vendored
|
|
@ -21,25 +21,27 @@ import (
|
|||
"fmt"
|
||||
"net"
|
||||
goruntime "runtime"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
"k8s.io/kubernetes/pkg/kubelet/nodestatus"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
nodeutil "k8s.io/kubernetes/pkg/util/node"
|
||||
taintutil "k8s.io/kubernetes/pkg/util/taints"
|
||||
volutil "k8s.io/kubernetes/pkg/volume/util"
|
||||
|
|
@ -63,14 +65,14 @@ func (kl *Kubelet) registerWithAPIServer() {
|
|||
|
||||
node, err := kl.initialNode()
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to construct v1.Node object for kubelet: %v", err)
|
||||
klog.Errorf("Unable to construct v1.Node object for kubelet: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
glog.Infof("Attempting to register node %s", node.Name)
|
||||
klog.Infof("Attempting to register node %s", node.Name)
|
||||
registered := kl.tryRegisterWithAPIServer(node)
|
||||
if registered {
|
||||
glog.Infof("Successfully registered node %s", node.Name)
|
||||
klog.Infof("Successfully registered node %s", node.Name)
|
||||
kl.registrationCompleted = true
|
||||
return
|
||||
}
|
||||
|
|
@ -89,27 +91,27 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
|
|||
}
|
||||
|
||||
if !apierrors.IsAlreadyExists(err) {
|
||||
glog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err)
|
||||
klog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err)
|
||||
return false
|
||||
}
|
||||
|
||||
existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
|
||||
klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
|
||||
return false
|
||||
}
|
||||
if existingNode == nil {
|
||||
glog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName)
|
||||
klog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName)
|
||||
return false
|
||||
}
|
||||
|
||||
originalNode := existingNode.DeepCopy()
|
||||
if originalNode == nil {
|
||||
glog.Errorf("Nil %q node object", kl.nodeName)
|
||||
klog.Errorf("Nil %q node object", kl.nodeName)
|
||||
return false
|
||||
}
|
||||
|
||||
glog.Infof("Node %s was previously registered", kl.nodeName)
|
||||
klog.Infof("Node %s was previously registered", kl.nodeName)
|
||||
|
||||
// Edge case: the node was previously registered; reconcile
|
||||
// the value of the controller-managed attach-detach
|
||||
|
|
@ -119,7 +121,7 @@ func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
|
|||
requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate
|
||||
if requiresUpdate {
|
||||
if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil {
|
||||
glog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err)
|
||||
klog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
@ -151,7 +153,7 @@ func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool
|
|||
kubeletapis.LabelArch,
|
||||
}
|
||||
|
||||
var needsUpdate bool = false
|
||||
needsUpdate := false
|
||||
if existingNode.Labels == nil {
|
||||
existingNode.Labels = make(map[string]string)
|
||||
}
|
||||
|
|
@ -191,10 +193,10 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v
|
|||
// not have the same value, update the existing node with
|
||||
// the correct value of the annotation.
|
||||
if !newSet {
|
||||
glog.Info("Controller attach-detach setting changed to false; updating existing Node")
|
||||
klog.Info("Controller attach-detach setting changed to false; updating existing Node")
|
||||
delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation)
|
||||
} else {
|
||||
glog.Info("Controller attach-detach setting changed to true; updating existing Node")
|
||||
klog.Info("Controller attach-detach setting changed to true; updating existing Node")
|
||||
if existingNode.Annotations == nil {
|
||||
existingNode.Annotations = make(map[string]string)
|
||||
}
|
||||
|
|
@ -232,7 +234,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
|||
}
|
||||
|
||||
unschedulableTaint := v1.Taint{
|
||||
Key: algorithm.TaintNodeUnschedulable,
|
||||
Key: schedulerapi.TaintNodeUnschedulable,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
||||
|
|
@ -247,7 +249,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
|||
|
||||
if kl.externalCloudProvider {
|
||||
taint := v1.Taint{
|
||||
Key: algorithm.TaintExternalCloudProvider,
|
||||
Key: schedulerapi.TaintExternalCloudProvider,
|
||||
Value: "true",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
|
|
@ -273,24 +275,24 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
|||
node.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
glog.Infof("Setting node annotation to enable volume controller attach/detach")
|
||||
klog.Infof("Setting node annotation to enable volume controller attach/detach")
|
||||
node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true"
|
||||
} else {
|
||||
glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
|
||||
klog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
|
||||
}
|
||||
|
||||
if kl.keepTerminatedPodVolumes {
|
||||
if node.Annotations == nil {
|
||||
node.Annotations = make(map[string]string)
|
||||
}
|
||||
glog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node")
|
||||
klog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node")
|
||||
node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true"
|
||||
}
|
||||
|
||||
// @question: should this be place after the call to the cloud provider? which also applies labels
|
||||
for k, v := range kl.nodeLabels {
|
||||
if cv, found := node.ObjectMeta.Labels[k]; found {
|
||||
glog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv)
|
||||
klog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv)
|
||||
}
|
||||
node.ObjectMeta.Labels[k] = v
|
||||
}
|
||||
|
|
@ -321,7 +323,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
|||
return nil, err
|
||||
}
|
||||
if instanceType != "" {
|
||||
glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
|
||||
klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelInstanceType, instanceType)
|
||||
node.ObjectMeta.Labels[kubeletapis.LabelInstanceType] = instanceType
|
||||
}
|
||||
// If the cloud has zone information, label the node with the zone information
|
||||
|
|
@ -332,11 +334,11 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
|||
return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
|
||||
}
|
||||
if zone.FailureDomain != "" {
|
||||
glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
|
||||
klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneFailureDomain, zone.FailureDomain)
|
||||
node.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = zone.FailureDomain
|
||||
}
|
||||
if zone.Region != "" {
|
||||
glog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
|
||||
klog.Infof("Adding node label from cloud provider: %s=%s", kubeletapis.LabelZoneRegion, zone.Region)
|
||||
node.ObjectMeta.Labels[kubeletapis.LabelZoneRegion] = zone.Region
|
||||
}
|
||||
}
|
||||
|
|
@ -348,8 +350,8 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
|
|||
}
|
||||
|
||||
// syncNodeStatus should be called periodically from a goroutine.
|
||||
// It synchronizes node status to master, registering the kubelet first if
|
||||
// necessary.
|
||||
// It synchronizes node status to master if there is any change or enough time
|
||||
// passed from the last sync, registering the kubelet first if necessary.
|
||||
func (kl *Kubelet) syncNodeStatus() {
|
||||
kl.syncNodeStatusMux.Lock()
|
||||
defer kl.syncNodeStatusMux.Unlock()
|
||||
|
|
@ -362,19 +364,20 @@ func (kl *Kubelet) syncNodeStatus() {
|
|||
kl.registerWithAPIServer()
|
||||
}
|
||||
if err := kl.updateNodeStatus(); err != nil {
|
||||
glog.Errorf("Unable to update node status: %v", err)
|
||||
klog.Errorf("Unable to update node status: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// updateNodeStatus updates node status to master with retries.
|
||||
// updateNodeStatus updates node status to master with retries if there is any
|
||||
// change or enough time passed from the last sync.
|
||||
func (kl *Kubelet) updateNodeStatus() error {
|
||||
glog.V(5).Infof("Updating node status")
|
||||
klog.V(5).Infof("Updating node status")
|
||||
for i := 0; i < nodeStatusUpdateRetry; i++ {
|
||||
if err := kl.tryUpdateNodeStatus(i); err != nil {
|
||||
if i > 0 && kl.onRepeatedHeartbeatFailure != nil {
|
||||
kl.onRepeatedHeartbeatFailure()
|
||||
}
|
||||
glog.Errorf("Error updating node status, will retry: %v", err)
|
||||
klog.Errorf("Error updating node status, will retry: %v", err)
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -382,7 +385,8 @@ func (kl *Kubelet) updateNodeStatus() error {
|
|||
return fmt.Errorf("update node status exceeds retry count")
|
||||
}
|
||||
|
||||
// tryUpdateNodeStatus tries to update node status to master.
|
||||
// tryUpdateNodeStatus tries to update node status to master if there is any
|
||||
// change or enough time passed from the last sync.
|
||||
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
|
||||
// In large clusters, GET and PUT operations on Node objects coming
|
||||
// from here are the majority of load on apiserver and etcd.
|
||||
|
|
@ -404,18 +408,31 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
|
|||
return fmt.Errorf("nil %q node object", kl.nodeName)
|
||||
}
|
||||
|
||||
podCIDRChanged := false
|
||||
if node.Spec.PodCIDR != "" {
|
||||
if err := kl.updatePodCIDR(node.Spec.PodCIDR); err != nil {
|
||||
glog.Errorf(err.Error())
|
||||
// Pod CIDR could have been updated before, so we cannot rely on
|
||||
// node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is
|
||||
// actually changed.
|
||||
if podCIDRChanged, err = kl.updatePodCIDR(node.Spec.PodCIDR); err != nil {
|
||||
klog.Errorf(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
kl.setNodeStatus(node)
|
||||
|
||||
now := kl.clock.Now()
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) && now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) {
|
||||
if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Patch the current status on the API server
|
||||
updatedNode, _, err := nodeutil.PatchNodeStatus(kl.heartbeatClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
kl.lastStatusReportTime = now
|
||||
kl.setLastObservedNodeAddresses(updatedNode.Status.Addresses)
|
||||
// If update finishes successfully, mark the volumeInUse as reportedInUse to indicate
|
||||
// those volumes are already updated in the node's status
|
||||
|
|
@ -426,7 +443,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
|
|||
// recordNodeStatusEvent records an event of the given type with the given
|
||||
// message for the node.
|
||||
func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
|
||||
glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
|
||||
klog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
|
||||
// TODO: This requires a transaction, either both node status is updated
|
||||
// and event is recorded or neither should happen, see issue #6055.
|
||||
kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
|
||||
|
|
@ -458,9 +475,9 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error {
|
|||
// refactor the node status condition code out to a different file.
|
||||
func (kl *Kubelet) setNodeStatus(node *v1.Node) {
|
||||
for i, f := range kl.setNodeStatusFuncs {
|
||||
glog.V(5).Infof("Setting node status at position %v", i)
|
||||
klog.V(5).Infof("Setting node status at position %v", i)
|
||||
if err := f(node); err != nil {
|
||||
glog.Warningf("Failed to set some node status fields: %s", err)
|
||||
klog.Warningf("Failed to set some node status fields: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -502,7 +519,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
|
|||
setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits))
|
||||
}
|
||||
setters = append(setters,
|
||||
nodestatus.OutOfDiskCondition(kl.clock.Now, kl.recordNodeStatusEvent),
|
||||
nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent),
|
||||
nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
|
||||
nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
|
||||
|
|
@ -554,3 +570,53 @@ func validateNodeIP(nodeIP net.IP) error {
|
|||
}
|
||||
return fmt.Errorf("Node IP: %q not found in the host's network interfaces", nodeIP.String())
|
||||
}
|
||||
|
||||
// nodeStatusHasChanged compares the original node and current node's status and
|
||||
// returns true if any change happens. The heartbeat timestamp is ignored.
|
||||
func nodeStatusHasChanged(originalStatus *v1.NodeStatus, status *v1.NodeStatus) bool {
|
||||
if originalStatus == nil && status == nil {
|
||||
return false
|
||||
}
|
||||
if originalStatus == nil || status == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// Compare node conditions here because we need to ignore the heartbeat timestamp.
|
||||
if nodeConditionsHaveChanged(originalStatus.Conditions, status.Conditions) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Compare other fields of NodeStatus.
|
||||
originalStatusCopy := originalStatus.DeepCopy()
|
||||
statusCopy := status.DeepCopy()
|
||||
originalStatusCopy.Conditions = nil
|
||||
statusCopy.Conditions = nil
|
||||
return !apiequality.Semantic.DeepEqual(originalStatusCopy, statusCopy)
|
||||
}
|
||||
|
||||
// nodeConditionsHaveChanged compares the original node and current node's
|
||||
// conditions and returns true if any change happens. The heartbeat timestamp is
|
||||
// ignored.
|
||||
func nodeConditionsHaveChanged(originalConditions []v1.NodeCondition, conditions []v1.NodeCondition) bool {
|
||||
if len(originalConditions) != len(conditions) {
|
||||
return true
|
||||
}
|
||||
|
||||
originalConditionsCopy := make([]v1.NodeCondition, 0, len(originalConditions))
|
||||
originalConditionsCopy = append(originalConditionsCopy, originalConditions...)
|
||||
conditionsCopy := make([]v1.NodeCondition, 0, len(conditions))
|
||||
conditionsCopy = append(conditionsCopy, conditions...)
|
||||
|
||||
sort.SliceStable(originalConditionsCopy, func(i, j int) bool { return originalConditionsCopy[i].Type < originalConditionsCopy[j].Type })
|
||||
sort.SliceStable(conditionsCopy, func(i, j int) bool { return conditionsCopy[i].Type < conditionsCopy[j].Type })
|
||||
|
||||
replacedheartbeatTime := metav1.Time{}
|
||||
for i := range conditionsCopy {
|
||||
originalConditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
|
||||
conditionsCopy[i].LastHeartbeatTime = replacedheartbeatTime
|
||||
if !apiequality.Semantic.DeepEqual(&originalConditionsCopy[i], &conditionsCopy[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
129
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go
generated
vendored
129
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go
generated
vendored
|
|
@ -32,7 +32,6 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
|
@ -41,6 +40,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilvalidation "k8s.io/apimachinery/pkg/util/validation"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/klog"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
podshelper "k8s.io/kubernetes/pkg/apis/core/pods"
|
||||
|
|
@ -104,7 +104,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
|
|||
}
|
||||
vol, ok := podVolumes[device.Name]
|
||||
if !ok || vol.BlockVolumeMapper == nil {
|
||||
glog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device)
|
||||
klog.Errorf("Block volume cannot be satisfied for container %q, because the volume is missing or the volume mapper is nil: %+v", container.Name, device)
|
||||
return nil, fmt.Errorf("cannot find volume %q to pass into container %q", device.Name, container.Name)
|
||||
}
|
||||
// Get a symbolic link associated to a block device under pod device path
|
||||
|
|
@ -118,7 +118,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
|
|||
if vol.ReadOnly {
|
||||
permission = "r"
|
||||
}
|
||||
glog.V(4).Infof("Device will be attached to container %q. Path on host: %v", container.Name, symlinkPath)
|
||||
klog.V(4).Infof("Device will be attached to container %q. Path on host: %v", container.Name, symlinkPath)
|
||||
devices = append(devices, kubecontainer.DeviceInfo{PathOnHost: symlinkPath, PathInContainer: device.DevicePath, Permissions: permission})
|
||||
}
|
||||
}
|
||||
|
|
@ -128,7 +128,6 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
|
|||
|
||||
// makeMounts determines the mount points for the given container.
|
||||
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, mounter mountutil.Interface, expandEnvs []kubecontainer.EnvVar) ([]kubecontainer.Mount, func(), error) {
|
||||
|
||||
// Kubernetes only mounts on /etc/hosts if:
|
||||
// - container is not an infrastructure (pause) container
|
||||
// - container is not already mounting on /etc/hosts
|
||||
|
|
@ -136,15 +135,15 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
|
|||
// Kubernetes will not mount /etc/hosts if:
|
||||
// - when the Pod sandbox is being created, its IP is still unknown. Hence, PodIP will not have been set.
|
||||
mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows"
|
||||
glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
|
||||
klog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
|
||||
mounts := []kubecontainer.Mount{}
|
||||
var cleanupAction func() = nil
|
||||
var cleanupAction func()
|
||||
for i, mount := range container.VolumeMounts {
|
||||
// do not mount /etc/hosts if container is already mounting on the path
|
||||
mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
|
||||
vol, ok := podVolumes[mount.Name]
|
||||
if !ok || vol.Mounter == nil {
|
||||
glog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount)
|
||||
klog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount)
|
||||
return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
|
||||
}
|
||||
|
||||
|
|
@ -183,7 +182,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
|
|||
hostPath = filepath.Join(volumePath, mount.SubPath)
|
||||
|
||||
if subPathExists, err := mounter.ExistsPath(hostPath); err != nil {
|
||||
glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath)
|
||||
klog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath)
|
||||
} else if !subPathExists {
|
||||
// Create the sub path now because if it's auto-created later when referenced, it may have an
|
||||
// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
|
||||
|
|
@ -196,7 +195,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
|
|||
}
|
||||
if err := mounter.SafeMakeDir(mount.SubPath, volumePath, perm); err != nil {
|
||||
// Don't pass detailed error back to the user because it could give information about host filesystem
|
||||
glog.Errorf("failed to create subPath directory for volumeMount %q of container %q: %v", mount.Name, container.Name, err)
|
||||
klog.Errorf("failed to create subPath directory for volumeMount %q of container %q: %v", mount.Name, container.Name, err)
|
||||
return nil, cleanupAction, fmt.Errorf("failed to create subPath directory for volumeMount %q of container %q", mount.Name, container.Name)
|
||||
}
|
||||
}
|
||||
|
|
@ -210,19 +209,19 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
|
|||
})
|
||||
if err != nil {
|
||||
// Don't pass detailed error back to the user because it could give information about host filesystem
|
||||
glog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err)
|
||||
klog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err)
|
||||
return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// Docker Volume Mounts fail on Windows if it is not of the form C:/
|
||||
containerPath := mount.MountPath
|
||||
if runtime.GOOS == "windows" {
|
||||
if (strings.HasPrefix(hostPath, "/") || strings.HasPrefix(hostPath, "\\")) && !strings.Contains(hostPath, ":") {
|
||||
hostPath = "c:" + hostPath
|
||||
}
|
||||
if volumeutil.IsWindowsLocalPath(runtime.GOOS, hostPath) {
|
||||
hostPath = volumeutil.MakeAbsolutePath(runtime.GOOS, hostPath)
|
||||
}
|
||||
if !filepath.IsAbs(containerPath) {
|
||||
|
||||
containerPath := mount.MountPath
|
||||
// IsAbs returns false for UNC path/SMB shares/named pipes in Windows. So check for those specifically and skip MakeAbsolutePath
|
||||
if !volumeutil.IsWindowsUNCPath(runtime.GOOS, containerPath) && !filepath.IsAbs(containerPath) {
|
||||
containerPath = volumeutil.MakeAbsolutePath(runtime.GOOS, containerPath)
|
||||
}
|
||||
|
||||
|
|
@ -230,7 +229,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
|
|||
if err != nil {
|
||||
return nil, cleanupAction, err
|
||||
}
|
||||
glog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation)
|
||||
klog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation)
|
||||
|
||||
mustMountRO := vol.Mounter.GetAttributes().ReadOnly
|
||||
|
||||
|
|
@ -263,10 +262,6 @@ func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.M
|
|||
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
|
||||
}
|
||||
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) {
|
||||
// mount propagation is disabled, use private as in the old versions
|
||||
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
|
||||
}
|
||||
switch {
|
||||
case mountMode == nil:
|
||||
// PRIVATE is the default
|
||||
|
|
@ -361,11 +356,9 @@ func hostsEntriesFromHostAliases(hostAliases []v1.HostAlias) []byte {
|
|||
var buffer bytes.Buffer
|
||||
buffer.WriteString("\n")
|
||||
buffer.WriteString("# Entries added by HostAliases.\n")
|
||||
// write each IP/hostname pair as an entry into hosts file
|
||||
// for each IP, write all aliases onto single line in hosts file
|
||||
for _, hostAlias := range hostAliases {
|
||||
for _, hostname := range hostAlias.Hostnames {
|
||||
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostAlias.IP, hostname))
|
||||
}
|
||||
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostAlias.IP, strings.Join(hostAlias.Hostnames, "\t")))
|
||||
}
|
||||
return buffer.Bytes()
|
||||
}
|
||||
|
|
@ -378,7 +371,7 @@ func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
|
|||
return hostname, nil
|
||||
}
|
||||
truncated := hostname[:hostnameMaxLen]
|
||||
glog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated)
|
||||
klog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated)
|
||||
// hostname should not end with '-' or '.'
|
||||
truncated = strings.TrimRight(truncated, "-.")
|
||||
if len(truncated) == 0 {
|
||||
|
|
@ -470,7 +463,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
|
|||
if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" {
|
||||
p := kl.getPodContainerDir(pod.UID, container.Name)
|
||||
if err := os.MkdirAll(p, 0750); err != nil {
|
||||
glog.Errorf("Error on creating %q: %v", p, err)
|
||||
klog.Errorf("Error on creating %q: %v", p, err)
|
||||
} else {
|
||||
opts.PodContainerDir = p
|
||||
}
|
||||
|
|
@ -488,7 +481,7 @@ var masterServices = sets.NewString("kubernetes")
|
|||
|
||||
// getServiceEnvVarMap makes a map[string]string of env vars for services a
|
||||
// pod in namespace ns should see.
|
||||
func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
|
||||
func (kl *Kubelet) getServiceEnvVarMap(ns string, enableServiceLinks bool) (map[string]string, error) {
|
||||
var (
|
||||
serviceMap = make(map[string]*v1.Service)
|
||||
m = make(map[string]string)
|
||||
|
|
@ -514,19 +507,16 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
|
|||
}
|
||||
serviceName := service.Name
|
||||
|
||||
switch service.Namespace {
|
||||
// for the case whether the master service namespace is the namespace the pod
|
||||
// is in, the pod should receive all the services in the namespace.
|
||||
//
|
||||
// ordering of the case clauses below enforces this
|
||||
case ns:
|
||||
serviceMap[serviceName] = service
|
||||
case kl.masterServiceNamespace:
|
||||
if masterServices.Has(serviceName) {
|
||||
if _, exists := serviceMap[serviceName]; !exists {
|
||||
serviceMap[serviceName] = service
|
||||
}
|
||||
// We always want to add environment variabled for master services
|
||||
// from the master service namespace, even if enableServiceLinks is false.
|
||||
// We also add environment variables for other services in the same
|
||||
// namespace, if enableServiceLinks is true.
|
||||
if service.Namespace == kl.masterServiceNamespace && masterServices.Has(serviceName) {
|
||||
if _, exists := serviceMap[serviceName]; !exists {
|
||||
serviceMap[serviceName] = service
|
||||
}
|
||||
} else if service.Namespace == ns && enableServiceLinks {
|
||||
serviceMap[serviceName] = service
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -553,7 +543,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container
|
|||
// To avoid this users can: (1) wait between starting a service and starting; or (2) detect
|
||||
// missing service env var and exit and be restarted; or (3) use DNS instead of env vars
|
||||
// and keep trying to resolve the DNS name of the service (recommended).
|
||||
serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace)
|
||||
serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace, *pod.Spec.EnableServiceLinks)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
|
@ -811,7 +801,7 @@ func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *k
|
|||
return err
|
||||
}
|
||||
if err := kl.containerManager.UpdateQOSCgroups(); err != nil {
|
||||
glog.V(2).Infof("Failed to update QoS cgroups while killing pod: %v", err)
|
||||
klog.V(2).Infof("Failed to update QoS cgroups while killing pod: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -839,7 +829,7 @@ func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) []v1.Secret {
|
|||
for _, secretRef := range pod.Spec.ImagePullSecrets {
|
||||
secret, err := kl.secretManager.GetSecret(pod.Namespace, secretRef.Name)
|
||||
if err != nil {
|
||||
glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
|
||||
klog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
|
@ -893,13 +883,13 @@ func (kl *Kubelet) IsPodDeleted(uid types.UID) bool {
|
|||
func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bool {
|
||||
if !notRunning(status.ContainerStatuses) {
|
||||
// We shouldnt delete pods that still have running containers
|
||||
glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
|
||||
klog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
|
||||
return false
|
||||
}
|
||||
// pod's containers should be deleted
|
||||
runtimeStatus, err := kl.podCache.Get(pod.UID)
|
||||
if err != nil {
|
||||
glog.V(3).Infof("Pod %q is terminated, Error getting runtimeStatus from the podCache: %s", format.Pod(pod), err)
|
||||
klog.V(3).Infof("Pod %q is terminated, Error getting runtimeStatus from the podCache: %s", format.Pod(pod), err)
|
||||
return false
|
||||
}
|
||||
if len(runtimeStatus.ContainerStatuses) > 0 {
|
||||
|
|
@ -907,18 +897,18 @@ func (kl *Kubelet) PodResourcesAreReclaimed(pod *v1.Pod, status v1.PodStatus) bo
|
|||
for _, status := range runtimeStatus.ContainerStatuses {
|
||||
statusStr += fmt.Sprintf("%+v ", *status)
|
||||
}
|
||||
glog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr)
|
||||
klog.V(3).Infof("Pod %q is terminated, but some containers have not been cleaned up: %s", format.Pod(pod), statusStr)
|
||||
return false
|
||||
}
|
||||
if kl.podVolumesExist(pod.UID) && !kl.keepTerminatedPodVolumes {
|
||||
// We shouldnt delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
|
||||
glog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod))
|
||||
klog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod))
|
||||
return false
|
||||
}
|
||||
if kl.kubeletConfiguration.CgroupsPerQOS {
|
||||
pcm := kl.containerManager.NewPodContainerManager()
|
||||
if pcm.Exists(pod) {
|
||||
glog.V(3).Infof("Pod %q is terminated, but pod cgroup sandbox has not been cleaned up", format.Pod(pod))
|
||||
klog.V(3).Infof("Pod %q is terminated, but pod cgroup sandbox has not been cleaned up", format.Pod(pod))
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
@ -1017,7 +1007,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
|
|||
|
||||
runningPods, err := kl.runtimeCache.GetPods()
|
||||
if err != nil {
|
||||
glog.Errorf("Error listing containers: %#v", err)
|
||||
klog.Errorf("Error listing containers: %#v", err)
|
||||
return err
|
||||
}
|
||||
for _, pod := range runningPods {
|
||||
|
|
@ -1033,7 +1023,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
|
|||
// TODO: Evaluate the performance impact of bypassing the runtime cache.
|
||||
runningPods, err = kl.containerRuntime.GetPods(false)
|
||||
if err != nil {
|
||||
glog.Errorf("Error listing containers: %#v", err)
|
||||
klog.Errorf("Error listing containers: %#v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -1046,7 +1036,7 @@ func (kl *Kubelet) HandlePodCleanups() error {
|
|||
// We want all cleanup tasks to be run even if one of them failed. So
|
||||
// we just log an error here and continue other cleanup tasks.
|
||||
// This also applies to the other clean up tasks.
|
||||
glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
|
||||
klog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
|
||||
}
|
||||
|
||||
// Remove any orphaned mirror pods.
|
||||
|
|
@ -1080,10 +1070,10 @@ func (kl *Kubelet) podKiller() {
|
|||
|
||||
if !exists {
|
||||
go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod) {
|
||||
glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
|
||||
klog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
|
||||
err := kl.killPod(apiPod, runningPod, nil, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
|
||||
klog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
|
||||
}
|
||||
lock.Lock()
|
||||
killing.Delete(string(runningPod.ID))
|
||||
|
|
@ -1289,7 +1279,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
|
|||
case pendingInitialization > 0:
|
||||
fallthrough
|
||||
case waiting > 0:
|
||||
glog.V(5).Infof("pod waiting > 0, pending")
|
||||
klog.V(5).Infof("pod waiting > 0, pending")
|
||||
// One or more containers has not been started
|
||||
return v1.PodPending
|
||||
case running > 0 && unknown == 0:
|
||||
|
|
@ -1316,7 +1306,7 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
|
|||
// and in the process of restarting
|
||||
return v1.PodRunning
|
||||
default:
|
||||
glog.V(5).Infof("pod default case, pending")
|
||||
klog.V(5).Infof("pod default case, pending")
|
||||
return v1.PodPending
|
||||
}
|
||||
}
|
||||
|
|
@ -1324,21 +1314,20 @@ func getPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
|
|||
// generateAPIPodStatus creates the final API pod status for a pod, given the
|
||||
// internal pod status.
|
||||
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
|
||||
glog.V(3).Infof("Generating status for %q", format.Pod(pod))
|
||||
klog.V(3).Infof("Generating status for %q", format.Pod(pod))
|
||||
|
||||
s := kl.convertStatusToAPIStatus(pod, podStatus)
|
||||
|
||||
// check if an internal module has requested the pod is evicted.
|
||||
for _, podSyncHandler := range kl.PodSyncHandlers {
|
||||
if result := podSyncHandler.ShouldEvict(pod); result.Evict {
|
||||
return v1.PodStatus{
|
||||
Phase: v1.PodFailed,
|
||||
Reason: result.Reason,
|
||||
Message: result.Message,
|
||||
}
|
||||
s.Phase = v1.PodFailed
|
||||
s.Reason = result.Reason
|
||||
s.Message = result.Message
|
||||
return *s
|
||||
}
|
||||
}
|
||||
|
||||
s := kl.convertStatusToAPIStatus(pod, podStatus)
|
||||
|
||||
// Assume info is ready to process
|
||||
spec := &pod.Spec
|
||||
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
|
||||
|
|
@ -1347,7 +1336,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
|
|||
if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
|
||||
// API server shows terminal phase; transitions are not allowed
|
||||
if s.Phase != pod.Status.Phase {
|
||||
glog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s)
|
||||
klog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s)
|
||||
// Force back to phase from the API server
|
||||
s.Phase = pod.Status.Phase
|
||||
}
|
||||
|
|
@ -1367,7 +1356,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
|
|||
if kl.kubeClient != nil {
|
||||
hostIP, err := kl.getHostIPAnyWay()
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Cannot get host IP: %v", err)
|
||||
klog.V(4).Infof("Cannot get host IP: %v", err)
|
||||
} else {
|
||||
s.HostIP = hostIP.String()
|
||||
if kubecontainer.IsHostNetworkPod(pod) && s.PodIP == "" {
|
||||
|
|
@ -1674,13 +1663,13 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(cgroupPods map[types.UID]cm.CgroupN
|
|||
// process in the cgroup to the minimum value while we wait. if the kubelet
|
||||
// is configured to keep terminated volumes, we will delete the cgroup and not block.
|
||||
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist && !kl.keepTerminatedPodVolumes {
|
||||
glog.V(3).Infof("Orphaned pod %q found, but volumes not yet removed. Reducing cpu to minimum", uid)
|
||||
klog.V(3).Infof("Orphaned pod %q found, but volumes not yet removed. Reducing cpu to minimum", uid)
|
||||
if err := pcm.ReduceCPULimits(val); err != nil {
|
||||
glog.Warningf("Failed to reduce cpu time for pod %q pending volume cleanup due to %v", uid, err)
|
||||
klog.Warningf("Failed to reduce cpu time for pod %q pending volume cleanup due to %v", uid, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
glog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid)
|
||||
klog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid)
|
||||
// Destroy all cgroups of pod that should not be running,
|
||||
// by first killing all the attached processes to these cgroups.
|
||||
// We ignore errors thrown by the method, as the housekeeping loop would
|
||||
|
|
@ -1743,13 +1732,13 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
|
|||
if volume.PersistentVolumeClaim != nil {
|
||||
pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
|
||||
klog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
|
||||
continue
|
||||
}
|
||||
if pvc != nil {
|
||||
referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err)
|
||||
klog.Warningf("unable to retrieve pv %s - %v", pvc.Spec.VolumeName, err)
|
||||
continue
|
||||
}
|
||||
if referencedVolume != nil && referencedVolume.Spec.HostPath != nil {
|
||||
|
|
|
|||
4
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go
generated
vendored
4
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go
generated
vendored
|
|
@ -19,7 +19,7 @@ package kubelet
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/pkg/api/v1/resource"
|
||||
|
|
@ -42,7 +42,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Con
|
|||
return nil, nil, fmt.Errorf("failed to find node object, expected a node")
|
||||
}
|
||||
allocatable := node.Status.Allocatable
|
||||
glog.Infof("allocatable: %v", allocatable)
|
||||
klog.Infof("allocatable: %v", allocatable)
|
||||
outputPod := pod.DeepCopy()
|
||||
for idx := range outputPod.Spec.Containers {
|
||||
resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)
|
||||
|
|
|
|||
18
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go
generated
vendored
18
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go
generated
vendored
|
|
@ -19,11 +19,11 @@ package kubelet
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/klog"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/util/removeall"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
|
|
@ -62,11 +62,11 @@ func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
|
|||
// There are some volume plugins such as flexvolume might not have mounts. See issue #61229
|
||||
volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID)
|
||||
if err != nil {
|
||||
glog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err)
|
||||
klog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err)
|
||||
return true
|
||||
}
|
||||
if len(volumePaths) > 0 {
|
||||
glog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths)
|
||||
klog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths)
|
||||
return true
|
||||
}
|
||||
|
||||
|
|
@ -85,7 +85,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, o
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
|
||||
}
|
||||
glog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name())
|
||||
klog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name())
|
||||
return physicalMounter, nil
|
||||
}
|
||||
|
||||
|
|
@ -115,7 +115,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
|
|||
// If volumes have not been unmounted/detached, do not delete directory.
|
||||
// Doing so may result in corruption of data.
|
||||
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
|
||||
glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
|
||||
klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
|
||||
continue
|
||||
}
|
||||
// If there are still volume directories, do not delete directory
|
||||
|
|
@ -128,18 +128,18 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
|
|||
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid))
|
||||
continue
|
||||
}
|
||||
glog.V(3).Infof("Orphaned pod %q found, removing", uid)
|
||||
klog.V(3).Infof("Orphaned pod %q found, removing", uid)
|
||||
if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil {
|
||||
glog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err)
|
||||
klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err)
|
||||
orphanRemovalErrors = append(orphanRemovalErrors, err)
|
||||
}
|
||||
}
|
||||
|
||||
logSpew := func(errs []error) {
|
||||
if len(errs) > 0 {
|
||||
glog.Errorf("%v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs))
|
||||
klog.Errorf("%v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs))
|
||||
for _, err := range errs {
|
||||
glog.V(5).Infof("Orphan pod: %v", err)
|
||||
klog.V(5).Infof("Orphan pod: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
6
vendor/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go
generated
vendored
|
|
@ -17,13 +17,13 @@ limitations under the License.
|
|||
package kubelet
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
"github.com/google/cadvisor/events"
|
||||
cadvisorapi "github.com/google/cadvisor/info/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
|
||||
)
|
||||
|
||||
|
|
@ -65,10 +65,10 @@ func (ow *realOOMWatcher) Start(ref *v1.ObjectReference) error {
|
|||
defer runtime.HandleCrash()
|
||||
|
||||
for event := range eventChannel.GetChannel() {
|
||||
glog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
|
||||
klog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
|
||||
ow.recorder.PastEventf(ref, metav1.Time{Time: event.Timestamp}, v1.EventTypeWarning, systemOOMEvent, "System OOM encountered")
|
||||
}
|
||||
glog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
|
||||
klog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
6
vendor/k8s.io/kubernetes/pkg/kubelet/pod_container_deletor.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/kubelet/pod_container_deletor.go
generated
vendored
|
|
@ -19,8 +19,8 @@ package kubelet
|
|||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/klog"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
)
|
||||
|
||||
|
|
@ -72,7 +72,7 @@ func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontain
|
|||
}(filterContainerID, podStatus)
|
||||
|
||||
if filterContainerID != "" && matchedContainer == nil {
|
||||
glog.Warningf("Container %q not found in pod's containers", filterContainerID)
|
||||
klog.Warningf("Container %q not found in pod's containers", filterContainerID)
|
||||
return containerStatusbyCreatedList{}
|
||||
}
|
||||
|
||||
|
|
@ -106,7 +106,7 @@ func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, po
|
|||
select {
|
||||
case p.worker <- candidate.ID:
|
||||
default:
|
||||
glog.Warningf("Failed to issue the request to remove container %v", candidate.ID)
|
||||
klog.Warningf("Failed to issue the request to remove container %v", candidate.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
13
vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go
generated
vendored
13
vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go
generated
vendored
|
|
@ -18,15 +18,16 @@ package kubelet
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/events"
|
||||
"k8s.io/kubernetes/pkg/kubelet/eviction"
|
||||
|
|
@ -96,8 +97,11 @@ const (
|
|||
// jitter factor for resyncInterval
|
||||
workerResyncIntervalJitterFactor = 0.5
|
||||
|
||||
// jitter factor for backOffPeriod
|
||||
// jitter factor for backOffPeriod and backOffOnTransientErrorPeriod
|
||||
workerBackOffPeriodJitterFactor = 0.5
|
||||
|
||||
// backoff period when transient error occurred.
|
||||
backOffOnTransientErrorPeriod = time.Second
|
||||
)
|
||||
|
||||
type podWorkers struct {
|
||||
|
|
@ -183,7 +187,7 @@ func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) {
|
|||
}
|
||||
if err != nil {
|
||||
// IMPORTANT: we do not log errors here, the syncPodFn is responsible for logging errors
|
||||
glog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err)
|
||||
klog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err)
|
||||
}
|
||||
p.wrapUp(update.Pod.UID, err)
|
||||
}
|
||||
|
|
@ -263,6 +267,9 @@ func (p *podWorkers) wrapUp(uid types.UID, syncErr error) {
|
|||
case syncErr == nil:
|
||||
// No error; requeue at the regular resync interval.
|
||||
p.workQueue.Enqueue(uid, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor))
|
||||
case strings.Contains(syncErr.Error(), NetworkNotReadyErrorMsg):
|
||||
// Network is not ready; back off for short period of time and retry as network might be ready soon.
|
||||
p.workQueue.Enqueue(uid, wait.Jitter(backOffOnTransientErrorPeriod, workerBackOffPeriodJitterFactor))
|
||||
default:
|
||||
// Error occurred during the sync; back off and then retry.
|
||||
p.workQueue.Enqueue(uid, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor))
|
||||
|
|
|
|||
30
vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go
generated
vendored
30
vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go
generated
vendored
|
|
@ -21,8 +21,8 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/klog"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/format"
|
||||
|
|
@ -51,15 +51,15 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult,
|
|||
// If the container logs directory does not exist, create it.
|
||||
if _, err := os.Stat(ContainerLogsDir); err != nil {
|
||||
if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil {
|
||||
glog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err)
|
||||
klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
select {
|
||||
case u := <-updates:
|
||||
glog.Infof("processing manifest with %d pods", len(u.Pods))
|
||||
klog.Infof("processing manifest with %d pods", len(u.Pods))
|
||||
result, err := kl.runOnce(u.Pods, runOnceRetryDelay)
|
||||
glog.Infof("finished processing %d pods", len(u.Pods))
|
||||
klog.Infof("finished processing %d pods", len(u.Pods))
|
||||
return result, err
|
||||
case <-time.After(runOnceManifestDelay):
|
||||
return nil, fmt.Errorf("no pod manifest update after %v", runOnceManifestDelay)
|
||||
|
|
@ -85,7 +85,7 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
|
|||
}(pod)
|
||||
}
|
||||
|
||||
glog.Infof("Waiting for %d pods", len(admitted))
|
||||
klog.Infof("Waiting for %d pods", len(admitted))
|
||||
failedPods := []string{}
|
||||
for i := 0; i < len(admitted); i++ {
|
||||
res := <-ch
|
||||
|
|
@ -93,19 +93,19 @@ func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []
|
|||
if res.Err != nil {
|
||||
faliedContainerName, err := kl.getFailedContainers(res.Pod)
|
||||
if err != nil {
|
||||
glog.Infof("unable to get failed containers' names for pod %q, error:%v", format.Pod(res.Pod), err)
|
||||
klog.Infof("unable to get failed containers' names for pod %q, error:%v", format.Pod(res.Pod), err)
|
||||
} else {
|
||||
glog.Infof("unable to start pod %q because container:%v failed", format.Pod(res.Pod), faliedContainerName)
|
||||
klog.Infof("unable to start pod %q because container:%v failed", format.Pod(res.Pod), faliedContainerName)
|
||||
}
|
||||
failedPods = append(failedPods, format.Pod(res.Pod))
|
||||
} else {
|
||||
glog.Infof("started pod %q", format.Pod(res.Pod))
|
||||
klog.Infof("started pod %q", format.Pod(res.Pod))
|
||||
}
|
||||
}
|
||||
if len(failedPods) > 0 {
|
||||
return results, fmt.Errorf("error running pods: %v", failedPods)
|
||||
}
|
||||
glog.Infof("%d pods started", len(pods))
|
||||
klog.Infof("%d pods started", len(pods))
|
||||
return results, err
|
||||
}
|
||||
|
||||
|
|
@ -120,14 +120,14 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
|
|||
}
|
||||
|
||||
if kl.isPodRunning(pod, status) {
|
||||
glog.Infof("pod %q containers running", format.Pod(pod))
|
||||
klog.Infof("pod %q containers running", format.Pod(pod))
|
||||
return nil
|
||||
}
|
||||
glog.Infof("pod %q containers not running: syncing", format.Pod(pod))
|
||||
klog.Infof("pod %q containers not running: syncing", format.Pod(pod))
|
||||
|
||||
glog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
|
||||
klog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
|
||||
if err := kl.podManager.CreateMirrorPod(pod); err != nil {
|
||||
glog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err)
|
||||
klog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err)
|
||||
}
|
||||
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
|
||||
if err = kl.syncPod(syncPodOptions{
|
||||
|
|
@ -142,7 +142,7 @@ func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
|
|||
return fmt.Errorf("timeout error: pod %q containers not running after %d retries", format.Pod(pod), runOnceMaxRetries)
|
||||
}
|
||||
// TODO(proppy): health checking would be better than waiting + checking the state at the next iteration.
|
||||
glog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay)
|
||||
klog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay)
|
||||
time.Sleep(delay)
|
||||
retry++
|
||||
delay *= runOnceRetryDelayBackoff
|
||||
|
|
@ -154,7 +154,7 @@ func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bo
|
|||
for _, c := range pod.Spec.Containers {
|
||||
cs := status.FindContainerStatusByName(c.Name)
|
||||
if cs == nil || cs.State != kubecontainer.ContainerStateRunning {
|
||||
glog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod))
|
||||
klog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod))
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
|
|||
9
vendor/k8s.io/kubernetes/pkg/kubelet/runtime.go
generated
vendored

@@ -27,7 +27,6 @@ type runtimeState struct {
lastBaseRuntimeSync time.Time
baseRuntimeSyncThreshold time.Duration
networkError error
internalError error
cidr string
healthChecks []*healthCheck
}

@@ -75,12 +74,11 @@ func (s *runtimeState) runtimeErrors() []string {
s.RLock()
defer s.RUnlock()
var ret []string
if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
if s.lastBaseRuntimeSync.IsZero() {
ret = append(ret, "container runtime status check may not have completed yet")
} else if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
ret = append(ret, "container runtime is down")
}
if s.internalError != nil {
ret = append(ret, s.internalError.Error())
}
for _, hc := range s.healthChecks {
if ok, err := hc.fn(); !ok {
ret = append(ret, fmt.Sprintf("%s is not healthy: %v", hc.name, err))

@@ -107,6 +105,5 @@ func newRuntimeState(
lastBaseRuntimeSync: time.Time{},
baseRuntimeSyncThreshold: runtimeSyncThreshold,
networkError: fmt.Errorf("network state unknown"),
internalError: nil,
}
}
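
The runtimeErrors hunk above distinguishes a runtime that has never been checked from one whose last successful check is older than the threshold. A small editorial sketch of that decision as a plain function rather than the kubelet's runtimeState; the threshold value is illustrative:

package main

import (
    "fmt"
    "time"
)

// runtimeSyncError mirrors the new check: a zero lastSync means the status check
// may simply not have run yet; a stale lastSync means the runtime is down.
func runtimeSyncError(lastSync time.Time, threshold time.Duration) string {
    if lastSync.IsZero() {
        return "container runtime status check may not have completed yet"
    }
    if !lastSync.Add(threshold).After(time.Now()) {
        return "container runtime is down"
    }
    return ""
}

func main() {
    fmt.Println(runtimeSyncError(time.Time{}, 30*time.Second))                  // never synced
    fmt.Println(runtimeSyncError(time.Now().Add(-time.Minute), 30*time.Second)) // stale sync
    fmt.Println(runtimeSyncError(time.Now(), 30*time.Second))                   // healthy, prints empty line
}
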
60
vendor/k8s.io/kubernetes/pkg/kubelet/types/BUILD
generated
vendored

@@ -1,60 +0,0 @@
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"constants.go",
|
||||
"doc.go",
|
||||
"labels.go",
|
||||
"pod_status.go",
|
||||
"pod_update.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/types",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/scheduling:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"labels_test.go",
|
||||
"pod_status_test.go",
|
||||
"pod_update_test.go",
|
||||
"types_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
32
vendor/k8s.io/kubernetes/pkg/kubelet/types/constants.go
generated
vendored

@@ -1,32 +0,0 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
const (
|
||||
// system default DNS resolver configuration
|
||||
ResolvConfDefault = "/etc/resolv.conf"
|
||||
|
||||
// different container runtimes
|
||||
DockerContainerRuntime = "docker"
|
||||
RemoteContainerRuntime = "remote"
|
||||
|
||||
// User visible keys for managing node allocatable enforcement on the node.
|
||||
NodeAllocatableEnforcementKey = "pods"
|
||||
SystemReservedEnforcementKey = "system-reserved"
|
||||
KubeReservedEnforcementKey = "kube-reserved"
|
||||
NodeAllocatableNoneKey = "none"
|
||||
)
|
||||
41
vendor/k8s.io/kubernetes/pkg/kubelet/types/labels.go
generated
vendored

@@ -1,41 +0,0 @@
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
const (
|
||||
KubernetesPodNameLabel = "io.kubernetes.pod.name"
|
||||
KubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace"
|
||||
KubernetesPodUIDLabel = "io.kubernetes.pod.uid"
|
||||
KubernetesContainerNameLabel = "io.kubernetes.container.name"
|
||||
KubernetesContainerTypeLabel = "io.kubernetes.container.type"
|
||||
)
|
||||
|
||||
func GetContainerName(labels map[string]string) string {
|
||||
return labels[KubernetesContainerNameLabel]
|
||||
}
|
||||
|
||||
func GetPodName(labels map[string]string) string {
|
||||
return labels[KubernetesPodNameLabel]
|
||||
}
|
||||
|
||||
func GetPodUID(labels map[string]string) string {
|
||||
return labels[KubernetesPodUIDLabel]
|
||||
}
|
||||
|
||||
func GetPodNamespace(labels map[string]string) string {
|
||||
return labels[KubernetesPodNamespaceLabel]
|
||||
}
|
||||
40
vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_status.go
generated
vendored

@@ -1,40 +0,0 @@
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
// PodConditionsByKubelet is the list of pod conditions owned by kubelet
|
||||
var PodConditionsByKubelet = []v1.PodConditionType{
|
||||
v1.PodScheduled,
|
||||
v1.PodReady,
|
||||
v1.PodInitialized,
|
||||
v1.PodReasonUnschedulable,
|
||||
v1.ContainersReady,
|
||||
}
|
||||
|
||||
// PodConditionByKubelet returns if the pod condition type is owned by kubelet
|
||||
func PodConditionByKubelet(conditionType v1.PodConditionType) bool {
|
||||
for _, c := range PodConditionsByKubelet {
|
||||
if c == conditionType {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
199
vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go
generated
vendored

@@ -1,199 +0,0 @@
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
kubeapi "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/scheduling"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
const (
|
||||
ConfigSourceAnnotationKey = "kubernetes.io/config.source"
|
||||
ConfigMirrorAnnotationKey = v1.MirrorPodAnnotationKey
|
||||
ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen"
|
||||
ConfigHashAnnotationKey = "kubernetes.io/config.hash"
|
||||
CriticalPodAnnotationKey = "scheduler.alpha.kubernetes.io/critical-pod"
|
||||
)
|
||||
|
||||
// PodOperation defines what changes will be made on a pod configuration.
|
||||
type PodOperation int
|
||||
|
||||
const (
|
||||
// This is the current pod configuration
|
||||
SET PodOperation = iota
|
||||
// Pods with the given ids are new to this source
|
||||
ADD
|
||||
// Pods with the given ids are gracefully deleted from this source
|
||||
DELETE
|
||||
// Pods with the given ids have been removed from this source
|
||||
REMOVE
|
||||
// Pods with the given ids have been updated in this source
|
||||
UPDATE
|
||||
// Pods with the given ids have unexpected status in this source,
|
||||
// kubelet should reconcile status with this source
|
||||
RECONCILE
|
||||
// Pods with the given ids have been restored from a checkpoint.
|
||||
RESTORE
|
||||
|
||||
// These constants identify the sources of pods
|
||||
// Updates from a file
|
||||
FileSource = "file"
|
||||
// Updates from querying a web page
|
||||
HTTPSource = "http"
|
||||
// Updates from Kubernetes API Server
|
||||
ApiserverSource = "api"
|
||||
// Updates from all sources
|
||||
AllSource = "*"
|
||||
|
||||
NamespaceDefault = metav1.NamespaceDefault
|
||||
)
|
||||
|
||||
// PodUpdate defines an operation sent on the channel. You can add or remove single services by
|
||||
// sending an array of size one and Op == ADD|REMOVE (with REMOVE, only the ID is required).
|
||||
// For setting the state of the system to a given state for this source configuration, set
|
||||
// Pods as desired and Op to SET, which will reset the system state to that specified in this
|
||||
// operation for this source channel. To remove all pods, set Pods to empty object and Op to SET.
|
||||
//
|
||||
// Additionally, Pods should never be nil - it should always point to an empty slice. While
|
||||
// functionally similar, this helps our unit tests properly check that the correct PodUpdates
|
||||
// are generated.
|
||||
type PodUpdate struct {
|
||||
Pods []*v1.Pod
|
||||
Op PodOperation
|
||||
Source string
|
||||
}
|
||||
|
||||
// Gets all validated sources from the specified sources.
|
||||
func GetValidatedSources(sources []string) ([]string, error) {
|
||||
validated := make([]string, 0, len(sources))
|
||||
for _, source := range sources {
|
||||
switch source {
|
||||
case AllSource:
|
||||
return []string{FileSource, HTTPSource, ApiserverSource}, nil
|
||||
case FileSource, HTTPSource, ApiserverSource:
|
||||
validated = append(validated, source)
|
||||
break
|
||||
case "":
|
||||
break
|
||||
default:
|
||||
return []string{}, fmt.Errorf("unknown pod source %q", source)
|
||||
}
|
||||
}
|
||||
return validated, nil
|
||||
}
|
||||
|
||||
// GetPodSource returns the source of the pod based on the annotation.
|
||||
func GetPodSource(pod *v1.Pod) (string, error) {
|
||||
if pod.Annotations != nil {
|
||||
if source, ok := pod.Annotations[ConfigSourceAnnotationKey]; ok {
|
||||
return source, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("cannot get source of pod %q", pod.UID)
|
||||
}
|
||||
|
||||
// SyncPodType classifies pod updates, eg: create, update.
|
||||
type SyncPodType int
|
||||
|
||||
const (
|
||||
// SyncPodSync is when the pod is synced to ensure desired state
|
||||
SyncPodSync SyncPodType = iota
|
||||
// SyncPodUpdate is when the pod is updated from source
|
||||
SyncPodUpdate
|
||||
// SyncPodCreate is when the pod is created from source
|
||||
SyncPodCreate
|
||||
// SyncPodKill is when the pod is killed based on a trigger internal to the kubelet for eviction.
|
||||
// If a SyncPodKill request is made to pod workers, the request is never dropped, and will always be processed.
|
||||
SyncPodKill
|
||||
)
|
||||
|
||||
func (sp SyncPodType) String() string {
|
||||
switch sp {
|
||||
case SyncPodCreate:
|
||||
return "create"
|
||||
case SyncPodUpdate:
|
||||
return "update"
|
||||
case SyncPodSync:
|
||||
return "sync"
|
||||
case SyncPodKill:
|
||||
return "kill"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// IsCriticalPod returns true if the pod bears the critical pod annotation key or if pod's priority is greater than
|
||||
// or equal to SystemCriticalPriority. Both the default scheduler and the kubelet use this function
|
||||
// to make admission and scheduling decisions.
|
||||
func IsCriticalPod(pod *v1.Pod) bool {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
|
||||
if pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(*pod.Spec.Priority) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) {
|
||||
if IsCritical(pod.Namespace, pod.Annotations) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Preemptable returns true if preemptor pod can preempt preemptee pod
|
||||
// if preemptee is not critical or if preemptor's priority is greater than preemptee's priority
|
||||
func Preemptable(preemptor, preemptee *v1.Pod) bool {
|
||||
if IsCriticalPod(preemptor) && !IsCriticalPod(preemptee) {
|
||||
return true
|
||||
}
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.PodPriority) {
|
||||
if (preemptor != nil && preemptor.Spec.Priority != nil) &&
|
||||
(preemptee != nil && preemptee.Spec.Priority != nil) {
|
||||
return *(preemptor.Spec.Priority) > *(preemptee.Spec.Priority)
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCritical returns true if parameters bear the critical pod annotation
|
||||
// key. The DaemonSetController use this key directly to make scheduling decisions.
|
||||
// TODO: @ravig - Deprecated. Remove this when we move to resolving critical pods based on priorityClassName.
|
||||
func IsCritical(ns string, annotations map[string]string) bool {
|
||||
// Critical pods are restricted to "kube-system" namespace as of now.
|
||||
if ns != kubeapi.NamespaceSystem {
|
||||
return false
|
||||
}
|
||||
val, ok := annotations[CriticalPodAnnotationKey]
|
||||
if ok && val == "" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on priority resolved from pod Spec.
|
||||
func IsCriticalPodBasedOnPriority(priority int32) bool {
|
||||
if priority >= scheduling.SystemCriticalPriority {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
100
vendor/k8s.io/kubernetes/pkg/kubelet/types/types.go
generated
vendored

@@ -1,100 +0,0 @@
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// TODO: Reconcile custom types in kubelet/types and this subpackage
|
||||
|
||||
type HttpGetter interface {
|
||||
Get(url string) (*http.Response, error)
|
||||
}
|
||||
|
||||
// Timestamp wraps around time.Time and offers utilities to format and parse
|
||||
// the time using RFC3339Nano
|
||||
type Timestamp struct {
|
||||
time time.Time
|
||||
}
|
||||
|
||||
// NewTimestamp returns a Timestamp object using the current time.
|
||||
func NewTimestamp() *Timestamp {
|
||||
return &Timestamp{time.Now()}
|
||||
}
|
||||
|
||||
// ConvertToTimestamp takes a string, parses it using the RFC3339Nano layout,
|
||||
// and converts it to a Timestamp object.
|
||||
func ConvertToTimestamp(timeString string) *Timestamp {
|
||||
parsed, _ := time.Parse(time.RFC3339Nano, timeString)
|
||||
return &Timestamp{parsed}
|
||||
}
|
||||
|
||||
// Get returns the time as time.Time.
|
||||
func (t *Timestamp) Get() time.Time {
|
||||
return t.time
|
||||
}
|
||||
|
||||
// GetString returns the time in the string format using the RFC3339Nano
|
||||
// layout.
|
||||
func (t *Timestamp) GetString() string {
|
||||
return t.time.Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
// A type to help sort container statuses based on container names.
|
||||
type SortedContainerStatuses []v1.ContainerStatus
|
||||
|
||||
func (s SortedContainerStatuses) Len() int { return len(s) }
|
||||
func (s SortedContainerStatuses) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
|
||||
func (s SortedContainerStatuses) Less(i, j int) bool {
|
||||
return s[i].Name < s[j].Name
|
||||
}
|
||||
|
||||
// SortInitContainerStatuses ensures that statuses are in the order that their
|
||||
// init container appears in the pod spec
|
||||
func SortInitContainerStatuses(p *v1.Pod, statuses []v1.ContainerStatus) {
|
||||
containers := p.Spec.InitContainers
|
||||
current := 0
|
||||
for _, container := range containers {
|
||||
for j := current; j < len(statuses); j++ {
|
||||
if container.Name == statuses[j].Name {
|
||||
statuses[current], statuses[j] = statuses[j], statuses[current]
|
||||
current++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reservation represents reserved resources for non-pod components.
|
||||
type Reservation struct {
|
||||
// System represents resources reserved for non-kubernetes components.
|
||||
System v1.ResourceList
|
||||
// Kubernetes represents resources reserved for kubernetes system components.
|
||||
Kubernetes v1.ResourceList
|
||||
}
|
||||
|
||||
// A pod UID which has been translated/resolved to the representation known to kubelets.
|
||||
type ResolvedPodUID types.UID
|
||||
|
||||
// A pod UID for a mirror pod.
|
||||
type MirrorPodUID types.UID
|
||||
33
vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD
generated
vendored
33
vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD
generated
vendored
|
|
@ -8,11 +8,27 @@ load(
|
|||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["util_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
srcs = [
|
||||
"util_unix_test.go",
|
||||
"util_windows_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = select({
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
go_library(
|
||||
|
|
@ -29,16 +45,19 @@ go_library(
|
|||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/sys/unix:go_default_library",
|
||||
"//vendor/k8s.io/klog:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"//vendor/github.com/Microsoft/go-winio:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
|
|
|
|||
20
vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go
generated
vendored

@@ -17,9 +17,6 @@ limitations under the License.
package util

import (
"fmt"
"net/url"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -28,20 +25,3 @@ import (
func FromApiserverCache(opts *metav1.GetOptions) {
opts.ResourceVersion = "0"
}

func parseEndpoint(endpoint string) (string, string, error) {
u, err := url.Parse(endpoint)
if err != nil {
return "", "", err
}

if u.Scheme == "tcp" {
return "tcp", u.Host, nil
} else if u.Scheme == "unix" {
return "unix", u.Path, nil
} else if u.Scheme == "" {
return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)
} else {
return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
}
}

36
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go
generated
vendored

@@ -21,11 +21,13 @@ package util
import (
"fmt"
"net"
"net/url"
"os"
"path/filepath"
"time"

"github.com/golang/glog"
"golang.org/x/sys/unix"
"k8s.io/klog"
)

const (

@@ -72,8 +74,38 @@ func parseEndpointWithFallbackProtocol(endpoint string, fallbackProtocol string)
fallbackEndpoint := fallbackProtocol + "://" + endpoint
protocol, addr, err = parseEndpoint(fallbackEndpoint)
if err == nil {
glog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint)
klog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint)
}
}
return
}

func parseEndpoint(endpoint string) (string, string, error) {
u, err := url.Parse(endpoint)
if err != nil {
return "", "", err
}

switch u.Scheme {
case "tcp":
return "tcp", u.Host, nil

case "unix":
return "unix", u.Path, nil

case "":
return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)

default:
return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
}
}

// LocalEndpoint returns the full path to a unix socket at the given endpoint
func LocalEndpoint(path, file string) string {
u := url.URL{
Scheme: unixProtocol,
Path: path,
}
return filepath.Join(u.String(), file+".sock")
}
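
For reference, an editorial usage sketch (not part of the vendored code) of the unix-side helpers shown above: how parseEndpoint classifies CRI endpoints and what LocalEndpoint produces. The helper bodies repeat the logic from the hunk; the endpoint strings are examples only.

package main

import (
    "fmt"
    "net/url"
    "path/filepath"
)

const unixProtocol = "unix"

// Same logic as the parseEndpoint added to util_unix.go above.
func parseEndpoint(endpoint string) (string, string, error) {
    u, err := url.Parse(endpoint)
    if err != nil {
        return "", "", err
    }
    switch u.Scheme {
    case "tcp":
        return "tcp", u.Host, nil
    case "unix":
        return "unix", u.Path, nil
    case "":
        return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)
    default:
        return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
    }
}

// Same shape as LocalEndpoint above.
func localEndpoint(path, file string) string {
    u := url.URL{Scheme: unixProtocol, Path: path}
    return filepath.Join(u.String(), file+".sock")
}

func main() {
    fmt.Println(parseEndpoint("unix:///var/run/dockershim.sock")) // unix /var/run/dockershim.sock <nil>
    fmt.Println(parseEndpoint("tcp://127.0.0.1:2376"))            // tcp 127.0.0.1:2376 <nil>
    fmt.Println(localEndpoint("/var/lib/kubelet/plugins", "csi")) // unix:/var/lib/kubelet/plugins/csi.sock
}
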
5
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go
generated
vendored

@@ -40,3 +40,8 @@ func LockAndCheckSubPath(volumePath, subPath string) ([]uintptr, error) {
// UnlockPath empty implementation
func UnlockPath(fileHandles []uintptr) {
}

// LocalEndpoint empty implementation
func LocalEndpoint(path, file string) string {
return ""
}

75
vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go
generated
vendored

@@ -21,11 +21,16 @@ package util
import (
"fmt"
"net"
"net/url"
"strings"
"time"

"github.com/Microsoft/go-winio"
)

const (
tcpProtocol = "tcp"
tcpProtocol = "tcp"
npipeProtocol = "npipe"
)

func CreateListener(endpoint string) (net.Listener, error) {

@@ -33,11 +38,17 @@ func CreateListener(endpoint string) (net.Listener, error) {
if err != nil {
return nil, err
}
if protocol != tcpProtocol {
return nil, fmt.Errorf("only support tcp endpoint")
}

return net.Listen(protocol, addr)
switch protocol {
case tcpProtocol:
return net.Listen(tcpProtocol, addr)

case npipeProtocol:
return winio.ListenPipe(addr, nil)

default:
return nil, fmt.Errorf("only support tcp and npipe endpoint")
}
}

func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {

@@ -45,13 +56,59 @@ func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout tim
if err != nil {
return "", nil, err
}
if protocol != tcpProtocol {
return "", nil, fmt.Errorf("only support tcp endpoint")

if protocol == tcpProtocol {
return addr, tcpDial, nil
}

return addr, dial, nil
if protocol == npipeProtocol {
return addr, npipeDial, nil
}

return "", nil, fmt.Errorf("only support tcp and npipe endpoint")
}

func dial(addr string, timeout time.Duration) (net.Conn, error) {
func tcpDial(addr string, timeout time.Duration) (net.Conn, error) {
return net.DialTimeout(tcpProtocol, addr, timeout)
}

func npipeDial(addr string, timeout time.Duration) (net.Conn, error) {
return winio.DialPipe(addr, &timeout)
}

func parseEndpoint(endpoint string) (string, string, error) {
// url.Parse doesn't recognize \, so replace with / first.
endpoint = strings.Replace(endpoint, "\\", "/", -1)
u, err := url.Parse(endpoint)
if err != nil {
return "", "", err
}

if u.Scheme == "tcp" {
return "tcp", u.Host, nil
} else if u.Scheme == "npipe" {
if strings.HasPrefix(u.Path, "//./pipe") {
return "npipe", u.Path, nil
}

// fallback host if not provided.
host := u.Host
if host == "" {
host = "."
}
return "npipe", fmt.Sprintf("//%s%s", host, u.Path), nil
} else if u.Scheme == "" {
return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)
} else {
return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
}
}

// LocalEndpoint returns the full path to a windows named pipe
func LocalEndpoint(path, file string) string {
u := url.URL{
Scheme: npipeProtocol,
Path: path,
}
return u.String() + "//./pipe/" + file
}
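
The Windows hunks above add named-pipe (npipe) support next to tcp: CreateListener gains winio.ListenPipe and GetAddressAndDialer now hands back either a tcp or an npipe dialer. A small editorial sketch of that dialer selection; the npipe dialer is stubbed here so the sketch builds on any platform, whereas the vendored code uses winio.DialPipe(addr, &timeout):

package main

import (
    "fmt"
    "net"
    "time"
)

type dialer func(addr string, timeout time.Duration) (net.Conn, error)

// tcpDial matches the vendored tcpDial: a plain net.DialTimeout over tcp.
func tcpDial(addr string, timeout time.Duration) (net.Conn, error) {
    return net.DialTimeout("tcp", addr, timeout)
}

// npipeDial stands in for winio.DialPipe(addr, &timeout), which only exists on Windows.
func npipeDial(addr string, timeout time.Duration) (net.Conn, error) {
    return nil, fmt.Errorf("named pipes are only available on Windows")
}

// addressAndDialer mirrors the protocol switch in the new GetAddressAndDialer.
func addressAndDialer(protocol, addr string) (string, dialer, error) {
    switch protocol {
    case "tcp":
        return addr, tcpDial, nil
    case "npipe":
        return addr, npipeDial, nil
    default:
        return "", nil, fmt.Errorf("only support tcp and npipe endpoint")
    }
}

func main() {
    addr, _, err := addressAndDialer("npipe", "//./pipe/dockershim")
    fmt.Println(addr, err) // //./pipe/dockershim <nil>
}
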
20
vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go
generated
vendored

@@ -21,7 +21,7 @@ import (
"net"
|
||||
"runtime"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
|
||||
authenticationv1 "k8s.io/api/authentication/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
|
|
@ -29,8 +29,8 @@ import (
|
|||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/record"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/kubelet/configmap"
|
||||
"k8s.io/kubernetes/pkg/kubelet/container"
|
||||
|
|
@ -162,7 +162,7 @@ func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface {
|
|||
func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface {
|
||||
exec, err := kvh.getMountExec(pluginName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
|
||||
klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
|
||||
// Use the default mounter
|
||||
exec = nil
|
||||
}
|
||||
|
|
@ -200,6 +200,10 @@ func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name
|
|||
return kvh.tokenManager.GetServiceAccountToken
|
||||
}
|
||||
|
||||
func (kvh *kubeletVolumeHost) DeleteServiceAccountTokenFunc() func(podUID types.UID) {
|
||||
return kvh.tokenManager.DeleteServiceAccountToken
|
||||
}
|
||||
|
||||
func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) {
|
||||
node, err := kvh.kubelet.GetNode()
|
||||
if err != nil {
|
||||
|
|
@ -219,7 +223,7 @@ func (kvh *kubeletVolumeHost) GetEventRecorder() record.EventRecorder {
|
|||
func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec {
|
||||
exec, err := kvh.getMountExec(pluginName)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
|
||||
klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
|
||||
// Use the default exec
|
||||
exec = nil
|
||||
}
|
||||
|
|
@ -234,7 +238,7 @@ func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec {
|
|||
// os.Exec should be used.
|
||||
func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error) {
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.MountContainers) {
|
||||
glog.V(5).Infof("using default mounter/exec for %s", pluginName)
|
||||
klog.V(5).Infof("using default mounter/exec for %s", pluginName)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
|
@ -244,10 +248,10 @@ func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error
|
|||
}
|
||||
if pod == nil {
|
||||
// Use default mounter/exec for this plugin
|
||||
glog.V(5).Infof("using default mounter/exec for %s", pluginName)
|
||||
klog.V(5).Infof("using default mounter/exec for %s", pluginName)
|
||||
return nil, nil
|
||||
}
|
||||
glog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName)
|
||||
klog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName)
|
||||
return &containerExec{
|
||||
pod: pod,
|
||||
containerName: container,
|
||||
|
|
@ -267,6 +271,6 @@ var _ mount.Exec = &containerExec{}
|
|||
|
||||
func (e *containerExec) Run(cmd string, args ...string) ([]byte, error) {
|
||||
cmdline := append([]string{cmd}, args...)
|
||||
glog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline)
|
||||
klog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline)
|
||||
return e.kl.RunInContainer(container.GetPodFullName(e.pod), e.pod.UID, e.containerName, cmdline)
|
||||
}