Switch to go modules

parent 461954facb
commit 1720059244

763 changed files with 24896 additions and 177398 deletions
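Background for the change: moving off a checked-in vendor/ tree means dependency versions are declared in a go.mod manifest at the repository root and resolved by the go tool. The sketch below is a hypothetical minimal manifest for a project that previously vendored the kubelet packages removed in this commit; the module path and version pin are assumptions, not taken from this commit, and in practice `go mod init` plus `go mod tidy` generate and maintain the file.

module example.com/yourproject // hypothetical module path

go 1.12

// Hypothetical pin; `go mod tidy` fills in the real requirements,
// replacing the vendored copies deleted below.
require k8s.io/kubernetes v1.14.0

After this, `go build` resolves k8s.io/kubernetes from the module cache instead of vendor/k8s.io/kubernetes.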
317 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/BUILD (generated, vendored)
@@ -1,317 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "active_deadline.go",
        "doc.go",
        "errors.go",
        "kubelet.go",
        "kubelet_getters.go",
        "kubelet_network.go",
        "kubelet_network_linux.go",
        "kubelet_network_others.go",
        "kubelet_node_status.go",
        "kubelet_pods.go",
        "kubelet_resources.go",
        "kubelet_volumes.go",
        "oom_watcher.go",
        "pod_container_deletor.go",
        "pod_workers.go",
        "reason_cache.go",
        "runonce.go",
        "runtime.go",
        "util.go",
        "volume_host.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/api/v1/resource:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/pods:go_default_library",
        "//pkg/apis/core/v1:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/apis/core/v1/helper/qos:go_default_library",
        "//pkg/capabilities:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/fieldpath:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/kubelet/apis/config:go_default_library",
        "//pkg/kubelet/apis/cri:go_default_library",
        "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
        "//pkg/kubelet/apis/pluginregistration/v1:go_default_library",
        "//pkg/kubelet/apis/podresources:go_default_library",
        "//pkg/kubelet/cadvisor:go_default_library",
        "//pkg/kubelet/certificate:go_default_library",
        "//pkg/kubelet/checkpointmanager:go_default_library",
        "//pkg/kubelet/cloudresource:go_default_library",
        "//pkg/kubelet/cm:go_default_library",
        "//pkg/kubelet/config:go_default_library",
        "//pkg/kubelet/configmap:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/dockershim:go_default_library",
        "//pkg/kubelet/dockershim/remote:go_default_library",
        "//pkg/kubelet/envvars:go_default_library",
        "//pkg/kubelet/events:go_default_library",
        "//pkg/kubelet/eviction:go_default_library",
        "//pkg/kubelet/images:go_default_library",
        "//pkg/kubelet/kubeletconfig:go_default_library",
        "//pkg/kubelet/kuberuntime:go_default_library",
        "//pkg/kubelet/lifecycle:go_default_library",
        "//pkg/kubelet/logs:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/metrics/collectors:go_default_library",
        "//pkg/kubelet/mountpod:go_default_library",
        "//pkg/kubelet/network/dns:go_default_library",
        "//pkg/kubelet/nodelease:go_default_library",
        "//pkg/kubelet/nodestatus:go_default_library",
        "//pkg/kubelet/pleg:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/preemption:go_default_library",
        "//pkg/kubelet/prober:go_default_library",
        "//pkg/kubelet/prober/results:go_default_library",
        "//pkg/kubelet/remote:go_default_library",
        "//pkg/kubelet/runtimeclass:go_default_library",
        "//pkg/kubelet/secret:go_default_library",
        "//pkg/kubelet/server:go_default_library",
        "//pkg/kubelet/server/portforward:go_default_library",
        "//pkg/kubelet/server/remotecommand:go_default_library",
        "//pkg/kubelet/server/stats:go_default_library",
        "//pkg/kubelet/server/streaming:go_default_library",
        "//pkg/kubelet/stats:go_default_library",
        "//pkg/kubelet/status:go_default_library",
        "//pkg/kubelet/sysctl:go_default_library",
        "//pkg/kubelet/token:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/kubelet/util:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/kubelet/util/manager:go_default_library",
        "//pkg/kubelet/util/pluginwatcher:go_default_library",
        "//pkg/kubelet/util/queue:go_default_library",
        "//pkg/kubelet/util/sliceutils:go_default_library",
        "//pkg/kubelet/volumemanager:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/security/apparmor:go_default_library",
        "//pkg/security/podsecuritypolicy/sysctl:go_default_library",
        "//pkg/securitycontext:go_default_library",
        "//pkg/util/dbus:go_default_library",
        "//pkg/util/iptables:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/node:go_default_library",
        "//pkg/util/oom:go_default_library",
        "//pkg/util/removeall:go_default_library",
        "//pkg/util/taints:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/csi:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/subpath:go_default_library",
        "//pkg/volume/util/types:go_default_library",
        "//pkg/volume/util/volumepathhandler:go_default_library",
        "//pkg/volume/validation:go_default_library",
        "//staging/src/k8s.io/api/authentication/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/util/certificate:go_default_library",
        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
        "//staging/src/k8s.io/cloud-provider:go_default_library",
        "//third_party/forked/golang/expansion:go_default_library",
        "//vendor/github.com/golang/groupcache/lru:go_default_library",
        "//vendor/github.com/google/cadvisor/events:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
        "//vendor/k8s.io/utils/integer:go_default_library",
        "//vendor/k8s.io/utils/path:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "active_deadline_test.go",
        "kubelet_getters_test.go",
        "kubelet_network_test.go",
        "kubelet_node_status_test.go",
        "kubelet_pods_linux_test.go",
        "kubelet_pods_test.go",
        "kubelet_pods_windows_test.go",
        "kubelet_resources_test.go",
        "kubelet_test.go",
        "kubelet_volumes_linux_test.go",
        "kubelet_volumes_test.go",
        "oom_watcher_test.go",
        "pod_container_deletor_test.go",
        "pod_workers_test.go",
        "reason_cache_test.go",
        "runonce_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/apis/core/install:go_default_library",
        "//pkg/capabilities:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/kubelet/cadvisor/testing:go_default_library",
        "//pkg/kubelet/cm:go_default_library",
        "//pkg/kubelet/config:go_default_library",
        "//pkg/kubelet/configmap:go_default_library",
        "//pkg/kubelet/container:go_default_library",
        "//pkg/kubelet/container/testing:go_default_library",
        "//pkg/kubelet/eviction:go_default_library",
        "//pkg/kubelet/images:go_default_library",
        "//pkg/kubelet/lifecycle:go_default_library",
        "//pkg/kubelet/logs:go_default_library",
        "//pkg/kubelet/network/dns:go_default_library",
        "//pkg/kubelet/nodestatus:go_default_library",
        "//pkg/kubelet/pleg:go_default_library",
        "//pkg/kubelet/pod:go_default_library",
        "//pkg/kubelet/pod/testing:go_default_library",
        "//pkg/kubelet/prober/results:go_default_library",
        "//pkg/kubelet/prober/testing:go_default_library",
        "//pkg/kubelet/secret:go_default_library",
        "//pkg/kubelet/server/portforward:go_default_library",
        "//pkg/kubelet/server/remotecommand:go_default_library",
        "//pkg/kubelet/server/stats:go_default_library",
        "//pkg/kubelet/stats:go_default_library",
        "//pkg/kubelet/status:go_default_library",
        "//pkg/kubelet/status/testing:go_default_library",
        "//pkg/kubelet/token:go_default_library",
        "//pkg/kubelet/types:go_default_library",
        "//pkg/kubelet/util/queue:go_default_library",
        "//pkg/kubelet/util/sliceutils:go_default_library",
        "//pkg/kubelet/volumemanager:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/taints:go_default_library",
        "//pkg/version:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/awsebs:go_default_library",
        "//pkg/volume/azure_dd:go_default_library",
        "//pkg/volume/gcepd:go_default_library",
        "//pkg/volume/host_path:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/subpath:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/testing:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
        "//staging/src/k8s.io/client-go/util/testing:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v1:go_default_library",
        "//vendor/github.com/google/cadvisor/info/v2:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/github.com/stretchr/testify/require:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:linux": [
            "//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/apis:all-srcs",
        "//pkg/kubelet/cadvisor:all-srcs",
        "//pkg/kubelet/certificate:all-srcs",
        "//pkg/kubelet/checkpoint:all-srcs",
        "//pkg/kubelet/checkpointmanager:all-srcs",
        "//pkg/kubelet/client:all-srcs",
        "//pkg/kubelet/cloudresource:all-srcs",
        "//pkg/kubelet/cm:all-srcs",
        "//pkg/kubelet/config:all-srcs",
        "//pkg/kubelet/configmap:all-srcs",
        "//pkg/kubelet/container:all-srcs",
        "//pkg/kubelet/custommetrics:all-srcs",
        "//pkg/kubelet/dockershim:all-srcs",
        "//pkg/kubelet/envvars:all-srcs",
        "//pkg/kubelet/events:all-srcs",
        "//pkg/kubelet/eviction:all-srcs",
        "//pkg/kubelet/images:all-srcs",
        "//pkg/kubelet/kubeletconfig:all-srcs",
        "//pkg/kubelet/kuberuntime:all-srcs",
        "//pkg/kubelet/leaky:all-srcs",
        "//pkg/kubelet/lifecycle:all-srcs",
        "//pkg/kubelet/logs:all-srcs",
        "//pkg/kubelet/metrics:all-srcs",
        "//pkg/kubelet/mountpod:all-srcs",
        "//pkg/kubelet/network:all-srcs",
        "//pkg/kubelet/nodelease:all-srcs",
        "//pkg/kubelet/nodestatus:all-srcs",
        "//pkg/kubelet/pleg:all-srcs",
        "//pkg/kubelet/pod:all-srcs",
        "//pkg/kubelet/preemption:all-srcs",
        "//pkg/kubelet/prober:all-srcs",
        "//pkg/kubelet/qos:all-srcs",
        "//pkg/kubelet/remote:all-srcs",
        "//pkg/kubelet/runtimeclass:all-srcs",
        "//pkg/kubelet/secret:all-srcs",
        "//pkg/kubelet/server:all-srcs",
        "//pkg/kubelet/stats:all-srcs",
        "//pkg/kubelet/status:all-srcs",
        "//pkg/kubelet/sysctl:all-srcs",
        "//pkg/kubelet/token:all-srcs",
        "//pkg/kubelet/types:all-srcs",
        "//pkg/kubelet/util:all-srcs",
        "//pkg/kubelet/volumemanager:all-srcs",
        "//pkg/kubelet/winstats:all-srcs",
    ],
    tags = ["automanaged"],
)
14 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/OWNERS (generated, vendored)
@@ -1,14 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- Random-Liu
- dchen1107
- derekwaynecarr
- tallclair
- vishh
- yujuhong
reviewers:
- sig-node-reviewers
labels:
- area/kubelet
- sig/node
98 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/active_deadline.go (generated, vendored)
@@ -1,98 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/client-go/tools/record"
    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
    "k8s.io/kubernetes/pkg/kubelet/status"
)

const (
    reason  = "DeadlineExceeded"
    message = "Pod was active on the node longer than the specified deadline"
)

// activeDeadlineHandler knows how to enforce active deadlines on pods.
type activeDeadlineHandler struct {
    // the clock to use for deadline enforcement
    clock clock.Clock
    // the provider of pod status
    podStatusProvider status.PodStatusProvider
    // the recorder to dispatch events when we identify a pod has exceeded active deadline
    recorder record.EventRecorder
}

// newActiveDeadlineHandler returns an active deadline handler that can enforce pod active deadline
func newActiveDeadlineHandler(
    podStatusProvider status.PodStatusProvider,
    recorder record.EventRecorder,
    clock clock.Clock,
) (*activeDeadlineHandler, error) {

    // check for all required fields
    if clock == nil || podStatusProvider == nil || recorder == nil {
        return nil, fmt.Errorf("Required arguments must not be nil: %v, %v, %v", clock, podStatusProvider, recorder)
    }
    return &activeDeadlineHandler{
        clock:             clock,
        podStatusProvider: podStatusProvider,
        recorder:          recorder,
    }, nil
}

// ShouldSync returns true if the pod is past its active deadline.
func (m *activeDeadlineHandler) ShouldSync(pod *v1.Pod) bool {
    return m.pastActiveDeadline(pod)
}

// ShouldEvict returns true if the pod is past its active deadline.
// It dispatches an event that the pod should be evicted if it is past its deadline.
func (m *activeDeadlineHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
    if !m.pastActiveDeadline(pod) {
        return lifecycle.ShouldEvictResponse{Evict: false}
    }
    m.recorder.Eventf(pod, v1.EventTypeNormal, reason, message)
    return lifecycle.ShouldEvictResponse{Evict: true, Reason: reason, Message: message}
}

// pastActiveDeadline returns true if the pod has been active for more than its ActiveDeadlineSeconds
func (m *activeDeadlineHandler) pastActiveDeadline(pod *v1.Pod) bool {
    // no active deadline was specified
    if pod.Spec.ActiveDeadlineSeconds == nil {
        return false
    }
    // get the latest status to determine if it was started
    podStatus, ok := m.podStatusProvider.GetPodStatus(pod.UID)
    if !ok {
        podStatus = pod.Status
    }
    // we have no start time so just return
    if podStatus.StartTime.IsZero() {
        return false
    }
    // determine if the deadline was exceeded
    start := podStatus.StartTime.Time
    duration := m.clock.Since(start)
    allowedDuration := time.Duration(*pod.Spec.ActiveDeadlineSeconds) * time.Second
    return duration >= allowedDuration
}
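The handler above is easiest to see end to end with a fake clock. The following is a minimal sketch (not part of this commit) of how it could be exercised from a test inside package kubelet, since the constructor is unexported; stubStatusProvider is a hypothetical stand-in for status.PodStatusProvider.

package kubelet

import (
    "testing"
    "time"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/clock"
    "k8s.io/client-go/tools/record"
)

// stubStatusProvider is a hypothetical stand-in for status.PodStatusProvider.
type stubStatusProvider struct{ status v1.PodStatus }

func (s stubStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
    return s.status, true
}

func TestActiveDeadlineSketch(t *testing.T) {
    start := metav1.NewTime(time.Now())
    deadline := int64(30) // pod may be active for 30 seconds
    pod := &v1.Pod{Spec: v1.PodSpec{ActiveDeadlineSeconds: &deadline}}

    fakeClock := clock.NewFakeClock(start.Time)
    handler, err := newActiveDeadlineHandler(
        stubStatusProvider{status: v1.PodStatus{StartTime: &start}},
        record.NewFakeRecorder(10),
        fakeClock,
    )
    if err != nil {
        t.Fatal(err)
    }

    // Before the deadline: no eviction.
    if handler.ShouldEvict(pod).Evict {
        t.Fatal("did not expect eviction before the deadline")
    }
    // Advance the fake clock past the deadline: eviction is requested.
    fakeClock.Step(31 * time.Second)
    if !handler.ShouldEvict(pod).Evict {
        t.Fatal("expected eviction after the deadline")
    }
}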
51 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/apis/BUILD (generated, vendored)
@@ -1,51 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "well_known_annotations.go",
        "well_known_annotations_windows.go",
        "well_known_labels.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/apis",
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:windows": [
            "//pkg/features:go_default_library",
            "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/apis/config:all-srcs",
        "//pkg/kubelet/apis/cri:all-srcs",
        "//pkg/kubelet/apis/deviceplugin/v1alpha:all-srcs",
        "//pkg/kubelet/apis/deviceplugin/v1beta1:all-srcs",
        "//pkg/kubelet/apis/pluginregistration/v1:all-srcs",
        "//pkg/kubelet/apis/pluginregistration/v1alpha1:all-srcs",
        "//pkg/kubelet/apis/pluginregistration/v1beta1:all-srcs",
        "//pkg/kubelet/apis/podresources:all-srcs",
        "//pkg/kubelet/apis/resourcemetrics/v1alpha1:all-srcs",
        "//pkg/kubelet/apis/stats/v1alpha1:all-srcs",
    ],
    tags = ["automanaged"],
)
30 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/BUILD (generated, vendored)
@@ -1,30 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["services.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/apis/cri",
    deps = ["//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/apis/cri/runtime/v1alpha2:all-srcs",
        "//pkg/kubelet/apis/cri/testing:all-srcs",
    ],
    tags = ["automanaged"],
)
119 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/services.go (generated, vendored)
@@ -1,119 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cri

import (
    "time"

    runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)

// RuntimeVersioner contains methods for runtime name, version and API version.
type RuntimeVersioner interface {
    // Version returns the runtime name, runtime version and runtime API version
    Version(apiVersion string) (*runtimeapi.VersionResponse, error)
}

// ContainerManager contains methods to manipulate containers managed by a
// container runtime. The methods are thread-safe.
type ContainerManager interface {
    // CreateContainer creates a new container in specified PodSandbox.
    CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
    // StartContainer starts the container.
    StartContainer(containerID string) error
    // StopContainer stops a running container with a grace period (i.e., timeout).
    StopContainer(containerID string, timeout int64) error
    // RemoveContainer removes the container.
    RemoveContainer(containerID string) error
    // ListContainers lists all containers by filters.
    ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error)
    // ContainerStatus returns the status of the container.
    ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error)
    // UpdateContainerResources updates the cgroup resources for the container.
    UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error
    // ExecSync executes a command in the container, and returns the stdout output.
    // If command exits with a non-zero exit code, an error is returned.
    ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error)
    // Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
    Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error)
    // Attach prepares a streaming endpoint to attach to a running container, and returns the address.
    Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error)
    // ReopenContainerLog asks runtime to reopen the stdout/stderr log file
    // for the container. If it returns error, new container log file MUST NOT
    // be created.
    ReopenContainerLog(ContainerID string) error
}

// PodSandboxManager contains methods for operating on PodSandboxes. The methods
// are thread-safe.
type PodSandboxManager interface {
    // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
    // the sandbox is in ready state.
    RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error)
    // StopPodSandbox stops the sandbox. If there are any running containers in the
    // sandbox, they should be force terminated.
    StopPodSandbox(podSandboxID string) error
    // RemovePodSandbox removes the sandbox. If there are running containers in the
    // sandbox, they should be forcibly removed.
    RemovePodSandbox(podSandboxID string) error
    // PodSandboxStatus returns the Status of the PodSandbox.
    PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error)
    // ListPodSandbox returns a list of Sandbox.
    ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error)
    // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
    PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error)
}

// ContainerStatsManager contains methods for retrieving the container
// statistics.
type ContainerStatsManager interface {
    // ContainerStats returns stats of the container. If the container does not
    // exist, the call returns an error.
    ContainerStats(containerID string) (*runtimeapi.ContainerStats, error)
    // ListContainerStats returns stats of all running containers.
    ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error)
}

// RuntimeService interface should be implemented by a container runtime.
// The methods should be thread-safe.
type RuntimeService interface {
    RuntimeVersioner
    ContainerManager
    PodSandboxManager
    ContainerStatsManager

    // UpdateRuntimeConfig updates runtime configuration if specified
    UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error
    // Status returns the status of the runtime.
    Status() (*runtimeapi.RuntimeStatus, error)
}

// ImageManagerService interface should be implemented by a container image
// manager.
// The methods should be thread-safe.
type ImageManagerService interface {
    // ListImages lists the existing images.
    ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error)
    // ImageStatus returns the status of the image.
    ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error)
    // PullImage pulls an image with the authentication config.
    PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error)
    // RemoveImage removes the image.
    RemoveImage(image *runtimeapi.ImageSpec) error
    // ImageFsInfo returns information of the filesystem that is used to store images.
    ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error)
}
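For a sense of how these interfaces are consumed, here is a minimal sketch (not from this commit): a helper that reports the runtime identity via the RuntimeVersioner part of RuntimeService. The printRuntimeInfo function and its package are hypothetical; a concrete client such as the kubelet's gRPC remote runtime service would be injected by the caller.

package criutil // hypothetical package

import (
    "fmt"

    internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
)

// printRuntimeInfo queries any RuntimeService implementation for its
// name, version, and API version, and prints them.
func printRuntimeInfo(rs internalapi.RuntimeService) error {
    // The argument is the kubelet's desired CRI API version string.
    resp, err := rs.Version("0.1.0")
    if err != nil {
        return err
    }
    fmt.Printf("runtime %s %s (API %s)\n",
        resp.RuntimeName, resp.RuntimeVersion, resp.RuntimeApiVersion)
    return nil
}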
25 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_annotations.go (generated, vendored)
@@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis

const (
    // When kubelet is started with the "external" cloud provider, it sets
    // this annotation on the node to denote an IP address set from the
    // command-line flag (--node-ip). This IP is verified with the cloud
    // provider as valid by the cloud-controller-manager.
    AnnotationProvidedIPAddr = "alpha.kubernetes.io/provided-node-ip"
)
41 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_annotations_windows.go (generated, vendored)
@@ -1,41 +0,0 @@
// +build windows

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis

import (
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/features"
)

const (
    // HypervIsolationAnnotationKey and HypervIsolationValue are used to run windows containers with hyperv isolation.
    // Refer https://aka.ms/hyperv-container.
    HypervIsolationAnnotationKey = "experimental.windows.kubernetes.io/isolation-type"
    HypervIsolationValue         = "hyperv"
)

// ShouldIsolatedByHyperV returns true if a windows container should be run with hyperv isolation.
func ShouldIsolatedByHyperV(annotations map[string]string) bool {
    if !utilfeature.DefaultFeatureGate.Enabled(features.HyperVContainer) {
        return false
    }

    v, ok := annotations[HypervIsolationAnnotationKey]
    return ok && v == HypervIsolationValue
}
93 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/apis/well_known_labels.go (generated, vendored)
@@ -1,93 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apis

import (
    "strings"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
)

const (
    // The OS/Arch labels are promoted to GA in 1.14. kubelet applies both beta
    // and GA labels to ensure backward compatibility.
    // TODO: stop applying the beta OS/Arch labels in Kubernetes 1.18.
    LabelOS   = "beta.kubernetes.io/os"
    LabelArch = "beta.kubernetes.io/arch"

    // GA versions of the legacy beta labels.
    // TODO: update kubelet and controllers to set both beta and GA labels, then export these constants
    labelZoneFailureDomainGA = "failure-domain.kubernetes.io/zone"
    labelZoneRegionGA        = "failure-domain.kubernetes.io/region"
    labelInstanceTypeGA      = "kubernetes.io/instance-type"
)

var kubeletLabels = sets.NewString(
    v1.LabelHostname,
    v1.LabelZoneFailureDomain,
    v1.LabelZoneRegion,
    v1.LabelInstanceType,
    v1.LabelOSStable,
    v1.LabelArchStable,

    LabelOS,
    LabelArch,

    labelZoneFailureDomainGA,
    labelZoneRegionGA,
    labelInstanceTypeGA,
)

var kubeletLabelNamespaces = sets.NewString(
    v1.LabelNamespaceSuffixKubelet,
    v1.LabelNamespaceSuffixNode,
)

// KubeletLabels returns the list of label keys kubelets are allowed to set on their own Node objects
func KubeletLabels() []string {
    return kubeletLabels.List()
}

// KubeletLabelNamespaces returns the list of label key namespaces kubelets are allowed to set on their own Node objects
func KubeletLabelNamespaces() []string {
    return kubeletLabelNamespaces.List()
}

// IsKubeletLabel returns true if the label key is one that kubelets are allowed to set on their own Node object.
// This checks if the key is in the KubeletLabels() list, or has a namespace in the KubeletLabelNamespaces() list.
func IsKubeletLabel(key string) bool {
    if kubeletLabels.Has(key) {
        return true
    }

    namespace := getLabelNamespace(key)
    for allowedNamespace := range kubeletLabelNamespaces {
        if namespace == allowedNamespace || strings.HasSuffix(namespace, "."+allowedNamespace) {
            return true
        }
    }

    return false
}

func getLabelNamespace(key string) string {
    if parts := strings.SplitN(key, "/", 2); len(parts) == 2 {
        return parts[0]
    }
    return ""
}
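A short sketch (not from this commit) of how the allowlist above behaves; the example keys are illustrative. "kubernetes.io/hostname" matches kubeletLabels via v1.LabelHostname, "node.kubernetes.io/custom" matches the kubeletLabelNamespaces suffix rule, and an arbitrary third-party key is rejected.

package main

import (
    "fmt"

    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

func main() {
    for _, key := range []string{
        "kubernetes.io/hostname",    // allowed: well-known kubelet label
        "beta.kubernetes.io/os",     // allowed: legacy beta label
        "node.kubernetes.io/custom", // allowed: kubelet label namespace
        "example.com/team",          // rejected: not kubelet-settable
    } {
        fmt.Printf("%-28s kubelet-settable: %v\n", key, kubeletapis.IsKubeletLabel(key))
    }
}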
19 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/doc.go (generated, vendored)
@@ -1,19 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package kubelet is the package that contains the libraries that drive the Kubelet binary.
// The kubelet is responsible for node level pod management. It runs on each worker in the cluster.
package kubelet // import "k8s.io/kubernetes/pkg/kubelet"
27 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/errors.go (generated, vendored)
@@ -1,27 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import "errors"

const (
    NetworkNotReadyErrorMsg = "network is not ready"
)

var (
    ErrNetworkUnknown = errors.New("network state unknown")
)
2285 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/kubelet.go (generated, vendored)
File diff suppressed because it is too large
346 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go (generated, vendored)
@@ -1,346 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "io/ioutil"
    "net"
    "path/filepath"

    cadvisorapiv1 "github.com/google/cadvisor/info/v1"
    "k8s.io/klog"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/kubelet/cm"
    "k8s.io/kubernetes/pkg/kubelet/config"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/util/mount"
    utilnode "k8s.io/kubernetes/pkg/util/node"
    utilpath "k8s.io/utils/path"
)

// getRootDir returns the full path to the directory under which kubelet can
// store data. These functions are useful to pass interfaces to other modules
// that may need to know where to write data without getting a whole kubelet
// instance.
func (kl *Kubelet) getRootDir() string {
    return kl.rootDirectory
}

// getPodsDir returns the full path to the directory under which pod
// directories are created.
func (kl *Kubelet) getPodsDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodsDirName)
}

// getPluginsDir returns the full path to the directory under which plugin
// directories are created. Plugins can use these directories for data that
// they need to persist. Plugins should create subdirectories under this named
// after their own names.
func (kl *Kubelet) getPluginsDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName)
}

// getPluginsRegistrationDir returns the full path to the directory under which
// plugins socket should be placed to be registered.
// More information is available about plugin registration in the pluginwatcher
// module
func (kl *Kubelet) getPluginsRegistrationDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsRegistrationDirName)
}

// getPluginDir returns a data directory name for a given plugin name.
// Plugins can use these directories to store data that they need to persist.
// For per-pod plugin data, see getPodPluginDir.
func (kl *Kubelet) getPluginDir(pluginName string) string {
    return filepath.Join(kl.getPluginsDir(), pluginName)
}

// getVolumeDevicePluginsDir returns the full path to the directory under which plugin
// directories are created. Plugins can use these directories for data that
// they need to persist. Plugins should create subdirectories under this named
// after their own names.
func (kl *Kubelet) getVolumeDevicePluginsDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPluginsDirName)
}

// getVolumeDevicePluginDir returns a data directory name for a given plugin name.
// Plugins can use these directories to store data that they need to persist.
// For per-pod plugin data, see getVolumeDevicePluginsDir.
func (kl *Kubelet) getVolumeDevicePluginDir(pluginName string) string {
    return filepath.Join(kl.getVolumeDevicePluginsDir(), pluginName, config.DefaultKubeletVolumeDevicesDirName)
}

// GetPodDir returns the full path to the per-pod data directory for the
// specified pod. This directory may not exist if the pod does not exist.
func (kl *Kubelet) GetPodDir(podUID types.UID) string {
    return kl.getPodDir(podUID)
}

// getPodDir returns the full path to the per-pod directory for the pod with
// the given UID.
func (kl *Kubelet) getPodDir(podUID types.UID) string {
    return filepath.Join(kl.getPodsDir(), string(podUID))
}

// getPodVolumeSubpathsDir returns the full path to the per-pod subpaths directory under
// which subpath volumes are created for the specified pod. This directory may not
// exist if the pod does not exist or subpaths are not specified.
func (kl *Kubelet) getPodVolumeSubpathsDir(podUID types.UID) string {
    return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeSubpathsDirName)
}

// getPodVolumesDir returns the full path to the per-pod data directory under
// which volumes are created for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumesDir(podUID types.UID) string {
    return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumesDirName)
}

// getPodVolumeDir returns the full path to the directory which represents the
// named volume under the named plugin for specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
    return filepath.Join(kl.getPodVolumesDir(podUID), pluginName, volumeName)
}

// getPodVolumeDevicesDir returns the full path to the per-pod data directory under
// which volumes are created for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDevicesDir(podUID types.UID) string {
    return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeDevicesDirName)
}

// getPodVolumeDeviceDir returns the full path to the directory which represents the
// named plugin for specified pod. This directory may not exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
    return filepath.Join(kl.getPodVolumeDevicesDir(podUID), pluginName)
}

// getPodPluginsDir returns the full path to the per-pod data directory under
// which plugins may store data for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodPluginsDir(podUID types.UID) string {
    return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletPluginsDirName)
}

// getPodPluginDir returns a data directory name for a given plugin name for a
// given pod UID. Plugins can use these directories to store data that they
// need to persist. For non-per-pod plugin data, see getPluginDir.
func (kl *Kubelet) getPodPluginDir(podUID types.UID, pluginName string) string {
    return filepath.Join(kl.getPodPluginsDir(podUID), pluginName)
}

// getPodContainerDir returns the full path to the per-pod data directory under
// which container data is held for the specified pod. This directory may not
// exist if the pod or container does not exist.
func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string {
    return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletContainersDirName, ctrName)
}

// getPodResourcesDir returns the full path to the directory containing the pod resources socket
func (kl *Kubelet) getPodResourcesDir() string {
    return filepath.Join(kl.getRootDir(), config.DefaultKubeletPodResourcesDirName)
}

// GetPods returns all pods bound to the kubelet and their spec, and the mirror
// pods.
func (kl *Kubelet) GetPods() []*v1.Pod {
    return kl.podManager.GetPods()
}

// GetRunningPods returns all pods running on kubelet from looking at the
// container runtime cache. This function converts kubecontainer.Pod to
// v1.Pod, so only the fields that exist in both kubecontainer.Pod and
// v1.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) {
    pods, err := kl.runtimeCache.GetPods()
    if err != nil {
        return nil, err
    }

    apiPods := make([]*v1.Pod, 0, len(pods))
    for _, pod := range pods {
        apiPods = append(apiPods, pod.ToAPIPod())
    }
    return apiPods, nil
}

// GetPodByFullName gets the pod with the given 'full' name, which
// incorporates the namespace as well as whether the pod was found.
func (kl *Kubelet) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
    return kl.podManager.GetPodByFullName(podFullName)
}

// GetPodByName provides the first pod that matches namespace and name, as well
// as whether the pod was found.
func (kl *Kubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) {
    return kl.podManager.GetPodByName(namespace, name)
}

// GetPodByCgroupfs provides the pod that maps to the specified cgroup, as well
// as whether the pod was found.
func (kl *Kubelet) GetPodByCgroupfs(cgroupfs string) (*v1.Pod, bool) {
    pcm := kl.containerManager.NewPodContainerManager()
    if result, podUID := pcm.IsPodCgroup(cgroupfs); result {
        return kl.podManager.GetPodByUID(podUID)
    }
    return nil, false
}

// GetHostname returns the hostname as the kubelet sees it.
func (kl *Kubelet) GetHostname() string {
    return kl.hostname
}

// getRuntime returns the current Runtime implementation in use by the kubelet.
func (kl *Kubelet) getRuntime() kubecontainer.Runtime {
    return kl.containerRuntime
}

// GetNode returns the node info for the configured node name of this Kubelet.
func (kl *Kubelet) GetNode() (*v1.Node, error) {
    if kl.kubeClient == nil {
        return kl.initialNode()
    }
    return kl.nodeInfo.GetNodeInfo(string(kl.nodeName))
}

// getNodeAnyWay() must return a *v1.Node which is required by RunGeneralPredicates().
// The *v1.Node is obtained as follows:
// Return kubelet's nodeInfo for this node, except on error or if in standalone mode,
// in which case return a manufactured nodeInfo representing a node with no pods,
// zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
    if kl.kubeClient != nil {
        if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil {
            return n, nil
        }
    }
    return kl.initialNode()
}

// GetNodeConfig returns the container manager node config.
func (kl *Kubelet) GetNodeConfig() cm.NodeConfig {
    return kl.containerManager.GetNodeConfig()
}

// GetPodCgroupRoot returns the literal cgroupfs value for the cgroup containing all pods
func (kl *Kubelet) GetPodCgroupRoot() string {
    return kl.containerManager.GetPodCgroupRoot()
}

// GetHostIP returns host IP or nil in case of error.
func (kl *Kubelet) GetHostIP() (net.IP, error) {
    node, err := kl.GetNode()
    if err != nil {
        return nil, fmt.Errorf("cannot get node: %v", err)
    }
    return utilnode.GetNodeHostIP(node)
}

// getHostIPAnyWay attempts to return the host IP from kubelet's nodeInfo, or
// the initialNode.
func (kl *Kubelet) getHostIPAnyWay() (net.IP, error) {
    node, err := kl.getNodeAnyWay()
    if err != nil {
        return nil, err
    }
    return utilnode.GetNodeHostIP(node)
}

// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
    return kl.volumeManager.GetExtraSupplementalGroupsForPod(pod)
}

// getPodVolumePathListFromDisk returns a list of the volume paths by reading the
// volume directories for the given pod from the disk.
func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {
    volumes := []string{}
    podVolDir := kl.getPodVolumesDir(podUID)

    if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
        return volumes, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr)
    } else if !pathExists {
        klog.Warningf("Path %q does not exist", podVolDir)
        return volumes, nil
    }

    volumePluginDirs, err := ioutil.ReadDir(podVolDir)
    if err != nil {
        klog.Errorf("Could not read directory %s: %v", podVolDir, err)
        return volumes, err
    }
    for _, volumePluginDir := range volumePluginDirs {
        volumePluginName := volumePluginDir.Name()
        volumePluginPath := filepath.Join(podVolDir, volumePluginName)
        volumeDirs, err := utilpath.ReadDirNoStat(volumePluginPath)
        if err != nil {
            return volumes, fmt.Errorf("Could not read directory %s: %v", volumePluginPath, err)
        }
        for _, volumeDir := range volumeDirs {
            volumes = append(volumes, filepath.Join(volumePluginPath, volumeDir))
        }
    }
    return volumes, nil
}

func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string, error) {
    mountedVolumes := []string{}
    volumePaths, err := kl.getPodVolumePathListFromDisk(podUID)
    if err != nil {
        return mountedVolumes, err
    }
    for _, volumePath := range volumePaths {
        isNotMount, err := kl.mounter.IsLikelyNotMountPoint(volumePath)
        if err != nil {
            return mountedVolumes, err
        }
        if !isNotMount {
            mountedVolumes = append(mountedVolumes, volumePath)
        }
    }
    return mountedVolumes, nil
}

// podVolumeSubpathsDirExists returns true if the pod volume-subpaths directory for
// a given pod exists
func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) {
    podVolDir := kl.getPodVolumeSubpathsDir(podUID)

    if pathExists, pathErr := mount.PathExists(podVolDir); pathErr != nil {
        return true, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr)
    } else if !pathExists {
        return false, nil
    }
    return true, nil
}

// GetVersionInfo returns information about the version of cAdvisor in use.
func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
    return kl.cadvisor.VersionInfo()
}

// GetCachedMachineInfo assumes that the machine info can't change without a reboot
func (kl *Kubelet) GetCachedMachineInfo() (*cadvisorapiv1.MachineInfo, error) {
    return kl.machineInfo, nil
}
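The getters above mostly encode a directory layout rooted at the kubelet's root directory. As a rough illustration (not from this commit), assuming the default root /var/lib/kubelet and the default directory names from pkg/kubelet/config ("pods", "volumes", "containers"), the paths compose as below; the pod UID, plugin name, volume name, and container name are hypothetical.

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    root := "/var/lib/kubelet" // default kubelet root dir (assumption)
    podUID := "1234-abcd"      // hypothetical pod UID

    // per-pod dir, as in getPodDir
    podDir := filepath.Join(root, "pods", podUID)
    fmt.Println(podDir)
    // pod volumes dir, as in getPodVolumesDir
    fmt.Println(filepath.Join(podDir, "volumes"))
    // a named volume under a plugin, as in getPodVolumeDir(pluginName, volumeName)
    fmt.Println(filepath.Join(podDir, "volumes", "kubernetes.io~secret", "token"))
    // per-container dir, as in getPodContainerDir
    fmt.Println(filepath.Join(podDir, "containers", "app"))
}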
87 deletions: vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network.go (generated, vendored)
@@ -1,87 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/klog"
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)

const (
	// KubeMarkMasqChain is the mark-for-masquerade chain.
	// TODO: clean up this logic in kube-proxy
	KubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ"

	// KubeMarkDropChain is the mark-for-drop chain.
	KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP"

	// KubePostroutingChain is the chain for kubernetes postrouting rules.
	KubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING"

	// KubeFirewallChain is the chain for kubernetes firewall rules.
	KubeFirewallChain utiliptables.Chain = "KUBE-FIREWALL"
)

// providerRequiresNetworkingConfiguration returns whether the cloud provider
// requires special networking configuration.
func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool {
	// TODO: We should have a mechanism to say whether native cloud provider
	// is used or whether we are using overlay networking. We should return
	// true for cloud providers if they implement Routes() interface and
	// we are not using overlay networking.
	if kl.cloud == nil || kl.cloud.ProviderName() != "gce" {
		return false
	}
	_, supported := kl.cloud.Routes()
	return supported
}

// updatePodCIDR updates the pod CIDR in the runtime state if it differs
// from the current CIDR. It returns true if the pod CIDR actually changed.
func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) {
	kl.updatePodCIDRMux.Lock()
	defer kl.updatePodCIDRMux.Unlock()

	podCIDR := kl.runtimeState.podCIDR()

	if podCIDR == cidr {
		return false, nil
	}

	// kubelet -> generic runtime -> runtime shim -> network plugin
	// docker/non-cri implementations have a passthrough UpdatePodCIDR
	if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil {
		// If UpdatePodCIDR fails, the pod CIDR should in theory not have
		// changed, but it is safer to still report a change here.
		return true, fmt.Errorf("failed to update pod CIDR: %v", err)
	}

	klog.Infof("Setting Pod CIDR: %v -> %v", podCIDR, cidr)
	kl.runtimeState.setPodCIDR(cidr)
	return true, nil
}

// GetPodDNS returns DNS settings for the pod.
// This function is defined in the kubecontainer.RuntimeHelper interface, so
// we have to implement it.
func (kl *Kubelet) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
	return kl.dnsConfigurer.GetPodDNS(pod)
}
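updatePodCIDR follows a common guarded-update shape: take the lock, compare against the cached value, push the change down the stack, and only record it locally once the push succeeds. A self-contained sketch of that shape (the type and helper names here are hypothetical, not from the kubelet):

package main

import (
	"fmt"
	"sync"
)

// cidrState mirrors updatePodCIDR's structure: lock, compare against the
// cached value, push the change down, and only then record it locally.
type cidrState struct {
	mu   sync.Mutex
	cidr string
}

func (s *cidrState) update(cidr string, push func(string) error) (bool, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.cidr == cidr {
		return false, nil
	}
	if err := push(cidr); err != nil {
		// Report "changed" on failure, as the kubelet does, to stay safe.
		return true, err
	}
	s.cidr = cidr
	return true, nil
}

func main() {
	s := &cidrState{}
	changed, err := s.update("10.244.0.0/24", func(string) error { return nil })
	fmt.Println(changed, err) // true <nil>
}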
111
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_linux.go
generated
vendored
@@ -1,111 +0,0 @@
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"

	"k8s.io/klog"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)

// syncNetworkUtil ensures that the network utilities are present on the host.
// They include:
// 1. In the nat table, a KUBE-MARK-DROP rule to mark connections for dropping.
//    Marked connections will be dropped on the INPUT/OUTPUT chains in the filter table.
// 2. In the nat table, a KUBE-MARK-MASQ rule to mark connections for SNAT.
//    Marked connections will get SNAT on the POSTROUTING chain in the nat table.
func (kl *Kubelet) syncNetworkUtil() {
	if kl.iptablesMasqueradeBit < 0 || kl.iptablesMasqueradeBit > 31 {
		klog.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", kl.iptablesMasqueradeBit)
		return
	}

	if kl.iptablesDropBit < 0 || kl.iptablesDropBit > 31 {
		klog.Errorf("invalid iptables-drop-bit %v not in [0, 31]", kl.iptablesDropBit)
		return
	}

	if kl.iptablesDropBit == kl.iptablesMasqueradeBit {
		klog.Errorf("iptables-masquerade-bit %v and iptables-drop-bit %v must be different", kl.iptablesMasqueradeBit, kl.iptablesDropBit)
		return
	}

	// Set up the KUBE-MARK-DROP rules.
	dropMark := getIPTablesMark(kl.iptablesDropBit)
	if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkDropChain); err != nil {
		klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkDropChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkDropChain, "-j", "MARK", "--set-xmark", dropMark); err != nil {
		klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkDropChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureChain(utiliptables.TableFilter, KubeFirewallChain); err != nil {
		klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, KubeFirewallChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableFilter, KubeFirewallChain,
		"-m", "comment", "--comment", "kubernetes firewall for dropping marked packets",
		"-m", "mark", "--mark", dropMark,
		"-j", "DROP"); err != nil {
		klog.Errorf("Failed to ensure rule to drop packet marked by %v in %v chain %v: %v", KubeMarkDropChain, utiliptables.TableFilter, KubeFirewallChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainOutput, "-j", string(KubeFirewallChain)); err != nil {
		klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainOutput, KubeFirewallChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainInput, "-j", string(KubeFirewallChain)); err != nil {
		klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainInput, KubeFirewallChain, err)
		return
	}

	// Set up the KUBE-MARK-MASQ rules.
	masqueradeMark := getIPTablesMark(kl.iptablesMasqueradeBit)
	if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubeMarkMasqChain); err != nil {
		klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubeMarkMasqChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureChain(utiliptables.TableNAT, KubePostroutingChain); err != nil {
		klog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, KubePostroutingChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubeMarkMasqChain, "-j", "MARK", "--set-xmark", masqueradeMark); err != nil {
		klog.Errorf("Failed to ensure marking rule for %v: %v", KubeMarkMasqChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting,
		"-m", "comment", "--comment", "kubernetes postrouting rules", "-j", string(KubePostroutingChain)); err != nil {
		klog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, KubePostroutingChain, err)
		return
	}
	if _, err := kl.iptClient.EnsureRule(utiliptables.Append, utiliptables.TableNAT, KubePostroutingChain,
		"-m", "comment", "--comment", "kubernetes service traffic requiring SNAT",
		"-m", "mark", "--mark", masqueradeMark, "-j", "MASQUERADE"); err != nil {
		klog.Errorf("Failed to ensure SNAT rule for packets marked by %v in %v chain %v: %v", KubeMarkMasqChain, utiliptables.TableNAT, KubePostroutingChain, err)
		return
	}
}

// getIPTablesMark returns the fwmark given the bit.
func getIPTablesMark(bit int) string {
	value := 1 << uint(bit)
	return fmt.Sprintf("%#08x/%#08x", value, value)
}
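For reference, a standalone sketch of the fwmark formatting in getIPTablesMark. With bit 14 (the kubelet's default masquerade bit, to the best of my knowledge), the mark and the mask are the same single bit:

package main

import "fmt"

func main() {
	bit := 14
	value := 1 << uint(bit)
	// Prints "0x004000/0x004000": %#08x pads to width 8 including the 0x prefix.
	fmt.Printf("%#08x/%#08x\n", value, value)
}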
22
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_network_others.go
generated
vendored
@@ -1,22 +0,0 @@
// +build !linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

// Do nothing.
func (kl *Kubelet) syncNetworkUtil() {}
645
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status.go
generated
vendored
@@ -1,645 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"context"
	"fmt"
	"net"
	goruntime "runtime"
	"sort"
	"time"

	"k8s.io/klog"

	"k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	cloudprovider "k8s.io/cloud-provider"
	k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
	"k8s.io/kubernetes/pkg/kubelet/util"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
	taintutil "k8s.io/kubernetes/pkg/util/taints"
	volutil "k8s.io/kubernetes/pkg/volume/util"
)

// registerWithAPIServer registers the node with the cluster master. It is safe
// to call multiple times, but not concurrently (kl.registrationCompleted is
// not locked).
func (kl *Kubelet) registerWithAPIServer() {
	if kl.registrationCompleted {
		return
	}
	step := 100 * time.Millisecond

	for {
		time.Sleep(step)
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}

		node, err := kl.initialNode()
		if err != nil {
			klog.Errorf("Unable to construct v1.Node object for kubelet: %v", err)
			continue
		}

		klog.Infof("Attempting to register node %s", node.Name)
		registered := kl.tryRegisterWithAPIServer(node)
		if registered {
			klog.Infof("Successfully registered node %s", node.Name)
			kl.registrationCompleted = true
			return
		}
	}
}
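registerWithAPIServer retries on a capped exponential backoff: sleep 100 ms before the first attempt, double the step after each attempt, and clamp at 7 s. A standalone sketch of just that schedule (the actual sleeps are replaced by prints so the sketch runs instantly):

package main

import (
	"fmt"
	"time"
)

func main() {
	step := 100 * time.Millisecond
	for attempt := 1; attempt <= 9; attempt++ {
		// The kubelet sleeps for `step` before each registration attempt.
		fmt.Printf("attempt %d waits %v\n", attempt, step)
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}
	}
	// Output ends ... 6.4s, 7s, 7s: the backoff is capped, never unbounded.
}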
// tryRegisterWithAPIServer makes an attempt to register the given node with
// the API server, returning a boolean indicating whether the attempt was
// successful. If a node with the same name already exists, it reconciles the
// value of the annotation for controller-managed attach-detach of attachable
// persistent volumes for the node.
func (kl *Kubelet) tryRegisterWithAPIServer(node *v1.Node) bool {
	_, err := kl.kubeClient.CoreV1().Nodes().Create(node)
	if err == nil {
		return true
	}

	if !apierrors.IsAlreadyExists(err) {
		klog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err)
		return false
	}

	existingNode, err := kl.kubeClient.CoreV1().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
	if err != nil {
		klog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
		return false
	}
	if existingNode == nil {
		klog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName)
		return false
	}

	originalNode := existingNode.DeepCopy()
	if originalNode == nil {
		klog.Errorf("Nil %q node object", kl.nodeName)
		return false
	}

	klog.Infof("Node %s was previously registered", kl.nodeName)

	// Edge case: the node was previously registered; reconcile
	// the value of the controller-managed attach-detach
	// annotation.
	requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode)
	requiresUpdate = kl.updateDefaultLabels(node, existingNode) || requiresUpdate
	requiresUpdate = kl.reconcileExtendedResource(node, existingNode) || requiresUpdate
	if requiresUpdate {
		if _, _, err := nodeutil.PatchNodeStatus(kl.kubeClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, existingNode); err != nil {
			klog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err)
			return false
		}
	}

	return true
}

// reconcileExtendedResource zeroes out extended resource capacity during reconciliation.
func (kl *Kubelet) reconcileExtendedResource(initialNode, node *v1.Node) bool {
	requiresUpdate := false
	for k := range node.Status.Capacity {
		if v1helper.IsExtendedResourceName(k) {
			klog.Infof("Zero out resource %s capacity in existing node.", k)
			node.Status.Capacity[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
			node.Status.Allocatable[k] = *resource.NewQuantity(int64(0), resource.DecimalSI)
			requiresUpdate = true
		}
	}
	return requiresUpdate
}

// updateDefaultLabels will set the default labels on the node.
func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool {
	defaultLabels := []string{
		v1.LabelHostname,
		v1.LabelZoneFailureDomain,
		v1.LabelZoneRegion,
		v1.LabelInstanceType,
		v1.LabelOSStable,
		v1.LabelArchStable,
		kubeletapis.LabelOS,
		kubeletapis.LabelArch,
	}

	needsUpdate := false
	if existingNode.Labels == nil {
		existingNode.Labels = make(map[string]string)
	}
	// Set default labels, but make sure not to set labels with empty values.
	for _, label := range defaultLabels {
		if _, hasInitialValue := initialNode.Labels[label]; !hasInitialValue {
			continue
		}

		if existingNode.Labels[label] != initialNode.Labels[label] {
			existingNode.Labels[label] = initialNode.Labels[label]
			needsUpdate = true
		}

		if existingNode.Labels[label] == "" {
			delete(existingNode.Labels, label)
		}
	}

	return needsUpdate
}

// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
	var (
		existingCMAAnnotation    = existingNode.Annotations[volutil.ControllerManagedAttachAnnotation]
		newCMAAnnotation, newSet = node.Annotations[volutil.ControllerManagedAttachAnnotation]
	)

	if newCMAAnnotation == existingCMAAnnotation {
		return false
	}

	// If the just-constructed node and the existing node do
	// not have the same value, update the existing node with
	// the correct value of the annotation.
	if !newSet {
		klog.Info("Controller attach-detach setting changed to false; updating existing Node")
		delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation)
	} else {
		klog.Info("Controller attach-detach setting changed to true; updating existing Node")
		if existingNode.Annotations == nil {
			existingNode.Annotations = make(map[string]string)
		}
		existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] = newCMAAnnotation
	}

	return true
}

// initialNode constructs the initial v1.Node for this Kubelet, incorporating node
// labels, information from the cloud provider, and Kubelet configuration.
func (kl *Kubelet) initialNode() (*v1.Node, error) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(kl.nodeName),
			Labels: map[string]string{
				v1.LabelHostname:      kl.hostname,
				v1.LabelOSStable:      goruntime.GOOS,
				v1.LabelArchStable:    goruntime.GOARCH,
				kubeletapis.LabelOS:   goruntime.GOOS,
				kubeletapis.LabelArch: goruntime.GOARCH,
			},
		},
		Spec: v1.NodeSpec{
			Unschedulable: !kl.registerSchedulable,
		},
	}
	nodeTaints := make([]v1.Taint, 0)
	if len(kl.registerWithTaints) > 0 {
		taints := make([]v1.Taint, len(kl.registerWithTaints))
		for i := range kl.registerWithTaints {
			if err := k8s_api_v1.Convert_core_Taint_To_v1_Taint(&kl.registerWithTaints[i], &taints[i], nil); err != nil {
				return nil, err
			}
		}
		nodeTaints = append(nodeTaints, taints...)
	}

	unschedulableTaint := v1.Taint{
		Key:    schedulerapi.TaintNodeUnschedulable,
		Effect: v1.TaintEffectNoSchedule,
	}

	// If TaintNodesByCondition is enabled, taint the node with TaintNodeUnschedulable
	// when initializing the node to avoid a race condition; see #63897 for more detail.
	if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) {
		if node.Spec.Unschedulable &&
			!taintutil.TaintExists(nodeTaints, &unschedulableTaint) {
			nodeTaints = append(nodeTaints, unschedulableTaint)
		}
	}

	if kl.externalCloudProvider {
		taint := v1.Taint{
			Key:    schedulerapi.TaintExternalCloudProvider,
			Value:  "true",
			Effect: v1.TaintEffectNoSchedule,
		}

		nodeTaints = append(nodeTaints, taint)
	}
	if len(nodeTaints) > 0 {
		node.Spec.Taints = nodeTaints
	}
	// Initially, set NodeNetworkUnavailable to true.
	if kl.providerRequiresNetworkingConfiguration() {
		node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
			Type:               v1.NodeNetworkUnavailable,
			Status:             v1.ConditionTrue,
			Reason:             "NoRouteCreated",
			Message:            "Node created without a route",
			LastTransitionTime: metav1.NewTime(kl.clock.Now()),
		})
	}

	if kl.enableControllerAttachDetach {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}

		klog.Infof("Setting node annotation to enable volume controller attach/detach")
		node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true"
	} else {
		klog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
	}

	if kl.keepTerminatedPodVolumes {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		klog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node")
		node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true"
	}

	// @question: should this be placed after the call to the cloud provider, which also applies labels?
	for k, v := range kl.nodeLabels {
		if cv, found := node.ObjectMeta.Labels[k]; found {
			klog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv)
		}
		node.ObjectMeta.Labels[k] = v
	}

	if kl.providerID != "" {
		node.Spec.ProviderID = kl.providerID
	}

	if kl.cloud != nil {
		instances, ok := kl.cloud.Instances()
		if !ok {
			return nil, fmt.Errorf("failed to get instances from cloud provider")
		}

		// TODO: We can't assume that the node has credentials to talk to the
		// cloudprovider from arbitrary nodes. At most, we should talk to a
		// local metadata server here.
		var err error
		if node.Spec.ProviderID == "" {
			node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(context.TODO(), kl.cloud, kl.nodeName)
			if err != nil {
				return nil, err
			}
		}

		instanceType, err := instances.InstanceType(context.TODO(), kl.nodeName)
		if err != nil {
			return nil, err
		}
		if instanceType != "" {
			klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelInstanceType, instanceType)
			node.ObjectMeta.Labels[v1.LabelInstanceType] = instanceType
		}
		// If the cloud has zone information, label the node with it.
		zones, ok := kl.cloud.Zones()
		if ok {
			zone, err := zones.GetZone(context.TODO())
			if err != nil {
				return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
			}
			if zone.FailureDomain != "" {
				klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneFailureDomain, zone.FailureDomain)
				node.ObjectMeta.Labels[v1.LabelZoneFailureDomain] = zone.FailureDomain
			}
			if zone.Region != "" {
				klog.Infof("Adding node label from cloud provider: %s=%s", v1.LabelZoneRegion, zone.Region)
				node.ObjectMeta.Labels[v1.LabelZoneRegion] = zone.Region
			}
		}
	}

	kl.setNodeStatus(node)

	return node, nil
}

// syncNodeStatus should be called periodically from a goroutine.
// It synchronizes node status to master if there is any change or enough time
// passed from the last sync, registering the kubelet first if necessary.
func (kl *Kubelet) syncNodeStatus() {
	kl.syncNodeStatusMux.Lock()
	defer kl.syncNodeStatusMux.Unlock()

	if kl.kubeClient == nil || kl.heartbeatClient == nil {
		return
	}
	if kl.registerNode {
		// This will exit immediately if it doesn't need to do anything.
		kl.registerWithAPIServer()
	}
	if err := kl.updateNodeStatus(); err != nil {
		klog.Errorf("Unable to update node status: %v", err)
	}
}

// updateNodeStatus updates node status to master with retries if there is any
// change or enough time passed from the last sync.
func (kl *Kubelet) updateNodeStatus() error {
	klog.V(5).Infof("Updating node status")
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := kl.tryUpdateNodeStatus(i); err != nil {
			if i > 0 && kl.onRepeatedHeartbeatFailure != nil {
				kl.onRepeatedHeartbeatFailure()
			}
			klog.Errorf("Error updating node status, will retry: %v", err)
		} else {
			return nil
		}
	}
	return fmt.Errorf("update node status exceeds retry count")
}

// tryUpdateNodeStatus tries to update node status to master if there is any
// change or enough time passed from the last sync.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from the
	// apiserver cache (the data might be slightly delayed, but that doesn't
	// seem to cause more conflicts - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	opts := metav1.GetOptions{}
	if tryNumber == 0 {
		util.FromApiserverCache(&opts)
	}
	node, err := kl.heartbeatClient.CoreV1().Nodes().Get(string(kl.nodeName), opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}

	originalNode := node.DeepCopy()
	if originalNode == nil {
		return fmt.Errorf("nil %q node object", kl.nodeName)
	}

	podCIDRChanged := false
	if node.Spec.PodCIDR != "" {
		// Pod CIDR could have been updated before, so we cannot rely on
		// node.Spec.PodCIDR being non-empty. We also need to know if the
		// pod CIDR actually changed.
		if podCIDRChanged, err = kl.updatePodCIDR(node.Spec.PodCIDR); err != nil {
			klog.Errorf(err.Error())
		}
	}

	kl.setNodeStatus(node)

	now := kl.clock.Now()
	if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) && now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) {
		if !podCIDRChanged && !nodeStatusHasChanged(&originalNode.Status, &node.Status) {
			// We must mark the volumes as ReportedInUse in volume manager's dsw even
			// if no changes were made to the node status (no volumes were added or removed
			// from the VolumesInUse list).
			//
			// The reason is that on a kubelet restart, the volume manager's dsw is
			// repopulated and the volume ReportedInUse is initialized to false, while the
			// VolumesInUse list from the Node object still contains the state from the
			// previous kubelet instantiation.
			//
			// Once the volumes are added to the dsw, the ReportedInUse field needs to be
			// synced from the VolumesInUse list in the Node.Status.
			//
			// The MarkVolumesAsReportedInUse() call cannot be performed in dsw directly
			// because it does not have access to the Node object.
			// This also cannot be populated on node status manager init because the volume
			// may not have been added to dsw at that time.
			kl.volumeManager.MarkVolumesAsReportedInUse(node.Status.VolumesInUse)
			return nil
		}
	}

	// Patch the current status on the API server.
	updatedNode, _, err := nodeutil.PatchNodeStatus(kl.heartbeatClient.CoreV1(), types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return err
	}
	kl.lastStatusReportTime = now
	kl.setLastObservedNodeAddresses(updatedNode.Status.Addresses)
	// If the update finishes successfully, mark the volumes in use as reported
	// in use, to indicate those volumes are already updated in the node's status.
	kl.volumeManager.MarkVolumesAsReportedInUse(updatedNode.Status.VolumesInUse)
	return nil
}

// recordNodeStatusEvent records an event of the given type with the given
// message for the node.
func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
	klog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
	// TODO: This requires a transaction, either both node status is updated
	// and event is recorded or neither should happen, see issue #6055.
	kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
}

// recordEvent records an event for this node; the Kubelet's nodeRef is passed to the recorder.
func (kl *Kubelet) recordEvent(eventType, event, message string) {
	kl.recorder.Eventf(kl.nodeRef, eventType, event, message)
}

// recordNodeSchedulableEvent records an event whenever the node's schedulable state changes.
func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error {
	kl.lastNodeUnschedulableLock.Lock()
	defer kl.lastNodeUnschedulableLock.Unlock()
	if kl.lastNodeUnschedulable != node.Spec.Unschedulable {
		if node.Spec.Unschedulable {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
		} else {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeSchedulable)
		}
		kl.lastNodeUnschedulable = node.Spec.Unschedulable
	}
	return nil
}

// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
// TODO(madhusudancs): Simplify the logic for setting node conditions and
// refactor the node status condition code out to a different file.
func (kl *Kubelet) setNodeStatus(node *v1.Node) {
	for i, f := range kl.setNodeStatusFuncs {
		klog.V(5).Infof("Setting node status at position %v", i)
		if err := f(node); err != nil {
			klog.Warningf("Failed to set some node status fields: %s", err)
		}
	}
}

func (kl *Kubelet) setLastObservedNodeAddresses(addresses []v1.NodeAddress) {
	kl.lastObservedNodeAddressesMux.Lock()
	defer kl.lastObservedNodeAddressesMux.Unlock()
	kl.lastObservedNodeAddresses = addresses
}

func (kl *Kubelet) getLastObservedNodeAddresses() []v1.NodeAddress {
	kl.lastObservedNodeAddressesMux.Lock()
	defer kl.lastObservedNodeAddressesMux.Unlock()
	return kl.lastObservedNodeAddresses
}

// defaultNodeStatusFuncs is a factory that generates the default set of
// setNodeStatus funcs.
func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
	// if cloud is not nil, we expect the cloud resource sync manager to exist
	var nodeAddressesFunc func() ([]v1.NodeAddress, error)
	if kl.cloud != nil {
		nodeAddressesFunc = kl.cloudResourceSyncManager.NodeAddresses
	}
	var validateHostFunc func() error
	if kl.appArmorValidator != nil {
		validateHostFunc = kl.appArmorValidator.ValidateHost
	}
	var setters []func(n *v1.Node) error
	setters = append(setters,
		nodestatus.NodeAddress(kl.nodeIP, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc),
		nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity,
			kl.containerManager.GetDevicePluginResourceCapacity, kl.containerManager.GetNodeAllocatableReservation, kl.recordEvent),
		nodestatus.VersionInfo(kl.cadvisor.VersionInfo, kl.containerRuntime.Type, kl.containerRuntime.Version),
		nodestatus.DaemonEndpoints(kl.daemonEndpoints),
		nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList),
		nodestatus.GoRuntime(),
	)
	if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
		setters = append(setters, nodestatus.VolumeLimits(kl.volumePluginMgr.ListVolumePluginWithLimits))
	}
	setters = append(setters,
		nodestatus.MemoryPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderMemoryPressure, kl.recordNodeStatusEvent),
		nodestatus.DiskPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderDiskPressure, kl.recordNodeStatusEvent),
		nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
		nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors, validateHostFunc, kl.containerManager.Status, kl.recordNodeStatusEvent),
		nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
		nodestatus.RemoveOutOfDiskCondition(),
		// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
		// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
		// these side-effects by decoupling the decisions to send events and partial status recording
		// from the Node setters.
		kl.recordNodeSchedulableEvent,
	)
	return setters
}

// validateNodeIP validates that the given node IP belongs to the current host.
func validateNodeIP(nodeIP net.IP) error {
	// Honor IP limitations set in setNodeStatus()
	if nodeIP.To4() == nil && nodeIP.To16() == nil {
		return fmt.Errorf("nodeIP must be a valid IP address")
	}
	if nodeIP.IsLoopback() {
		return fmt.Errorf("nodeIP can't be loopback address")
	}
	if nodeIP.IsMulticast() {
		return fmt.Errorf("nodeIP can't be a multicast address")
	}
	if nodeIP.IsLinkLocalUnicast() {
		return fmt.Errorf("nodeIP can't be a link-local unicast address")
	}
	if nodeIP.IsUnspecified() {
		return fmt.Errorf("nodeIP can't be an all zeros address")
	}

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip != nil && ip.Equal(nodeIP) {
			return nil
		}
	}
	return fmt.Errorf("Node IP: %q not found in the host's network interfaces", nodeIP.String())
}

// nodeStatusHasChanged compares the original node's status and the current
// status and returns true if any change happened. The heartbeat timestamp is ignored.
func nodeStatusHasChanged(originalStatus *v1.NodeStatus, status *v1.NodeStatus) bool {
	if originalStatus == nil && status == nil {
		return false
	}
	if originalStatus == nil || status == nil {
		return true
	}

	// Compare node conditions here because we need to ignore the heartbeat timestamp.
	if nodeConditionsHaveChanged(originalStatus.Conditions, status.Conditions) {
		return true
	}

	// Compare other fields of NodeStatus.
	originalStatusCopy := originalStatus.DeepCopy()
	statusCopy := status.DeepCopy()
	originalStatusCopy.Conditions = nil
	statusCopy.Conditions = nil
	return !apiequality.Semantic.DeepEqual(originalStatusCopy, statusCopy)
}

// nodeConditionsHaveChanged compares the original node's conditions and the
// current conditions and returns true if any change happened. The heartbeat
// timestamp is ignored.
func nodeConditionsHaveChanged(originalConditions []v1.NodeCondition, conditions []v1.NodeCondition) bool {
	if len(originalConditions) != len(conditions) {
		return true
	}

	originalConditionsCopy := make([]v1.NodeCondition, 0, len(originalConditions))
	originalConditionsCopy = append(originalConditionsCopy, originalConditions...)
	conditionsCopy := make([]v1.NodeCondition, 0, len(conditions))
	conditionsCopy = append(conditionsCopy, conditions...)

	sort.SliceStable(originalConditionsCopy, func(i, j int) bool { return originalConditionsCopy[i].Type < originalConditionsCopy[j].Type })
	sort.SliceStable(conditionsCopy, func(i, j int) bool { return conditionsCopy[i].Type < conditionsCopy[j].Type })

	replacedHeartbeatTime := metav1.Time{}
	for i := range conditionsCopy {
		originalConditionsCopy[i].LastHeartbeatTime = replacedHeartbeatTime
		conditionsCopy[i].LastHeartbeatTime = replacedHeartbeatTime
		if !apiequality.Semantic.DeepEqual(&originalConditionsCopy[i], &conditionsCopy[i]) {
			return true
		}
	}
	return false
}
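The "zero the heartbeat, then deep-compare" trick above is what lets a heartbeat-only update skip the status PATCH. A self-contained sketch of the same idea on a miniature stand-in type (reflect.DeepEqual here plays the role of apiequality.Semantic.DeepEqual):

package main

import (
	"fmt"
	"reflect"
	"time"
)

// condition is a stand-in for v1.NodeCondition, reduced to the fields
// that matter for the comparison.
type condition struct {
	Type              string
	Status            string
	LastHeartbeatTime time.Time
}

// changedIgnoringHeartbeat zeroes the heartbeat timestamps on copies of the
// inputs before comparing, so a heartbeat-only update reports "no change".
func changedIgnoringHeartbeat(a, b condition) bool {
	a.LastHeartbeatTime = time.Time{}
	b.LastHeartbeatTime = time.Time{}
	return !reflect.DeepEqual(a, b)
}

func main() {
	now := time.Now()
	old := condition{Type: "Ready", Status: "True", LastHeartbeatTime: now}
	cur := condition{Type: "Ready", Status: "True", LastHeartbeatTime: now.Add(10 * time.Second)}
	fmt.Println(changedIgnoringHeartbeat(old, cur)) // false: only the heartbeat moved
}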
1768
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_pods.go
generated
vendored
File diff suppressed because it is too large
57
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_resources.go
generated
vendored
@@ -1,57 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"

	"k8s.io/klog"

	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/api/v1/resource"
)

// defaultPodLimitsForDownwardAPI copies the input pod, and optional container,
// and applies default resource limits. It returns a copy of the input pod,
// and a copy of the input container (if specified) with default limits
// applied. If a container has no limit specified, the limit defaults to
// the node allocatable.
// TODO: if/when we have pod level resources, we need to update this function
// to use those limits instead of node allocatable.
func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) {
	if pod == nil {
		return nil, nil, fmt.Errorf("invalid input, pod cannot be nil")
	}

	node, err := kl.getNodeAnyWay()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to find node object, expected a node")
	}
	allocatable := node.Status.Allocatable
	klog.Infof("allocatable: %v", allocatable)
	outputPod := pod.DeepCopy()
	for idx := range outputPod.Spec.Containers {
		resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)
	}

	var outputContainer *v1.Container
	if container != nil {
		outputContainer = container.DeepCopy()
		resource.MergeContainerResourceLimits(outputContainer, allocatable)
	}
	return outputPod, outputContainer, nil
}
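The merge rule is simply "any limit the container does not declare falls back to the node's allocatable". A self-contained sketch of that rule on plain maps (quantities shown as strings here; MergeContainerResourceLimits operates on v1.ResourceList in the real code):

package main

import "fmt"

func main() {
	// Limits declared on the container: no cpu limit.
	limits := map[string]string{"memory": "128Mi"}
	// Node allocatable: the fallback for anything missing.
	allocatable := map[string]string{"cpu": "4", "memory": "16Gi"}

	for name, qty := range allocatable {
		if _, declared := limits[name]; !declared {
			limits[name] = qty
		}
	}
	fmt.Println(limits) // map[cpu:4 memory:128Mi]: cpu defaulted, memory kept
}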
163
vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go
generated
vendored
@@ -1,163 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/klog"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/util/removeall"
	"k8s.io/kubernetes/pkg/volume"
	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)

// ListVolumesForPod returns a map of the mounted volumes for the given pod.
// The key in the map is the OuterVolumeSpecName (i.e. pod.Spec.Volumes[x].Name).
func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
	volumesToReturn := make(map[string]volume.Volume)
	podVolumes := kl.volumeManager.GetMountedVolumesForPod(
		volumetypes.UniquePodName(podUID))
	for outerVolumeSpecName, volume := range podVolumes {
		// TODO: volume.Mounter could be nil if the volume object was recovered
		// by the reconciler's sync state process. PR 33616 will fix this problem
		// by creating the Mounter object when recovering volume state.
		if volume.Mounter == nil {
			continue
		}
		volumesToReturn[outerVolumeSpecName] = volume.Mounter
	}

	return volumesToReturn, len(volumesToReturn) > 0
}

// podVolumesExist checks with the volume manager and returns true if any of
// the volumes for the specified pod are mounted.
func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
	if mountedVolumes :=
		kl.volumeManager.GetMountedVolumesForPod(
			volumetypes.UniquePodName(podUID)); len(mountedVolumes) > 0 {
		return true
	}
	// TODO: This checks pod volume paths and whether they are mounted. If the
	// check returns an error, podVolumesExist will return true, which means we
	// consider that volumes might exist and further checking is required.
	// Some volume plugins, such as flexvolume, might not have mounts. See issue #61229.
	volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID)
	if err != nil {
		klog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err)
		return true
	}
	if len(volumePaths) > 0 {
		klog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths)
		return true
	}

	return false
}

// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
// and volume options and then creates a Mounter.
// Returns a valid mounter or an error.
func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
	}
	physicalMounter, err := plugin.NewMounter(spec, pod, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
	}
	klog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name())
	return physicalMounter, nil
}

// cleanupOrphanedPodDirs removes the volumes of pods that should not be
// running and that have no containers running. Note that we roll up logs here since it runs in the main loop.
func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
	allPods := sets.NewString()
	for _, pod := range pods {
		allPods.Insert(string(pod.UID))
	}
	for _, pod := range runningPods {
		allPods.Insert(string(pod.ID))
	}

	found, err := kl.listPodsFromDisk()
	if err != nil {
		return err
	}

	orphanRemovalErrors := []error{}
	orphanVolumeErrors := []error{}

	for _, uid := range found {
		if allPods.Has(string(uid)) {
			continue
		}
		// If volumes have not been unmounted/detached, do not delete the directory.
		// Doing so may result in corruption of data.
		// TODO: getMountedVolumePathListFromDisk() call may be redundant with
		// kl.getPodVolumePathListFromDisk(). Can this be cleaned up?
		if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
			klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid)
			continue
		}
		// If there are still volume directories, do not delete the directory.
		volumePaths, err := kl.getPodVolumePathListFromDisk(uid)
		if err != nil {
			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading volume dir from disk", uid, err))
			continue
		}
		if len(volumePaths) > 0 {
			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid))
			continue
		}

		// If there are any volume-subpaths, do not clean up the directories.
		volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid)
		if err != nil {
			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err))
			continue
		}
		if volumeSubpathExists {
			orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume subpaths are still present on disk", uid))
			continue
		}

		klog.V(3).Infof("Orphaned pod %q found, removing", uid)
		if err := removeall.RemoveAllOneFilesystem(kl.mounter, kl.getPodDir(uid)); err != nil {
			klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err)
			orphanRemovalErrors = append(orphanRemovalErrors, err)
		}
	}

	logSpew := func(errs []error) {
		if len(errs) > 0 {
			klog.Errorf("%v : There were a total of %v errors similar to this. Turn up verbosity to see them.", errs[0], len(errs))
			for _, err := range errs {
				klog.V(5).Infof("Orphan pod: %v", err)
			}
		}
	}
	logSpew(orphanVolumeErrors)
	logSpew(orphanRemovalErrors)
	return utilerrors.NewAggregate(orphanRemovalErrors)
}
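Note that only the removal errors feed the returned aggregate; the volume errors are logged but do not fail the sweep. utilerrors.NewAggregate behaves, in spirit, like the standard library's errors.Join: nil for an empty slice, one combined error otherwise. A std-lib-only sketch of that contract:

package main

import (
	"errors"
	"fmt"
)

func main() {
	removalErrors := []error{
		errors.New(`orphaned pod "a" dir could not be removed`),
		errors.New(`orphaned pod "b" dir could not be removed`),
	}
	// nil when the slice is empty, a single combined error otherwise.
	err := errors.Join(removalErrors...)
	fmt.Println(err)
}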
74
vendor/k8s.io/kubernetes/pkg/kubelet/oom_watcher.go
generated
vendored
@@ -1,74 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"github.com/google/cadvisor/events"
	cadvisorapi "github.com/google/cadvisor/info/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
)

// OOMWatcher defines the interface of OOM watchers.
type OOMWatcher interface {
	Start(ref *v1.ObjectReference) error
}

type realOOMWatcher struct {
	cadvisor cadvisor.Interface
	recorder record.EventRecorder
}

// NewOOMWatcher creates and initializes an OOMWatcher based on parameters.
func NewOOMWatcher(cadvisor cadvisor.Interface, recorder record.EventRecorder) OOMWatcher {
	return &realOOMWatcher{
		cadvisor: cadvisor,
		recorder: recorder,
	}
}

const systemOOMEvent = "SystemOOM"

// Start watches cadvisor for system OOMs and records an event for every system OOM encountered.
func (ow *realOOMWatcher) Start(ref *v1.ObjectReference) error {
	request := events.Request{
		EventType: map[cadvisorapi.EventType]bool{
			cadvisorapi.EventOom: true,
		},
		ContainerName:        "/",
		IncludeSubcontainers: false,
	}
	eventChannel, err := ow.cadvisor.WatchEvents(&request)
	if err != nil {
		return err
	}

	go func() {
		defer runtime.HandleCrash()

		for event := range eventChannel.GetChannel() {
			klog.V(2).Infof("Got sys oom event from cadvisor: %v", event)
			ow.recorder.PastEventf(ref, metav1.Time{Time: event.Timestamp}, v1.EventTypeWarning, systemOOMEvent, "System OOM encountered")
		}
		klog.Errorf("Unexpectedly stopped receiving OOM notifications from cAdvisor")
	}()
	return nil
}
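The watcher's shape is: subscribe once, then consume events on a goroutine until the source closes the channel, logging when the stream ends unexpectedly. A self-contained sketch of that shape with plain channels (the event source and timing here are illustrative, not cadvisor's API):

package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan string, 4)
	go func() {
		for ev := range events {
			fmt.Println("recording event:", ev) // recorder.PastEventf in the real code
		}
		fmt.Println("unexpectedly stopped receiving notifications")
	}()

	events <- "System OOM encountered"
	close(events)
	time.Sleep(50 * time.Millisecond) // let the consumer drain before exiting
}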
112
vendor/k8s.io/kubernetes/pkg/kubelet/pod_container_deletor.go
generated
vendored
@@ -1,112 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"sort"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

const (
	// The limit on the number of buffered container deletion requests.
	// This number is a bit arbitrary and may be adjusted in the future.
	containerDeletorBufferLimit = 50
)

type containerStatusbyCreatedList []*kubecontainer.ContainerStatus

type podContainerDeletor struct {
	worker           chan<- kubecontainer.ContainerID
	containersToKeep int
}

func (a containerStatusbyCreatedList) Len() int           { return len(a) }
func (a containerStatusbyCreatedList) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a containerStatusbyCreatedList) Less(i, j int) bool { return a[i].CreatedAt.After(a[j].CreatedAt) }

func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int) *podContainerDeletor {
	buffer := make(chan kubecontainer.ContainerID, containerDeletorBufferLimit)
	go wait.Until(func() {
		for {
			id := <-buffer
			runtime.DeleteContainer(id)
		}
	}, 0, wait.NeverStop)

	return &podContainerDeletor{
		worker:           buffer,
		containersToKeep: containersToKeep,
	}
}

// getContainersToDeleteInPod returns the exited containers in a pod whose name matches the name
// inferred from filterContainerID (if not empty), ordered by creation time from latest to earliest.
// If filterContainerID is empty, all dead containers in the pod are returned.
func getContainersToDeleteInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, containersToKeep int) containerStatusbyCreatedList {
	matchedContainer := func(filterContainerId string, podStatus *kubecontainer.PodStatus) *kubecontainer.ContainerStatus {
		if filterContainerId == "" {
			return nil
		}
		for _, containerStatus := range podStatus.ContainerStatuses {
			if containerStatus.ID.ID == filterContainerId {
				return containerStatus
			}
		}
		return nil
	}(filterContainerID, podStatus)

	if filterContainerID != "" && matchedContainer == nil {
		klog.Warningf("Container %q not found in pod's containers", filterContainerID)
		return containerStatusbyCreatedList{}
	}

	// Find the exited containers whose name matches the name of the container whose id is filterContainerID.
	var candidates containerStatusbyCreatedList
	for _, containerStatus := range podStatus.ContainerStatuses {
		if containerStatus.State != kubecontainer.ContainerStateExited {
			continue
		}
		if matchedContainer == nil || matchedContainer.Name == containerStatus.Name {
			candidates = append(candidates, containerStatus)
		}
	}

	if len(candidates) <= containersToKeep {
		return containerStatusbyCreatedList{}
	}
	sort.Sort(candidates)
	return candidates[containersToKeep:]
}

// deleteContainersInPod issues container deletion requests for containers selected by getContainersToDeleteInPod.
func (p *podContainerDeletor) deleteContainersInPod(filterContainerID string, podStatus *kubecontainer.PodStatus, removeAll bool) {
	containersToKeep := p.containersToKeep
	if removeAll {
		containersToKeep = 0
		filterContainerID = ""
	}

	for _, candidate := range getContainersToDeleteInPod(filterContainerID, podStatus, containersToKeep) {
		select {
		case p.worker <- candidate.ID:
		default:
			klog.Warningf("Failed to issue the request to remove container %v", candidate.ID)
		}
	}
}
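The retention rule reduces to: sort newest-first, keep the first N, delete the rest. A self-contained sketch of that rule on bare timestamps (the dates are illustrative):

package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	keep := 2 // retain the two most recent exited containers
	created := []time.Time{
		time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC),
		time.Date(2019, 3, 1, 0, 0, 0, 0, time.UTC),
		time.Date(2019, 2, 1, 0, 0, 0, 0, time.UTC),
	}
	// Newest first, matching containerStatusbyCreatedList's Less.
	sort.Slice(created, func(i, j int) bool { return created[i].After(created[j]) })
	if len(created) > keep {
		fmt.Println("delete:", created[keep:]) // everything older than the newest `keep`
	}
}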
340
vendor/k8s.io/kubernetes/pkg/kubelet/pod_workers.go
generated
vendored
@@ -1,340 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"
	"strings"
	"sync"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/eviction"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
	"k8s.io/kubernetes/pkg/kubelet/util/queue"
)

// OnCompleteFunc is a function that is invoked when an operation completes.
// If err is non-nil, the operation did not complete successfully.
type OnCompleteFunc func(err error)

// PodStatusFunc is a function that is invoked to generate a pod status.
type PodStatusFunc func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus

// KillPodOptions are options when performing a pod update whose update type is kill.
type KillPodOptions struct {
	// PodStatusFunc is the function to invoke to set pod status in response to a kill request.
	PodStatusFunc PodStatusFunc
	// PodTerminationGracePeriodSecondsOverride is an optional override to use if a pod is being killed as part of a kill operation.
	PodTerminationGracePeriodSecondsOverride *int64
}

// UpdatePodOptions is an options struct to pass to an UpdatePod operation.
type UpdatePodOptions struct {
	// pod to update
	Pod *v1.Pod
	// the mirror pod for the pod to update, if it is a static pod
	MirrorPod *v1.Pod
	// the type of update (create, update, sync, kill)
	UpdateType kubetypes.SyncPodType
	// optional callback function invoked when the operation completes.
	// This callback is not guaranteed to be completed, since a pod worker may
	// drop update requests while it is fulfilling a previous request. It is
	// only guaranteed to be invoked in response to a kill pod request, which
	// is always delivered.
	OnCompleteFunc OnCompleteFunc
	// if update type is kill, use the specified options to kill the pod.
	KillPodOptions *KillPodOptions
}
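For orientation, a sketch of how a kill update could be assembled from these options. This is illustrative only: the grace period value and the callback are made up, and `pod` and `podWorkers` are assumed to already be in scope inside the kubelet package.

// Hypothetical usage sketch; not from the source.
gracePeriod := int64(30) // illustrative override, not a kubelet default
killOpts := &UpdatePodOptions{
	Pod:        pod,
	UpdateType: kubetypes.SyncPodKill,
	// Kill requests are always delivered, so this callback will run.
	OnCompleteFunc: func(err error) {
		if err != nil {
			klog.Errorf("failed to kill pod %q: %v", pod.Name, err)
		}
	},
	KillPodOptions: &KillPodOptions{
		PodTerminationGracePeriodSecondsOverride: &gracePeriod,
	},
}
podWorkers.UpdatePod(killOpts)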
// PodWorkers is an abstract interface for testability.
type PodWorkers interface {
	UpdatePod(options *UpdatePodOptions)
	ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty)
	ForgetWorker(uid types.UID)
}

// syncPodOptions provides the arguments to a SyncPod operation.
type syncPodOptions struct {
	// the mirror pod for the pod to sync, if it is a static pod
	mirrorPod *v1.Pod
	// pod to sync
	pod *v1.Pod
	// the type of update (create, update, sync)
	updateType kubetypes.SyncPodType
	// the current status
	podStatus *kubecontainer.PodStatus
	// if update type is kill, use the specified options to kill the pod.
	killPodOptions *KillPodOptions
}

// syncPodFnType is the function to invoke to perform a sync.
type syncPodFnType func(options syncPodOptions) error

const (
	// jitter factor for resyncInterval
	workerResyncIntervalJitterFactor = 0.5

	// jitter factor for backOffPeriod and backOffOnTransientErrorPeriod
	workerBackOffPeriodJitterFactor = 0.5

	// backoff period when a transient error occurs.
	backOffOnTransientErrorPeriod = time.Second
)

type podWorkers struct {
	// Protects all per-worker fields.
	podLock sync.Mutex

	// Tracks all running per-pod goroutines - each per-pod goroutine will
	// process updates received through its corresponding channel.
	podUpdates map[types.UID]chan UpdatePodOptions
	// Tracks the current state of per-pod goroutines.
	// Currently, all update requests for a given pod that arrive while
	// another update of that pod is being processed are ignored.
	isWorking map[types.UID]bool
	// Tracks the last undelivered work item for this pod - a work item is
	// undelivered if it comes in while the worker is working.
	lastUndeliveredWorkUpdate map[types.UID]UpdatePodOptions

	workQueue queue.WorkQueue

	// This function is run to sync the desired state of the pod.
	// NOTE: This function has to be thread-safe - it can be called for
	// different pods at the same time.
	syncPodFn syncPodFnType

	// The EventRecorder to use
	recorder record.EventRecorder

	// backOffPeriod is the duration to back off when there is a sync error.
	backOffPeriod time.Duration

	// resyncInterval is the duration to wait until the next sync.
	resyncInterval time.Duration

	// podCache stores kubecontainer.PodStatus for all pods.
	podCache kubecontainer.Cache
}

func newPodWorkers(syncPodFn syncPodFnType, recorder record.EventRecorder, workQueue queue.WorkQueue,
	resyncInterval, backOffPeriod time.Duration, podCache kubecontainer.Cache) *podWorkers {
	return &podWorkers{
		podUpdates:                map[types.UID]chan UpdatePodOptions{},
		isWorking:                 map[types.UID]bool{},
		lastUndeliveredWorkUpdate: map[types.UID]UpdatePodOptions{},
		syncPodFn:                 syncPodFn,
		recorder:                  recorder,
		workQueue:                 workQueue,
		resyncInterval:            resyncInterval,
		backOffPeriod:             backOffPeriod,
		podCache:                  podCache,
	}
}

func (p *podWorkers) managePodLoop(podUpdates <-chan UpdatePodOptions) {
	var lastSyncTime time.Time
	for update := range podUpdates {
		err := func() error {
			podUID := update.Pod.UID
			// This is a blocking call that would return only if the cache
			// has an entry for the pod that is newer than minRuntimeCache
			// Time. This ensures the worker doesn't start syncing until
			// after the cache is at least newer than the finished time of
			// the previous sync.
			status, err := p.podCache.GetNewerThan(podUID, lastSyncTime)
|
||||
if err != nil {
|
||||
// This is the legacy event thrown by manage pod loop
|
||||
// all other events are now dispatched from syncPodFn
|
||||
p.recorder.Eventf(update.Pod, v1.EventTypeWarning, events.FailedSync, "error determining status: %v", err)
|
||||
return err
|
||||
}
|
||||
err = p.syncPodFn(syncPodOptions{
|
||||
mirrorPod: update.MirrorPod,
|
||||
pod: update.Pod,
|
||||
podStatus: status,
|
||||
killPodOptions: update.KillPodOptions,
|
||||
updateType: update.UpdateType,
|
||||
})
|
||||
lastSyncTime = time.Now()
|
||||
return err
|
||||
}()
|
||||
// notify the call-back function if the operation succeeded or not
|
||||
if update.OnCompleteFunc != nil {
|
||||
update.OnCompleteFunc(err)
|
||||
}
|
||||
if err != nil {
|
||||
// IMPORTANT: we do not log errors here, the syncPodFn is responsible for logging errors
|
||||
klog.Errorf("Error syncing pod %s (%q), skipping: %v", update.Pod.UID, format.Pod(update.Pod), err)
|
||||
}
|
||||
p.wrapUp(update.Pod.UID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply the new setting to the specified pod.
|
||||
// If the options provide an OnCompleteFunc, the function is invoked if the update is accepted.
|
||||
// Update requests are ignored if a kill pod request is pending.
|
||||
func (p *podWorkers) UpdatePod(options *UpdatePodOptions) {
|
||||
pod := options.Pod
|
||||
uid := pod.UID
|
||||
var podUpdates chan UpdatePodOptions
|
||||
var exists bool
|
||||
|
||||
p.podLock.Lock()
|
||||
defer p.podLock.Unlock()
|
||||
if podUpdates, exists = p.podUpdates[uid]; !exists {
|
||||
// We need to have a buffer here, because checkForUpdates() method that
|
||||
// puts an update into channel is called from the same goroutine where
|
||||
// the channel is consumed. However, it is guaranteed that in such case
|
||||
// the channel is empty, so buffer of size 1 is enough.
|
||||
podUpdates = make(chan UpdatePodOptions, 1)
|
||||
p.podUpdates[uid] = podUpdates
|
||||
|
||||
// Creating a new pod worker either means this is a new pod, or that the
|
||||
// kubelet just restarted. In either case the kubelet is willing to believe
|
||||
// the status of the pod for the first pod worker sync. See corresponding
|
||||
// comment in syncPod.
|
||||
go func() {
|
||||
defer runtime.HandleCrash()
|
||||
p.managePodLoop(podUpdates)
|
||||
}()
|
||||
}
|
||||
if !p.isWorking[pod.UID] {
|
||||
p.isWorking[pod.UID] = true
|
||||
podUpdates <- *options
|
||||
} else {
|
||||
// if a request to kill a pod is pending, we do not let anything overwrite that request.
|
||||
update, found := p.lastUndeliveredWorkUpdate[pod.UID]
|
||||
if !found || update.UpdateType != kubetypes.SyncPodKill {
|
||||
p.lastUndeliveredWorkUpdate[pod.UID] = *options
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *podWorkers) removeWorker(uid types.UID) {
|
||||
if ch, ok := p.podUpdates[uid]; ok {
|
||||
close(ch)
|
||||
delete(p.podUpdates, uid)
|
||||
// If there is an undelivered work update for this pod we need to remove it
|
||||
// since per-pod goroutine won't be able to put it to the already closed
|
||||
// channel when it finishes processing the current work update.
|
||||
if _, cached := p.lastUndeliveredWorkUpdate[uid]; cached {
|
||||
delete(p.lastUndeliveredWorkUpdate, uid)
|
||||
}
|
||||
}
|
||||
}
|
||||
func (p *podWorkers) ForgetWorker(uid types.UID) {
|
||||
p.podLock.Lock()
|
||||
defer p.podLock.Unlock()
|
||||
p.removeWorker(uid)
|
||||
}
|
||||
|
||||
func (p *podWorkers) ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty) {
|
||||
p.podLock.Lock()
|
||||
defer p.podLock.Unlock()
|
||||
for key := range p.podUpdates {
|
||||
if _, exists := desiredPods[key]; !exists {
|
||||
p.removeWorker(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *podWorkers) wrapUp(uid types.UID, syncErr error) {
|
||||
// Requeue the last update if the last sync returned error.
|
||||
switch {
|
||||
case syncErr == nil:
|
||||
// No error; requeue at the regular resync interval.
|
||||
p.workQueue.Enqueue(uid, wait.Jitter(p.resyncInterval, workerResyncIntervalJitterFactor))
|
||||
case strings.Contains(syncErr.Error(), NetworkNotReadyErrorMsg):
|
||||
// Network is not ready; back off for short period of time and retry as network might be ready soon.
|
||||
p.workQueue.Enqueue(uid, wait.Jitter(backOffOnTransientErrorPeriod, workerBackOffPeriodJitterFactor))
|
||||
default:
|
||||
// Error occurred during the sync; back off and then retry.
|
||||
p.workQueue.Enqueue(uid, wait.Jitter(p.backOffPeriod, workerBackOffPeriodJitterFactor))
|
||||
}
|
||||
p.checkForUpdates(uid)
|
||||
}
|
||||
|
||||
func (p *podWorkers) checkForUpdates(uid types.UID) {
|
||||
p.podLock.Lock()
|
||||
defer p.podLock.Unlock()
|
||||
if workUpdate, exists := p.lastUndeliveredWorkUpdate[uid]; exists {
|
||||
p.podUpdates[uid] <- workUpdate
|
||||
delete(p.lastUndeliveredWorkUpdate, uid)
|
||||
} else {
|
||||
p.isWorking[uid] = false
|
||||
}
|
||||
}
|
||||
|
||||
// killPodNow returns a KillPodFunc that can be used to kill a pod.
|
||||
// It is intended to be injected into other modules that need to kill a pod.
|
||||
func killPodNow(podWorkers PodWorkers, recorder record.EventRecorder) eviction.KillPodFunc {
|
||||
return func(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error {
|
||||
// determine the grace period to use when killing the pod
|
||||
gracePeriod := int64(0)
|
||||
if gracePeriodOverride != nil {
|
||||
gracePeriod = *gracePeriodOverride
|
||||
} else if pod.Spec.TerminationGracePeriodSeconds != nil {
|
||||
gracePeriod = *pod.Spec.TerminationGracePeriodSeconds
|
||||
}
|
||||
|
||||
// we timeout and return an error if we don't get a callback within a reasonable time.
|
||||
// the default timeout is relative to the grace period (we settle on 10s to wait for kubelet->runtime traffic to complete in sigkill)
|
||||
timeout := int64(gracePeriod + (gracePeriod / 2))
|
||||
minTimeout := int64(10)
|
||||
if timeout < minTimeout {
|
||||
timeout = minTimeout
|
||||
}
|
||||
timeoutDuration := time.Duration(timeout) * time.Second
|
||||
|
||||
// open a channel we block against until we get a result
|
||||
type response struct {
|
||||
err error
|
||||
}
|
||||
ch := make(chan response, 1)
|
||||
podWorkers.UpdatePod(&UpdatePodOptions{
|
||||
Pod: pod,
|
||||
UpdateType: kubetypes.SyncPodKill,
|
||||
OnCompleteFunc: func(err error) {
|
||||
ch <- response{err: err}
|
||||
},
|
||||
KillPodOptions: &KillPodOptions{
|
||||
PodStatusFunc: func(p *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
|
||||
return status
|
||||
},
|
||||
PodTerminationGracePeriodSecondsOverride: gracePeriodOverride,
|
||||
},
|
||||
})
|
||||
|
||||
// wait for either a response, or a timeout
|
||||
select {
|
||||
case r := <-ch:
|
||||
return r.err
|
||||
case <-time.After(timeoutDuration):
|
||||
recorder.Eventf(pod, v1.EventTypeWarning, events.ExceededGracePeriod, "Container runtime did not kill the pod within specified grace period.")
|
||||
return fmt.Errorf("timeout waiting to kill pod")
|
||||
}
|
||||
}
|
||||
}
|
||||
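A minimal, self-contained sketch of the coalescing pattern podWorkers implements above: one goroutine per key drains a buffered channel of size 1, and while that goroutine is busy at most one undelivered update is retained, with a pending kill never overwritten in the real code. The worker type and its methods here are hypothetical stand-ins for illustration, not kubelet API:

package main

import (
    "fmt"
    "sync"
    "time"
)

// worker coalesces updates: while the loop goroutine is busy, only the most
// recent pending update survives for delivery.
type worker struct {
    mu          sync.Mutex
    updates     chan string
    working     bool
    undelivered string
    hasPending  bool
    wg          sync.WaitGroup
}

func newWorker() *worker {
    // Buffer of 1 so checkForUpdates can hand off a pending update from the
    // same goroutine that drains the channel; the channel is guaranteed to be
    // empty at that point, so the send never blocks.
    w := &worker{updates: make(chan string, 1)}
    w.wg.Add(1)
    go w.loop()
    return w
}

func (w *worker) loop() {
    defer w.wg.Done()
    for u := range w.updates {
        fmt.Println("processing:", u)
        w.checkForUpdates()
    }
}

func (w *worker) Update(u string) {
    w.mu.Lock()
    defer w.mu.Unlock()
    if !w.working {
        w.working = true
        w.updates <- u
        return
    }
    // Coalesce: only the latest undelivered update is kept.
    w.undelivered, w.hasPending = u, true
}

func (w *worker) checkForUpdates() {
    w.mu.Lock()
    defer w.mu.Unlock()
    if w.hasPending {
        w.updates <- w.undelivered
        w.hasPending = false
    } else {
        w.working = false
    }
}

func main() {
    w := newWorker()
    for i := 0; i < 5; i++ {
        w.Update(fmt.Sprintf("update-%d", i))
        time.Sleep(10 * time.Millisecond)
    }
    time.Sleep(50 * time.Millisecond) // let the loop drain before shutdown
    close(w.updates)
    w.wg.Wait()
}

The size-1 buffer is the load-bearing detail: it lets the worker goroutine requeue the pending item for itself without deadlocking, which is exactly the guarantee the comment in UpdatePod above relies on.
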
105 vendor/k8s.io/kubernetes/pkg/kubelet/reason_cache.go generated vendored
@@ -1,105 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "sync"

    "github.com/golang/groupcache/lru"
    "k8s.io/apimachinery/pkg/types"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// ReasonCache stores the failure reason of the latest container start
// in a string, keyed by <pod_UID>_<container_name>. The goal is to
// propagate this reason to the container status. This endeavor is
// "best-effort" for two reasons:
//  1. The cache is not persisted.
//  2. We use an LRU cache to avoid extra garbage collection work. This
//     means that some entries may be recycled before a pod has been
//     deleted.
// TODO(random-liu): Use a more reliable cache which could collect garbage of failed pods.
// TODO(random-liu): Move reason cache to somewhere better.
type ReasonCache struct {
    lock  sync.Mutex
    cache *lru.Cache
}

// reasonItem is the cached item in the ReasonCache
type reasonItem struct {
    Err     error
    Message string
}

// maxReasonCacheEntries is the maximum number of entries in the LRU cache.
// 1000 is a reasonable number for our 100-pods-per-node target. If we support
// more pods per node in the future, we may want to increase the number.
const maxReasonCacheEntries = 1000

// NewReasonCache creates an instance of 'ReasonCache'.
func NewReasonCache() *ReasonCache {
    return &ReasonCache{cache: lru.New(maxReasonCacheEntries)}
}

func (c *ReasonCache) composeKey(uid types.UID, name string) string {
    return fmt.Sprintf("%s_%s", uid, name)
}

// add adds an error reason into the cache
func (c *ReasonCache) add(uid types.UID, name string, reason error, message string) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.cache.Add(c.composeKey(uid, name), reasonItem{reason, message})
}

// Update updates the reason cache with the SyncPodResult. Only a SyncResult with
// StartContainer action will change the cache.
func (c *ReasonCache) Update(uid types.UID, result kubecontainer.PodSyncResult) {
    for _, r := range result.SyncResults {
        if r.Action != kubecontainer.StartContainer {
            continue
        }
        name := r.Target.(string)
        if r.Error != nil {
            c.add(uid, name, r.Error, r.Message)
        } else {
            c.Remove(uid, name)
        }
    }
}

// Remove removes an error reason from the cache
func (c *ReasonCache) Remove(uid types.UID, name string) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.cache.Remove(c.composeKey(uid, name))
}

// Get gets the error reason from the cache. The return values are the cached
// reason item and whether an entry was found for the given key.
func (c *ReasonCache) Get(uid types.UID, name string) (*reasonItem, bool) {
    c.lock.Lock()
    defer c.lock.Unlock()
    value, ok := c.cache.Get(c.composeKey(uid, name))
    if !ok {
        return nil, false
    }
    info := value.(reasonItem)
    return &info, true
}

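For context, a tiny standalone sketch of the same github.com/golang/groupcache/lru usage and the "best-effort" eviction behavior the comment above warns about. The pod and container names are made up for illustration; note the lru.Cache itself is not thread-safe, which is why ReasonCache wraps every call in a mutex:

package main

import (
    "fmt"

    "github.com/golang/groupcache/lru"
)

func main() {
    // Keyed the same way as ReasonCache: "<pod_UID>_<container_name>".
    cache := lru.New(2) // capacity 2, so the oldest entry is evicted below

    cache.Add("pod-a_nginx", "ImagePullBackOff")
    cache.Add("pod-b_sidecar", "CrashLoopBackOff")
    cache.Add("pod-c_init", "CreateContainerError") // evicts pod-a_nginx

    if _, ok := cache.Get("pod-a_nginx"); !ok {
        fmt.Println("pod-a_nginx was evicted: the reason cache is best-effort")
    }
    if v, ok := cache.Get("pod-b_sidecar"); ok {
        fmt.Println("pod-b_sidecar:", v)
    }
}
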
177 vendor/k8s.io/kubernetes/pkg/kubelet/runonce.go generated vendored
@@ -1,177 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "os"
    "time"

    "k8s.io/api/core/v1"
    "k8s.io/klog"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
    "k8s.io/kubernetes/pkg/kubelet/util/format"
)

const (
    runOnceManifestDelay     = 1 * time.Second
    runOnceMaxRetries        = 10
    runOnceRetryDelay        = 1 * time.Second
    runOnceRetryDelayBackoff = 2
)

// RunPodResult defines the running results of a Pod.
type RunPodResult struct {
    Pod *v1.Pod
    Err error
}

// RunOnce polls from one configuration update and runs the associated pods.
func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, error) {
    // Setup filesystem directories.
    if err := kl.setupDataDirs(); err != nil {
        return nil, err
    }

    // If the container logs directory does not exist, create it.
    if _, err := os.Stat(ContainerLogsDir); err != nil {
        if err := kl.os.MkdirAll(ContainerLogsDir, 0755); err != nil {
            klog.Errorf("Failed to create directory %q: %v", ContainerLogsDir, err)
        }
    }

    select {
    case u := <-updates:
        klog.Infof("processing manifest with %d pods", len(u.Pods))
        result, err := kl.runOnce(u.Pods, runOnceRetryDelay)
        klog.Infof("finished processing %d pods", len(u.Pods))
        return result, err
    case <-time.After(runOnceManifestDelay):
        return nil, fmt.Errorf("no pod manifest update after %v", runOnceManifestDelay)
    }
}

// runOnce runs a given set of pods and returns their status.
func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) {
    ch := make(chan RunPodResult)
    admitted := []*v1.Pod{}
    for _, pod := range pods {
        // Check if we can admit the pod.
        if ok, reason, message := kl.canAdmitPod(admitted, pod); !ok {
            kl.rejectPod(pod, reason, message)
            results = append(results, RunPodResult{pod, nil})
            continue
        }

        admitted = append(admitted, pod)
        go func(pod *v1.Pod) {
            err := kl.runPod(pod, retryDelay)
            ch <- RunPodResult{pod, err}
        }(pod)
    }

    klog.Infof("Waiting for %d pods", len(admitted))
    failedPods := []string{}
    for i := 0; i < len(admitted); i++ {
        res := <-ch
        results = append(results, res)
        if res.Err != nil {
            failedContainerNames, err := kl.getFailedContainers(res.Pod)
            if err != nil {
                klog.Infof("unable to get failed containers' names for pod %q, error: %v", format.Pod(res.Pod), err)
            } else {
                klog.Infof("unable to start pod %q because containers %v failed", format.Pod(res.Pod), failedContainerNames)
            }
            failedPods = append(failedPods, format.Pod(res.Pod))
        } else {
            klog.Infof("started pod %q", format.Pod(res.Pod))
        }
    }
    if len(failedPods) > 0 {
        return results, fmt.Errorf("error running pods: %v", failedPods)
    }
    klog.Infof("%d pods started", len(pods))
    return results, err
}

// runPod runs a single pod and waits until all containers are running.
func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error {
    delay := retryDelay
    retry := 0
    for {
        status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
        if err != nil {
            return fmt.Errorf("Unable to get status for pod %q: %v", format.Pod(pod), err)
        }

        if kl.isPodRunning(pod, status) {
            klog.Infof("pod %q containers running", format.Pod(pod))
            return nil
        }
        klog.Infof("pod %q containers not running: syncing", format.Pod(pod))

        klog.Infof("Creating a mirror pod for static pod %q", format.Pod(pod))
        if err := kl.podManager.CreateMirrorPod(pod); err != nil {
            klog.Errorf("Failed creating a mirror pod %q: %v", format.Pod(pod), err)
        }
        mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
        if err = kl.syncPod(syncPodOptions{
            pod:        pod,
            mirrorPod:  mirrorPod,
            podStatus:  status,
            updateType: kubetypes.SyncPodUpdate,
        }); err != nil {
            return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err)
        }
        if retry >= runOnceMaxRetries {
            return fmt.Errorf("timeout error: pod %q containers not running after %d retries", format.Pod(pod), runOnceMaxRetries)
        }
        // TODO(proppy): health checking would be better than waiting + checking the state at the next iteration.
        klog.Infof("pod %q containers synced, waiting for %v", format.Pod(pod), delay)
        time.Sleep(delay)
        retry++
        delay *= runOnceRetryDelayBackoff
    }
}

// isPodRunning returns true if all containers of a manifest are running.
func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bool {
    for _, c := range pod.Spec.Containers {
        cs := status.FindContainerStatusByName(c.Name)
        if cs == nil || cs.State != kubecontainer.ContainerStateRunning {
            klog.Infof("Container %q for pod %q not running", c.Name, format.Pod(pod))
            return false
        }
    }
    return true
}

// getFailedContainers returns the names of the failed containers for the pod.
func (kl *Kubelet) getFailedContainers(pod *v1.Pod) ([]string, error) {
    status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
    if err != nil {
        return nil, fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err)
    }
    var containerNames []string
    for _, cs := range status.ContainerStatuses {
        if cs.State != kubecontainer.ContainerStateRunning && cs.ExitCode != 0 {
            containerNames = append(containerNames, cs.Name)
        }
    }
    return containerNames, nil
}

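The retry loop in runPod above follows a simple schedule: up to runOnceMaxRetries attempts, with the delay doubling after each one (1s, 2s, 4s, ... given the constants at the top of the file). A stripped-down sketch of that schedule in isolation, with the helper name and toy operation invented for illustration:

package main

import (
    "fmt"
    "time"
)

// retryWithBackoff mirrors the runPod schedule: a bounded number of retries
// with the sleep doubling between attempts.
func retryWithBackoff(maxRetries int, initialDelay time.Duration, op func() (done bool, err error)) error {
    delay := initialDelay
    for retry := 0; ; retry++ {
        done, err := op()
        if err != nil {
            return err
        }
        if done {
            return nil
        }
        if retry >= maxRetries {
            return fmt.Errorf("timeout: not done after %d retries", maxRetries)
        }
        time.Sleep(delay)
        delay *= 2 // same backoff factor as runOnceRetryDelayBackoff
    }
}

func main() {
    attempts := 0
    err := retryWithBackoff(10, 10*time.Millisecond, func() (bool, error) {
        attempts++
        return attempts == 3, nil // pretend the pod is running on the third check
    })
    fmt.Println("attempts:", attempts, "err:", err)
}
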
129 vendor/k8s.io/kubernetes/pkg/kubelet/runtime.go generated vendored
@@ -1,129 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "errors"
    "fmt"
    "sync"
    "time"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

type runtimeState struct {
    sync.RWMutex
    lastBaseRuntimeSync      time.Time
    baseRuntimeSyncThreshold time.Duration
    networkError             error
    storageError             error
    cidr                     string
    healthChecks             []*healthCheck
}

// A health check function should be efficient and not rely on external
// components (e.g., container runtime).
type healthCheckFnType func() (bool, error)

type healthCheck struct {
    name string
    fn   healthCheckFnType
}

func (s *runtimeState) addHealthCheck(name string, f healthCheckFnType) {
    s.Lock()
    defer s.Unlock()
    s.healthChecks = append(s.healthChecks, &healthCheck{name: name, fn: f})
}

func (s *runtimeState) setRuntimeSync(t time.Time) {
    s.Lock()
    defer s.Unlock()
    s.lastBaseRuntimeSync = t
}

func (s *runtimeState) setNetworkState(err error) {
    s.Lock()
    defer s.Unlock()
    s.networkError = err
}

func (s *runtimeState) setStorageState(err error) {
    s.Lock()
    defer s.Unlock()
    s.storageError = err
}

func (s *runtimeState) setPodCIDR(cidr string) {
    s.Lock()
    defer s.Unlock()
    s.cidr = cidr
}

func (s *runtimeState) podCIDR() string {
    s.RLock()
    defer s.RUnlock()
    return s.cidr
}

func (s *runtimeState) runtimeErrors() error {
    s.RLock()
    defer s.RUnlock()
    errs := []error{}
    if s.lastBaseRuntimeSync.IsZero() {
        errs = append(errs, errors.New("container runtime status check may not have completed yet."))
    } else if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
        errs = append(errs, errors.New("container runtime is down."))
    }
    for _, hc := range s.healthChecks {
        if ok, err := hc.fn(); !ok {
            errs = append(errs, fmt.Errorf("%s is not healthy: %v.", hc.name, err))
        }
    }

    return utilerrors.NewAggregate(errs)
}

func (s *runtimeState) networkErrors() error {
    s.RLock()
    defer s.RUnlock()
    errs := []error{}
    if s.networkError != nil {
        errs = append(errs, s.networkError)
    }
    return utilerrors.NewAggregate(errs)
}

func (s *runtimeState) storageErrors() error {
    s.RLock()
    defer s.RUnlock()
    errs := []error{}
    if s.storageError != nil {
        errs = append(errs, s.storageError)
    }
    return utilerrors.NewAggregate(errs)
}

func newRuntimeState(
    runtimeSyncThreshold time.Duration,
) *runtimeState {
    return &runtimeState{
        lastBaseRuntimeSync:      time.Time{},
        baseRuntimeSyncThreshold: runtimeSyncThreshold,
        networkError:             ErrNetworkUnknown,
    }
}

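runtimeErrors above collects every failing health check into a single error via utilerrors.NewAggregate, which returns nil when the slice is empty. A small standalone sketch of that aggregation behavior, with the error texts invented for illustration:

package main

import (
    "errors"
    "fmt"

    utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
    // NewAggregate flattens a slice of errors into one error value, which is
    // how runtimeErrors reports several failing checks at once.
    errs := []error{
        errors.New("container runtime is down"),
        fmt.Errorf("%s is not healthy: %v", "PLEG", "pleg was last seen active 5m ago"),
    }
    fmt.Println(utilerrors.NewAggregate(errs))

    // An empty slice yields nil, so callers can treat "no errors" uniformly.
    fmt.Println(utilerrors.NewAggregate(nil) == nil) // true
}
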
129 vendor/k8s.io/kubernetes/pkg/kubelet/util.go generated vendored
@@ -1,129 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "os"

    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/capabilities"
    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
    "k8s.io/kubernetes/pkg/securitycontext"
)

// Check whether we have the capabilities to run the specified pod.
func canRunPod(pod *v1.Pod) error {
    if !capabilities.Get().AllowPrivileged {
        for _, container := range pod.Spec.Containers {
            if securitycontext.HasPrivilegedRequest(&container) {
                return fmt.Errorf("pod with UID %q specified privileged container, but is disallowed", pod.UID)
            }
        }
        for _, container := range pod.Spec.InitContainers {
            if securitycontext.HasPrivilegedRequest(&container) {
                return fmt.Errorf("pod with UID %q specified privileged init container, but is disallowed", pod.UID)
            }
        }
    }

    if pod.Spec.HostNetwork {
        allowed, err := allowHostNetwork(pod)
        if err != nil {
            return err
        }
        if !allowed {
            return fmt.Errorf("pod with UID %q specified host networking, but is disallowed", pod.UID)
        }
    }

    if pod.Spec.HostPID {
        allowed, err := allowHostPID(pod)
        if err != nil {
            return err
        }
        if !allowed {
            return fmt.Errorf("pod with UID %q specified host PID, but is disallowed", pod.UID)
        }
    }

    if pod.Spec.HostIPC {
        allowed, err := allowHostIPC(pod)
        if err != nil {
            return err
        }
        if !allowed {
            return fmt.Errorf("pod with UID %q specified host ipc, but is disallowed", pod.UID)
        }
    }

    return nil
}

// Determine whether the specified pod is allowed to use host networking
func allowHostNetwork(pod *v1.Pod) (bool, error) {
    podSource, err := kubetypes.GetPodSource(pod)
    if err != nil {
        return false, err
    }
    for _, source := range capabilities.Get().PrivilegedSources.HostNetworkSources {
        if source == podSource {
            return true, nil
        }
    }
    return false, nil
}

// Determine whether the specified pod is allowed to use host PID
func allowHostPID(pod *v1.Pod) (bool, error) {
    podSource, err := kubetypes.GetPodSource(pod)
    if err != nil {
        return false, err
    }
    for _, source := range capabilities.Get().PrivilegedSources.HostPIDSources {
        if source == podSource {
            return true, nil
        }
    }
    return false, nil
}

// Determine whether the specified pod is allowed to use host ipc
func allowHostIPC(pod *v1.Pod) (bool, error) {
    podSource, err := kubetypes.GetPodSource(pod)
    if err != nil {
        return false, err
    }
    for _, source := range capabilities.Get().PrivilegedSources.HostIPCSources {
        if source == podSource {
            return true, nil
        }
    }
    return false, nil
}

// dirExists returns true if the path exists and represents a directory.
func dirExists(path string) bool {
    s, err := os.Stat(path)
    if err != nil {
        return false
    }
    return s.IsDir()
}

// empty is a placeholder type used to implement a set
type empty struct{}

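The three allowHost* helpers above are the same allowlist check over different source lists: a host capability is granted only when the pod's configuration source appears in the configured allowlist. A generic sketch of that check, with the allowlist values invented for illustration:

package main

import "fmt"

// allowedBySource mirrors the allowHost* helpers: grant a capability only
// when the pod's config source appears in the allowlist.
func allowedBySource(podSource string, allowlist []string) bool {
    for _, source := range allowlist {
        if source == podSource {
            return true
        }
    }
    return false
}

func main() {
    hostNetworkSources := []string{"file", "http"} // hypothetical allowlist
    fmt.Println(allowedBySource("file", hostNetworkSources)) // true
    fmt.Println(allowedBySource("api", hostNetworkSources))  // false
}
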
90 vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD generated vendored
@@ -1,90 +0,0 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = [
        "util_unix_test.go",
        "util_windows_test.go",
    ],
    embed = [":go_default_library"],
    deps = select({
        "@io_bazel_rules_go//go/platform:darwin": [
            "//vendor/github.com/stretchr/testify/assert:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "//vendor/github.com/stretchr/testify/assert:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "//vendor/github.com/stretchr/testify/assert:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "//vendor/github.com/stretchr/testify/assert:go_default_library",
            "//vendor/github.com/stretchr/testify/require:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

go_library(
    name = "go_default_library",
    srcs = [
        "boottime_util_darwin.go",
        "boottime_util_linux.go",
        "doc.go",
        "util.go",
        "util_unix.go",
        "util_unsupported.go",
        "util_windows.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/kubelet/util",
    deps = [
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    ] + select({
        "@io_bazel_rules_go//go/platform:darwin": [
            "//vendor/golang.org/x/sys/unix:go_default_library",
            "//vendor/k8s.io/klog:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:freebsd": [
            "//vendor/golang.org/x/sys/unix:go_default_library",
            "//vendor/k8s.io/klog:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:linux": [
            "//vendor/golang.org/x/sys/unix:go_default_library",
            "//vendor/k8s.io/klog:go_default_library",
        ],
        "@io_bazel_rules_go//go/platform:windows": [
            "//vendor/github.com/Microsoft/go-winio:go_default_library",
        ],
        "//conditions:default": [],
    }),
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/kubelet/util/cache:all-srcs",
        "//pkg/kubelet/util/format:all-srcs",
        "//pkg/kubelet/util/ioutils:all-srcs",
        "//pkg/kubelet/util/manager:all-srcs",
        "//pkg/kubelet/util/pluginwatcher:all-srcs",
        "//pkg/kubelet/util/queue:all-srcs",
        "//pkg/kubelet/util/sliceutils:all-srcs",
        "//pkg/kubelet/util/store:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

44 vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go generated vendored
@@ -1,44 +0,0 @@
// +build darwin

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "fmt"
    "syscall"
    "time"
    "unsafe"

    "golang.org/x/sys/unix"
)

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
    output, err := unix.SysctlRaw("kern.boottime")
    if err != nil {
        return time.Time{}, err
    }
    var timeval syscall.Timeval
    if len(output) != int(unsafe.Sizeof(timeval)) {
        return time.Time{}, fmt.Errorf("unexpected output when calling syscall kern.boottime. Expected len(output) to be %v, but got %v",
            int(unsafe.Sizeof(timeval)), len(output))
    }
    timeval = *(*syscall.Timeval)(unsafe.Pointer(&output[0]))
    sec, nsec := timeval.Unix()
    return time.Unix(sec, nsec).Truncate(time.Second), nil
}

36 vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go generated vendored
@@ -1,36 +0,0 @@
// +build freebsd linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "fmt"
    "time"

    "golang.org/x/sys/unix"
)

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
    currentTime := time.Now()
    var info unix.Sysinfo_t
    if err := unix.Sysinfo(&info); err != nil {
        return time.Time{}, fmt.Errorf("error getting system uptime: %s", err)
    }
    return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil
}

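The Linux/FreeBSD path above derives boot time by subtracting the sysinfo uptime from the current clock and truncating to whole seconds. The same arithmetic in isolation, with a stand-in uptime value instead of the unix.Sysinfo call:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Given an uptime (a made-up value here), boot time is simply
    // now minus uptime, truncated to whole seconds.
    uptime := 90 * time.Minute
    bootTime := time.Now().Add(-uptime).Truncate(time.Second)
    fmt.Println("booted at:", bootTime)
}
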
18 vendor/k8s.io/kubernetes/pkg/kubelet/util/doc.go generated vendored
@@ -1,18 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Utility functions.
package util // import "k8s.io/kubernetes/pkg/kubelet/util"

27 vendor/k8s.io/kubernetes/pkg/kubelet/util/util.go generated vendored
@@ -1,27 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// FromApiserverCache modifies <opts> so that the GET request will
// be served from apiserver cache instead of from etcd.
func FromApiserverCache(opts *metav1.GetOptions) {
    opts.ResourceVersion = "0"
}

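For context: ResourceVersion "0" tells the apiserver it may answer a GET from its watch cache rather than doing a quorum read against etcd. A minimal usage sketch; the client-go Get call in the trailing comment is illustrative only, since its signature varies across client-go versions:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    opts := metav1.GetOptions{}
    opts.ResourceVersion = "0" // what FromApiserverCache sets
    fmt.Printf("%+v\n", opts)
    // A caller would then pass opts to a client-go Get, e.g.
    //   client.CoreV1().Nodes().Get(ctx, nodeName, opts)
    // (hypothetical call shape; check your client-go version).
}
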
111 vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unix.go generated vendored
@@ -1,111 +0,0 @@
// +build freebsd linux darwin

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "fmt"
    "net"
    "net/url"
    "os"
    "path/filepath"
    "time"

    "golang.org/x/sys/unix"
    "k8s.io/klog"
)

const (
    // unixProtocol is the network protocol of unix socket.
    unixProtocol = "unix"
)

// CreateListener creates a listener on the specified endpoint.
func CreateListener(endpoint string) (net.Listener, error) {
    protocol, addr, err := parseEndpointWithFallbackProtocol(endpoint, unixProtocol)
    if err != nil {
        return nil, err
    }
    if protocol != unixProtocol {
        return nil, fmt.Errorf("only support unix socket endpoint")
    }

    // Unlink to cleanup the previous socket file.
    err = unix.Unlink(addr)
    if err != nil && !os.IsNotExist(err) {
        return nil, fmt.Errorf("failed to unlink socket file %q: %v", addr, err)
    }

    return net.Listen(protocol, addr)
}

// GetAddressAndDialer returns the address parsed from the given endpoint and a dialer for it.
func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
    protocol, addr, err := parseEndpointWithFallbackProtocol(endpoint, unixProtocol)
    if err != nil {
        return "", nil, err
    }
    if protocol != unixProtocol {
        return "", nil, fmt.Errorf("only support unix socket endpoint")
    }

    return addr, dial, nil
}

func dial(addr string, timeout time.Duration) (net.Conn, error) {
    return net.DialTimeout(unixProtocol, addr, timeout)
}

func parseEndpointWithFallbackProtocol(endpoint string, fallbackProtocol string) (protocol string, addr string, err error) {
    if protocol, addr, err = parseEndpoint(endpoint); err != nil && protocol == "" {
        fallbackEndpoint := fallbackProtocol + "://" + endpoint
        protocol, addr, err = parseEndpoint(fallbackEndpoint)
        if err == nil {
            klog.Warningf("Using %q as endpoint is deprecated, please consider using full url format %q.", endpoint, fallbackEndpoint)
        }
    }
    return
}

func parseEndpoint(endpoint string) (string, string, error) {
    u, err := url.Parse(endpoint)
    if err != nil {
        return "", "", err
    }

    switch u.Scheme {
    case "tcp":
        return "tcp", u.Host, nil

    case "unix":
        return "unix", u.Path, nil

    case "":
        return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)

    default:
        return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
    }
}

// LocalEndpoint returns the full path to a unix socket at the given endpoint
func LocalEndpoint(path, file string) string {
    u := url.URL{
        Scheme: unixProtocol,
        Path:   path,
    }
    return filepath.Join(u.String(), file+".sock")
}

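parseEndpoint above hinges on one detail of net/url: for a unix endpoint the address lives in the URL path, while for tcp it lives in the host. A small standalone sketch of that split, with hypothetical endpoint strings:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    for _, endpoint := range []string{
        "unix:///var/run/example.sock", // hypothetical socket path
        "tcp://127.0.0.1:10010",        // hypothetical tcp endpoint
    } {
        u, err := url.Parse(endpoint)
        if err != nil {
            fmt.Println("parse error:", err)
            continue
        }
        // Scheme selects the protocol; the address comes from Path or Host.
        switch u.Scheme {
        case "unix":
            fmt.Println("unix addr:", u.Path)
        case "tcp":
            fmt.Println("tcp addr:", u.Host)
        }
    }
}
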
52 vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go generated vendored
@@ -1,52 +0,0 @@
// +build !freebsd,!linux,!windows,!darwin

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "fmt"
    "net"
    "time"
)

func CreateListener(endpoint string) (net.Listener, error) {
    return nil, fmt.Errorf("CreateListener is unsupported in this build")
}

func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
    return "", nil, fmt.Errorf("GetAddressAndDialer is unsupported in this build")
}

// LockAndCheckSubPath empty implementation
func LockAndCheckSubPath(volumePath, subPath string) ([]uintptr, error) {
    return []uintptr{}, nil
}

// UnlockPath empty implementation
func UnlockPath(fileHandles []uintptr) {
}

// LocalEndpoint empty implementation
func LocalEndpoint(path, file string) string {
    return ""
}

// GetBootTime empty implementation
func GetBootTime() (time.Time, error) {
    return time.Time{}, fmt.Errorf("GetBootTime is unsupported in this build")
}

127 vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go generated vendored
@@ -1,127 +0,0 @@
// +build windows

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
    "fmt"
    "net"
    "net/url"
    "strings"
    "syscall"
    "time"

    "github.com/Microsoft/go-winio"
)

const (
    tcpProtocol   = "tcp"
    npipeProtocol = "npipe"
)

func CreateListener(endpoint string) (net.Listener, error) {
    protocol, addr, err := parseEndpoint(endpoint)
    if err != nil {
        return nil, err
    }

    switch protocol {
    case tcpProtocol:
        return net.Listen(tcpProtocol, addr)

    case npipeProtocol:
        return winio.ListenPipe(addr, nil)

    default:
        return nil, fmt.Errorf("only support tcp and npipe endpoint")
    }
}

func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
    protocol, addr, err := parseEndpoint(endpoint)
    if err != nil {
        return "", nil, err
    }

    if protocol == tcpProtocol {
        return addr, tcpDial, nil
    }

    if protocol == npipeProtocol {
        return addr, npipeDial, nil
    }

    return "", nil, fmt.Errorf("only support tcp and npipe endpoint")
}

func tcpDial(addr string, timeout time.Duration) (net.Conn, error) {
    return net.DialTimeout(tcpProtocol, addr, timeout)
}

func npipeDial(addr string, timeout time.Duration) (net.Conn, error) {
    return winio.DialPipe(addr, &timeout)
}

func parseEndpoint(endpoint string) (string, string, error) {
    // url.Parse doesn't recognize \, so replace with / first.
    endpoint = strings.Replace(endpoint, "\\", "/", -1)
    u, err := url.Parse(endpoint)
    if err != nil {
        return "", "", err
    }

    if u.Scheme == "tcp" {
        return "tcp", u.Host, nil
    } else if u.Scheme == "npipe" {
        if strings.HasPrefix(u.Path, "//./pipe") {
            return "npipe", u.Path, nil
        }

        // fallback host if not provided.
        host := u.Host
        if host == "" {
            host = "."
        }
        return "npipe", fmt.Sprintf("//%s%s", host, u.Path), nil
    } else if u.Scheme == "" {
        return "", "", fmt.Errorf("Using %q as endpoint is deprecated, please consider using full url format", endpoint)
    } else {
        return u.Scheme, "", fmt.Errorf("protocol %q not supported", u.Scheme)
    }
}

// LocalEndpoint returns the full path to a windows named pipe
func LocalEndpoint(path, file string) string {
    u := url.URL{
        Scheme: npipeProtocol,
        Path:   path,
    }
    return u.String() + "//./pipe/" + file
}

var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64")

// GetBootTime returns the time at which the machine was started, truncated to the nearest second
func GetBootTime() (time.Time, error) {
    currentTime := time.Now()
    output, _, err := tickCount.Call()
    if errno, ok := err.(syscall.Errno); !ok || errno != 0 {
        return time.Time{}, err
    }
    return currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil
}

281 vendor/k8s.io/kubernetes/pkg/kubelet/volume_host.go generated vendored
@@ -1,281 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "net"
    "runtime"

    "k8s.io/klog"

    authenticationv1 "k8s.io/api/authentication/v1"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/record"
    cloudprovider "k8s.io/cloud-provider"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/kubelet/configmap"
    "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/mountpod"
    "k8s.io/kubernetes/pkg/kubelet/secret"
    "k8s.io/kubernetes/pkg/kubelet/token"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util"
    "k8s.io/kubernetes/pkg/volume/util/subpath"
)

// NewInitializedVolumePluginMgr returns a new instance of
// volume.VolumePluginMgr initialized with kubelet's implementation of the
// volume.VolumeHost interface.
//
// kubelet - used by VolumeHost methods to expose kubelet specific parameters
// plugins - used to initialize volumePluginMgr
func NewInitializedVolumePluginMgr(
    kubelet *Kubelet,
    secretManager secret.Manager,
    configMapManager configmap.Manager,
    tokenManager *token.Manager,
    plugins []volume.VolumePlugin,
    prober volume.DynamicPluginProber) (*volume.VolumePluginMgr, error) {

    mountPodManager, err := mountpod.NewManager(kubelet.getRootDir(), kubelet.podManager)
    if err != nil {
        return nil, err
    }
    kvh := &kubeletVolumeHost{
        kubelet:          kubelet,
        volumePluginMgr:  volume.VolumePluginMgr{},
        secretManager:    secretManager,
        configMapManager: configMapManager,
        tokenManager:     tokenManager,
        mountPodManager:  mountPodManager,
    }

    if err := kvh.volumePluginMgr.InitPlugins(plugins, prober, kvh); err != nil {
        return nil, fmt.Errorf(
            "Could not initialize volume plugins for KubeletVolumePluginMgr: %v",
            err)
    }

    return &kvh.volumePluginMgr, nil
}

// Compile-time check to ensure kubeletVolumeHost implements the VolumeHost interface
var _ volume.VolumeHost = &kubeletVolumeHost{}
var _ volume.KubeletVolumeHost = &kubeletVolumeHost{}

func (kvh *kubeletVolumeHost) GetPluginDir(pluginName string) string {
    return kvh.kubelet.getPluginDir(pluginName)
}

type kubeletVolumeHost struct {
    kubelet          *Kubelet
    volumePluginMgr  volume.VolumePluginMgr
    secretManager    secret.Manager
    tokenManager     *token.Manager
    configMapManager configmap.Manager
    mountPodManager  mountpod.Manager
}

func (kvh *kubeletVolumeHost) SetKubeletError(err error) {
    kvh.kubelet.runtimeState.setStorageState(err)
}

func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) string {
    return kvh.kubelet.getVolumeDevicePluginDir(pluginName)
}

func (kvh *kubeletVolumeHost) GetPodsDir() string {
    return kvh.kubelet.getPodsDir()
}

func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
    dir := kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
    if runtime.GOOS == "windows" {
        dir = util.GetWindowsPath(dir)
    }
    return dir
}

func (kvh *kubeletVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
    return kvh.kubelet.getPodVolumeDeviceDir(podUID, pluginName)
}

func (kvh *kubeletVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
    return kvh.kubelet.getPodPluginDir(podUID, pluginName)
}

func (kvh *kubeletVolumeHost) GetKubeClient() clientset.Interface {
    return kvh.kubelet.kubeClient
}

func (kvh *kubeletVolumeHost) GetSubpather() subpath.Interface {
    return kvh.kubelet.subpather
}

func (kvh *kubeletVolumeHost) NewWrapperMounter(
    volName string,
    spec volume.Spec,
    pod *v1.Pod,
    opts volume.VolumeOptions) (volume.Mounter, error) {
    // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
    wrapperVolumeName := "wrapped_" + volName
    if spec.Volume != nil {
        spec.Volume.Name = wrapperVolumeName
    }

    return kvh.kubelet.newVolumeMounterFromPlugins(&spec, pod, opts)
}

func (kvh *kubeletVolumeHost) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
    // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
    wrapperVolumeName := "wrapped_" + volName
    if spec.Volume != nil {
        spec.Volume.Name = wrapperVolumeName
    }

    plugin, err := kvh.kubelet.volumePluginMgr.FindPluginBySpec(&spec)
    if err != nil {
        return nil, err
    }

    return plugin.NewUnmounter(spec.Name(), podUID)
}

func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface {
    return kvh.kubelet.cloud
}

func (kvh *kubeletVolumeHost) GetMounter(pluginName string) mount.Interface {
    exec, err := kvh.getMountExec(pluginName)
    if err != nil {
        klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
        // Use the default mounter
        exec = nil
    }
    if exec == nil {
        return kvh.kubelet.mounter
    }
    return mount.NewExecMounter(exec, kvh.kubelet.mounter)
}

func (kvh *kubeletVolumeHost) GetHostName() string {
    return kvh.kubelet.hostname
}

func (kvh *kubeletVolumeHost) GetHostIP() (net.IP, error) {
    return kvh.kubelet.GetHostIP()
}

func (kvh *kubeletVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
    node, err := kvh.kubelet.getNodeAnyWay()
    if err != nil {
        return nil, fmt.Errorf("error retrieving node: %v", err)
    }
    return node.Status.Allocatable, nil
}

func (kvh *kubeletVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
    return kvh.secretManager.GetSecret
}

func (kvh *kubeletVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
    return kvh.configMapManager.GetConfigMap
}

func (kvh *kubeletVolumeHost) GetServiceAccountTokenFunc() func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
    return kvh.tokenManager.GetServiceAccountToken
}

func (kvh *kubeletVolumeHost) DeleteServiceAccountTokenFunc() func(podUID types.UID) {
    return kvh.tokenManager.DeleteServiceAccountToken
}

func (kvh *kubeletVolumeHost) GetNodeLabels() (map[string]string, error) {
    node, err := kvh.kubelet.GetNode()
    if err != nil {
        return nil, fmt.Errorf("error retrieving node: %v", err)
    }
    return node.Labels, nil
}

func (kvh *kubeletVolumeHost) GetNodeName() types.NodeName {
    return kvh.kubelet.nodeName
}

func (kvh *kubeletVolumeHost) GetEventRecorder() record.EventRecorder {
    return kvh.kubelet.recorder
}

func (kvh *kubeletVolumeHost) GetExec(pluginName string) mount.Exec {
    exec, err := kvh.getMountExec(pluginName)
    if err != nil {
        klog.V(2).Infof("Error finding mount pod for plugin %s: %s", pluginName, err.Error())
        // Use the default exec
        exec = nil
    }
    if exec == nil {
        return mount.NewOsExec()
    }
    return exec
}

// getMountExec returns a mount.Exec implementation that leads to a pod with mount
// utilities. It returns nil,nil when there is no such pod and the default mounter /
// os.Exec should be used.
func (kvh *kubeletVolumeHost) getMountExec(pluginName string) (mount.Exec, error) {
    if !utilfeature.DefaultFeatureGate.Enabled(features.MountContainers) {
        klog.V(5).Infof("using default mounter/exec for %s", pluginName)
        return nil, nil
    }

    pod, container, err := kvh.mountPodManager.GetMountPod(pluginName)
    if err != nil {
        return nil, err
    }
    if pod == nil {
        // Use default mounter/exec for this plugin
        klog.V(5).Infof("using default mounter/exec for %s", pluginName)
        return nil, nil
    }
    klog.V(5).Infof("using container %s/%s/%s to execute mount utilities for %s", pod.Namespace, pod.Name, container, pluginName)
    return &containerExec{
        pod:           pod,
        containerName: container,
        kl:            kvh.kubelet,
    }, nil
}

// containerExec is an implementation of mount.Exec that executes commands in a given
// container in a given pod.
type containerExec struct {
    pod           *v1.Pod
    containerName string
    kl            *Kubelet
}

var _ mount.Exec = &containerExec{}

func (e *containerExec) Run(cmd string, args ...string) ([]byte, error) {
    cmdline := append([]string{cmd}, args...)
    klog.V(5).Infof("Exec mounter running in pod %s/%s/%s: %v", e.pod.Namespace, e.pod.Name, e.containerName, cmdline)
    return e.kl.RunInContainer(container.GetPodFullName(e.pod), e.pod.UID, e.containerName, cmdline)
}

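volume_host.go relies on the standard Go idiom `var _ volume.VolumeHost = &kubeletVolumeHost{}` to make interface conformance a build-time guarantee. A minimal standalone sketch of that guard, with a toy interface and type invented for illustration:

package main

import "fmt"

type greeter interface{ Greet() string }

type englishGreeter struct{}

func (englishGreeter) Greet() string { return "hello" }

// Same compile-time check pattern as above: assigning to a blank interface
// variable fails the build if englishGreeter ever stops satisfying greeter.
var _ greeter = englishGreeter{}

func main() {
    fmt.Println(englishGreeter{}.Greet())
}
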