Update go dependencies

Manuel de Brito Fontes 2018-04-21 14:10:40 -03:00
parent 293223eea0
commit b7a799bf82
No known key found for this signature in database
GPG key ID: 786136016A8BA02A
432 changed files with 37346 additions and 25783 deletions

@ -42,7 +42,7 @@ go_library(
"//pkg/fieldpath:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/cri:go_default_library",
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/cadvisor:go_default_library",
"//pkg/kubelet/certificate:go_default_library",
@ -61,7 +61,9 @@ go_library(
"//pkg/kubelet/kubeletconfig:go_default_library",
"//pkg/kubelet/kuberuntime:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/logs:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/metrics/collectors:go_default_library",
"//pkg/kubelet/mountpod:go_default_library",
"//pkg/kubelet/network:go_default_library",
"//pkg/kubelet/network/dns:go_default_library",
@ -103,7 +105,7 @@ go_library(
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//pkg/volume/util/volumepathhandler:go_default_library",
"//pkg/volume/validation:go_default_library",
"//third_party/forked/golang/expansion:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
@ -162,14 +164,13 @@ go_test(
"//conditions:default": [],
}),
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/kubelet",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/capabilities:go_default_library",
"//pkg/cloudprovider/providers/fake:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/cadvisor/testing:go_default_library",
"//pkg/kubelet/cm:go_default_library",
@ -181,6 +182,7 @@ go_test(
"//pkg/kubelet/gpu:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/logs:go_default_library",
"//pkg/kubelet/network:go_default_library",
"//pkg/kubelet/network/testing:go_default_library",
"//pkg/kubelet/pleg:go_default_library",
@ -205,7 +207,7 @@ go_test(
"//pkg/volume:go_default_library",
"//pkg/volume/host_path:go_default_library",
"//pkg/volume/testing:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//pkg/volume/util:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/google/cadvisor/info/v2:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
@ -268,6 +270,7 @@ filegroup(
"//pkg/kubelet/kuberuntime:all-srcs",
"//pkg/kubelet/leaky:all-srcs",
"//pkg/kubelet/lifecycle:all-srcs",
"//pkg/kubelet/logs:all-srcs",
"//pkg/kubelet/metrics:all-srcs",
"//pkg/kubelet/mountpod:all-srcs",
"//pkg/kubelet/network:all-srcs",
@ -278,7 +281,6 @@ filegroup(
"//pkg/kubelet/qos:all-srcs",
"//pkg/kubelet/remote:all-srcs",
"//pkg/kubelet/rkt:all-srcs",
"//pkg/kubelet/rktshim:all-srcs",
"//pkg/kubelet/secret:all-srcs",
"//pkg/kubelet/server:all-srcs",
"//pkg/kubelet/stats:all-srcs",

@ -10,8 +10,20 @@ go_library(
srcs = [
"well_known_annotations.go",
"well_known_labels.go",
],
] + select({
"@io_bazel_rules_go//go/platform:windows": [
"well_known_annotations_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/kubelet/apis",
deps = select({
"@io_bazel_rules_go//go/platform:windows": [
"//pkg/features:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
@ -27,6 +39,7 @@ filegroup(
":package-srcs",
"//pkg/kubelet/apis/cri:all-srcs",
"//pkg/kubelet/apis/deviceplugin/v1alpha:all-srcs",
"//pkg/kubelet/apis/deviceplugin/v1beta1:all-srcs",
"//pkg/kubelet/apis/kubeletconfig:all-srcs",
"//pkg/kubelet/apis/stats/v1alpha1:all-srcs",
],

@ -9,7 +9,7 @@ go_library(
name = "go_default_library",
srcs = ["services.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/apis/cri",
deps = ["//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library"],
deps = ["//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library"],
)
filegroup(
@ -23,8 +23,8 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/apis/cri/runtime/v1alpha2:all-srcs",
"//pkg/kubelet/apis/cri/testing:all-srcs",
"//pkg/kubelet/apis/cri/v1alpha1/runtime:all-srcs",
],
tags = ["automanaged"],
)

@ -11,7 +11,7 @@ go_library(
"api.pb.go",
"constants.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime",
importpath = "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2",
deps = [
"//vendor/github.com/gogo/protobuf/gogoproto:go_default_library",
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
@ -37,5 +37,4 @@ filegroup(
filegroup(
name = "go_default_library_protos",
srcs = ["api.proto"],
visibility = ["//visibility:public"],
)

@ -1,7 +1,8 @@
// To regenerate api.pb.go run hack/update-generated-runtime.sh
syntax = 'proto3';
package runtime;
package runtime.v1alpha2;
option go_package = "v1alpha2";
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
@ -63,6 +64,12 @@ service RuntimeService {
rpc ContainerStatus(ContainerStatusRequest) returns (ContainerStatusResponse) {}
// UpdateContainerResources updates ContainerConfig of the container.
rpc UpdateContainerResources(UpdateContainerResourcesRequest) returns (UpdateContainerResourcesResponse) {}
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. This is often called after the log file has been
// rotated. If the container is not running, container runtime can choose
// to either create a new log file and return nil, or return an error.
// Once it returns error, new container log file MUST NOT be created.
rpc ReopenContainerLog(ReopenContainerLogRequest) returns (ReopenContainerLogResponse) {}
// ExecSync runs a command in a container synchronously.
rpc ExecSync(ExecSyncRequest) returns (ExecSyncResponse) {}
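
To make the ReopenContainerLog contract above concrete, here is a minimal sketch, assuming a hypothetical runtime-side container record (real CRI runtimes keep far richer state), of an implementation that reopens the log after rotation and guarantees no new log file is left behind on error:

```go
package logsketch

import (
	"fmt"
	"os"
	"sync"
)

// container is a stand-in for a runtime's container record (hypothetical).
type container struct {
	mu      sync.Mutex
	logPath string
	logFile *os.File
	running bool
}

// ReopenLog reopens the container's log file at the same path, e.g. after
// the kubelet has rotated the old file away. On any error it returns
// without creating a replacement file, matching the comment above.
func (c *container) ReopenLog() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if !c.running {
		return fmt.Errorf("container %q not running; log not reopened", c.logPath)
	}
	f, err := os.OpenFile(c.logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0640)
	if err != nil {
		return err // open failed, so no new log file exists
	}
	old := c.logFile
	c.logFile = f
	if old != nil {
		old.Close()
	}
	return nil
}
```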
@ -174,14 +181,39 @@ message Mount {
MountPropagation propagation = 5;
}
// A NamespaceMode describes the intended namespace configuration for each
// of the namespaces (Network, PID, IPC) in NamespaceOption. Runtimes should
// map these modes as appropriate for the technology underlying the runtime.
enum NamespaceMode {
// A POD namespace is common to all containers in a pod.
// For example, a container with a PID namespace of POD expects to view
// all of the processes in all of the containers in the pod.
POD = 0;
// A CONTAINER namespace is restricted to a single container.
// For example, a container with a PID namespace of CONTAINER expects to
// view only the processes in that container.
CONTAINER = 1;
// A NODE namespace is the namespace of the Kubernetes node.
// For example, a container with a PID namespace of NODE expects to view
// all of the processes on the host running the kubelet.
NODE = 2;
}
// NamespaceOption provides options for Linux namespaces.
message NamespaceOption {
// If set, use the host's network namespace.
bool host_network = 1;
// If set, use the host's PID namespace.
bool host_pid = 2;
// If set, use the host's IPC namespace.
bool host_ipc = 3;
// Network namespace for this container/sandbox.
// Note: There is currently no way to set CONTAINER scoped network in the Kubernetes API.
// Namespaces currently set by the kubelet: POD, NODE
NamespaceMode network = 1;
// PID namespace for this container/sandbox.
// Note: The CRI default is POD, but the v1.PodSpec default is CONTAINER.
// The kubelet's runtime manager will set this to CONTAINER explicitly for v1 pods.
// Namespaces currently set by the kubelet: POD, CONTAINER, NODE
NamespaceMode pid = 2;
// IPC namespace for this container/sandbox.
// Note: There is currently no way to set CONTAINER scoped IPC in the Kubernetes API.
// Namespaces currently set by the kubelet: POD, NODE
NamespaceMode ipc = 3;
}
// Int64Value is the wrapper of int64.
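
A rough sketch of the POD/CONTAINER/NODE mapping the NamespaceOption comments describe, using local stand-ins for the generated CRI types and v1.PodSpec: the old host* booleans translate to NODE, and PID defaults to CONTAINER per the v1.PodSpec default called out above.

```go
package nssketch

// NamespaceMode mirrors the enum above (stand-in, not the generated type).
type NamespaceMode int32

const (
	POD NamespaceMode = iota
	CONTAINER
	NODE
)

// podSpec carries only the fields relevant here.
type podSpec struct {
	HostNetwork bool
	HostPID     bool
	HostIPC     bool
}

// namespacesFor maps a pod spec to the modes the kubelet would request:
// NODE when a host namespace is asked for, otherwise POD for network/IPC
// and CONTAINER for PID.
func namespacesFor(spec podSpec) (network, pid, ipc NamespaceMode) {
	network, pid, ipc = POD, CONTAINER, POD
	if spec.HostNetwork {
		network = NODE
	}
	if spec.HostPID {
		pid = NODE
	}
	if spec.HostIPC {
		ipc = NODE
	}
	return network, pid, ipc
}
```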
@ -203,6 +235,8 @@ message LinuxSandboxSecurityContext {
SELinuxOption selinux_options = 2;
// UID to run sandbox processes as, when applicable.
Int64Value run_as_user = 3;
// GID to run sandbox processes as, when applicable.
Int64Value run_as_group = 8;
// If set, the root filesystem of the sandbox is read-only.
bool readonly_rootfs = 4;
// List of groups applied to the first process run in the sandbox, in
@ -384,7 +418,7 @@ message PodSandboxStatus {
message PodSandboxStatusResponse {
// Status of the PodSandbox.
PodSandboxStatus status = 1;
// Info is extra information of the PodSandbox. The key could be abitrary string, and
// Info is extra information of the PodSandbox. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. network namespace for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -519,6 +553,9 @@ message LinuxContainerSecurityContext {
// UID to run the container process as. Only one of run_as_user and
// run_as_username can be specified at a time.
Int64Value run_as_user = 5;
// GID to run the container process as. Only one of run_as_group and
// run_as_groupname can be specified at a time.
Int64Value run_as_group = 12;
// User name to run the container process as. If specified, the user MUST
// exist in the container image (i.e. in the /etc/passwd inside the image),
// and be resolved there by the runtime; otherwise, the runtime MUST error.
@ -556,6 +593,26 @@ message LinuxContainerConfig {
LinuxContainerSecurityContext security_context = 2;
}
// WindowsContainerConfig contains platform-specific configuration for
// Windows-based containers.
message WindowsContainerConfig {
// Resources specification for the container.
WindowsContainerResources resources = 1;
}
// WindowsContainerResources specifies Windows specific configuration for
// resources.
message WindowsContainerResources {
// CPU shares (relative weight vs. other containers). Default: 0 (not specified).
int64 cpu_shares = 1;
// Number of CPUs available to the container. Default: 0 (not specified).
int64 cpu_count = 2;
// Specifies the portion of processor cycles that this container can use as a percentage times 100.
int64 cpu_maximum = 3;
// Memory limit in bytes. Default: 0 (not specified).
int64 memory_limit_in_bytes = 4;
}
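
The cpu_maximum field is easy to misread: since the unit is percent times 100, a value of 10000 means 100% of the host's CPU. A back-of-envelope sketch (the kubelet's real derivation may round or clamp differently, and the assumption here is that the value is relative to all host CPUs): a 1500m limit on a 4-CPU node is 1.5/4 = 37.5%, so cpu_maximum = 3750.

```go
package main

import "fmt"

// cpuMaximum converts a milli-CPU limit into the percent-times-100 unit
// used by WindowsContainerResources.cpu_maximum.
func cpuMaximum(milliCPU, hostCPUs int64) int64 {
	return 10000 * milliCPU / (hostCPUs * 1000)
}

func main() {
	fmt.Println(cpuMaximum(1500, 4)) // 3750, i.e. 37.5% of the node
}
```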
// ContainerMetadata holds all necessary information for building the container
// name. The container runtime is encouraged to expose the metadata in its user
// interface for better user experience. E.g., runtime can construct a unique
@ -643,6 +700,8 @@ message ContainerConfig {
// Configuration specific to Linux containers.
LinuxContainerConfig linux = 15;
// Configuration specific to Windows containers.
WindowsContainerConfig windows = 16;
}
message CreateContainerRequest {
@ -800,7 +859,7 @@ message ContainerStatus {
message ContainerStatusResponse {
// Status of the container.
ContainerStatus status = 1;
// Info is extra information of the Container. The key could be abitrary string, and
// Info is extra information of the Container. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. pid for linux container based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -941,7 +1000,7 @@ message ImageStatusRequest {
message ImageStatusResponse {
// Status of the image.
Image image = 1;
// Info is extra information of the Image. The key could be abitrary string, and
// Info is extra information of the Image. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful
// for debug, e.g. image config for oci image based container runtime.
// It should only be returned non-empty when Verbose is true.
@ -1036,7 +1095,7 @@ message StatusRequest {
message StatusResponse {
// Status of the Runtime.
RuntimeStatus status = 1;
// Info is extra information of the Runtime. The key could be abitrary string, and
// Info is extra information of the Runtime. The key could be arbitrary string, and
// value should be in json format. The information could include anything useful for
// debug, e.g. plugins used by the container runtime.
// It should only be returned non-empty when Verbose is true.
@ -1051,18 +1110,18 @@ message UInt64Value {
uint64 value = 1;
}
// StorageIdentifier uniquely identify the storage..
message StorageIdentifier{
// UUID of the device.
string uuid = 1;
// FilesystemIdentifier uniquely identify the filesystem.
message FilesystemIdentifier{
// Mountpoint of a filesystem.
string mountpoint = 1;
}
// FilesystemUsage provides the filesystem usage information.
message FilesystemUsage {
// Timestamp in nanoseconds at which the information were collected. Must be > 0.
int64 timestamp = 1;
// The underlying storage of the filesystem.
StorageIdentifier storage_id = 2;
// The unique identifier of the filesystem.
FilesystemIdentifier fs_id = 2;
// UsedBytes represents the bytes used for images on the filesystem.
// This may differ from the total bytes used on the filesystem and may not
// equal CapacityBytes - AvailableBytes.
@ -1153,3 +1212,11 @@ message MemoryUsage {
// The amount of working set memory in bytes.
UInt64Value working_set_bytes = 2;
}
message ReopenContainerLogRequest {
// ID of the container for which to reopen the log.
string container_id = 1;
}
message ReopenContainerLogResponse{
}

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
package v1alpha2
// This file contains all constants defined in CRI.
@ -38,7 +38,7 @@ const (
// LogTag is the tag of a log line in CRI container log.
// Currently defined log tags:
// * First tag: Partial/End - P/E.
// * First tag: Partial/Full - P/F.
// The field in the container log format can be extended to include multiple
// tags by using a delimiter, but changes should be rare. If it becomes clear
// that better extensibility is desired, a more extensible format (e.g., json)
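
For reference, the tag sits in the third field of a CRI log line (timestamp, stream, tags, content). A toy split over a made-up line, showing where the P/F flag lives:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical CRI-format log line; "F" marks a full (non-partial) line.
	line := "2018-04-21T17:10:40.000000000Z stdout F hello from the container"
	parts := strings.SplitN(line, " ", 4)
	timestamp, stream, tag, content := parts[0], parts[1], parts[2], parts[3]
	fmt.Printf("ts=%s stream=%s partial=%v msg=%q\n",
		timestamp, stream, tag == "P", content)
}
```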

@ -19,7 +19,7 @@ package cri
import (
"time"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// RuntimeVersioner contains methods for runtime name, version and API version.
@ -52,6 +52,10 @@ type ContainerManager interface {
Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error)
// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error)
// ReopenContainerLog asks runtime to reopen the stdout/stderr log file
// for the container. If it returns error, new container log file MUST NOT
// be created.
ReopenContainerLog(ContainerID string) error
}
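
On the caller's side, the interface addition implies a rotate-then-reopen sequence. A sketch against any ContainerManager-like implementation; the names below are placeholders, not the kubelet's actual rotation code:

```go
package rotatesketch

import (
	"fmt"
	"os"
)

// logReopener is the slice of ContainerManager this flow needs.
type logReopener interface {
	ReopenContainerLog(containerID string) error
}

// rotate renames the live log aside and asks the runtime to start a fresh
// file at the original path. Per the contract above, a reopen error means
// no new log file was created, so we surface it to the caller.
func rotate(r logReopener, containerID, logPath string) error {
	if err := os.Rename(logPath, logPath+".1"); err != nil {
		return err
	}
	if err := r.ReopenContainerLog(containerID); err != nil {
		return fmt.Errorf("reopen after rotating %s: %v", logPath, err)
	}
	return nil
}
```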
// PodSandboxManager contains methods for operating on PodSandboxes. The methods
@ -74,7 +78,7 @@ type PodSandboxManager interface {
PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error)
}
// ContainerStatsManager contains methods for retriving the container
// ContainerStatsManager contains methods for retrieving the container
// statistics.
type ContainerStatsManager interface {
// ContainerStats returns stats of the container. If the container does not

@ -0,0 +1,41 @@
// +build windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
const (
// HypervIsolationAnnotationKey and HypervIsolationValue are used to run windows containers with hyperv isolation.
// Refer https://aka.ms/hyperv-container.
HypervIsolationAnnotationKey = "experimental.windows.kubernetes.io/isolation-type"
HypervIsolationValue = "hyperv"
)
// ShouldIsolatedByHyperV returns true if a windows container should be run with hyperv isolation.
func ShouldIsolatedByHyperV(annotations map[string]string) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.HyperVContainer) {
return false
}
v, ok := annotations[HypervIsolationAnnotationKey]
return ok && v == HypervIsolationValue
}
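
Usage is just an annotation lookup once the HyperVContainer feature gate passes; stripped of the gate dependency, the check amounts to the following (illustration only):

```go
package main

import "fmt"

const (
	hypervIsolationAnnotationKey = "experimental.windows.kubernetes.io/isolation-type"
	hypervIsolationValue         = "hyperv"
)

// shouldIsolateByHyperV is the annotation check from above, minus the
// feature-gate guard.
func shouldIsolateByHyperV(annotations map[string]string) bool {
	v, ok := annotations[hypervIsolationAnnotationKey]
	return ok && v == hypervIsolationValue
}

func main() {
	pod := map[string]string{hypervIsolationAnnotationKey: "hyperv"}
	fmt.Println(shouldIsolateByHyperV(pod)) // true
}
```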

@ -54,7 +54,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/kubelet/util/ioutils:go_default_library",
"//pkg/util/hash:go_default_library",
@ -88,7 +88,6 @@ go_test(
"sync_result_test.go",
],
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/kubelet/container",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/install:go_default_library",
@ -102,7 +101,6 @@ go_test(
go_test(
name = "go_default_xtest",
srcs = ["runtime_cache_test.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/container_test",
deps = [
":go_default_library",
"//pkg/kubelet/container/testing:go_default_library",

@ -19,6 +19,8 @@ package container
import (
"fmt"
"time"
"github.com/golang/glog"
)
// Specified a policy for garbage collecting containers.
@ -58,7 +60,7 @@ type realContainerGC struct {
// Policy for garbage collection.
policy ContainerGCPolicy
// sourcesReadyProvider provides the readyness of kubelet configuration sources.
// sourcesReadyProvider provides the readiness of kubelet configuration sources.
sourcesReadyProvider SourcesReadyProvider
}
@ -80,5 +82,6 @@ func (cgc *realContainerGC) GarbageCollect() error {
}
func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
glog.Infof("attempting to delete unused containers")
return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
}

@ -31,7 +31,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
hashutil "k8s.io/kubernetes/pkg/util/hash"
@ -46,9 +46,9 @@ type HandlerRunner interface {
// RuntimeHelper wraps kubelet to make container runtime
// able to get necessary informations like the RunContainerOptions, DNS settings, Host IP.
type RuntimeHelper interface {
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, err error)
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, cleanupAction func(), err error)
GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error)
// GetPodCgroupParent returns the CgroupName identifer, and its literal cgroupfs form on the host
// GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host
// of a pod.
GetPodCgroupParent(pod *v1.Pod) string
GetPodDir(podUID types.UID) string
@ -302,7 +302,7 @@ func GetContainerSpec(pod *v1.Pod, containerName string) *v1.Container {
// HasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func HasPrivilegedContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
for _, c := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
if c.SecurityContext != nil &&
c.SecurityContext.Privileged != nil &&
*c.SecurityContext.Privileged {

@ -254,6 +254,20 @@ func TestHasPrivilegedContainer(t *testing.T) {
t.Errorf("%s expected %t but got %t", k, v.expected, actual)
}
}
// Test init containers as well.
for k, v := range tests {
pod := &v1.Pod{
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{SecurityContext: v.securityContext},
},
},
}
actual := HasPrivilegedContainer(pod)
if actual != v.expected {
t.Errorf("%s expected %t but got %t", k, v.expected, actual)
}
}
}
func TestMakePortMappings(t *testing.T) {

@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/remotecommand"
"k8s.io/client-go/util/flowcontrol"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/volume"
)
@ -265,6 +265,13 @@ const (
ContainerStateUnknown ContainerState = "unknown"
)
type ContainerType string
const (
ContainerTypeInit ContainerType = "INIT"
ContainerTypeRegular ContainerType = "REGULAR"
)
// Container provides the runtime information for a container, such as ID, hash,
// state of the container.
type Container struct {
@ -375,6 +382,11 @@ type EnvVar struct {
Value string
}
type Annotation struct {
Name string
Value string
}
type Mount struct {
// Name of the volume mount.
// TODO(yifan): Remove this field, as this is not representing the unique name of the mount,
@ -424,6 +436,10 @@ type RunContainerOptions struct {
Devices []DeviceInfo
// The port mappings for the containers.
PortMappings []PortMapping
// The annotations for the container
// These annotations are generated by other components (i.e.,
// not users). Currently, only device plugins populate the annotations.
Annotations []Annotation
// If the container has specified the TerminationMessagePath, then
// this directory will be used to create and mount the log file to
// container.TerminationMessagePath
@ -454,6 +470,9 @@ type VolumeInfo struct {
// Whether the volume permission is set to read-only or not
// This value is passed from volume.spec
ReadOnly bool
// Inner volume spec name, which is the PV name if used, otherwise
// it is the same as the outer volume spec name.
InnerVolumeSpecName string
}
type VolumeMap map[string]VolumeInfo

@ -17,8 +17,10 @@ limitations under the License.
package kubelet
import (
"context"
"crypto/tls"
"fmt"
"math"
"net"
"net/http"
"net/url"
@ -73,7 +75,9 @@ import (
"k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/pleg"
@ -252,8 +256,8 @@ type Dependencies struct {
// KubeletConfiguration or returns an error.
func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName, bootstrapCheckpointPath string) (*config.PodConfig, error) {
manifestURLHeader := make(http.Header)
if len(kubeCfg.ManifestURLHeader) > 0 {
for k, v := range kubeCfg.ManifestURLHeader {
if len(kubeCfg.StaticPodURLHeader) > 0 {
for k, v := range kubeCfg.StaticPodURLHeader {
for i := range v {
manifestURLHeader.Add(k, v[i])
}
@ -264,23 +268,25 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
cfg := config.NewPodConfig(config.PodConfigNotificationIncremental, kubeDeps.Recorder)
// define file config source
if kubeCfg.PodManifestPath != "" {
glog.Infof("Adding manifest path: %v", kubeCfg.PodManifestPath)
config.NewSourceFile(kubeCfg.PodManifestPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource))
if kubeCfg.StaticPodPath != "" {
glog.Infof("Adding pod path: %v", kubeCfg.StaticPodPath)
config.NewSourceFile(kubeCfg.StaticPodPath, nodeName, kubeCfg.FileCheckFrequency.Duration, cfg.Channel(kubetypes.FileSource))
}
// define url config source
if kubeCfg.ManifestURL != "" {
glog.Infof("Adding manifest url %q with HTTP header %v", kubeCfg.ManifestURL, manifestURLHeader)
config.NewSourceURL(kubeCfg.ManifestURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource))
if kubeCfg.StaticPodURL != "" {
glog.Infof("Adding pod url %q with HTTP header %v", kubeCfg.StaticPodURL, manifestURLHeader)
config.NewSourceURL(kubeCfg.StaticPodURL, manifestURLHeader, nodeName, kubeCfg.HTTPCheckFrequency.Duration, cfg.Channel(kubetypes.HTTPSource))
}
// Restore from the checkpoint path
// NOTE: This MUST happen before creating the apiserver source
// below, or the checkpoint would override the source of truth.
updatechannel := cfg.Channel(kubetypes.ApiserverSource)
var updatechannel chan<- interface{}
if bootstrapCheckpointPath != "" {
glog.Infof("Adding checkpoint path: %v", bootstrapCheckpointPath)
updatechannel = cfg.Channel(kubetypes.ApiserverSource)
err := cfg.Restore(bootstrapCheckpointPath, updatechannel)
if err != nil {
return nil, err
@ -289,6 +295,9 @@ func makePodSourceConfig(kubeCfg *kubeletconfiginternal.KubeletConfiguration, ku
if kubeDeps.KubeClient != nil {
glog.Infof("Watching apiserver")
if updatechannel == nil {
updatechannel = cfg.Channel(kubetypes.ApiserverSource)
}
config.NewSourceApiserver(kubeDeps.KubeClient, nodeName, updatechannel)
}
return cfg, nil
@ -369,7 +378,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
return nil, fmt.Errorf("failed to get instances from cloud provider")
}
nodeName, err = instances.CurrentNodeName(hostname)
nodeName, err = instances.CurrentNodeName(context.TODO(), hostname)
if err != nil {
return nil, fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
}
@ -377,7 +386,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
nodeAddresses, err := instances.NodeAddresses(nodeName)
nodeAddresses, err := instances.NodeAddresses(context.TODO(), nodeName)
if err != nil {
return nil, fmt.Errorf("failed to get the addresses of the current instance from the cloud provider: %v", err)
}
@ -551,11 +560,11 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
return nil, err
}
klet.networkPlugin = plug
machineInfo, err := klet.GetCachedMachineInfo()
machineInfo, err := klet.cadvisor.MachineInfo()
if err != nil {
return nil, err
}
klet.machineInfo = machineInfo
imageBackOff := flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
@ -590,6 +599,10 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
var nl *NoOpLegacyHost
pluginSettings.LegacyRuntimeHost = nl
if containerRuntime == kubetypes.RktContainerRuntime {
glog.Warningln("rktnetes has been deprecated in favor of rktlet. Please see https://github.com/kubernetes-incubator/rktlet for more information.")
}
// rktnetes cannot be run with CRI.
if containerRuntime != kubetypes.RktContainerRuntime {
// kubelet defers to the runtime shim to setup networking. Setting
@ -610,9 +623,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
if err != nil {
return nil, err
}
if err := ds.Start(); err != nil {
return nil, err
}
// For now, the CRI shim redirects the streaming requests to the
// kubelet, which handles the requests using DockerService..
klet.criHandler = ds
@ -633,8 +643,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
return nil, err
}
if !supported {
klet.dockerLegacyService = ds.NewDockerLegacyService()
legacyLogProvider = dockershim.NewLegacyLogProvider(klet.dockerLegacyService)
klet.dockerLegacyService = ds
legacyLogProvider = ds
}
case kubetypes.RemoteContainerRuntime:
// No-op.
@ -687,7 +697,8 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.podManager,
klet.runtimeCache,
runtimeService,
imageService)
imageService,
stats.NewLogMetricsService())
}
} else {
// rkt uses the legacy, non-CRI, integration. Configure it the old way.
@ -743,12 +754,27 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))
// setup imageManager
imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy)
imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, crOptions.PodSandboxImage)
if err != nil {
return nil, fmt.Errorf("failed to initialize image manager: %v", err)
}
klet.imageManager = imageManager
if containerRuntime == kubetypes.RemoteContainerRuntime && utilfeature.DefaultFeatureGate.Enabled(features.CRIContainerLogRotation) {
// setup containerLogManager for CRI container runtime
containerLogManager, err := logs.NewContainerLogManager(
klet.runtimeService,
kubeCfg.ContainerLogMaxSize,
int(kubeCfg.ContainerLogMaxFiles),
)
if err != nil {
return nil, fmt.Errorf("failed to initialize container log manager: %v", err)
}
klet.containerLogManager = containerLogManager
} else {
klet.containerLogManager = logs.NewStubContainerLogManager()
}
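
ContainerLogMaxSize arrives as a quantity string (e.g. "10Mi"); a quick sketch of turning it into a byte count with the apimachinery resource package, which is the kind of parsing this path relies on:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q, err := resource.ParseQuantity("10Mi")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.Value()) // 10485760 bytes
}
```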
klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet)
if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) && kubeDeps.TLSOptions != nil {
@ -773,7 +799,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeDeps.TLSOptions.Config.GetCertificate = func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
cert := klet.serverCertificateManager.Current()
if cert == nil {
return nil, fmt.Errorf("no certificate available")
return nil, fmt.Errorf("no serving certificate available for the kubelet")
}
return cert, nil
}
@ -984,6 +1010,9 @@ type Kubelet struct {
// Manager for image garbage collection.
imageManager images.ImageGCManager
// Manager for container logs.
containerLogManager logs.ContainerLogManager
// Secret manager.
secretManager secret.Manager
@ -1264,7 +1293,7 @@ func (kl *Kubelet) StartGarbageCollection() {
// Note that the modules here must not depend on modules that are not initialized here.
func (kl *Kubelet) initializeModules() error {
// Prometheus metrics.
metrics.Register(kl.runtimeCache)
metrics.Register(kl.runtimeCache, collectors.NewVolumeStatsCollector(kl))
// Setup filesystem directories.
if err := kl.setupDataDirs(); err != nil {
@ -1286,16 +1315,6 @@ func (kl *Kubelet) initializeModules() error {
kl.serverCertificateManager.Start()
}
// Start container manager.
node, err := kl.getNodeAnyWay()
if err != nil {
return fmt.Errorf("Kubelet failed to get node info: %v", err)
}
if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil {
return fmt.Errorf("Failed to start ContainerManager %v", err)
}
// Start out of memory watcher.
if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
return fmt.Errorf("Failed to start OOM watcher %v", err)
@ -1319,8 +1338,27 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
// TODO(random-liu): Add backoff logic in the babysitter
glog.Fatalf("Failed to start cAdvisor %v", err)
}
// trigger on-demand stats collection once so that we have capacity information for ephemeral storage.
// ignore any errors, since if stats collection is not successful, the container manager will fail to start below.
kl.StatsProvider.GetCgroupStats("/", true)
// Start container manager.
node, err := kl.getNodeAnyWay()
if err != nil {
// Fail kubelet and rely on the babysitter to retry starting kubelet.
glog.Fatalf("Kubelet failed to get node info: %v", err)
}
// containerManager must start after cAdvisor because it needs filesystem capacity information
if err := kl.containerManager.Start(node, kl.GetActivePods, kl.sourcesReady, kl.statusManager, kl.runtimeService); err != nil {
// Fail kubelet and rely on the babysitter to retry starting kubelet.
glog.Fatalf("Failed to start ContainerManager %v", err)
}
// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
kl.evictionManager.Start(kl.cadvisor, kl.GetActivePods, kl.podResourcesAreReclaimed, kl.containerManager, evictionMonitoringPeriod)
kl.evictionManager.Start(kl.StatsProvider, kl.GetActivePods, kl.podResourcesAreReclaimed, evictionMonitoringPeriod)
// container log manager must start after container runtime is up to retrieve information from container runtime
// and inform container to reopen log file after log rotation.
kl.containerLogManager.Start()
}
// Run starts the kubelet reacting to config updates
@ -1334,8 +1372,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
if err := kl.initializeModules(); err != nil {
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
glog.Error(err)
kl.runtimeState.setInitError(err)
glog.Fatal(err)
}
// Start volume manager
@ -1746,12 +1783,22 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
housekeepingTicker := time.NewTicker(housekeepingPeriod)
defer housekeepingTicker.Stop()
plegCh := kl.pleg.Watch()
const (
base = 100 * time.Millisecond
max = 5 * time.Second
factor = 2
)
duration := base
for {
if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
glog.Infof("skipping pod synchronization - %v", rs)
time.Sleep(5 * time.Second)
// exponential backoff
time.Sleep(duration)
duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
continue
}
// reset backoff if we have a success
duration = base
kl.syncLoopMonitor.Store(kl.clock.Now())
if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) {
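
The new backoff in isolation: the sleep doubles on each consecutive runtime error, is capped at max, and resets to base after a clean iteration.

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const (
		base   = 100 * time.Millisecond
		max    = 5 * time.Second
		factor = 2
	)
	duration := base
	for i := 0; i < 8; i++ {
		fmt.Println(duration) // 100ms, 200ms, 400ms, ... capped at 5s
		duration = time.Duration(math.Min(float64(max), factor*float64(duration)))
	}
}
```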

@ -211,6 +211,11 @@ func (kl *Kubelet) GetNodeConfig() cm.NodeConfig {
return kl.containerManager.GetNodeConfig()
}
// GetPodCgroupRoot returns the listeral cgroupfs value for the cgroup containing all pods
func (kl *Kubelet) GetPodCgroupRoot() string {
return kl.containerManager.GetPodCgroupRoot()
}
// GetHostIP returns host IP or nil in case of error.
func (kl *Kubelet) GetHostIP() (net.IP, error) {
node, err := kl.GetNode()
@ -269,6 +274,24 @@ func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, err
return volumes, nil
}
func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string, error) {
mountedVolumes := []string{}
volumePaths, err := kl.getPodVolumePathListFromDisk(podUID)
if err != nil {
return mountedVolumes, err
}
for _, volumePath := range volumePaths {
isNotMount, err := kl.mounter.IsLikelyNotMountPoint(volumePath)
if err != nil {
return mountedVolumes, err
}
if !isNotMount {
mountedVolumes = append(mountedVolumes, volumePath)
}
}
return mountedVolumes, nil
}
// GetVersionInfo returns information about the version of cAdvisor in use.
func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
return kl.cadvisor.VersionInfo()
@ -276,12 +299,5 @@ func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) {
// GetCachedMachineInfo assumes that the machine info can't change without a reboot
func (kl *Kubelet) GetCachedMachineInfo() (*cadvisorapiv1.MachineInfo, error) {
if kl.machineInfo == nil {
info, err := kl.cadvisor.MachineInfo()
if err != nil {
return nil, err
}
kl.machineInfo = info
}
return kl.machineInfo, nil
}

@ -22,7 +22,7 @@ import (
"github.com/golang/glog"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
@ -282,7 +282,7 @@ func getIPTablesMark(bit int) string {
return fmt.Sprintf("%#08x/%#08x", value, value)
}
// GetPodDNS returns DNS setttings for the pod.
// GetPodDNS returns DNS settings for the pod.
// This function is defined in kubecontainer.RuntimeHelper interface so we
// have to implement it.
func (kl *Kubelet) GetPodDNS(pod *v1.Pod) (*runtimeapi.DNSConfig, error) {
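
Unrelated to the typo fix, the getIPTablesMark context above is worth a worked example. Assuming value is the single bit shifted into place (as in the kubelet source), bit 14 renders as "0x004000/0x004000", since the width of 8 in %#08x includes the 0x prefix:

```go
package main

import "fmt"

// getIPTablesMark formats a mark/mask pair for the given bit.
func getIPTablesMark(bit int) string {
	value := 1 << uint(bit)
	return fmt.Sprintf("%#08x/%#08x", value, value)
}

func main() {
	fmt.Println(getIPTablesMark(14)) // 0x004000/0x004000
}
```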

@ -17,12 +17,13 @@ limitations under the License.
package kubelet
import (
"context"
"fmt"
"math"
"net"
goruntime "runtime"
"sort"
"strings"
"sync"
"time"
"github.com/golang/glog"
@ -41,11 +42,10 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
volutil "k8s.io/kubernetes/pkg/volume/util"
)
const (
@ -189,8 +189,8 @@ func (kl *Kubelet) updateDefaultLabels(initialNode, existingNode *v1.Node) bool
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
var (
existingCMAAnnotation = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]
newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation]
existingCMAAnnotation = existingNode.Annotations[volutil.ControllerManagedAttachAnnotation]
newCMAAnnotation, newSet = node.Annotations[volutil.ControllerManagedAttachAnnotation]
)
if newCMAAnnotation == existingCMAAnnotation {
@ -202,13 +202,13 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v
// the correct value of the annotation.
if !newSet {
glog.Info("Controller attach-detach setting changed to false; updating existing Node")
delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation)
delete(existingNode.Annotations, volutil.ControllerManagedAttachAnnotation)
} else {
glog.Info("Controller attach-detach setting changed to true; updating existing Node")
if existingNode.Annotations == nil {
existingNode.Annotations = make(map[string]string)
}
existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation
existingNode.Annotations[volutil.ControllerManagedAttachAnnotation] = newCMAAnnotation
}
return true
@ -269,7 +269,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
}
glog.Infof("Setting node annotation to enable volume controller attach/detach")
node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
node.Annotations[volutil.ControllerManagedAttachAnnotation] = "true"
} else {
glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
}
@ -279,7 +279,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
node.Annotations = make(map[string]string)
}
glog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node")
node.Annotations[volumehelper.KeepTerminatedPodVolumesAnnotation] = "true"
node.Annotations[volutil.KeepTerminatedPodVolumesAnnotation] = "true"
}
// @question: should this be place after the call to the cloud provider? which also applies labels
@ -303,7 +303,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
// TODO(roberthbailey): Can we do this without having credentials to talk
// to the cloud provider?
// TODO: ExternalID is deprecated, we'll have to drop this code
externalID, err := instances.ExternalID(kl.nodeName)
externalID, err := instances.ExternalID(context.TODO(), kl.nodeName)
if err != nil {
return nil, fmt.Errorf("failed to get external ID from cloud provider: %v", err)
}
@ -313,13 +313,13 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
// cloudprovider from arbitrary nodes. At most, we should talk to a
// local metadata server here.
if node.Spec.ProviderID == "" {
node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(kl.cloud, kl.nodeName)
node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(context.TODO(), kl.cloud, kl.nodeName)
if err != nil {
return nil, err
}
}
instanceType, err := instances.InstanceType(kl.nodeName)
instanceType, err := instances.InstanceType(context.TODO(), kl.nodeName)
if err != nil {
return nil, err
}
@ -330,7 +330,7 @@ func (kl *Kubelet) initialNode() (*v1.Node, error) {
// If the cloud has zone information, label the node with the zone information
zones, ok := kl.cloud.Zones()
if ok {
zone, err := zones.GetZone()
zone, err := zones.GetZone(context.TODO())
if err != nil {
return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
}
@ -453,7 +453,7 @@ func (kl *Kubelet) setNodeAddress(node *v1.Node) error {
// to the cloud provider?
// TODO(justinsb): We can if CurrentNodeName() was actually CurrentNode() and returned an interface
// TODO: If IP addresses couldn't be fetched from the cloud provider, should kubelet fallback on the other methods for getting the IP below?
nodeAddresses, err := instances.NodeAddresses(kl.nodeName)
nodeAddresses, err := instances.NodeAddresses(context.TODO(), kl.nodeName)
if err != nil {
return fmt.Errorf("failed to get node address from cloud provider: %v", err)
}
@ -548,6 +548,10 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
}
}
var devicePluginAllocatable v1.ResourceList
var devicePluginCapacity v1.ResourceList
var removedDevicePlugins []string
// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
info, err := kl.GetCachedMachineInfo()
@ -592,16 +596,25 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
}
}
devicePluginCapacity, removedDevicePlugins := kl.containerManager.GetDevicePluginResourceCapacity()
devicePluginCapacity, devicePluginAllocatable, removedDevicePlugins = kl.containerManager.GetDevicePluginResourceCapacity()
if devicePluginCapacity != nil {
for k, v := range devicePluginCapacity {
glog.V(2).Infof("Update capacity for %s to %d", k, v.Value())
node.Status.Capacity[k] = v
}
}
for _, removedResource := range removedDevicePlugins {
glog.V(2).Infof("Remove capacity for %s", removedResource)
delete(node.Status.Capacity, v1.ResourceName(removedResource))
glog.V(2).Infof("Set capacity for %s to 0 on device removal", removedResource)
// Set the capacity of the removed resource to 0 instead of
// removing the resource from the node status. This is to indicate
// that the resource is managed by device plugin and had been
// registered before.
//
// This is required to differentiate the device plugin managed
// resources and the cluster-level resources, which are absent in
// node status.
node.Status.Capacity[v1.ResourceName(removedResource)] = *resource.NewQuantity(int64(0), resource.DecimalSI)
}
}
@ -629,6 +642,12 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
}
node.Status.Allocatable[k] = value
}
if devicePluginAllocatable != nil {
for k, v := range devicePluginAllocatable {
glog.V(2).Infof("Update allocatable for %s to %d", k, v.Value())
node.Status.Allocatable[k] = v
}
}
// for every huge page reservation, we need to remove it from allocatable memory
for k, v := range node.Status.Capacity {
if v1helper.IsHugePageResourceName(k) {
@ -682,7 +701,6 @@ func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
return
}
// sort the images from max to min, and only set top N images into the node status.
sort.Sort(sliceutils.ByImageSize(containerImages))
if maxImagesInNodeStatus < len(containerImages) {
containerImages = containerImages[0:maxImagesInNodeStatus]
}
@ -723,17 +741,28 @@ func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) {
// This is due to an issue with version skewed kubelet and master components.
// ref: https://github.com/kubernetes/kubernetes/issues/16961
currentTime := metav1.NewTime(kl.clock.Now())
var newNodeReadyCondition v1.NodeCondition
newNodeReadyCondition := v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: currentTime,
}
rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
if len(rs) == 0 {
newNodeReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: currentTime,
requiredCapacities := []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory, v1.ResourcePods}
if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
requiredCapacities = append(requiredCapacities, v1.ResourceEphemeralStorage)
}
missingCapacities := []string{}
for _, resource := range requiredCapacities {
if _, found := node.Status.Capacity[resource]; !found {
missingCapacities = append(missingCapacities, string(resource))
}
} else {
}
if len(missingCapacities) > 0 {
rs = append(rs, fmt.Sprintf("Missing node capacity for resources: %s", strings.Join(missingCapacities, ", ")))
}
if len(rs) > 0 {
newNodeReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
@ -742,7 +771,6 @@ func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) {
LastHeartbeatTime: currentTime,
}
}
// Append AppArmor status if it's enabled.
// TODO(tallclair): This is a temporary message until node feature reporting is added.
if newNodeReadyCondition.Status == v1.ConditionTrue &&
@ -841,6 +869,62 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *v1.Node) {
}
}
// setNodePIDPressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodePIDPressureCondition(node *v1.Node) {
currentTime := metav1.NewTime(kl.clock.Now())
var condition *v1.NodeCondition
// Check if NodePIDPressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == v1.NodePIDPressure {
condition = &node.Status.Conditions[i]
}
}
newCondition := false
// If the NodePIDPressure condition doesn't exist, create one
if condition == nil {
condition = &v1.NodeCondition{
Type: v1.NodePIDPressure,
Status: v1.ConditionUnknown,
}
// cannot be appended to node.Status.Conditions here because it gets
// copied to the slice. So if we append to the slice here none of the
// updates we make below are reflected in the slice.
newCondition = true
}
// Update the heartbeat time
condition.LastHeartbeatTime = currentTime
// Note: The conditions below take care of the case when a new NodePIDPressure condition is
// created and as well as the case when the condition already exists. When a new condition
// is created its status is set to v1.ConditionUnknown which matches either
// condition.Status != v1.ConditionTrue or
// condition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is under PID pressure or not.
if kl.evictionManager.IsUnderPIDPressure() {
if condition.Status != v1.ConditionTrue {
condition.Status = v1.ConditionTrue
condition.Reason = "KubeletHasInsufficientPID"
condition.Message = "kubelet has insufficient PID available"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasInsufficientPID")
}
} else if condition.Status != v1.ConditionFalse {
condition.Status = v1.ConditionFalse
condition.Reason = "KubeletHasSufficientPID"
condition.Message = "kubelet has sufficient PID available"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientPID")
}
if newCondition {
node.Status.Conditions = append(node.Status.Conditions, *condition)
}
}
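
The "cannot be appended to node.Status.Conditions here because it gets copied to the slice" comment above is the usual Go value-copy pitfall; a minimal demonstration:

```go
package main

import "fmt"

type condition struct{ Status string }

func main() {
	var conds []condition
	c := &condition{Status: "Unknown"}
	conds = append(conds, *c)    // append copies the value
	c.Status = "False"           // mutates only the local copy
	fmt.Println(conds[0].Status) // still "Unknown"
}
```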
// setNodeDiskPressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) {
@ -932,10 +1016,15 @@ func (kl *Kubelet) setNodeOODCondition(node *v1.Node) {
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
// TODO: why is this a package var?
var oldNodeUnschedulable bool
var (
oldNodeUnschedulable bool
oldNodeUnschedulableLock sync.Mutex
)
// record if node schedulable change.
func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) {
oldNodeUnschedulableLock.Lock()
defer oldNodeUnschedulableLock.Unlock()
if oldNodeUnschedulable != node.Spec.Unschedulable {
if node.Spec.Unschedulable {
kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
@ -983,6 +1072,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
withoutError(kl.setNodeOODCondition),
withoutError(kl.setNodeMemoryPressureCondition),
withoutError(kl.setNodeDiskPressureCondition),
withoutError(kl.setNodePIDPressureCondition),
withoutError(kl.setNodeReadyCondition),
withoutError(kl.setNodeVolumesInUseStatus),
withoutError(kl.recordNodeSchedulableEvent),

@ -53,7 +53,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util"
)
const (
@ -96,7 +96,7 @@ func generateImageTags() []string {
// that kubelet report up to maxNamesPerImageInNodeStatus tags.
count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
for ; count > 0; count-- {
tagList = append(tagList, "gcr.io/google_containers:v"+strconv.Itoa(count))
tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count))
}
return tagList
}
@ -120,12 +120,12 @@ func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error)
type localCM struct {
cm.ContainerManager
allocatable v1.ResourceList
capacity v1.ResourceList
allocatableReservation v1.ResourceList
capacity v1.ResourceList
}
func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
return lcm.allocatable
return lcm.allocatableReservation
}
func (lcm *localCM) GetCapacity() v1.ResourceList {
@ -222,13 +222,15 @@ func TestUpdateNewNodeStatus(t *testing.T) {
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
kubeClient := testKubelet.fakeKubeClient
@ -248,7 +250,21 @@ func TestUpdateNewNodeStatus(t *testing.T) {
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
maxAge := 0 * time.Second
options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
kubelet.machineInfo = machineInfo
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@ -279,6 +295,14 @@ func TestUpdateNewNodeStatus(t *testing.T) {
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
@ -301,14 +325,16 @@ func TestUpdateNewNodeStatus(t *testing.T) {
KubeProxyVersion: version.Get().String(),
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
@ -335,7 +361,8 @@ func TestUpdateNewNodeStatus(t *testing.T) {
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, "NotReady should be last")
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
"NotReady should be last")
assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus)
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}
@ -347,13 +374,14 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatable: v1.ResourceList{
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
@ -387,6 +415,14 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
@ -423,7 +459,21 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
maxAge := 0 * time.Second
options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
kubelet.machineInfo = machineInfo
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@ -454,6 +504,14 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
@ -476,14 +534,16 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
KubeProxyVersion: version.Get().String(),
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
@ -492,12 +552,12 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
// images will be sorted from max to min in node status.
Images: []v1.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
SizeBytes: 123,
},
{
Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
SizeBytes: 123,
Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
SizeBytes: 456,
},
},
},
@ -565,7 +625,7 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
kubelet.heartbeatClient, err = v1core.NewForConfig(config)
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatable: v1.ResourceList{
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
},
@ -591,13 +651,15 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
},
}
@ -619,7 +681,21 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
maxAge := 0 * time.Second
options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 10E9,
}, nil)
mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 20E9,
}, nil)
kubelet.machineInfo = machineInfo
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@ -650,6 +726,14 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{}, //placeholder
},
NodeInfo: v1.NodeSystemInfo{
@ -665,14 +749,16 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
KubeProxyVersion: version.Get().String(),
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(10E9, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
@ -680,12 +766,12 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
},
Images: []v1.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
SizeBytes: 123,
},
{
Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
SizeBytes: 123,
Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
SizeBytes: 456,
},
},
},
@ -856,6 +942,7 @@ func TestRegisterWithApiServer(t *testing.T) {
Usage: 9,
Capacity: 10,
}, nil)
kubelet.machineInfo = machineInfo
done := make(chan struct{})
go func() {
@ -895,7 +982,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
if cmad {
node.Annotations = make(map[string]string)
node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
}
return node
@ -1047,7 +1134,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
require.NoError(t, err)
}
actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[volumehelper.ControllerManagedAttachAnnotation])
actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
}
}
@ -1063,12 +1150,14 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(40000, resource.DecimalSI),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(40000, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
}
kubeClient := testKubelet.fakeKubeClient
@ -1089,20 +1178,36 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
maxAge := 0 * time.Second
options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 3000,
Available: 600,
}, nil)
mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 3000,
Available: 600,
}, nil)
kubelet.machineInfo = machineInfo
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
},
},
}


@ -49,7 +49,7 @@ import (
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/fieldpath"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/envvars"
@ -61,9 +61,9 @@ import (
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
utilfile "k8s.io/kubernetes/pkg/util/file"
"k8s.io/kubernetes/pkg/volume"
mountutil "k8s.io/kubernetes/pkg/util/mount"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
volumevalidation "k8s.io/kubernetes/pkg/volume/validation"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
)
@ -129,7 +129,7 @@ func makeAbsolutePath(goos, path string) string {
// makeBlockVolumes maps the raw block devices specified in the path of the container
// Experimental
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumeutil.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVolumes kubecontainer.VolumeMap, blkutil volumepathhandler.BlockVolumePathHandler) ([]kubecontainer.DeviceInfo, error) {
var devices []kubecontainer.DeviceInfo
for _, device := range container.VolumeDevices {
// check path is absolute
@ -161,7 +161,7 @@ func (kl *Kubelet) makeBlockVolumes(pod *v1.Pod, container *v1.Container, podVol
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap, mounter mountutil.Interface) ([]kubecontainer.Mount, func(), error) {
// Kubernetes only mounts on /etc/hosts if:
// - container is not an infrastructure (pause) container
// - container is not already mounting on /etc/hosts
@ -171,13 +171,14 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
mountEtcHostsFile := len(podIP) > 0 && runtime.GOOS != "windows"
glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
for _, mount := range container.VolumeMounts {
var cleanupAction func() = nil
for i, mount := range container.VolumeMounts {
// do not mount /etc/hosts if container is already mounting on the path
mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
vol, ok := podVolumes[mount.Name]
if !ok || vol.Mounter == nil {
glog.Errorf("Mount cannot be satisfied for container %q, because the volume is missing or the volume mounter is nil: %+v", container.Name, mount)
return nil, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
return nil, cleanupAction, fmt.Errorf("cannot find volume %q to mount into container %q", mount.Name, container.Name)
}
relabelVolume := false
@ -188,27 +189,35 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
vol.SELinuxLabeled = true
relabelVolume = true
}
hostPath, err := volume.GetPath(vol.Mounter)
hostPath, err := volumeutil.GetPath(vol.Mounter)
if err != nil {
return nil, err
return nil, cleanupAction, err
}
if mount.SubPath != "" {
if !utilfeature.DefaultFeatureGate.Enabled(features.VolumeSubpath) {
return nil, cleanupAction, fmt.Errorf("volume subpaths are disabled")
}
if filepath.IsAbs(mount.SubPath) {
return nil, fmt.Errorf("error SubPath `%s` must not be an absolute path", mount.SubPath)
return nil, cleanupAction, fmt.Errorf("error SubPath `%s` must not be an absolute path", mount.SubPath)
}
err = volumevalidation.ValidatePathNoBacksteps(mount.SubPath)
if err != nil {
return nil, fmt.Errorf("unable to provision SubPath `%s`: %v", mount.SubPath, err)
return nil, cleanupAction, fmt.Errorf("unable to provision SubPath `%s`: %v", mount.SubPath, err)
}
fileinfo, err := os.Lstat(hostPath)
if err != nil {
return nil, err
return nil, cleanupAction, err
}
perm := fileinfo.Mode()
hostPath = filepath.Join(hostPath, mount.SubPath)
volumePath, err := filepath.EvalSymlinks(hostPath)
if err != nil {
return nil, cleanupAction, err
}
hostPath = filepath.Join(volumePath, mount.SubPath)
if subPathExists, err := utilfile.FileOrSymlinkExists(hostPath); err != nil {
glog.Errorf("Could not determine if subPath %s exists; will not attempt to change its permissions", hostPath)
@ -217,17 +226,25 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
// incorrect ownership and mode. For example, the sub path directory must have at least g+rwx
// when the pod specifies an fsGroup, and if the directory is not created here, Docker will
// later auto-create it with the incorrect mode 0750
if err := os.MkdirAll(hostPath, perm); err != nil {
glog.Errorf("failed to mkdir:%s", hostPath)
return nil, err
}
// chmod the sub path because umask may have prevented us from making the sub path with the same
// permissions as the mounter path
if err := os.Chmod(hostPath, perm); err != nil {
return nil, err
// Take extra care not to escape the volume!
if err := mounter.SafeMakeDir(hostPath, volumePath, perm); err != nil {
glog.Errorf("failed to mkdir %q: %v", hostPath, err)
return nil, cleanupAction, err
}
}
hostPath, cleanupAction, err = mounter.PrepareSafeSubpath(mountutil.Subpath{
VolumeMountIndex: i,
Path: hostPath,
VolumeName: vol.InnerVolumeSpecName,
VolumePath: volumePath,
PodDir: podDir,
ContainerName: container.Name,
})
if err != nil {
// Don't pass detailed error back to the user because it could give information about host filesystem
glog.Errorf("failed to prepare subPath for volumeMount %q of container %q: %v", mount.Name, container.Name, err)
return nil, cleanupAction, fmt.Errorf("failed to prepare subPath for volumeMount %q of container %q", mount.Name, container.Name)
}
}
// Docker Volume Mounts fail on Windows if it is not of the form C:/
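The rewritten subPath handling above is a hardening change: the volume root is resolved with EvalSymlinks before the subPath is joined, directory creation goes through mounter.SafeMakeDir, and the final path is staged via mounter.PrepareSafeSubpath, so a symlink planted inside the volume cannot redirect the mount onto an arbitrary host path. A self-contained sketch of the basic containment check (helper name and error text invented; the real code does considerably more, including the bind-mount staging and the cleanup action seen above):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// safeSubPath resolves symlinks in the volume root, joins the subPath, and
// rejects any result that escapes the resolved root.
func safeSubPath(volumeRoot, subPath string) (string, error) {
	resolvedRoot, err := filepath.EvalSymlinks(volumeRoot)
	if err != nil {
		return "", err
	}
	full := filepath.Join(resolvedRoot, subPath) // Join also cleans ".." segments
	rel, err := filepath.Rel(resolvedRoot, full)
	if err != nil || rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
		return "", fmt.Errorf("subPath %q escapes volume %q", subPath, volumeRoot)
	}
	return full, nil
}

func main() {
	fmt.Println(safeSubPath("/var/lib/kubelet/vol", "data/logs"))
	fmt.Println(safeSubPath("/var/lib/kubelet/vol", "../../etc/shadow"))
}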
@ -243,15 +260,17 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
propagation, err := translateMountPropagation(mount.MountPropagation)
if err != nil {
return nil, err
return nil, cleanupAction, err
}
glog.V(5).Infof("Pod %q container %q mount %q has propagation %q", format.Pod(pod), container.Name, mount.Name, propagation)
mustMountRO := vol.Mounter.GetAttributes().ReadOnly && utilfeature.DefaultFeatureGate.Enabled(features.ReadOnlyAPIDataVolumes)
mounts = append(mounts, kubecontainer.Mount{
Name: mount.Name,
ContainerPath: containerPath,
HostPath: hostPath,
ReadOnly: mount.ReadOnly,
ReadOnly: mount.ReadOnly || mustMountRO,
SELinuxRelabel: relabelVolume,
Propagation: propagation,
})
@ -260,16 +279,22 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
hostAliases := pod.Spec.HostAliases
hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain, hostAliases, pod.Spec.HostNetwork)
if err != nil {
return nil, err
return nil, cleanupAction, err
}
mounts = append(mounts, *hostsMount)
}
return mounts, nil
return mounts, cleanupAction, nil
}
// translateMountPropagation transforms v1.MountPropagationMode to
// runtimeapi.MountPropagation.
func translateMountPropagation(mountMode *v1.MountPropagationMode) (runtimeapi.MountPropagation, error) {
if runtime.GOOS == "windows" {
// Windows containers doesn't support mount propagation, use private for it.
// Refer https://docs.docker.com/storage/bind-mounts/#configure-bind-propagation.
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.MountPropagation) {
// mount propagation is disabled, use private as in the old versions
return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
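Only the two early returns are visible in this hunk; presumably the rest of translateMountPropagation maps the API enum onto the CRI constants along these lines (a hedged sketch, not the verified function body; in particular the default for a nil mode has shifted across releases):

switch {
case mountMode == nil:
	// Default when the pod spec sets nothing; private is shown for
	// illustration, though the effective default is release-dependent.
	return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, nil
case *mountMode == v1.MountPropagationHostToContainer:
	return runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER, nil
case *mountMode == v1.MountPropagationBidirectional:
	return runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL, nil
default:
	return runtimeapi.MountPropagation_PROPAGATION_PRIVATE, fmt.Errorf("invalid MountPropagation mode: %q", *mountMode)
}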
@ -430,49 +455,49 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string {
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, func(), error) {
opts, err := kl.containerManager.GetResources(pod, container)
if err != nil {
return nil, err
return nil, nil, err
}
cgroupParent := kl.GetPodCgroupParent(pod)
opts.CgroupParent = cgroupParent
hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
return nil, nil, err
}
opts.Hostname = hostname
podName := volumehelper.GetUniquePodName(pod)
podName := volumeutil.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = kubecontainer.MakePortMappings(container)
// TODO(random-liu): Move following convert functions into pkg/kubelet/container
devices, err := kl.makeGPUDevices(pod, container)
if err != nil {
return nil, err
return nil, nil, err
}
opts.Devices = append(opts.Devices, devices...)
// TODO: remove feature gate check after no longer needed
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
blkutil := volumeutil.NewBlockVolumePathHandler()
blkutil := volumepathhandler.NewBlockVolumePathHandler()
blkVolumes, err := kl.makeBlockVolumes(pod, container, volumes, blkutil)
if err != nil {
return nil, err
return nil, nil, err
}
opts.Devices = append(opts.Devices, blkVolumes...)
}
mounts, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
mounts, cleanupAction, err := makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes, kl.mounter)
if err != nil {
return nil, err
return nil, cleanupAction, err
}
opts.Mounts = append(opts.Mounts, mounts...)
envs, err := kl.makeEnvironmentVariables(pod, container, podIP)
if err != nil {
return nil, err
return nil, cleanupAction, err
}
opts.Envs = append(opts.Envs, envs...)
@ -492,7 +517,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Contai
opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
}
return opts, nil
return opts, cleanupAction, nil
}
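The signature change above threads a cleanup function out to the caller so that any host-side subPath staging performed by makeMounts can be undone once the runtime has consumed the mounts. A hypothetical call site (a fragment, not from this commit):

opts, cleanup, err := kl.GenerateRunContainerOptions(pod, container, podIP)
if cleanup != nil {
	// Run the cleanup even when err != nil: makeMounts may have staged
	// state before failing.
	defer cleanup()
}
if err != nil {
	return err
}
// ... hand opts to the container runtime ...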
var masterServices = sets.NewString("kubernetes")
@ -1127,7 +1152,7 @@ func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodS
switch {
case previous:
if lastState.Terminated == nil {
if lastState.Terminated == nil || lastState.Terminated.ContainerID == "" {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
}
cID = lastState.Terminated.ContainerID
@ -1136,9 +1161,21 @@ func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodS
cID = cStatus.ContainerID
case terminated != nil:
cID = terminated.ContainerID
// in cases where the next container didn't start, terminated.ContainerID will be empty, so get logs from the lastState.Terminated.
if terminated.ContainerID == "" {
if lastState.Terminated != nil && lastState.Terminated.ContainerID != "" {
cID = lastState.Terminated.ContainerID
} else {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
}
} else {
cID = terminated.ContainerID
}
case lastState.Terminated != nil:
if lastState.Terminated.ContainerID == "" {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is terminated", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case waiting != nil:
@ -1345,16 +1382,22 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
spec := &pod.Spec
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
s.Phase = getPhase(spec, allStatus)
// Check for illegal phase transition
if pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded {
// API server shows terminal phase; transitions are not allowed
if s.Phase != pod.Status.Phase {
glog.Errorf("Pod attempted illegal phase transition from %s to %s: %v", pod.Status.Phase, s.Phase, s)
// Force back to phase from the API server
s.Phase = pod.Status.Phase
}
}
kl.probeManager.UpdatePodStatus(pod.UID, s)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because convertStatusToAPIStatus()
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
// it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true.
if _, oldPodScheduled := podutil.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
s.Conditions = append(s.Conditions, *oldPodScheduled)
}
podutil.UpdatePodCondition(&pod.Status, &v1.PodCondition{
// Status manager will take care of the LastTransitionTimestamp, either preserve
// the timestamp from apiserver, or set a new one. When kubelet sees the pod,
// `PodScheduled` condition must be true.
s.Conditions = append(s.Conditions, v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
})
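Two invariants are introduced in this hunk: a phase the API server already reports as terminal can never regress to a live phase, and kubelet now always appends PodScheduled=true, leaving timestamp reconciliation to the status manager. A toy restatement of the terminal-phase rule (not kubelet code):

// Terminal phases are sticky: once the API server shows Failed or Succeeded,
// a freshly computed phase cannot override it.
func reconcilePhase(apiPhase, computed v1.PodPhase) v1.PodPhase {
	if apiPhase == v1.PodFailed || apiPhase == v1.PodSucceeded {
		return apiPhase
	}
	return computed
}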


@ -42,11 +42,12 @@ import (
// to "v1"?
"k8s.io/kubernetes/pkg/api/legacyscheme"
_ "k8s.io/kubernetes/pkg/apis/core/install"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/server/portforward"
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
"k8s.io/kubernetes/pkg/util/mount"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
@ -303,6 +304,7 @@ func TestMakeMounts(t *testing.T) {
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
fm := &mount.FakeMounter{}
pod := v1.Pod{
Spec: v1.PodSpec{
HostNetwork: true,
@ -315,7 +317,7 @@ func TestMakeMounts(t *testing.T) {
return
}
mounts, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes)
mounts, _, err := makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes, fm)
// validate only the error if we expect an error
if tc.expectErr {
@ -338,7 +340,7 @@ func TestMakeMounts(t *testing.T) {
t.Errorf("Failed to enable feature gate for MountPropagation: %v", err)
return
}
mounts, err = makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes)
mounts, _, err = makeMounts(&pod, "/pod", &tc.container, "fakepodname", "", "", tc.podVolumes, fm)
if !tc.expectErr {
expectedPrivateMounts := []kubecontainer.Mount{}
for _, mount := range tc.expectedMounts {
@ -353,6 +355,62 @@ func TestMakeMounts(t *testing.T) {
}
}
func TestDisabledSubpath(t *testing.T) {
fm := &mount.FakeMounter{}
pod := v1.Pod{
Spec: v1.PodSpec{
HostNetwork: true,
},
}
podVolumes := kubecontainer.VolumeMap{
"disk": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}},
}
cases := map[string]struct {
container v1.Container
expectError bool
}{
"subpath not specified": {
v1.Container{
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/mnt/path3",
Name: "disk",
ReadOnly: true,
},
},
},
false,
},
"subpath specified": {
v1.Container{
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/mnt/path3",
SubPath: "/must/not/be/absolute",
Name: "disk",
ReadOnly: true,
},
},
},
true,
},
}
utilfeature.DefaultFeatureGate.Set("VolumeSubpath=false")
defer utilfeature.DefaultFeatureGate.Set("VolumeSubpath=true")
for name, test := range cases {
_, _, err := makeMounts(&pod, "/pod", &test.container, "fakepodname", "", "", podVolumes, fm)
if err != nil && !test.expectError {
t.Errorf("test %v failed: %v", name, err)
}
if err == nil && test.expectError {
t.Errorf("test %v failed: expected error", name)
}
}
}
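TestDisabledSubpath flips the global VolumeSubpath gate and restores it with a second Set call; note that Set returns an error which the test discards. A slightly more defensive helper one might use instead (names invented; assumes the surrounding test file's imports plus fmt):

func withFeatureGate(t *testing.T, gate string, enabled bool, fn func()) {
	set := func(v bool) {
		if err := utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("%s=%t", gate, v)); err != nil {
			t.Fatalf("failed to set feature gate %s=%t: %v", gate, v, err)
		}
	}
	set(enabled)
	// Flip back afterwards; like the test above, this assumes the gate
	// started in the opposite state.
	defer set(!enabled)
	fn()
}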
func TestMakeBlockVolumes(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()


@ -24,6 +24,7 @@ import (
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/mount"
)
func TestMakeMountsWindows(t *testing.T) {
@ -64,7 +65,8 @@ func TestMakeMountsWindows(t *testing.T) {
},
}
mounts, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes)
fm := &mount.FakeMounter{}
mounts, _, _ := makeMounts(&pod, "/pod", &container, "fakepodname", "", "", podVolumes, fm)
expectedMounts := []kubecontainer.Mount{
{


@ -52,6 +52,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/gpu"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
"k8s.io/kubernetes/pkg/kubelet/pleg"
@ -72,7 +73,7 @@ import (
"k8s.io/kubernetes/pkg/volume"
_ "k8s.io/kubernetes/pkg/volume/host_path"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util"
)
func init() {
@ -127,12 +128,12 @@ func newTestKubelet(t *testing.T, controllerAttachDetachEnabled bool) *TestKubel
imageList := []kubecontainer.Image{
{
ID: "abc",
RepoTags: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
RepoTags: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
Size: 123,
},
{
ID: "efg",
RepoTags: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
RepoTags: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
Size: 456,
},
}
@ -256,12 +257,22 @@ func newTestKubeletWithImageList(
HighThresholdPercent: 90,
LowThresholdPercent: 80,
}
imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy)
imageGCManager, err := images.NewImageGCManager(fakeRuntime, kubelet.StatsProvider, fakeRecorder, fakeNodeRef, fakeImageGCPolicy, "")
assert.NoError(t, err)
kubelet.imageManager = &fakeImageGCManager{
fakeImageService: fakeRuntime,
ImageGCManager: imageGCManager,
}
kubelet.containerLogManager = logs.NewStubContainerLogManager()
containerGCPolicy := kubecontainer.ContainerGCPolicy{
MinAge: time.Duration(0),
MaxPerPodContainer: 1,
MaxContainers: -1,
}
containerGC, err := kubecontainer.NewContainerGC(fakeRuntime, containerGCPolicy, kubelet.sourcesReady)
assert.NoError(t, err)
kubelet.containerGC = containerGC
fakeClock := clock.NewFakeClock(time.Now())
kubelet.backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
kubelet.backOff.Clock = fakeClock
@ -334,8 +345,6 @@ func newTestPods(count int) []*v1.Pod {
return pods
}
var emptyPodUIDs map[types.UID]kubetypes.SyncPodType
func TestSyncLoopAbort(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
@ -583,8 +592,10 @@ func TestHandlePluginResources(t *testing.T) {
kl := testKubelet.kubelet
adjustedResource := v1.ResourceName("domain1.com/adjustedResource")
unadjustedResouce := v1.ResourceName("domain2.com/unadjustedResouce")
emptyResource := v1.ResourceName("domain2.com/emptyResource")
missingResource := v1.ResourceName("domain2.com/missingResource")
failedResource := v1.ResourceName("domain2.com/failedResource")
resourceQuantity0 := *resource.NewQuantity(int64(0), resource.DecimalSI)
resourceQuantity1 := *resource.NewQuantity(int64(1), resource.DecimalSI)
resourceQuantity2 := *resource.NewQuantity(int64(2), resource.DecimalSI)
resourceQuantityInvalid := *resource.NewQuantity(int64(-1), resource.DecimalSI)
@ -592,9 +603,9 @@ func TestHandlePluginResources(t *testing.T) {
nodes := []*v1.Node{
{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
adjustedResource: resourceQuantity1,
unadjustedResouce: resourceQuantity1,
v1.ResourcePods: allowedPodQuantity,
adjustedResource: resourceQuantity1,
emptyResource: resourceQuantity0,
v1.ResourcePods: allowedPodQuantity,
}}},
}
kl.nodeInfo = testNodeInfo{nodes: nodes}
@ -607,6 +618,7 @@ func TestHandlePluginResources(t *testing.T) {
// quantity unchanged.
updateResourceMap := map[v1.ResourceName]resource.Quantity{
adjustedResource: resourceQuantity2,
emptyResource: resourceQuantity0,
failedResource: resourceQuantityInvalid,
}
pod := attrs.Pod
@ -634,7 +646,7 @@ func TestHandlePluginResources(t *testing.T) {
// pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
// adjusts node.allocatableResource for this resource to a sufficient value.
fittingPodspec := v1.PodSpec{NodeName: string(kl.nodeName),
fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
adjustedResource: resourceQuantity2,
@ -644,14 +656,30 @@ func TestHandlePluginResources(t *testing.T) {
},
}}},
}
// pod requiring unadjustedResouce with insufficient quantity will still fail PredicateAdmit.
exceededPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
// pod requiring emptyResource (extended resources with 0 allocatable) will
// not pass PredicateAdmit.
emptyPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
unadjustedResouce: resourceQuantity2,
emptyResource: resourceQuantity2,
},
Requests: v1.ResourceList{
unadjustedResouce: resourceQuantity2,
emptyResource: resourceQuantity2,
},
}}},
}
// pod requiring missingResource will pass PredicateAdmit.
//
// Extended resources missing in node status are ignored in PredicateAdmit.
// This is required to support extended resources that are not managed by
// device plugin, such as cluster-level resources.
missingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
missingResource: resourceQuantity2,
},
Requests: v1.ResourceList{
missingResource: resourceQuantity2,
},
}}},
}
@ -666,21 +694,18 @@ func TestHandlePluginResources(t *testing.T) {
},
}}},
}
pods := []*v1.Pod{
podWithUIDNameNsSpec("123", "fittingpod", "foo", fittingPodspec),
podWithUIDNameNsSpec("456", "exceededpod", "foo", exceededPodSpec),
podWithUIDNameNsSpec("789", "failedpod", "foo", failedPodSpec),
}
// The latter two pod should be rejected.
fittingPod := pods[0]
exceededPod := pods[1]
failedPod := pods[2]
kl.HandlePodAdditions(pods)
fittingPod := podWithUIDNameNsSpec("1", "fittingpod", "foo", fittingPodSpec)
emptyPod := podWithUIDNameNsSpec("2", "emptypod", "foo", emptyPodSpec)
missingPod := podWithUIDNameNsSpec("3", "missingpod", "foo", missingPodSpec)
failedPod := podWithUIDNameNsSpec("4", "failedpod", "foo", failedPodSpec)
kl.HandlePodAdditions([]*v1.Pod{fittingPod, emptyPod, missingPod, failedPod})
// Check pod status stored in the status map.
checkPodStatus(t, kl, fittingPod, v1.PodPending)
checkPodStatus(t, kl, exceededPod, v1.PodFailed)
checkPodStatus(t, kl, emptyPod, v1.PodFailed)
checkPodStatus(t, kl, missingPod, v1.PodPending)
checkPodStatus(t, kl, failedPod, v1.PodFailed)
}
@ -733,7 +758,7 @@ func TestValidateContainerLogStatus(t *testing.T) {
Running: &v1.ContainerStateRunning{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
},
},
},
@ -761,9 +786,51 @@ func TestValidateContainerLogStatus(t *testing.T) {
},
},
},
success: false,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
},
},
},
success: true,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
},
},
success: false,
pSuccess: false,
},
{
statuses: []v1.ContainerStatus{
{
Name: containerName,
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{},
},
LastTerminationState: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{ContainerID: "docker://fakeid"},
},
},
},
success: true,
pSuccess: true,
},
{
statuses: []v1.ContainerStatus{
{
@ -2092,7 +2159,7 @@ func waitForVolumeUnmount(
func() (bool, error) {
// Verify volumes detached
podVolumes = volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
util.GetUniquePodName(pod))
if len(podVolumes) != 0 {
return false, nil


@ -57,6 +57,18 @@ func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
volumetypes.UniquePodName(podUID)); len(mountedVolumes) > 0 {
return true
}
// TODO: This checks pod volume paths and whether they are mounted. If the check returns an error, podVolumesExist will
// return true, which means we consider that volumes might exist and require further checking.
// Some volume plugins, such as flexvolume, might not have mounts. See issue #61229
volumePaths, err := kl.getMountedVolumePathListFromDisk(podUID)
if err != nil {
glog.Errorf("pod %q found, but error %v occurred during checking mounted volumes from disk", podUID, err)
return true
}
if len(volumePaths) > 0 {
glog.V(4).Infof("pod %q found, but volumes are still mounted on disk %v", podUID, volumePaths)
return true
}
return false
}
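The added block errs on the side of caution: an error from the disk check, or any leftover mount path, keeps the pod's volumes reported as existing so that cleanup is retried rather than skipped. The decision rule, restated as a toy helper (names invented):

// When in doubt, report volumes as present so reconciliation keeps retrying.
func volumesMaybePresent(paths []string, err error) bool {
	if err != nil {
		return true // could not verify; assume the worst
	}
	return len(paths) > 0
}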


@ -28,7 +28,7 @@ import (
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/pkg/volume/util"
)
func TestListVolumesForPod(t *testing.T) {
@ -64,7 +64,7 @@ func TestListVolumesForPod(t *testing.T) {
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
assert.NoError(t, err)
podName := volumehelper.GetUniquePodName(pod)
podName := util.GetUniquePodName(pod)
volumesToReturn, volumeExists := kubelet.ListVolumesForPod(types.UID(podName))
assert.True(t, volumeExists, "expected to find volumes for pod %q", podName)
@ -180,7 +180,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
assert.NoError(t, err)
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
util.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@ -227,7 +227,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
assert.NoError(t, err)
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
util.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@ -252,7 +252,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
// Verify volumes unmounted
podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
util.GetUniquePodName(pod))
assert.Len(t, podVolumes, 0,
"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
@ -317,7 +317,7 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
util.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@ -386,7 +386,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(pod))
podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
util.GetUniquePodName(pod))
expectedPodVolumes := []string{"vol1"}
assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
@ -410,7 +410,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
// Verify volumes unmounted
podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
volumehelper.GetUniquePodName(pod))
util.GetUniquePodName(pod))
assert.Len(t, podVolumes, 0,
"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)


@ -128,7 +128,6 @@ func drainWorkers(podWorkers *podWorkers, numPods int) {
func TestUpdatePod(t *testing.T) {
podWorkers, processed := createPodWorkers()
// Check whether all pod updates will be processed.
numPods := 20
for i := 0; i < numPods; i++ {
for j := i; j < numPods; j++ {
@ -151,6 +150,7 @@ func TestUpdatePod(t *testing.T) {
continue
}
// PodWorker guarantees the first and the last event will be processed
first := 0
last := len(processed[uid]) - 1
if processed[uid][first].name != string(0) {


@ -29,7 +29,6 @@ type runtimeState struct {
networkError error
internalError error
cidr string
initError error
healthChecks []*healthCheck
}
@ -78,19 +77,10 @@ func (s *runtimeState) podCIDR() string {
return s.cidr
}
func (s *runtimeState) setInitError(err error) {
s.Lock()
defer s.Unlock()
s.initError = err
}
func (s *runtimeState) runtimeErrors() []string {
s.RLock()
defer s.RUnlock()
var ret []string
if s.initError != nil {
ret = append(ret, s.initError.Error())
}
if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
ret = append(ret, "container runtime is down")
}


@ -18,6 +18,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/kubelet/types",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
@ -32,7 +33,6 @@ go_test(
"types_test.go",
],
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/kubelet/types",
deps = [
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",


@ -29,4 +29,5 @@ const (
NodeAllocatableEnforcementKey = "pods"
SystemReservedEnforcementKey = "system-reserved"
KubeReservedEnforcementKey = "kube-reserved"
NodeAllocatableNoneKey = "none"
)


@ -21,6 +21,7 @@ const (
KubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace"
KubernetesPodUIDLabel = "io.kubernetes.pod.uid"
KubernetesContainerNameLabel = "io.kubernetes.container.name"
KubernetesContainerTypeLabel = "io.kubernetes.container.type"
)
func GetContainerName(labels map[string]string) string {


@ -22,6 +22,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeapi "k8s.io/kubernetes/pkg/apis/core"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
const (
@ -139,15 +140,16 @@ func (sp SyncPodType) String() string {
}
}
// IsCriticalPod returns true if the pod bears the critical pod annotation
// key. Both the rescheduler and the kubelet use this key to make admission
// and scheduling decisions.
// IsCriticalPod returns true if the pod bears the critical pod annotation key or if the pod's priority is greater than
// or equal to SystemCriticalPriority. Both the rescheduler (deprecated in 1.10) and the kubelet use this function
// to make admission and scheduling decisions.
func IsCriticalPod(pod *v1.Pod) bool {
return IsCritical(pod.Namespace, pod.Annotations)
return IsCritical(pod.Namespace, pod.Annotations) || (pod.Spec.Priority != nil && IsCriticalPodBasedOnPriority(pod.Namespace, *pod.Spec.Priority))
}
// IsCritical returns true if parameters bear the critical pod annotation
// key. The DaemonSetController uses this key directly to make scheduling decisions.
// TODO: @ravig - Deprecated. Remove this when we move to resolving critical pods based on priorityClassName.
func IsCritical(ns string, annotations map[string]string) bool {
// Critical pods are restricted to "kube-system" namespace as of now.
if ns != kubeapi.NamespaceSystem {
@ -159,3 +161,15 @@ func IsCritical(ns string, annotations map[string]string) bool {
}
return false
}
// IsCriticalPodBasedOnPriority checks if the given pod is a critical pod based on the priority resolved from the pod's spec.
func IsCriticalPodBasedOnPriority(ns string, priority int32) bool {
// Critical pods are restricted to "kube-system" namespace as of now.
if ns != kubeapi.NamespaceSystem {
return false
}
if priority >= schedulerapi.SystemCriticalPriority {
return true
}
return false
}
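Hypothetical usage of the new priority-based path (a fragment, not from this commit; import aliases kubelettypes, schedulerapi, and metav1 assumed): a kube-system pod whose priority reaches SystemCriticalPriority is critical even without the legacy annotation.

prio := schedulerapi.SystemCriticalPriority
pod := &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Namespace: "kube-system", Name: "critical"},
	Spec:       v1.PodSpec{Priority: &prio},
}
fmt.Println(kubelettypes.IsCriticalPod(pod)) // true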


@ -158,7 +158,7 @@ func TestIsCriticalPod(t *testing.T) {
{
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod3",
Name: "pod4",
Namespace: "kube-system",
Annotations: map[string]string{
"scheduler.alpha.kubernetes.io/critical-pod": "",


@ -8,10 +8,13 @@ load(
go_test(
name = "go_default_test",
srcs = ["util_test.go"],
srcs = [
"util_test.go",
],
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/kubelet/util",
deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
deps = [
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)
go_library(


@ -23,7 +23,6 @@ go_test(
name = "go_default_test",
srcs = ["resources_test.go"],
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/kubelet/util/format",
deps = [
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",


@ -51,7 +51,7 @@ func PodWithDeletionTimestamp(pod *v1.Pod) string {
return Pod(pod) + deletionTimestamp
}
// Pods returns a string representating a list of pods in a human
// Pods returns a string representation of a list of pods in a human
// readable format.
func Pods(pods []*v1.Pod) string {
return aggregatePods(pods, Pod)


@ -33,7 +33,6 @@ go_test(
name = "go_default_test",
srcs = ["sliceutils_test.go"],
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/kubelet/util/sliceutils",
deps = [
"//pkg/kubelet/container:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",


@ -31,3 +31,12 @@ func CreateListener(endpoint string) (net.Listener, error) {
func GetAddressAndDialer(endpoint string) (string, func(addr string, timeout time.Duration) (net.Conn, error), error) {
return "", nil, fmt.Errorf("GetAddressAndDialer is unsupported in this build")
}
// LockAndCheckSubPath empty implementation
func LockAndCheckSubPath(volumePath, subPath string) ([]uintptr, error) {
return []uintptr{}, nil
}
// UnlockPath empty implementation
func UnlockPath(fileHandles []uintptr) {
}


@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// NewInitializedVolumePluginMgr returns a new instance of
@ -94,7 +95,7 @@ func (kvh *kubeletVolumeHost) GetVolumeDevicePluginDir(pluginName string) string
func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
dir := kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
if runtime.GOOS == "windows" {
dir = volume.GetWindowsPath(dir)
dir = util.GetWindowsPath(dir)
}
return dir
}
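util.GetWindowsPath (which replaces volume.GetWindowsPath here) rewrites slash-separated volume paths into Windows form. A guess at the shape of such a helper, not the verified implementation:

// toWindowsPath is an illustrative stand-in: swap separators and anchor
// drive-less paths at c:.
func toWindowsPath(path string) string {
	windowsPath := strings.Replace(path, "/", "\\", -1)
	if strings.HasPrefix(windowsPath, "\\") {
		windowsPath = "c:" + windowsPath
	}
	return windowsPath
}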