Update go dependencies
parent 293223eea0
commit b7a799bf82
432 changed files with 37346 additions and 25783 deletions
vendor/k8s.io/kubernetes/pkg/scheduler/BUILD (generated, vendored, new file, 88 lines)
@@ -0,0 +1,88 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = ["scheduler_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/controller/volume/persistentvolume:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/core:go_default_library",
        "//pkg/scheduler/schedulercache:go_default_library",
        "//pkg/scheduler/testing:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//pkg/scheduler/volumebinder:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "scheduler.go",
        "testutil.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler",
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/core:go_default_library",
        "//pkg/scheduler/metrics:go_default_library",
        "//pkg/scheduler/schedulercache:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//pkg/scheduler/volumebinder:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/scheduler/algorithm:all-srcs",
        "//pkg/scheduler/algorithmprovider:all-srcs",
        "//pkg/scheduler/api:all-srcs",
        "//pkg/scheduler/core:all-srcs",
        "//pkg/scheduler/factory:all-srcs",
        "//pkg/scheduler/metrics:all-srcs",
        "//pkg/scheduler/schedulercache:all-srcs",
        "//pkg/scheduler/testing:all-srcs",
        "//pkg/scheduler/util:all-srcs",
        "//pkg/scheduler/volumebinder:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/scheduler/OWNERS (generated, vendored, new file, 4 lines)
@@ -0,0 +1,4 @@
approvers:
- sig-scheduling-maintainers
reviewers:
- sig-scheduling
vendor/k8s.io/kubernetes/pkg/scheduler/api/BUILD (generated, vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "register.go",
        "types.go",
        "zz_generated.deepcopy.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/api",
    deps = [
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/scheduler/api/latest:all-srcs",
        "//pkg/scheduler/api/v1:all-srcs",
        "//pkg/scheduler/api/validation:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/pkg/scheduler/api/doc.go (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package

// Package api contains scheduler API objects.
package api // import "k8s.io/kubernetes/pkg/scheduler/api"
vendor/k8s.io/kubernetes/pkg/scheduler/api/register.go (generated, vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
// TODO: remove this, scheduler should not have its own scheme.
var Scheme = runtime.NewScheme()

// SchemeGroupVersion is group version used to register these objects
// TODO this should be in the "scheduler" group
var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal}

var (
	// SchemeBuilder defines a SchemeBuilder object.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme is used to add stored functions to scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

func init() {
	if err := addKnownTypes(Scheme); err != nil {
		// Programmer error.
		panic(err)
	}
}

func addKnownTypes(scheme *runtime.Scheme) error {
	if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
		return err
	}
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Policy{},
	)
	return nil
}
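For context, here is a minimal, illustrative sketch (not part of the diff) showing how this registration is typically exercised: it adds the scheduler API types to a fresh scheme, mirroring what the package's init() does for its own Scheme variable.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

func main() {
	// Build a fresh scheme and register the scheduler API types into it.
	scheme := runtime.NewScheme()
	if err := schedulerapi.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// Policy is now known under the internal group/version used above.
	gvk := schedulerapi.SchemeGroupVersion.WithKind("Policy")
	fmt.Println(scheme.Recognizes(gvk)) // true
}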
vendor/k8s.io/kubernetes/pkg/scheduler/api/types.go (generated, vendored, new file, 281 lines)
@@ -0,0 +1,281 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api

import (
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	restclient "k8s.io/client-go/rest"
)

const (
	// MaxUint defines the max unsigned int value.
	MaxUint = ^uint(0)
	// MaxInt defines the max signed int value.
	MaxInt = int(MaxUint >> 1)
	// MaxTotalPriority defines the max total priority value.
	MaxTotalPriority = MaxInt
	// MaxPriority defines the max priority value.
	MaxPriority = 10
	// MaxWeight defines the max weight value.
	MaxWeight = MaxInt / MaxPriority
	// HighestUserDefinablePriority is the highest priority for user defined priority classes. Priority values larger than 1 billion are reserved for Kubernetes system use.
	HighestUserDefinablePriority = int32(1000000000)
	// SystemCriticalPriority is the beginning of the range of priority values for critical system components.
	SystemCriticalPriority = 2 * HighestUserDefinablePriority
	// NOTE: In order to avoid conflict of names with user-defined priority classes, all the names must
	// start with scheduling.SystemPriorityClassPrefix which is by default "system-".
	SystemClusterCritical = "system-cluster-critical"
	SystemNodeCritical    = "system-node-critical"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Policy describes a struct of a policy resource in api.
type Policy struct {
	metav1.TypeMeta
	// Holds the information to configure the fit predicate functions.
	// If unspecified, the default predicate functions will be applied.
	// If empty list, all predicates (except the mandatory ones) will be
	// bypassed.
	Predicates []PredicatePolicy
	// Holds the information to configure the priority functions.
	// If unspecified, the default priority functions will be applied.
	// If empty list, all priority functions will be bypassed.
	Priorities []PriorityPolicy
	// Holds the information to communicate with the extender(s)
	ExtenderConfigs []ExtenderConfig
	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100.
	HardPodAffinitySymmetricWeight int32

	// When AlwaysCheckAllPredicates is set to true, scheduler checks all
	// the configured predicates even after one or more of them fails.
	// When the flag is set to false, scheduler skips checking the rest
	// of the predicates after it finds one predicate that failed.
	AlwaysCheckAllPredicates bool
}

// PredicatePolicy describes a struct of a predicate policy.
type PredicatePolicy struct {
	// Identifier of the predicate policy
	// For a custom predicate, the name can be user-defined
	// For the Kubernetes provided predicates, the name is the identifier of the pre-defined predicate
	Name string
	// Holds the parameters to configure the given predicate
	Argument *PredicateArgument
}

// PriorityPolicy describes a struct of a priority policy.
type PriorityPolicy struct {
	// Identifier of the priority policy
	// For a custom priority, the name can be user-defined
	// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
	Name string
	// The numeric multiplier for the node scores that the priority function generates
	// The weight should be a positive integer
	Weight int
	// Holds the parameters to configure the given priority function
	Argument *PriorityArgument
}

// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration.
// Only one of its members may be specified
type PredicateArgument struct {
	// The predicate that provides affinity for pods belonging to a service
	// It uses a label to identify nodes that belong to the same "group"
	ServiceAffinity *ServiceAffinity
	// The predicate that checks whether a particular node has a certain label
	// defined or not, regardless of value
	LabelsPresence *LabelsPresence
}

// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration.
// Only one of its members may be specified
type PriorityArgument struct {
	// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
	// It uses a label to identify nodes that belong to the same "group"
	ServiceAntiAffinity *ServiceAntiAffinity
	// The priority function that checks whether a particular node has a certain label
	// defined or not, regardless of value
	LabelPreference *LabelPreference
}

// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type ServiceAffinity struct {
	// The list of labels that identify node "groups"
	// All of the labels should match for the node to be considered a fit for hosting the pod
	Labels []string
}

// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type LabelsPresence struct {
	// The list of labels that identify node "groups"
	// All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
	Labels []string
	// The boolean flag that indicates whether the labels should be present or absent from the node
	Presence bool
}

// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct {
	// Used to identify node "groups"
	Label string
}

// LabelPreference holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct {
	// Used to identify node "groups"
	Label string
	// This is a boolean flag
	// If true, higher priority is given to nodes that have the label
	// If false, higher priority is given to nodes that do not have the label
	Presence bool
}

// ExtenderManagedResource describes the arguments of extended resources
// managed by an extender.
type ExtenderManagedResource struct {
	// Name is the extended resource name.
	Name v1.ResourceName
	// IgnoredByScheduler indicates whether kube-scheduler should ignore this
	// resource when applying predicates.
	IgnoredByScheduler bool
}

// ExtenderConfig holds the parameters used to communicate with the extender. If a verb is unspecified/empty,
// it is assumed that the extender chose not to provide that extension.
type ExtenderConfig struct {
	// URLPrefix at which the extender is available
	URLPrefix string
	// Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender.
	FilterVerb string
	// Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender.
	PrioritizeVerb string
	// The numeric multiplier for the node scores that the prioritize call generates.
	// The weight should be a positive integer
	Weight int
	// Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender.
	// If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender
	// can implement this function.
	BindVerb string
	// EnableHTTPS specifies whether https should be used to communicate with the extender
	EnableHTTPS bool
	// TLSConfig specifies the transport layer security config
	TLSConfig *restclient.TLSClientConfig
	// HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize
	// timeout is ignored, k8s/other extenders priorities are used to select the node.
	HTTPTimeout time.Duration
	// NodeCacheCapable specifies that the extender is capable of caching node information,
	// so the scheduler should only send minimal information about the eligible nodes
	// assuming that the extender already cached full details of all nodes in the cluster
	NodeCacheCapable bool
	// ManagedResources is a list of extended resources that are managed by
	// this extender.
	// - A pod will be sent to the extender on the Filter, Prioritize and Bind
	// (if the extender is the binder) phases iff the pod requests at least
	// one of the extended resources in this list. If empty or unspecified,
	// all pods will be sent to this extender.
	// - If IgnoredByScheduler is set to true for a resource, kube-scheduler
	// will skip checking the resource in predicates.
	// +optional
	ManagedResources []ExtenderManagedResource
}

// ExtenderArgs represents the arguments needed by the extender to filter/prioritize
// nodes for a pod.
type ExtenderArgs struct {
	// Pod being scheduled
	Pod v1.Pod
	// List of candidate nodes where the pod can be scheduled; to be populated
	// only if ExtenderConfig.NodeCacheCapable == false
	Nodes *v1.NodeList
	// List of candidate node names where the pod can be scheduled; to be
	// populated only if ExtenderConfig.NodeCacheCapable == true
	NodeNames *[]string
}

// FailedNodesMap represents the filtered out nodes, with node names and failure messages
type FailedNodesMap map[string]string

// ExtenderFilterResult represents the results of a filter call to an extender
type ExtenderFilterResult struct {
	// Filtered set of nodes where the pod can be scheduled; to be populated
	// only if ExtenderConfig.NodeCacheCapable == false
	Nodes *v1.NodeList
	// Filtered set of nodes where the pod can be scheduled; to be populated
	// only if ExtenderConfig.NodeCacheCapable == true
	NodeNames *[]string
	// Filtered out nodes where the pod can't be scheduled and the failure messages
	FailedNodes FailedNodesMap
	// Error message indicating failure
	Error string
}

// ExtenderBindingArgs represents the arguments to an extender for binding a pod to a node.
type ExtenderBindingArgs struct {
	// PodName is the name of the pod being bound
	PodName string
	// PodNamespace is the namespace of the pod being bound
	PodNamespace string
	// PodUID is the UID of the pod being bound
	PodUID types.UID
	// Node selected by the scheduler
	Node string
}

// ExtenderBindingResult represents the result of binding of a pod to a node from an extender.
type ExtenderBindingResult struct {
	// Error message indicating failure
	Error string
}

// HostPriority represents the priority of scheduling to a particular host, higher priority is better.
type HostPriority struct {
	// Name of the host
	Host string
	// Score associated with the host
	Score int
}

// HostPriorityList declares a []HostPriority type.
type HostPriorityList []HostPriority

// SystemPriorityClasses defines special priority classes which are used by system critical pods that should not be preempted by workload pods.
var SystemPriorityClasses = map[string]int32{
	SystemClusterCritical: SystemCriticalPriority,
	SystemNodeCritical:    SystemCriticalPriority + 1000,
}

func (h HostPriorityList) Len() int {
	return len(h)
}

func (h HostPriorityList) Less(i, j int) bool {
	if h[i].Score == h[j].Score {
		return h[i].Host < h[j].Host
	}
	return h[i].Score < h[j].Score
}

func (h HostPriorityList) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}
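As an illustration of how these types fit together (not part of the vendored file; the predicate and priority names below are only examples), a scheduler policy can be assembled by hand and a HostPriorityList ordered via sort.Sort, since it implements sort.Interface:

package main

import (
	"fmt"
	"sort"
	"time"

	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

func main() {
	// A hand-built Policy; the Name values are illustrative, not a fixed list.
	policy := schedulerapi.Policy{
		Predicates: []schedulerapi.PredicatePolicy{
			{Name: "ExampleDefaultPredicate"},
			{Name: "RegionLabelPresent", Argument: &schedulerapi.PredicateArgument{
				LabelsPresence: &schedulerapi.LabelsPresence{Labels: []string{"region"}, Presence: true},
			}},
		},
		Priorities: []schedulerapi.PriorityPolicy{
			{Name: "ExamplePriority", Weight: 1},
		},
		ExtenderConfigs: []schedulerapi.ExtenderConfig{
			{
				URLPrefix:   "http://127.0.0.1:12346/scheduler",
				FilterVerb:  "filter",
				Weight:      5,
				HTTPTimeout: 5 * time.Second,
			},
		},
		HardPodAffinitySymmetricWeight: 1,
	}
	fmt.Printf("policy with %d predicates and %d priorities\n",
		len(policy.Predicates), len(policy.Priorities))

	// Scores are sorted ascending by Score, with ties broken by Host name.
	scores := schedulerapi.HostPriorityList{
		{Host: "node-b", Score: 7},
		{Host: "node-a", Score: 7},
		{Host: "node-c", Score: 3},
	}
	sort.Sort(scores)
	fmt.Println(scores) // node-c first, then node-a, node-b
}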
vendor/k8s.io/kubernetes/pkg/scheduler/api/zz_generated.deepcopy.go (generated, vendored, new file, 485 lines)
|
|
@@ -0,0 +1,485 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtenderArgs) DeepCopyInto(out *ExtenderArgs) {
|
||||
*out = *in
|
||||
in.Pod.DeepCopyInto(&out.Pod)
|
||||
if in.Nodes != nil {
|
||||
in, out := &in.Nodes, &out.Nodes
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.NodeNames != nil {
|
||||
in, out := &in.NodeNames, &out.NodeNames
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderArgs.
|
||||
func (in *ExtenderArgs) DeepCopy() *ExtenderArgs {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtenderArgs)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtenderBindingArgs) DeepCopyInto(out *ExtenderBindingArgs) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingArgs.
|
||||
func (in *ExtenderBindingArgs) DeepCopy() *ExtenderBindingArgs {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtenderBindingArgs)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtenderBindingResult) DeepCopyInto(out *ExtenderBindingResult) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderBindingResult.
|
||||
func (in *ExtenderBindingResult) DeepCopy() *ExtenderBindingResult {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtenderBindingResult)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtenderConfig) DeepCopyInto(out *ExtenderConfig) {
|
||||
*out = *in
|
||||
if in.TLSConfig != nil {
|
||||
in, out := &in.TLSConfig, &out.TLSConfig
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(rest.TLSClientConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.ManagedResources != nil {
|
||||
in, out := &in.ManagedResources, &out.ManagedResources
|
||||
*out = make([]ExtenderManagedResource, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderConfig.
|
||||
func (in *ExtenderConfig) DeepCopy() *ExtenderConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtenderConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtenderFilterResult) DeepCopyInto(out *ExtenderFilterResult) {
|
||||
*out = *in
|
||||
if in.Nodes != nil {
|
||||
in, out := &in.Nodes, &out.Nodes
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(v1.NodeList)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.NodeNames != nil {
|
||||
in, out := &in.NodeNames, &out.NodeNames
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new([]string)
|
||||
if **in != nil {
|
||||
in, out := *in, *out
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
}
|
||||
if in.FailedNodes != nil {
|
||||
in, out := &in.FailedNodes, &out.FailedNodes
|
||||
*out = make(FailedNodesMap, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderFilterResult.
|
||||
func (in *ExtenderFilterResult) DeepCopy() *ExtenderFilterResult {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtenderFilterResult)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource.
|
||||
func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExtenderManagedResource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in FailedNodesMap) DeepCopyInto(out *FailedNodesMap) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(FailedNodesMap, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedNodesMap.
|
||||
func (in FailedNodesMap) DeepCopy() FailedNodesMap {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(FailedNodesMap)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *HostPriority) DeepCopyInto(out *HostPriority) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriority.
|
||||
func (in *HostPriority) DeepCopy() *HostPriority {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HostPriority)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in HostPriorityList) DeepCopyInto(out *HostPriorityList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(HostPriorityList, len(*in))
|
||||
copy(*out, *in)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPriorityList.
|
||||
func (in HostPriorityList) DeepCopy() HostPriorityList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(HostPriorityList)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LabelPreference) DeepCopyInto(out *LabelPreference) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelPreference.
|
||||
func (in *LabelPreference) DeepCopy() *LabelPreference {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LabelPreference)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LabelsPresence) DeepCopyInto(out *LabelsPresence) {
|
||||
*out = *in
|
||||
if in.Labels != nil {
|
||||
in, out := &in.Labels, &out.Labels
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsPresence.
|
||||
func (in *LabelsPresence) DeepCopy() *LabelsPresence {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(LabelsPresence)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Policy) DeepCopyInto(out *Policy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
if in.Predicates != nil {
|
||||
in, out := &in.Predicates, &out.Predicates
|
||||
*out = make([]PredicatePolicy, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Priorities != nil {
|
||||
in, out := &in.Priorities, &out.Priorities
|
||||
*out = make([]PriorityPolicy, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ExtenderConfigs != nil {
|
||||
in, out := &in.ExtenderConfigs, &out.ExtenderConfigs
|
||||
*out = make([]ExtenderConfig, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
|
||||
func (in *Policy) DeepCopy() *Policy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Policy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Policy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) {
|
||||
*out = *in
|
||||
if in.ServiceAffinity != nil {
|
||||
in, out := &in.ServiceAffinity, &out.ServiceAffinity
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ServiceAffinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.LabelsPresence != nil {
|
||||
in, out := &in.LabelsPresence, &out.LabelsPresence
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(LabelsPresence)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateArgument.
|
||||
func (in *PredicateArgument) DeepCopy() *PredicateArgument {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PredicateArgument)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) {
|
||||
*out = *in
|
||||
if in.Argument != nil {
|
||||
in, out := &in.Argument, &out.Argument
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PredicateArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicatePolicy.
|
||||
func (in *PredicatePolicy) DeepCopy() *PredicatePolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PredicatePolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) {
|
||||
*out = *in
|
||||
if in.ServiceAntiAffinity != nil {
|
||||
in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(ServiceAntiAffinity)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
if in.LabelPreference != nil {
|
||||
in, out := &in.LabelPreference, &out.LabelPreference
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(LabelPreference)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityArgument.
|
||||
func (in *PriorityArgument) DeepCopy() *PriorityArgument {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityArgument)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) {
|
||||
*out = *in
|
||||
if in.Argument != nil {
|
||||
in, out := &in.Argument, &out.Argument
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(PriorityArgument)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityPolicy.
|
||||
func (in *PriorityPolicy) DeepCopy() *PriorityPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PriorityPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) {
|
||||
*out = *in
|
||||
if in.Labels != nil {
|
||||
in, out := &in.Labels, &out.Labels
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity.
|
||||
func (in *ServiceAffinity) DeepCopy() *ServiceAffinity {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceAffinity)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity.
|
||||
func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceAntiAffinity)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
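A brief, illustrative note (not part of the diff): these generated helpers produce independent copies of the API objects, so a caller can mutate the copy without affecting the original. A minimal sketch:

package main

import (
	"fmt"

	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

func main() {
	orig := &schedulerapi.Policy{
		Predicates: []schedulerapi.PredicatePolicy{{Name: "ExamplePredicate"}},
	}

	// DeepCopy duplicates the nested slices, so edits to the copy
	// do not leak back into the original object.
	copied := orig.DeepCopy()
	copied.Predicates[0].Name = "ChangedName"

	fmt.Println(orig.Predicates[0].Name)   // ExamplePredicate
	fmt.Println(copied.Predicates[0].Name) // ChangedName
}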
vendor/k8s.io/kubernetes/pkg/scheduler/scheduler.go (generated, vendored, new file, 497 lines)
|
|
@@ -0,0 +1,497 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package scheduler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/pkg/scheduler/core"
|
||||
"k8s.io/kubernetes/pkg/scheduler/metrics"
|
||||
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Binder knows how to write a binding.
|
||||
type Binder interface {
|
||||
Bind(binding *v1.Binding) error
|
||||
}
|
||||
|
||||
// PodConditionUpdater updates the condition of a pod based on the passed
|
||||
// PodCondition
|
||||
type PodConditionUpdater interface {
|
||||
Update(pod *v1.Pod, podCondition *v1.PodCondition) error
|
||||
}
|
||||
|
||||
// PodPreemptor has methods needed to delete a pod and to update
|
||||
// annotations of the preemptor pod.
|
||||
type PodPreemptor interface {
|
||||
GetUpdatedPod(pod *v1.Pod) (*v1.Pod, error)
|
||||
DeletePod(pod *v1.Pod) error
|
||||
SetNominatedNodeName(pod *v1.Pod, nominatedNode string) error
|
||||
RemoveNominatedNodeName(pod *v1.Pod) error
|
||||
}
|
||||
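To make the role of these small interfaces concrete, here is a hypothetical test double (not part of the vendored file; the names fakeBinder and fakeConditionUpdater are made up for illustration) that satisfies Binder and PodConditionUpdater. It assumes the same v1 import ("k8s.io/api/core/v1") as the surrounding file.

// fakeBinder records bindings instead of calling the API server.
type fakeBinder struct {
	bindings []*v1.Binding
}

func (b *fakeBinder) Bind(binding *v1.Binding) error {
	b.bindings = append(b.bindings, binding)
	return nil
}

// fakeConditionUpdater drops condition updates; useful when a test only
// cares about the binding side effects.
type fakeConditionUpdater struct{}

func (fakeConditionUpdater) Update(_ *v1.Pod, _ *v1.PodCondition) error {
	return nil
}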
|
||||
// Scheduler watches for new unscheduled pods. It attempts to find
|
||||
// nodes that they fit on and writes bindings back to the api server.
|
||||
type Scheduler struct {
|
||||
config *Config
|
||||
}
|
||||
|
||||
// StopEverything closes the scheduler config's StopEverything channel, to shut
|
||||
// down the Scheduler.
|
||||
func (sched *Scheduler) StopEverything() {
|
||||
close(sched.config.StopEverything)
|
||||
}
|
||||
|
||||
// Configurator defines I/O, caching, and other functionality needed to
|
||||
// construct a new scheduler. An implementation of this can be seen in
|
||||
// factory.go.
|
||||
type Configurator interface {
|
||||
GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error)
|
||||
GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error)
|
||||
GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error)
|
||||
GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error)
|
||||
GetHardPodAffinitySymmetricWeight() int32
|
||||
GetSchedulerName() string
|
||||
MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error)
|
||||
|
||||
// Needs to be exposed for things like integration tests where we want to make fake nodes.
|
||||
GetNodeLister() corelisters.NodeLister
|
||||
GetClient() clientset.Interface
|
||||
GetScheduledPodLister() corelisters.PodLister
|
||||
|
||||
Create() (*Config, error)
|
||||
CreateFromProvider(providerName string) (*Config, error)
|
||||
CreateFromConfig(policy schedulerapi.Policy) (*Config, error)
|
||||
CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error)
|
||||
}
|
||||
|
||||
// Config is an implementation of the Scheduler's configured input data.
|
||||
// TODO over time we should make this struct a hidden implementation detail of the scheduler.
|
||||
type Config struct {
|
||||
// It is expected that changes made via SchedulerCache will be observed
|
||||
// by NodeLister and Algorithm.
|
||||
SchedulerCache schedulercache.Cache
|
||||
// Ecache is used for optimistically invalidating affected cache items after
|
||||
// successfully binding a pod
|
||||
Ecache *core.EquivalenceCache
|
||||
NodeLister algorithm.NodeLister
|
||||
Algorithm algorithm.ScheduleAlgorithm
|
||||
GetBinder func(pod *v1.Pod) Binder
|
||||
// PodConditionUpdater is used only in case of scheduling errors. If we succeed
|
||||
// with scheduling, PodScheduled condition will be updated in apiserver in /bind
|
||||
// handler so that binding and setting PodCondition it is atomic.
|
||||
PodConditionUpdater PodConditionUpdater
|
||||
// PodPreemptor is used to evict pods and update pod annotations.
|
||||
PodPreemptor PodPreemptor
|
||||
|
||||
// NextPod should be a function that blocks until the next pod
|
||||
// is available. We don't use a channel for this, because scheduling
|
||||
// a pod may take some amount of time and we don't want pods to get
|
||||
// stale while they sit in a channel.
|
||||
NextPod func() *v1.Pod
|
||||
|
||||
// WaitForCacheSync waits for scheduler cache to populate.
|
||||
// It returns true if it was successful, false if the controller should shutdown.
|
||||
WaitForCacheSync func() bool
|
||||
|
||||
// Error is called if there is an error. It is passed the pod in
|
||||
// question, and the error
|
||||
Error func(*v1.Pod, error)
|
||||
|
||||
// Recorder is the EventRecorder to use
|
||||
Recorder record.EventRecorder
|
||||
|
||||
// Close this to shut down the scheduler.
|
||||
StopEverything chan struct{}
|
||||
|
||||
// VolumeBinder handles PVC/PV binding for the pod.
|
||||
VolumeBinder *volumebinder.VolumeBinder
|
||||
}
|
||||
|
||||
// NewFromConfigurator returns a new scheduler that is created entirely by the Configurator. Assumes Create() is implemented.
|
||||
// Supports intermediate Config mutation for now if you provide modifier functions which will run after Config is created.
|
||||
func NewFromConfigurator(c Configurator, modifiers ...func(c *Config)) (*Scheduler, error) {
|
||||
cfg, err := c.Create()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Mutate it if any functions were provided, changes might be required for certain types of tests (i.e. change the recorder).
|
||||
for _, modifier := range modifiers {
|
||||
modifier(cfg)
|
||||
}
|
||||
// From this point on the config is immutable to the outside.
|
||||
s := &Scheduler{
|
||||
config: cfg,
|
||||
}
|
||||
metrics.Register()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// NewFromConfig returns a new scheduler using the provided Config.
|
||||
func NewFromConfig(config *Config) *Scheduler {
|
||||
metrics.Register()
|
||||
return &Scheduler{
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
// Run begins watching and scheduling. It waits for cache to be synced, then starts a goroutine and returns immediately.
|
||||
func (sched *Scheduler) Run() {
|
||||
if !sched.config.WaitForCacheSync() {
|
||||
return
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
|
||||
go sched.config.VolumeBinder.Run(sched.bindVolumesWorker, sched.config.StopEverything)
|
||||
}
|
||||
|
||||
go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything)
|
||||
}
|
||||
|
||||
// Config returns the scheduler's config pointer. It is exposed for testing purposes.
|
||||
func (sched *Scheduler) Config() *Config {
|
||||
return sched.config
|
||||
}
|
||||
|
||||
// schedule implements the scheduling algorithm and returns the suggested host.
|
||||
func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) {
|
||||
host, err := sched.config.Algorithm.Schedule(pod, sched.config.NodeLister)
|
||||
if err != nil {
|
||||
glog.V(1).Infof("Failed to schedule pod: %v/%v", pod.Namespace, pod.Name)
|
||||
pod = pod.DeepCopy()
|
||||
sched.config.Error(pod, err)
|
||||
sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%v", err)
|
||||
sched.config.PodConditionUpdater.Update(pod, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: v1.PodReasonUnschedulable,
|
||||
Message: err.Error(),
|
||||
})
|
||||
return "", err
|
||||
}
|
||||
return host, err
|
||||
}
|
||||
|
||||
// preempt tries to create room for a pod that has failed to schedule, by preempting lower priority pods if possible.
|
||||
// If it succeeds, it adds the name of the node where preemption has happened to the pod annotations.
|
||||
// It returns the node name and an error if any.
|
||||
func (sched *Scheduler) preempt(preemptor *v1.Pod, scheduleErr error) (string, error) {
|
||||
if !util.PodPriorityEnabled() {
|
||||
glog.V(3).Infof("Pod priority feature is not enabled. No preemption is performed.")
|
||||
return "", nil
|
||||
}
|
||||
preemptor, err := sched.config.PodPreemptor.GetUpdatedPod(preemptor)
|
||||
if err != nil {
|
||||
glog.Errorf("Error getting the updated preemptor pod object: %v", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
node, victims, nominatedPodsToClear, err := sched.config.Algorithm.Preempt(preemptor, sched.config.NodeLister, scheduleErr)
|
||||
metrics.PreemptionVictims.Set(float64(len(victims)))
|
||||
if err != nil {
|
||||
glog.Errorf("Error preempting victims to make room for %v/%v.", preemptor.Namespace, preemptor.Name)
|
||||
return "", err
|
||||
}
|
||||
var nodeName = ""
|
||||
if node != nil {
|
||||
nodeName = node.Name
|
||||
err = sched.config.PodPreemptor.SetNominatedNodeName(preemptor, nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Error in preemption process. Cannot update pod %v annotations: %v", preemptor.Name, err)
|
||||
return "", err
|
||||
}
|
||||
for _, victim := range victims {
|
||||
if err := sched.config.PodPreemptor.DeletePod(victim); err != nil {
|
||||
glog.Errorf("Error preempting pod %v/%v: %v", victim.Namespace, victim.Name, err)
|
||||
return "", err
|
||||
}
|
||||
sched.config.Recorder.Eventf(victim, v1.EventTypeNormal, "Preempted", "by %v/%v on node %v", preemptor.Namespace, preemptor.Name, nodeName)
|
||||
}
|
||||
}
|
||||
// Clearing nominated pods should happen outside of "if node != nil". Node could
|
||||
// be nil when a pod with nominated node name is eligible to preempt again,
|
||||
// but preemption logic does not find any node for it. In that case Preempt()
|
||||
// function of generic_scheduler.go returns the pod itself for removal of the annotation.
|
||||
for _, p := range nominatedPodsToClear {
|
||||
rErr := sched.config.PodPreemptor.RemoveNominatedNodeName(p)
|
||||
if rErr != nil {
|
||||
glog.Errorf("Cannot remove nominated node annotation of pod: %v", rErr)
|
||||
// We do not return as this error is not critical.
|
||||
}
|
||||
}
|
||||
return nodeName, err
|
||||
}
|
||||
|
||||
// assumeAndBindVolumes will update the volume cache and then asynchronously bind volumes if required.
|
||||
//
|
||||
// If volume binding is required, then the bind volumes routine will update the pod to send it back through
|
||||
// the scheduler.
|
||||
//
|
||||
// Otherwise, return nil error and continue to assume the pod.
|
||||
//
|
||||
// This function modifies assumed if volume binding is required.
|
||||
func (sched *Scheduler) assumeAndBindVolumes(assumed *v1.Pod, host string) error {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
|
||||
allBound, bindingRequired, err := sched.config.VolumeBinder.Binder.AssumePodVolumes(assumed, host)
|
||||
if err != nil {
|
||||
sched.config.Error(assumed, err)
|
||||
sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePodVolumes failed: %v", err)
|
||||
sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "SchedulerError",
|
||||
Message: err.Error(),
|
||||
})
|
||||
return err
|
||||
}
|
||||
if !allBound {
|
||||
err = fmt.Errorf("Volume binding started, waiting for completion")
|
||||
if bindingRequired {
|
||||
if sched.config.Ecache != nil {
|
||||
invalidPredicates := sets.NewString(predicates.CheckVolumeBindingPred)
|
||||
sched.config.Ecache.InvalidateCachedPredicateItemOfAllNodes(invalidPredicates)
|
||||
}
|
||||
|
||||
// bindVolumesWorker() will update the Pod object to put it back in the scheduler queue
|
||||
sched.config.VolumeBinder.BindQueue.Add(assumed)
|
||||
} else {
|
||||
// We are just waiting for PV controller to finish binding, put it back in the
|
||||
// scheduler queue
|
||||
sched.config.Error(assumed, err)
|
||||
sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "FailedScheduling", "%v", err)
|
||||
sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "VolumeBindingWaiting",
|
||||
})
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// bindVolumesWorker() processes pods queued in assumeAndBindVolumes() and tries to
|
||||
// make the API update for volume binding.
|
||||
// This function runs forever until the volume BindQueue is closed.
|
||||
func (sched *Scheduler) bindVolumesWorker() {
|
||||
workFunc := func() bool {
|
||||
keyObj, quit := sched.config.VolumeBinder.BindQueue.Get()
|
||||
if quit {
|
||||
return true
|
||||
}
|
||||
defer sched.config.VolumeBinder.BindQueue.Done(keyObj)
|
||||
|
||||
assumed, ok := keyObj.(*v1.Pod)
|
||||
if !ok {
|
||||
glog.V(4).Infof("Object is not a *v1.Pod")
|
||||
return false
|
||||
}
|
||||
|
||||
// TODO: add metrics
|
||||
var reason string
|
||||
var eventType string
|
||||
|
||||
glog.V(5).Infof("Trying to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
|
||||
|
||||
// The Pod is always sent back to the scheduler afterwards.
|
||||
err := sched.config.VolumeBinder.Binder.BindPodVolumes(assumed)
|
||||
if err != nil {
|
||||
glog.V(1).Infof("Failed to bind volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name, err)
|
||||
reason = "VolumeBindingFailed"
|
||||
eventType = v1.EventTypeWarning
|
||||
} else {
|
||||
glog.V(4).Infof("Successfully bound volumes for pod \"%v/%v\"", assumed.Namespace, assumed.Name)
|
||||
reason = "VolumeBindingWaiting"
|
||||
eventType = v1.EventTypeNormal
|
||||
err = fmt.Errorf("Volume binding started, waiting for completion")
|
||||
}
|
||||
|
||||
// Always fail scheduling regardless of binding success.
|
||||
// The Pod needs to be sent back through the scheduler to:
|
||||
// * Retry volume binding if it fails.
|
||||
// * Retry volume binding if dynamic provisioning fails.
|
||||
// * Bind the Pod to the Node once all volumes are bound.
|
||||
sched.config.Error(assumed, err)
|
||||
sched.config.Recorder.Eventf(assumed, eventType, "FailedScheduling", "%v", err)
|
||||
sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: reason,
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
for {
|
||||
if quit := workFunc(); quit {
|
||||
glog.V(4).Infof("bindVolumesWorker shutting down")
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous.
|
||||
// assume modifies `assumed`.
|
||||
func (sched *Scheduler) assume(assumed *v1.Pod, host string) error {
|
||||
// Optimistically assume that the binding will succeed and send it to apiserver
|
||||
// in the background.
|
||||
// If the binding fails, scheduler will release resources allocated to assumed pod
|
||||
// immediately.
|
||||
assumed.Spec.NodeName = host
|
||||
if err := sched.config.SchedulerCache.AssumePod(assumed); err != nil {
|
||||
glog.Errorf("scheduler cache AssumePod failed: %v", err)
|
||||
|
||||
// This is most probably result of a BUG in retrying logic.
|
||||
// We report an error here so that pod scheduling can be retried.
|
||||
// This relies on the fact that Error will check if the pod has been bound
|
||||
// to a node and if so will not add it back to the unscheduled pods queue
|
||||
// (otherwise this would cause an infinite loop).
|
||||
sched.config.Error(assumed, err)
|
||||
sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "AssumePod failed: %v", err)
|
||||
sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "SchedulerError",
|
||||
Message: err.Error(),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Optimistically assume that the binding will succeed, so we need to invalidate affected
|
||||
// predicates in equivalence cache.
|
||||
// If the binding fails, these invalidated item will not break anything.
|
||||
if sched.config.Ecache != nil {
|
||||
sched.config.Ecache.InvalidateCachedPredicateItemForPodAdd(assumed, host)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// bind binds a pod to a given node defined in a binding object. We expect this to run asynchronously, so we
|
||||
// handle binding metrics internally.
|
||||
func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error {
|
||||
bindingStart := time.Now()
|
||||
// If binding succeeded then PodScheduled condition will be updated in apiserver so that
|
||||
// it's atomic with setting host.
|
||||
err := sched.config.GetBinder(assumed).Bind(b)
|
||||
if err := sched.config.SchedulerCache.FinishBinding(assumed); err != nil {
|
||||
glog.Errorf("scheduler cache FinishBinding failed: %v", err)
|
||||
}
|
||||
if err != nil {
|
||||
glog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name)
|
||||
if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil {
|
||||
glog.Errorf("scheduler cache ForgetPod failed: %v", err)
|
||||
}
|
||||
sched.config.Error(assumed, err)
|
||||
sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err)
|
||||
sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{
|
||||
Type: v1.PodScheduled,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "BindingRejected",
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
|
||||
sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", assumed.Name, b.Target.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting.
|
||||
func (sched *Scheduler) scheduleOne() {
|
||||
pod := sched.config.NextPod()
|
||||
if pod.DeletionTimestamp != nil {
|
||||
sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
|
||||
glog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name)
|
||||
|
||||
// Synchronously attempt to find a fit for the pod.
|
||||
start := time.Now()
|
||||
suggestedHost, err := sched.schedule(pod)
|
||||
if err != nil {
|
||||
// schedule() may have failed because the pod would not fit on any host, so we try to
|
||||
// preempt, with the expectation that the next time the pod is tried for scheduling it
|
||||
// will fit due to the preemption. It is also possible that a different pod will schedule
|
||||
// into the resources that were preempted, but this is harmless.
|
||||
if fitError, ok := err.(*core.FitError); ok {
|
||||
preemptionStartTime := time.Now()
|
||||
sched.preempt(pod, fitError)
|
||||
metrics.PreemptionAttempts.Inc()
|
||||
metrics.SchedulingAlgorithmPremptionEvaluationDuration.Observe(metrics.SinceInMicroseconds(preemptionStartTime))
|
||||
}
|
||||
return
|
||||
}
|
||||
metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
|
||||
// Tell the cache to assume that a pod now is running on a given node, even though it hasn't been bound yet.
|
||||
// This allows us to keep scheduling without waiting on binding to occur.
|
||||
assumedPod := pod.DeepCopy()
|
||||
|
||||
// Assume volumes first before assuming the pod.
|
||||
//
|
||||
// If no volumes need binding, then nil is returned, and continue to assume the pod.
|
||||
//
|
||||
// Otherwise, error is returned and volume binding is started asynchronously for all of the pod's volumes.
|
||||
// scheduleOne() returns immediately on error, so that it doesn't continue to assume the pod.
|
||||
//
|
||||
// After the asynchronous volume binding updates are made, it will send the pod back through the scheduler for
|
||||
// subsequent passes until all volumes are fully bound.
|
||||
//
|
||||
// This function modifies 'assumedPod' if volume binding is required.
|
||||
err = sched.assumeAndBindVolumes(assumedPod, suggestedHost)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// assume modifies `assumedPod` by setting NodeName=suggestedHost
|
||||
err = sched.assume(assumedPod, suggestedHost)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// bind the pod to its host asynchronously (we can do this b/c of the assumption step above).
|
||||
go func() {
|
||||
err := sched.bind(assumedPod, &v1.Binding{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: assumedPod.Namespace, Name: assumedPod.Name, UID: assumedPod.UID},
|
||||
Target: v1.ObjectReference{
|
||||
Kind: "Node",
|
||||
Name: suggestedHost,
|
||||
},
|
||||
})
|
||||
metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
|
||||
if err != nil {
|
||||
glog.Errorf("Internal error binding pod: (%v)", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
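For orientation, here is a hypothetical wiring sketch, not part of the vendored file: it shows how a single scheduling pass can be driven the way the scheduler_test.go file below does it. FakeConfigurator, Config, NewFromConfigurator, Binder, fakeBinder, fakePodConditionUpdater and scheduleOne all appear in this commit; the fake cache, node lister, algorithm and queue-pop values are assumed placeholders.

// Hypothetical sketch (assumes fakeCache, fakeNodeLister, fakeAlgorithm and
// popQueuedPod exist); mirrors the wiring used in scheduler_test.go below.
config := &Config{
	SchedulerCache: fakeCache,      // assumed schedulercache.Cache test double
	NodeLister:     fakeNodeLister, // assumed node lister test double
	Algorithm:      fakeAlgorithm,  // assumed algorithm.ScheduleAlgorithm test double
	GetBinder: func(*v1.Pod) Binder {
		return fakeBinder{func(b *v1.Binding) error { return nil }}
	},
	PodConditionUpdater: fakePodConditionUpdater{},
	NextPod:             func() *v1.Pod { return popQueuedPod() }, // assumed queue pop
	Error:               func(p *v1.Pod, err error) {},            // requeue/record in real use
	Recorder:            &record.FakeRecorder{},
}
sched, _ := NewFromConfigurator(&FakeConfigurator{Config: config}, nil...)
sched.scheduleOne() // schedule synchronously, assume, then bind asynchronously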
838
vendor/k8s.io/kubernetes/pkg/scheduler/scheduler_test.go
generated
vendored
Normal file
@@ -0,0 +1,838 @@
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package scheduler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
clientcache "k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||
"k8s.io/kubernetes/pkg/scheduler/core"
|
||||
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
|
||||
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
"k8s.io/kubernetes/pkg/scheduler/util"
|
||||
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
|
||||
)
|
||||
|
||||
type fakeBinder struct {
|
||||
b func(binding *v1.Binding) error
|
||||
}
|
||||
|
||||
func (fb fakeBinder) Bind(binding *v1.Binding) error { return fb.b(binding) }
|
||||
|
||||
type fakePodConditionUpdater struct{}
|
||||
|
||||
func (fc fakePodConditionUpdater) Update(pod *v1.Pod, podCondition *v1.PodCondition) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type fakePodPreemptor struct{}
|
||||
|
||||
func (fp fakePodPreemptor) GetUpdatedPod(pod *v1.Pod) (*v1.Pod, error) {
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
func (fp fakePodPreemptor) DeletePod(pod *v1.Pod) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fp fakePodPreemptor) SetNominatedNodeName(pod *v1.Pod, nomNodeName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fp fakePodPreemptor) RemoveNominatedNodeName(pod *v1.Pod) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func podWithID(id, desiredHost string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: id,
|
||||
UID: types.UID(id),
|
||||
SelfLink: util.Test.SelfLink(string(v1.ResourcePods), id),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: desiredHost,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func deletingPod(id string) *v1.Pod {
|
||||
deletionTimestamp := metav1.Now()
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: id,
|
||||
UID: types.UID(id),
|
||||
SelfLink: util.Test.SelfLink(string(v1.ResourcePods), id),
|
||||
DeletionTimestamp: &deletionTimestamp,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: "",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func podWithPort(id, desiredHost string, port int) *v1.Pod {
|
||||
pod := podWithID(id, desiredHost)
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{Name: "ctr", Ports: []v1.ContainerPort{{HostPort: int32(port)}}},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v1.ResourceList) *v1.Pod {
|
||||
pod := podWithID(id, desiredHost)
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{Name: "ctr", Resources: v1.ResourceRequirements{Limits: limits, Requests: requests}},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
type mockScheduler struct {
|
||||
machine string
|
||||
err error
|
||||
}
|
||||
|
||||
func (es mockScheduler) Schedule(pod *v1.Pod, ml algorithm.NodeLister) (string, error) {
|
||||
return es.machine, es.err
|
||||
}
|
||||
|
||||
func (es mockScheduler) Predicates() map[string]algorithm.FitPredicate {
|
||||
return nil
|
||||
}
|
||||
func (es mockScheduler) Prioritizers() []algorithm.PriorityConfig {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (es mockScheduler) Preempt(pod *v1.Pod, nodeLister algorithm.NodeLister, scheduleErr error) (*v1.Node, []*v1.Pod, []*v1.Pod, error) {
|
||||
return nil, nil, nil, nil
|
||||
}
|
||||
|
||||
func TestScheduler(t *testing.T) {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(t.Logf).Stop()
|
||||
errS := errors.New("scheduler")
|
||||
errB := errors.New("binder")
|
||||
testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
|
||||
|
||||
table := []struct {
|
||||
injectBindError error
|
||||
sendPod *v1.Pod
|
||||
algo algorithm.ScheduleAlgorithm
|
||||
expectErrorPod *v1.Pod
|
||||
expectForgetPod *v1.Pod
|
||||
expectAssumedPod *v1.Pod
|
||||
expectError error
|
||||
expectBind *v1.Binding
|
||||
eventReason string
|
||||
}{
|
||||
{
|
||||
sendPod: podWithID("foo", ""),
|
||||
algo: mockScheduler{testNode.Name, nil},
|
||||
expectBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
|
||||
expectAssumedPod: podWithID("foo", testNode.Name),
|
||||
eventReason: "Scheduled",
|
||||
}, {
|
||||
sendPod: podWithID("foo", ""),
|
||||
algo: mockScheduler{testNode.Name, errS},
|
||||
expectError: errS,
|
||||
expectErrorPod: podWithID("foo", ""),
|
||||
eventReason: "FailedScheduling",
|
||||
}, {
|
||||
sendPod: podWithID("foo", ""),
|
||||
algo: mockScheduler{testNode.Name, nil},
|
||||
expectBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
|
||||
expectAssumedPod: podWithID("foo", testNode.Name),
|
||||
injectBindError: errB,
|
||||
expectError: errB,
|
||||
expectErrorPod: podWithID("foo", testNode.Name),
|
||||
expectForgetPod: podWithID("foo", testNode.Name),
|
||||
eventReason: "FailedScheduling",
|
||||
}, {
|
||||
sendPod: deletingPod("foo"),
|
||||
algo: mockScheduler{"", nil},
|
||||
eventReason: "FailedScheduling",
|
||||
},
|
||||
}
|
||||
|
||||
for i, item := range table {
|
||||
var gotError error
|
||||
var gotPod *v1.Pod
|
||||
var gotForgetPod *v1.Pod
|
||||
var gotAssumedPod *v1.Pod
|
||||
var gotBinding *v1.Binding
|
||||
configurator := &FakeConfigurator{
|
||||
Config: &Config{
|
||||
SchedulerCache: &schedulertesting.FakeCache{
|
||||
ForgetFunc: func(pod *v1.Pod) {
|
||||
gotForgetPod = pod
|
||||
},
|
||||
AssumeFunc: func(pod *v1.Pod) {
|
||||
gotAssumedPod = pod
|
||||
},
|
||||
},
|
||||
NodeLister: schedulertesting.FakeNodeLister(
|
||||
[]*v1.Node{&testNode},
|
||||
),
|
||||
Algorithm: item.algo,
|
||||
GetBinder: func(pod *v1.Pod) Binder {
|
||||
return fakeBinder{func(b *v1.Binding) error {
|
||||
gotBinding = b
|
||||
return item.injectBindError
|
||||
}}
|
||||
},
|
||||
PodConditionUpdater: fakePodConditionUpdater{},
|
||||
Error: func(p *v1.Pod, err error) {
|
||||
gotPod = p
|
||||
gotError = err
|
||||
},
|
||||
NextPod: func() *v1.Pod {
|
||||
return item.sendPod
|
||||
},
|
||||
Recorder: eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"}),
|
||||
VolumeBinder: volumebinder.NewFakeVolumeBinder(&persistentvolume.FakeVolumeBinderConfig{AllBound: true}),
|
||||
},
|
||||
}
|
||||
|
||||
s, _ := NewFromConfigurator(configurator, nil...)
|
||||
called := make(chan struct{})
|
||||
events := eventBroadcaster.StartEventWatcher(func(e *v1.Event) {
|
||||
if e, a := item.eventReason, e.Reason; e != a {
|
||||
t.Errorf("%v: expected %v, got %v", i, e, a)
|
||||
}
|
||||
close(called)
|
||||
})
|
||||
s.scheduleOne()
|
||||
<-called
|
||||
if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
|
||||
}
|
||||
if e, a := item.expectErrorPod, gotPod; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("%v: error pod: wanted %v, got %v", i, e, a)
|
||||
}
|
||||
if e, a := item.expectForgetPod, gotForgetPod; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("%v: forget pod: wanted %v, got %v", i, e, a)
|
||||
}
|
||||
if e, a := item.expectError, gotError; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("%v: error: wanted %v, got %v", i, e, a)
|
||||
}
|
||||
if e, a := item.expectBind, gotBinding; !reflect.DeepEqual(e, a) {
|
||||
t.Errorf("%v: error: %s", i, diff.ObjectDiff(e, a))
|
||||
}
|
||||
events.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
||||
scache := schedulercache.New(100*time.Millisecond, stop)
|
||||
pod := podWithPort("pod.Name", "", 8080)
|
||||
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
|
||||
scache.AddNode(&node)
|
||||
nodeLister := schedulertesting.FakeNodeLister([]*v1.Node{&node})
|
||||
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
|
||||
scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, pod, &node)
|
||||
|
||||
waitPodExpireChan := make(chan struct{})
|
||||
timeout := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
return
|
||||
default:
|
||||
}
|
||||
pods, err := scache.List(labels.Everything())
|
||||
if err != nil {
|
||||
t.Fatalf("cache.List failed: %v", err)
|
||||
}
|
||||
if len(pods) == 0 {
|
||||
close(waitPodExpireChan)
|
||||
return
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
// waiting for the assumed pod to expire
|
||||
select {
|
||||
case <-waitPodExpireChan:
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
close(timeout)
|
||||
t.Fatalf("timeout timeout in waiting pod expire after %v", wait.ForeverTestTimeout)
|
||||
}
|
||||
|
||||
// We use conflicted pod ports to incur fit predicate failure if first pod not removed.
|
||||
secondPod := podWithPort("bar", "", 8080)
|
||||
queuedPodStore.Add(secondPod)
|
||||
scheduler.scheduleOne()
|
||||
select {
|
||||
case b := <-bindingChan:
|
||||
expectBinding := &v1.Binding{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "bar", UID: types.UID("bar")},
|
||||
Target: v1.ObjectReference{Kind: "Node", Name: node.Name},
|
||||
}
|
||||
if !reflect.DeepEqual(expectBinding, b) {
|
||||
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
||||
}
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
t.Fatalf("timeout in binding after %v", wait.ForeverTestTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
||||
scache := schedulercache.New(10*time.Minute, stop)
|
||||
firstPod := podWithPort("pod.Name", "", 8080)
|
||||
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
|
||||
scache.AddNode(&node)
|
||||
nodeLister := schedulertesting.FakeNodeLister([]*v1.Node{&node})
|
||||
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
|
||||
scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, firstPod, &node)
|
||||
|
||||
// We use conflicted pod ports to incur fit predicate failure.
|
||||
secondPod := podWithPort("bar", "", 8080)
|
||||
queuedPodStore.Add(secondPod)
|
||||
// queuedPodStore: [bar:8080]
|
||||
// cache: [(assumed)foo:8080]
|
||||
|
||||
scheduler.scheduleOne()
|
||||
select {
|
||||
case err := <-errChan:
|
||||
expectErr := &core.FitError{
|
||||
Pod: secondPod,
|
||||
NumAllNodes: 1,
|
||||
FailedPredicates: core.FailedPredicateMap{node.Name: []algorithm.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts}},
|
||||
}
|
||||
if !reflect.DeepEqual(expectErr, err) {
|
||||
t.Errorf("err want=%v, get=%v", expectErr, err)
|
||||
}
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
t.Fatalf("timeout in fitting after %v", wait.ForeverTestTimeout)
|
||||
}
|
||||
|
||||
// We mimic the workflow of cache behavior when a pod is removed by user.
|
||||
// Note: if the schedulercache timeout were very short, the first pod would expire
// and be removed on its own (without any explicit action on the schedulercache). Even in that case,
// explicitly calling AddPod still corrects the behavior.
|
||||
firstPod.Spec.NodeName = node.Name
|
||||
if err := scache.AddPod(firstPod); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
if err := scache.RemovePod(firstPod); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
queuedPodStore.Add(secondPod)
|
||||
scheduler.scheduleOne()
|
||||
select {
|
||||
case b := <-bindingChan:
|
||||
expectBinding := &v1.Binding{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "bar", UID: types.UID("bar")},
|
||||
Target: v1.ObjectReference{Kind: "Node", Name: node.Name},
|
||||
}
|
||||
if !reflect.DeepEqual(expectBinding, b) {
|
||||
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
||||
}
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
t.Fatalf("timeout in binding after %v", wait.ForeverTestTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// Scheduler should preserve predicate constraint even if binding was longer
|
||||
// than cache ttl
|
||||
func TestSchedulerErrorWithLongBinding(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
|
||||
firstPod := podWithPort("foo", "", 8080)
|
||||
conflictPod := podWithPort("bar", "", 8080)
|
||||
pods := map[string]*v1.Pod{firstPod.Name: firstPod, conflictPod.Name: conflictPod}
|
||||
for _, test := range []struct {
|
||||
Expected map[string]bool
|
||||
CacheTTL time.Duration
|
||||
BindingDuration time.Duration
|
||||
}{
|
||||
{
|
||||
Expected: map[string]bool{firstPod.Name: true},
|
||||
CacheTTL: 100 * time.Millisecond,
|
||||
BindingDuration: 300 * time.Millisecond,
|
||||
},
|
||||
{
|
||||
Expected: map[string]bool{firstPod.Name: true},
|
||||
CacheTTL: 10 * time.Second,
|
||||
BindingDuration: 300 * time.Millisecond,
|
||||
},
|
||||
} {
|
||||
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
||||
scache := schedulercache.New(test.CacheTTL, stop)
|
||||
|
||||
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
|
||||
scache.AddNode(&node)
|
||||
|
||||
nodeLister := schedulertesting.FakeNodeLister([]*v1.Node{&node})
|
||||
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
|
||||
|
||||
scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry(
|
||||
queuedPodStore, scache, nodeLister, predicateMap, stop, test.BindingDuration)
|
||||
scheduler.Run()
|
||||
queuedPodStore.Add(firstPod)
|
||||
queuedPodStore.Add(conflictPod)
|
||||
|
||||
resultBindings := map[string]bool{}
|
||||
waitChan := time.After(5 * time.Second)
|
||||
for finished := false; !finished; {
|
||||
select {
|
||||
case b := <-bindingChan:
|
||||
resultBindings[b.Name] = true
|
||||
p := pods[b.Name]
|
||||
p.Spec.NodeName = b.Target.Name
|
||||
scache.AddPod(p)
|
||||
case <-waitChan:
|
||||
finished = true
|
||||
}
|
||||
}
|
||||
if !reflect.DeepEqual(resultBindings, test.Expected) {
|
||||
t.Errorf("Result binding are not equal to expected. %v != %v", resultBindings, test.Expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// queuedPodStore: pods queued before processing.
|
||||
// cache: scheduler cache that might contain assumed pods.
|
||||
func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache,
|
||||
nodeLister schedulertesting.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) {
|
||||
|
||||
scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap, nil)
|
||||
|
||||
queuedPodStore.Add(pod)
|
||||
// queuedPodStore: [foo:8080]
|
||||
// cache: []
|
||||
|
||||
scheduler.scheduleOne()
|
||||
// queuedPodStore: []
|
||||
// cache: [(assumed)foo:8080]
|
||||
|
||||
select {
|
||||
case b := <-bindingChan:
|
||||
expectBinding := &v1.Binding{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: pod.Name, UID: types.UID(pod.Name)},
|
||||
Target: v1.ObjectReference{Kind: "Node", Name: node.Name},
|
||||
}
|
||||
if !reflect.DeepEqual(expectBinding, b) {
|
||||
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
||||
}
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
|
||||
}
|
||||
return scheduler, bindingChan, errChan
|
||||
}
|
||||
|
||||
func TestSchedulerFailedSchedulingReasons(t *testing.T) {
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
||||
scache := schedulercache.New(10*time.Minute, stop)
|
||||
|
||||
// Design the baseline for the pods, and we will make nodes that don't fit it later.
|
||||
var cpu = int64(4)
|
||||
var mem = int64(500)
|
||||
podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{
|
||||
v1.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)),
|
||||
v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
|
||||
}, v1.ResourceList{
|
||||
v1.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)),
|
||||
v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
|
||||
})
|
||||
|
||||
// create several nodes which cannot schedule the above pod
|
||||
nodes := []*v1.Node{}
|
||||
for i := 0; i < 100; i++ {
|
||||
uid := fmt.Sprintf("machine%v", i)
|
||||
node := v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: uid, UID: types.UID(uid)},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
|
||||
v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
|
||||
v1.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
|
||||
v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
|
||||
v1.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
|
||||
}},
|
||||
}
|
||||
scache.AddNode(&node)
|
||||
nodes = append(nodes, &node)
|
||||
}
|
||||
nodeLister := schedulertesting.FakeNodeLister(nodes)
|
||||
predicateMap := map[string]algorithm.FitPredicate{
|
||||
"PodFitsResources": predicates.PodFitsResources,
|
||||
}
|
||||
|
||||
// Create expected failure reasons for all the nodes. Hopefully they will get rolled up into a non-spammy summary.
|
||||
failedPredicatesMap := core.FailedPredicateMap{}
|
||||
for _, node := range nodes {
|
||||
failedPredicatesMap[node.Name] = []algorithm.PredicateFailureReason{
|
||||
predicates.NewInsufficientResourceError(v1.ResourceCPU, 4000, 0, 2000),
|
||||
predicates.NewInsufficientResourceError(v1.ResourceMemory, 500, 0, 100),
|
||||
}
|
||||
}
|
||||
scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap, nil)
|
||||
|
||||
queuedPodStore.Add(podWithTooBigResourceRequests)
|
||||
scheduler.scheduleOne()
|
||||
select {
|
||||
case err := <-errChan:
|
||||
expectErr := &core.FitError{
|
||||
Pod: podWithTooBigResourceRequests,
|
||||
NumAllNodes: len(nodes),
|
||||
FailedPredicates: failedPredicatesMap,
|
||||
}
|
||||
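// The rolled-up FitError text must stay compact even with 100 failing nodes;
// this length check guards against per-node spam in events and logs.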
if len(fmt.Sprint(expectErr)) > 150 {
|
||||
t.Errorf("message is too spammy ! %v ", len(fmt.Sprint(expectErr)))
|
||||
}
|
||||
if !reflect.DeepEqual(expectErr, err) {
|
||||
t.Errorf("\n err \nWANT=%+v,\nGOT=%+v", expectErr, err)
|
||||
}
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
t.Fatalf("timeout after %v", wait.ForeverTestTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// queuedPodStore: pods queued before processing.
|
||||
// scache: scheduler cache that might contain assumed pods.
|
||||
func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister schedulertesting.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, recorder record.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
|
||||
algo := core.NewGenericScheduler(
|
||||
scache,
|
||||
nil,
|
||||
nil,
|
||||
predicateMap,
|
||||
algorithm.EmptyPredicateMetadataProducer,
|
||||
[]algorithm.PriorityConfig{},
|
||||
algorithm.EmptyPriorityMetadataProducer,
|
||||
[]algorithm.SchedulerExtender{},
|
||||
nil,
|
||||
schedulertesting.FakePersistentVolumeClaimLister{},
|
||||
false)
|
||||
bindingChan := make(chan *v1.Binding, 1)
|
||||
errChan := make(chan error, 1)
|
||||
configurator := &FakeConfigurator{
|
||||
Config: &Config{
|
||||
SchedulerCache: scache,
|
||||
NodeLister: nodeLister,
|
||||
Algorithm: algo,
|
||||
GetBinder: func(pod *v1.Pod) Binder {
|
||||
return fakeBinder{func(b *v1.Binding) error {
|
||||
bindingChan <- b
|
||||
return nil
|
||||
}}
|
||||
},
|
||||
NextPod: func() *v1.Pod {
|
||||
return clientcache.Pop(queuedPodStore).(*v1.Pod)
|
||||
},
|
||||
Error: func(p *v1.Pod, err error) {
|
||||
errChan <- err
|
||||
},
|
||||
Recorder: &record.FakeRecorder{},
|
||||
PodConditionUpdater: fakePodConditionUpdater{},
|
||||
PodPreemptor: fakePodPreemptor{},
|
||||
VolumeBinder: volumebinder.NewFakeVolumeBinder(&persistentvolume.FakeVolumeBinderConfig{AllBound: true}),
|
||||
},
|
||||
}
|
||||
|
||||
if recorder != nil {
|
||||
configurator.Config.Recorder = recorder
|
||||
}
|
||||
|
||||
sched, _ := NewFromConfigurator(configurator, nil...)
|
||||
|
||||
return sched, bindingChan, errChan
|
||||
}
|
||||
|
||||
func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister schedulertesting.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
|
||||
algo := core.NewGenericScheduler(
|
||||
scache,
|
||||
nil,
|
||||
nil,
|
||||
predicateMap,
|
||||
algorithm.EmptyPredicateMetadataProducer,
|
||||
[]algorithm.PriorityConfig{},
|
||||
algorithm.EmptyPriorityMetadataProducer,
|
||||
[]algorithm.SchedulerExtender{},
|
||||
nil,
|
||||
schedulertesting.FakePersistentVolumeClaimLister{},
|
||||
false)
|
||||
bindingChan := make(chan *v1.Binding, 2)
|
||||
configurator := &FakeConfigurator{
|
||||
Config: &Config{
|
||||
SchedulerCache: scache,
|
||||
NodeLister: nodeLister,
|
||||
Algorithm: algo,
|
||||
GetBinder: func(pod *v1.Pod) Binder {
|
||||
return fakeBinder{func(b *v1.Binding) error {
|
||||
time.Sleep(bindingTime)
|
||||
bindingChan <- b
|
||||
return nil
|
||||
}}
|
||||
},
|
||||
WaitForCacheSync: func() bool {
|
||||
return true
|
||||
},
|
||||
NextPod: func() *v1.Pod {
|
||||
return clientcache.Pop(queuedPodStore).(*v1.Pod)
|
||||
},
|
||||
Error: func(p *v1.Pod, err error) {
|
||||
queuedPodStore.AddIfNotPresent(p)
|
||||
},
|
||||
Recorder: &record.FakeRecorder{},
|
||||
PodConditionUpdater: fakePodConditionUpdater{},
|
||||
PodPreemptor: fakePodPreemptor{},
|
||||
StopEverything: stop,
|
||||
VolumeBinder: volumebinder.NewFakeVolumeBinder(&persistentvolume.FakeVolumeBinderConfig{AllBound: true}),
|
||||
},
|
||||
}
|
||||
|
||||
sched, _ := NewFromConfigurator(configurator, nil...)
|
||||
|
||||
return sched, bindingChan
|
||||
}
|
||||
|
||||
func setupTestSchedulerWithVolumeBinding(fakeVolumeBinder *volumebinder.VolumeBinder, stop <-chan struct{}, broadcaster record.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
|
||||
testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
|
||||
nodeLister := schedulertesting.FakeNodeLister([]*v1.Node{&testNode})
|
||||
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
||||
queuedPodStore.Add(podWithID("foo", ""))
|
||||
scache := schedulercache.New(10*time.Minute, stop)
|
||||
scache.AddNode(&testNode)
|
||||
|
||||
predicateMap := map[string]algorithm.FitPredicate{
|
||||
predicates.CheckVolumeBindingPred: predicates.NewVolumeBindingPredicate(fakeVolumeBinder),
|
||||
}
|
||||
|
||||
recorder := broadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "scheduler"})
|
||||
s, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap, recorder)
|
||||
s.config.VolumeBinder = fakeVolumeBinder
|
||||
return s, bindingChan, errChan
|
||||
}
|
||||
|
||||
// This is a workaround because golint complains that errors cannot
|
||||
// end with punctuation. However, the real predicate error message does
|
||||
// end with a period.
|
||||
func makePredicateError(failReason string) error {
|
||||
s := fmt.Sprintf("0/1 nodes are available: %v.", failReason)
|
||||
return fmt.Errorf(s)
|
||||
}
|
||||
|
||||
func TestSchedulerWithVolumeBinding(t *testing.T) {
|
||||
order := []string{predicates.CheckVolumeBindingPred, predicates.GeneralPred}
|
||||
predicates.SetPredicatesOrdering(order)
|
||||
findErr := fmt.Errorf("find err")
|
||||
assumeErr := fmt.Errorf("assume err")
|
||||
bindErr := fmt.Errorf("bind err")
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(t.Logf).Stop()
|
||||
|
||||
// This can be small because we wait for pod to finish scheduling first
|
||||
chanTimeout := 2 * time.Second
|
||||
|
||||
utilfeature.DefaultFeatureGate.Set("VolumeScheduling=true")
|
||||
defer utilfeature.DefaultFeatureGate.Set("VolumeScheduling=false")
|
||||
|
||||
table := map[string]struct {
|
||||
expectError error
|
||||
expectPodBind *v1.Binding
|
||||
expectAssumeCalled bool
|
||||
expectBindCalled bool
|
||||
eventReason string
|
||||
volumeBinderConfig *persistentvolume.FakeVolumeBinderConfig
|
||||
}{
|
||||
"all-bound": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
AllBound: true,
|
||||
FindUnboundSatsified: true,
|
||||
FindBoundSatsified: true,
|
||||
},
|
||||
expectAssumeCalled: true,
|
||||
expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
|
||||
|
||||
eventReason: "Scheduled",
|
||||
},
|
||||
"bound,invalid-pv-affinity": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
AllBound: true,
|
||||
FindUnboundSatsified: true,
|
||||
FindBoundSatsified: false,
|
||||
},
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
|
||||
},
|
||||
"unbound,no-matches": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
FindUnboundSatsified: false,
|
||||
FindBoundSatsified: true,
|
||||
},
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
|
||||
},
|
||||
"bound-and-unbound-unsatisfied": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
FindUnboundSatsified: false,
|
||||
FindBoundSatsified: false,
|
||||
},
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
|
||||
},
|
||||
"unbound,found-matches": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
FindUnboundSatsified: true,
|
||||
FindBoundSatsified: true,
|
||||
AssumeBindingRequired: true,
|
||||
},
|
||||
expectAssumeCalled: true,
|
||||
expectBindCalled: true,
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: fmt.Errorf("Volume binding started, waiting for completion"),
|
||||
},
|
||||
"unbound,found-matches,already-bound": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
FindUnboundSatsified: true,
|
||||
FindBoundSatsified: true,
|
||||
AssumeBindingRequired: false,
|
||||
},
|
||||
expectAssumeCalled: true,
|
||||
expectBindCalled: false,
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: fmt.Errorf("Volume binding started, waiting for completion"),
|
||||
},
|
||||
"predicate-error": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
FindErr: findErr,
|
||||
},
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: findErr,
|
||||
},
|
||||
"assume-error": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
FindUnboundSatsified: true,
|
||||
FindBoundSatsified: true,
|
||||
AssumeErr: assumeErr,
|
||||
},
|
||||
expectAssumeCalled: true,
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: assumeErr,
|
||||
},
|
||||
"bind-error": {
|
||||
volumeBinderConfig: &persistentvolume.FakeVolumeBinderConfig{
|
||||
FindUnboundSatsified: true,
|
||||
FindBoundSatsified: true,
|
||||
AssumeBindingRequired: true,
|
||||
BindErr: bindErr,
|
||||
},
|
||||
expectAssumeCalled: true,
|
||||
expectBindCalled: true,
|
||||
eventReason: "FailedScheduling",
|
||||
expectError: bindErr,
|
||||
},
|
||||
}
|
||||
|
||||
for name, item := range table {
|
||||
stop := make(chan struct{})
|
||||
fakeVolumeBinder := volumebinder.NewFakeVolumeBinder(item.volumeBinderConfig)
|
||||
internalBinder, ok := fakeVolumeBinder.Binder.(*persistentvolume.FakeVolumeBinder)
|
||||
if !ok {
|
||||
t.Fatalf("Failed to get fake volume binder")
|
||||
}
|
||||
s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(fakeVolumeBinder, stop, eventBroadcaster)
|
||||
|
||||
eventChan := make(chan struct{})
|
||||
events := eventBroadcaster.StartEventWatcher(func(e *v1.Event) {
|
||||
if e, a := item.eventReason, e.Reason; e != a {
|
||||
t.Errorf("%v: expected %v, got %v", name, e, a)
|
||||
}
|
||||
close(eventChan)
|
||||
})
|
||||
|
||||
go fakeVolumeBinder.Run(s.bindVolumesWorker, stop)
|
||||
|
||||
s.scheduleOne()
|
||||
|
||||
// Wait for pod to succeed or fail scheduling
|
||||
select {
|
||||
case <-eventChan:
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
t.Fatalf("%v: scheduling timeout after %v", name, wait.ForeverTestTimeout)
|
||||
}
|
||||
|
||||
events.Stop()
|
||||
|
||||
// Wait for scheduling to return an error
|
||||
select {
|
||||
case err := <-errChan:
|
||||
if item.expectError == nil || !reflect.DeepEqual(item.expectError.Error(), err.Error()) {
|
||||
t.Errorf("%v: \n err \nWANT=%+v,\nGOT=%+v", name, item.expectError, err)
|
||||
}
|
||||
case <-time.After(chanTimeout):
|
||||
if item.expectError != nil {
|
||||
t.Errorf("%v: did not receive error after %v", name, chanTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for pod to succeed binding
|
||||
select {
|
||||
case b := <-bindingChan:
|
||||
if !reflect.DeepEqual(item.expectPodBind, b) {
|
||||
t.Errorf("%v: \n err \nWANT=%+v,\nGOT=%+v", name, item.expectPodBind, b)
|
||||
}
|
||||
case <-time.After(chanTimeout):
|
||||
if item.expectPodBind != nil {
|
||||
t.Errorf("%v: did not receive pod binding after %v", name, chanTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
if item.expectAssumeCalled != internalBinder.AssumeCalled {
|
||||
t.Errorf("%v: expectedAssumeCall %v", name, item.expectAssumeCalled)
|
||||
}
|
||||
|
||||
if item.expectBindCalled != internalBinder.BindCalled {
|
||||
t.Errorf("%v: expectedBindCall %v", name, item.expectBindCalled)
|
||||
}
|
||||
|
||||
close(stop)
|
||||
}
|
||||
}
|
||||
105
vendor/k8s.io/kubernetes/pkg/scheduler/testutil.go
generated
vendored
Normal file
@@ -0,0 +1,105 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	"k8s.io/kubernetes/pkg/scheduler/core"
	"k8s.io/kubernetes/pkg/scheduler/util"
)

// FakeConfigurator is an implementation for test.
type FakeConfigurator struct {
	Config *Config
}

// GetPriorityFunctionConfigs is not implemented yet.
func (fc *FakeConfigurator) GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error) {
	return nil, fmt.Errorf("not implemented")
}

// GetPriorityMetadataProducer is not implemented yet.
func (fc *FakeConfigurator) GetPriorityMetadataProducer() (algorithm.PriorityMetadataProducer, error) {
	return nil, fmt.Errorf("not implemented")
}

// GetPredicateMetadataProducer is not implemented yet.
func (fc *FakeConfigurator) GetPredicateMetadataProducer() (algorithm.PredicateMetadataProducer, error) {
	return nil, fmt.Errorf("not implemented")
}

// GetPredicates is not implemented yet.
func (fc *FakeConfigurator) GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error) {
	return nil, fmt.Errorf("not implemented")
}

// GetHardPodAffinitySymmetricWeight is not implemented yet.
func (fc *FakeConfigurator) GetHardPodAffinitySymmetricWeight() int32 {
	panic("not implemented")
}

// GetSchedulerName is not implemented yet.
func (fc *FakeConfigurator) GetSchedulerName() string {
	panic("not implemented")
}

// MakeDefaultErrorFunc is not implemented yet.
func (fc *FakeConfigurator) MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue core.SchedulingQueue) func(pod *v1.Pod, err error) {
	return nil
}

// GetNodeLister is not implemented yet.
func (fc *FakeConfigurator) GetNodeLister() corelisters.NodeLister {
	return nil
}

// GetClient is not implemented yet.
func (fc *FakeConfigurator) GetClient() clientset.Interface {
	return nil
}

// GetScheduledPodLister is not implemented yet.
func (fc *FakeConfigurator) GetScheduledPodLister() corelisters.PodLister {
	return nil
}

// Create returns FakeConfigurator.Config
func (fc *FakeConfigurator) Create() (*Config, error) {
	return fc.Config, nil
}

// CreateFromProvider returns FakeConfigurator.Config
func (fc *FakeConfigurator) CreateFromProvider(providerName string) (*Config, error) {
	return fc.Config, nil
}

// CreateFromConfig returns FakeConfigurator.Config
func (fc *FakeConfigurator) CreateFromConfig(policy schedulerapi.Policy) (*Config, error) {
	return fc.Config, nil
}

// CreateFromKeys returns FakeConfigurator.Config
func (fc *FakeConfigurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error) {
	return fc.Config, nil
}
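A brief, hypothetical usage note for the fake above: every Create* variant hands back the same *Config, so tests can build a Scheduler without exercising provider or policy plumbing. The snippet below is a sketch that assumes it runs inside this package and that cfg is any prepared Config; it only uses names that appear in this commit.

// Hypothetical sketch: all construction paths return the shared fc.Config.
fc := &FakeConfigurator{Config: cfg} // cfg: any prepared *Config (assumed)
cfgA, _ := fc.Create()
cfgB, _ := fc.CreateFromProvider("ignored-provider-name") // provider name is not inspected
_ = cfgA == cfgB                                           // always true: both point at fc.Config
sched, _ := NewFromConfigurator(fc, nil...)                // how scheduler_test.go builds a Scheduler
_ = sched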