Update go dependencies
commit 063cc68d1c (parent d5cf22c129)
1321 changed files with 52830 additions and 31081 deletions

58  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/BUILD  (generated, vendored, Normal file)
@@ -0,0 +1,58 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "scheduler_interface.go",
        "types.go",
        "well_known_labels.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "scheduler_interface_test.go",
        "types_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/cache:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/scheduler/algorithm/predicates:all-srcs",
        "//pkg/scheduler/algorithm/priorities:all-srcs",
    ],
    tags = ["automanaged"],
)

19  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/doc.go  (generated, vendored, Normal file)
@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package algorithm contains a generic Scheduler interface and several
// implementations.
package algorithm // import "k8s.io/kubernetes/pkg/scheduler/algorithm"

101  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/BUILD  (generated, vendored, Normal file)
@@ -0,0 +1,101 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = [
        "balanced_resource_allocation.go",
        "image_locality.go",
        "interpod_affinity.go",
        "least_requested.go",
        "metadata.go",
        "most_requested.go",
        "node_affinity.go",
        "node_label.go",
        "node_prefer_avoid_pods.go",
        "reduce.go",
        "requested_to_capacity_ratio.go",
        "resource_allocation.go",
        "resource_limits.go",
        "selector_spreading.go",
        "taint_toleration.go",
        "test_util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities",
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/util/node:go_default_library",
        "//pkg/util/parsers:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "balanced_resource_allocation_test.go",
        "image_locality_test.go",
        "interpod_affinity_test.go",
        "least_requested_test.go",
        "metadata_test.go",
        "most_requested_test.go",
        "node_affinity_test.go",
        "node_label_test.go",
        "node_prefer_avoid_pods_test.go",
        "requested_to_capacity_ratio_test.go",
        "resource_limits_test.go",
        "selector_spreading_test.go",
        "taint_toleration_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//pkg/scheduler/api:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/testing:go_default_library",
        "//pkg/util/parsers:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//pkg/scheduler/algorithm/priorities/util:all-srcs",
    ],
    tags = ["automanaged"],
)

77  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go  (generated, vendored, Normal file)
@@ -0,0 +1,77 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "math"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/features"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

var (
    balancedResourcePriority = &ResourceAllocationPriority{"BalancedResourceAllocation", balancedResourceScorer}

    // BalancedResourceAllocationMap favors nodes with balanced resource usage rate.
    // BalancedResourceAllocationMap should **NOT** be used alone, and **MUST** be used together
    // with LeastRequestedPriority. It calculates the difference between the cpu and memory fraction
    // of capacity, and prioritizes the host based on how close the two metrics are to each other.
    // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
    // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced
    // Resource Utilization"
    BalancedResourceAllocationMap = balancedResourcePriority.PriorityMap
)

func balancedResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
    cpuFraction := fractionOfCapacity(requested.MilliCPU, allocable.MilliCPU)
    memoryFraction := fractionOfCapacity(requested.Memory, allocable.Memory)
    // This is to find a node which has the most balanced CPU, memory and volume usage.
    if includeVolumes && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && allocatableVolumes > 0 {
        volumeFraction := float64(requestedVolumes) / float64(allocatableVolumes)
        if cpuFraction >= 1 || memoryFraction >= 1 || volumeFraction >= 1 {
            // if requested >= capacity, the corresponding host should never be preferred.
            return 0
        }
        // Compute variance for all the three fractions.
        mean := (cpuFraction + memoryFraction + volumeFraction) / float64(3)
        variance := float64((((cpuFraction - mean) * (cpuFraction - mean)) + ((memoryFraction - mean) * (memoryFraction - mean)) + ((volumeFraction - mean) * (volumeFraction - mean))) / float64(3))
        // Since the variance is computed between positive fractions, it is itself a positive fraction. 1-variance lets the
        // score be higher for the node which has the least variance, and multiplying it by 10 provides the scaling
        // factor needed.
        return int64((1 - variance) * float64(schedulerapi.MaxPriority))
    }

    if cpuFraction >= 1 || memoryFraction >= 1 {
        // if requested >= capacity, the corresponding host should never be preferred.
        return 0
    }
    // Upper and lower boundary of difference between cpuFraction and memoryFraction are -1 and 1
    // respectively. Multiplying the absolute value of the difference by 10 scales the value to
    // 0-10 with 0 representing well balanced allocation and 10 poorly balanced. Subtracting it from
    // 10 leads to the score which also scales from 0 to 10 while 10 representing well balanced.
    diff := math.Abs(cpuFraction - memoryFraction)
    return int64((1 - diff) * float64(schedulerapi.MaxPriority))
}

func fractionOfCapacity(requested, capacity int64) float64 {
    if capacity == 0 {
        return 1
    }
    return float64(requested) / float64(capacity)
}
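
[Editor's note, not part of the vendored diff] A minimal standalone sketch of the two-resource branch above, re-derived locally so it runs without the scheduler packages; the literal 10 stands in for schedulerapi.MaxPriority and the numeric inputs are purely illustrative.

package main

import (
    "fmt"
    "math"
)

// balancedScore re-derives the two-resource formula: score = (1 - |cpuFraction - memoryFraction|) * 10.
func balancedScore(reqCPU, capCPU, reqMem, capMem int64) int64 {
    cpuFraction := float64(reqCPU) / float64(capCPU)
    memoryFraction := float64(reqMem) / float64(capMem)
    if cpuFraction >= 1 || memoryFraction >= 1 {
        return 0
    }
    return int64((1 - math.Abs(cpuFraction-memoryFraction)) * 10)
}

func main() {
    // 3000m of 4000m CPU (0.75) and 6Gi of 8Gi memory (0.75): perfectly balanced -> 10.
    fmt.Println(balancedScore(3000, 4000, 6*1024, 8*1024))
    // 3000m of 4000m CPU (0.75) and 2Gi of 8Gi memory (0.25): |0.75-0.25| = 0.5 -> 5.
    fmt.Println(balancedScore(3000, 4000, 2*1024, 8*1024))
}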

97  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/image_locality.go  (generated, vendored, Normal file)
@@ -0,0 +1,97 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "fmt"
    "strings"

    "k8s.io/api/core/v1"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
    "k8s.io/kubernetes/pkg/util/parsers"
)

// This is a reasonable size range of all container images. 90%ile of images on dockerhub drops into this range.
const (
    mb         int64 = 1024 * 1024
    minImgSize int64 = 23 * mb
    maxImgSize int64 = 1000 * mb
)

// ImageLocalityPriorityMap is a priority function that favors nodes that already have requested pod container's images.
// It will detect whether the requested images are present on a node, and then calculate a score ranging from 0 to 10
// based on the total size of those images.
// - If none of the images are present, this node will be given the lowest priority.
// - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority.
func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
    node := nodeInfo.Node()
    if node == nil {
        return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
    }

    sumSize := totalImageSize(nodeInfo, pod.Spec.Containers)

    return schedulerapi.HostPriority{
        Host:  node.Name,
        Score: calculateScoreFromSize(sumSize),
    }, nil
}

// calculateScoreFromSize calculates the priority of a node. sumSize is sum size of requested images on this node.
// 1. Split image size range into 10 buckets.
// 2. Decide the priority of a given sumSize based on which bucket it belongs to.
func calculateScoreFromSize(sumSize int64) int {
    switch {
    case sumSize == 0 || sumSize < minImgSize:
        // 0 means none of the images required by this pod are present on this
        // node or the total size of the images present is too small to be taken into further consideration.
        return 0

    case sumSize >= maxImgSize:
        // If existing images' total size is larger than max, just make it highest priority.
        return schedulerapi.MaxPriority
    }

    return int((int64(schedulerapi.MaxPriority) * (sumSize - minImgSize) / (maxImgSize - minImgSize)) + 1)
}

// totalImageSize returns the total image size of all the containers that are already on the node.
func totalImageSize(nodeInfo *schedulercache.NodeInfo, containers []v1.Container) int64 {
    var total int64

    imageSizes := nodeInfo.ImageSizes()
    for _, container := range containers {
        if size, ok := imageSizes[normalizedImageName(container.Image)]; ok {
            total += size
        }
    }

    return total
}

// normalizedImageName returns the CRI compliant name for a given image.
// TODO: cover the corner cases of missed matches, e.g,
// 1. Using Docker as runtime and docker.io/library/test:tag in pod spec, but only test:tag will present in node status
// 2. Using the implicit registry, i.e., test:tag or library/test:tag in pod spec but only docker.io/library/test:tag
// in node status; note that if users consistently use one registry format, this should not happen.
func normalizedImageName(name string) string {
    if strings.LastIndex(name, ":") <= strings.LastIndex(name, "/") {
        name = name + ":" + parsers.DefaultImageTag
    }
    return name
}
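
[Editor's note, not part of the vendored diff] A small standalone re-derivation of the bucket scoring in calculateScoreFromSize, using the same assumed constants (23MB minimum, 1000MB maximum, MaxPriority = 10); the sample sizes in main are illustrative.

package main

import "fmt"

const (
    mb          int64 = 1024 * 1024
    minImgSize  int64 = 23 * mb
    maxImgSize  int64 = 1000 * mb
    maxPriority int64 = 10
)

// scoreFromSize mirrors the linear-bucket behavior: 0 below the minimum,
// maxPriority above the maximum, and a 1..10 linear ramp in between.
func scoreFromSize(sumSize int64) int64 {
    switch {
    case sumSize < minImgSize:
        return 0
    case sumSize >= maxImgSize:
        return maxPriority
    }
    return maxPriority*(sumSize-minImgSize)/(maxImgSize-minImgSize) + 1
}

func main() {
    fmt.Println(scoreFromSize(10 * mb))   // below the minimum -> 0
    fmt.Println(scoreFromSize(500 * mb))  // roughly mid-range -> 5
    fmt.Println(scoreFromSize(2000 * mb)) // above the maximum -> 10
}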

240  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go  (generated, vendored, Normal file)
@@ -0,0 +1,240 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "sync"

    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/util/workqueue"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"

    "github.com/golang/glog"
)

// InterPodAffinity contains information to calculate inter pod affinity.
type InterPodAffinity struct {
    info                  predicates.NodeInfo
    nodeLister            algorithm.NodeLister
    podLister             algorithm.PodLister
    hardPodAffinityWeight int32
}

// NewInterPodAffinityPriority creates an InterPodAffinity.
func NewInterPodAffinityPriority(
    info predicates.NodeInfo,
    nodeLister algorithm.NodeLister,
    podLister algorithm.PodLister,
    hardPodAffinityWeight int32) algorithm.PriorityFunction {
    interPodAffinity := &InterPodAffinity{
        info:                  info,
        nodeLister:            nodeLister,
        podLister:             podLister,
        hardPodAffinityWeight: hardPodAffinityWeight,
    }
    return interPodAffinity.CalculateInterPodAffinityPriority
}

type podAffinityPriorityMap struct {
    sync.Mutex

    // nodes contain all nodes that should be considered
    nodes []*v1.Node
    // counts store the mapping from node name to so-far computed score of
    // the node.
    counts map[string]float64
    // The first error that we faced.
    firstError error
}

func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap {
    return &podAffinityPriorityMap{
        nodes:  nodes,
        counts: make(map[string]float64, len(nodes)),
    }
}

func (p *podAffinityPriorityMap) setError(err error) {
    p.Lock()
    defer p.Unlock()
    if p.firstError == nil {
        p.firstError = err
    }
}

func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, weight float64) {
    namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(podDefiningAffinityTerm, term)
    selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
    if err != nil {
        p.setError(err)
        return
    }
    match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, namespaces, selector)
    if match {
        func() {
            p.Lock()
            defer p.Unlock()
            for _, node := range p.nodes {
                if priorityutil.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) {
                    p.counts[node.Name] += weight
                }
            }
        }()
    }
}

func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, multiplier int) {
    for i := range terms {
        term := &terms[i]
        p.processTerm(&term.PodAffinityTerm, podDefiningAffinityTerm, podToCheck, fixedNode, float64(term.Weight*int32(multiplier)))
    }
}

// CalculateInterPodAffinityPriority computes a sum by iterating through the elements of weightedPodAffinityTerm and adding
// "weight" to the sum if the corresponding PodAffinityTerm is satisfied for
// that node; the node(s) with the highest sum are the most preferred.
// Symmetry needs to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,
// and for hard requirements from podAffinity.
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
    affinity := pod.Spec.Affinity
    hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
    hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil

    allNodeNames := make([]string, 0, len(nodeNameToInfo))
    for name := range nodeNameToInfo {
        allNodeNames = append(allNodeNames, name)
    }

    // convert the topology key based weights to the node name based weights
    var maxCount float64
    var minCount float64
    // priorityMap stores the mapping from node name to so-far computed score of
    // the node.
    pm := newPodAffinityPriorityMap(nodes)

    processPod := func(existingPod *v1.Pod) error {
        existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
        if err != nil {
            if apierrors.IsNotFound(err) {
                glog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
                return nil
            }
            return err
        }
        existingPodAffinity := existingPod.Spec.Affinity
        existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil
        existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil

        if hasAffinityConstraints {
            // For every soft pod affinity term of <pod>, if <existingPod> matches the term,
            // increment <pm.counts> for every node in the cluster with the same <term.TopologyKey>
            // value as that of <existingPod>'s node by the term's weight.
            terms := affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution
            pm.processTerms(terms, pod, existingPod, existingPodNode, 1)
        }
        if hasAntiAffinityConstraints {
            // For every soft pod anti-affinity term of <pod>, if <existingPod> matches the term,
            // decrement <pm.counts> for every node in the cluster with the same <term.TopologyKey>
            // value as that of <existingPod>'s node by the term's weight.
            terms := affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
            pm.processTerms(terms, pod, existingPod, existingPodNode, -1)
        }

        if existingHasAffinityConstraints {
            // For every hard pod affinity term of <existingPod>, if <pod> matches the term,
            // increment <pm.counts> for every node in the cluster with the same <term.TopologyKey>
            // value as that of <existingPod>'s node by the constant <ipa.hardPodAffinityWeight>
            if ipa.hardPodAffinityWeight > 0 {
                terms := existingPodAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution
                // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
                //if len(existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
                //	terms = append(terms, existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
                //}
                for _, term := range terms {
                    pm.processTerm(&term, existingPod, pod, existingPodNode, float64(ipa.hardPodAffinityWeight))
                }
            }
            // For every soft pod affinity term of <existingPod>, if <pod> matches the term,
            // increment <pm.counts> for every node in the cluster with the same <term.TopologyKey>
            // value as that of <existingPod>'s node by the term's weight.
            terms := existingPodAffinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution
            pm.processTerms(terms, existingPod, pod, existingPodNode, 1)
        }
        if existingHasAntiAffinityConstraints {
            // For every soft pod anti-affinity term of <existingPod>, if <pod> matches the term,
            // decrement <pm.counts> for every node in the cluster with the same <term.TopologyKey>
            // value as that of <existingPod>'s node by the term's weight.
            terms := existingPodAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution
            pm.processTerms(terms, existingPod, pod, existingPodNode, -1)
        }
        return nil
    }
    processNode := func(i int) {
        nodeInfo := nodeNameToInfo[allNodeNames[i]]
        if nodeInfo.Node() != nil {
            if hasAffinityConstraints || hasAntiAffinityConstraints {
                // We need to process all the nodes.
                for _, existingPod := range nodeInfo.Pods() {
                    if err := processPod(existingPod); err != nil {
                        pm.setError(err)
                    }
                }
            } else {
                // The pod doesn't have any constraints - we need to check only existing
                // ones that have some.
                for _, existingPod := range nodeInfo.PodsWithAffinity() {
                    if err := processPod(existingPod); err != nil {
                        pm.setError(err)
                    }
                }
            }
        }
    }
    workqueue.Parallelize(16, len(allNodeNames), processNode)
    if pm.firstError != nil {
        return nil, pm.firstError
    }

    for _, node := range nodes {
        if pm.counts[node.Name] > maxCount {
            maxCount = pm.counts[node.Name]
        }
        if pm.counts[node.Name] < minCount {
            minCount = pm.counts[node.Name]
        }
    }

    // calculate final priority score for each node
    result := make(schedulerapi.HostPriorityList, 0, len(nodes))
    for _, node := range nodes {
        fScore := float64(0)
        if (maxCount - minCount) > 0 {
            fScore = float64(schedulerapi.MaxPriority) * ((pm.counts[node.Name] - minCount) / (maxCount - minCount))
        }
        result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)})
        if glog.V(10) {
            glog.Infof("%v -> %v: InterPodAffinityPriority, Score: (%d)", pod.Name, node.Name, int(fScore))
        }
    }
    return result, nil
}
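
[Editor's note, not part of the vendored diff] A standalone sketch of the final normalization step above: raw per-node sums, which can go negative because anti-affinity terms subtract weight, are rescaled into [0, 10] by a min-max normalization. The node names and weights below are invented for illustration.

package main

import "fmt"

// normalize mirrors the closing loop of CalculateInterPodAffinityPriority:
// score = 10 * (count - minCount) / (maxCount - minCount), with min/max seeded at 0.
func normalize(counts map[string]float64) map[string]int {
    var minCount, maxCount float64
    for _, c := range counts {
        if c > maxCount {
            maxCount = c
        }
        if c < minCount {
            minCount = c
        }
    }
    scores := make(map[string]int, len(counts))
    for name, c := range counts {
        score := 0.0
        if maxCount-minCount > 0 {
            score = 10 * (c - minCount) / (maxCount - minCount)
        }
        scores[name] = int(score)
    }
    return scores
}

func main() {
    // node-a attracted weight 40, node-b was neutral, node-c was repelled by -20.
    fmt.Println(normalize(map[string]float64{"node-a": 40, "node-b": 0, "node-c": -20}))
    // Expected: node-a=10, node-b=3 (10*20/60, truncated), node-c=0.
}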

53  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/least_requested.go  (generated, vendored, Normal file)
@@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

var (
    leastResourcePriority = &ResourceAllocationPriority{"LeastResourceAllocation", leastResourceScorer}

    // LeastRequestedPriorityMap is a priority function that favors nodes with fewer requested resources.
    // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
    // prioritizes based on the minimum of the average of the fraction of requested to capacity.
    //
    // Details:
    // cpu((capacity-sum(requested))*10/capacity) + memory((capacity-sum(requested))*10/capacity)/2
    LeastRequestedPriorityMap = leastResourcePriority.PriorityMap
)

func leastResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
    return (leastRequestedScore(requested.MilliCPU, allocable.MilliCPU) +
        leastRequestedScore(requested.Memory, allocable.Memory)) / 2
}

// The unused capacity is calculated on a scale of 0-10
// 0 being the lowest priority and 10 being the highest.
// The more unused resources the higher the score is.
func leastRequestedScore(requested, capacity int64) int64 {
    if capacity == 0 {
        return 0
    }
    if requested > capacity {
        return 0
    }

    return ((capacity - requested) * int64(schedulerapi.MaxPriority)) / capacity
}
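
[Editor's note, not part of the vendored diff] A worked standalone example of leastRequestedScore with the 0-10 scale baked in as the literal 10 (standing in for schedulerapi.MaxPriority); the request and capacity numbers are illustrative.

package main

import "fmt"

// leastRequestedScore re-derives: score = (capacity - requested) * 10 / capacity,
// clamped to 0 when capacity is zero or the request exceeds it.
func leastRequestedScore(requested, capacity int64) int64 {
    if capacity == 0 || requested > capacity {
        return 0
    }
    return (capacity - requested) * 10 / capacity
}

func main() {
    // 1000m requested of 4000m CPU -> (3000*10)/4000 = 7.
    cpu := leastRequestedScore(1000, 4000)
    // 2Gi requested of 8Gi memory -> (6*10)/8 = 7.
    mem := leastRequestedScore(2, 8)
    // The scorer averages the two dimensions.
    fmt.Println(cpu, mem, (cpu+mem)/2)
}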

114  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/metadata.go  (generated, vendored, Normal file)
@@ -0,0 +1,114 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// PriorityMetadataFactory is a factory to produce PriorityMetadata.
type PriorityMetadataFactory struct {
    serviceLister     algorithm.ServiceLister
    controllerLister  algorithm.ControllerLister
    replicaSetLister  algorithm.ReplicaSetLister
    statefulSetLister algorithm.StatefulSetLister
}

// NewPriorityMetadataFactory creates a PriorityMetadataFactory.
func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister, replicaSetLister algorithm.ReplicaSetLister, statefulSetLister algorithm.StatefulSetLister) algorithm.PriorityMetadataProducer {
    factory := &PriorityMetadataFactory{
        serviceLister:     serviceLister,
        controllerLister:  controllerLister,
        replicaSetLister:  replicaSetLister,
        statefulSetLister: statefulSetLister,
    }
    return factory.PriorityMetadata
}

// priorityMetadata is a type that is passed as metadata for priority functions
type priorityMetadata struct {
    nonZeroRequest          *schedulercache.Resource
    podTolerations          []v1.Toleration
    affinity                *v1.Affinity
    podSelectors            []labels.Selector
    controllerRef           *metav1.OwnerReference
    podFirstServiceSelector labels.Selector
}

// PriorityMetadata is a PriorityMetadataProducer. Node info can be nil.
func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
    // If we cannot compute metadata, just return nil
    if pod == nil {
        return nil
    }
    return &priorityMetadata{
        nonZeroRequest:          getNonZeroRequests(pod),
        podTolerations:          getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
        affinity:                pod.Spec.Affinity,
        podSelectors:            getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
        controllerRef:           priorityutil.GetControllerRef(pod),
        podFirstServiceSelector: getFirstServiceSelector(pod, pmf.serviceLister),
    }
}

// getFirstServiceSelector returns the selector of the first service matching the given pod.
func getFirstServiceSelector(pod *v1.Pod, sl algorithm.ServiceLister) (firstServiceSelector labels.Selector) {
    if services, err := sl.GetPodServices(pod); err == nil && len(services) > 0 {
        return labels.SelectorFromSet(services[0].Spec.Selector)
    }
    return nil
}

// getSelectors returns selectors of services, RCs and RSs matching the given pod.
func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister, ssl algorithm.StatefulSetLister) []labels.Selector {
    var selectors []labels.Selector

    if services, err := sl.GetPodServices(pod); err == nil {
        for _, service := range services {
            selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector))
        }
    }

    if rcs, err := cl.GetPodControllers(pod); err == nil {
        for _, rc := range rcs {
            selectors = append(selectors, labels.SelectorFromSet(rc.Spec.Selector))
        }
    }

    if rss, err := rsl.GetPodReplicaSets(pod); err == nil {
        for _, rs := range rss {
            if selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector); err == nil {
                selectors = append(selectors, selector)
            }
        }
    }

    if sss, err := ssl.GetPodStatefulSets(pod); err == nil {
        for _, ss := range sss {
            if selector, err := metav1.LabelSelectorAsSelector(ss.Spec.Selector); err == nil {
                selectors = append(selectors, selector)
            }
        }
    }

    return selectors
}

55  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/most_requested.go  (generated, vendored, Normal file)
@@ -0,0 +1,55 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

var (
    mostResourcePriority = &ResourceAllocationPriority{"MostResourceAllocation", mostResourceScorer}

    // MostRequestedPriorityMap is a priority function that favors nodes with most requested resources.
    // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
    // based on the maximum of the average of the fraction of requested to capacity.
    // Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2
    MostRequestedPriorityMap = mostResourcePriority.PriorityMap
)

func mostResourceScorer(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
    return (mostRequestedScore(requested.MilliCPU, allocable.MilliCPU) +
        mostRequestedScore(requested.Memory, allocable.Memory)) / 2
}

// The used capacity is calculated on a scale of 0-10
// 0 being the lowest priority and 10 being the highest.
// The more resources are used the higher the score is. This function
// is almost a reversed version of least_requested_priority.calculateUnusedScore
// (10 - calculateUnusedScore). The main difference is in rounding. It was added to
// keep the final formula clean and not to modify the widely used (by users
// in their default scheduling policies) calculateUnusedScore.
func mostRequestedScore(requested, capacity int64) int64 {
    if capacity == 0 {
        return 0
    }
    if requested > capacity {
        return 0
    }

    return (requested * schedulerapi.MaxPriority) / capacity
}
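
[Editor's note, not part of the vendored diff] A worked standalone example of mostRequestedScore, again using the literal 10 in place of schedulerapi.MaxPriority; inputs are illustrative. It shows how this scorer is roughly the mirror image of the least-requested one.

package main

import "fmt"

// mostRequestedScore re-derives: score = requested * 10 / capacity,
// i.e. approximately 10 minus the least-requested score for the same inputs.
func mostRequestedScore(requested, capacity int64) int64 {
    if capacity == 0 || requested > capacity {
        return 0
    }
    return requested * 10 / capacity
}

func main() {
    // 3000m requested of 4000m CPU -> 30000/4000 = 7.
    // 6Gi requested of 8Gi memory  -> 60/8 = 7.
    fmt.Println((mostRequestedScore(3000, 4000) + mostRequestedScore(6, 8)) / 2)
}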

77  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_affinity.go  (generated, vendored, Normal file)
@@ -0,0 +1,77 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// CalculateNodeAffinityPriorityMap prioritizes nodes according to node affinity scheduling preferences
// indicated in PreferredDuringSchedulingIgnoredDuringExecution. Each time a node matches a preferredSchedulingTerm,
// it gets an addition of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
// the node satisfies, and the higher the weights of the satisfied terms, the higher
// the score the node gets.
func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
    node := nodeInfo.Node()
    if node == nil {
        return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
    }

    // default is the podspec.
    affinity := pod.Spec.Affinity
    if priorityMeta, ok := meta.(*priorityMetadata); ok {
        // We were able to parse metadata, use affinity from there.
        affinity = priorityMeta.affinity
    }

    var count int32
    // A nil element of PreferredDuringSchedulingIgnoredDuringExecution matches no objects.
    // An element of PreferredDuringSchedulingIgnoredDuringExecution that refers to an
    // empty PreferredSchedulingTerm matches all objects.
    if affinity != nil && affinity.NodeAffinity != nil && affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution != nil {
        // Match PreferredDuringSchedulingIgnoredDuringExecution term by term.
        for i := range affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
            preferredSchedulingTerm := &affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[i]
            if preferredSchedulingTerm.Weight == 0 {
                continue
            }

            // TODO: Avoid computing it for all nodes if this becomes a performance problem.
            nodeSelector, err := v1helper.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
            if err != nil {
                return schedulerapi.HostPriority{}, err
            }
            if nodeSelector.Matches(labels.Set(node.Labels)) {
                count += preferredSchedulingTerm.Weight
            }
        }
    }

    return schedulerapi.HostPriority{
        Host:  node.Name,
        Score: int(count),
    }, nil
}

// CalculateNodeAffinityPriorityReduce is a reduce function for node affinity priority calculation.
var CalculateNodeAffinityPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, false)
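
[Editor's note, not part of the vendored diff] A standalone sketch of the weighting logic above: the raw node score is the sum of the weights of every preferred term whose selector matches the node's labels, and the raw sums are afterwards normalized to [0, 10]. The preferredTerm type, matchesLabels helper, and label names are simplified stand-ins invented for this sketch.

package main

import "fmt"

type preferredTerm struct {
    weight        int32
    requiredLabel string
}

// matchesLabels is a stand-in for real label-selector matching: it only checks key presence.
func matchesLabels(nodeLabels map[string]string, requiredLabel string) bool {
    _, ok := nodeLabels[requiredLabel]
    return ok
}

// rawNodeAffinityScore sums the weights of all matching, non-zero-weight terms.
func rawNodeAffinityScore(nodeLabels map[string]string, terms []preferredTerm) int32 {
    var count int32
    for _, t := range terms {
        if t.weight == 0 {
            continue
        }
        if matchesLabels(nodeLabels, t.requiredLabel) {
            count += t.weight
        }
    }
    return count
}

func main() {
    terms := []preferredTerm{{weight: 80, requiredLabel: "ssd"}, {weight: 20, requiredLabel: "gpu"}}
    fmt.Println(rawNodeAffinityScore(map[string]string{"ssd": "true"}, terms)) // 80
    fmt.Println(rawNodeAffinityScore(map[string]string{"gpu": "true"}, terms)) // 20
    // Raw sums are then rescaled to [0, 10] by CalculateNodeAffinityPriorityReduce.
}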

62  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_label.go  (generated, vendored, Normal file)
@@ -0,0 +1,62 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "fmt"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// NodeLabelPrioritizer contains information to calculate node label priority.
type NodeLabelPrioritizer struct {
    label    string
    presence bool
}

// NewNodeLabelPriority creates a NodeLabelPrioritizer.
func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
    labelPrioritizer := &NodeLabelPrioritizer{
        label:    label,
        presence: presence,
    }
    return labelPrioritizer.CalculateNodeLabelPriorityMap, nil
}

// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
    node := nodeInfo.Node()
    if node == nil {
        return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
    }

    exists := labels.Set(node.Labels).Has(n.label)
    score := 0
    if (exists && n.presence) || (!exists && !n.presence) {
        score = schedulerapi.MaxPriority
    }
    return schedulerapi.HostPriority{
        Host:  node.Name,
        Score: score,
    }, nil
}

68  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go  (generated, vendored, Normal file)
@@ -0,0 +1,68 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "fmt"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// CalculateNodePreferAvoidPodsPriorityMap prioritizes nodes according to the node annotation
// "scheduler.alpha.kubernetes.io/preferAvoidPods".
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
    node := nodeInfo.Node()
    if node == nil {
        return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
    }
    var controllerRef *metav1.OwnerReference
    if priorityMeta, ok := meta.(*priorityMetadata); ok {
        controllerRef = priorityMeta.controllerRef
    } else {
        // We couldn't parse metadata - fallback to the podspec.
        controllerRef = priorityutil.GetControllerRef(pod)
    }

    if controllerRef != nil {
        // Ignore pods that are owned by a controller other than a ReplicationController
        // or ReplicaSet.
        if controllerRef.Kind != "ReplicationController" && controllerRef.Kind != "ReplicaSet" {
            controllerRef = nil
        }
    }
    if controllerRef == nil {
        return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
    }

    avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(node.Annotations)
    if err != nil {
        // If we cannot get annotation, assume it's schedulable there.
        return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
    }
    for i := range avoids.PreferAvoidPods {
        avoid := &avoids.PreferAvoidPods[i]
        if avoid.PodSignature.PodController.Kind == controllerRef.Kind && avoid.PodSignature.PodController.UID == controllerRef.UID {
            return schedulerapi.HostPriority{Host: node.Name, Score: 0}, nil
        }
    }
    return schedulerapi.HostPriority{Host: node.Name, Score: schedulerapi.MaxPriority}, nil
}

64  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/reduce.go  (generated, vendored, Normal file)
@@ -0,0 +1,64 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// NormalizeReduce generates a PriorityReduceFunction that can normalize the result
// scores to [0, maxPriority]. If reverse is set to true, it reverses the scores by
// subtracting each score from maxPriority.
func NormalizeReduce(maxPriority int, reverse bool) algorithm.PriorityReduceFunction {
    return func(
        _ *v1.Pod,
        _ interface{},
        _ map[string]*schedulercache.NodeInfo,
        result schedulerapi.HostPriorityList) error {

        var maxCount int
        for i := range result {
            if result[i].Score > maxCount {
                maxCount = result[i].Score
            }
        }

        if maxCount == 0 {
            if reverse {
                for i := range result {
                    result[i].Score = maxPriority
                }
            }
            return nil
        }

        for i := range result {
            score := result[i].Score

            score = maxPriority * score / maxCount
            if reverse {
                score = maxPriority - score
            }

            result[i].Score = score
        }
        return nil
    }
}
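
[Editor's note, not part of the vendored diff] A standalone sketch of NormalizeReduce operating on plain int slices instead of a HostPriorityList: each score is scaled by maxPriority/maxCount and optionally inverted when reverse is true. The sample score values are illustrative.

package main

import "fmt"

// normalizeReduce mirrors the vendored logic: scale to [0, maxPriority], invert if reverse.
func normalizeReduce(maxPriority int, reverse bool, scores []int) []int {
    maxCount := 0
    for _, s := range scores {
        if s > maxCount {
            maxCount = s
        }
    }
    out := make([]int, len(scores))
    for i, s := range scores {
        if maxCount == 0 {
            if reverse {
                out[i] = maxPriority
            }
            continue
        }
        score := maxPriority * s / maxCount
        if reverse {
            score = maxPriority - score
        }
        out[i] = score
    }
    return out
}

func main() {
    fmt.Println(normalizeReduce(10, false, []int{80, 40, 0})) // [10 5 0]
    fmt.Println(normalizeReduce(10, true, []int{80, 40, 0}))  // [0 5 10]
}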

141  vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio.go  (generated, vendored, Normal file)
@@ -0,0 +1,141 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
    "fmt"

    schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
    schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// FunctionShape represents shape of scoring function.
// For safety use NewFunctionShape which performs precondition checks for struct creation.
type FunctionShape []FunctionShapePoint

// FunctionShapePoint represents single point in scoring function shape.
type FunctionShapePoint struct {
    // Utilization is function argument.
    Utilization int64
    // Score is function value.
    Score int64
}

var (
    // give priority to least utilized nodes by default
    defaultFunctionShape, _ = NewFunctionShape([]FunctionShapePoint{{0, 10}, {100, 0}})
)

const (
    minUtilization = 0
    maxUtilization = 100
    minScore       = 0
    maxScore       = schedulerapi.MaxPriority
)

// NewFunctionShape creates instance of FunctionShape in a safe way performing all
// necessary sanity checks.
func NewFunctionShape(points []FunctionShapePoint) (FunctionShape, error) {

    n := len(points)

    if n == 0 {
        return nil, fmt.Errorf("at least one point must be specified")
    }

    for i := 1; i < n; i++ {
        if points[i-1].Utilization >= points[i].Utilization {
            return nil, fmt.Errorf("utilization values must be sorted. Utilization[%d]==%d >= Utilization[%d]==%d", i-1, points[i-1].Utilization, i, points[i].Utilization)
        }
    }

    for i, point := range points {
        if point.Utilization < minUtilization {
            return nil, fmt.Errorf("utilization values must not be less than %d. Utilization[%d]==%d", minUtilization, i, point.Utilization)
        }
        if point.Utilization > maxUtilization {
            return nil, fmt.Errorf("utilization values must not be greater than %d. Utilization[%d]==%d", maxUtilization, i, point.Utilization)
        }
        if point.Score < minScore {
            return nil, fmt.Errorf("score values must not be less than %d. Score[%d]==%d", minScore, i, point.Score)
        }
        if point.Score > maxScore {
            return nil, fmt.Errorf("score values must not be greater than %d. Score[%d]==%d", maxScore, i, point.Score)
        }
    }

    // We make a defensive copy so we make no assumptions about the caller changing the array passed as an argument afterwards
    pointsCopy := make(FunctionShape, n)
    copy(pointsCopy, points)
    return pointsCopy, nil
}

// RequestedToCapacityRatioResourceAllocationPriorityDefault creates a requestedToCapacity based
// ResourceAllocationPriority using default resource scoring function shape.
// The default function assigns 1.0 to resource when all capacity is available
// and 0.0 when requested amount is equal to capacity.
func RequestedToCapacityRatioResourceAllocationPriorityDefault() *ResourceAllocationPriority {
    return RequestedToCapacityRatioResourceAllocationPriority(defaultFunctionShape)
}

// RequestedToCapacityRatioResourceAllocationPriority creates a requestedToCapacity based
// ResourceAllocationPriority using provided resource scoring function shape.
func RequestedToCapacityRatioResourceAllocationPriority(scoringFunctionShape FunctionShape) *ResourceAllocationPriority {
    return &ResourceAllocationPriority{"RequestedToCapacityRatioResourceAllocationPriority", buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape)}
}

func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape FunctionShape) func(*schedulercache.Resource, *schedulercache.Resource, bool, int, int) int64 {
    rawScoringFunction := buildBrokenLinearFunction(scoringFunctionShape)

    resourceScoringFunction := func(requested, capacity int64) int64 {
        if capacity == 0 || requested > capacity {
            return rawScoringFunction(maxUtilization)
        }

        return rawScoringFunction(maxUtilization - (capacity-requested)*maxUtilization/capacity)
    }

    return func(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
        cpuScore := resourceScoringFunction(requested.MilliCPU, allocable.MilliCPU)
        memoryScore := resourceScoringFunction(requested.Memory, allocable.Memory)
        return (cpuScore + memoryScore) / 2
    }
}

// Creates a function which is built using linear segments. Segments are defined via shape array.
// Shape[i].Utilization slice represents points on "utilization" axis where different segments meet.
// Shape[i].Score represents function values at meeting points.
//
// function f(p) is defined as:
//   shape[0].Score for p < shape[0].Utilization
//   shape[i].Score for p == shape[i].Utilization
//   shape[n-1].Score for p > shape[n-1].Utilization
// and linear between points (p < shape[i].Utilization)
func buildBrokenLinearFunction(shape FunctionShape) func(int64) int64 {
    n := len(shape)
    return func(p int64) int64 {
        for i := 0; i < n; i++ {
            if p <= shape[i].Utilization {
                if i == 0 {
                    return shape[0].Score
                }
                return shape[i-1].Score + (shape[i].Score-shape[i-1].Score)*(p-shape[i-1].Utilization)/(shape[i].Utilization-shape[i-1].Utilization)
            }
        }
        return shape[n-1].Score
    }
}
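
[Editor's note, not part of the vendored diff] A standalone re-derivation of buildBrokenLinearFunction, showing how the default shape and an alternative "bin-packing" style shape behave; the point type and the shapes in main are invented for illustration.

package main

import "fmt"

type point struct{ utilization, score int64 }

// brokenLinear mirrors the vendored piecewise-linear interpolation: flat before the
// first point, linear between points, flat after the last point.
func brokenLinear(shape []point) func(int64) int64 {
    n := len(shape)
    return func(p int64) int64 {
        for i := 0; i < n; i++ {
            if p <= shape[i].utilization {
                if i == 0 {
                    return shape[0].score
                }
                return shape[i-1].score + (shape[i].score-shape[i-1].score)*(p-shape[i-1].utilization)/(shape[i].utilization-shape[i-1].utilization)
            }
        }
        return shape[n-1].score
    }
}

func main() {
    // The default shape {0: 10, 100: 0} prefers the least utilized nodes.
    f := brokenLinear([]point{{0, 10}, {100, 0}})
    fmt.Println(f(0), f(30), f(100)) // 10 7 0
    // A shape {0: 0, 100: 10} would prefer the most utilized nodes instead.
    g := brokenLinear([]point{{0, 0}, {100, 10}})
    fmt.Println(g(0), g(30), g(100)) // 0 3 10
}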
90 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_allocation.go generated vendored Normal file
@@ -0,0 +1,90 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"fmt"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// ResourceAllocationPriority contains information to calculate resource allocation priority.
type ResourceAllocationPriority struct {
	Name   string
	scorer func(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
}

// PriorityMap prioritizes nodes according to the resource allocations on the node.
// It will use `scorer` function to calculate the score.
func (r *ResourceAllocationPriority) PriorityMap(
	pod *v1.Pod,
	meta interface{},
	nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}
	allocatable := nodeInfo.AllocatableResource()

	var requested schedulercache.Resource
	if priorityMeta, ok := meta.(*priorityMetadata); ok {
		requested = *priorityMeta.nonZeroRequest
	} else {
		// We couldn't parse metadata - fallback to computing it.
		requested = *getNonZeroRequests(pod)
	}

	requested.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
	requested.Memory += nodeInfo.NonZeroRequest().Memory
	var score int64
	// Check if the pod has volumes and this could be added to scorer function for balanced resource allocation.
	if len(pod.Spec.Volumes) >= 0 && nodeInfo.TransientInfo != nil {
		score = r.scorer(&requested, &allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
	} else {
		score = r.scorer(&requested, &allocatable, false, 0, 0)
	}

	if glog.V(10) {
		glog.Infof(
			"%v -> %v: %v, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
			pod.Name, node.Name, r.Name,
			allocatable.MilliCPU, allocatable.Memory,
			requested.MilliCPU+allocatable.MilliCPU, requested.Memory+allocatable.Memory,
			score,
		)
	}

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: int(score),
	}, nil
}

func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource {
	result := &schedulercache.Resource{}
	for i := range pod.Spec.Containers {
		container := &pod.Spec.Containers[i]
		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
		result.MilliCPU += cpu
		result.Memory += memory
	}
	return result
}
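To see how a scorer with this signature plugs into ResourceAllocationPriority, here is a hedged sketch of a hypothetical priority (not one of the package's built-in priorities). Because the scorer field is unexported, code like this would have to live inside the priorities package itself; the name halfUtilizedPriority and the scoring rule are invented for the example.

// halfUtilizedPriority is a hypothetical example: it scores a node by how close its
// CPU and memory utilization would be to 50% after placing the pod.
func halfUtilizedPriority() *ResourceAllocationPriority {
	scorer := func(requested, allocable *schedulercache.Resource, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64 {
		score := func(req, cap int64) int64 {
			if cap == 0 {
				return 0
			}
			util := req * 100 / cap // utilization in percent
			diff := util - 50
			if diff < 0 {
				diff = -diff
			}
			s := 10 - diff/5 // 10 at exactly 50% utilization, falling off linearly
			if s < 0 {
				s = 0
			}
			return s
		}
		return (score(requested.MilliCPU, allocable.MilliCPU) + score(requested.Memory, allocable.Memory)) / 2
	}
	return &ResourceAllocationPriority{Name: "HalfUtilizedExample", scorer: scorer}
}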
99 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/resource_limits.go generated vendored Normal file
@@ -0,0 +1,99 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"fmt"

	"k8s.io/api/core/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"

	"github.com/golang/glog"
)

// ResourceLimitsPriorityMap is a priority function that increases score of input node by 1 if the node satisfies
// input pod's resource limits. In detail, this priority function works as follows: If a node does not publish its
// allocatable resources (cpu and memory both), the node score is not affected. If a pod does not specify
// both its cpu and memory limits, the node score is not affected. If one or both of the cpu and memory limits
// of the pod are satisfied, the node is assigned a score of 1.
// The rationale for choosing the lowest score of 1 is that it is mainly used to break ties between nodes that have
// the same scores assigned by one of the least and most requested priority functions.
func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}

	allocatableResources := nodeInfo.AllocatableResource()

	// compute pod limits
	podLimits := getResourceLimits(pod)

	cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
	memScore := computeScore(podLimits.Memory, allocatableResources.Memory)

	score := int(0)
	if cpuScore == 1 || memScore == 1 {
		score = 1
	}

	if glog.V(10) {
		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
		// not logged. There is visible performance gain from it.
		glog.Infof(
			"%v -> %v: Resource Limits Priority, allocatable %d millicores %d memory bytes, pod limits %d millicores %d memory bytes, score %d",
			pod.Name, node.Name,
			allocatableResources.MilliCPU, allocatableResources.Memory,
			podLimits.MilliCPU, podLimits.Memory,
			score,
		)
	}

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: score,
	}, nil
}

// computeScore returns 1 if limit value is less than or equal to allocatable
// value, otherwise it returns 0.
func computeScore(limit, allocatable int64) int64 {
	if limit != 0 && allocatable != 0 && limit <= allocatable {
		return 1
	}
	return 0
}

// getResourceLimits computes resource limits for input pod.
// The reason to create this new function is to be consistent with other
// priority functions because most or perhaps all priority functions work
// with schedulercache.Resource.
// TODO: cache it as part of metadata passed to priority functions.
func getResourceLimits(pod *v1.Pod) *schedulercache.Resource {
	result := &schedulercache.Resource{}
	for _, container := range pod.Spec.Containers {
		result.Add(container.Resources.Limits)
	}

	// take max_resource(sum_pod, any_init_container)
	for _, container := range pod.Spec.InitContainers {
		result.SetMaxResource(container.Resources.Limits)
	}

	return result
}
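The comparison rule above can be shown with a small standalone sketch: computeScore treats a zero limit or zero allocatable as "no signal" and only rewards limits that actually fit. The millicore values below are invented for the example.

package main

import "fmt"

// computeScore mirrors the helper above: 1 only when both values are known and the limit fits.
func computeScore(limit, allocatable int64) int64 {
	if limit != 0 && allocatable != 0 && limit <= allocatable {
		return 1
	}
	return 0
}

func main() {
	allocatableMilliCPU := int64(4000) // a made-up 4-core node
	fmt.Println(computeScore(0, allocatableMilliCPU))    // 0: pod declares no CPU limit
	fmt.Println(computeScore(2000, allocatableMilliCPU)) // 1: limit fits within allocatable
	fmt.Println(computeScore(8000, allocatableMilliCPU)) // 0: limit exceeds allocatable
}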
285 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/selector_spreading.go generated vendored Normal file
@@ -0,0 +1,285 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
	utilnode "k8s.io/kubernetes/pkg/util/node"

	"github.com/golang/glog"
)

// When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading
// TODO: Any way to justify this weighting?
const zoneWeighting float64 = 2.0 / 3.0

// SelectorSpread contains information to calculate selector spread priority.
type SelectorSpread struct {
	serviceLister     algorithm.ServiceLister
	controllerLister  algorithm.ControllerLister
	replicaSetLister  algorithm.ReplicaSetLister
	statefulSetLister algorithm.StatefulSetLister
}

// NewSelectorSpreadPriority creates a SelectorSpread.
func NewSelectorSpreadPriority(
	serviceLister algorithm.ServiceLister,
	controllerLister algorithm.ControllerLister,
	replicaSetLister algorithm.ReplicaSetLister,
	statefulSetLister algorithm.StatefulSetLister) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
	selectorSpread := &SelectorSpread{
		serviceLister:     serviceLister,
		controllerLister:  controllerLister,
		replicaSetLister:  replicaSetLister,
		statefulSetLister: statefulSetLister,
	}
	return selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce
}

// CalculateSpreadPriorityMap spreads pods across hosts, considering pods
// belonging to the same service, RC, RS or StatefulSet.
// When a pod is scheduled, it looks for services, RCs, RSs and StatefulSets that match the pod,
// then finds existing pods that match those selectors.
// It favors nodes that have fewer existing matching pods.
// i.e. it pushes the scheduler towards a node where there's the smallest number of
// pods which match the same service, RC, RS or StatefulSet selectors as the pod being scheduled.
func (s *SelectorSpread) CalculateSpreadPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	var selectors []labels.Selector
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}

	priorityMeta, ok := meta.(*priorityMetadata)
	if ok {
		selectors = priorityMeta.podSelectors
	} else {
		selectors = getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister, s.statefulSetLister)
	}

	if len(selectors) == 0 {
		return schedulerapi.HostPriority{
			Host:  node.Name,
			Score: int(0),
		}, nil
	}

	count := int(0)
	for _, nodePod := range nodeInfo.Pods() {
		if pod.Namespace != nodePod.Namespace {
			continue
		}
		// When we are replacing a failed pod, we often see the previous
		// deleted version while scheduling the replacement.
		// Ignore the previous deleted version for spreading purposes
		// (it can still be considered for resource restrictions etc.)
		if nodePod.DeletionTimestamp != nil {
			glog.V(4).Infof("skipping pending-deleted pod: %s/%s", nodePod.Namespace, nodePod.Name)
			continue
		}
		matches := false
		for _, selector := range selectors {
			if selector.Matches(labels.Set(nodePod.ObjectMeta.Labels)) {
				matches = true
				break
			}
		}
		if matches {
			count++
		}
	}
	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: int(count),
	}, nil
}

// CalculateSpreadPriorityReduce calculates the score of each node
// based on the number of existing matching pods on the node;
// where zone information is included on the nodes, it favors nodes
// in zones with fewer existing matching pods.
func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
	countsByZone := make(map[string]int, 10)
	maxCountByZone := int(0)
	maxCountByNodeName := int(0)

	for i := range result {
		if result[i].Score > maxCountByNodeName {
			maxCountByNodeName = result[i].Score
		}
		zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
		if zoneID == "" {
			continue
		}
		countsByZone[zoneID] += result[i].Score
	}

	for zoneID := range countsByZone {
		if countsByZone[zoneID] > maxCountByZone {
			maxCountByZone = countsByZone[zoneID]
		}
	}

	haveZones := len(countsByZone) != 0

	maxCountByNodeNameFloat64 := float64(maxCountByNodeName)
	maxCountByZoneFloat64 := float64(maxCountByZone)
	MaxPriorityFloat64 := float64(schedulerapi.MaxPriority)

	for i := range result {
		// initializing to the default/max node score of maxPriority
		fScore := MaxPriorityFloat64
		if maxCountByNodeName > 0 {
			fScore = MaxPriorityFloat64 * (float64(maxCountByNodeName-result[i].Score) / maxCountByNodeNameFloat64)
		}
		// If there is zone information present, incorporate it
		if haveZones {
			zoneID := utilnode.GetZoneKey(nodeNameToInfo[result[i].Host].Node())
			if zoneID != "" {
				zoneScore := MaxPriorityFloat64
				if maxCountByZone > 0 {
					zoneScore = MaxPriorityFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
				}
				fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
			}
		}
		result[i].Score = int(fScore)
		if glog.V(10) {
			glog.Infof(
				"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, result[i].Host, int(fScore),
			)
		}
	}
	return nil
}

// ServiceAntiAffinity contains information to calculate service anti-affinity priority.
type ServiceAntiAffinity struct {
	podLister     algorithm.PodLister
	serviceLister algorithm.ServiceLister
	label         string
}

// NewServiceAntiAffinityPriority creates a ServiceAntiAffinity.
func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, label string) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {
	antiAffinity := &ServiceAntiAffinity{
		podLister:     podLister,
		serviceLister: serviceLister,
		label:         label,
	}
	return antiAffinity.CalculateAntiAffinityPriorityMap, antiAffinity.CalculateAntiAffinityPriorityReduce
}

// Classifies nodes into ones with labels and without labels.
func (s *ServiceAntiAffinity) getNodeClassificationByLabels(nodes []*v1.Node) (map[string]string, []string) {
	labeledNodes := map[string]string{}
	nonLabeledNodes := []string{}
	for _, node := range nodes {
		if labels.Set(node.Labels).Has(s.label) {
			label := labels.Set(node.Labels).Get(s.label)
			labeledNodes[node.Name] = label
		} else {
			nonLabeledNodes = append(nonLabeledNodes, node.Name)
		}
	}
	return labeledNodes, nonLabeledNodes
}

// filteredPod gets pods based on namespace and selector
func filteredPod(namespace string, selector labels.Selector, nodeInfo *schedulercache.NodeInfo) (pods []*v1.Pod) {
	if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector == nil {
		return []*v1.Pod{}
	}
	for _, pod := range nodeInfo.Pods() {
		// Ignore pods being deleted for spreading purposes
		// Similar to how it is done for SelectorSpreadPriority
		if namespace == pod.Namespace && pod.DeletionTimestamp == nil && selector.Matches(labels.Set(pod.Labels)) {
			pods = append(pods, pod)
		}
	}
	return
}

// CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service
// on given machine
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	var firstServiceSelector labels.Selector

	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}
	priorityMeta, ok := meta.(*priorityMetadata)
	if ok {
		firstServiceSelector = priorityMeta.podFirstServiceSelector
	} else {
		firstServiceSelector = getFirstServiceSelector(pod, s.serviceLister)
	}
	// pods matching namespace and selector on current node
	matchedPodsOfNode := filteredPod(pod.Namespace, firstServiceSelector, nodeInfo)

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: int(len(matchedPodsOfNode)),
	}, nil
}

// CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
	var numServicePods int
	var label string
	podCounts := map[string]int{}
	labelNodesStatus := map[string]string{}
	maxPriorityFloat64 := float64(schedulerapi.MaxPriority)

	for _, hostPriority := range result {
		numServicePods += hostPriority.Score
		if !labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Has(s.label) {
			continue
		}
		label = labels.Set(nodeNameToInfo[hostPriority.Host].Node().Labels).Get(s.label)
		labelNodesStatus[hostPriority.Host] = label
		podCounts[label] += hostPriority.Score
	}

	// score int - scale of 0-maxPriority
	// 0 being the lowest priority and maxPriority being the highest
	for i, hostPriority := range result {
		label, ok := labelNodesStatus[hostPriority.Host]
		if !ok {
			result[i].Host = hostPriority.Host
			result[i].Score = int(0)
			continue
		}
		// initializing to the default/max node score of maxPriority
		fScore := maxPriorityFloat64
		if numServicePods > 0 {
			fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[label]) / float64(numServicePods))
		}
		result[i].Host = hostPriority.Host
		result[i].Score = int(fScore)
	}

	return nil
}
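The zone blending at the end of CalculateSpreadPriorityReduce reduces to a simple weighted average of a per-node ratio and a per-zone ratio. A standalone sketch with made-up pod counts (not taken from any real cluster) follows; maxPriority is fixed at 10 here only for the example.

package main

import "fmt"

const maxPriority = 10.0
const zoneWeighting = 2.0 / 3.0

// blendedScore mirrors the reduce step: fewer matching pods relative to the worst
// node/zone means a higher score, and the zone ratio gets 2/3 of the weight.
func blendedScore(nodeCount, maxNodeCount, zoneCount, maxZoneCount int) int {
	nodeScore := maxPriority
	if maxNodeCount > 0 {
		nodeScore = maxPriority * float64(maxNodeCount-nodeCount) / float64(maxNodeCount)
	}
	zoneScore := maxPriority
	if maxZoneCount > 0 {
		zoneScore = maxPriority * float64(maxZoneCount-zoneCount) / float64(maxZoneCount)
	}
	return int(nodeScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore)
}

func main() {
	// Node has 1 matching pod (the worst node has 4); its zone holds 5 matching pods
	// (the worst zone has 8). Node ratio 7.5, zone ratio 3.75, blended score 5.
	fmt.Println(blendedScore(1, 4, 5, 8))
}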
76 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/taint_toleration.go generated vendored Normal file
@@ -0,0 +1,76 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"fmt"

	"k8s.io/api/core/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// countIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule
func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) {
	for _, taint := range taints {
		// check only on taints that have effect PreferNoSchedule
		if taint.Effect != v1.TaintEffectPreferNoSchedule {
			continue
		}

		if !v1helper.TolerationsTolerateTaint(tolerations, &taint) {
			intolerableTaints++
		}
	}
	return
}

// getAllTolerationPreferNoSchedule gets the list of all Tolerations with Effect PreferNoSchedule or with no effect.
func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) {
	for _, toleration := range tolerations {
		// Empty effect means all effects which includes PreferNoSchedule, so we need to collect it as well.
		if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule {
			tolerationList = append(tolerationList, toleration)
		}
	}
	return
}

// ComputeTaintTolerationPriorityMap prepares the priority list for all the nodes based on the number of intolerable taints on the node
func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}
	// To hold all the tolerations with Effect PreferNoSchedule
	var tolerationsPreferNoSchedule []v1.Toleration
	if priorityMeta, ok := meta.(*priorityMetadata); ok {
		tolerationsPreferNoSchedule = priorityMeta.podTolerations
	} else {
		tolerationsPreferNoSchedule = getAllTolerationPreferNoSchedule(pod.Spec.Tolerations)
	}

	return schedulerapi.HostPriority{
		Host:  node.Name,
		Score: countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule),
	}, nil
}

// ComputeTaintTolerationPriorityReduce calculates the score of each node based on the number of intolerable taints on the node
var ComputeTaintTolerationPriorityReduce = NormalizeReduce(schedulerapi.MaxPriority, true)
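A hedged, self-contained sketch of the counting rule: only PreferNoSchedule taints matter, and a toleration with an empty effect matches any effect. This re-implements the taint/toleration comparison locally instead of calling v1helper, so it is illustrative only and uses invented taint keys.

package main

import "fmt"

type taint struct{ key, value, effect string }
type toleration struct{ key, value, operator, effect string }

// tolerates is a simplified stand-in for v1helper.TolerationsTolerateTaint:
// "Exists" ignores the value, and an empty effect tolerates any effect.
func tolerates(t toleration, ta taint) bool {
	if t.key != ta.key {
		return false
	}
	if t.effect != "" && t.effect != ta.effect {
		return false
	}
	return t.operator == "Exists" || t.value == ta.value
}

func countIntolerable(taints []taint, tolerations []toleration) int {
	count := 0
	for _, ta := range taints {
		if ta.effect != "PreferNoSchedule" {
			continue // only soft taints affect this priority
		}
		tolerated := false
		for _, t := range tolerations {
			if tolerates(t, ta) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			count++
		}
	}
	return count
}

func main() {
	taints := []taint{
		{"dedicated", "gpu", "PreferNoSchedule"},
		{"dedicated", "gpu", "NoSchedule"}, // ignored by this priority
	}
	tolerations := []toleration{{"other", "", "Exists", ""}}
	fmt.Println(countIntolerable(taints, tolerations)) // 1
}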
61 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/test_util.go generated vendored Normal file
@@ -0,0 +1,61 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package priorities

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

func makeNode(node string, milliCPU, memory int64) *v1.Node {
	return &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: node},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
			},
		},
	}
}

func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction, mataData interface{}) algorithm.PriorityFunction {
	return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
		result := make(schedulerapi.HostPriorityList, 0, len(nodes))
		for i := range nodes {
			hostResult, err := mapFn(pod, mataData, nodeNameToInfo[nodes[i].Name])
			if err != nil {
				return nil, err
			}
			result = append(result, hostResult)
		}
		if reduceFn != nil {
			if err := reduceFn(pod, mataData, nodeNameToInfo, result); err != nil {
				return nil, err
			}
		}
		return result, nil
	}
}
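A sketch of how these helpers would typically compose in a test within this package; the helper name exampleResourceLimitsScores is invented, and it assumes the cache package's CreateNodeNameToInfoMap constructor is available as in the priorities tests.

// exampleResourceLimitsScores is a hypothetical test helper: it wraps ResourceLimitsPriorityMap
// with priorityFunction and scores the given nodes (no pre-existing pods assumed).
func exampleResourceLimitsScores(pod *v1.Pod, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
	nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, nodes)
	prioritize := priorityFunction(ResourceLimitsPriorityMap, nil, nil)
	return prioritize(pod, nodeNameToInfo, nodes)
}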
55 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/BUILD generated vendored Normal file
@@ -0,0 +1,55 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = [
        "non_zero_test.go",
        "topologies_test.go",
        "util_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/selection:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "non_zero.go",
        "topologies.go",
        "util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util",
    deps = [
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
52 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/non_zero.go generated vendored Normal file
@@ -0,0 +1,52 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import "k8s.io/api/core/v1"

// For each of these resources, a pod that doesn't request the resource explicitly
// will be treated as having requested the amount indicated below, for the purpose
// of computing priority only. This ensures that when scheduling zero-request pods, such
// pods will not all be scheduled to the machine with the smallest in-use request,
// and that when scheduling regular pods, such pods will not see zero-request pods as
// consuming no resources whatsoever. We chose these values to be similar to the
// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
// As described in #11713, we use request instead of limit to deal with resource requirements.

// DefaultMilliCPURequest defines default milli cpu request number.
const DefaultMilliCPURequest int64 = 100 // 0.1 core
// DefaultMemoryRequest defines default memory request size.
const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB

// GetNonzeroRequests returns the default resource request if none is found or
// what is provided on the request.
func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
	var outMilliCPU, outMemory int64
	// Override if un-set, but not if explicitly set to zero
	if _, found := (*requests)[v1.ResourceCPU]; !found {
		outMilliCPU = DefaultMilliCPURequest
	} else {
		outMilliCPU = requests.Cpu().MilliValue()
	}
	// Override if un-set, but not if explicitly set to zero
	if _, found := (*requests)[v1.ResourceMemory]; !found {
		outMemory = DefaultMemoryRequest
	} else {
		outMemory = requests.Memory().Value()
	}
	return outMilliCPU, outMemory
}
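A short hedged sketch of the defaulting behavior: an absent request falls back to the default, while an explicit zero is kept. It assumes only this util package and the vendored core v1 and resource packages; the quantities are made up.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
)

func main() {
	// No requests set: both defaults apply (100 millicores, 200 MB).
	empty := v1.ResourceList{}
	cpu, mem := priorityutil.GetNonzeroRequests(&empty)
	fmt.Println(cpu, mem)

	// Explicit zero CPU request is honored; memory still defaults.
	explicit := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0")}
	cpu, mem = priorityutil.GetNonzeroRequests(&explicit)
	fmt.Println(cpu, mem)
}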
81 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/topologies.go generated vendored Normal file
@@ -0,0 +1,81 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/sets"
)

// GetNamespacesFromPodAffinityTerm returns a set of names
// according to the namespaces indicated in podAffinityTerm.
// If namespaces is empty it considers the given pod's namespace.
func GetNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm *v1.PodAffinityTerm) sets.String {
	names := sets.String{}
	if len(podAffinityTerm.Namespaces) == 0 {
		names.Insert(pod.Namespace)
	} else {
		names.Insert(podAffinityTerm.Namespaces...)
	}
	return names
}

// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>'s <term>.
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, namespaces sets.String, selector labels.Selector) bool {
	if !namespaces.Has(pod.Namespace) {
		return false
	}

	if !selector.Matches(labels.Set(pod.Labels)) {
		return false
	}
	return true
}

// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key.
// Returns false if topologyKey is empty.
func NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
	if len(topologyKey) == 0 {
		return false
	}

	if nodeA.Labels == nil || nodeB.Labels == nil {
		return false
	}

	nodeALabel, okA := nodeA.Labels[topologyKey]
	nodeBLabel, okB := nodeB.Labels[topologyKey]

	// If found label in both nodes, check the label
	if okB && okA {
		return nodeALabel == nodeBLabel
	}

	return false
}

// Topologies contains topologies information of nodes.
type Topologies struct {
	DefaultKeys []string
}

// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key.
func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
	return NodesHaveSameTopologyKey(nodeA, nodeB, topologyKey)
}
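A short sketch of NodesHaveSameTopologyKey with two hypothetical nodes sharing a zone label; the node names, label values, and zone key choice are invented for the example.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
)

func main() {
	zoneKey := "failure-domain.beta.kubernetes.io/zone"
	nodeA := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "a", Labels: map[string]string{zoneKey: "us-east-1a"}}}
	nodeB := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "b", Labels: map[string]string{zoneKey: "us-east-1a"}}}

	fmt.Println(priorityutil.NodesHaveSameTopologyKey(nodeA, nodeB, zoneKey))                  // true: same zone label
	fmt.Println(priorityutil.NodesHaveSameTopologyKey(nodeA, nodeB, "kubernetes.io/hostname")) // false: label missing on both
	fmt.Println(priorityutil.NodesHaveSameTopologyKey(nodeA, nodeB, ""))                       // false: empty key
}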
36 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util/util.go generated vendored Normal file
@@ -0,0 +1,36 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package util

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GetControllerRef gets pod's owner controller reference from a pod object.
func GetControllerRef(pod *v1.Pod) *metav1.OwnerReference {
	if len(pod.OwnerReferences) == 0 {
		return nil
	}
	for i := range pod.OwnerReferences {
		ref := &pod.OwnerReferences[i]
		if ref.Controller != nil && *ref.Controller {
			return ref
		}
	}
	return nil
}
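A short sketch showing which owner reference GetControllerRef picks out; the pod and ReplicaSet names are invented for the example.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
)

func main() {
	isController := true
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "web-0",
		OwnerReferences: []metav1.OwnerReference{
			{Kind: "Something", Name: "aux"},                                // non-controller owner is skipped
			{Kind: "ReplicaSet", Name: "web-6d4b7", Controller: &isController}, // controller owner wins
		},
	}}
	if ref := priorityutil.GetControllerRef(pod); ref != nil {
		fmt.Println(ref.Kind, ref.Name) // ReplicaSet web-6d4b7
	}
}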
88 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/scheduler_interface.go generated vendored Normal file
@@ -0,0 +1,88 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package algorithm

import (
	"k8s.io/api/core/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// SchedulerExtender is an interface for external processes to influence scheduling
// decisions made by Kubernetes. This is typically needed for resources not directly
// managed by Kubernetes.
type SchedulerExtender interface {
	// Filter based on extender-implemented predicate functions. The filtered list is
	// expected to be a subset of the supplied list. failedNodesMap optionally contains
	// the list of failed nodes and failure reasons.
	Filter(pod *v1.Pod,
		nodes []*v1.Node, nodeNameToInfo map[string]*schedulercache.NodeInfo,
	) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)

	// Prioritize based on extender-implemented priority functions. The returned scores & weight
	// are used to compute the weighted score for an extender. The weighted scores are added to
	// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
	Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)

	// Bind delegates the action of binding a pod to a node to the extender.
	Bind(binding *v1.Binding) error

	// IsBinder returns whether this extender is configured for the Bind method.
	IsBinder() bool

	// IsInterested returns true if at least one extended resource requested by
	// this pod is managed by this extender.
	IsInterested(pod *v1.Pod) bool

	// ProcessPreemption returns nodes with their victim pods processed by extender based on
	// given:
	//   1. Pod to schedule
	//   2. Candidate nodes and victim pods (nodeToVictims) generated by previous scheduling process.
	//   3. nodeNameToInfo to restore v1.Node from node name if extender cache is enabled.
	// The possible changes made by extender may include:
	//   1. Subset of given candidate nodes after preemption phase of extender.
	//   2. A different set of victim pods for every given candidate node after preemption phase of extender.
	ProcessPreemption(
		pod *v1.Pod,
		nodeToVictims map[*v1.Node]*schedulerapi.Victims,
		nodeNameToInfo map[string]*schedulercache.NodeInfo,
	) (map[*v1.Node]*schedulerapi.Victims, error)

	// SupportsPreemption returns whether the scheduler extender supports preemption.
	SupportsPreemption() bool

	// IsIgnorable returns true to indicate that scheduling should not fail when this extender
	// is unavailable. This gives the scheduler the ability to fail fast and tolerate non-critical extenders.
	IsIgnorable() bool
}

// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods
// onto machines.
type ScheduleAlgorithm interface {
	Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error)
	// Preempt receives scheduling errors for a pod and tries to create room for
	// the pod by preempting lower priority pods if possible.
	// It returns the node where preemption happened, a list of preempted pods, a
	// list of pods whose nominated node name should be removed, and error if any.
	Preempt(*v1.Pod, NodeLister, error) (selectedNode *v1.Node, preemptedPods []*v1.Pod, cleanupNominatedPods []*v1.Pod, err error)
	// Predicates() returns a pointer to a map of predicate functions. This is
	// exposed for testing.
	Predicates() map[string]FitPredicate
	// Prioritizers returns a slice of priority config. This is exposed for
	// testing.
	Prioritizers() []PriorityConfig
}
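The Prioritize contract above (per-host scores plus a weight that the scheduler folds into its own scores) can be illustrated with a small standalone sketch. The host names, scores, and weight are made up, and this is not the scheduler's actual combination code.

package main

import "fmt"

// addWeightedExtenderScores mimics the documented contract: each extender score is multiplied
// by the extender's weight and added to the score Kubernetes already computed for that host.
func addWeightedExtenderScores(base map[string]int, extender map[string]int, weight int) map[string]int {
	total := make(map[string]int, len(base))
	for host, score := range base {
		total[host] = score + weight*extender[host]
	}
	return total
}

func main() {
	base := map[string]int{"node-a": 7, "node-b": 5}     // hypothetical scheduler scores
	extender := map[string]int{"node-a": 1, "node-b": 4} // hypothetical extender scores
	fmt.Println(addWeightedExtenderScores(base, extender, 2)) // map[node-a:9 node-b:13]
}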
172 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/types.go generated vendored Normal file
@@ -0,0 +1,172 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package algorithm

import (
	apps "k8s.io/api/apps/v1beta1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
	schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
)

// NodeFieldSelectorKeys is a map whose keys are node field selector keys and whose values are
// the functions that return the corresponding node field value.
var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
	NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
}

// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error)

// PriorityMapFunction is a function that computes per-node results for a given node.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error)

// PriorityReduceFunction is a function that aggregates per-node results and computes
// final scores for all nodes.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error

// PredicateMetadataProducer is a function that computes predicate metadata for a given pod.
type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata

// PriorityMetadataProducer is a function that computes metadata for a given pod. This
// is now used only for priority functions. For predicates please use PredicateMetadataProducer.
type PriorityMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}

// PriorityFunction is a function that computes scores for all nodes.
// DEPRECATED
// Use Map-Reduce pattern for priority functions.
type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)

// PriorityConfig is a config used for a priority function.
type PriorityConfig struct {
	Name   string
	Map    PriorityMapFunction
	Reduce PriorityReduceFunction
	// TODO: Remove it after migrating all functions to
	// Map-Reduce pattern.
	Function PriorityFunction
	Weight   int
}

// EmptyPredicateMetadataProducer returns a no-op MetadataProducer type.
func EmptyPredicateMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) PredicateMetadata {
	return nil
}

// EmptyPriorityMetadataProducer returns a no-op PriorityMetadataProducer type.
func EmptyPriorityMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
	return nil
}

// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
	GetReason() string
}

// NodeLister interface represents anything that can list nodes for a scheduler.
type NodeLister interface {
	// We explicitly return []*v1.Node, instead of v1.NodeList, to avoid
	// performing expensive copies that are unneeded.
	List() ([]*v1.Node, error)
}

// PodLister interface represents anything that can list pods for a scheduler.
type PodLister interface {
	// We explicitly return []*v1.Pod, instead of v1.PodList, to avoid
	// performing expensive copies that are unneeded.
	List(labels.Selector) ([]*v1.Pod, error)
	// This is similar to "List()", but the returned slice does not
	// contain pods that don't pass `podFilter`.
	FilteredList(podFilter schedulercache.PodFilter, selector labels.Selector) ([]*v1.Pod, error)
}

// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler.
type ServiceLister interface {
	// Lists all the services
	List(labels.Selector) ([]*v1.Service, error)
	// Gets the services for the given pod
	GetPodServices(*v1.Pod) ([]*v1.Service, error)
}

// ControllerLister interface represents anything that can produce a list of ReplicationController; the list is consumed by a scheduler.
type ControllerLister interface {
	// Lists all the replication controllers
	List(labels.Selector) ([]*v1.ReplicationController, error)
	// Gets the controllers for the given pod
	GetPodControllers(*v1.Pod) ([]*v1.ReplicationController, error)
}

// ReplicaSetLister interface represents anything that can produce a list of ReplicaSet; the list is consumed by a scheduler.
type ReplicaSetLister interface {
	// Gets the replicasets for the given pod
	GetPodReplicaSets(*v1.Pod) ([]*extensions.ReplicaSet, error)
}

var _ ControllerLister = &EmptyControllerLister{}

// EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data
type EmptyControllerLister struct{}

// List returns nil
func (f EmptyControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) {
	return nil, nil
}

// GetPodControllers returns nil
func (f EmptyControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) {
	return nil, nil
}

var _ ReplicaSetLister = &EmptyReplicaSetLister{}

// EmptyReplicaSetLister implements ReplicaSetLister on []extensions.ReplicaSet returning empty data
type EmptyReplicaSetLister struct{}

// GetPodReplicaSets returns nil
func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) {
	return nil, nil
}

// StatefulSetLister interface represents anything that can produce a list of StatefulSet; the list is consumed by a scheduler.
type StatefulSetLister interface {
	// Gets the StatefulSet for the given pod.
	GetPodStatefulSets(*v1.Pod) ([]*apps.StatefulSet, error)
}

var _ StatefulSetLister = &EmptyStatefulSetLister{}

// EmptyStatefulSetLister implements StatefulSetLister on []apps.StatefulSet returning empty data.
type EmptyStatefulSetLister struct{}

// GetPodStatefulSets of EmptyStatefulSetLister returns nil.
func (f EmptyStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*apps.StatefulSet, err error) {
	return nil, nil
}

// PredicateMetadata interface represents anything that can access a predicate metadata.
type PredicateMetadata interface {
	ShallowCopy() PredicateMetadata
	AddPod(addedPod *v1.Pod, nodeInfo *schedulercache.NodeInfo) error
	RemovePod(deletedPod *v1.Pod) error
}
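A minimal sketch of the function types defined above: a FitPredicate that rejects unschedulable nodes and a PriorityConfig wiring a map function with a weight. It is written against this package's types (assuming the same imports as types.go plus fmt) but is not part of the vendored file, and the names and the scoring rule are invented.

// unschedulableNodePredicate is a hypothetical FitPredicate: it fails nodes marked unschedulable.
func unschedulableNodePredicate(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error) {
	node := nodeInfo.Node()
	if node != nil && node.Spec.Unschedulable {
		return false, nil, nil
	}
	return true, nil, nil
}

// flatPriority is a hypothetical PriorityMapFunction giving every node the same score.
func flatPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}
	return schedulerapi.HostPriority{Host: node.Name, Score: 1}, nil
}

// exampleConfig shows how a map function and weight are packaged for the scheduler.
var exampleConfig = PriorityConfig{Name: "FlatExample", Map: flatPriority, Weight: 1}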
85 vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/well_known_labels.go generated vendored Normal file
@@ -0,0 +1,85 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package algorithm

import (
	api "k8s.io/kubernetes/pkg/apis/core"
)

const (
	// TaintNodeNotReady will be added when node is not ready
	// and feature-gate for TaintBasedEvictions flag is enabled,
	// and removed when node becomes ready.
	TaintNodeNotReady = "node.kubernetes.io/not-ready"

	// DeprecatedTaintNodeNotReady is the deprecated version of TaintNodeNotReady.
	// It is deprecated since 1.9
	DeprecatedTaintNodeNotReady = "node.alpha.kubernetes.io/notReady"

	// TaintNodeUnreachable will be added when node becomes unreachable
	// (corresponding to NodeReady status ConditionUnknown)
	// and feature-gate for TaintBasedEvictions flag is enabled,
	// and removed when node becomes reachable (NodeReady status ConditionTrue).
	TaintNodeUnreachable = "node.kubernetes.io/unreachable"

	// DeprecatedTaintNodeUnreachable is the deprecated version of TaintNodeUnreachable.
	// It is deprecated since 1.9
	DeprecatedTaintNodeUnreachable = "node.alpha.kubernetes.io/unreachable"

	// TaintNodeUnschedulable will be added when node becomes unschedulable
	// and feature-gate for TaintNodesByCondition flag is enabled,
	// and removed when node becomes schedulable.
	TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"

	// TaintNodeOutOfDisk will be added when node becomes out of disk
	// and feature-gate for TaintNodesByCondition flag is enabled,
	// and removed when node has enough disk.
	TaintNodeOutOfDisk = "node.kubernetes.io/out-of-disk"

	// TaintNodeMemoryPressure will be added when node has memory pressure
	// and feature-gate for TaintNodesByCondition flag is enabled,
	// and removed when node has enough memory.
	TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"

	// TaintNodeDiskPressure will be added when node has disk pressure
	// and feature-gate for TaintNodesByCondition flag is enabled,
	// and removed when node has enough disk.
	TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"

	// TaintNodeNetworkUnavailable will be added when node's network is unavailable
	// and feature-gate for TaintNodesByCondition flag is enabled,
	// and removed when network becomes ready.
	TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"

	// TaintNodePIDPressure will be added when node has pid pressure
	// and feature-gate for TaintNodesByCondition flag is enabled,
	// and removed when node has enough pid resources.
	TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"

	// TaintExternalCloudProvider sets this taint on a node to mark it as unusable,
	// when kubelet is started with the "external" cloud provider, until a controller
	// from the cloud-controller-manager initializes this node, and then removes
	// the taint
	TaintExternalCloudProvider = "node.cloudprovider.kubernetes.io/uninitialized"

	// TaintNodeShutdown when node is shutdown in external cloud provider
	TaintNodeShutdown = "node.cloudprovider.kubernetes.io/shutdown"

	// NodeFieldSelectorKeyNodeName ('metadata.name') uses this as node field selector key
	// when selecting node by node's name.
	NodeFieldSelectorKeyNodeName = api.ObjectNameField
)
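For context, a small sketch of how one of these taint keys is typically consumed: a toleration that lets a pod stay bound to a not-ready node for a bounded time. The 300-second value is just an illustrative choice, not a value taken from this package.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

func main() {
	seconds := int64(300)
	tol := v1.Toleration{
		Key:               "node.kubernetes.io/not-ready", // algorithm.TaintNodeNotReady
		Operator:          v1.TolerationOpExists,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &seconds, // evict the pod only after 300s of the node staying not-ready
	}
	fmt.Printf("%+v\n", tol)
}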