Update go dependencies

Manuel de Brito Fontes 2018-04-21 14:10:40 -03:00
parent 293223eea0
commit b7a799bf82
GPG key ID: 786136016A8BA02A
432 changed files with 37346 additions and 25783 deletions


@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -16,7 +10,6 @@ go_library(
"metrics_nil.go",
"metrics_statfs.go",
"plugins.go",
"util.go",
"volume.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
@ -55,22 +48,20 @@ go_library(
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume",
visibility = ["//visibility:public"],
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/util/io:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/fs:go_default_library",
"//pkg/volume/util/recyclerclient:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@ -80,20 +71,12 @@ go_test(
srcs = [
"metrics_nil_test.go",
"plugins_test.go",
"util_test.go",
],
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/volume",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/util/slice:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
],
)
@ -107,7 +90,6 @@ go_test(
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume_test",
deps = [
":go_default_library",
"//pkg/volume/testing:go_default_library",
@ -164,4 +146,5 @@ filegroup(
"//pkg/volume/vsphere_volume:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -19,7 +19,7 @@ package volume
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/fs"
)
var _ MetricsProvider = &metricsDu{}
@ -66,7 +66,7 @@ func (md *metricsDu) GetMetrics() (*Metrics, error) {
// runDu executes the "du" command and writes the results to metrics.Used
func (md *metricsDu) runDu(metrics *Metrics) error {
used, err := util.Du(md.path)
used, err := fs.Du(md.path)
if err != nil {
return err
}
@ -76,7 +76,7 @@ func (md *metricsDu) runDu(metrics *Metrics) error {
// runFind executes the "find" command and writes the results to metrics.InodesUsed
func (md *metricsDu) runFind(metrics *Metrics) error {
inodesUsed, err := util.Find(md.path)
inodesUsed, err := fs.Find(md.path)
if err != nil {
return err
}
@ -87,7 +87,7 @@ func (md *metricsDu) runFind(metrics *Metrics) error {
// getFsInfo writes metrics.Capacity and metrics.Available from the filesystem
// info
func (md *metricsDu) getFsInfo(metrics *Metrics) error {
available, capacity, _, inodes, inodesFree, _, err := util.FsInfo(md.path)
available, capacity, _, inodes, inodesFree, _, err := fs.FsInfo(md.path)
if err != nil {
return NewFsInfoFailedError(err)
}


@ -19,7 +19,7 @@ package volume
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/fs"
)
var _ MetricsProvider = &metricsStatFS{}
@ -55,7 +55,7 @@ func (md *metricsStatFS) GetMetrics() (*Metrics, error) {
// getFsInfo writes metrics.Capacity, metrics.Used and metrics.Available from the filesystem info
func (md *metricsStatFS) getFsInfo(metrics *Metrics) error {
available, capacity, usage, inodes, inodesFree, inodesUsed, err := util.FsInfo(md.path)
available, capacity, usage, inodes, inodesFree, inodesUsed, err := fs.FsInfo(md.path)
if err != nil {
return NewFsInfoFailedError(err)
}


@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
)
const (
@ -161,7 +162,7 @@ type RecyclableVolumePlugin interface {
// Recycle will use the provided recorder to write any events that might be
// interesting to user. It's expected that caller will pass these events to
// the PV being recycled.
Recycle(pvName string, spec *Spec, eventRecorder RecycleEventRecorder) error
Recycle(pvName string, spec *Spec, eventRecorder recyclerclient.RecycleEventRecorder) error
}
// DeletableVolumePlugin is an extended interface of VolumePlugin and is used
@ -654,7 +655,7 @@ func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableV
return nil, fmt.Errorf("no creatable volume plugin matched")
}
// FindAttachablePluginBySpec fetches a persistent volume plugin by name.
// FindAttachablePluginBySpec fetches a persistent volume plugin by spec.
// Unlike the other "FindPlugin" methods, this does not return error if no
// plugin is found. All volumes require a mounter and unmounter, but not
// every volume will have an attacher/detacher.


@ -37,7 +37,7 @@ func TestSpecSourceConverters(t *testing.T) {
t.Errorf("Unexpected nil EmptyDir: %#v", converted)
}
if v.Name != converted.Name() {
t.Errorf("Expected %v but got %v", v.Name, converted.Name())
t.Errorf("Expected %v but got %v", converted.Name(), v.Name)
}
pv := &v1.PersistentVolume{
@ -52,7 +52,7 @@ func TestSpecSourceConverters(t *testing.T) {
t.Errorf("Unexpected nil AWSElasticBlockStore: %#v", converted)
}
if pv.Name != converted.Name() {
t.Errorf("Expected %v but got %v", pv.Name, converted.Name())
t.Errorf("Expected %v but got %v", converted.Name(), pv.Name)
}
}


@ -1,523 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"reflect"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
"hash/fnv"
"math/rand"
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
// GB - GigaByte size
GB = 1000 * 1000 * 1000
// GIB - GibiByte size
GIB = 1024 * 1024 * 1024
)
type RecycleEventRecorder func(eventtype, message string)
// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
// whichever comes first. An attempt to delete a recycler pod is always
// attempted before returning.
//
// In case there is a pod with the same namespace+name already running, this
// function deletes it as it is not able to judge if it is an old recycler
// or the user has forged a fake recycler to block Kubernetes from recycling.
//
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
// exists" error when a previous controller has already started recycling
// the volume. Here we assume that pv.Name is already unique.
pod.Name = "recycler-for-" + pvName
pod.GenerateName = ""
stopChannel := make(chan struct{})
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}
// Start the pod
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if errors.IsAlreadyExists(err) {
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
}
// Recycler will try again and the old pod will be hopefully deleted
// at that time.
return fmt.Errorf("old recycler pod found, will retry later")
}
return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
}
err = waitForPod(pod, recyclerClient, podCh)
// In all cases delete the recycler pod and log its result.
glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Returning recycler error is preferred, the pod will be deleted again on
// the next retry.
if err != nil {
return fmt.Errorf("failed to recycle volume: %s", err)
}
// Recycle succeeded but we failed to delete the recycler pod. Report it,
// the controller will re-try recycling the PV again shortly.
if deleteErr != nil {
return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
}
return nil
}
// waitForPod watches the pod until it finishes and sends all events on the
// pod to the PV.
func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
for {
event, ok := <-podCh
if !ok {
return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
}
switch event.Object.(type) {
case *v1.Pod:
// POD changed
pod := event.Object.(*v1.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == v1.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf(pod.Status.Message)
} else {
return fmt.Errorf("pod failed, pod.Status.Message unknown.")
}
}
case watch.Deleted:
return fmt.Errorf("recycler pod was deleted")
case watch.Error:
return fmt.Errorf("recycler pod watcher failed")
}
case *v1.Event:
// Event received
podEvent := event.Object.(*v1.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}
}
}
}
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
CreatePod(pod *v1.Pod) (*v1.Pod, error)
GetPod(name, namespace string) (*v1.Pod, error)
DeletePod(name, namespace string) error
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// deferring a close on the channel to stop the reflector.
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
// Event sends an event to the volume that is being recycled.
Event(eventtype, message string)
}
func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
return &realRecyclerClient{
client,
recorder,
}
}
type realRecyclerClient struct {
client clientset.Interface
recorder RecycleEventRecorder
}
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.CoreV1().Pods(namespace).Delete(name, nil)
}
func (c *realRecyclerClient) Event(eventtype, message string) {
c.recorder(eventtype, message)
}
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, err := fields.ParseSelector("metadata.name=" + name)
if err != nil {
return nil, err
}
options := metav1.ListOptions{
FieldSelector: podSelector.String(),
Watch: true,
}
podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
if err != nil {
return nil, err
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
if err != nil {
podWatch.Stop()
return nil, err
}
eventCh := make(chan watch.Event, 30)
go func() {
defer eventWatch.Stop()
defer podWatch.Stop()
defer close(eventCh)
var podWatchChannelClosed bool
var eventWatchChannelClosed bool
for {
select {
case _ = <-stopChannel:
return
case podEvent, ok := <-podWatch.ResultChan():
if !ok {
podWatchChannelClosed = true
} else {
eventCh <- podEvent
}
case eventEvent, ok := <-eventWatch.ResultChan():
if !ok {
eventWatchChannelClosed = true
} else {
eventCh <- eventEvent
}
}
if podWatchChannelClosed && eventWatchChannelClosed {
break
}
}
}()
return eventCh, nil
}
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The calculation and return value is either the
// minimumTimeout or the timeoutIncrement per Gi of storage size, whichever is
// greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
giQty := resource.MustParse("1Gi")
pvQty := pv.Spec.Capacity[v1.ResourceStorage]
giSize := giQty.Value()
pvSize := pvQty.Value()
timeout := (pvSize / giSize) * int64(timeoutIncrement)
if timeout < int64(minimumTimeout) {
return int64(minimumTimeout)
}
return timeout
}
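To make the timeout rule above concrete, here is a minimal standalone sketch of the same arithmetic; the helper name and the 60s/30s example values are illustrative and not part of this commit. The timeout scales with the volume size in Gi and is floored at minimumTimeout.

package main

import "fmt"

// calculateTimeout mirrors CalculateTimeoutForVolume with a plain Gi count
// instead of a PersistentVolume, purely for illustration.
func calculateTimeout(minimumTimeout, timeoutIncrement int, pvSizeGi int64) int64 {
    timeout := pvSizeGi * int64(timeoutIncrement)
    if timeout < int64(minimumTimeout) {
        return int64(minimumTimeout)
    }
    return timeout
}

func main() {
    fmt.Println(calculateTimeout(60, 30, 8)) // 8Gi at 30s/Gi with a 60s floor -> 240
    fmt.Println(calculateTimeout(60, 30, 1)) // 1Gi -> 30s, floored to 60
}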
// RoundUpSize calculates how many allocation units are needed to accommodate
// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
// allocates volumes in gibibyte-sized chunks,
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}
// RoundUpToGB rounds up given quantity to chunks of GB
func RoundUpToGB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GB)
}
// RoundUpToGiB rounds up the given quantity to chunks of GiB
func RoundUpToGiB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GIB)
}
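The rounding helpers above reduce to a ceiling division; a small standalone sketch with plain int64 byte counts instead of resource.Quantity, purely for illustration:

package main

import "fmt"

// roundUpSize mirrors RoundUpSize: how many whole allocation units are
// needed to hold volumeSizeBytes.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
    return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
    const GiB = 1024 * 1024 * 1024
    fmt.Println(roundUpSize(1500*1024*1024, GiB)) // 2: a 1500MiB request needs two 1GiB chunks
}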
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
prefix := clusterName + "-dynamic"
pvLen := len(pvName)
// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
// +1 for the '-' dash
if pvLen+1+len(prefix) > maxLength {
prefix = prefix[:maxLength-pvLen-1]
}
return prefix + "-" + pvName
}
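As a rough illustration of the truncation rule described above, this standalone sketch mirrors the same logic with hypothetical cluster and PV names:

package main

import "fmt"

// generateVolumeName mirrors GenerateVolumeName: the "<clusterName>-dynamic"
// prefix is shortened so that prefix + "-" + pvName still fits maxLength.
func generateVolumeName(clusterName, pvName string, maxLength int) string {
    prefix := clusterName + "-dynamic"
    if len(pvName)+1+len(prefix) > maxLength {
        prefix = prefix[:maxLength-len(pvName)-1]
    }
    return prefix + "-" + pvName
}

func main() {
    fmt.Println(generateVolumeName("kubernetes", "pvc-1234", 63)) // fits: kubernetes-dynamic-pvc-1234
    fmt.Println(generateVolumeName("kubernetes", "pvc-1234", 20)) // prefix truncated to fit 20 chars
}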
// GetPath checks if the path from the mounter is empty.
func GetPath(mounter Mounter) (string, error) {
path := mounter.GetPath()
if path == "" {
return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
}
return path, nil
}
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
return zone
}
// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
replicaZones := sets.NewString()
startingIndex := index * numZones
for index = startingIndex; index < startingIndex+numZones; index++ {
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
replicaZones.Insert(zone)
}
glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
pvcName, replicaZones.UnsortedList(), zoneSlice)
return replicaZones
}
func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
if pvcName == "" {
// We should always be called with a name; this shouldn't happen
glog.Warningf("No name defined during volume create; choosing random zone")
hash = rand.Uint32()
} else {
hashString := pvcName
// Heuristic to make sure that volumes in a StatefulSet are spread across zones
// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
// where Id is an integer index.
// Note though that if a StatefulSet pod has multiple claims, we need them to be
// in the same zone, because otherwise the pod will be unable to mount both volumes,
// and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when
// it looks like `ClaimName-StatefulSetName-Id`.
// We continue to round-robin volume names that look like `Name-Id` also; this is a useful
// feature for users that are creating statefulset-like functionality without using statefulsets.
lastDash := strings.LastIndexByte(pvcName, '-')
if lastDash != -1 {
statefulsetIDString := pvcName[lastDash+1:]
statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32)
if err == nil {
// Offset by the statefulsetID, so we round-robin across zones
index = uint32(statefulsetID)
// We still hash the volume name, but only the prefix
hashString = pvcName[:lastDash]
// In the special case where it looks like `ClaimName-StatefulSetName-Id`,
// hash only the StatefulSetName, so that different claims on the same StatefulSet
// member end up in the same zone.
// Note that StatefulSetName (and ClaimName) might themselves both have dashes.
// We actually just take the portion after the final - of ClaimName-StatefulSetName.
// For our purposes it doesn't much matter (just suboptimal spreading).
lastDash := strings.LastIndexByte(hashString, '-')
if lastDash != -1 {
hashString = hashString[lastDash+1:]
}
glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
}
}
// We hash the (base) volume name, so we don't bias towards the first N zones
h := fnv.New32()
h.Write([]byte(hashString))
hash = h.Sum32()
}
return hash, index
}
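A rough standalone sketch of the zone-spreading heuristic described above, using hypothetical StatefulSet claim names and zones; hashAndIndex mirrors only the hash/offset logic, for illustration:

package main

import (
    "fmt"
    "hash/fnv"
    "sort"
    "strconv"
    "strings"
)

// hashAndIndex hashes the claim-name prefix and uses a trailing integer
// suffix (if any) as a round-robin offset, as in getPVCNameHashAndIndexOffset.
func hashAndIndex(pvcName string) (uint32, uint32) {
    hashString := pvcName
    var index uint32
    if lastDash := strings.LastIndexByte(pvcName, '-'); lastDash != -1 {
        if id, err := strconv.ParseUint(pvcName[lastDash+1:], 10, 32); err == nil {
            index = uint32(id)
            hashString = pvcName[:lastDash]
            // hash only the portion after the final dash (the StatefulSet name)
            if d := strings.LastIndexByte(hashString, '-'); d != -1 {
                hashString = hashString[d+1:]
            }
        }
    }
    h := fnv.New32()
    h.Write([]byte(hashString))
    return h.Sum32(), index
}

func main() {
    zones := []string{"us-central1-a", "us-central1-b", "us-central1-c"}
    sort.Strings(zones) // zones.List() returns a sorted slice
    for _, pvc := range []string{"www-web-0", "www-web-1", "www-web-2"} {
        hash, index := hashAndIndex(pvc)
        fmt.Println(pvc, "->", zones[(hash+index)%uint32(len(zones))])
    }
}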
// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host VolumeHost, volName string, volSpec Spec, podUID types.UID) error {
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
// Wrap EmptyDir, let it do the teardown.
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
if err != nil {
return err
}
return wrapped.TearDownAt(dir)
}
// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
func MountOptionFromSpec(spec *Spec, options ...string) []string {
pv := spec.PersistentVolume
if pv != nil {
// Use beta annotation first
if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
moList := strings.Split(mo, ",")
return JoinMountOptions(moList, options)
}
if len(pv.Spec.MountOptions) > 0 {
return JoinMountOptions(pv.Spec.MountOptions, options)
}
}
return options
}
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
allMountOptions := sets.NewString()
for _, mountOption := range userOptions {
if len(mountOption) > 0 {
allMountOptions.Insert(mountOption)
}
}
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
return allMountOptions.UnsortedList()
}
// ValidateZone returns:
// - an error in case zone is an empty string or contains only any combination of spaces and tab characters
// - nil otherwise
func ValidateZone(zone string) error {
if strings.TrimSpace(zone) == "" {
return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone)
}
return nil
}
// AccessModesContains returns whether the requested mode is contained by modes
func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
for _, mode := range requestedModes {
if !AccessModesContains(indexedModes, mode) {
return false
}
}
return true
}
// GetWindowsPath get a windows path
func GetWindowsPath(path string) string {
windowsPath := strings.Replace(path, "/", "\\", -1)
if strings.HasPrefix(windowsPath, "\\") {
windowsPath = "c:" + windowsPath
}
return windowsPath
}


@ -1,10 +1,4 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@ -16,124 +10,76 @@ go_library(
"finalizer.go",
"io_util.go",
"metrics.go",
"nested_volumes.go",
"resize_util.go",
"util.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"device_util_unsupported.go",
"fs.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"device_util_linux.go",
"fs.go",
"util_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"device_util_unsupported.go",
"fs_unsupported.go",
"util_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/util",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/storage/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:windows": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"//conditions:default": [],
}),
],
)
go_test(
name = "go_default_test",
srcs = [
"nested_volumes_test.go",
"resize_util_test.go",
"util_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
@ -143,19 +89,19 @@ go_test(
"//conditions:default": [],
}),
embed = [":go_default_library"],
importpath = "k8s.io/kubernetes/pkg/volume/util",
deps = [
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/slice:go_default_library",
"//pkg/volume:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
"//conditions:default": [],
}),
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
@ -169,10 +115,13 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/volume/util/fs:all-srcs",
"//pkg/volume/util/nestedpendingoperations:all-srcs",
"//pkg/volume/util/operationexecutor:all-srcs",
"//pkg/volume/util/recyclerclient:all-srcs",
"//pkg/volume/util/types:all-srcs",
"//pkg/volume/util/volumehelper:all-srcs",
"//pkg/volume/util/volumepathhandler:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -88,14 +88,15 @@ const (
// The Write algorithm is:
//
// 1. The payload is validated; if the payload is invalid, the function returns
// 2. The user-visible portion of the volume is walked to determine whether any
// 2.  The current timestamped directory is detected by reading the data directory
// symlink
// 3. The old version of the volume is walked to determine whether any
// portion of the payload was deleted and is still present on disk.
// If the payload is already present on disk and there are no deleted files,
// the function returns
// 3. A check is made to determine whether data present in the payload has changed
// 4.  A new timestamped dir is created
// 5. The payload is written to the new timestamped directory
// 6.  Symlinks and directory for new user-visible files are created (if needed).
// 4. The data in the current timestamped directory is compared to the projected
// data to determine if an update is required.
// 5.  A new timestamped dir is created
// 6. The payload is written to the new timestamped directory
// 7.  Symlinks and directory for new user-visible files are created (if needed).
//
// For example, consider the files:
// <target-dir>/podName
@ -104,16 +105,12 @@ const (
//
// The user visible files are symbolic links into the internal data directory:
// <target-dir>/podName -> ..data/podName
// <target-dir>/usr/labels -> ../..data/usr/labels
// <target-dir>/k8s/annotations -> ../..data/k8s/annotations
//
// Relative links are created into the data directory for files in subdirectories.
// <target-dir>/usr -> ..data/usr
// <target-dir>/k8s -> ..data/k8s
//
// The data directory itself is a link to a timestamped directory with
// the real data:
// <target-dir>/..data -> ..2016_02_01_15_04_05.12345678/
// 7.  The current timestamped directory is detected by reading the data directory
// symlink
// 8.  A symlink to the new timestamped directory ..data_tmp is created that will
// become the new data directory
// 9.  The new data directory symlink is renamed to the data directory; rename is atomic
@ -128,31 +125,50 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
}
// (2)
pathsToRemove, err := w.pathsToRemove(cleanPayload)
dataDirPath := path.Join(w.targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil {
glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
if !os.IsNotExist(err) {
glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// although Readlink() returns "" on err, don't be fragile by relying on it (since it's not specified in docs)
// empty oldTsDir indicates that it didn't exist
oldTsDir = ""
}
oldTsPath := path.Join(w.targetDir, oldTsDir)
var pathsToRemove sets.String
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
// (3)
pathsToRemove, err = w.pathsToRemove(cleanPayload, oldTsPath)
if err != nil {
glog.Errorf("%s: error determining user-visible files to remove: %v", w.logContext, err)
return err
}
// (4)
if should, err := shouldWritePayload(cleanPayload, oldTsPath); err != nil {
glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
return nil
} else {
glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
}
// (3)
if should, err := w.shouldWritePayload(cleanPayload); err != nil {
glog.Errorf("%s: error determining whether payload should be written to disk: %v", w.logContext, err)
return err
} else if !should && len(pathsToRemove) == 0 {
glog.V(4).Infof("%s: no update required for target directory %v", w.logContext, w.targetDir)
return nil
} else {
glog.V(4).Infof("%s: write required for target directory %v", w.logContext, w.targetDir)
}
// (4)
// (5)
tsDir, err := w.newTimestampDir()
if err != nil {
glog.V(4).Infof("%s: error creating new ts data directory: %v", w.logContext, err)
return err
}
tsDirName := filepath.Base(tsDir)
// (5)
// (6)
if err = w.writePayloadToDir(cleanPayload, tsDir); err != nil {
glog.Errorf("%s: error writing payload to ts data directory %s: %v", w.logContext, tsDir, err)
return err
@ -160,21 +176,12 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
glog.V(4).Infof("%s: performed write of new data to ts data directory: %s", w.logContext, tsDir)
}
// (6)
// (7)
if err = w.createUserVisibleFiles(cleanPayload); err != nil {
glog.Errorf("%s: error creating visible symlinks in %s: %v", w.logContext, w.targetDir, err)
return err
}
// (7)
_, tsDirName := filepath.Split(tsDir)
dataDirPath := path.Join(w.targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("%s: error reading link for data directory: %v", w.logContext, err)
return err
}
// (8)
newDataDirPath := path.Join(w.targetDir, newDataDirName)
if err = os.Symlink(tsDirName, newDataDirPath); err != nil {
@ -206,7 +213,7 @@ func (w *AtomicWriter) Write(payload map[string]FileProjection) error {
// (11)
if len(oldTsDir) > 0 {
if err = os.RemoveAll(path.Join(w.targetDir, oldTsDir)); err != nil {
if err = os.RemoveAll(oldTsPath); err != nil {
glog.Errorf("%s: error removing old data directory %s: %v", w.logContext, oldTsDir, err)
return err
}
@ -250,7 +257,7 @@ func validatePath(targetPath string) error {
}
if len(targetPath) > maxPathLength {
return fmt.Errorf("invalid path: must be less than %d characters", maxPathLength)
return fmt.Errorf("invalid path: must be less than or equal to %d characters", maxPathLength)
}
items := strings.Split(targetPath, string(os.PathSeparator))
@ -259,7 +266,7 @@ func validatePath(targetPath string) error {
return fmt.Errorf("invalid path: must not contain '..': %s", targetPath)
}
if len(item) > maxFileNameLength {
return fmt.Errorf("invalid path: filenames must be less than %d characters", maxFileNameLength)
return fmt.Errorf("invalid path: filenames must be less than or equal to %d characters", maxFileNameLength)
}
}
if strings.HasPrefix(items[0], "..") && len(items[0]) > 2 {
@ -270,9 +277,9 @@ func validatePath(targetPath string) error {
}
// shouldWritePayload returns whether the payload should be written to disk.
func (w *AtomicWriter) shouldWritePayload(payload map[string]FileProjection) (bool, error) {
func shouldWritePayload(payload map[string]FileProjection, oldTsDir string) (bool, error) {
for userVisiblePath, fileProjection := range payload {
shouldWrite, err := w.shouldWriteFile(path.Join(w.targetDir, userVisiblePath), fileProjection.Data)
shouldWrite, err := shouldWriteFile(path.Join(oldTsDir, userVisiblePath), fileProjection.Data)
if err != nil {
return false, err
}
@ -286,7 +293,7 @@ func (w *AtomicWriter) shouldWritePayload(payload map[string]FileProjection) (bo
}
// shouldWriteFile returns whether a new version of a file should be written to disk.
func (w *AtomicWriter) shouldWriteFile(path string, content []byte) (bool, error) {
func shouldWriteFile(path string, content []byte) (bool, error) {
_, err := os.Lstat(path)
if os.IsNotExist(err) {
return true, nil
@ -300,19 +307,15 @@ func (w *AtomicWriter) shouldWriteFile(path string, content []byte) (bool, error
return (bytes.Compare(content, contentOnFs) != 0), nil
}
// pathsToRemove walks the user-visible portion of the target directory and
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.String, error) {
func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.String, error) {
paths := sets.NewString()
visitor := func(path string, info os.FileInfo, err error) error {
if path == w.targetDir {
return nil
}
relativePath := strings.TrimPrefix(path, w.targetDir)
relativePath := strings.TrimPrefix(path, oldTsDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
if strings.HasPrefix(relativePath, "..") {
if relativePath == "" {
return nil
}
@ -320,7 +323,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.St
return nil
}
err := filepath.Walk(w.targetDir, visitor)
err := filepath.Walk(oldTsDir, visitor)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
@ -348,7 +351,7 @@ func (w *AtomicWriter) pathsToRemove(payload map[string]FileProjection) (sets.St
// newTimestampDir creates a new timestamp directory
func (w *AtomicWriter) newTimestampDir() (string, error) {
tsDir, err := ioutil.TempDir(w.targetDir, fmt.Sprintf("..%s.", time.Now().Format("1981_02_01_15_04_05")))
tsDir, err := ioutil.TempDir(w.targetDir, time.Now().UTC().Format("..2006_01_02_15_04_05."))
if err != nil {
glog.Errorf("%s: unable to create new temp directory: %v", w.logContext, err)
return "", err
@ -405,34 +408,22 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
//
// Viz:
// For files: "bar", "foo/bar", "baz/bar", "foo/baz/blah"
// the following symlinks and subdirectories are created:
// bar -> ..data/bar
// foo/bar -> ../..data/foo/bar
// baz/bar -> ../..data/baz/bar
// foo/baz/blah -> ../../..data/foo/baz/blah
// the following symlinks are created:
// bar -> ..data/bar
// foo -> ..data/foo
// baz -> ..data/baz
func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection) error {
for userVisiblePath := range payload {
dir, _ := filepath.Split(userVisiblePath)
subDirs := 0
if len(dir) > 0 {
// If dir is not empty, the projection path contains at least one
// subdirectory (example: userVisiblePath := "foo/bar").
// Since filepath.Split leaves a trailing path separator, in this
// example, dir = "foo/". In order to calculate the number of
// subdirectories, we must subtract 1 from the number returned by split.
subDirs = len(strings.Split(dir, string(os.PathSeparator))) - 1
err := os.MkdirAll(path.Join(w.targetDir, dir), os.ModePerm)
if err != nil {
return err
}
slashpos := strings.Index(userVisiblePath, string(os.PathSeparator))
if slashpos == -1 {
slashpos = len(userVisiblePath)
}
_, err := os.Readlink(path.Join(w.targetDir, userVisiblePath))
linkname := userVisiblePath[:slashpos]
_, err := os.Readlink(path.Join(w.targetDir, linkname))
if err != nil && os.IsNotExist(err) {
// The link into the data directory for this path doesn't exist; create it,
// respecting the number of subdirectories necessary to link
// correctly back into the data directory.
visibleFile := path.Join(w.targetDir, userVisiblePath)
dataDirFile := path.Join(strings.Repeat("../", subDirs), dataDirName, userVisiblePath)
// The link into the data directory for this path doesn't exist; create it
visibleFile := path.Join(w.targetDir, linkname)
dataDirFile := path.Join(dataDirName, linkname)
err = os.Symlink(dataDirFile, visibleFile)
if err != nil {
@ -446,13 +437,18 @@ func (w *AtomicWriter) createUserVisibleFiles(payload map[string]FileProjection)
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *AtomicWriter) removeUserVisiblePaths(paths sets.String) error {
orderedPaths := paths.List()
for ii := len(orderedPaths) - 1; ii >= 0; ii-- {
if err := os.Remove(path.Join(w.targetDir, orderedPaths[ii])); err != nil {
glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, orderedPaths[ii], err)
return err
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {
// only remove symlinks from the volume root directory (i.e. items that don't contain '/')
if strings.Contains(p, ps) {
continue
}
if err := os.Remove(path.Join(w.targetDir, p)); err != nil {
glog.Errorf("%s: error pruning old user-visible path %s: %v", w.logContext, p, err)
lasterr = err
}
}
return nil
return lasterr
}


@ -235,7 +235,17 @@ func TestPathsToRemove(t *testing.T) {
continue
}
actual, err := writer.pathsToRemove(tc.payload2)
dataDirPath := path.Join(targetDir, dataDirName)
oldTsDir, err := os.Readlink(dataDirPath)
if err != nil && os.IsNotExist(err) {
t.Errorf("Data symlink does not exist: %v", dataDirPath)
continue
} else if err != nil {
t.Errorf("Unable to read symlink %v: %v", dataDirPath, err)
continue
}
actual, err := writer.pathsToRemove(tc.payload2, path.Join(targetDir, oldTsDir))
if err != nil {
t.Errorf("%v: unexpected error determining paths to remove: %v", tc.name, err)
continue
@ -741,14 +751,15 @@ func TestMultipleUpdates(t *testing.T) {
}
func checkVolumeContents(targetDir, tcName string, payload map[string]FileProjection, t *testing.T) {
dataDirPath := path.Join(targetDir, dataDirName)
// use filepath.Walk to reconstruct the payload, then deep equal
observedPayload := make(map[string]FileProjection)
visitor := func(path string, info os.FileInfo, err error) error {
if info.Mode().IsRegular() || info.IsDir() {
if info.IsDir() {
return nil
}
relativePath := strings.TrimPrefix(path, targetDir)
relativePath := strings.TrimPrefix(path, dataDirPath)
relativePath = strings.TrimPrefix(relativePath, "/")
if strings.HasPrefix(relativePath, "..") {
return nil
@ -769,9 +780,26 @@ func checkVolumeContents(targetDir, tcName string, payload map[string]FileProjec
return nil
}
err := filepath.Walk(targetDir, visitor)
d, err := ioutil.ReadDir(targetDir)
if err != nil {
t.Errorf("%v: unexpected error walking directory: %v", tcName, err)
t.Errorf("Unable to read dir %v: %v", targetDir, err)
return
}
for _, info := range d {
if strings.HasPrefix(info.Name(), "..") {
continue
}
if info.Mode()&os.ModeSymlink != 0 {
p := path.Join(targetDir, info.Name())
actual, err := os.Readlink(p)
if err != nil {
t.Errorf("Unable to read symlink %v: %v", p, err)
continue
}
if err := filepath.Walk(path.Join(targetDir, actual), visitor); err != nil {
t.Errorf("%v: unexpected error walking directory: %v", tcName, err)
}
}
}
cleanPathPayload := make(map[string]FileProjection, len(payload))


@ -19,4 +19,7 @@ package util
const (
// Name of finalizer on PVCs that have a running pod.
PVCProtectionFinalizer = "kubernetes.io/pvc-protection"
// Name of finalizer on PVs that are bound by PVCs
PVProtectionFinalizer = "kubernetes.io/pv-protection"
)

vendor/k8s.io/kubernetes/pkg/volume/util/fs/BUILD (generated, vendored new file; 95 additions)

@ -0,0 +1,95 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = select({
"@io_bazel_rules_go//go/platform:android": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"fs.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"fs.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"fs_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"fs_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/volume/util/fs",
visibility = ["//visibility:public"],
deps = select({
"@io_bazel_rules_go//go/platform:android": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"@io_bazel_rules_go//go/platform:windows": [
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package util
package fs
import (
"bytes"


@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package util
package fs
import (
"fmt"


@ -0,0 +1,99 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"k8s.io/api/core/v1"
"os"
"path"
"path/filepath"
"sort"
"strings"
)
// getNestedMountpoints returns a list of mountpoint directories that should be created
// for the volume indicated by name.
// note: the returned list is relative to baseDir
func getNestedMountpoints(name, baseDir string, pod v1.Pod) ([]string, error) {
var retval []string
checkContainer := func(container *v1.Container) error {
var allMountPoints []string // all mount points in this container
var myMountPoints []string // mount points that match name
for _, vol := range container.VolumeMounts {
cleaned := filepath.Clean(vol.MountPath)
allMountPoints = append(allMountPoints, cleaned)
if vol.Name == name {
myMountPoints = append(myMountPoints, cleaned)
}
}
sort.Strings(allMountPoints)
parentPrefix := ".." + string(os.PathSeparator)
// Examine each place where this volume is mounted
for _, myMountPoint := range myMountPoints {
if strings.HasPrefix(myMountPoint, parentPrefix) {
// Don't let a container trick us into creating directories outside of its rootfs
return fmt.Errorf("Invalid container mount point %v", myMountPoint)
}
myMPSlash := myMountPoint + string(os.PathSeparator)
// The previously found nested mountpoint (or "" if none found yet)
prevNestedMP := ""
// examine each mount point to see if it's nested beneath this volume
// (but skip any that are double-nested beneath this volume)
// For example, if this volume is mounted as /dir and other volumes are mounted
// as /dir/nested and /dir/nested/other, only create /dir/nested.
for _, mp := range allMountPoints {
if !strings.HasPrefix(mp, myMPSlash) {
continue // skip -- not nested beneath myMountPoint
}
if prevNestedMP != "" && strings.HasPrefix(mp, prevNestedMP) {
continue // skip -- double nested beneath myMountPoint
}
// since this mount point is nested, remember it so that we can check that following ones aren't nested beneath this one
prevNestedMP = mp + string(os.PathSeparator)
retval = append(retval, mp[len(myMPSlash):])
}
}
return nil
}
for _, container := range pod.Spec.InitContainers {
if err := checkContainer(&container); err != nil {
return nil, err
}
}
for _, container := range pod.Spec.Containers {
if err := checkContainer(&container); err != nil {
return nil, err
}
}
return retval, nil
}
// MakeNestedMountpoints creates mount points in baseDir for volumes mounted beneath name
func MakeNestedMountpoints(name, baseDir string, pod v1.Pod) error {
dirs, err := getNestedMountpoints(name, baseDir, pod)
if err != nil {
return err
}
for _, dir := range dirs {
err := os.MkdirAll(path.Join(baseDir, dir), 0755)
if err != nil {
return fmt.Errorf("Unable to create nested volume mountpoints: %v", err)
}
}
return nil
}


@ -0,0 +1,233 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"io/ioutil"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
type testCases struct {
name string
err bool
expected sets.String
volname string
pod v1.Pod
}
func TestGetNestedMountpoints(t *testing.T) {
var (
testNamespace = "test_namespace"
testPodUID = types.UID("test_pod_uid")
)
tc := []testCases{
{
name: "Simple Pod",
err: false,
expected: sets.NewString(),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir", Name: "vol1"},
},
},
},
},
},
},
{
name: "Simple Nested Pod",
err: false,
expected: sets.NewString("nested"),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir", Name: "vol1"},
{MountPath: "/dir/nested", Name: "vol2"},
},
},
},
},
},
},
{
name: "Unsorted Nested Pod",
err: false,
expected: sets.NewString("nested", "nested2"),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir/nested/double", Name: "vol3"},
{MountPath: "/ignore", Name: "vol4"},
{MountPath: "/dir/nested", Name: "vol2"},
{MountPath: "/ignore2", Name: "vol5"},
{MountPath: "/dir", Name: "vol1"},
{MountPath: "/dir/nested2", Name: "vol3"},
},
},
},
},
},
},
{
name: "Multiple vol1 mounts Pod",
err: false,
expected: sets.NewString("nested", "nested2"),
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/dir", Name: "vol1"},
{MountPath: "/dir/nested", Name: "vol2"},
{MountPath: "/ignore", Name: "vol4"},
{MountPath: "/other", Name: "vol1"},
{MountPath: "/other/nested2", Name: "vol3"},
},
},
},
},
},
},
{
name: "Big Pod",
err: false,
volname: "vol1",
expected: sets.NewString("sub1/sub2/sub3", "sub1/sub2/sub4", "sub1/sub2/sub6", "sub"),
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/mnt", Name: "vol1"},
{MountPath: "/ignore", Name: "vol2"},
{MountPath: "/mnt/sub1/sub2/sub3", Name: "vol3"},
{MountPath: "/mnt/sub1/sub2/sub4", Name: "vol4"},
{MountPath: "/mnt/sub1/sub2/sub4/skip", Name: "vol5"},
{MountPath: "/mnt/sub1/sub2/sub4/skip2", Name: "vol5a"},
{MountPath: "/mnt/sub1/sub2/sub6", Name: "vol6"},
{MountPath: "/mnt7", Name: "vol7"},
},
},
},
InitContainers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "/mnt/dir", Name: "vol1"},
{MountPath: "/mnt/dir_ignore", Name: "vol8"},
{MountPath: "/ignore", Name: "vol9"},
{MountPath: "/mnt/dir/sub", Name: "vol11"},
},
},
},
},
},
},
{
name: "Naughty Pod",
err: true,
expected: nil,
volname: "vol1",
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: testNamespace,
UID: testPodUID,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{MountPath: "foo/../../dir", Name: "vol1"},
{MountPath: "foo/../../dir/skip", Name: "vol10"},
},
},
},
},
},
},
}
for _, test := range tc {
dir, err := ioutil.TempDir("", "TestMakeNestedMountpoints.")
if err != nil {
t.Errorf("Unexpected error trying to create temp directory: %v", err)
return
}
defer os.RemoveAll(dir)
rootdir := path.Join(dir, "vol")
err = os.Mkdir(rootdir, 0755)
if err != nil {
t.Errorf("Unexpected error trying to create temp root directory: %v", err)
return
}
dirs, err := getNestedMountpoints(test.volname, rootdir, test.pod)
if test.err {
if err == nil {
t.Errorf("%v: expected error, got nil", test.name)
}
continue
} else {
if err != nil {
t.Errorf("%v: expected no error, got %v", test.name, err)
continue
}
}
actual := sets.NewString(dirs...)
if !test.expected.Equal(actual) {
t.Errorf("%v: unexpected nested directories created:\nexpected: %v\n got: %v", test.name, test.expected, actual)
}
}
}


@ -0,0 +1,44 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["recycler_client.go"],
importpath = "k8s.io/kubernetes/pkg/volume/util/recyclerclient",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["recycler_client_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View file

@ -0,0 +1,252 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package recyclerclient
import (
"fmt"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
)
type RecycleEventRecorder func(eventtype, message string)
// RecycleVolumeByWatchingPodUntilCompletion is intended for use with volume
// Recyclers. This function will save the given Pod to the API and watch it
// until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded,
// whichever comes first. Deletion of the recycler pod is always attempted
// before returning.
//
// In case a pod with the same namespace+name is already running, this
// function deletes it, as it cannot tell whether it is a stale recycler
// or a fake recycler forged by a user to block Kubernetes from recycling.
//
// pod - the pod designed by a volume plugin to recycle the volume. pod.Name
// will be overwritten with a unique name based on PV.Name.
// client - kube client for API operations.
func RecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, kubeClient clientset.Interface, recorder RecycleEventRecorder) error {
return internalRecycleVolumeByWatchingPodUntilCompletion(pvName, pod, newRecyclerClient(kubeClient, recorder))
}
// same as above func comments, except 'recyclerClient' is a narrower pod API
// interface to ease testing
func internalRecycleVolumeByWatchingPodUntilCompletion(pvName string, pod *v1.Pod, recyclerClient recyclerClient) error {
glog.V(5).Infof("creating recycler pod for volume %s\n", pod.Name)
// Generate unique name for the recycler pod - we need to get "already
// exists" error when a previous controller has already started recycling
// the volume. Here we assume that pv.Name is already unique.
pod.Name = "recycler-for-" + pvName
pod.GenerateName = ""
stopChannel := make(chan struct{})
defer close(stopChannel)
podCh, err := recyclerClient.WatchPod(pod.Name, pod.Namespace, stopChannel)
if err != nil {
glog.V(4).Infof("cannot start watcher for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return err
}
// Start the pod
_, err = recyclerClient.CreatePod(pod)
if err != nil {
if errors.IsAlreadyExists(err) {
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
return fmt.Errorf("failed to delete old recycler pod %s/%s: %s", pod.Namespace, pod.Name, deleteErr)
}
// Recycler will try again and the old pod will be hopefully deleted
// at that time.
return fmt.Errorf("old recycler pod found, will retry later")
}
return fmt.Errorf("unexpected error creating recycler pod: %+v", err)
}
err = waitForPod(pod, recyclerClient, podCh)
// In all cases delete the recycler pod and log its result.
glog.V(2).Infof("deleting recycler pod %s/%s", pod.Namespace, pod.Name)
deleteErr := recyclerClient.DeletePod(pod.Name, pod.Namespace)
if deleteErr != nil {
glog.Errorf("failed to delete recycler pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Returning recycler error is preferred, the pod will be deleted again on
// the next retry.
if err != nil {
return fmt.Errorf("failed to recycle volume: %s", err)
}
// Recycle succeeded but we failed to delete the recycler pod. Report it,
// the controller will re-try recycling the PV again shortly.
if deleteErr != nil {
return fmt.Errorf("failed to delete recycler pod: %s", deleteErr)
}
return nil
}
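// Illustrative sketch (an assumption, not part of the upstream file): how a
// volume plugin's Recycle() implementation might drive the exported helper.
// The pod template, image and scrub command below are hypothetical
// placeholders, and kubeClient is assumed to be an injected clientset.
func exampleRecyclePV(pvName string, kubeClient clientset.Interface) error {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: metav1.NamespaceDefault,
			// Name is overwritten with "recycler-for-<pvName>" by the helper.
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "recycler",
				Image:   "busybox",
				Command: []string{"/bin/sh", "-c", "rm -rf /scrub/..?* /scrub/.[!.]* /scrub/*"},
			}},
		},
	}
	recorder := func(eventtype, message string) {
		// Forward recycler pod events to the PV being recycled.
		glog.V(3).Infof("recycler event for %s: %s %s", pvName, eventtype, message)
	}
	return RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, kubeClient, recorder)
}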
// waitForPod watches the pod until it finishes and sends all events on the
// pod to the PV.
func waitForPod(pod *v1.Pod, recyclerClient recyclerClient, podCh <-chan watch.Event) error {
for {
event, ok := <-podCh
if !ok {
return fmt.Errorf("recycler pod %q watch channel had been closed", pod.Name)
}
switch event.Object.(type) {
case *v1.Pod:
// POD changed
pod := event.Object.(*v1.Pod)
glog.V(4).Infof("recycler pod update received: %s %s/%s %s", event.Type, pod.Namespace, pod.Name, pod.Status.Phase)
switch event.Type {
case watch.Added, watch.Modified:
if pod.Status.Phase == v1.PodSucceeded {
// Recycle succeeded.
return nil
}
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Message != "" {
return fmt.Errorf(pod.Status.Message)
} else {
return fmt.Errorf("pod failed, pod.Status.Message unknown.")
}
}
case watch.Deleted:
return fmt.Errorf("recycler pod was deleted")
case watch.Error:
return fmt.Errorf("recycler pod watcher failed")
}
case *v1.Event:
// Event received
podEvent := event.Object.(*v1.Event)
glog.V(4).Infof("recycler event received: %s %s/%s %s/%s %s", event.Type, podEvent.Namespace, podEvent.Name, podEvent.InvolvedObject.Namespace, podEvent.InvolvedObject.Name, podEvent.Message)
if event.Type == watch.Added {
recyclerClient.Event(podEvent.Type, podEvent.Message)
}
}
}
}
// recyclerClient abstracts access to a Pod by providing a narrower interface.
// This makes it easier to mock a client for testing.
type recyclerClient interface {
CreatePod(pod *v1.Pod) (*v1.Pod, error)
GetPod(name, namespace string) (*v1.Pod, error)
DeletePod(name, namespace string) error
// WatchPod returns a ListWatch for watching a pod. The stopChannel is used
// to close the reflector backing the watch. The caller is responsible for
// deferring a close on the channel to stop the reflector.
WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error)
// Event sends an event to the volume that is being recycled.
Event(eventtype, message string)
}
func newRecyclerClient(client clientset.Interface, recorder RecycleEventRecorder) recyclerClient {
return &realRecyclerClient{
client,
recorder,
}
}
type realRecyclerClient struct {
client clientset.Interface
recorder RecycleEventRecorder
}
func (c *realRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
return c.client.CoreV1().Pods(pod.Namespace).Create(pod)
}
func (c *realRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
return c.client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (c *realRecyclerClient) DeletePod(name, namespace string) error {
return c.client.CoreV1().Pods(namespace).Delete(name, nil)
}
func (c *realRecyclerClient) Event(eventtype, message string) {
c.recorder(eventtype, message)
}
func (c *realRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
podSelector, err := fields.ParseSelector("metadata.name=" + name)
if err != nil {
return nil, err
}
options := metav1.ListOptions{
FieldSelector: podSelector.String(),
Watch: true,
}
podWatch, err := c.client.CoreV1().Pods(namespace).Watch(options)
if err != nil {
return nil, err
}
eventSelector, _ := fields.ParseSelector("involvedObject.name=" + name)
eventWatch, err := c.client.CoreV1().Events(namespace).Watch(metav1.ListOptions{
FieldSelector: eventSelector.String(),
Watch: true,
})
if err != nil {
podWatch.Stop()
return nil, err
}
eventCh := make(chan watch.Event, 30)
go func() {
defer eventWatch.Stop()
defer podWatch.Stop()
defer close(eventCh)
var podWatchChannelClosed bool
var eventWatchChannelClosed bool
for {
select {
case _ = <-stopChannel:
return
case podEvent, ok := <-podWatch.ResultChan():
if !ok {
podWatchChannelClosed = true
} else {
eventCh <- podEvent
}
case eventEvent, ok := <-eventWatch.ResultChan():
if !ok {
eventWatchChannelClosed = true
} else {
eventCh <- eventEvent
}
}
if podWatchChannelClosed && eventWatchChannelClosed {
break
}
}
}()
return eventCh, nil
}

View file

@ -0,0 +1,235 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package recyclerclient
import (
"fmt"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
api "k8s.io/kubernetes/pkg/apis/core"
)
type testcase struct {
// Input of the test
name string
existingPod *v1.Pod
createPod *v1.Pod
// eventSequence is list of events that are simulated during recycling. It
// can be either event generated by a recycler pod or a state change of
// the pod. (see newPodEvent and newEvent below).
eventSequence []watch.Event
// Expected output.
// expectedEvents is list of events that were sent to the volume that was
// recycled.
expectedEvents []mockEvent
expectedError string
}
func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event {
return watch.Event{
Type: eventtype,
Object: newPod(name, phase, message),
}
}
func newEvent(eventtype, message string) watch.Event {
return watch.Event{
Type: watch.Added,
Object: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
},
Reason: "MockEvent",
Message: message,
Type: eventtype,
},
}
}
func newPod(name string, phase v1.PodPhase, message string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: name,
},
Status: v1.PodStatus{
Phase: phase,
Message: message,
},
}
}
func TestRecyclerPod(t *testing.T) {
tests := []testcase{
{
// Test recycler success with some events
name: "RecyclerSuccess",
createPod: newPod("podRecyclerSuccess", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
newEvent(v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""),
newEvent(v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""),
newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"),
newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
{v1.EventTypeNormal, "pulling image \"k8s.gcr.io/busybox\""},
{v1.EventTypeNormal, "Successfully pulled image \"k8s.gcr.io/busybox\""},
{v1.EventTypeNormal, "Created container with docker id 83d929aeac82"},
{v1.EventTypeNormal, "Started container with docker id 83d929aeac82"},
},
expectedError: "",
},
{
// Test recycler failure with some events
name: "RecyclerFailure",
createPod: newPod("podRecyclerFailure", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
{v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
{v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"},
},
expectedError: "failed to recycle volume: Pod was active on the node longer than specified deadline",
},
{
// Recycler pod gets deleted
name: "RecyclerDeleted",
createPod: newPod("podRecyclerDeleted", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
},
expectedError: "failed to recycle volume: recycler pod was deleted",
},
{
// Another recycler pod is already running
name: "RecyclerRunning",
existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"),
eventSequence: []watch.Event{},
expectedError: "old recycler pod found, will retry later",
},
}
for _, test := range tests {
t.Logf("Test %q", test.name)
client := &mockRecyclerClient{
events: test.eventSequence,
pod: test.existingPod,
}
err := internalRecycleVolumeByWatchingPodUntilCompletion(test.createPod.Name, test.createPod, client)
receivedError := ""
if err != nil {
receivedError = err.Error()
}
if receivedError != test.expectedError {
t.Errorf("Test %q failed, expected error %q, got %q", test.name, test.expectedError, receivedError)
continue
}
if !client.deletedCalled {
t.Errorf("Test %q failed, expected deferred client.Delete to be called on recycler pod", test.name)
continue
}
for i, expectedEvent := range test.expectedEvents {
if len(client.receivedEvents) <= i {
t.Errorf("Test %q failed, expected event %d: %q not received", test.name, i, expectedEvent.message)
continue
}
receivedEvent := client.receivedEvents[i]
if expectedEvent.eventtype != receivedEvent.eventtype {
t.Errorf("Test %q failed, event %d does not match: expected eventtype %q, got %q", test.name, i, expectedEvent.eventtype, receivedEvent.eventtype)
}
if expectedEvent.message != receivedEvent.message {
t.Errorf("Test %q failed, event %d does not match: expected message %q, got %q", test.name, i, expectedEvent.message, receivedEvent.message)
}
}
for i := len(test.expectedEvents); i < len(client.receivedEvents); i++ {
t.Errorf("Test %q failed, unexpected event received: %s, %q", test.name, client.receivedEvents[i].eventtype, client.receivedEvents[i].message)
}
}
}
type mockRecyclerClient struct {
pod *v1.Pod
deletedCalled bool
receivedEvents []mockEvent
events []watch.Event
}
type mockEvent struct {
eventtype, message string
}
func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
if c.pod == nil {
c.pod = pod
return c.pod, nil
}
// Simulate "already exists" error
return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name)
}
func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
if c.pod != nil {
return c.pod, nil
} else {
return nil, fmt.Errorf("pod does not exist")
}
}
func (c *mockRecyclerClient) DeletePod(name, namespace string) error {
c.deletedCalled = true
return nil
}
func (c *mockRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
eventCh := make(chan watch.Event, 0)
go func() {
for _, e := range c.events {
eventCh <- e
}
}()
return eventCh, nil
}
func (c *mockRecyclerClient) Event(eventtype, message string) {
c.receivedEvents = append(c.receivedEvents, mockEvent{eventtype, message})
}

vendor/k8s.io/kubernetes/pkg/volume/util/resize_util.go generated vendored Normal file
View file

@ -0,0 +1,125 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"encoding/json"
"fmt"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
clientset "k8s.io/client-go/kubernetes"
)
var (
knownResizeConditions map[v1.PersistentVolumeClaimConditionType]bool = map[v1.PersistentVolumeClaimConditionType]bool{
v1.PersistentVolumeClaimFileSystemResizePending: true,
v1.PersistentVolumeClaimResizing: true,
}
)
type resizeProcessStatus struct {
condition v1.PersistentVolumeClaimCondition
processed bool
}
// ClaimToClaimKey returns the namespace/name string for the PVC
func ClaimToClaimKey(claim *v1.PersistentVolumeClaim) string {
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
}
// MarkFSResizeFinished marks file system resizing as done
func MarkFSResizeFinished(
pvc *v1.PersistentVolumeClaim,
capacity v1.ResourceList,
kubeClient clientset.Interface) error {
newPVC := pvc.DeepCopy()
newPVC.Status.Capacity = capacity
newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
return err
}
// PatchPVCStatus updates PVC status using PATCH verb
func PatchPVCStatus(
oldPVC *v1.PersistentVolumeClaim,
newPVC *v1.PersistentVolumeClaim,
kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
pvcName := oldPVC.Name
oldData, err := json.Marshal(oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal oldData for pvc %q with %v", pvcName, err)
}
newData, err := json.Marshal(newPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to marshal newData for pvc %q with %v", pvcName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPVC)
if err != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to CreateTwoWayMergePatch for pvc %q with %v ", pvcName, err)
}
updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
Patch(pvcName, types.StrategicMergePatchType, patchBytes, "status")
if updateErr != nil {
return nil, fmt.Errorf("PatchPVCStatus.Failed to patch PVC %q with %v", pvcName, updateErr)
}
return updatedClaim, nil
}
// MergeResizeConditionOnPVC updates pvc with requested resize conditions
// leaving other conditions untouched.
func MergeResizeConditionOnPVC(
pvc *v1.PersistentVolumeClaim,
resizeConditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
resizeConditionMap := map[v1.PersistentVolumeClaimConditionType]*resizeProcessStatus{}
for _, condition := range resizeConditions {
resizeConditionMap[condition.Type] = &resizeProcessStatus{condition, false}
}
oldConditions := pvc.Status.Conditions
newConditions := []v1.PersistentVolumeClaimCondition{}
for _, condition := range oldConditions {
// If the condition is not a resize-related type, keep it.
if _, ok := knownResizeConditions[condition.Type]; !ok {
newConditions = append(newConditions, condition)
continue
}
if newCondition, ok := resizeConditionMap[condition.Type]; ok {
if newCondition.condition.Status != condition.Status {
newConditions = append(newConditions, newCondition.condition)
} else {
newConditions = append(newConditions, condition)
}
newCondition.processed = true
}
}
// append all unprocessed conditions
for _, newCondition := range resizeConditionMap {
if !newCondition.processed {
newConditions = append(newConditions, newCondition.condition)
}
}
pvc.Status.Conditions = newConditions
return pvc
}

View file

@ -0,0 +1,167 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type conditionMergeTestCase struct {
description string
pvc *v1.PersistentVolumeClaim
newConditions []v1.PersistentVolumeClaimCondition
finalCondtions []v1.PersistentVolumeClaimCondition
}
func TestMergeResizeCondition(t *testing.T) {
currentTime := metav1.Now()
pvc := getPVC([]v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
})
noConditionPVC := getPVC([]v1.PersistentVolumeClaimCondition{})
conditionFalseTime := metav1.Now()
newTime := metav1.NewTime(time.Now().Add(1 * time.Hour))
testCases := []conditionMergeTestCase{
{
description: "when removing all conditions",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{},
finalCondtions: []v1.PersistentVolumeClaimCondition{},
},
{
description: "adding new condition",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimFileSystemResizePending,
Status: v1.ConditionTrue,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimFileSystemResizePending,
Status: v1.ConditionTrue,
},
},
},
{
description: "adding same condition with new timestamp",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: newTime,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
},
},
{
description: "adding same condition but with different status",
pvc: pvc.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionFalse,
LastTransitionTime: conditionFalseTime,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionFalse,
LastTransitionTime: conditionFalseTime,
},
},
},
{
description: "when no condition exists on pvc",
pvc: noConditionPVC.DeepCopy(),
newConditions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
},
finalCondtions: []v1.PersistentVolumeClaimCondition{
{
Type: v1.PersistentVolumeClaimResizing,
Status: v1.ConditionTrue,
LastTransitionTime: currentTime,
},
},
},
}
for _, testcase := range testCases {
updatePVC := MergeResizeConditionOnPVC(testcase.pvc, testcase.newConditions)
updateConditions := updatePVC.Status.Conditions
if !reflect.DeepEqual(updateConditions, testcase.finalCondtions) {
t.Errorf("Expected updated conditions for test %s to be %v but got %v",
testcase.description,
testcase.finalCondtions, updateConditions)
}
}
}
func getPVC(conditions []v1.PersistentVolumeClaimCondition) *v1.PersistentVolumeClaim {
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "resize"},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
},
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
Conditions: conditions,
Capacity: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("2Gi"),
},
},
}
return pvc
}

View file

@ -21,8 +21,8 @@ import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"syscall"
"github.com/golang/glog"
"k8s.io/api/core/v1"
@ -30,22 +30,51 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"reflect"
"hash/fnv"
"math/rand"
"strconv"
"k8s.io/apimachinery/pkg/api/resource"
utypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume/util/types"
)
const (
readyFileName = "ready"
losetupPath = "losetup"
// GB - GigaByte size
GB = 1000 * 1000 * 1000
// GIB - GibiByte size
GIB = 1024 * 1024 * 1024
ErrDeviceNotFound = "device not found"
ErrDeviceNotSupported = "device not supported"
ErrNotAvailable = "not available"
readyFileName = "ready"
// ControllerManagedAttachAnnotation is the key of the annotation on Node
// objects that indicates attach/detach operations for the node should be
// managed by the attach/detach controller
ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
// KeepTerminatedPodVolumesAnnotation is the key of the annotation on Node
// that decides if pod volumes are unmounted when pod is terminated
KeepTerminatedPodVolumesAnnotation string = "volumes.kubernetes.io/keep-terminated-pod-volumes"
// VolumeGidAnnotationKey is the of the annotation on the PersistentVolume
// object that specifies a supplemental GID.
VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
// VolumeDynamicallyCreatedByKey is the key of the annotation on PersistentVolume
// object created dynamically
VolumeDynamicallyCreatedByKey = "kubernetes.io/createdby"
)
// IsReady checks for the existence of a regular file
@ -96,29 +125,42 @@ func UnmountPath(mountPath string, mounter mount.Interface) error {
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error {
if pathExists, pathErr := PathExists(mountPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
corruptedMnt := isCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}
if err != nil {
return err
}
// doUnmountMountPoint is a common unmount routine that unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts.
// if corruptedMnt is true, the mountPath is a corrupted mount point; it is
// passed as an argument for ease of testing
func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
if !corruptedMnt {
var notMnt bool
var err error
if extensiveMountPointCheck {
notMnt, err = mount.IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
if err != nil {
return err
}
if notMnt {
glog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}
// Unmount the mount path
@ -128,7 +170,7 @@ func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMount
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath)
if mntErr != nil {
return err
return mntErr
}
if notMnt {
glog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
@ -144,11 +186,32 @@ func PathExists(path string) (bool, error) {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if isCorruptedMnt(err) {
return true, err
} else {
return false, err
}
}
// isCorruptedMnt returns true if err indicates a corrupted mount point
func isCorruptedMnt(err error) bool {
if err == nil {
return false
}
var underlyingError error
switch pe := err.(type) {
case nil:
return false
case *os.PathError:
underlyingError = pe.Err
case *os.LinkError:
underlyingError = pe.Err
case *os.SyscallError:
underlyingError = pe.Err
}
return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE
}
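// Illustrative sketch (an assumption, not part of the upstream file):
// isCorruptedMnt is package-internal, so a caller inside this util package
// could classify a stale NFS mount error like this; the path is hypothetical.
func exampleIsCorruptedMnt() (bool, bool) {
	staleErr := &os.PathError{Op: "stat", Path: "/mnt/stale-nfs", Err: syscall.ENOTCONN}
	// A PathError wrapping ENOTCONN (or ESTALE) counts as corrupted; a plain
	// "not exist" error does not.
	return isCorruptedMnt(staleErr), isCorruptedMnt(os.ErrNotExist) // true, false
}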
// GetSecretForPod locates secret by name in the pod's namespace and returns secret map
func GetSecretForPod(pod *v1.Pod, secretName string, kubeClient clientset.Interface) (map[string]string, error) {
secret := make(map[string]string)
@ -203,6 +266,13 @@ func GetClassForVolume(kubeClient clientset.Interface, pv *v1.PersistentVolume)
// CheckNodeAffinity looks at the PV node affinity, and checks if the node has the same corresponding labels
// This ensures that we don't mount a volume that doesn't belong to this node
func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
if err := checkAlphaNodeAffinity(pv, nodeLabels); err != nil {
return err
}
return checkVolumeNodeAffinity(pv, nodeLabels)
}
func checkAlphaNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
affinity, err := v1helper.GetStorageNodeAffinityFromAnnotation(pv.Annotations)
if err != nil {
return fmt.Errorf("Error getting storage node affinity: %v", err)
@ -227,6 +297,27 @@ func CheckNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) er
return nil
}
func checkVolumeNodeAffinity(pv *v1.PersistentVolume, nodeLabels map[string]string) error {
if pv.Spec.NodeAffinity == nil {
return nil
}
if pv.Spec.NodeAffinity.Required != nil {
terms := pv.Spec.NodeAffinity.Required.NodeSelectorTerms
glog.V(10).Infof("Match for Required node selector terms %+v", terms)
for _, term := range terms {
selector, err := v1helper.NodeSelectorRequirementsAsSelector(term.MatchExpressions)
if err != nil {
return fmt.Errorf("Failed to parse MatchExpressions: %v", err)
}
if !selector.Matches(labels.Set(nodeLabels)) {
return fmt.Errorf("NodeSelectorTerm %+v does not match node labels", term.MatchExpressions)
}
}
}
return nil
}
// LoadPodFromFile will read, decode, and return a Pod from a file.
func LoadPodFromFile(filePath string) (*v1.Pod, error) {
if filePath == "" {
@ -279,200 +370,382 @@ func stringToSet(str, delimiter string) (sets.String, error) {
return zonesSet, nil
}
// BlockVolumePathHandler defines a set of operations for handling block volume-related operations
type BlockVolumePathHandler interface {
// MapDevice creates a symbolic link to block device under specified map path
MapDevice(devicePath string, mapPath string, linkName string) error
// UnmapDevice removes a symbolic link to block device under specified map path
UnmapDevice(mapPath string, linkName string) error
// RemovePath removes a file or directory on specified map path
RemoveMapPath(mapPath string) error
// IsSymlinkExist retruns true if specified symbolic link exists
IsSymlinkExist(mapPath string) (bool, error)
// GetDeviceSymlinkRefs searches symbolic links under global map path
GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error)
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
AttachFileDevice(path string) (string, error)
// GetLoopDevice returns the full path to the loop device associated with the given path.
GetLoopDevice(path string) (string, error)
// RemoveLoopDevice removes specified loopback device
RemoveLoopDevice(device string) error
// CalculateTimeoutForVolume calculates time for a Recycler pod to complete a
// recycle operation. The return value is either the minimumTimeout or the
// timeoutIncrement per Gi of storage size, whichever is greater.
func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *v1.PersistentVolume) int64 {
giQty := resource.MustParse("1Gi")
pvQty := pv.Spec.Capacity[v1.ResourceStorage]
giSize := giQty.Value()
pvSize := pvQty.Value()
timeout := (pvSize / giSize) * int64(timeoutIncrement)
if timeout < int64(minimumTimeout) {
return int64(minimumTimeout)
}
return timeout
}
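// Illustrative sketch (an assumption, not part of the upstream file): for a
// 100Gi PV with a 60s floor and 30s per Gi, the timeout is 100*30 = 3000s,
// which exceeds the floor and is therefore returned as-is.
func exampleRecycleTimeout() int64 {
	pv := &v1.PersistentVolume{
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse("100Gi"),
			},
		},
	}
	return CalculateTimeoutForVolume(60, 30, pv) // 3000
}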
// NewBlockVolumePathHandler returns a new instance of BlockVolumeHandler.
func NewBlockVolumePathHandler() BlockVolumePathHandler {
var volumePathHandler VolumePathHandler
return volumePathHandler
// RoundUpSize calculates how many allocation units are needed to accommodate
// a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS
// allocates volumes in gibibyte-sized chunks,
// RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2'
// (2 GiB is the smallest allocatable volume that can hold 1500MiB)
func RoundUpSize(volumeSizeBytes int64, allocationUnitBytes int64) int64 {
return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}
// VolumePathHandler is path related operation handlers for block volume
type VolumePathHandler struct {
// RoundUpToGB rounds up given quantity to chunks of GB
func RoundUpToGB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GB)
}
// MapDevice creates a symbolic link to block device under specified map path
func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error {
// Example of global map path:
// globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid}
// linkName: {podUid}
//
// Example of pod device map path:
// podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName}
// linkName: {volumeName}
if len(devicePath) == 0 {
return fmt.Errorf("Failed to map device to map path. devicePath is empty")
}
if len(mapPath) == 0 {
return fmt.Errorf("Failed to map device to map path. mapPath is empty")
}
if !filepath.IsAbs(mapPath) {
return fmt.Errorf("The map path should be absolute: map path: %s", mapPath)
}
glog.V(5).Infof("MapDevice: devicePath %s", devicePath)
glog.V(5).Infof("MapDevice: mapPath %s", mapPath)
glog.V(5).Infof("MapDevice: linkName %s", linkName)
// RoundUpToGiB rounds up the given quantity to chunks of GiB
func RoundUpToGiB(size resource.Quantity) int64 {
requestBytes := size.Value()
return RoundUpSize(requestBytes, GIB)
}
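// Illustrative sketch (an assumption, not part of the upstream file): a
// 1500Mi request needs two 1GiB allocation units, and a 1500M request needs
// two 1GB units, because RoundUpSize always rounds up to whole units.
func exampleRoundUp() (int64, int64) {
	gib := RoundUpToGiB(resource.MustParse("1500Mi")) // 2
	gb := RoundUpToGB(resource.MustParse("1500M"))    // 2
	return gib, gb
}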
// Check and create mapPath
_, err := os.Stat(mapPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate map path: %s", mapPath)
// GenerateVolumeName returns a PV name with clusterName prefix. The function
// should be used to generate a name of GCE PD or Cinder volume. It basically
// adds "<clusterName>-dynamic-" before the PV name, making sure the resulting
// string fits given length and cuts "dynamic" if not.
func GenerateVolumeName(clusterName, pvName string, maxLength int) string {
prefix := clusterName + "-dynamic"
pvLen := len(pvName)
// cut the "<clusterName>-dynamic" to fit full pvName into maxLength
// +1 for the '-' dash
if pvLen+1+len(prefix) > maxLength {
prefix = prefix[:maxLength-pvLen-1]
}
return prefix + "-" + pvName
}
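// Illustrative sketch (an assumption, not part of the upstream file; the
// cluster and PV names are hypothetical): with a short length limit the
// "-dynamic" prefix is truncated so the full PV name still fits.
func exampleGenerateVolumeName() (string, string) {
	full := GenerateVolumeName("kubernetes", "pvc-abc123", 63) // "kubernetes-dynamic-pvc-abc123"
	cut := GenerateVolumeName("kubernetes", "pvc-abc123", 25)  // "kubernetes-dyn-pvc-abc123"
	return full, cut
}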
// GetPath returns the mounter's path, or an error if the path is empty.
func GetPath(mounter volume.Mounter) (string, error) {
path := mounter.GetPath()
if path == "" {
return "", fmt.Errorf("Path is empty %s", reflect.TypeOf(mounter).String())
}
return path, nil
}
// ChooseZoneForVolume implements our heuristics for choosing a zone for volume creation based on the volume name
// Volumes are generally round-robin-ed across all active zones, using the hash of the PVC Name.
// However, if the PVCName ends with `-<integer>`, we will hash the prefix, and then add the integer to the hash.
// This means that a StatefulSet's volumes (`claimname-statefulsetname-id`) will spread across available zones,
// assuming the id values are consecutive.
func ChooseZoneForVolume(zones sets.String, pvcName string) string {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
glog.V(2).Infof("Creating volume for PVC %q; chose zone=%q from zones=%q", pvcName, zone, zoneSlice)
return zone
}
// ChooseZonesForVolume is identical to ChooseZoneForVolume, but selects a multiple zones, for multi-zone disks.
func ChooseZonesForVolume(zones sets.String, pvcName string, numZones uint32) sets.String {
// We create the volume in a zone determined by the name
// Eventually the scheduler will coordinate placement into an available zone
hash, index := getPVCNameHashAndIndexOffset(pvcName)
// Zones.List returns zones in a consistent order (sorted)
// We do have a potential failure case where volumes will not be properly spread,
// if the set of zones changes during StatefulSet volume creation. However, this is
// probably relatively unlikely because we expect the set of zones to be essentially
// static for clusters.
// Hopefully we can address this problem if/when we do full scheduler integration of
// PVC placement (which could also e.g. avoid putting volumes in overloaded or
// unhealthy zones)
zoneSlice := zones.List()
replicaZones := sets.NewString()
startingIndex := index * numZones
for index = startingIndex; index < startingIndex+numZones; index++ {
zone := zoneSlice[(hash+index)%uint32(len(zoneSlice))]
replicaZones.Insert(zone)
}
glog.V(2).Infof("Creating volume for replicated PVC %q; chosen zones=%q from zones=%q",
pvcName, replicaZones.UnsortedList(), zoneSlice)
return replicaZones
}
func getPVCNameHashAndIndexOffset(pvcName string) (hash uint32, index uint32) {
if pvcName == "" {
// We should always be called with a name; this shouldn't happen
glog.Warningf("No name defined during volume create; choosing random zone")
hash = rand.Uint32()
} else {
hashString := pvcName
// Heuristic to make sure that volumes in a StatefulSet are spread across zones
// StatefulSet PVCs are (currently) named ClaimName-StatefulSetName-Id,
// where Id is an integer index.
// Note though that if a StatefulSet pod has multiple claims, we need them to be
// in the same zone, because otherwise the pod will be unable to mount both volumes,
// and will be unschedulable. So we hash _only_ the "StatefulSetName" portion when
// it looks like `ClaimName-StatefulSetName-Id`.
// We continue to round-robin volume names that look like `Name-Id` also; this is a useful
// feature for users that are creating statefulset-like functionality without using statefulsets.
lastDash := strings.LastIndexByte(pvcName, '-')
if lastDash != -1 {
statefulsetIDString := pvcName[lastDash+1:]
statefulsetID, err := strconv.ParseUint(statefulsetIDString, 10, 32)
if err == nil {
// Offset by the statefulsetID, so we round-robin across zones
index = uint32(statefulsetID)
// We still hash the volume name, but only the prefix
hashString = pvcName[:lastDash]
// In the special case where it looks like `ClaimName-StatefulSetName-Id`,
// hash only the StatefulSetName, so that different claims on the same StatefulSet
// member end up in the same zone.
// Note that StatefulSetName (and ClaimName) might themselves both have dashes.
// We actually just take the portion after the final - of ClaimName-StatefulSetName.
// For our purposes it doesn't much matter (just suboptimal spreading).
lastDash := strings.LastIndexByte(hashString, '-')
if lastDash != -1 {
hashString = hashString[lastDash+1:]
}
glog.V(2).Infof("Detected StatefulSet-style volume name %q; index=%d", pvcName, index)
}
}
// We hash the (base) volume name, so we don't bias towards the first N zones
h := fnv.New32()
h.Write([]byte(hashString))
hash = h.Sum32()
}
return hash, index
}
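// Illustrative sketch (an assumption, not part of the upstream file; the zone
// and claim names are hypothetical): both claims below belong to StatefulSet
// member "web-2", so only the "web" prefix is hashed and both share index
// offset 2, which places them in the same zone.
func exampleChooseZone() bool {
	zones := sets.NewString("us-central1-a", "us-central1-b", "us-central1-c")
	z1 := ChooseZoneForVolume(zones, "data-web-2")
	z2 := ChooseZoneForVolume(zones, "logs-web-2")
	return z1 == z2 // true
}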
// UnmountViaEmptyDir delegates the tear down operation for secret, configmap, git_repo and downwardapi
// to empty_dir
func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error {
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", volName, podUID, dir)
// Wrap EmptyDir, let it do the teardown.
wrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)
if err != nil {
return err
}
if err = os.MkdirAll(mapPath, 0750); err != nil {
return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err)
}
// Remove any old symbolic link (or file), then create a new one, because the
// current symbolic link may be stale across node reboots.
linkPath := path.Join(mapPath, string(linkName))
if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
return err
}
err = os.Symlink(devicePath, linkPath)
return err
return wrapped.TearDownAt(dir)
}
// UnmapDevice removes a symbolic link associated to block device under specified map path
func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to unmap device from map path. mapPath is empty")
}
glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath)
glog.V(5).Infof("UnmapDevice: linkName %s", linkName)
// MountOptionFromSpec extracts and joins mount options from volume spec with supplied options
func MountOptionFromSpec(spec *volume.Spec, options ...string) []string {
pv := spec.PersistentVolume
// Check symbolic link exists
linkPath := path.Join(mapPath, string(linkName))
if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil {
return checkErr
} else if !islinkExist {
glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath)
return nil
if pv != nil {
// Use beta annotation first
if mo, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
moList := strings.Split(mo, ",")
return JoinMountOptions(moList, options)
}
if len(pv.Spec.MountOptions) > 0 {
return JoinMountOptions(pv.Spec.MountOptions, options)
}
}
err := os.Remove(linkPath)
return err
return options
}
// RemoveMapPath removes a file or directory on specified map path
func (v VolumePathHandler) RemoveMapPath(mapPath string) error {
if len(mapPath) == 0 {
return fmt.Errorf("Failed to remove map path. mapPath is empty")
// JoinMountOptions joins mount options eliminating duplicates
func JoinMountOptions(userOptions []string, systemOptions []string) []string {
allMountOptions := sets.NewString()
for _, mountOption := range userOptions {
if len(mountOption) > 0 {
allMountOptions.Insert(mountOption)
}
}
glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath)
err := os.RemoveAll(mapPath)
if err != nil && !os.IsNotExist(err) {
return err
for _, mountOption := range systemOptions {
allMountOptions.Insert(mountOption)
}
return allMountOptions.UnsortedList()
}
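// Illustrative sketch (an assumption, not part of the upstream file): user and
// system options are merged with duplicates and empty user entries dropped;
// the result order is unspecified because a string set is used internally.
func exampleJoinMountOptions() []string {
	// Contains "ro", "nfsvers=3" and "hard" exactly once each.
	return JoinMountOptions([]string{"ro", "nfsvers=3", ""}, []string{"ro", "hard"})
}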
// ValidateZone returns:
// - an error in case zone is an empty string or contains only any combination of spaces and tab characters
// - nil otherwise
func ValidateZone(zone string) error {
if strings.TrimSpace(zone) == "" {
return fmt.Errorf("the provided %q zone is not valid, it's an empty string or contains only spaces and tab characters", zone)
}
return nil
}
// IsSymlinkExist returns true if the specified file exists and is a symbolic link.
// If the file doesn't exist, or exists but is not a symbolic link, it returns false with no error.
// In other cases, it returns false with the error from Lstat().
func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) {
fi, err := os.Lstat(mapPath)
if err == nil {
// If the file exists and is a symbolic link, return true and no error
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return true, nil
// AccessModesContains returns whether the requested mode is contained by modes
func AccessModesContains(modes []v1.PersistentVolumeAccessMode, mode v1.PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
// If the file exists but is not a symbolic link, return false and no error
return false, nil
}
// If file doesn't exist, return false and no error
if os.IsNotExist(err) {
return false, nil
}
// Return error from Lstat()
return false, err
return false
}
// GetDeviceSymlinkRefs searches symbolic links under global map path
func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) {
var refs []string
files, err := ioutil.ReadDir(mapPath)
if err != nil {
return nil, fmt.Errorf("Directory cannot read %v", err)
}
for _, file := range files {
if file.Mode()&os.ModeSymlink != os.ModeSymlink {
continue
// AccessModesContainedInAll returns whether all of the requested modes are contained by modes
func AccessModesContainedInAll(indexedModes []v1.PersistentVolumeAccessMode, requestedModes []v1.PersistentVolumeAccessMode) bool {
for _, mode := range requestedModes {
if !AccessModesContains(indexedModes, mode) {
return false
}
filename := file.Name()
filepath, err := os.Readlink(path.Join(mapPath, filename))
}
return true
}
// GetWindowsPath gets a Windows path
func GetWindowsPath(path string) string {
windowsPath := strings.Replace(path, "/", "\\", -1)
if strings.HasPrefix(windowsPath, "\\") {
windowsPath = "c:" + windowsPath
}
return windowsPath
}
// GetUniquePodName returns a unique identifier to reference a pod by
func GetUniquePodName(pod *v1.Pod) types.UniquePodName {
return types.UniquePodName(pod.UID)
}
// GetUniqueVolumeName returns a unique name representing the volume/plugin.
// Caller should ensure that volumeName is a name/ID uniquely identifying the
// actual backing device, directory, path, etc. for a particular volume.
// The returned name can be used to uniquely reference the volume, for example,
// to prevent operations (attach/detach or mount/unmount) from being triggered
// on the same volume.
func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName {
return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
}
// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name
// for a non-attachable volume.
func GetUniqueVolumeNameForNonAttachableVolume(
podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName {
return v1.UniqueVolumeName(
fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))
}
// GetUniqueVolumeNameFromSpec uses the given VolumePlugin to generate a unique
// name representing the volume defined in the specified volume spec.
// This returned name can be used to uniquely reference the actual backing
// device, directory, path, etc. referenced by the given volumeSpec.
// If the given plugin does not support the volume spec, this returns an error.
func GetUniqueVolumeNameFromSpec(
volumePlugin volume.VolumePlugin,
volumeSpec *volume.Spec) (v1.UniqueVolumeName, error) {
if volumePlugin == nil {
return "", fmt.Errorf(
"volumePlugin should not be nil. volumeSpec.Name=%q",
volumeSpec.Name())
}
volumeName, err := volumePlugin.GetVolumeName(volumeSpec)
if err != nil || volumeName == "" {
return "", fmt.Errorf(
"failed to GetVolumeName from volumePlugin for volumeSpec %q err=%v",
volumeSpec.Name(),
err)
}
return GetUniqueVolumeName(
volumePlugin.GetPluginName(),
volumeName),
nil
}
// IsPodTerminated checks if pod is terminated
func IsPodTerminated(pod *v1.Pod, podStatus v1.PodStatus) bool {
return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses))
}
// notRunning returns true if every status is terminated or waiting, or the status list
// is empty.
func notRunning(statuses []v1.ContainerStatus) bool {
for _, status := range statuses {
if status.State.Terminated == nil && status.State.Waiting == nil {
return false
}
}
return true
}
// SplitUniqueName splits the unique name to plugin name and volume name strings. It expects the uniqueName to follow
// the format plugin_name/volume_name and the plugin name must be namespaced as described by the plugin interface,
// i.e. namespace/plugin containing exactly one '/'. This means the unique name will always be in the form of
// plugin_namespace/plugin/volume_name, see k8s.io/kubernetes/pkg/volume/plugins.go VolumePlugin interface
// description and pkg/volume/util/volumehelper/volumehelper.go GetUniqueVolumeNameFromSpec that constructs
// the unique volume names.
func SplitUniqueName(uniqueName v1.UniqueVolumeName) (string, string, error) {
components := strings.SplitN(string(uniqueName), "/", 3)
if len(components) != 3 {
return "", "", fmt.Errorf("cannot split volume unique name %s to plugin/volume components", uniqueName)
}
pluginName := fmt.Sprintf("%s/%s", components[0], components[1])
return pluginName, components[2], nil
}
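// Illustrative sketch (an assumption, not part of the upstream file; the
// plugin and volume names are hypothetical): a unique name produced by
// GetUniqueVolumeName splits back into the namespaced plugin name and the
// backing volume name.
func exampleSplitUniqueName() (string, string, error) {
	unique := GetUniqueVolumeName("kubernetes.io/gce-pd", "pd-disk-1")
	// plugin == "kubernetes.io/gce-pd", volName == "pd-disk-1", err == nil
	plugin, volName, err := SplitUniqueName(unique)
	return plugin, volName, err
}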
// NewSafeFormatAndMountFromHost creates a new SafeFormatAndMount with Mounter
// and Exec taken from given VolumeHost.
func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {
mounter := host.GetMounter(pluginName)
exec := host.GetExec(pluginName)
return &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}
}
// GetVolumeMode retrieves VolumeMode from pv.
// If the volume doesn't have a PersistentVolume, it's an inline volume and
// volumeMode is returned as Filesystem to keep the existing behavior.
func GetVolumeMode(volumeSpec *volume.Spec) (v1.PersistentVolumeMode, error) {
if volumeSpec == nil || volumeSpec.PersistentVolume == nil {
return v1.PersistentVolumeFilesystem, nil
}
if volumeSpec.PersistentVolume.Spec.VolumeMode != nil {
return *volumeSpec.PersistentVolume.Spec.VolumeMode, nil
}
return "", fmt.Errorf("cannot get volumeMode for volume: %v", volumeSpec.Name())
}
// GetPersistentVolumeClaimVolumeMode retrieves VolumeMode from pvc.
func GetPersistentVolumeClaimVolumeMode(claim *v1.PersistentVolumeClaim) (v1.PersistentVolumeMode, error) {
if claim.Spec.VolumeMode != nil {
return *claim.Spec.VolumeMode, nil
}
return "", fmt.Errorf("cannot get volumeMode from pvc: %v", claim.Name)
}
// CheckVolumeModeFilesystem checks the VolumeMode.
// It returns true if the mode is Filesystem, otherwise false.
func CheckVolumeModeFilesystem(volumeSpec *volume.Spec) (bool, error) {
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
volumeMode, err := GetVolumeMode(volumeSpec)
if err != nil {
return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err)
return true, err
}
glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath)
if filepath == devPath {
refs = append(refs, path.Join(mapPath, filename))
if volumeMode == v1.PersistentVolumeBlock {
return false, nil
}
}
glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs)
return refs, nil
}
// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath
// corresponding to map path symlink, and then return global map path with pod uuid.
// ex. mapPath symlink: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX
// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX
func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) {
var globalMapPathUUID string
// Find symbolic link named pod uuid under plugin dir
err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) {
glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath)
if res, err := compareSymlinks(path, mapPath); err == nil && res {
globalMapPathUUID = path
}
}
return nil
})
if err != nil {
return "", err
}
glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID)
// Return path contains global map path + {pod uuid}
return globalMapPathUUID, nil
}
func compareSymlinks(global, pod string) (bool, error) {
devGlobal, err := os.Readlink(global)
if err != nil {
return false, err
}
devPod, err := os.Readlink(pod)
if err != nil {
return false, err
}
glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod)
if devGlobal == devPod {
return true, nil
}
return false, nil
return true, nil
}

View file

@ -1,106 +0,0 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/golang/glog"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
blockDevicePath, err := v.GetLoopDevice(path)
if err != nil && err.Error() != ErrDeviceNotFound {
return "", err
}
// If no existing loop device for the path, create one
if blockDevicePath == "" {
glog.V(4).Infof("Creating device for path: %s", path)
blockDevicePath, err = makeLoopDevice(path)
if err != nil {
return "", err
}
}
return blockDevicePath, nil
}
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
_, err := os.Stat(path)
if os.IsNotExist(err) {
return "", errors.New(ErrNotAvailable)
}
if err != nil {
return "", fmt.Errorf("not attachable: %v", err)
}
args := []string{"-j", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device discover command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
func makeLoopDevice(path string) (string, error) {
args := []string{"-f", "--show", path}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
glog.V(2).Infof("Failed device create command for path %s: %v", path, err)
return "", err
}
return parseLosetupOutputForDevice(out)
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
args := []string{"-d", device}
cmd := exec.Command(losetupPath, args...)
out, err := cmd.CombinedOutput()
if err != nil {
if !strings.Contains(string(out), "No such device or address") {
return err
}
}
return nil
}
func parseLosetupOutputForDevice(output []byte) (string, error) {
if len(output) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
// losetup returns device in the format:
// /dev/loop1: [0073]:148662 (/dev/sda)
device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0])
if len(device) == 0 {
return "", errors.New(ErrDeviceNotFound)
}
return device, nil
}
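
// A minimal lifecycle sketch (illustrative only; the backing file path is a
// hypothetical placeholder): attach a regular file as a loop device, then
// detach it once it is no longer needed.
//
//	handler := VolumePathHandler{}
//	device, err := handler.AttachFileDevice("/var/lib/kubelet/plugins/<plugin>/file.img")
//	if err == nil {
//		// GetLoopDevice now reports the same device, e.g. "/dev/loop1", parsed
//		// from "losetup -j" output such as "/dev/loop1: [0073]:148662 (...)".
//		_ = handler.RemoveLoopDevice(device) // detach when done
//	}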

View file

@@ -22,12 +22,24 @@ import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
utiltesting "k8s.io/client-go/util/testing"
// util.go uses api.Codecs.LegacyCodec so import this package to do some
// resource initialization.
"hash/fnv"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/util/mount"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/pkg/volume"
)
var nodeLabels map[string]string = map[string]string{
@@ -35,7 +47,7 @@ var nodeLabels map[string]string = map[string]string{
"test-key2": "test-value2",
}
func TestCheckNodeAffinity(t *testing.T) {
func TestCheckAlphaNodeAffinity(t *testing.T) {
type affinityTest struct {
name string
expectSuccess bool
@@ -46,12 +58,12 @@ func TestCheckNodeAffinity(t *testing.T) {
{
name: "valid-no-constraints",
expectSuccess: true,
pv: testVolumeWithNodeAffinity(t, &v1.NodeAffinity{}),
pv: testVolumeWithAlphaNodeAffinity(t, &v1.NodeAffinity{}),
},
{
name: "valid-constraints",
expectSuccess: true,
pv: testVolumeWithNodeAffinity(t, &v1.NodeAffinity{
pv: testVolumeWithAlphaNodeAffinity(t, &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
@@ -75,7 +87,7 @@ func TestCheckNodeAffinity(t *testing.T) {
{
name: "invalid-key",
expectSuccess: false,
pv: testVolumeWithNodeAffinity(t, &v1.NodeAffinity{
pv: testVolumeWithAlphaNodeAffinity(t, &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
@@ -99,7 +111,7 @@ func TestCheckNodeAffinity(t *testing.T) {
{
name: "invalid-values",
expectSuccess: false,
pv: testVolumeWithNodeAffinity(t, &v1.NodeAffinity{
pv: testVolumeWithAlphaNodeAffinity(t, &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
@@ -134,7 +146,111 @@ func TestCheckNodeAffinity(t *testing.T) {
}
}
func testVolumeWithNodeAffinity(t *testing.T, affinity *v1.NodeAffinity) *v1.PersistentVolume {
func TestCheckVolumeNodeAffinity(t *testing.T) {
type affinityTest struct {
name string
expectSuccess bool
pv *v1.PersistentVolume
}
cases := []affinityTest{
{
name: "valid-nil",
expectSuccess: true,
pv: testVolumeWithNodeAffinity(t, nil),
},
{
name: "valid-no-constraints",
expectSuccess: true,
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{}),
},
{
name: "valid-constraints",
expectSuccess: true,
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "test-key1",
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-value1", "test-value3"},
},
{
Key: "test-key2",
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-value0", "test-value2"},
},
},
},
},
},
}),
},
{
name: "invalid-key",
expectSuccess: false,
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "test-key1",
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-value1", "test-value3"},
},
{
Key: "test-key3",
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-value0", "test-value2"},
},
},
},
},
},
}),
},
{
name: "invalid-values",
expectSuccess: false,
pv: testVolumeWithNodeAffinity(t, &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "test-key1",
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-value3", "test-value4"},
},
{
Key: "test-key2",
Operator: v1.NodeSelectorOpIn,
Values: []string{"test-value0", "test-value2"},
},
},
},
},
},
}),
},
}
for _, c := range cases {
err := CheckNodeAffinity(c.pv, nodeLabels)
if err != nil && c.expectSuccess {
t.Errorf("CheckTopology %v returned error: %v", c.name, err)
}
if err == nil && !c.expectSuccess {
t.Errorf("CheckTopology %v returned success, expected error", c.name)
}
}
}
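
// A matcher sketch for the In operator these cases exercise (an assumption about
// the semantics, not the CheckNodeAffinity implementation): terms are ORed,
// requirements within a term are ANDed, and an In requirement matches when the
// node carries the key and its value appears in Values. With nodeLabels above,
// "test-key1" In {"test-value1", "test-value3"} matches, while any requirement
// on the absent "test-key3" fails.
func matchInRequirementSketch(labels map[string]string, req v1.NodeSelectorRequirement) bool {
	val, ok := labels[req.Key]
	if !ok {
		return false
	}
	for _, want := range req.Values {
		if val == want {
			return true
		}
	}
	return false
}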
func testVolumeWithAlphaNodeAffinity(t *testing.T, affinity *v1.NodeAffinity) *v1.PersistentVolume {
objMeta := metav1.ObjectMeta{Name: "test-constraints"}
objMeta.Annotations = map[string]string{}
err := helper.StorageNodeAffinityToAlphaAnnotation(objMeta.Annotations, affinity)
@@ -147,6 +263,16 @@ func testVolumeWithNodeAffinity(t *testing.T, affinity *v1.NodeAffinity) *v1.Per
}
}
func testVolumeWithNodeAffinity(t *testing.T, affinity *v1.VolumeNodeAffinity) *v1.PersistentVolume {
objMeta := metav1.ObjectMeta{Name: "test-constraints"}
return &v1.PersistentVolume{
ObjectMeta: objMeta,
Spec: v1.PersistentVolumeSpec{
NodeAffinity: affinity,
},
}
}
func TestLoadPodFromFile(t *testing.T) {
tests := []struct {
name string
@@ -162,7 +288,7 @@ metadata:
name: testpod
spec:
containers:
- image: gcr.io/google_containers/busybox
- image: k8s.gcr.io/busybox
`,
false,
},
@@ -179,7 +305,7 @@ spec:
"spec": {
"containers": [
{
"image": "gcr.io/google_containers/busybox"
"image": "k8s.gcr.io/busybox"
}
]
}
@@ -195,7 +321,7 @@ kind: Pod
metadata:
name: testpod
spec:
- image: gcr.io/google_containers/busybox
- image: k8s.gcr.io/busybox
`,
true,
},
@@ -263,3 +389,703 @@ func TestZonesToSet(t *testing.T) {
}
}
}
func TestDoUnmountMountPoint(t *testing.T) {
tmpDir1, err1 := utiltesting.MkTmpdir("umount_test1")
if err1 != nil {
t.Fatalf("error creating temp dir: %v", err1)
}
defer os.RemoveAll(tmpDir1)
tmpDir2, err2 := utiltesting.MkTmpdir("umount_test2")
if err2 != nil {
t.Fatalf("error creating temp dir: %v", err2)
}
defer os.RemoveAll(tmpDir2)
	// All cases below expect doUnmountMountPoint to succeed
tests := []struct {
mountPath string
corruptedMnt bool
}{
{
mountPath: tmpDir1,
corruptedMnt: true,
},
{
mountPath: tmpDir2,
corruptedMnt: false,
},
}
fake := &mount.FakeMounter{}
for _, tt := range tests {
err := doUnmountMountPoint(tt.mountPath, fake, false, tt.corruptedMnt)
if err != nil {
t.Errorf("err Expected nil, but got: %v", err)
}
}
}
func TestCalculateTimeoutForVolume(t *testing.T) {
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"),
},
},
}
timeout := CalculateTimeoutForVolume(50, 30, pv)
if timeout != 50 {
t.Errorf("Expected 50 for timeout but got %v", timeout)
}
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 60 {
t.Errorf("Expected 60 for timeout but got %v", timeout)
}
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 4500 {
t.Errorf("Expected 4500 for timeout but got %v", timeout)
}
}
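
// A sketch of the scaling these expectations imply (inferred from the cases
// above; the real signature and rounding may differ): the timeout grows with
// whole GiB of requested capacity, floored at the minimum, so 500M -> 50,
// 2Gi -> 2*30 = 60, and 150Gi -> 150*30 = 4500.
func calculateTimeoutSketch(minimumTimeout, timeoutIncrement int64, pv *v1.PersistentVolume) int64 {
	giQuantity := resource.MustParse("1Gi")
	pvQuantity := pv.Spec.Capacity[v1.ResourceStorage]
	timeout := (pvQuantity.Value() / giQuantity.Value()) * timeoutIncrement
	if timeout < minimumTimeout {
		return minimumTimeout
	}
	return timeout
}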
func TestGenerateVolumeName(t *testing.T) {
// Normal operation, no truncate
v1 := GenerateVolumeName("kubernetes", "pv-cinder-abcde", 255)
if v1 != "kubernetes-dynamic-pv-cinder-abcde" {
t.Errorf("Expected kubernetes-dynamic-pv-cinder-abcde, got %s", v1)
}
// Truncate trailing "6789-dynamic"
prefix := strings.Repeat("0123456789", 9) // 90 characters prefix + 8 chars. of "-dynamic"
v2 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
expect := prefix[:84] + "-pv-cinder-abcde"
if v2 != expect {
t.Errorf("Expected %s, got %s", expect, v2)
}
// Truncate really long cluster name
prefix = strings.Repeat("0123456789", 1000) // 10000 characters prefix
v3 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
if v3 != expect {
t.Errorf("Expected %s, got %s", expect, v3)
}
}
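
// A sketch of the truncation behaviour checked above (inferred from the
// expectations; not necessarily the exact implementation): the generated name
// is "<clusterName>-dynamic-<pvName>", and when it would exceed maxLength the
// cluster prefix is trimmed so the full pvName is always preserved.
func generateVolumeNameSketch(clusterName, pvName string, maxLength int) string {
	prefix := clusterName + "-dynamic"
	if len(prefix)+1+len(pvName) > maxLength {
		prefix = prefix[:maxLength-len(pvName)-1]
	}
	return prefix + "-" + pvName
}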
func TestMountOptionFromSpec(t *testing.T) {
scenarios := map[string]struct {
volume *volume.Spec
expectedMountList []string
systemOptions []string
}{
"volume-with-mount-options": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{"ro", "nfsvers=3"},
systemOptions: nil,
},
"volume-with-bad-mount-options": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{},
systemOptions: nil,
},
"vol-with-sys-opts": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100", "hard"},
systemOptions: []string{"fsid=100", "hard"},
},
"vol-with-sys-opts-with-dup": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100"},
systemOptions: []string{"fsid=100", "ro"},
},
}
for name, scenario := range scenarios {
mountOptions := MountOptionFromSpec(scenario.volume, scenario.systemOptions...)
if !reflect.DeepEqual(slice.SortStrings(mountOptions), slice.SortStrings(scenario.expectedMountList)) {
t.Errorf("for %s expected mount options : %v got %v", name, scenario.expectedMountList, mountOptions)
}
}
}
func createVolumeSpecWithMountOption(name string, mountOptions string, spec v1.PersistentVolumeSpec) *volume.Spec {
annotations := map[string]string{
v1.MountOptionAnnotation: mountOptions,
}
objMeta := metav1.ObjectMeta{
Name: name,
Annotations: annotations,
}
pv := &v1.PersistentVolume{
ObjectMeta: objMeta,
Spec: spec,
}
return &volume.Spec{PersistentVolume: pv}
}
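
// A sketch of the merge behaviour the scenarios above expect (an assumption;
// the real helper may consult other fields as well): mount options come from
// the v1.MountOptionAnnotation annotation as a comma-separated list, system
// options are appended, and duplicates and empty entries are dropped.
func mountOptionsSketch(spec *volume.Spec, systemOptions ...string) []string {
	seen := map[string]bool{}
	options := []string{}
	add := func(opts []string) {
		for _, o := range opts {
			o = strings.TrimSpace(o)
			if o == "" || seen[o] {
				continue
			}
			seen[o] = true
			options = append(options, o)
		}
	}
	if pv := spec.PersistentVolume; pv != nil {
		if value, ok := pv.Annotations[v1.MountOptionAnnotation]; ok {
			add(strings.Split(value, ","))
		}
	}
	add(systemOptions)
	return options
}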
func checkFnv32(t *testing.T, s string, expected uint32) {
h := fnv.New32()
h.Write([]byte(s))
h.Sum32()
if h.Sum32() != expected {
t.Fatalf("hash of %q was %v, expected %v", s, h.Sum32(), expected)
}
}
func TestChooseZoneForVolume(t *testing.T) {
checkFnv32(t, "henley", 1180403676)
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
// A few others
checkFnv32(t, "henley-", 2652299129)
checkFnv32(t, "henley-a", 1459735322)
checkFnv32(t, "", 2166136261)
tests := []struct {
Zones sets.String
VolumeName string
Expected string
}{
// Test for PVC names that don't have a dash
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley",
Expected: "a", // hash("henley") == 0
},
// Tests for PVC names that end in - number, but don't look like statefulset PVCs
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-0",
Expected: "a", // hash("henley") == 0
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-1",
Expected: "b", // hash("henley") + 1 == 1
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-3",
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-4",
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
},
// Tests for PVC names that are edge cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-",
Expected: "c", // hash("henley-") = 2652299129 === 2 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-a",
Expected: "c", // hash("henley-a") = 1459735322 === 2 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium--1",
Expected: "c", // hash("") + 1 == 2166136261 + 1 === 2 mod 3
},
// Tests for PVC names for simple StatefulSet cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-1",
Expected: "b", // hash("henley") + 1 == 1
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "loud-henley-1",
Expected: "b", // hash("henley") + 1 == 1
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "quiet-henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-3",
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-4",
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
},
// Tests for statefulsets (or claims) with dashes in the names
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-alpha-henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-beta-henley-3",
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-gamma-henley-4",
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
},
// Tests for statefulsets name ending in -
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--2",
Expected: "a", // hash("") + 2 == 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--3",
Expected: "b", // hash("") + 3 == 1 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
Expected: "c", // hash("") + 4 == 2 mod 3
},
}
for _, test := range tests {
actual := ChooseZoneForVolume(test.Zones, test.VolumeName)
if actual != test.Expected {
t.Errorf("Test %v failed, expected zone %q, actual %q", test, test.Expected, actual)
}
}
}
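
// A sketch of the zone selection the cases above encode (inferred from the
// expected values; the helper name is illustrative and "strconv" is assumed to
// be imported): when the name ends in a numeric ordinal, the segment just
// before it is hashed with FNV-32 and the ordinal is added as an offset;
// otherwise the whole name is hashed. The sum indexes into the sorted zone list.
func chooseZoneSketch(zones sets.String, volumeName string) string {
	hashedName, offset := volumeName, uint32(0)
	if parts := strings.Split(volumeName, "-"); len(parts) >= 2 {
		if n, err := strconv.Atoi(parts[len(parts)-1]); err == nil && n >= 0 {
			hashedName = parts[len(parts)-2]
			offset = uint32(n)
		}
	}
	h := fnv.New32()
	h.Write([]byte(hashedName))
	zoneSlice := zones.List() // List() returns the zones in sorted order
	return zoneSlice[(h.Sum32()+offset)%uint32(len(zoneSlice))]
}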
func TestChooseZonesForVolume(t *testing.T) {
checkFnv32(t, "henley", 1180403676)
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
// A few others
checkFnv32(t, "henley-", 2652299129)
checkFnv32(t, "henley-a", 1459735322)
checkFnv32(t, "", 2166136261)
tests := []struct {
Zones sets.String
VolumeName string
NumZones uint32
Expected sets.String
}{
// Test for PVC names that don't have a dash
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") == 0 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
},
// Tests for PVC names that end in - number, but don't look like statefulset PVCs
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-0",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") == 0 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-0",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-1",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-3",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-3",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-4",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-4",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 */, "a"),
},
// Tests for PVC names that are edge cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 = 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-a",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-a",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 = 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium--1",
NumZones: 1,
Expected: sets.NewString("c" /* hash("") + 1 == 2166136261 + 1 === 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium--1",
NumZones: 2,
Expected: sets.NewString("a" /* hash("") + 1 + 1(startingIndex) == 2166136261 + 1 + 1 === 3 mod 3 = 0 */, "b"),
},
// Tests for PVC names for simple StatefulSet cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-1",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
},
// Tests for PVC names for simple StatefulSet cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "loud-henley-1",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "loud-henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "quiet-henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "quiet-henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-3",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-3",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 6 mod 3 = 0 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-4",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-4",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
},
// Tests for statefulsets (or claims) with dashes in the names
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-alpha-henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-alpha-henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-beta-henley-3",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-beta-henley-3",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 0 mod 3 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-gamma-henley-4",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-gamma-henley-4",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
},
// Tests for statefulsets name ending in -
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--2",
NumZones: 1,
Expected: sets.NewString("a" /* hash("") + 2 == 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--2",
NumZones: 2,
Expected: sets.NewString("c" /* hash("") + 2 + 2(startingIndex) == 2 mod 3 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--3",
NumZones: 1,
Expected: sets.NewString("b" /* hash("") + 3 == 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--3",
NumZones: 2,
Expected: sets.NewString("b" /* hash("") + 3 + 3(startingIndex) == 1 mod 3 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 1,
Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 2,
Expected: sets.NewString("a" /* hash("") + 4 + 4(startingIndex) == 0 mod 3 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 3,
Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */, "a", "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 4,
Expected: sets.NewString("c" /* hash("") + 4 + 9(startingIndex) == 2 mod 3 */, "a", "b", "c"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-0",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") == 0 + 2 */, "d"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-2",
NumZones: 2,
Expected: sets.NewString("e" /* hash("henley") == 0 + 2 + 2(startingIndex) */, "f"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-3",
NumZones: 2,
Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-0",
NumZones: 3,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b", "c"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-1",
NumZones: 3,
Expected: sets.NewString("d" /* hash("henley") == 0 + 1 + 2(startingIndex) */, "e", "f"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-2",
NumZones: 3,
Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h", "i"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-3",
NumZones: 3,
Expected: sets.NewString("a" /* hash("henley") == 0 + 3 + 6(startingIndex) */, "b", "c"),
},
}
for _, test := range tests {
actual := ChooseZonesForVolume(test.Zones, test.VolumeName, test.NumZones)
if !actual.Equal(test.Expected) {
t.Errorf("Test %v failed, expected zone %#v, actual %#v", test, test.Expected, actual)
}
}
}
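
// The multi-zone variant appears to follow the same hashing (a sketch inferred
// from the expectations above): the selection starts at hash + ordinal*numZones
// and takes numZones consecutive entries from the sorted zone list, wrapping
// around, e.g. for "henley-1" with numZones=2 over {a,b,c} the start is
// 0 + 1*2 = 2, yielding {c, a}.
//
//	startingIndex := offset * numZones
//	for i := uint32(0); i < numZones; i++ {
//		result.Insert(zoneSlice[(hash+startingIndex+i)%uint32(len(zoneSlice))])
//	}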
func TestValidateZone(t *testing.T) {
functionUnderTest := "ValidateZone"
// First part: want an error
errCases := []string{"", " "}
for _, errCase := range errCases {
if got := ValidateZone(errCase); got == nil {
t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, errCase, got, "an error")
}
}
// Second part: want no error
succCases := []string{" us-east-1a "}
for _, succCase := range succCases {
if got := ValidateZone(succCase); got != nil {
t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, succCase, got, nil)
}
}
}
func TestGetWindowsPath(t *testing.T) {
tests := []struct {
path string
expectedPath string
}{
{
path: `/var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4/volumes/kubernetes.io~disk`,
expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
},
{
path: `\var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
},
{
path: `/`,
expectedPath: `c:\`,
},
{
path: ``,
expectedPath: ``,
},
}
for _, test := range tests {
result := GetWindowsPath(test.path)
if result != test.expectedPath {
t.Errorf("GetWindowsPath(%v) returned (%v), want (%v)", test.path, result, test.expectedPath)
}
}
}
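
// A sketch of the conversion these cases check (inferred from the expected
// paths; not necessarily the exact implementation): forward slashes become
// backslashes, absolute paths are rooted under the c: drive, and the empty
// path is returned unchanged.
func getWindowsPathSketch(path string) string {
	windowsPath := strings.Replace(path, "/", `\`, -1)
	if strings.HasPrefix(windowsPath, `\`) {
		windowsPath = "c:" + windowsPath
	}
	return windowsPath
}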

View file

@@ -1,39 +0,0 @@
// +build !linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
)
// AttachFileDevice takes a path to a regular file and makes it available as an
// attached block device.
func (v VolumePathHandler) AttachFileDevice(path string) (string, error) {
return "", fmt.Errorf("AttachFileDevice not supported for this build.")
}
// GetLoopDevice returns the full path to the loop device associated with the given path.
func (v VolumePathHandler) GetLoopDevice(path string) (string, error) {
return "", fmt.Errorf("GetLoopDevice not supported for this build.")
}
// RemoveLoopDevice removes specified loopback device
func (v VolumePathHandler) RemoveLoopDevice(device string) error {
return fmt.Errorf("RemoveLoopDevice not supported for this build.")
}

View file

@@ -1,902 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volume
import (
"fmt"
"hash/fnv"
"reflect"
"strings"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/util/slice"
)
type testcase struct {
// Input of the test
name string
existingPod *v1.Pod
createPod *v1.Pod
// eventSequence is list of events that are simulated during recycling. It
// can be either event generated by a recycler pod or a state change of
// the pod. (see newPodEvent and newEvent below).
eventSequence []watch.Event
// Expected output.
// expectedEvents is list of events that were sent to the volume that was
// recycled.
expectedEvents []mockEvent
expectedError string
}
func newPodEvent(eventtype watch.EventType, name string, phase v1.PodPhase, message string) watch.Event {
return watch.Event{
Type: eventtype,
Object: newPod(name, phase, message),
}
}
func newEvent(eventtype, message string) watch.Event {
return watch.Event{
Type: watch.Added,
Object: &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
},
Reason: "MockEvent",
Message: message,
Type: eventtype,
},
}
}
func newPod(name string, phase v1.PodPhase, message string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceDefault,
Name: name,
},
Status: v1.PodStatus{
Phase: phase,
Message: message,
},
}
}
func TestRecyclerPod(t *testing.T) {
tests := []testcase{
{
// Test recycler success with some events
name: "RecyclerSuccess",
createPod: newPod("podRecyclerSuccess", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerSuccess", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"),
newEvent(v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""),
newEvent(v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""),
newEvent(v1.EventTypeNormal, "Created container with docker id 83d929aeac82"),
newEvent(v1.EventTypeNormal, "Started container with docker id 83d929aeac82"),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerSuccess", v1.PodSucceeded, ""),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerSuccess to 127.0.0.1"},
{v1.EventTypeNormal, "pulling image \"gcr.io/google_containers/busybox\""},
{v1.EventTypeNormal, "Successfully pulled image \"gcr.io/google_containers/busybox\""},
{v1.EventTypeNormal, "Created container with docker id 83d929aeac82"},
{v1.EventTypeNormal, "Started container with docker id 83d929aeac82"},
},
expectedError: "",
},
{
// Test recycler failure with some events
name: "RecyclerFailure",
createPod: newPod("podRecyclerFailure", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerFailure", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"),
newEvent(v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"),
newEvent(v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodRunning, ""),
newPodEvent(watch.Modified, "podRecyclerFailure", v1.PodFailed, "Pod was active on the node longer than specified deadline"),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerFailure to 127.0.0.1"},
{v1.EventTypeWarning, "Unable to mount volumes for pod \"recycler-for-podRecyclerFailure_default(3c9809e5-347c-11e6-a79b-3c970e965218)\": timeout expired waiting for volumes to attach/mount"},
{v1.EventTypeWarning, "Error syncing pod, skipping: timeout expired waiting for volumes to attach/mount for pod \"default\"/\"recycler-for-podRecyclerFailure\". list of unattached/unmounted"},
},
expectedError: "failed to recycle volume: Pod was active on the node longer than specified deadline",
},
{
// Recycler pod gets deleted
name: "RecyclerDeleted",
createPod: newPod("podRecyclerDeleted", v1.PodPending, ""),
eventSequence: []watch.Event{
// Pod gets Running and Succeeded
newPodEvent(watch.Added, "podRecyclerDeleted", v1.PodPending, ""),
newEvent(v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"),
newPodEvent(watch.Deleted, "podRecyclerDeleted", v1.PodPending, ""),
},
expectedEvents: []mockEvent{
{v1.EventTypeNormal, "Successfully assigned recycler-for-podRecyclerDeleted to 127.0.0.1"},
},
expectedError: "failed to recycle volume: recycler pod was deleted",
},
{
// Another recycler pod is already running
name: "RecyclerRunning",
existingPod: newPod("podOldRecycler", v1.PodRunning, ""),
createPod: newPod("podNewRecycler", v1.PodFailed, "mock message"),
eventSequence: []watch.Event{},
expectedError: "old recycler pod found, will retry later",
},
}
for _, test := range tests {
t.Logf("Test %q", test.name)
client := &mockRecyclerClient{
events: test.eventSequence,
pod: test.existingPod,
}
err := internalRecycleVolumeByWatchingPodUntilCompletion(test.createPod.Name, test.createPod, client)
receivedError := ""
if err != nil {
receivedError = err.Error()
}
if receivedError != test.expectedError {
t.Errorf("Test %q failed, expected error %q, got %q", test.name, test.expectedError, receivedError)
continue
}
if !client.deletedCalled {
t.Errorf("Test %q failed, expected deferred client.Delete to be called on recycler pod", test.name)
continue
}
for i, expectedEvent := range test.expectedEvents {
if len(client.receivedEvents) <= i {
t.Errorf("Test %q failed, expected event %d: %q not received", test.name, i, expectedEvent.message)
continue
}
receivedEvent := client.receivedEvents[i]
if expectedEvent.eventtype != receivedEvent.eventtype {
t.Errorf("Test %q failed, event %d does not match: expected eventtype %q, got %q", test.name, i, expectedEvent.eventtype, receivedEvent.eventtype)
}
if expectedEvent.message != receivedEvent.message {
t.Errorf("Test %q failed, event %d does not match: expected message %q, got %q", test.name, i, expectedEvent.message, receivedEvent.message)
}
}
for i := len(test.expectedEvents); i < len(client.receivedEvents); i++ {
t.Errorf("Test %q failed, unexpected event received: %s, %q", test.name, client.receivedEvents[i].eventtype, client.receivedEvents[i].message)
}
}
}
type mockRecyclerClient struct {
pod *v1.Pod
deletedCalled bool
receivedEvents []mockEvent
events []watch.Event
}
type mockEvent struct {
eventtype, message string
}
func (c *mockRecyclerClient) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
if c.pod == nil {
c.pod = pod
return c.pod, nil
}
// Simulate "already exists" error
return nil, errors.NewAlreadyExists(api.Resource("pods"), pod.Name)
}
func (c *mockRecyclerClient) GetPod(name, namespace string) (*v1.Pod, error) {
if c.pod != nil {
return c.pod, nil
} else {
return nil, fmt.Errorf("pod does not exist")
}
}
func (c *mockRecyclerClient) DeletePod(name, namespace string) error {
c.deletedCalled = true
return nil
}
func (c *mockRecyclerClient) WatchPod(name, namespace string, stopChannel chan struct{}) (<-chan watch.Event, error) {
eventCh := make(chan watch.Event, 0)
go func() {
for _, e := range c.events {
eventCh <- e
}
}()
return eventCh, nil
}
func (c *mockRecyclerClient) Event(eventtype, message string) {
c.receivedEvents = append(c.receivedEvents, mockEvent{eventtype, message})
}
func TestCalculateTimeoutForVolume(t *testing.T) {
pv := &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("500M"),
},
},
}
timeout := CalculateTimeoutForVolume(50, 30, pv)
if timeout != 50 {
t.Errorf("Expected 50 for timeout but got %v", timeout)
}
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("2Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 60 {
t.Errorf("Expected 60 for timeout but got %v", timeout)
}
pv.Spec.Capacity[v1.ResourceStorage] = resource.MustParse("150Gi")
timeout = CalculateTimeoutForVolume(50, 30, pv)
if timeout != 4500 {
t.Errorf("Expected 4500 for timeout but got %v", timeout)
}
}
func TestGenerateVolumeName(t *testing.T) {
// Normal operation, no truncate
v1 := GenerateVolumeName("kubernetes", "pv-cinder-abcde", 255)
if v1 != "kubernetes-dynamic-pv-cinder-abcde" {
t.Errorf("Expected kubernetes-dynamic-pv-cinder-abcde, got %s", v1)
}
// Truncate trailing "6789-dynamic"
prefix := strings.Repeat("0123456789", 9) // 90 characters prefix + 8 chars. of "-dynamic"
v2 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
expect := prefix[:84] + "-pv-cinder-abcde"
if v2 != expect {
t.Errorf("Expected %s, got %s", expect, v2)
}
// Truncate really long cluster name
prefix = strings.Repeat("0123456789", 1000) // 10000 characters prefix
v3 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
if v3 != expect {
t.Errorf("Expected %s, got %s", expect, v3)
}
}
func TestMountOptionFromSpec(t *testing.T) {
scenarios := map[string]struct {
volume *Spec
expectedMountList []string
systemOptions []string
}{
"volume-with-mount-options": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{"ro", "nfsvers=3"},
systemOptions: nil,
},
"volume-with-bad-mount-options": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{},
systemOptions: nil,
},
"vol-with-sys-opts": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100", "hard"},
systemOptions: []string{"fsid=100", "hard"},
},
"vol-with-sys-opts-with-dup": {
volume: createVolumeSpecWithMountOption("good-mount-opts", "ro,nfsvers=3", v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/srv", ReadOnly: false},
},
}),
expectedMountList: []string{"ro", "nfsvers=3", "fsid=100"},
systemOptions: []string{"fsid=100", "ro"},
},
}
for name, scenario := range scenarios {
mountOptions := MountOptionFromSpec(scenario.volume, scenario.systemOptions...)
if !reflect.DeepEqual(slice.SortStrings(mountOptions), slice.SortStrings(scenario.expectedMountList)) {
t.Errorf("for %s expected mount options : %v got %v", name, scenario.expectedMountList, mountOptions)
}
}
}
func createVolumeSpecWithMountOption(name string, mountOptions string, spec v1.PersistentVolumeSpec) *Spec {
annotations := map[string]string{
v1.MountOptionAnnotation: mountOptions,
}
objMeta := metav1.ObjectMeta{
Name: name,
Annotations: annotations,
}
pv := &v1.PersistentVolume{
ObjectMeta: objMeta,
Spec: spec,
}
return &Spec{PersistentVolume: pv}
}
func checkFnv32(t *testing.T, s string, expected int) {
h := fnv.New32()
h.Write([]byte(s))
h.Sum32()
if int(h.Sum32()) != expected {
t.Fatalf("hash of %q was %v, expected %v", s, h.Sum32(), expected)
}
}
func TestChooseZoneForVolume(t *testing.T) {
checkFnv32(t, "henley", 1180403676)
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
// A few others
checkFnv32(t, "henley-", 2652299129)
checkFnv32(t, "henley-a", 1459735322)
checkFnv32(t, "", 2166136261)
tests := []struct {
Zones sets.String
VolumeName string
Expected string
}{
// Test for PVC names that don't have a dash
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley",
Expected: "a", // hash("henley") == 0
},
// Tests for PVC names that end in - number, but don't look like statefulset PVCs
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-0",
Expected: "a", // hash("henley") == 0
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-1",
Expected: "b", // hash("henley") + 1 == 1
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-3",
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-4",
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
},
// Tests for PVC names that are edge cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-",
Expected: "c", // hash("henley-") = 2652299129 === 2 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-a",
Expected: "c", // hash("henley-a") = 1459735322 === 2 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium--1",
Expected: "c", // hash("") + 1 == 2166136261 + 1 === 2 mod 3
},
// Tests for PVC names for simple StatefulSet cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-1",
Expected: "b", // hash("henley") + 1 == 1
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "loud-henley-1",
Expected: "b", // hash("henley") + 1 == 1
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "quiet-henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-3",
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-4",
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
},
// Tests for statefulsets (or claims) with dashes in the names
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-alpha-henley-2",
Expected: "c", // hash("henley") + 2 == 2
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-beta-henley-3",
Expected: "a", // hash("henley") + 3 == 3 === 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-gamma-henley-4",
Expected: "b", // hash("henley") + 4 == 4 === 1 mod 3
},
// Tests for statefulsets name ending in -
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--2",
Expected: "a", // hash("") + 2 == 0 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--3",
Expected: "b", // hash("") + 3 == 1 mod 3
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
Expected: "c", // hash("") + 4 == 2 mod 3
},
}
for _, test := range tests {
actual := ChooseZoneForVolume(test.Zones, test.VolumeName)
if actual != test.Expected {
t.Errorf("Test %v failed, expected zone %q, actual %q", test, test.Expected, actual)
}
}
}
func TestChooseZonesForVolume(t *testing.T) {
checkFnv32(t, "henley", 1180403676)
// 1180403676 mod 3 == 0, so the offset from "henley" is 0, which makes it easier to verify this by inspection
// A few others
checkFnv32(t, "henley-", 2652299129)
checkFnv32(t, "henley-a", 1459735322)
checkFnv32(t, "", 2166136261)
tests := []struct {
Zones sets.String
VolumeName string
NumZones uint32
Expected sets.String
}{
// Test for PVC names that don't have a dash
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") == 0 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
},
// Tests for PVC names that end in - number, but don't look like statefulset PVCs
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-0",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") == 0 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-0",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-1",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-3",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-3",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-4",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-4",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 */, "a"),
},
// Tests for PVC names that are edge cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley-") = 2652299129 === 2 mod 3 = 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-a",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "henley-a",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley-a") = 1459735322 === 2 mod 3 = 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium--1",
NumZones: 1,
Expected: sets.NewString("c" /* hash("") + 1 == 2166136261 + 1 === 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium--1",
NumZones: 2,
Expected: sets.NewString("a" /* hash("") + 1 + 1(startingIndex) == 2166136261 + 1 + 1 === 3 mod 3 = 0 */, "b"),
},
// Tests for PVC names for simple StatefulSet cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-1",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
},
// Tests for PVC names for simple StatefulSet cases
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "loud-henley-1",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 1 == 1 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "loud-henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 1 + 1(startingIndex) == 2 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "quiet-henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "quiet-henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-3",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-3",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 6 mod 3 = 0 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-4",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley-4",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
},
// Tests for statefulsets (or claims) with dashes in the names
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-alpha-henley-2",
NumZones: 1,
Expected: sets.NewString("c" /* hash("henley") + 2 == 2 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-alpha-henley-2",
NumZones: 2,
Expected: sets.NewString("b" /* hash("henley") + 2 + 2(startingIndex) == 4 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-beta-henley-3",
NumZones: 1,
Expected: sets.NewString("a" /* hash("henley") + 3 == 3 === 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-beta-henley-3",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") + 3 + 3(startingIndex) == 6 === 0 mod 3 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-gamma-henley-4",
NumZones: 1,
Expected: sets.NewString("b" /* hash("henley") + 4 == 4 === 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-gamma-henley-4",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") + 4 + 4(startingIndex) == 8 === 2 mod 3 */, "a"),
},
// Tests for statefulsets name ending in -
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--2",
NumZones: 1,
Expected: sets.NewString("a" /* hash("") + 2 == 0 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--2",
NumZones: 2,
Expected: sets.NewString("c" /* hash("") + 2 + 2(startingIndex) == 2 mod 3 */, "a"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--3",
NumZones: 1,
Expected: sets.NewString("b" /* hash("") + 3 == 1 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--3",
NumZones: 2,
Expected: sets.NewString("b" /* hash("") + 3 + 3(startingIndex) == 1 mod 3 */, "c"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 1,
Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 2,
Expected: sets.NewString("a" /* hash("") + 4 + 4(startingIndex) == 0 mod 3 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 3,
Expected: sets.NewString("c" /* hash("") + 4 == 2 mod 3 */, "a", "b"),
},
{
Zones: sets.NewString("a", "b", "c"),
VolumeName: "medium-henley--4",
NumZones: 4,
Expected: sets.NewString("c" /* hash("") + 4 + 9(startingIndex) == 2 mod 3 */, "a", "b", "c"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-0",
NumZones: 2,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-1",
NumZones: 2,
Expected: sets.NewString("c" /* hash("henley") == 0 + 2 */, "d"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-2",
NumZones: 2,
Expected: sets.NewString("e" /* hash("henley") == 0 + 2 + 2(startingIndex) */, "f"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-3",
NumZones: 2,
Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-0",
NumZones: 3,
Expected: sets.NewString("a" /* hash("henley") == 0 */, "b", "c"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-1",
NumZones: 3,
Expected: sets.NewString("d" /* hash("henley") == 0 + 1 + 2(startingIndex) */, "e", "f"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-2",
NumZones: 3,
Expected: sets.NewString("g" /* hash("henley") == 0 + 2 + 4(startingIndex) */, "h", "i"),
},
{
Zones: sets.NewString("a", "b", "c", "d", "e", "f", "g", "h", "i"),
VolumeName: "henley-3",
NumZones: 3,
Expected: sets.NewString("a" /* hash("henley") == 0 + 3 + 6(startingIndex) */, "b", "c"),
},
}
for _, test := range tests {
actual := ChooseZonesForVolume(test.Zones, test.VolumeName, test.NumZones)
if !actual.Equal(test.Expected) {
t.Errorf("Test %v failed, expected zone %#v, actual %#v", test, test.Expected, actual)
}
}
}
func TestValidateZone(t *testing.T) {
functionUnderTest := "ValidateZone"
// First part: want an error
errCases := []string{"", " "}
for _, errCase := range errCases {
if got := ValidateZone(errCase); got == nil {
t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, errCase, got, "an error")
}
}
// Second part: want no error
succCases := []string{" us-east-1a "}
for _, succCase := range succCases {
if got := ValidateZone(succCase); got != nil {
t.Errorf("%v(%v) returned (%v), want (%v)", functionUnderTest, succCase, got, nil)
}
}
}
func TestGetWindowsPath(t *testing.T) {
tests := []struct {
path string
expectedPath string
}{
{
path: `/var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4/volumes/kubernetes.io~disk`,
expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
},
{
path: `\var/lib/kubelet/pods/146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
expectedPath: `c:\var\lib\kubelet\pods\146f8428-83e7-11e7-8dd4-000d3a31dac4\volumes\kubernetes.io~disk`,
},
{
path: `/`,
expectedPath: `c:\`,
},
{
path: ``,
expectedPath: ``,
},
}
for _, test := range tests {
result := GetWindowsPath(test.path)
if result != test.expectedPath {
t.Errorf("GetWindowsPath(%v) returned (%v), want (%v)", test.path, result, test.expectedPath)
}
}
}