Unittests

Prashanth Balasubramanian 2016-05-29 16:05:38 -07:00
parent f84ca54831
commit 22c6e5ddd7
14 changed files with 340 additions and 49 deletions

View file

@@ -84,6 +84,7 @@ type ClusterManager struct {
    healthCheckers []healthchecks.HealthChecker
}

// Init initializes the cluster manager.
func (c *ClusterManager) Init(tr *GCETranslator) {
    c.instancePool.Init(tr)
    for _, h := range c.healthCheckers {

View file

@@ -55,7 +55,7 @@ func newLoadBalancerController(t *testing.T, cm *fakeClusterManager, masterUrl s
    if err != nil {
        t.Fatalf("%v", err)
    }
    lb.hasSynced = func() { return true }
    lb.hasSynced = func() bool { return true }
    return lb
}

View file

@@ -49,8 +49,13 @@ func NewFakeClusterManager(clusterName string) *fakeClusterManager {
    fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
    fakeHCs := healthchecks.NewFakeHealthChecks()
    namer := &utils.Namer{clusterName}
    nodePool := instances.NewNodePool(fakeIGs, defaultZone)
    nodePool.Init(&instances.FakeZoneLister{[]string{"zone-a"}})
    healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
    healthChecker.Init(&healthchecks.FakeHealthCheckGetter{nil})
    backendPool := backends.NewBackendPool(
        fakeBackends,
        healthChecker, nodePool, namer, []int64{}, false)
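
The fakes are wired in dependency order: the node pool needs a zone lister and the health checker a check getter before anything syncs. Both fake types live elsewhere in this change; as a rough sketch, the zone lister plausibly looks like the following (field and method names here are assumptions, not taken from the diff):

    // Hypothetical sketch of a static zone lister for tests; the real
    // instances.FakeZoneLister may differ.
    type FakeZoneLister struct {
        Zones []string
    }

    // ListZones returns the fixed zone list the fake was built with.
    func (f *FakeZoneLister) ListZones() ([]string, error) {
        return f.Zones, nil
    }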

View file

@@ -0,0 +1,174 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller

import (
    "fmt"
    "testing"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/util/sets"
)

func TestZoneListing(t *testing.T) {
    cm := NewFakeClusterManager(DefaultClusterUID)
    lbc := newLoadBalancerController(t, cm, "")
    zoneToNode := map[string][]string{
        "zone-1": {"n1"},
        "zone-2": {"n2"},
    }
    addNodes(lbc, zoneToNode)
    zones, err := lbc.tr.ListZones()
    if err != nil {
        t.Errorf("Failed to list zones: %v", err)
    }
    for expectedZone := range zoneToNode {
        found := false
        for _, gotZone := range zones {
            if gotZone == expectedZone {
                found = true
            }
        }
        if !found {
            t.Fatalf("Expected zones %v; Got zones %v", zoneToNode, zones)
        }
    }
}
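
The found-flag loop above could equally be phrased as a set lookup, since the file already imports the sets package; a sketch of the equivalent assertion helper (not part of the commit):

    // assertZonesPresent is an equivalent of the found-flag loop above
    // using string sets: every expected zone must appear in zones.
    func assertZonesPresent(t *testing.T, zoneToNode map[string][]string, zones []string) {
        got := sets.NewString(zones...)
        for z := range zoneToNode {
            if !got.Has(z) {
                t.Fatalf("Expected zones %v; Got zones %v", zoneToNode, zones)
            }
        }
    }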

func TestInstancesAddedToZones(t *testing.T) {
    cm := NewFakeClusterManager(DefaultClusterUID)
    lbc := newLoadBalancerController(t, cm, "")
    zoneToNode := map[string][]string{
        "zone-1": {"n1", "n2"},
        "zone-2": {"n3"},
    }
    addNodes(lbc, zoneToNode)

    // Create 2 igs, one per zone.
    testIG := "test-ig"
    testPort := int64(3001)
    lbc.CloudClusterManager.instancePool.AddInstanceGroup(testIG, testPort)
    // The node pool syncs kube-nodes; syncing adds them to both igs.
    lbc.CloudClusterManager.instancePool.Sync([]string{"n1", "n2", "n3"})
    gotZonesToNode := cm.fakeIGs.GetInstancesByZone()
    i := 0
    for z, nodeNames := range zoneToNode {
        if ig, err := cm.fakeIGs.GetInstanceGroup(testIG, z); err != nil {
            t.Errorf("Failed to find ig %v in zone %v, found %+v: %v", testIG, z, ig, err)
        }
        if cm.fakeIGs.Ports[i] != testPort {
            t.Errorf("Expected the same node port on all igs, got ports %+v", cm.fakeIGs.Ports)
        }
        expNodes := sets.NewString(nodeNames...)
        gotNodes := sets.NewString(gotZonesToNode[z]...)
        if !gotNodes.Equal(expNodes) {
            t.Errorf("Nodes not added to zones, expected %+v got %+v", expNodes, gotNodes)
        }
        i++
    }
}

func TestProbeGetter(t *testing.T) {
    cm := NewFakeClusterManager(DefaultClusterUID)
    lbc := newLoadBalancerController(t, cm, "")
    nodePortToHealthCheck := map[int64]string{
        3001: "/healthz",
        3002: "/foo",
    }
    addPods(lbc, nodePortToHealthCheck)
    for p, exp := range nodePortToHealthCheck {
        got, err := lbc.tr.HealthCheck(p)
        if err != nil {
            t.Errorf("Failed to get health check for node port %v: %v", p, err)
        } else if got.RequestPath != exp {
            t.Errorf("Wrong health check for node port %v, got %v expected %v", p, got.RequestPath, exp)
        }
    }
}
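
// addPods creates, for each node port, a service selecting a pod whose
// readiness probe serves the given request path on container port 80.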
func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string) {
    for np, u := range nodePortToHealthCheck {
        l := map[string]string{fmt.Sprintf("app-%d", np): "test"}
        svc := &api.Service{
            Spec: api.ServiceSpec{
                Selector: l,
                Ports: []api.ServicePort{
                    {
                        NodePort: int32(np),
                        TargetPort: intstr.IntOrString{
                            Type:   intstr.Int,
                            IntVal: 80,
                        },
                    },
                },
            },
        }
        svc.Name = fmt.Sprintf("%d", np)
        lbc.svcLister.Store.Add(svc)

        pod := &api.Pod{
            ObjectMeta: api.ObjectMeta{
                Labels: l,
                Name:   fmt.Sprintf("%d", np),
            },
            Spec: api.PodSpec{
                Containers: []api.Container{
                    {
                        Ports: []api.ContainerPort{{ContainerPort: 80}},
                        ReadinessProbe: &api.Probe{
                            Handler: api.Handler{
                                HTTPGet: &api.HTTPGetAction{
                                    Path: u,
                                    Port: intstr.IntOrString{
                                        Type:   intstr.Int,
                                        IntVal: 80,
                                    },
                                },
                            },
                        },
                    },
                },
            },
        }
        lbc.podLister.Indexer.Add(pod)
    }
}
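
// addNodes registers a Ready node for every entry in zoneToNode, labeled
// with its zone under the zoneKey label.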
func addNodes(lbc *LoadBalancerController, zoneToNode map[string][]string) {
    for zone, nodes := range zoneToNode {
        for _, node := range nodes {
            n := &api.Node{
                ObjectMeta: api.ObjectMeta{
                    Name: node,
                    Labels: map[string]string{
                        zoneKey: zone,
                    },
                },
                Status: api.NodeStatus{
                    Conditions: []api.NodeCondition{
                        {Type: api.NodeReady, Status: api.ConditionTrue},
                    },
                },
            }
            lbc.nodeLister.Store.Add(n)
        }
    }
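
    // Re-init the instance pool so zone listing through the translator
    // reflects the nodes added above.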
    lbc.CloudClusterManager.instancePool.Init(lbc.tr)
}

View file

@@ -18,6 +18,7 @@ package controller
import (
    "fmt"
    "sort"
    "strconv"
    "time"
@@ -376,6 +377,10 @@ func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntO
    if err != nil {
        return nil, err
    }
    // If multiple endpoints have different health checks, take the first,
    // ordering pods by creation timestamp (name breaks ties).
    sort.Sort(PodsByCreationTimestamp(pl))
    for _, pod := range pl {
        logStr := fmt.Sprintf("Pod %v matching service selectors %v (targetport %+v)", pod.Name, l, targetPort)
        for _, c := range pod.Spec.Containers {
@@ -387,10 +392,9 @@ func (t *GCETranslator) getHTTPProbe(l map[string]string, targetPort intstr.IntO
            if isPortEqual(cPort, targetPort) {
                if isPortEqual(c.ReadinessProbe.Handler.HTTPGet.Port, targetPort) {
                    return c.ReadinessProbe, nil
                } else {
                    glog.Infof("%v: found matching targetPort on container %v, but not on readinessProbe (%+v)",
                        logStr, c.Name, c.ReadinessProbe.Handler.HTTPGet.Port)
                }
                glog.Infof("%v: found matching targetPort on container %v, but not on readinessProbe (%+v)",
                    logStr, c.Name, c.ReadinessProbe.Handler.HTTPGet.Port)
            }
        }
    }
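
isPortEqual is referenced here but is not part of this diff. For orientation only, a plausible shape, assuming the helper compares the two intstr values in canonical string form (the real implementation may also resolve named container ports):

    // Hypothetical sketch of isPortEqual; not taken from this commit.
    // Two ports match when their canonical string forms agree.
    func isPortEqual(a, b intstr.IntOrString) bool {
        return a.String() == b.String()
    }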
@@ -437,19 +441,18 @@ func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error
            }
        }
    }
    return &compute.HttpHealthCheck{
        Port: port,
        // Empty string is used as a signal to the caller to use the
        // appropriate default.
        RequestPath: "",
        Description: "Default kubernetes L7 Loadbalancing health check.",
        // How often to health check.
        CheckIntervalSec: 1,
        // How long to wait before claiming failure of a health check.
        TimeoutSec: 1,
        // Number of healthchecks to pass for a vm to be deemed healthy.
        HealthyThreshold: 1,
        // Number of healthchecks to fail before the vm is deemed unhealthy.
        UnhealthyThreshold: 10,
    }, nil
    return utils.DefaultHealthCheckTemplate(port), nil
}
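
The struct literal removed above now lives behind utils.DefaultHealthCheckTemplate; judging from the deleted lines, the helper presumably reads roughly as follows (a reconstruction from this diff, not from the utils source):

    // Reconstructed from the removed return statement above; the real
    // helper in the utils package may differ in detail. Assumes the
    // google.golang.org/api/compute/v1 import used by this file.
    func DefaultHealthCheckTemplate(port int64) *compute.HttpHealthCheck {
        return &compute.HttpHealthCheck{
            Port: port,
            // Empty string signals the caller to apply the appropriate default.
            RequestPath: "",
            Description: "Default kubernetes L7 Loadbalancing health check.",
            // How often to health check.
            CheckIntervalSec: 1,
            // How long to wait before claiming failure of a health check.
            TimeoutSec: 1,
            // Number of healthchecks to pass for a vm to be deemed healthy.
            HealthyThreshold: 1,
            // Number of healthchecks to fail before the vm is deemed unhealthy.
            UnhealthyThreshold: 10,
        }
    }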

// PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker.
type PodsByCreationTimestamp []*api.Pod

func (o PodsByCreationTimestamp) Len() int      { return len(o) }
func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

func (o PodsByCreationTimestamp) Less(i, j int) bool {
    if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
        return o[i].Name < o[j].Name
    }
    return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}
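
For illustration, how the comparator behaves on a timestamp tie (this sketch assumes the same k8s.io/kubernetes/pkg/api and pkg/api/unversioned packages used elsewhere in the change, plus fmt and sort):

    // Sketch: with equal creation timestamps the pod named "pod-a" sorts
    // first, so the probe picked by getHTTPProbe is stable across syncs.
    func ExamplePodsByCreationTimestamp() {
        now := unversioned.Now()
        pods := []*api.Pod{
            {ObjectMeta: api.ObjectMeta{Name: "pod-b", CreationTimestamp: now}},
            {ObjectMeta: api.ObjectMeta{Name: "pod-a", CreationTimestamp: now}},
        }
        sort.Sort(PodsByCreationTimestamp(pods))
        fmt.Println(pods[0].Name)
        // Output: pod-a
    }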