Update dependencies

Manuel de Brito Fontes 2017-10-06 17:33:32 -03:00
parent bf5616c65b
commit d6d374b28d
13962 changed files with 48226 additions and 3618880 deletions


@@ -0,0 +1,399 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"fmt"
"net"
"os"
"sort"
"strings"
"time"
"github.com/golang/glog"
"github.com/pkg/errors"
pool "gopkg.in/go-playground/pool.v3"
apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
"k8s.io/ingress-nginx/pkg/ingress/annotations/class"
"k8s.io/ingress-nginx/pkg/ingress/store"
"k8s.io/ingress-nginx/pkg/k8s"
"k8s.io/ingress-nginx/pkg/task"
)
const (
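// updateInterval is the period between the forced status re-syncs that
// Run enqueues via wait.Forever.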
updateInterval = 60 * time.Second
)
// Sync is the interface implemented by the Ingress status syncer.
type Sync interface {
Run(stopCh <-chan struct{})
Shutdown()
}
// Config is the configuration of the Ingress status syncer.
type Config struct {
Client clientset.Interface
PublishService string
ElectionID string
UpdateStatusOnShutdown bool
IngressLister store.IngressLister
DefaultIngressClass string
IngressClass string
// CustomIngressStatus allows setting custom values in the Ingress status
CustomIngressStatus func(*extensions.Ingress) []apiv1.LoadBalancerIngress
}
// statusSync keeps the status IP of each Ingress rule up to date by running
// a periodic check against all the defined rules. To simplify the process,
// leader election is used, so the update is executed by only one node
// (Ingress controllers can be scaled to more than one).
// If the controller is running with the flag --publish-service (with a valid
// service), the IP address behind that service is used; otherwise the source
// is the IP(s) of the node(s).
type statusSync struct {
Config
// pod contains runtime information about this pod
pod *k8s.PodInfo
elector *leaderelection.LeaderElector
// workqueue used to keep in sync the status IP/s
// in the Ingress rules
syncQueue *task.Queue
}
// Run starts the loop to keep the status in sync
func (s statusSync) Run(stopCh <-chan struct{}) {
go s.elector.Run()
go wait.Forever(s.update, updateInterval)
go s.syncQueue.Run(time.Second, stopCh)
<-stopCh
}
func (s *statusSync) update() {
// send a dummy object to the queue to force a sync
s.syncQueue.Enqueue("sync status")
}
// Shutdown stops the sync. If this instance is the leader, it also removes
// the current IP from the Ingress status, provided no other instances are running.
func (s statusSync) Shutdown() {
go s.syncQueue.Shutdown()
// remove IP from Ingress
if !s.elector.IsLeader() {
return
}
if !s.UpdateStatusOnShutdown {
glog.Warningf("skipping update of status of Ingress rules")
return
}
glog.Infof("updating status of Ingress rules (remove)")
addrs, err := s.runningAddresses()
if err != nil {
glog.Errorf("error obtaining running IPs: %v", err)
return
}
if len(addrs) > 1 {
// leave the job to the next leader
glog.Infof("leaving status update for next leader (%v)", len(addrs))
return
}
if s.isRunningMultiplePods() {
glog.V(2).Infof("skipping Ingress status update (multiple pods running - another one will be elected as master)")
return
}
glog.Infof("removing address from ingress status (%v)", addrs)
s.updateStatus([]apiv1.LoadBalancerIngress{})
}
func (s *statusSync) sync(key interface{}) error {
if s.syncQueue.IsShuttingDown() {
glog.V(2).Infof("skipping Ingress status update (shutting down in progress)")
return nil
}
if !s.elector.IsLeader() {
glog.V(2).Infof("skipping Ingress status update (I am not the current leader)")
return nil
}
addrs, err := s.runningAddresses()
if err != nil {
return err
}
s.updateStatus(sliceToStatus(addrs))
return nil
}
func (s statusSync) keyfunc(input interface{}) (interface{}, error) {
return input, nil
}
// NewStatusSyncer returns a new Sync instance
func NewStatusSyncer(config Config) Sync {
pod, err := k8s.GetPodDetails(config.Client)
if err != nil {
glog.Fatalf("unexpected error obtaining pod information: %v", err)
}
st := statusSync{
pod: pod,
Config: config,
}
st.syncQueue = task.NewCustomTaskQueue(st.sync, st.keyfunc)
// the election ID includes the ingress class so that controllers watching
// different classes can each elect their own leader to update the Ingress status
electionID := fmt.Sprintf("%v-%v", config.ElectionID, config.DefaultIngressClass)
if config.IngressClass != "" {
electionID = fmt.Sprintf("%v-%v", config.ElectionID, config.IngressClass)
}
callbacks := leaderelection.LeaderCallbacks{
OnStartedLeading: func(stop <-chan struct{}) {
glog.V(2).Infof("I am the new status update leader")
},
OnStoppedLeading: func() {
glog.V(2).Infof("I am not status update leader anymore")
},
OnNewLeader: func(identity string) {
glog.Infof("new leader elected: %v", identity)
},
}
broadcaster := record.NewBroadcaster()
hostname, _ := os.Hostname()
recorder := broadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{
Component: "ingress-leader-elector",
Host: hostname,
})
lock := resourcelock.ConfigMapLock{
ConfigMapMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: electionID},
Client: config.Client.CoreV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: pod.Name,
EventRecorder: recorder,
},
}
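// standard leader-election timing: renew at half the lease duration and
// retry at a quarter, so a standby replica can take over within one TTL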
ttl := 30 * time.Second
le, err := leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
Lock: &lock,
LeaseDuration: ttl,
RenewDeadline: ttl / 2,
RetryPeriod: ttl / 4,
Callbacks: callbacks,
})
if err != nil {
glog.Fatalf("unexpected error starting leader election: %v", err)
}
st.elector = le
return st
}
// runningAddresses returns a list of IP addresses and/or FQDNs where the
// ingress controller is currently running
func (s *statusSync) runningAddresses() ([]string, error) {
if s.PublishService != "" {
ns, name, _ := k8s.ParseNameNS(s.PublishService)
svc, err := s.Client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
addrs := []string{}
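// prefer the IP; fall back to the hostname for load balancers that only
// expose a DNS name (e.g. AWS ELB)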
for _, ip := range svc.Status.LoadBalancer.Ingress {
if ip.IP == "" {
addrs = append(addrs, ip.Hostname)
} else {
addrs = append(addrs, ip.IP)
}
}
addrs = append(addrs, svc.Spec.ExternalIPs...)
return addrs, nil
}
// get information about all the pods running the ingress controller
pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
})
if err != nil {
return nil, err
}
addrs := []string{}
for _, pod := range pods.Items {
name := k8s.GetNodeIP(s.Client, pod.Spec.NodeName)
if !sliceutils.StringInSlice(name, addrs) {
addrs = append(addrs, name)
}
}
return addrs, nil
}
func (s *statusSync) isRunningMultiplePods() bool {
pods, err := s.Client.CoreV1().Pods(s.pod.Namespace).List(metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(s.pod.Labels).String(),
})
if err != nil {
return false
}
return len(pods.Items) > 1
}
// sliceToStatus converts a slice of IPs and/or hostnames to LoadBalancerIngress
func sliceToStatus(endpoints []string) []apiv1.LoadBalancerIngress {
lbi := []apiv1.LoadBalancerIngress{}
for _, ep := range endpoints {
if net.ParseIP(ep) == nil {
lbi = append(lbi, apiv1.LoadBalancerIngress{Hostname: ep})
} else {
lbi = append(lbi, apiv1.LoadBalancerIngress{IP: ep})
}
}
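// sort by IP so repeated syncs produce a deterministic status and avoid
// spurious updates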
sort.SliceStable(lbi, func(a, b int) bool {
return lbi[a].IP < lbi[b].IP
})
return lbi
}
// updateStatus updates the status of the Ingress rules.
// If the CustomIngressStatus callback returns a non-nil value, that value is
// used instead of the newIngressPoint values.
func (s *statusSync) updateStatus(newIngressPoint []apiv1.LoadBalancerIngress) {
ings := s.IngressLister.List()
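// update the Ingress rules in parallel, capping the number of concurrent
// API calls at 10 workers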
p := pool.NewLimited(10)
defer p.Close()
batch := p.Batch()
for _, cur := range ings {
ing := cur.(*extensions.Ingress)
if !class.IsValid(ing, s.Config.IngressClass, s.Config.DefaultIngressClass) {
continue
}
batch.Queue(runUpdate(ing, newIngressPoint, s.Client, s.CustomIngressStatus))
}
batch.QueueComplete()
batch.WaitAll()
}
func runUpdate(ing *extensions.Ingress, status []apiv1.LoadBalancerIngress,
client clientset.Interface,
statusFunc func(*extensions.Ingress) []apiv1.LoadBalancerIngress) pool.WorkFunc {
return func(wu pool.WorkUnit) (interface{}, error) {
if wu.IsCancelled() {
return nil, nil
}
addrs := status
ca := statusFunc(ing)
if ca != nil {
addrs = ca
}
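// sort both the desired and the current addresses so the equality check
// below is order-independent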
sort.SliceStable(addrs, lessLoadBalancerIngress(addrs))
curIPs := ing.Status.LoadBalancer.Ingress
sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs))
if ingressSliceEqual(addrs, curIPs) {
glog.V(3).Infof("skipping update of Ingress %v/%v (no change)", ing.Namespace, ing.Name)
return true, nil
}
ingClient := client.Extensions().Ingresses(ing.Namespace)
currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
if err != nil {
return nil, errors.Wrapf(err, "unexpected error searching Ingress %v/%v", ing.Namespace, ing.Name)
}
glog.Infof("updating Ingress %v/%v status to %v", currIng.Namespace, currIng.Name, addrs)
currIng.Status.LoadBalancer.Ingress = addrs
_, err = ingClient.UpdateStatus(currIng)
if err != nil {
glog.Warningf("error updating ingress rule: %v", err)
}
return true, nil
}
}
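// lessLoadBalancerIngress returns a sort comparator that orders entries by
// hostname first, breaking ties by IP.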
func lessLoadBalancerIngress(addrs []apiv1.LoadBalancerIngress) func(int, int) bool {
return func(a, b int) bool {
switch strings.Compare(addrs[a].Hostname, addrs[b].Hostname) {
case -1:
return true
case 1:
return false
}
return addrs[a].IP < addrs[b].IP
}
}
func ingressSliceEqual(lhs, rhs []apiv1.LoadBalancerIngress) bool {
if len(lhs) != len(rhs) {
return false
}
for i := range lhs {
if lhs[i].IP != rhs[i].IP {
return false
}
if lhs[i].Hostname != rhs[i].Hostname {
return false
}
}
return true
}


@@ -0,0 +1,463 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"os"
"testing"
"time"
apiv1 "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testclient "k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api"
"k8s.io/ingress-nginx/pkg/ingress/annotations/class"
"k8s.io/ingress-nginx/pkg/ingress/store"
"k8s.io/ingress-nginx/pkg/k8s"
"k8s.io/ingress-nginx/pkg/task"
)
func buildLoadBalancerIngressByIP() []apiv1.LoadBalancerIngress {
return []apiv1.LoadBalancerIngress{
{
IP: "10.0.0.1",
Hostname: "foo1",
},
{
IP: "10.0.0.2",
Hostname: "foo2",
},
{
IP: "10.0.0.3",
Hostname: "",
},
{
IP: "",
Hostname: "foo4",
},
}
}
func buildSimpleClientSet() *testclient.Clientset {
return testclient.NewSimpleClientset(
&apiv1.PodList{Items: []apiv1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo1",
Namespace: apiv1.NamespaceDefault,
Labels: map[string]string{
"lable_sig": "foo_pod",
},
},
Spec: apiv1.PodSpec{
NodeName: "foo_node_2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo2",
Namespace: apiv1.NamespaceDefault,
Labels: map[string]string{
"lable_sig": "foo_no",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo3",
Namespace: api.NamespaceSystem,
Labels: map[string]string{
"lable_sig": "foo_pod",
},
},
Spec: apiv1.PodSpec{
NodeName: "foo_node_2",
},
},
}},
&apiv1.ServiceList{Items: []apiv1.Service{
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
Namespace: apiv1.NamespaceDefault,
},
Status: apiv1.ServiceStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: buildLoadBalancerIngressByIP(),
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_non_exist",
Namespace: apiv1.NamespaceDefault,
},
},
}},
&apiv1.NodeList{Items: []apiv1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_node_1",
},
Status: apiv1.NodeStatus{
Addresses: []apiv1.NodeAddress{
{
Type: apiv1.NodeInternalIP,
Address: "10.0.0.1",
}, {
Type: apiv1.NodeExternalIP,
Address: "10.0.0.2",
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_node_2",
},
Status: apiv1.NodeStatus{
Addresses: []apiv1.NodeAddress{
{
Type: apiv1.NodeInternalIP,
Address: "11.0.0.1",
},
{
Type: apiv1.NodeExternalIP,
Address: "11.0.0.2",
},
},
},
},
}},
&apiv1.EndpointsList{Items: []apiv1.Endpoints{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress-controller-leader",
Namespace: apiv1.NamespaceDefault,
SelfLink: "/api/v1/namespaces/default/endpoints/ingress-controller-leader",
},
}}},
&extensions.IngressList{Items: buildExtensionsIngresses()},
)
}
func fakeSyncFn(interface{}) error {
return nil
}
func buildExtensionsIngresses() []extensions.Ingress {
return []extensions.Ingress{
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_ingress_1",
Namespace: apiv1.NamespaceDefault,
},
Status: extensions.IngressStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: []apiv1.LoadBalancerIngress{
{
IP: "10.0.0.1",
Hostname: "foo1",
},
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_ingress_different_class",
Namespace: api.NamespaceDefault,
Annotations: map[string]string{
class.IngressKey: "no-nginx",
},
},
Status: extensions.IngressStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: []apiv1.LoadBalancerIngress{
{
IP: "0.0.0.0",
Hostname: "foo.bar.com",
},
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_ingress_2",
Namespace: apiv1.NamespaceDefault,
},
Status: extensions.IngressStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: []apiv1.LoadBalancerIngress{},
},
},
},
}
}
func buildIngressLister() store.IngressLister {
s := cache.NewStore(cache.MetaNamespaceKeyFunc)
s.Add(&extensions.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_ingress_non_01",
Namespace: apiv1.NamespaceDefault,
}})
s.Add(&extensions.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "foo_ingress_1",
Namespace: apiv1.NamespaceDefault,
},
Status: extensions.IngressStatus{
LoadBalancer: apiv1.LoadBalancerStatus{
Ingress: buildLoadBalancerIngressByIP(),
},
},
})
return store.IngressLister{Store: s}
}
func buildStatusSync() statusSync {
return statusSync{
pod: &k8s.PodInfo{
Name: "foo_base_pod",
Namespace: apiv1.NamespaceDefault,
Labels: map[string]string{
"lable_sig": "foo_pod",
},
},
syncQueue: task.NewTaskQueue(fakeSyncFn),
Config: Config{
Client: buildSimpleClientSet(),
PublishService: apiv1.NamespaceDefault + "/" + "foo",
IngressLister: buildIngressLister(),
CustomIngressStatus: func(*extensions.Ingress) []apiv1.LoadBalancerIngress {
return nil
},
},
}
}
func TestStatusActions(t *testing.T) {
// make sure election can be created
os.Setenv("POD_NAME", "foo1")
os.Setenv("POD_NAMESPACE", apiv1.NamespaceDefault)
c := Config{
Client: buildSimpleClientSet(),
PublishService: "",
IngressLister: buildIngressLister(),
DefaultIngressClass: "nginx",
IngressClass: "",
UpdateStatusOnShutdown: true,
CustomIngressStatus: func(*extensions.Ingress) []apiv1.LoadBalancerIngress {
return nil
},
}
// create object
fkSync := NewStatusSyncer(c)
if fkSync == nil {
t.Fatalf("expected a valid Sync")
}
fk := fkSync.(statusSync)
ns := make(chan struct{})
// start it and wait for the election and sync actions
go fk.Run(ns)
// wait for the election
time.Sleep(100 * time.Millisecond)
// execute sync
fk.sync("just-test")
// PublishService is empty, so the only running address is "11.0.0.2";
// after the update the Ingress status should contain only that IP
newIPs := []apiv1.LoadBalancerIngress{{
IP: "11.0.0.2",
}}
fooIngress1, err1 := fk.Client.Extensions().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{})
if err1 != nil {
t.Fatalf("unexpected error")
}
fooIngress1CurIPs := fooIngress1.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress1CurIPs, newIPs) {
t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs)
}
// execute shutdown
fk.Shutdown()
// the Ingress status should now be empty
newIPs2 := []apiv1.LoadBalancerIngress{}
fooIngress2, err2 := fk.Client.Extensions().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{})
if err2 != nil {
t.Fatalf("unexpected error")
}
fooIngress2CurIPs := fooIngress2.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress2CurIPs, newIPs2) {
t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, newIPs2)
}
oic, err := fk.Client.Extensions().Ingresses(api.NamespaceDefault).Get("foo_ingress_different_class", metav1.GetOptions{})
if err != nil {
t.Fatalf("unexpected error")
}
if oic.Status.LoadBalancer.Ingress[0].IP != "0.0.0.0" && oic.Status.LoadBalancer.Ingress[0].Hostname != "foo.bar.com" {
t.Fatalf("invalid ingress status for rule with different class")
}
// end test
ns <- struct{}{}
}
func TestCallback(t *testing.T) {
buildStatusSync()
}
func TestKeyfunc(t *testing.T) {
fk := buildStatusSync()
i := "foo_base_pod"
r, err := fk.keyfunc(i)
if err != nil {
t.Fatalf("unexpected error")
}
if r != i {
t.Errorf("returned %v but expected %v", r, i)
}
}
func TestRunningAddressesWithPublishService(t *testing.T) {
fk := buildStatusSync()
r, _ := fk.runningAddresses()
if r == nil {
t.Fatalf("returned nil but expected valid []string")
}
rl := len(r)
if rl != 4 {
t.Errorf("returned %v but expected %v", rl, 4)
}
}
func TestRunningAddressesWithPods(t *testing.T) {
fk := buildStatusSync()
fk.PublishService = ""
r, _ := fk.runningAddresses()
if r == nil {
t.Fatalf("returned nil but expected valid []string")
}
rl := len(r)
if rl != 1 {
t.Fatalf("returned %v but expected %v", rl, 1)
}
rv := r[0]
if rv != "11.0.0.2" {
t.Errorf("returned %v but expected %v", rv, "11.0.0.2")
}
}
/*
TODO: this test requires a refactoring
func TestUpdateStatus(t *testing.T) {
fk := buildStatusSync()
newIPs := buildLoadBalancerIngressByIP()
fk.updateStatus(newIPs)
fooIngress1, err1 := fk.Client.Extensions().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_1", metav1.GetOptions{})
if err1 != nil {
t.Fatalf("unexpected error")
}
fooIngress1CurIPs := fooIngress1.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress1CurIPs, newIPs) {
t.Fatalf("returned %v but expected %v", fooIngress1CurIPs, newIPs)
}
fooIngress2, err2 := fk.Client.Extensions().Ingresses(apiv1.NamespaceDefault).Get("foo_ingress_2", metav1.GetOptions{})
if err2 != nil {
t.Fatalf("unexpected error")
}
fooIngress2CurIPs := fooIngress2.Status.LoadBalancer.Ingress
if !ingressSliceEqual(fooIngress2CurIPs, []apiv1.LoadBalancerIngress{}) {
t.Fatalf("returned %v but expected %v", fooIngress2CurIPs, []apiv1.LoadBalancerIngress{})
}
}
*/
func TestSliceToStatus(t *testing.T) {
fkEndpoints := []string{
"10.0.0.1",
"2001:db8::68",
"opensource-k8s-ingress",
}
r := sliceToStatus(fkEndpoints)
if r == nil {
t.Fatalf("returned nil but expected a valid []apiv1.LoadBalancerIngress")
}
rl := len(r)
if rl != 3 {
t.Fatalf("returned %v but expected %v", rl, 3)
}
re1 := r[0]
if re1.Hostname != "opensource-k8s-ingress" {
t.Fatalf("returned %v but expected %v", re1, apiv1.LoadBalancerIngress{Hostname: "opensource-k8s-ingress"})
}
re2 := r[1]
if re2.IP != "10.0.0.1" {
t.Fatalf("returned %v but expected %v", re2, apiv1.LoadBalancerIngress{IP: "10.0.0.1"})
}
re3 := r[2]
if re3.IP != "2001:db8::68" {
t.Fatalf("returned %v but expected %v", re3, apiv1.LoadBalancerIngress{IP: "2001:db8::68"})
}
}
func TestIngressSliceEqual(t *testing.T) {
fk1 := buildLoadBalancerIngressByIP()
fk2 := append(buildLoadBalancerIngressByIP(), apiv1.LoadBalancerIngress{
IP: "10.0.0.5",
Hostname: "foo5",
})
fk3 := buildLoadBalancerIngressByIP()
fk3[0].Hostname = "foo_no_01"
fk4 := buildLoadBalancerIngressByIP()
fk4[2].IP = "11.0.0.3"
fooTests := []struct {
lhs []apiv1.LoadBalancerIngress
rhs []apiv1.LoadBalancerIngress
er bool
}{
{fk1, fk1, true},
{fk2, fk1, false},
{fk3, fk1, false},
{fk4, fk1, false},
{fk1, nil, false},
{nil, nil, true},
{[]apiv1.LoadBalancerIngress{}, []apiv1.LoadBalancerIngress{}, true},
}
for _, fooTest := range fooTests {
r := ingressSliceEqual(fooTest.lhs, fooTest.rhs)
if r != fooTest.er {
t.Errorf("returned %v but expected %v", r, fooTest.er)
}
}
}
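
For context, a minimal usage sketch (not part of this commit): a hypothetical startStatusSyncer helper showing how a controller could wire up the package. kubeClient and ingressLister stand in for the controller's real clientset and informer-backed lister, and the PublishService value is illustrative:

// startStatusSyncer is a hypothetical example, not part of the package.
// NewStatusSyncer reads pod details via k8s.GetPodDetails, so POD_NAME and
// POD_NAMESPACE must be set in the environment (as in TestStatusActions).
func startStatusSyncer(kubeClient clientset.Interface, ingressLister store.IngressLister, stopCh <-chan struct{}) {
sync := NewStatusSyncer(Config{
Client: kubeClient,
PublishService: "ingress-nginx/ingress-nginx", // illustrative namespace/name
ElectionID: "ingress-controller-leader",
UpdateStatusOnShutdown: true,
IngressLister: ingressLister,
DefaultIngressClass: "nginx",
CustomIngressStatus: func(*extensions.Ingress) []apiv1.LoadBalancerIngress {
return nil // no override: publish the running addresses
},
})
// Run blocks on stopCh, so start it in its own goroutine and remove the
// published address on the way out.
go sync.Run(stopCh)
<-stopCh
sync.Shutdown()
}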