Update gce controller
parent ea7f943160
commit c7c2a564a9
26 changed files with 275 additions and 236 deletions
@@ -22,16 +22,17 @@ import (
 	"os"
 	"time"
 
+	"github.com/golang/glog"
+
+	"k8s.io/kubernetes/pkg/cloudprovider"
+	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
+
 	"k8s.io/ingress/controllers/gce/backends"
 	"k8s.io/ingress/controllers/gce/firewalls"
 	"k8s.io/ingress/controllers/gce/healthchecks"
 	"k8s.io/ingress/controllers/gce/instances"
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/cloudprovider"
-	gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
-
-	"github.com/golang/glog"
 )
 
 const (
@@ -23,20 +23,21 @@ import (
 	"sync"
 	"time"
 
+	"github.com/golang/glog"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/client-go/kubernetes"
+	unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
+	listers "k8s.io/client-go/listers/core/v1"
+	base_api "k8s.io/client-go/pkg/api"
+	api "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
+
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
-	"k8s.io/kubernetes/pkg/client/record"
-	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/runtime"
-	"k8s.io/kubernetes/pkg/watch"
-
-	"github.com/golang/glog"
 )
 
 var (
@@ -57,16 +58,16 @@
 // LoadBalancerController watches the kubernetes api and adds/removes services
 // from the loadbalancer, via loadBalancerConfig.
 type LoadBalancerController struct {
-	client         client.Interface
-	ingController  *cache.Controller
-	nodeController *cache.Controller
-	svcController  *cache.Controller
-	podController  *cache.Controller
+	client         kubernetes.Interface
+	ingController  cache.Controller
+	nodeController cache.Controller
+	svcController  cache.Controller
+	podController  cache.Controller
 	ingLister      StoreToIngressLister
-	nodeLister     cache.StoreToNodeLister
-	svcLister      cache.StoreToServiceLister
+	nodeLister     StoreToNodeLister
+	svcLister      StoreToServiceLister
 	// Health checks are the readiness probes of containers on pods.
-	podLister      cache.StoreToPodLister
+	podLister      StoreToPodLister
 	// TODO: Watch secrets
 	CloudClusterManager *ClusterManager
 	recorder            record.EventRecorder
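The pointer-to-value change in this hunk falls out of the client-go API: `cache.Controller` in `k8s.io/client-go/tools/cache` is an interface, whereas the old `k8s.io/kubernetes/pkg/client/cache` package exposed a concrete struct that was held as `*cache.Controller`. A minimal sketch (the `watcher` type is hypothetical, not part of the commit):

```go
package controller

import "k8s.io/client-go/tools/cache"

// watcher holds the informer handle by value: client-go's cache.Controller
// is an interface, so no pointer is needed.
type watcher struct {
	ingController cache.Controller
}
```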
@@ -91,7 +92,7 @@ type LoadBalancerController struct {
 // - clusterManager: A ClusterManager capable of creating all cloud resources
 // required for L7 loadbalancing.
 // - resyncPeriod: Watchers relist from the Kubernetes API server this often.
-func NewLoadBalancerController(kubeClient client.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
+func NewLoadBalancerController(kubeClient kubernetes.Interface, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
@@ -101,7 +102,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 		client:              kubeClient,
 		CloudClusterManager: clusterManager,
 		stopCh:              make(chan struct{}),
-		recorder: eventBroadcaster.NewRecorder(
+		recorder: eventBroadcaster.NewRecorder(base_api.Scheme,
 			api.EventSource{Component: "loadbalancer-controller"}),
 	}
 	lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
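For context, the event-recorder wiring under client-go looks roughly like the sketch below; `NewRecorder` now takes the scheme in which the recorded objects' kinds are registered. This is a hedged sketch against the client-go vintage this commit targets (`newRecorder` is an illustrative helper, not code from the commit):

```go
package controller

import (
	"github.com/golang/glog"

	"k8s.io/client-go/kubernetes"
	unversionedcore "k8s.io/client-go/kubernetes/typed/core/v1"
	base_api "k8s.io/client-go/pkg/api"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires an EventBroadcaster to the API server and returns a
// recorder; base_api.Scheme tells it how to resolve the kinds of the
// objects the events are attached to.
func newRecorder(kubeClient kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.Infof)
	broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{
		Interface: kubeClient.Core().Events(""),
	})
	return broadcaster.NewRecorder(base_api.Scheme,
		api.EventSource{Component: "loadbalancer-controller"})
}
```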
@@ -140,10 +141,7 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 		},
 	}
 	lbc.ingLister.Store, lbc.ingController = cache.NewInformer(
-		&cache.ListWatch{
-			ListFunc:  ingressListFunc(lbc.client, namespace),
-			WatchFunc: ingressWatchFunc(lbc.client, namespace),
-		},
+		cache.NewListWatchFromClient(lbc.client.Extensions().RESTClient(), "ingresses", namespace, fields.Everything()),
 		&extensions.Ingress{}, resyncPeriod, pathHandlers)
 
 	// Service watch handlers
@@ -173,30 +171,14 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
 	)
 
-	nodeHandlers := cache.ResourceEventHandlerFuncs{
-		AddFunc:    lbc.nodeQueue.enqueue,
-		DeleteFunc: lbc.nodeQueue.enqueue,
-		// Nodes are updated every 10s and we don't care, so no update handler.
-	}
 	// Node watch handlers
-	lbc.nodeLister.Store, lbc.nodeController = cache.NewInformer(
-		&cache.ListWatch{
-			ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
-				return lbc.client.Core().RESTClient().Get().
-					Resource("nodes").
-					FieldsSelectorParam(fields.Everything()).
-					Do().
-					Get()
-			},
-			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return lbc.client.Core().RESTClient().Get().
-					Prefix("watch").
-					Resource("nodes").
-					FieldsSelectorParam(fields.Everything()).
-					Param("resourceVersion", options.ResourceVersion).Watch()
-			},
-		},
-		&api.Node{}, 0, nodeHandlers)
+	lbc.nodeLister.Indexer, lbc.nodeController = cache.NewIndexerInformer(
+		cache.NewListWatchFromClient(lbc.client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()),
+		&api.Node{},
+		resyncPeriod,
+		cache.ResourceEventHandlerFuncs{},
+		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
+	)
 
 	lbc.tr = &GCETranslator{&lbc}
 	lbc.tlsLoader = &apiServerTLSLoader{client: lbc.client}
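Both informer hunks follow the same shape: a `ListWatch` built by `cache.NewListWatchFromClient` replaces the hand-rolled List/Watch closures, and `cache.NewIndexerInformer` hands back an `Indexer` (the store the listers read from) plus a `cache.Controller`. A minimal sketch, assuming client-go of this era (`newNodeWatch` is illustrative, not part of the commit):

```go
package controller

import (
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// newNodeWatch builds a node informer: the typed client's RESTClient drives
// list/watch for the lowercase-plural resource name.
func newNodeWatch(client kubernetes.Interface) (cache.Indexer, cache.Controller) {
	lw := cache.NewListWatchFromClient(
		client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything())
	return cache.NewIndexerInformer(lw, &api.Node{},
		0, // resync period; the commit passes resyncPeriod here
		cache.ResourceEventHandlerFuncs{},
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
}
```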
@@ -205,18 +187,6 @@ func NewLoadBalancerController(kubeClient client.Interface, clusterManager *Clus
 	return &lbc, nil
 }
 
-func ingressListFunc(c client.Interface, ns string) func(api.ListOptions) (runtime.Object, error) {
-	return func(opts api.ListOptions) (runtime.Object, error) {
-		return c.Extensions().Ingresses(ns).List(opts)
-	}
-}
-
-func ingressWatchFunc(c client.Interface, ns string) func(options api.ListOptions) (watch.Interface, error) {
-	return func(options api.ListOptions) (watch.Interface, error) {
-		return c.Extensions().Ingresses(ns).Watch(options)
-	}
-}
-
 // enqueueIngressForService enqueues all the Ingress' for a Service.
 func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) {
 	svc := obj.(*api.Service)
@@ -377,7 +347,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing
 
 	// Update IP through update/status endpoint
 	ip := l7.GetIP()
-	currIng, err := ingClient.Get(ing.Name)
+	currIng, err := ingClient.Get(ing.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
@@ -401,7 +371,7 @@ func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing
 		}
 	}
 	// Update annotations through /update endpoint
-	currIng, err = ingClient.Get(ing.Name)
+	currIng, err = ingClient.Get(ing.Name, metav1.GetOptions{})
 	if err != nil {
 		return err
 	}
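Both `Get` fixes above are the same mechanical change: client-go reads take an explicit `metav1.GetOptions`, and the zero value keeps the old behaviour. A one-function sketch (`getIngress` is a hypothetical helper):

```go
package controller

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
)

// getIngress shows the new read signature; GetOptions can carry e.g. a
// ResourceVersion, and the zero value means a plain read.
func getIngress(client kubernetes.Interface, ns, name string) (*extensions.Ingress, error) {
	return client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
}
```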
@@ -464,7 +434,7 @@ func (lbc *LoadBalancerController) syncNodes(key string) error {
 	return nil
 }
 
-func getNodeReadyPredicate() cache.NodeConditionPredicate {
+func getNodeReadyPredicate() listers.NodeConditionPredicate {
 	return func(node *api.Node) bool {
 		for ix := range node.Status.Conditions {
 			condition := &node.Status.Conditions[ix]
@@ -479,7 +449,7 @@ func getNodeReadyPredicate() cache.NodeConditionPredicate {
 // getReadyNodeNames returns names of schedulable, ready nodes from the node lister.
 func (lbc *LoadBalancerController) getReadyNodeNames() ([]string, error) {
 	nodeNames := []string{}
-	nodes, err := lbc.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+	nodes, err := listers.NewNodeLister(lbc.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 	if err != nil {
 		return nodeNames, err
 	}
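With `cache.StoreToNodeLister` gone, ready-node filtering goes through the typed lister, as the hunk above shows. A usage sketch (`readyNodeNames` is illustrative):

```go
package controller

import (
	listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// readyNodeNames wraps the informer's indexer in a typed NodeLister and
// filters with a NodeConditionPredicate such as getNodeReadyPredicate().
func readyNodeNames(indexer cache.Indexer, pred listers.NodeConditionPredicate) ([]string, error) {
	nodes, err := listers.NewNodeLister(indexer).ListWithPredicate(pred)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(nodes))
	for _, n := range nodes {
		names = append(names, n.Name)
	}
	return names, nil
}
```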
@@ -24,18 +24,18 @@ import (
 
 	compute "google.golang.org/api/compute/v1"
 
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/pkg/api"
+	api_v1 "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+
 	"k8s.io/ingress/controllers/gce/firewalls"
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/testapi"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/client/restclient"
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/util/uuid"
 )
 
 const testClusterName = "testcluster"
@@ -51,9 +51,9 @@ func defaultBackendName(clusterName string) string {
 }
 
 // newLoadBalancerController create a loadbalancer controller.
-func newLoadBalancerController(t *testing.T, cm *fakeClusterManager, masterURL string) *LoadBalancerController {
-	client := client.NewForConfigOrDie(&restclient.Config{Host: masterURL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
-	lb, err := NewLoadBalancerController(client, cm.ClusterManager, 1*time.Second, api.NamespaceAll)
+func newLoadBalancerController(t *testing.T, cm *fakeClusterManager) *LoadBalancerController {
+	kubeClient := fake.NewSimpleClientset()
+	lb, err := NewLoadBalancerController(kubeClient, cm.ClusterManager, 1*time.Second, api_v1.NamespaceAll)
 	if err != nil {
 		t.Fatalf("%v", err)
 	}
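`fake.NewSimpleClientset` makes the test constructor self-contained: it returns an in-memory `kubernetes.Interface`, so the masterURL/restclient plumbing disappears. A sketch of the resulting setup (`TestNewController` is illustrative, not a test from the commit):

```go
package controller

import (
	"testing"
	"time"

	"k8s.io/client-go/kubernetes/fake"
	api_v1 "k8s.io/client-go/pkg/api/v1"
)

// TestNewController builds the controller against the fake clientset; the
// fake can be seeded with runtime.Objects if a test needs preexisting state.
func TestNewController(t *testing.T) {
	kubeClient := fake.NewSimpleClientset()
	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
	if _, err := NewLoadBalancerController(kubeClient, cm.ClusterManager, 1*time.Second, api_v1.NamespaceAll); err != nil {
		t.Fatalf("%v", err)
	}
}
```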
@@ -95,7 +95,7 @@ func toIngressRules(hostRules map[string]utils.FakeIngressRuleValueMap) []extens
 // newIngress returns a new Ingress with the given path map.
 func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.Ingress {
 	return &extensions.Ingress{
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      fmt.Sprintf("%v", uuid.NewUUID()),
 			Namespace: api.NamespaceNone,
 		},
@@ -107,8 +107,8 @@ func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.
 			Rules: toIngressRules(hostRules),
 		},
 		Status: extensions.IngressStatus{
-			LoadBalancer: api.LoadBalancerStatus{
-				Ingress: []api.LoadBalancerIngress{
+			LoadBalancer: api_v1.LoadBalancerStatus{
+				Ingress: []api_v1.LoadBalancerIngress{
 					{IP: testIPManager.ip()},
 				},
 			},
@@ -178,21 +178,21 @@ func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePo
 	}
 	for _, rule := range ing.Spec.Rules {
 		for _, path := range rule.HTTP.Paths {
-			svc := &api.Service{
-				ObjectMeta: api.ObjectMeta{
+			svc := &api_v1.Service{
+				ObjectMeta: meta_v1.ObjectMeta{
 					Name:      path.Backend.ServiceName,
 					Namespace: ing.Namespace,
 				},
 			}
-			var svcPort api.ServicePort
+			var svcPort api_v1.ServicePort
 			switch path.Backend.ServicePort.Type {
 			case intstr.Int:
-				svcPort = api.ServicePort{Port: path.Backend.ServicePort.IntVal}
+				svcPort = api_v1.ServicePort{Port: path.Backend.ServicePort.IntVal}
 			default:
-				svcPort = api.ServicePort{Name: path.Backend.ServicePort.StrVal}
+				svcPort = api_v1.ServicePort{Name: path.Backend.ServicePort.StrVal}
 			}
 			svcPort.NodePort = int32(pm.getNodePort(path.Backend.ServiceName))
-			svc.Spec.Ports = []api.ServicePort{svcPort}
+			svc.Spec.Ports = []api_v1.ServicePort{svcPort}
 			lbc.svcLister.Indexer.Add(svc)
 		}
 	}
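The fixture churn above is all one split: object metadata (`ObjectMeta`, `NewTime`, `GetOptions`) now lives in apimachinery's `meta/v1`, while the core types (`Service`, `Pod`, `Node`) come from client-go's `api/v1`. A compact sketch (`newTestService` is hypothetical):

```go
package controller

import (
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api_v1 "k8s.io/client-go/pkg/api/v1"
)

// newTestService pairs a client-go core/v1 type with apimachinery meta/v1
// metadata, mirroring the package split the tests adopt.
func newTestService(name, ns string, port int32) *api_v1.Service {
	return &api_v1.Service{
		ObjectMeta: meta_v1.ObjectMeta{Name: name, Namespace: ns},
		Spec: api_v1.ServiceSpec{
			Ports: []api_v1.ServicePort{{NodePort: port}},
		},
	}
}
```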
@@ -201,7 +201,7 @@
 func TestLbCreateDelete(t *testing.T) {
 	testFirewallName := "quux"
 	cm := NewFakeClusterManager(DefaultClusterUID, testFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap1 := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -293,7 +293,7 @@ func TestLbCreateDelete(t *testing.T) {
 
 func TestLbFaultyUpdate(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -330,7 +330,7 @@ func TestLbFaultyUpdate(t *testing.T) {
 
 func TestLbDefaulting(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	// Make sure the controller plugs in the default values accepted by GCE.
 	ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
 	pm := newPortManager(1, 65536)
@@ -348,7 +348,7 @@ func TestLbDefaulting(t *testing.T) {
 
 func TestLbNoService(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -373,8 +373,8 @@ func TestLbNoService(t *testing.T) {
 	// Creates the service, next sync should have complete url map.
 	pm := newPortManager(1, 65536)
 	addIngress(lbc, ing, pm)
-	lbc.enqueueIngressForService(&api.Service{
-		ObjectMeta: api.ObjectMeta{
+	lbc.enqueueIngressForService(&api_v1.Service{
+		ObjectMeta: meta_v1.ObjectMeta{
 			Name:      "foo1svc",
 			Namespace: ing.Namespace,
 		},
@@ -392,7 +392,7 @@ func TestLbNoService(t *testing.T) {
 
 func TestLbChangeStaticIP(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	inputMap := map[string]utils.FakeIngressRuleValueMap{
 		"foo.example.com": {
 			"/foo1": "foo1svc",
@@ -17,10 +17,10 @@ limitations under the License.
 package controller
 
 import (
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
-
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+
 	"k8s.io/ingress/controllers/gce/backends"
 	"k8s.io/ingress/controllers/gce/firewalls"
 	"k8s.io/ingress/controllers/gce/healthchecks"
@@ -19,13 +19,14 @@ package controller
 import (
 	"fmt"
 
-	"k8s.io/ingress/controllers/gce/loadbalancers"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-
 	"github.com/golang/glog"
+
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	api "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+
+	"k8s.io/ingress/controllers/gce/loadbalancers"
 )
 
 // secretLoaders returns a type containing all the secrets of an Ingress.
@@ -44,7 +45,7 @@ func (n *noOPValidator) validate(certs *loadbalancers.TLSCerts) error {
 // apiServerTLSLoader loads TLS certs from the apiserver.
 type apiServerTLSLoader struct {
 	noOPValidator
-	client client.Interface
+	client kubernetes.Interface
 }
 
 func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) {
@@ -59,7 +60,7 @@ func (t *apiServerTLSLoader) load(ing *extensions.Ingress) (*loadbalancers.TLSCe
 	secretName := ing.Spec.TLS[0].SecretName
 	// TODO: Replace this for a secret watcher.
 	glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName)
-	secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName)
+	secret, err := t.client.Core().Secrets(ing.Namespace).Get(secretName, meta_v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -21,10 +21,10 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+	api_v1 "k8s.io/client-go/pkg/api/v1"
 )
 
 // Pods created in loops start from this time, for routines that
@@ -33,7 +33,7 @@ var firstPodCreationTime = time.Date(2006, 01, 02, 15, 04, 05, 0, time.UTC)
 
 func TestZoneListing(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	zoneToNode := map[string][]string{
 		"zone-1": {"n1"},
 		"zone-2": {"n2"},
@@ -58,7 +58,7 @@ func TestZoneListing(t *testing.T) {
 
 func TestInstancesAddedToZones(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	zoneToNode := map[string][]string{
 		"zone-1": {"n1", "n2"},
 		"zone-2": {"n3"},
@@ -93,12 +93,12 @@ func TestInstancesAddedToZones(t *testing.T) {
 
 func TestProbeGetter(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
 		3002: "/foo",
 	}
-	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+	addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 	for p, exp := range nodePortToHealthCheck {
 		got, err := lbc.tr.HealthCheck(p)
 		if err != nil {
@@ -111,13 +111,13 @@ func TestProbeGetter(t *testing.T) {
 
 func TestProbeGetterNamedPort(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
 	}
-	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+	addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 	for _, p := range lbc.podLister.Indexer.List() {
-		pod := p.(*api.Pod)
+		pod := p.(*api_v1.Pod)
 		pod.Spec.Containers[0].Ports[0].Name = "test"
 		pod.Spec.Containers[0].ReadinessProbe.Handler.HTTPGet.Port = intstr.IntOrString{Type: intstr.String, StrVal: "test"}
 	}
@@ -134,26 +134,26 @@ func TestProbeGetterNamedPort(t *testing.T) {
 
 func TestProbeGetterCrossNamespace(t *testing.T) {
 	cm := NewFakeClusterManager(DefaultClusterUID, DefaultFirewallName)
-	lbc := newLoadBalancerController(t, cm, "")
+	lbc := newLoadBalancerController(t, cm)
 
-	firstPod := &api.Pod{
-		ObjectMeta: api.ObjectMeta{
+	firstPod := &api_v1.Pod{
+		ObjectMeta: meta_v1.ObjectMeta{
 			// labels match those added by "addPods", but ns and health check
 			// path is different. If this pod was created in the same ns, it
 			// would become the health check.
 			Labels:            map[string]string{"app-3001": "test"},
 			Name:              fmt.Sprintf("test-pod-new-ns"),
 			Namespace:         "new-ns",
-			CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
+			CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(-time.Duration(time.Hour))),
 		},
-		Spec: api.PodSpec{
-			Containers: []api.Container{
+		Spec: api_v1.PodSpec{
+			Containers: []api_v1.Container{
 				{
-					Ports: []api.ContainerPort{{ContainerPort: 80}},
-					ReadinessProbe: &api.Probe{
-						Handler: api.Handler{
-							HTTPGet: &api.HTTPGetAction{
-								Scheme: api.URISchemeHTTP,
+					Ports: []api_v1.ContainerPort{{ContainerPort: 80}},
+					ReadinessProbe: &api_v1.Probe{
+						Handler: api_v1.Handler{
+							HTTPGet: &api_v1.HTTPGetAction{
+								Scheme: api_v1.URISchemeHTTP,
 								Path:   "/badpath",
 								Port: intstr.IntOrString{
 									Type: intstr.Int,
@@ -170,7 +170,7 @@ func TestProbeGetterCrossNamespace(t *testing.T) {
 	nodePortToHealthCheck := map[int64]string{
 		3001: "/healthz",
 	}
-	addPods(lbc, nodePortToHealthCheck, api.NamespaceDefault)
+	addPods(lbc, nodePortToHealthCheck, api_v1.NamespaceDefault)
 
 	for p, exp := range nodePortToHealthCheck {
 		got, err := lbc.tr.HealthCheck(p)
@@ -186,10 +186,10 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 	delay := time.Minute
 	for np, u := range nodePortToHealthCheck {
 		l := map[string]string{fmt.Sprintf("app-%d", np): "test"}
-		svc := &api.Service{
-			Spec: api.ServiceSpec{
+		svc := &api_v1.Service{
+			Spec: api_v1.ServiceSpec{
 				Selector: l,
-				Ports: []api.ServicePort{
+				Ports: []api_v1.ServicePort{
 					{
 						NodePort: int32(np),
 						TargetPort: intstr.IntOrString{
@@ -204,21 +204,21 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 		svc.Namespace = ns
 		lbc.svcLister.Indexer.Add(svc)
 
-		pod := &api.Pod{
-			ObjectMeta: api.ObjectMeta{
+		pod := &api_v1.Pod{
+			ObjectMeta: meta_v1.ObjectMeta{
 				Labels:            l,
 				Name:              fmt.Sprintf("%d", np),
 				Namespace:         ns,
-				CreationTimestamp: unversioned.NewTime(firstPodCreationTime.Add(delay)),
+				CreationTimestamp: meta_v1.NewTime(firstPodCreationTime.Add(delay)),
 			},
-			Spec: api.PodSpec{
-				Containers: []api.Container{
+			Spec: api_v1.PodSpec{
+				Containers: []api_v1.Container{
 					{
-						Ports: []api.ContainerPort{{Name: "test", ContainerPort: 80}},
-						ReadinessProbe: &api.Probe{
-							Handler: api.Handler{
-								HTTPGet: &api.HTTPGetAction{
-									Scheme: api.URISchemeHTTP,
+						Ports: []api_v1.ContainerPort{{Name: "test", ContainerPort: 80}},
+						ReadinessProbe: &api_v1.Probe{
+							Handler: api_v1.Handler{
+								HTTPGet: &api_v1.HTTPGetAction{
+									Scheme: api_v1.URISchemeHTTP,
 									Path:   u,
 									Port: intstr.IntOrString{
 										Type: intstr.Int,
@@ -239,20 +239,20 @@ func addPods(lbc *LoadBalancerController, nodePortToHealthCheck map[int64]string
 func addNodes(lbc *LoadBalancerController, zoneToNode map[string][]string) {
 	for zone, nodes := range zoneToNode {
 		for _, node := range nodes {
-			n := &api.Node{
-				ObjectMeta: api.ObjectMeta{
+			n := &api_v1.Node{
+				ObjectMeta: meta_v1.ObjectMeta{
 					Name: node,
 					Labels: map[string]string{
 						zoneKey: zone,
 					},
 				},
-				Status: api.NodeStatus{
-					Conditions: []api.NodeCondition{
-						{Type: api.NodeReady, Status: api.ConditionTrue},
+				Status: api_v1.NodeStatus{
+					Conditions: []api_v1.NodeCondition{
+						{Type: api_v1.NodeReady, Status: api_v1.ConditionTrue},
 					},
 				},
 			}
-			lbc.nodeLister.Store.Add(n)
+			lbc.nodeLister.Indexer.Add(n)
 		}
 	}
 	lbc.CloudClusterManager.instancePool.Init(lbc.tr)
@@ -22,19 +22,23 @@ import (
 	"strconv"
 	"time"
 
+	"github.com/golang/glog"
+
 	compute "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	listers "k8s.io/client-go/listers/core/v1"
+	api "k8s.io/client-go/pkg/api/v1"
+	extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+
 	"k8s.io/ingress/controllers/gce/loadbalancers"
 	"k8s.io/ingress/controllers/gce/utils"
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	"k8s.io/kubernetes/pkg/client/cache"
-	"k8s.io/kubernetes/pkg/labels"
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/util/wait"
-	"k8s.io/kubernetes/pkg/util/workqueue"
-
-	"github.com/golang/glog"
 )
 
 const (
@@ -202,6 +206,41 @@ type StoreToIngressLister struct {
 	cache.Store
 }
 
+// StoreToNodeLister makes a Store that lists Node.
+type StoreToNodeLister struct {
+	cache.Indexer
+}
+
+// StoreToServiceLister makes a Store that lists Service.
+type StoreToServiceLister struct {
+	cache.Indexer
+}
+
+// StoreToPodLister makes a Store that lists Pods.
+type StoreToPodLister struct {
+	cache.Indexer
+}
+
+func (s *StoreToPodLister) List(selector labels.Selector) (ret []*api.Pod, err error) {
+	err = ListAll(s.Indexer, selector, func(m interface{}) {
+		ret = append(ret, m.(*api.Pod))
+	})
+	return ret, err
+}
+
+func ListAll(store cache.Store, selector labels.Selector, appendFn cache.AppendFunc) error {
+	for _, m := range store.List() {
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+	return nil
+}
+
 // List lists all Ingress' in the store.
 func (s *StoreToIngressLister) List() (ing extensions.IngressList, err error) {
 	for _, m := range s.Store.List() {
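The new local `StoreToPodLister` and `ListAll` replace the deleted upstream helpers: the embedded `cache.Indexer` stores untyped values, `ListAll` filters them by label, and `List` asserts the matches back to `*api.Pod`. A usage sketch (`listPodsByLabel` and `newPodLister` are illustrative):

```go
package controller

import (
	"k8s.io/apimachinery/pkg/labels"
	api "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/cache"
)

// listPodsByLabel selects pods whose labels contain key=value.
func listPodsByLabel(pods StoreToPodLister, key, value string) ([]*api.Pod, error) {
	return pods.List(labels.SelectorFromSet(labels.Set{key: value}))
}

// newPodLister builds the lister around a plain indexer, as a test might.
func newPodLister() StoreToPodLister {
	return StoreToPodLister{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})}
}
```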
@@ -336,7 +375,7 @@ func (t *GCETranslator) toGCEBackend(be *extensions.IngressBackend, ns string) (
 func (t *GCETranslator) getServiceNodePort(be extensions.IngressBackend, namespace string) (int, error) {
 	obj, exists, err := t.svcLister.Indexer.Get(
 		&api.Service{
-			ObjectMeta: api.ObjectMeta{
+			ObjectMeta: meta_v1.ObjectMeta{
 				Name:      be.ServiceName,
 				Namespace: namespace,
 			},
@@ -411,7 +450,7 @@ func getZone(n *api.Node) string {
 
 // GetZoneForNode returns the zone for a given node by looking up its zone label.
 func (t *GCETranslator) GetZoneForNode(name string) (string, error) {
-	nodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+	nodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 	if err != nil {
 		return "", err
 	}
@@ -428,7 +467,7 @@ func (t *GCETranslator) GetZoneForNode(name string) (string, error) {
 // ListZones returns a list of zones this Kubernetes cluster spans.
 func (t *GCETranslator) ListZones() ([]string, error) {
 	zones := sets.String{}
-	readyNodes, err := t.nodeLister.NodeCondition(getNodeReadyPredicate()).List()
+	readyNodes, err := listers.NewNodeLister(t.nodeLister.Indexer).ListWithPredicate(getNodeReadyPredicate())
 	if err != nil {
 		return zones.List(), err
 	}
@@ -502,14 +541,12 @@ func isSimpleHTTPProbe(probe *api.Probe) bool {
 // the request path, callers are responsible for swapping this out for the
 // appropriate default.
 func (t *GCETranslator) HealthCheck(port int64) (*compute.HttpHealthCheck, error) {
-	sl, err := t.svcLister.List(labels.Everything())
-	if err != nil {
-		return nil, err
-	}
+	sl := t.svcLister.List()
 	var ingresses []extensions.Ingress
 	var healthCheck *compute.HttpHealthCheck
 	// Find the label and target port of the one service with the given nodePort
-	for _, s := range sl {
+	for _, as := range sl {
+		s := as.(*api.Service)
 		for _, p := range s.Spec.Ports {
 
 			// only one Service can match this nodePort, try and look up