Cluster UID store/retrieval

Prashanth Balasubramanian 2016-03-14 17:33:12 -07:00
parent e93d8d8152
commit 24fb4b70aa
12 changed files with 263 additions and 20 deletions

View file

@@ -66,7 +66,7 @@ const (
 // ClusterManager manages cluster resource pools.
 type ClusterManager struct {
-	ClusterNamer           utils.Namer
+	ClusterNamer           *utils.Namer
 	defaultBackendNodePort int64
 	instancePool           instances.NodePool
 	backendPool            backends.BackendPool
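
Switching the field from utils.Namer to *utils.Namer means the manager and every pool it builds share a single namer instance instead of each holding an independent copy. A minimal self-contained sketch of the difference, using a toy Namer rather than the real utils package:

package main

import "fmt"

// Namer is a toy stand-in for utils.Namer; the real type lives in
// k8s.io/contrib/ingress/controllers/gce/utils.
type Namer struct {
	ClusterName string
}

func main() {
	// Value semantics: each holder gets an independent copy.
	v := Namer{"old-uid"}
	held := v
	v.ClusterName = "new-uid"
	fmt.Println(held.ClusterName) // "old-uid": the update never propagated

	// Pointer semantics: every holder observes the same struct.
	p := &Namer{"old-uid"}
	heldPtr := p
	p.ClusterName = "new-uid"
	fmt.Println(heldPtr.ClusterName) // "new-uid": shared state
}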
@@ -227,12 +227,17 @@ func NewClusterManager(
 	// and continue.
 	cloud := getGCEClient()
-	cluster := ClusterManager{ClusterNamer: utils.Namer{name}}
+	// Names are fundamental to the cluster, the uid allocator makes sure names don't collide.
+	cluster := ClusterManager{ClusterNamer: &utils.Namer{name}}
 	zone, err := cloud.GetZone()
 	if err != nil {
 		return nil, err
 	}
+	// NodePool stores GCE vms that are in this Kubernetes cluster.
 	cluster.instancePool = instances.NewNodePool(cloud, zone.FailureDomain)
+	// BackendPool creates GCE BackendServices and associated health checks.
 	healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
 	// TODO: This needs to change to a consolidated management of the default backend.
@@ -242,6 +247,8 @@ func NewClusterManager(
 	defaultBackendPool := backends.NewBackendPool(
 		cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer, []int64{}, false)
 	cluster.defaultBackendNodePort = defaultBackendNodePort
+	// L7 pool creates targetHTTPProxy, ForwardingRules, UrlMaps, StaticIPs.
 	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
 		cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)
 	cluster.firewallPool = firewalls.NewFirewallPool(cloud, cluster.ClusterNamer)
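
Per the commit title, the namer becomes the home of the cluster UID, which is why it has to be shared by pointer across the instance, backend, load-balancer, and firewall pools. The sketch below is only an assumption about what concurrency-safe store/retrieval accessors could look like; the names SetUID and UID, the field layout, and the mutex are illustrative, not necessarily the API this commit adds:

package utils

import "sync"

// Namer here is an illustrative variant with a runtime-settable cluster UID;
// the field and method names are assumptions for this sketch.
type Namer struct {
	mu  sync.Mutex
	uid string
}

// SetUID stores a new cluster UID. Because ClusterManager holds a *Namer,
// every pool created from it observes the change on its next name lookup.
func (n *Namer) SetUID(uid string) {
	n.mu.Lock()
	defer n.mu.Unlock()
	n.uid = uid
}

// UID retrieves the currently stored cluster UID.
func (n *Namer) UID() string {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.uid
}

A pointer is also forced once a mutex lives in the struct: copying a sync.Mutex by value is unsafe, so a shared *Namer is the only sound way to hand one namer to several pools.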

View file

@@ -301,7 +301,7 @@ func (lbc *LoadBalancerController) sync(key string) {
 	} else if err := l7.UpdateUrlMap(urlMap); err != nil {
 		lbc.recorder.Eventf(&ing, api.EventTypeWarning, "UrlMap", err.Error())
 		syncError = fmt.Errorf("%v, update url map error: %v", syncError, err)
-	} else if lbc.updateIngressStatus(l7, ing); err != nil {
+	} else if err := lbc.updateIngressStatus(l7, ing); err != nil {
 		lbc.recorder.Eventf(&ing, api.EventTypeWarning, "Status", err.Error())
 		syncError = fmt.Errorf("%v, update ingress error: %v", syncError, err)
 	}
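
The old line compiled but was a bug: without err :=, the return value of updateIngressStatus is discarded, and the condition re-tests the err declared at the top of the if/else-if chain, which is necessarily nil on that path, so status-update failures were silently swallowed. A standalone demonstration of the scoping rule (toy functions, not the controller's real ones):

package main

import (
	"errors"
	"fmt"
)

func updateURLMap() error { return nil }
func updateStatus() error { return errors.New("status update failed") }

func main() {
	// Buggy form: updateStatus runs, but its error is dropped; the condition
	// re-reads the err from the first branch, which is nil whenever the
	// else-if is reached, so this branch can never fire.
	if err := updateURLMap(); err != nil {
		fmt.Println("url map:", err)
	} else if updateStatus(); err != nil {
		fmt.Println("status:", err) // unreachable
	}

	// Fixed form: a fresh err is bound to updateStatus's result and checked.
	if err := updateURLMap(); err != nil {
		fmt.Println("url map:", err)
	} else if err := updateStatus(); err != nil {
		fmt.Println("status:", err) // prints: status: status update failed
	}
}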

View file

@@ -17,14 +17,15 @@ limitations under the License.

 package controller

 import (
-	"k8s.io/kubernetes/pkg/util/intstr"
-	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/contrib/ingress/controllers/gce/backends"
 	"k8s.io/contrib/ingress/controllers/gce/firewalls"
 	"k8s.io/contrib/ingress/controllers/gce/healthchecks"
 	"k8s.io/contrib/ingress/controllers/gce/instances"
 	"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
 	"k8s.io/contrib/ingress/controllers/gce/utils"
+	"k8s.io/kubernetes/pkg/util/intstr"
+	"k8s.io/kubernetes/pkg/util/sets"
 )

 const (
@@ -48,7 +49,7 @@ func NewFakeClusterManager(clusterName string) *fakeClusterManager {
 	fakeBackends := backends.NewFakeBackendServices()
 	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
 	fakeHCs := healthchecks.NewFakeHealthChecks()
-	namer := utils.Namer{clusterName}
+	namer := &utils.Namer{clusterName}
 	nodePool := instances.NewNodePool(fakeIGs, defaultZone)
 	healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
 	backendPool := backends.NewBackendPool(