Simpler firewall rules
parent 4159a40da4
commit 8084341920
7 changed files with 298 additions and 1 deletion
@@ -22,6 +22,7 @@ import (
    "time"

    "k8s.io/contrib/ingress/controllers/gce/backends"
    "k8s.io/contrib/ingress/controllers/gce/firewalls"
    "k8s.io/contrib/ingress/controllers/gce/healthchecks"
    "k8s.io/contrib/ingress/controllers/gce/instances"
    "k8s.io/contrib/ingress/controllers/gce/loadbalancers"
@@ -70,6 +71,7 @@ type ClusterManager struct {
    instancePool instances.NodePool
    backendPool  backends.BackendPool
    l7Pool       loadbalancers.LoadBalancerPool
    firewallPool firewalls.SingleFirewallPool
}

// IsHealthy returns an error if the cluster manager is unhealthy.
@@ -92,6 +94,9 @@ func (c *ClusterManager) shutdown() error {
    if err := c.l7Pool.Shutdown(); err != nil {
        return err
    }
    if err := c.firewallPool.Shutdown(); err != nil {
        return err
    }
    // The backend pool will also delete instance groups.
    return c.backendPool.Shutdown()
}
@@ -107,6 +112,17 @@ func (c *ClusterManager) shutdown() error {
// If in performing the checkpoint the cluster manager runs out of quota, a
// googleapi 403 is returned.
func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, nodePorts []int64) error {
    // Multiple ingress paths can point to the same service (and hence nodePort)
    // but each nodePort can only have one set of cloud resources behind it. So
    // don't waste time double validating GCE BackendServices.
    portMap := map[int64]struct{}{}
    for _, p := range nodePorts {
        portMap[p] = struct{}{}
    }
    nodePorts = []int64{}
    for p, _ := range portMap {
        nodePorts = append(nodePorts, p)
    }
    if err := c.backendPool.Sync(nodePorts); err != nil {
        return err
    }
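The de-duplication above is the standard Go map-as-set idiom. A minimal standalone sketch of the same pattern (uniquePorts and the sample ports below are illustrative, not part of the commit):

package main

import "fmt"

// uniquePorts collapses duplicate nodePorts so each GCE BackendService is
// validated only once, mirroring the portMap logic in Checkpoint.
func uniquePorts(nodePorts []int64) []int64 {
    portMap := map[int64]struct{}{}
    for _, p := range nodePorts {
        portMap[p] = struct{}{}
    }
    out := make([]int64, 0, len(portMap))
    for p := range portMap {
        out = append(out, p)
    }
    return out
}

func main() {
    // Two Ingress paths sharing nodePort 30001 collapse to a single entry.
    fmt.Println(uniquePorts([]int64{30001, 30001, 30002}))
}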
@@ -116,6 +132,21 @@ func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, nodePorts []int64) error {
    if err := c.l7Pool.Sync(lbs); err != nil {
        return err
    }

    // TODO: Manage default backend and its firewall rule in a centralized way.
    // DefaultBackend is managed in l7 pool, which doesn't understand instances,
    // which the firewall rule requires.
    fwNodePorts := nodePorts
    if len(fwNodePorts) != 0 {
        // If there are no Ingresses, we shouldn't be allowing traffic to the
        // default backend. Equally importantly if the cluster gets torn down
        // we shouldn't leak the firewall rule.
        fwNodePorts = append(fwNodePorts, c.defaultBackendNodePort)
    }
    if err := c.firewallPool.Sync(fwNodePorts, nodeNames); err != nil {
        return err
    }

    return nil
}
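Taken together with the Shutdown call earlier, the firewallPool call sites pin down roughly this shape for SingleFirewallPool. A sketch of the interface as implied by this diff; the real definition lives in the firewalls package and may carry more methods:

package firewalls // sketch only; the real file lives in the firewalls package

// SingleFirewallPool manages one cluster-wide firewall rule, as implied by
// the firewallPool.Sync and firewallPool.Shutdown call sites in this commit.
type SingleFirewallPool interface {
    // Sync reconciles the firewall rule so that the given node ports are
    // reachable on the given nodes, creating or updating it as needed.
    Sync(nodePorts []int64, nodeNames []string) error
    // Shutdown deletes the managed firewall rule.
    Shutdown() error
}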
@@ -213,6 +244,6 @@ func NewClusterManager(
    cluster.defaultBackendNodePort = defaultBackendNodePort
    cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
        cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)

    cluster.firewallPool = firewalls.NewFirewallPool(cloud, cluster.ClusterNamer)
    return &cluster, nil
}
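NewFirewallPool is handed the real cloud here but firewalls.NewFakeFirewallRules() in the test fake further down, so its first argument must be an interface over the GCE firewall API. A hedged sketch of that dependency; only GetFirewall actually appears in this diff, and the mutating methods are assumptions whose real names and signatures may differ:

package firewalls // sketch only

import compute "google.golang.org/api/compute/v1"

// firewallAPI is a hypothetical name for whatever NewFirewallPool accepts.
// GetFirewall is exercised by the test below; CreateFirewall, UpdateFirewall,
// and DeleteFirewall are inferred from what a sync loop would need.
type firewallAPI interface {
    GetFirewall(name string) (*compute.Firewall, error)
    CreateFirewall(f *compute.Firewall) error
    UpdateFirewall(f *compute.Firewall) error
    DeleteFirewall(name string) error
}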
@@ -23,6 +23,7 @@ import (
    "time"

    compute "google.golang.org/api/compute/v1"
    "k8s.io/contrib/ingress/controllers/gce/firewalls"
    "k8s.io/contrib/ingress/controllers/gce/loadbalancers"
    "k8s.io/contrib/ingress/controllers/gce/utils"
    "k8s.io/kubernetes/pkg/api"
@@ -32,6 +33,7 @@ import (
    client "k8s.io/kubernetes/pkg/client/unversioned"
    "k8s.io/kubernetes/pkg/util"
    "k8s.io/kubernetes/pkg/util/intstr"
    "k8s.io/kubernetes/pkg/util/sets"
)

const testClusterName = "testcluster"
@@ -234,11 +236,27 @@ func TestLbCreateDelete(t *testing.T) {
    // we shouldn't pull shared backends out from existing loadbalancers.
    unexpected := []int{pm.portMap["foo2svc"], pm.portMap["bar2svc"]}
    expected := []int{pm.portMap["foo1svc"], pm.portMap["bar1svc"]}
    firewallPorts := sets.NewString()
    firewallName := pm.namer.FrName(pm.namer.FrSuffix())

    if firewallRule, err := cm.firewallPool.(*firewalls.FirewallRules).GetFirewall(firewallName); err != nil {
        t.Fatalf("%v", err)
    } else {
        if len(firewallRule.Allowed) != 1 {
            t.Fatalf("Expected a single firewall rule")
        }
        for _, p := range firewallRule.Allowed[0].Ports {
            firewallPorts.Insert(p)
        }
    }

    for _, port := range expected {
        if _, err := cm.backendPool.Get(int64(port)); err != nil {
            t.Fatalf("%v", err)
        }
        if !firewallPorts.Has(fmt.Sprintf("%v", port)) {
            t.Fatalf("Expected a firewall rule for port %v", port)
        }
    }
    for _, port := range unexpected {
        if be, err := cm.backendPool.Get(int64(port)); err == nil {
@@ -263,6 +281,9 @@ func TestLbCreateDelete(t *testing.T) {
            t.Fatalf("Found unexpected loadbalandcer %+v: %v", l7, err)
        }
    }
    if firewallRule, err := cm.firewallPool.(*firewalls.FirewallRules).GetFirewall(firewallName); err == nil {
        t.Fatalf("Found unexpected firewall rule %v", firewallRule)
    }
}

func TestLbFaultyUpdate(t *testing.T) {
@@ -18,6 +18,7 @@ package controller

import (
    "k8s.io/contrib/ingress/controllers/gce/backends"
    "k8s.io/contrib/ingress/controllers/gce/firewalls"
    "k8s.io/contrib/ingress/controllers/gce/healthchecks"
    "k8s.io/contrib/ingress/controllers/gce/instances"
    "k8s.io/contrib/ingress/controllers/gce/loadbalancers"
@@ -60,11 +61,13 @@ func NewFakeClusterManager(clusterName string) *fakeClusterManager {
        testDefaultBeNodePort,
        namer,
    )
    frPool := firewalls.NewFirewallPool(firewalls.NewFakeFirewallRules(), namer)
    cm := &ClusterManager{
        ClusterNamer: namer,
        instancePool: nodePool,
        backendPool:  backendPool,
        l7Pool:       l7Pool,
        firewallPool: frPool,
    }
    return &fakeClusterManager{cm, fakeLbs, fakeBackends, fakeIGs}
}
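The fake above swaps in firewalls.NewFakeFirewallRules at the lower-level GCE API boundary, so the real FirewallPool sync logic stays under test. For tests that don't need to inspect firewall state at all, a recording stand-in for SingleFirewallPool would also do; this toy fake is illustrative only and not part of the commit:

package firewalls // toy sketch, not part of the commit

// recordingFirewallPool is a hypothetical in-memory SingleFirewallPool that
// records the last Sync arguments instead of calling GCE.
type recordingFirewallPool struct {
    ports []int64
    nodes []string
}

func (f *recordingFirewallPool) Sync(nodePorts []int64, nodeNames []string) error {
    f.ports, f.nodes = nodePorts, nodeNames // remember the desired state
    return nil
}

func (f *recordingFirewallPool) Shutdown() error {
    f.ports, f.nodes = nil, nil // as if the rule were deleted
    return nil
}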