feat: support topology aware hints (#9165)
* support topology aware hints
* add flag to enable topology and fixes
* update readme
* add e2e test
* isolate topology test
* gofmt fix

Signed-off-by: tombokombo <tombo@sysart.tech>
Parent: ada114315e
Commit: 5b2a9475dc
14 changed files with 564 additions and 18 deletions
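Topology aware hints let a data plane prefer endpoints in its own zone once the Kubernetes EndpointSlice controller has populated zone hints. The snippet below is not code from this commit; it is a minimal sketch, built on the upstream k8s.io/api/discovery/v1 types, of the idea the feature relies on: keep the endpoints whose hints match the local zone, and fall back to all endpoints when any hint is missing (the same fallback the new e2e test asserts). The helper name endpointsForZone is hypothetical.

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
)

// endpointsForZone is a hypothetical helper (not part of ingress-nginx): it keeps
// only the endpoints whose topology hints include the local zone. If any endpoint
// carries no hints, it falls back to all endpoints, which is the behaviour the
// new e2e test relies on when the EndpointSlice controller did not publish hints.
func endpointsForZone(eps []discoveryv1.Endpoint, zone string) []discoveryv1.Endpoint {
	filtered := make([]discoveryv1.Endpoint, 0, len(eps))
	for _, ep := range eps {
		if ep.Hints == nil || len(ep.Hints.ForZones) == 0 {
			return eps // incomplete hints: topology aware routing must be skipped
		}
		for _, fz := range ep.Hints.ForZones {
			if fz.Name == zone {
				filtered = append(filtered, ep)
				break
			}
		}
	}
	if len(filtered) == 0 {
		return eps // no endpoint hinted for this zone: use them all
	}
	return filtered
}

func main() {
	eps := []discoveryv1.Endpoint{
		{Addresses: []string{"10.0.1.10"}, Hints: &discoveryv1.EndpointHints{ForZones: []discoveryv1.ForZone{{Name: "zone-1"}}}},
		{Addresses: []string{"10.0.2.10"}, Hints: &discoveryv1.EndpointHints{ForZones: []discoveryv1.ForZone{{Name: "zone-2"}}}},
	}
	// A consumer running in zone-1 keeps only the zone-1 endpoint.
	fmt.Println(len(endpointsForZone(eps, "zone-1"))) // prints 1
}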
@@ -41,12 +41,22 @@ reportFileNamePrefix="report-e2e-test-suite"

echo -e "${BGREEN}Running e2e test suite (FOCUS=${FOCUS})...${NC}"
ginkgo "${ginkgo_args[@]}" \
  -focus="${FOCUS}" \
  -skip="\[Serial\]|\[MemoryLeak\]" \
  -skip="\[Serial\]|\[MemoryLeak\]|\[TopologyHints\]" \
  -nodes="${E2E_NODES}" \
  --junit-report=$reportFileNamePrefix.xml \
  /e2e.test
# Create configMap out of a compressed report file for extraction later

# Must be isolated: there is a collision if multiple helm releases try to install the same clusterRole at the same time
echo -e "${BGREEN}Running e2e test for topology aware hints...${NC}"
ginkgo "${ginkgo_args[@]}" \
  -focus="\[TopologyHints\]" \
  -skip="\[Serial\]|\[MemoryLeak\]" \
  -nodes="${E2E_NODES}" \
  --junit-report=$reportFileNamePrefix-topology.xml \
  /e2e.test
# Create configMap out of a compressed report file for extraction later

echo -e "${BGREEN}Running e2e test suite with tests that require serial execution...${NC}"
ginkgo "${ginkgo_args[@]}" \
  -focus="\[Serial\]" \
test/e2e-image/namespace-overlays/topology/values.yaml (new file, 51 lines)
@@ -0,0 +1,51 @@
# TODO: remove the need to use fullnameOverride
fullnameOverride: nginx-ingress
controller:
  image:
    repository: ingress-controller/controller
    chroot: true
    tag: 1.0.0-dev
    digest:
    digestChroot:
  scope:
    enabled: false
  config:
    worker-processes: "1"
  readinessProbe:
    initialDelaySeconds: 3
    periodSeconds: 1
  livenessProbe:
    initialDelaySeconds: 3
    periodSeconds: 1
  service:
    type: NodePort
  electionID: ingress-controller-leader
  ingressClassResource:
    # We will create and remove each IC/ClusterRole/ClusterRoleBinding per test so there's no conflict
    enabled: false
  extraArgs:
    tcp-services-configmap: $NAMESPACE/tcp-services
    # e2e tests do not require information about ingress status
    update-status: "false"
  terminationGracePeriodSeconds: 1
  admissionWebhooks:
    enabled: false

  enableTopologyAwareRouting: true

  # ulimit -c unlimited
  # mkdir -p /tmp/coredump
  # chmod a+rwx /tmp/coredump
  # echo "/tmp/coredump/core.%e.%p.%h.%t" > /proc/sys/kernel/core_pattern
  extraVolumeMounts:
    - name: coredump
      mountPath: /tmp/coredump

  extraVolumes:
    - name: coredump
      hostPath:
        path: /tmp/coredump

rbac:
  create: true
  scope: false
test/e2e/endpointslices/topology.go (new file, 112 lines)
@@ -0,0 +1,112 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpointslices

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"os/exec"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/onsi/ginkgo/v2"
	"github.com/stretchr/testify/assert"

	"k8s.io/ingress-nginx/internal/nginx"
	"k8s.io/ingress-nginx/test/e2e/framework"
)

var _ = framework.IngressNginxDescribe("[TopologyHints] topology aware routing", func() {
	f := framework.NewDefaultFramework("topology")
	host := "topology-svc.foo.com"

	ginkgo.BeforeEach(func() {
		f.NewEchoDeployment(framework.WithDeploymentReplicas(2), framework.WithSvcTopologyAnnotations())
	})

	ginkgo.AfterEach(func() {
		// we need to uninstall the chart because the clusterRole is not destroyed with the namespace
		err := uninstallChart(f)
		assert.Nil(ginkgo.GinkgoT(), err, "uninstalling helm chart")
	})

	ginkgo.It("should return 200 when service has topology hints", func() {
		annotations := make(map[string]string)
		ing := framework.NewSingleIngress(host, "/", host, f.Namespace, framework.EchoService, 80, annotations)
		f.EnsureIngress(ing)

		f.WaitForNginxServer(host, func(server string) bool {
			return strings.Contains(server, fmt.Sprintf("server_name %s", host))
		})

		ginkgo.By("checking if the service is reached")
		f.HTTPTestClient().
			GET("/").
			WithHeader("Host", host).
			Expect().
			Status(http.StatusOK)

		slices, err := f.KubeClientSet.DiscoveryV1().EndpointSlices(f.Namespace).List(context.TODO(), metav1.ListOptions{
			LabelSelector: "kubernetes.io/service-name=echo",
			Limit:         1,
		})
		assert.Nil(ginkgo.GinkgoT(), err)

		// check if we got hints; this really depends on the k8s endpoint slice controller
		gotHints := true
		for _, ep := range slices.Items[0].Endpoints {
			if ep.Hints == nil || len(ep.Hints.ForZones) == 0 {
				gotHints = false
				break
			}
		}

		curlCmd := fmt.Sprintf("curl --fail --silent http://localhost:%v/configuration/backends", nginx.StatusPort)
		status, err := f.ExecIngressPod(curlCmd)
		assert.Nil(ginkgo.GinkgoT(), err)
		var backends []map[string]interface{}
		err = json.Unmarshal([]byte(status), &backends)
		assert.Nil(ginkgo.GinkgoT(), err)
		gotBackends := 0
		for _, bck := range backends {
			if strings.Contains(bck["name"].(string), "topology") {
				gotBackends = len(bck["endpoints"].([]interface{}))
			}
		}

		if gotHints {
			// we have 2 replicas; if there is just one backend, it means we are routing
			// according to the slice hints, to the same zone the controller is in
			assert.Equal(ginkgo.GinkgoT(), 1, gotBackends)
		} else {
			// without topology hints, two replicas should yield two endpoints
			assert.Equal(ginkgo.GinkgoT(), 2, gotBackends)
		}
	})
})

func uninstallChart(f *framework.Framework) error {
	cmd := exec.Command("helm", "uninstall", "--namespace", f.Namespace, "nginx-ingress")
	_, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("unexpected error uninstalling ingress-nginx release: %v", err)
	}

	return nil
}
@@ -40,10 +40,10 @@ const SlowEchoService = "slow-echo"
const HTTPBinService = "httpbin"

type deploymentOptions struct {
	namespace string
	name      string
	replicas  int
	image     string
	namespace      string
	name           string
	replicas       int
	svcAnnotations map[string]string
}

// WithDeploymentNamespace allows configuring the deployment's namespace
@@ -53,6 +53,15 @@ func WithDeploymentNamespace(n string) func(*deploymentOptions) {
	}
}

// WithSvcTopologyAnnotations creates the svc with topology aware hints set to auto
func WithSvcTopologyAnnotations() func(*deploymentOptions) {
	return func(o *deploymentOptions) {
		o.svcAnnotations = map[string]string{
			"service.kubernetes.io/topology-aware-hints": "auto",
		}
	}
}

// WithDeploymentName allows configuring the deployment's name
func WithDeploymentName(n string) func(*deploymentOptions) {
	return func(o *deploymentOptions) {
@@ -95,8 +104,9 @@ func (f *Framework) NewEchoDeployment(opts ...func(*deploymentOptions)) {

	service := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      options.name,
			Namespace: options.namespace,
			Name:        options.name,
			Namespace:   options.namespace,
			Annotations: options.svcAnnotations,
		},
		Spec: corev1.ServiceSpec{
			Ports: []corev1.ServicePort{
@@ -2,8 +2,14 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  labels:
    topology.kubernetes.io/zone: zone-1
- role: worker
  labels:
    topology.kubernetes.io/zone: zone-1
- role: worker
  labels:
    topology.kubernetes.io/zone: zone-2
kubeadmConfigPatches:
- |
  kind: ClusterConfiguration