Remove unused variables and verbose e2e logs
parent 47b5e20a88
commit 10dcf0db15
35 changed files with 331 additions and 427 deletions
@@ -1,24 +1,8 @@
.PHONY: all
all: image

DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))

# Use docker to run makefile tasks
USE_DOCKER ?= true

# Disable run docker tasks if running in prow.
# only checks the existence of the variable, not the value.
ifdef DIND_TASKS
USE_DOCKER=false
endif

.PHONY: image
image:
ifeq ($(USE_DOCKER), true)
	@$(DIR)/../../build/run-in-docker.sh make e2e-test-binary
else
	@make -C $(DIR)/../../ e2e-test-binary
endif
	make -C $(DIR)/../../ e2e-test-binary

	cp $(DIR)/../e2e/e2e.test .
	cp $(DIR)/../e2e/wait-for-nginx.sh .
@@ -30,7 +14,9 @@ endif
	docker build \
		--tag nginx-ingress-controller:e2e .

.PHONY: clean
clean:
	rm -rf _cache e2e.test kubectl cluster ginkgo
	docker rmi -f nginx-ingress-controller:e2e || true


.PHONY: image clean
@@ -19,48 +19,37 @@ set -e
NC='\e[0m'
BGREEN='\e[32m'

SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-50}
SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-5}
FOCUS=${FOCUS:-.*}
E2E_NODES=${E2E_NODES:-5}
E2E_CHECK_LEAKS=${E2E_CHECK_LEAKS:-""}

if [ ! -f "${HOME}/.kube/config" ]; then
  kubectl config set-cluster dev --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt --embed-certs=true --server="https://kubernetes.default/"
  kubectl config set-credentials user --token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
  kubectl config set-context default --cluster=dev --user=user
  kubectl config use-context default
fi

ginkgo_args=(
  "-randomizeSuites"
  "-randomizeAllSpecs"
  "-flakeAttempts=2"
  "-p"
  "-trace"
  "-progress"
  "-slowSpecThreshold=${SLOW_E2E_THRESHOLD}"
  "-r"
  "-succinct"
  "-timeout=45m" # Suite timeout should be lower than Prow job timeout to avoid abrupt termination
)

echo -e "${BGREEN}Running e2e test suite (FOCUS=${FOCUS})...${NC}"
ginkgo "${ginkgo_args[@]}" \
  -focus="${FOCUS}" \
  -skip="\[Serial\]|\[MemoryLeak\]" \
  -nodes="${E2E_NODES}" \
  -focus="${FOCUS}" \
  -skip="\[Serial\]|\[MemoryLeak\]" \
  -nodes="${E2E_NODES}" \
  /e2e.test

echo -e "${BGREEN}Running e2e test suite with tests that require serial execution...${NC}"
ginkgo "${ginkgo_args[@]}" \
  -focus="\[Serial\]" \
  -skip="\[MemoryLeak\]" \
  -nodes=1 \
ginkgo "${ginkgo_args[@]}" \
  -focus="\[Serial\]" \
  -skip="\[MemoryLeak\]" \
  /e2e.test

if [[ ${E2E_CHECK_LEAKS} != "" ]]; then
  echo -e "${BGREEN}Running e2e test suite with tests that check for memory leaks...${NC}"
  ginkgo "${ginkgo_args[@]}" \
    -focus="\[MemoryLeak\]" \
    -skip="\[Serial\]" \
    -nodes=1 \
  ginkgo "${ginkgo_args[@]}" \
    -focus="\[MemoryLeak\]" \
    -skip="\[Serial\]" \
    /e2e.test
fi
@@ -86,7 +86,7 @@ var _ = framework.DescribeAnnotation("affinity session-cookie-name", func() {

	_, err := f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "updating ingress")
	time.Sleep(5 * time.Second)
	framework.Sleep()

	f.HTTPTestClient().
		GET("/").
@ -21,7 +21,6 @@ import (
|
|||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -104,7 +103,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
|
|||
replicas = replicas + 1
|
||||
err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
time.Sleep(3 * time.Second)
|
||||
framework.Sleep()
|
||||
response = request.WithCookies(cookies).Expect()
|
||||
newHostName := getHostnameFromResponseBody(response.Body().Raw())
|
||||
assert.Equal(ginkgo.GinkgoT(), originalHostName, newHostName,
|
||||
|
|
@ -116,7 +115,7 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
|
|||
replicas = 0
|
||||
err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
// validate, there is no backend to serve the request
|
||||
response = request.WithCookies(cookies).Expect().Status(http.StatusServiceUnavailable)
|
||||
|
|
@ -125,13 +124,13 @@ var _ = framework.DescribeAnnotation("affinitymode", func() {
|
|||
replicas = 2
|
||||
err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
// wait brand new backends to spawn
|
||||
response = request.WithCookies(cookies).Expect()
|
||||
try := 0
|
||||
for (response.Raw().StatusCode == http.StatusServiceUnavailable) && (try < 30) {
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
response = request.WithCookies(cookies).Expect()
|
||||
try++
|
||||
}
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -81,7 +80,7 @@ var _ = framework.DescribeAnnotation("from-to-www-redirect", func() {
|
|||
ing.Spec.TLS[0].SecretName,
|
||||
ing.Namespace)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.WaitForNginxServer(toHost,
|
||||
func(server string) bool {
|
||||
|
|
|
|||
|
|
@@ -77,7 +77,7 @@ var _ = framework.DescribeAnnotation("influxdb-*", func() {
	var measurements string
	var err error

	err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
	err = wait.Poll(time.Second, time.Minute, func() (bool, error) {
		measurements, err = extractInfluxDBMeasurements(f)
		if err != nil {
			return false, nil
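Several hunks in this commit, starting with this one, replace wait.PollImmediate with wait.Poll. The practical difference is small but real: Poll sleeps one interval before the first condition check, while PollImmediate evaluates the condition right away. A standalone sketch of both variants, assuming k8s.io/apimachinery is available; the interval, timeout and dummy condition below are illustrative, not taken from the test suite:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	deadline := time.Now().Add(3 * time.Second)
	condition := func() (bool, error) { return time.Now().After(deadline), nil }

	// wait.Poll waits one full interval before the first condition check...
	err := wait.Poll(time.Second, 10*time.Second, condition)
	fmt.Println("Poll:", err)

	// ...while wait.PollImmediate checks once up front, then keeps polling.
	err = wait.PollImmediate(time.Second, 10*time.Second, condition)
	fmt.Println("PollImmediate:", err)
}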
@ -39,7 +39,7 @@ func startIngress(f *framework.Framework, annotations map[string]string) map[str
|
|||
return strings.Contains(server, fmt.Sprintf("server_name %s ;", host))
|
||||
})
|
||||
|
||||
err := wait.PollImmediate(framework.Poll, framework.DefaultTimeout, func() (bool, error) {
|
||||
err := wait.Poll(framework.Poll, framework.DefaultTimeout, func() (bool, error) {
|
||||
|
||||
resp := f.HTTPTestClient().
|
||||
GET("/").
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -43,12 +42,12 @@ var _ = framework.IngressNginxDescribe("[Default Backend] custom service", func(
|
|||
args = append(args, fmt.Sprintf("--default-backend-service=%v/%v", f.Namespace, framework.EchoService))
|
||||
deployment.Spec.Template.Spec.Containers[0].Args = args
|
||||
_, err := f.KubeClientSet.AppsV1().Deployments(f.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
return err
|
||||
})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "updating deployment")
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.WaitForNginxServer("_",
|
||||
func(server string) bool {
|
||||
|
|
|
|||
|
|
@ -59,6 +59,8 @@ var _ = framework.IngressNginxDescribe("[Default Backend]", func() {
|
|||
{"basic HTTPS POST request to host foo.bar.com and path /demo should return 404", " foo.bar.com", framework.HTTPS, "POST", "/demo", http.StatusNotFound},
|
||||
}
|
||||
|
||||
framework.Sleep()
|
||||
|
||||
for _, test := range testCases {
|
||||
ginkgo.By(test.Name)
|
||||
|
||||
|
|
|
|||
|
|
@ -75,7 +75,7 @@ func (f *Framework) NewNewFastCGIHelloServerDeploymentWithReplicas(replicas int3
|
|||
|
||||
d := f.EnsureDeployment(deployment)
|
||||
|
||||
err := WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
|
||||
err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
|
||||
LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
|
||||
})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "failed to wait for to become ready")
|
||||
|
|
|
|||
|
|
@@ -24,6 +24,9 @@ import (
	"github.com/gavv/httpexpect/v2"
	"github.com/onsi/ginkgo"
	"github.com/pkg/errors"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"

@@ -37,6 +40,7 @@
	restclient "k8s.io/client-go/rest"
	"k8s.io/ingress-nginx/internal/k8s"
	"k8s.io/klog"
	kubeframework "k8s.io/kubernetes/test/e2e/framework"
)

// RequestScheme define a scheme used in a test request.
@@ -72,22 +76,8 @@ type Framework struct {
func NewDefaultFramework(baseName string) *Framework {
	defer ginkgo.GinkgoRecover()

	kubeConfig, err := restclient.InClusterConfig()
	if err != nil {
		panic(err.Error())
	}
	assert.Nil(ginkgo.GinkgoT(), err, "creating kubernetes API client configuration")

	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	assert.Nil(ginkgo.GinkgoT(), err, "creating Kubernetes API client")

	_, isIngressV1Ready := k8s.NetworkingIngressAvailable(kubeClient)

	f := &Framework{
		BaseName:         baseName,
		KubeConfig:       kubeConfig,
		KubeClientSet:    kubeClient,
		IsIngressV1Ready: isIngressV1Ready,
		BaseName: baseName,
	}

	ginkgo.BeforeEach(f.BeforeEach)
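After this change the constructor only records the suite name and registers BeforeEach; the Kubernetes client is built lazily inside BeforeEach (next hunk). A hedged sketch of how a spec file consumes it: NewDefaultFramework, IngressNginxDescribe and HTTPTestClient are the names used in this diff, while the package name, host header and path below are illustrative only.

package settings

import (
	"github.com/onsi/ginkgo"

	"k8s.io/ingress-nginx/test/e2e/framework"
)

var _ = framework.IngressNginxDescribe("[Example] plain HTTP request", func() {
	// Creating the framework no longer touches the API server; it only
	// stores the base name and wires f.BeforeEach into the ginkgo lifecycle.
	f := framework.NewDefaultFramework("example")

	ginkgo.It("returns a response for /", func() {
		f.HTTPTestClient().
			GET("/").
			WithHeader("Host", "foo.bar.com").
			Expect()
	})
})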
@ -98,63 +88,74 @@ func NewDefaultFramework(baseName string) *Framework {
|
|||
|
||||
// BeforeEach gets a client and makes a namespace.
|
||||
func (f *Framework) BeforeEach() {
|
||||
ingressNamespace, err := CreateKubeNamespace(f.BaseName, f.KubeClientSet)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "creating namespace")
|
||||
var err error
|
||||
|
||||
f.Namespace = ingressNamespace
|
||||
if f.KubeClientSet == nil {
|
||||
f.KubeConfig, err = kubeframework.LoadConfig()
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "loading a kubernetes client configuration")
|
||||
f.KubeClientSet, err = kubernetes.NewForConfig(f.KubeConfig)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "creating a kubernetes client")
|
||||
|
||||
_, isIngressV1Ready := k8s.NetworkingIngressAvailable(f.KubeClientSet)
|
||||
f.IsIngressV1Ready = isIngressV1Ready
|
||||
}
|
||||
|
||||
f.Namespace, err = CreateKubeNamespace(f.BaseName, f.KubeClientSet)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "creating namespace")
|
||||
|
||||
err = f.newIngressController(f.Namespace, f.BaseName)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "deploying the ingress controller")
|
||||
|
||||
err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
|
||||
err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
|
||||
LabelSelector: "app.kubernetes.io/name=ingress-nginx",
|
||||
})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "waiting for ingress pods to be ready")
|
||||
|
||||
// wait before any request
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
// AfterEach deletes the namespace, after reading its events.
|
||||
func (f *Framework) AfterEach() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
|
||||
if err != nil {
|
||||
Logf("Unexpected error searching for ingress controller pod: %v", err)
|
||||
return
|
||||
}
|
||||
defer func(kubeClient kubernetes.Interface, ns string) {
|
||||
err := deleteKubeNamespace(kubeClient, ns)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
|
||||
}(f.KubeClientSet, f.Namespace)
|
||||
|
||||
cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf")
|
||||
o, err := f.ExecCommand(pod, cmd)
|
||||
if err != nil {
|
||||
Logf("Unexpected error obtaining nginx.conf file: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
ginkgo.By("Dumping NGINX configuration after failure")
|
||||
Logf("%v", o)
|
||||
|
||||
log, err := f.NginxLogs()
|
||||
if err != nil {
|
||||
Logf("Unexpected error obtaining NGINX logs: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
ginkgo.By("Dumping NGINX logs")
|
||||
Logf("%v", log)
|
||||
|
||||
o, err = f.NamespaceContent()
|
||||
if err != nil {
|
||||
Logf("Unexpected error obtaining namespace information: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
ginkgo.By("Dumping namespace content")
|
||||
Logf("%v", o)
|
||||
if !ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
}
|
||||
|
||||
err := DeleteKubeNamespace(f.KubeClientSet, f.Namespace)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "deleting namespace %v", f.Namespace)
|
||||
pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
|
||||
if err != nil {
|
||||
Logf("Unexpected error searching for ingress controller pod: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
cmd := fmt.Sprintf("cat /etc/nginx/nginx.conf")
|
||||
o, err := f.ExecCommand(pod, cmd)
|
||||
if err != nil {
|
||||
Logf("Unexpected error obtaining nginx.conf file: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
ginkgo.By("Dumping NGINX configuration after failure")
|
||||
Logf("%v", o)
|
||||
|
||||
log, err := f.NginxLogs()
|
||||
if err != nil {
|
||||
Logf("Unexpected error obtaining NGINX logs: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
ginkgo.By("Dumping NGINX logs")
|
||||
Logf("%v", log)
|
||||
|
||||
o, err = f.NamespaceContent()
|
||||
if err != nil {
|
||||
Logf("Unexpected error obtaining namespace information: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
ginkgo.By("Dumping namespace content")
|
||||
Logf("%v", o)
|
||||
}
|
||||
|
||||
// IngressNginxDescribe wrapper function for ginkgo describe. Adds namespacing.
|
||||
|
|
@@ -188,20 +189,10 @@ func (f *Framework) GetNginxIP() string {
}

// GetNginxPodIP returns the IP addresses of the running pods
func (f *Framework) GetNginxPodIP() []string {
	e, err := f.KubeClientSet.
		CoreV1().
		Endpoints(f.Namespace).
		Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX IP address")
	eips := make([]string, 0)
	for _, s := range e.Subsets {
		for _, a := range s.Addresses {
			eips = append(eips, a.IP)
		}
	}

	return eips
func (f *Framework) GetNginxPodIP() string {
	pod, err := GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
	assert.Nil(ginkgo.GinkgoT(), err, "obtaining NGINX Pod")
	return pod.Status.PodIP
}

// GetURL returns the URL should be used to make a request to NGINX
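GetNginxPodIP now returns a single pod IP string instead of a slice of endpoint IPs, so callers drop the ip[0] indexing (the dynamic-certificates hunks further down show the same migration). A sketch of the updated call site inside a spec body, reusing GetMetric exactly as it appears later in this diff; f is the suite's *framework.Framework:

	ip := f.GetNginxPodIP() // single string now, previously ip[0] on a []string
	mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
	assert.Nil(ginkgo.GinkgoT(), err)
	assert.NotNil(ginkgo.GinkgoT(), mf)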
@@ -214,7 +205,7 @@ func (f *Framework) GetURL(scheme RequestScheme) string {
func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) {
	err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher))
	assert.Nil(ginkgo.GinkgoT(), err, "waiting for nginx server condition/s")
	time.Sleep(5 * time.Second)
	Sleep()
}

// WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration
@@ -334,13 +325,15 @@ func (f *Framework) SetNginxConfigMapData(cmData map[string]string) {

	cfgMap.Data = cmData

	_, err = f.KubeClientSet.
		CoreV1().
		ConfigMaps(f.Namespace).
		Update(context.TODO(), cfgMap, metav1.UpdateOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
	fn := func() {
		_, err = f.KubeClientSet.
			CoreV1().
			ConfigMaps(f.Namespace).
			Update(context.TODO(), cfgMap, metav1.UpdateOptions{})
		assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
	}

	time.Sleep(5 * time.Second)
	f.waitForReload(fn)
}

// CreateConfigMap creates a new configmap in the current namespace
@@ -363,13 +356,60 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) {

	config.Data[key] = value

	_, err = f.KubeClientSet.
		CoreV1().
		ConfigMaps(f.Namespace).
		Update(context.TODO(), config, metav1.UpdateOptions{})
	assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
	fn := func() {
		_, err = f.KubeClientSet.
			CoreV1().
			ConfigMaps(f.Namespace).
			Update(context.TODO(), config, metav1.UpdateOptions{})
		assert.Nil(ginkgo.GinkgoT(), err, "updating configuration configmap")
	}

	time.Sleep(5 * time.Second)
	Sleep(1)
	f.waitForReload(fn)
}

func (f *Framework) waitForReload(fn func()) {
	reloadCount := f.getReloadCount()

	fn()

	var count int
	err := wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
		// most of the cases reload the ingress controller
		// in cases where the value is not modified we could wait forever
		if count > 4 {
			return true, nil
		}

		count++

		return (f.getReloadCount() > reloadCount), nil
	})
	assert.Nil(ginkgo.GinkgoT(), err, "while waiting for ingress controller reload")
}

func (f *Framework) getReloadCount() int {
	ip := f.GetNginxPodIP()
	mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
	assert.Nil(ginkgo.GinkgoT(), err)
	assert.NotNil(ginkgo.GinkgoT(), mf)

	rc0, err := extractReloadCount(mf)
	assert.Nil(ginkgo.GinkgoT(), err)

	return int(rc0)
}

func extractReloadCount(mf *dto.MetricFamily) (float64, error) {
	vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
		Timestamp: model.Now(),
	}, mf)

	if err != nil {
		return 0, err
	}

	return float64(vec[0].Value), nil
}

// DeleteNGINXPod deletes the currently running pod. It waits for the replacement pod to be up.
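waitForReload snapshots the nginx_ingress_controller_success counter, applies the mutation, then polls until the counter grows, giving up after a handful of polls for mutations that never trigger a reload. A minimal standalone sketch of the same pattern, assuming k8s.io/apimachinery is available; the in-memory counter and the 1s/2min poll settings stand in for the framework's Prometheus scrape, Poll and DefaultTimeout constants:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForReload captures the counter, runs the mutation, then polls until
// the counter increases or the bail-out / timeout is reached.
func waitForReload(getReloadCount func() int, mutate func()) error {
	before := getReloadCount()
	mutate()

	var polls int
	return wait.Poll(time.Second, 2*time.Minute, func() (bool, error) {
		// not every mutation reloads NGINX; stop waiting after a few polls
		if polls > 4 {
			return true, nil
		}
		polls++
		return getReloadCount() > before, nil
	})
}

func main() {
	count := 0
	err := waitForReload(
		func() int { return count },
		func() { count++ }, // pretend the mutation triggered one reload
	)
	fmt.Println("waited for reload:", err)
}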
@ -451,7 +491,7 @@ func UpdateDeployment(kubeClientSet kubernetes.Interface, namespace string, name
|
|||
}
|
||||
}
|
||||
|
||||
err = WaitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{
|
||||
err = waitForPodsReady(kubeClientSet, DefaultTimeout, replicas, namespace, metav1.ListOptions{
|
||||
LabelSelector: fields.SelectorFromSet(fields.Set(deployment.Spec.Template.ObjectMeta.Labels)).String(),
|
||||
})
|
||||
if err != nil {
|
||||
|
|
@ -485,7 +525,7 @@ func UpdateIngress(kubeClientSet kubernetes.Interface, namespace string, name st
|
|||
return err
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
Sleep()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@@ -621,3 +661,18 @@ func newSingleIngress(name, ns string, annotations map[string]string, spec netwo

	return ing
}

// defaultWaitDuration default sleep time for operations related
// to the API server and NGINX reloads.
var defaultWaitDuration = 5 * time.Second

// Sleep pauses the current goroutine for at least the duration d.
// If no duration is defined, it uses a default
func Sleep(duration ...time.Duration) {
	sleepFor := defaultWaitDuration
	if len(duration) != 0 {
		sleepFor = duration[0]
	}

	time.Sleep(sleepFor)
}
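The rest of the diff swaps ad-hoc time.Sleep calls for this helper. A sketch of the before/after at a typical call site, purely illustrative; the durations are the ones used elsewhere in the diff:

	// before: fixed pause hard-coded in every test
	time.Sleep(5 * time.Second)

	// after: shared default, defined once in the framework package
	framework.Sleep()

	// after, with an explicit override when a shorter pause is enough
	framework.Sleep(1 * time.Second)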
@ -75,7 +75,7 @@ func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32
|
|||
|
||||
d := f.EnsureDeployment(deployment)
|
||||
|
||||
err := WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
|
||||
err := waitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{
|
||||
LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
|
||||
})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "failed to wait for to become ready")
|
||||
|
|
|
|||
|
|
@ -136,7 +136,7 @@ func (f *Framework) NewInfluxDBDeployment() {
|
|||
|
||||
d := f.EnsureDeployment(deployment)
|
||||
|
||||
err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
|
||||
err = waitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{
|
||||
LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(),
|
||||
})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "waiting for influxdb pod to become ready")
|
||||
|
|
|
|||
|
|
@ -72,18 +72,18 @@ func (f *Framework) GetIngress(namespace string, name string) *networking.Ingres
|
|||
|
||||
// EnsureIngress creates an Ingress object and retunrs it, throws error if it already exists.
|
||||
func (f *Framework) EnsureIngress(ingress *networking.Ingress) *networking.Ingress {
|
||||
err := createIngressWithRetries(f.KubeClientSet, f.Namespace, ingress)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "creating ingress")
|
||||
fn := func() {
|
||||
err := createIngressWithRetries(f.KubeClientSet, f.Namespace, ingress)
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "creating ingress")
|
||||
}
|
||||
|
||||
f.waitForReload(fn)
|
||||
|
||||
ing := f.GetIngress(f.Namespace, ingress.Name)
|
||||
|
||||
if ing.Annotations == nil {
|
||||
ing.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
// creating an ingress requires a reload.
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
return ing
|
||||
}
|
||||
|
||||
|
|
@ -93,13 +93,12 @@ func (f *Framework) UpdateIngress(ingress *networking.Ingress) *networking.Ingre
|
|||
assert.Nil(ginkgo.GinkgoT(), err, "updating ingress")
|
||||
|
||||
ing := f.GetIngress(f.Namespace, ingress.Name)
|
||||
|
||||
if ing.Annotations == nil {
|
||||
ing.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
// updating an ingress requires a reload.
|
||||
time.Sleep(5 * time.Second)
|
||||
Sleep()
|
||||
|
||||
return ing
|
||||
}
|
||||
|
|
@ -128,8 +127,8 @@ func (f *Framework) EnsureDeployment(deployment *appsv1.Deployment) *appsv1.Depl
|
|||
return d
|
||||
}
|
||||
|
||||
// WaitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
|
||||
func WaitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
|
||||
// waitForPodsReady waits for a given amount of time until a group of Pods is running in the given namespace.
|
||||
func waitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, expectedReplicas int, namespace string, opts metav1.ListOptions) error {
|
||||
return wait.Poll(Poll, timeout, func() (bool, error) {
|
||||
pl, err := kubeClientSet.CoreV1().Pods(namespace).List(context.TODO(), opts)
|
||||
if err != nil {
|
||||
|
|
@ -215,6 +214,13 @@ func podRunningReady(p *core.Pod) (bool, error) {
|
|||
|
||||
// GetIngressNGINXPod returns the ingress controller running pod
|
||||
func GetIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Pod, error) {
|
||||
err := waitForPodsReady(kubeClientSet, DefaultTimeout, 1, ns, metav1.ListOptions{
|
||||
LabelSelector: "app.kubernetes.io/name=ingress-nginx",
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l, err := kubeClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: "app.kubernetes.io/name=ingress-nginx",
|
||||
})
|
||||
|
|
@@ -222,26 +228,15 @@ func GetIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Po
		return nil, nil
	}

	if len(l.Items) == 0 {
		return nil, fmt.Errorf("there is no ingress-nginx pods running in namespace %v", ns)
	}

	var pod *core.Pod

	for _, p := range l.Items {
		if strings.HasPrefix(p.GetName(), "nginx-ingress-controller") {
			if isRunning, err := podRunningReady(&p); err == nil && isRunning {
				pod = &p
				break
				return &p, nil
			}
		}
	}

	if pod == nil {
		return nil, fmt.Errorf("there is no ingress-nginx pods running in namespace %v", ns)
	}

	return pod, nil
	return nil, fmt.Errorf("there is no ingress-nginx pods running in namespace %v", ns)
}

func createDeploymentWithRetries(c kubernetes.Interface, namespace string, obj *appsv1.Deployment) error {
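GetIngressNGINXPod now waits for the controller pod to become ready (via the now-private waitForPodsReady, added in the hunk above this one) and returns the first running nginx-ingress-controller pod directly instead of accumulating into a local variable. A hedged sketch of a consumer, modelled on the AfterEach dump logic earlier in this diff; error handling is abbreviated and the variable names are illustrative:

	pod, err := framework.GetIngressNGINXPod(f.Namespace, f.KubeClientSet)
	if err != nil {
		framework.Logf("Unexpected error searching for ingress controller pod: %v", err)
		return
	}

	nginxConf, err := f.ExecCommand(pod, "cat /etc/nginx/nginx.conf")
	if err == nil {
		framework.Logf("%v", nginxConf)
	}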
@@ -32,21 +32,10 @@ type TestContextType struct {
// TestContext is the global client context for tests.
var TestContext TestContextType

// RegisterCommonFlags registers flags common to all e2e test suites.
func RegisterCommonFlags() {
	// Turn on verbose by default to get spec names
	config.DefaultReporterConfig.Verbose = true

	// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
// registerCommonFlags registers flags common to all e2e test suites.
func registerCommonFlags() {
	config.GinkgoConfig.EmitSpecProgress = true

	// Randomize specs as well as suites
	config.GinkgoConfig.RandomizeAllSpecs = true

	// Default SlowSpecThreshold is 5 seconds.
	// Too low for the kind of operations we need to tests
	config.DefaultReporterConfig.SlowSpecThreshold = 20

	flag.StringVar(&TestContext.KubeHost, "kubernetes-host", "http://127.0.0.1:8080", "The kubernetes host, or apiserver, to connect to")
	//flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar)
	flag.StringVar(&TestContext.KubeContext, "kubernetes-context", "", "config context to use for kubernetes. If unset, will use value from 'current-context'")

@@ -54,6 +43,6 @@ func RegisterCommonFlags()

// RegisterParseFlags registers and parses flags for the test binary.
func RegisterParseFlags() {
	RegisterCommonFlags()
	registerCommonFlags()
	flag.Parse()
}
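With the always-on Verbose reporter and the 20s SlowSpecThreshold override removed, spec verbosity is left to the ginkgo CLI flags set in the run script above, and registerCommonFlags is now package-private, reachable only through RegisterParseFlags. A sketch of the conventional entry point that would call it, assuming the usual ginkgo/gomega bootstrap; the package name, test function name and suite description are illustrative, not taken from this diff:

package e2e

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	"k8s.io/ingress-nginx/test/e2e/framework"
)

func TestE2E(t *testing.T) {
	framework.RegisterParseFlags() // parses the flags registered above
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "nginx-ingress-controller e2e suite")
}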
@ -17,7 +17,7 @@ limitations under the License.
|
|||
package framework
|
||||
|
||||
import (
|
||||
context2 "context"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
|
@ -93,8 +93,8 @@ func CreateKubeNamespace(baseName string, c kubernetes.Interface) (string, error
|
|||
var got *corev1.Namespace
|
||||
var err error
|
||||
|
||||
err = wait.PollImmediate(Poll, DefaultTimeout, func() (bool, error) {
|
||||
got, err = c.CoreV1().Namespaces().Create(context2.TODO(), ns, metav1.CreateOptions{})
|
||||
err = wait.Poll(Poll, DefaultTimeout, func() (bool, error) {
|
||||
got, err = c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
Logf("Unexpected error while creating namespace: %v", err)
|
||||
return false, nil
|
||||
|
|
@ -107,11 +107,11 @@ func CreateKubeNamespace(baseName string, c kubernetes.Interface) (string, error
|
|||
return got.Name, nil
|
||||
}
|
||||
|
||||
// DeleteKubeNamespace deletes a namespace and all the objects inside
|
||||
func DeleteKubeNamespace(c kubernetes.Interface, namespace string) error {
|
||||
// deleteKubeNamespace deletes a namespace and all the objects inside
|
||||
func deleteKubeNamespace(c kubernetes.Interface, namespace string) error {
|
||||
grace := int64(0)
|
||||
pb := metav1.DeletePropagationBackground
|
||||
return c.CoreV1().Namespaces().Delete(context2.TODO(), namespace, metav1.DeleteOptions{
|
||||
return c.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &grace,
|
||||
PropagationPolicy: &pb,
|
||||
})
|
||||
|
|
@ -119,12 +119,12 @@ func DeleteKubeNamespace(c kubernetes.Interface, namespace string) error {
|
|||
|
||||
// WaitForKubeNamespaceNotExist waits until a namespaces is not present in the cluster
|
||||
func WaitForKubeNamespaceNotExist(c kubernetes.Interface, namespace string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, namespaceNotExist(c, namespace))
|
||||
return wait.Poll(Poll, DefaultTimeout, namespaceNotExist(c, namespace))
|
||||
}
|
||||
|
||||
func namespaceNotExist(c kubernetes.Interface, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
_, err := c.CoreV1().Namespaces().Get(context2.TODO(), namespace, metav1.GetOptions{})
|
||||
_, err := c.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
|
|
@ -137,12 +137,12 @@ func namespaceNotExist(c kubernetes.Interface, namespace string) wait.ConditionF
|
|||
|
||||
// WaitForNoPodsInNamespace waits until there are no pods running in a namespace
|
||||
func WaitForNoPodsInNamespace(c kubernetes.Interface, namespace string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, noPodsInNamespace(c, namespace))
|
||||
return wait.Poll(Poll, DefaultTimeout, noPodsInNamespace(c, namespace))
|
||||
}
|
||||
|
||||
func noPodsInNamespace(c kubernetes.Interface, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
items, err := c.CoreV1().Pods(namespace).List(context2.TODO(), metav1.ListOptions{})
|
||||
items, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
|
|
@ -167,17 +167,17 @@ func WaitForPodRunningInNamespace(c kubernetes.Interface, pod *corev1.Pod) error
|
|||
}
|
||||
|
||||
func waitTimeoutForPodRunningInNamespace(c kubernetes.Interface, podName, namespace string, timeout time.Duration) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, podRunning(c, podName, namespace))
|
||||
return wait.Poll(Poll, DefaultTimeout, podRunning(c, podName, namespace))
|
||||
}
|
||||
|
||||
// WaitForSecretInNamespace waits a default amount of time for the specified secret is present in a particular namespace
|
||||
func WaitForSecretInNamespace(c kubernetes.Interface, namespace, name string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, secretInNamespace(c, namespace, name))
|
||||
return wait.Poll(Poll, DefaultTimeout, secretInNamespace(c, namespace, name))
|
||||
}
|
||||
|
||||
func secretInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
s, err := c.CoreV1().Secrets(namespace).Get(context2.TODO(), name, metav1.GetOptions{})
|
||||
s, err := c.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
|
@ -194,7 +194,7 @@ func secretInNamespace(c kubernetes.Interface, namespace, name string) wait.Cond
|
|||
|
||||
// WaitForFileInFS waits a default amount of time for the specified file is present in the filesystem
|
||||
func WaitForFileInFS(file string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, fileInFS(file))
|
||||
return wait.Poll(Poll, DefaultTimeout, fileInFS(file))
|
||||
}
|
||||
|
||||
func fileInFS(file string) wait.ConditionFunc {
|
||||
|
|
@ -218,12 +218,12 @@ func fileInFS(file string) wait.ConditionFunc {
|
|||
|
||||
// WaitForNoIngressInNamespace waits until there is no ingress object in a particular namespace
|
||||
func WaitForNoIngressInNamespace(c kubernetes.Interface, namespace, name string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, noIngressInNamespace(c, namespace, name))
|
||||
return wait.Poll(Poll, DefaultTimeout, noIngressInNamespace(c, namespace, name))
|
||||
}
|
||||
|
||||
func noIngressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context2.TODO(), name, metav1.GetOptions{})
|
||||
ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
|
|
@ -240,12 +240,12 @@ func noIngressInNamespace(c kubernetes.Interface, namespace, name string) wait.C
|
|||
|
||||
// WaitForIngressInNamespace waits until a particular ingress object exists namespace
|
||||
func WaitForIngressInNamespace(c kubernetes.Interface, namespace, name string) error {
|
||||
return wait.PollImmediate(Poll, DefaultTimeout, ingressInNamespace(c, namespace, name))
|
||||
return wait.Poll(Poll, DefaultTimeout, ingressInNamespace(c, namespace, name))
|
||||
}
|
||||
|
||||
func ingressInNamespace(c kubernetes.Interface, namespace, name string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context2.TODO(), name, metav1.GetOptions{})
|
||||
ing, err := c.NetworkingV1beta1().Ingresses(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
|
|
@ -262,7 +262,7 @@ func ingressInNamespace(c kubernetes.Interface, namespace, name string) wait.Con
|
|||
|
||||
func podRunning(c kubernetes.Interface, podName, namespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(context2.TODO(), podName, metav1.GetOptions{})
|
||||
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -37,13 +37,10 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
|
|||
|
||||
ginkgo.BeforeEach(func() {
|
||||
f.UpdateNginxConfigMapData("worker-shutdown-timeout", "600s")
|
||||
|
||||
f.NewSlowEchoDeployment()
|
||||
})
|
||||
|
||||
ginkgo.It("should shutdown in less than 60 secons without pending connections", func() {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
|
||||
f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, framework.SlowEchoService, 80, nil))
|
||||
|
||||
f.WaitForNginxServer(host,
|
||||
|
|
@@ -64,11 +61,7 @@
	assert.LessOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 60, "waiting shutdown")
})

type asyncResult struct {
	status int
}

ginkgo.It("should shutdown after waiting 60 seconds for pending connections to be closed", func() {
ginkgo.It("should shutdown after waiting 60 seconds for pending connections to be closed", func(done ginkgo.Done) {
	defer ginkgo.GinkgoRecover()

	err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
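In the shutdown specs below, the hand-rolled ticker/select loop and the asyncResult wrapper are replaced by ginkgo v1's asynchronous spec form: the body receives a Done channel, the trailing number is the spec timeout in seconds, and plain status-code ints travel over the result channel. A hedged sketch of that shape; the channel payload, helpers and timeout mirror the hunks below, the rest is illustrative:

	ginkgo.It("should shutdown after the pending request completes", func(done ginkgo.Done) {
		defer ginkgo.GinkgoRecover()

		result := make(chan int)
		go func(c chan int) {
			defer ginkgo.GinkgoRecover()
			// issue the long-running request here and report only its status code
			c <- http.StatusOK
		}(result)

		framework.Sleep()
		f.ScaleDeploymentToZero("nginx-ingress-controller")

		assert.Equal(ginkgo.GinkgoT(), <-result, http.StatusOK, "expecting a valid response from HTTP request")
		close(done)
	}, 100) // fail the spec if done is not closed within 100 seconds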
@ -92,11 +85,10 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
|
|||
return strings.Contains(server, "server_name shutdown")
|
||||
})
|
||||
|
||||
result := make(chan *asyncResult)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
go func(host string, c chan *asyncResult) {
|
||||
result := make(chan int)
|
||||
go func(host string, c chan int) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
|
||||
resp := f.HTTPTestClient().
|
||||
|
|
@ -105,34 +97,19 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
|
|||
Expect().
|
||||
Raw()
|
||||
|
||||
code := 0
|
||||
if resp != nil {
|
||||
code = resp.StatusCode
|
||||
}
|
||||
|
||||
c <- &asyncResult{code}
|
||||
c <- resp.StatusCode
|
||||
}(host, result)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.ScaleDeploymentToZero("nginx-ingress-controller")
|
||||
|
||||
ticker := time.NewTicker(time.Second * 10)
|
||||
assert.Equal(ginkgo.GinkgoT(), <-result, http.StatusOK, "expecting a valid response from HTTP request")
|
||||
assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 60, "waiting shutdown")
|
||||
close(done)
|
||||
}, 100)
|
||||
|
||||
for {
|
||||
select {
|
||||
case res := <-result:
|
||||
assert.Equal(ginkgo.GinkgoT(), res.status, http.StatusOK, "expecting a valid response from HTTP request")
|
||||
assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 60, "waiting shutdown")
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
framework.Logf("waiting for request completion after shutdown")
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
ginkgo.It("should shutdown after waiting 150 seconds for pending connections to be closed", func() {
|
||||
ginkgo.It("should shutdown after waiting 150 seconds for pending connections to be closed", func(done ginkgo.Done) {
|
||||
err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1,
|
||||
func(deployment *appsv1.Deployment) error {
|
||||
grace := int64(3600)
|
||||
|
|
@ -153,11 +130,10 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
|
|||
return strings.Contains(server, "server_name shutdown")
|
||||
})
|
||||
|
||||
result := make(chan *asyncResult)
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
go func(host string, c chan *asyncResult) {
|
||||
result := make(chan int)
|
||||
go func(host string, c chan int) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
|
||||
resp := f.HTTPTestClient().
|
||||
|
|
@ -166,30 +142,15 @@ var _ = framework.IngressNginxDescribe("[Shutdown] ingress controller", func() {
|
|||
Expect().
|
||||
Raw()
|
||||
|
||||
code := 0
|
||||
if resp != nil {
|
||||
code = resp.StatusCode
|
||||
}
|
||||
|
||||
c <- &asyncResult{code}
|
||||
c <- resp.StatusCode
|
||||
}(host, result)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.ScaleDeploymentToZero("nginx-ingress-controller")
|
||||
|
||||
ticker := time.NewTicker(time.Second * 10)
|
||||
|
||||
for {
|
||||
select {
|
||||
case res := <-result:
|
||||
assert.Equal(ginkgo.GinkgoT(), res.status, http.StatusOK, "expecting a valid response from HTTP request")
|
||||
assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 150, "waiting shutdown")
|
||||
ticker.Stop()
|
||||
return
|
||||
case <-ticker.C:
|
||||
framework.Logf("waiting for request completion after shutdown")
|
||||
}
|
||||
}
|
||||
})
|
||||
assert.Equal(ginkgo.GinkgoT(), <-result, http.StatusOK, "expecting a valid response from HTTP request")
|
||||
assert.GreaterOrEqual(ginkgo.GinkgoT(), int(time.Since(startTime).Seconds()), 150, "waiting shutdown")
|
||||
close(done)
|
||||
}, 200)
|
||||
})
|
||||
|
|
|
|||
|
|
@ -19,7 +19,6 @@ package gracefulshutdown
|
|||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
|
|
@ -55,7 +54,7 @@ var _ = framework.IngressNginxDescribe("[Shutdown] Graceful shutdown with pendin
|
|||
Status(http.StatusOK)
|
||||
}()
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
framework.Sleep()
|
||||
f.DeleteNGINXPod(60)
|
||||
<-done
|
||||
})
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {
|
|||
time.Sleep(waitForLuaSync)
|
||||
|
||||
ip := f.GetNginxPodIP()
|
||||
mf, err := f.GetMetric("nginx_ingress_controller_success", ip[0])
|
||||
mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
assert.NotNil(ginkgo.GinkgoT(), mf)
|
||||
|
||||
|
|
@ -99,7 +99,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {
|
|||
assert.NotEmpty(ginkgo.GinkgoT(), log)
|
||||
|
||||
ginkgo.By("skipping Nginx reload")
|
||||
mf, err = f.GetMetric("nginx_ingress_controller_success", ip[0])
|
||||
mf, err = f.GetMetric("nginx_ingress_controller_success", ip)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
assert.NotNil(ginkgo.GinkgoT(), mf)
|
||||
|
||||
|
|
@ -189,7 +189,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {
|
|||
ensureHTTPSRequest(f, fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host)
|
||||
|
||||
ip := f.GetNginxPodIP()
|
||||
mf, err := f.GetMetric("nginx_ingress_controller_success", ip[0])
|
||||
mf, err := f.GetMetric("nginx_ingress_controller_success", ip)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
assert.NotNil(ginkgo.GinkgoT(), mf)
|
||||
|
||||
|
|
@ -204,7 +204,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic certificates", func() {
|
|||
ginkgo.By("serving the default certificate on HTTPS endpoint")
|
||||
ensureHTTPSRequest(f, f.GetURL(framework.HTTPS), host, "ingress.local")
|
||||
|
||||
mf, err = f.GetMetric("nginx_ingress_controller_success", ip[0])
|
||||
mf, err = f.GetMetric("nginx_ingress_controller_success", ip)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
assert.NotNil(ginkgo.GinkgoT(), mf)
|
||||
|
||||
|
|
|
|||
|
|
@ -210,7 +210,7 @@ var _ = framework.IngressNginxDescribe("[Lua] dynamic configuration", func() {
|
|||
err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 3, nil)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
output, err = f.ExecIngressPod(curlCmd)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
|
|
|
|||
|
|
@ -83,7 +83,7 @@ func smugglingRequest(host, addr string, port int) (string, error) {
|
|||
}
|
||||
|
||||
// wait for /_hidden/index.html response
|
||||
time.Sleep(1 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
var buf = make([]byte, 1024)
|
||||
r := bufio.NewReader(conn)
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -271,7 +270,7 @@ var _ = framework.IngressNginxDescribe("[Service] Type ExternalName", func() {
|
|||
_, err = f.KubeClientSet.CoreV1().Services(f.Namespace).Update(context.Background(), svc, metav1.UpdateOptions{})
|
||||
assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating httpbin service")
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
body = f.HTTPTestClient().
|
||||
GET("/get").
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
"sync"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -38,28 +38,32 @@ import (
|
|||
var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
|
||||
f := framework.NewDefaultFramework("ingress-class")
|
||||
|
||||
f.KubeClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "ingress-nginx-class"},
|
||||
Rules: []rbacv1.PolicyRule{{
|
||||
APIGroups: []string{"networking.k8s.io"},
|
||||
Resources: []string{"ingressclasses"},
|
||||
Verbs: []string{"get", "list", "watch"},
|
||||
}},
|
||||
}, metav1.CreateOptions{})
|
||||
|
||||
f.KubeClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ingress-nginx-class",
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "ingress-nginx-class",
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
var doOnce sync.Once
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
f.NewEchoDeploymentWithReplicas(1)
|
||||
|
||||
doOnce.Do(func() {
|
||||
f.KubeClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "ingress-nginx-class"},
|
||||
Rules: []rbacv1.PolicyRule{{
|
||||
APIGroups: []string{"networking.k8s.io"},
|
||||
Resources: []string{"ingressclasses"},
|
||||
Verbs: []string{"get", "list", "watch"},
|
||||
}},
|
||||
}, metav1.CreateOptions{})
|
||||
|
||||
f.KubeClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ingress-nginx-class",
|
||||
},
|
||||
RoleRef: rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "ingress-nginx-class",
|
||||
},
|
||||
}, metav1.CreateOptions{})
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.Context("Without a specific ingress-class", func() {
|
||||
|
|
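In the ingress-class hunk above, the cluster-scoped RBAC objects used to be created while the Describe block was being constructed; they are now created inside BeforeEach but guarded by a sync.Once, so the ClusterRole and ClusterRoleBinding are only set up on the first spec of the suite. A minimal standalone illustration of that guard; the printed message stands in for the RBAC API calls:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var doOnce sync.Once
	setup := func() { fmt.Println("creating cluster-scoped RBAC (runs once)") }

	for i := 0; i < 3; i++ { // BeforeEach runs before every spec...
		doOnce.Do(setup) // ...but the guarded setup only executes the first time
	}
}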
@ -175,6 +179,8 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
|
|||
_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(ing.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
|
||||
framework.Sleep()
|
||||
|
||||
f.WaitForNginxConfiguration(func(cfg string) bool {
|
||||
return !strings.Contains(cfg, "server_name foo")
|
||||
})
|
||||
|
|
@ -277,7 +283,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
|
|||
return strings.Contains(cfg, fmt.Sprintf("server_name %v", host))
|
||||
})
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.HTTPTestClient().
|
||||
GET("/").
|
||||
|
|
@ -297,7 +303,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
|
|||
_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.WaitForNginxConfiguration(func(cfg string) bool {
|
||||
return !strings.Contains(cfg, fmt.Sprintf("server_name %v", host))
|
||||
|
|
@ -319,7 +325,7 @@ var _ = framework.IngressNginxDescribe("[Flag] ingress-class", func() {
|
|||
_, err = f.KubeClientSet.NetworkingV1beta1().Ingresses(f.Namespace).Update(context.TODO(), ing, metav1.UpdateOptions{})
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.WaitForNginxConfiguration(func(cfg string) bool {
|
||||
return !strings.Contains(cfg, fmt.Sprintf("server_name %v", host))
|
||||
|
|
|
|||
|
|
@ -27,7 +27,6 @@ import (
|
|||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -126,7 +125,7 @@ var _ = framework.DescribeSetting("OCSP", func() {
|
|||
|
||||
// give time the lua request to the OCSP
|
||||
// URL to finish and update the cache
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
// TODO: is possible to avoid second request?
|
||||
resp := f.HTTPTestClientWithTLSConfig(tlsConfig).
|
||||
|
|
@ -235,7 +234,7 @@ func prepareCertificates(namespace string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
command = "cfssl gencert -remote=localhost -profile=server leaf_csr.json | cfssljson -bare leaf"
|
||||
ginkgo.By(fmt.Sprintf("running %v", command))
|
||||
|
|
|
|||
|
|
@ -22,7 +22,6 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
|
@ -58,7 +57,7 @@ var _ = framework.IngressNginxDescribe("[SSL] secret update", func() {
|
|||
ing.Namespace)
|
||||
assert.Nil(ginkgo.GinkgoT(), err)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.WaitForNginxServer(host,
|
||||
func(server string) bool {
|
||||
|
|
@ -70,7 +69,7 @@ var _ = framework.IngressNginxDescribe("[SSL] secret update", func() {
|
|||
assert.Nil(ginkgo.GinkgoT(), err, "obtaining nginx logs")
|
||||
assert.NotContains(ginkgo.GinkgoT(), log, fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace))
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
dummySecret.Data["some-key"] = []byte("some value")
|
||||
|
||||
|
|
|
|||
|
|
@ -78,7 +78,7 @@ var _ = framework.IngressNginxDescribe("[TCP] tcp-services", func() {
|
|||
assert.Nil(ginkgo.GinkgoT(), err, "unexpected error updating service")
|
||||
|
||||
// wait for update and nginx reload and new endpoint is available
|
||||
time.Sleep(5 * time.Second)
|
||||
framework.Sleep()
|
||||
|
||||
f.WaitForNginxConfiguration(
|
||||
func(cfg string) bool {
|
||||
|