Split documentation
parent a18daabc51
commit a9168f276e
144 changed files with 1780 additions and 3789 deletions

docs/examples/customization/configuration-snippets/README.md (new file, 12 lines)
@@ -0,0 +1,12 @@
## Ingress

The Ingress in this example adds a custom header to the NGINX configuration that applies only to that specific Ingress. If you want to add headers that apply globally to all Ingresses, please have a look at [this example](/examples/customization/custom-headers/nginx).

```console
$ kubectl apply -f ingress.yaml
```

## Test

Check if the contents of the annotation are present in the nginx.conf file using:

`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf`
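
To narrow that down, you can grep the rendered configuration for the snippet's directive (a sketch; the pod name is the hypothetical one used throughout these examples):

```console
$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system -- cat /etc/nginx/nginx.conf | grep more_set_headers
```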

@@ -0,0 +1,18 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nginx-configuration-snippet
  annotations:
    kubernetes.io/ingress.class: "nginx"
    ingress.kubernetes.io/configuration-snippet: |
      more_set_headers "Request-Id: $request_id";
spec:
  rules:
  - host: custom.configuration.com
    http:
      paths:
      - backend:
          serviceName: http-svc
          servicePort: 80
        path: /

docs/examples/customization/custom-configuration/README.md (new file, 24 lines)
@@ -0,0 +1,24 @@
Using a [ConfigMap](https://kubernetes.io/docs/user-guide/configmap/) it is possible to customize the NGINX configuration.

For example, if we want to change the timeouts we need to create a ConfigMap:

```
$ cat nginx-load-balancer-conf.yaml
apiVersion: v1
data:
  proxy-connect-timeout: "10"
  proxy-read-timeout: "120"
  proxy-send-timeout: "120"
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
```

```
$ kubectl create -f nginx-load-balancer-conf.yaml
```

Please check the example `nginx-custom-configuration.yaml`.

If the ConfigMap is updated, NGINX will be reloaded with the new configuration.
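
For instance (a minimal sketch, assuming the `nginx-custom-configuration` ConfigMap from this example and the hypothetical pod name used elsewhere in these docs), you can change a timeout and watch the controller logs for the resulting reload:

```
$ kubectl -n kube-system patch configmap nginx-custom-configuration --patch '{"data":{"proxy-connect-timeout":"15"}}'
$ kubectl -n kube-system logs nginx-ingress-controller-873061567-4n3k2 | grep -i reload
```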

@@ -0,0 +1,56 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-controller
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-controller
      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
      # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
      # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
      # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
      # like with kubeadm
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --configmap=$(POD_NAMESPACE)/nginx-custom-configuration

@@ -0,0 +1,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-custom-configuration
  labels:
    k8s-app: nginx-ingress-controller
  namespace: kube-system
data:
  proxy-connect-timeout: "10"
  proxy-read-timeout: "120"
  proxy-send-timeout: "120"

docs/examples/customization/custom-errors/README.md (new file, 80 lines)
@@ -0,0 +1,80 @@
This example shows how it is possible to use a custom backend to render custom error pages. The code of this example is located here: [custom-error-pages](https://github.com/kubernetes/ingress/tree/master/examples/customization/custom-errors/nginx).

The idea is to use the headers `X-Code` and `X-Format` that NGINX passes to the backend in case of an error to find out the best existing representation of the response to be returned, i.e. if the request contains an `Accept` header of type `json` the error should be in that format and not in `html` (the default in NGINX).

First create the custom backend to use in the Ingress controller:

```
$ kubectl create -f custom-default-backend.yaml
service "nginx-errors" created
replicationcontroller "nginx-errors" created
```

```
$ kubectl get svc
NAME           CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
echoheaders    10.3.0.7     nodes         80/TCP    23d
kubernetes     10.3.0.1     <none>        443/TCP   34d
nginx-errors   10.3.0.102   <none>        80/TCP    11s
```

```
$ kubectl get rc
CONTROLLER     REPLICAS   AGE
echoheaders    1          19d
nginx-errors   1          19s
```

Next, create the Ingress controller by executing:

```
$ kubectl create -f rc-custom-errors.yaml
```

Now, to check that this is working, we use curl:

```
$ curl -v http://172.17.4.99/
*   Trying 172.17.4.99...
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
> GET / HTTP/1.1
> Host: 172.17.4.99
> User-Agent: curl/7.43.0
> Accept: */*
>
< HTTP/1.1 404 Not Found
< Server: nginx/1.10.0
< Date: Wed, 04 May 2016 02:53:45 GMT
< Content-Type: text/html
< Transfer-Encoding: chunked
< Connection: keep-alive
< Vary: Accept-Encoding
<
<span>The page you're looking for could not be found.</span>

* Connection #0 to host 172.17.4.99 left intact
```

Specifying json as expected format:

```
$ curl -v http://172.17.4.99/ -H 'Accept: application/json'
*   Trying 172.17.4.99...
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
> GET / HTTP/1.1
> Host: 172.17.4.99
> User-Agent: curl/7.43.0
> Accept: application/json
>
< HTTP/1.1 404 Not Found
< Server: nginx/1.10.0
< Date: Wed, 04 May 2016 02:54:00 GMT
< Content-Type: text/html
< Transfer-Encoding: chunked
< Connection: keep-alive
< Vary: Accept-Encoding
<
{ "message": "The page you're looking for could not be found" }

* Connection #0 to host 172.17.4.99 left intact
```

@@ -0,0 +1,31 @@
apiVersion: v1
kind: Service
metadata:
  name: nginx-errors
  labels:
    app: nginx-errors
spec:
  ports:
  - port: 80
    targetPort: 80
    protocol: TCP
    name: http
  selector:
    app: nginx-errors
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-errors
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx-errors
    spec:
      containers:
      - name: nginx-errors
        image: aledbf/nginx-error-server:0.1
        ports:
        - containerPort: 80

@@ -0,0 +1,51 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-lb
spec:
  replicas: 1
  selector:
    k8s-app: nginx-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-lb
        name: nginx-ingress-lb
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
        name: nginx-ingress-lb
        imagePullPolicy: Always
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        # use downward API
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/nginx-errors

docs/examples/customization/custom-headers/README.md (new file, 76 lines)
@@ -0,0 +1,76 @@
# Deploying the Nginx Ingress controller

This example aims to demonstrate the deployment of an nginx ingress controller and
use a ConfigMap to configure a custom list of headers to be passed to the upstream
server.

## Default Backend

The default backend is a Service capable of handling all url paths and hosts the
nginx controller doesn't understand. This most basic implementation just returns
a 404 page:

```console
$ kubectl apply -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created

$ kubectl -n kube-system get po
NAME                                    READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-qgwdd   1/1       Running   0          28s
```

## Custom configuration

```console
$ cat nginx-load-balancer-conf.yaml
apiVersion: v1
data:
  proxy-set-headers: "kube-system/custom-headers"
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
```

```console
$ kubectl create -f nginx-load-balancer-conf.yaml
```

## Custom headers

```console
$ cat custom-headers.yaml
apiVersion: v1
data:
  X-Different-Name: "true"
  X-Request-Start: t=${msec}
  X-Using-Nginx-Controller: "true"
kind: ConfigMap
metadata:
  name: custom-headers
  namespace: kube-system
```

```console
$ kubectl create -f custom-headers.yaml
```

## Controller

You can deploy the controller as follows:

```console
$ kubectl apply -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created

$ kubectl -n kube-system get po
NAME                                       READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-qgwdd      1/1       Running   0          2m
nginx-ingress-controller-873061567-4n3k2   1/1       Running   0          42s
```

## Test

Check that the contents of the ConfigMap are present in the nginx.conf file using:

`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf`
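
As a quicker check (a sketch, using the same hypothetical pod name), the custom headers should appear as `proxy_set_header` directives:

```console
$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system -- cat /etc/nginx/nginx.conf | grep proxy_set_header
```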

@@ -0,0 +1,9 @@
apiVersion: v1
data:
  X-Different-Name: "true"
  X-Request-Start: t=${msec}
  X-Using-Nginx-Controller: "true"
kind: ConfigMap
metadata:
  name: custom-headers
  namespace: kube-system

@@ -0,0 +1,51 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    k8s-app: default-http-backend
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/google_containers/defaultbackend:1.0
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: default-http-backend

@@ -0,0 +1,53 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-controller
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-controller
    spec:
      # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
      # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
      # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
      # like with kubeadm
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf

@@ -0,0 +1,7 @@
apiVersion: v1
data:
  proxy-set-headers: "kube-system/custom-headers"
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
  namespace: kube-system

docs/examples/customization/custom-upstream-check/README.md (new file, 47 lines)
@@ -0,0 +1,47 @@
# Custom Upstream server checks

This example shows how it is possible to create a custom configuration for a particular upstream associated with an Ingress rule.

```
echo "
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: http-svc
  annotations:
    ingress.kubernetes.io/upstream-fail-timeout: "30"
spec:
  rules:
  - host: foo.bar.com
    http:
      paths:
      - path: /
        backend:
          serviceName: http-svc
          servicePort: 80
" | kubectl create -f -
```

Check that the annotation is present in the Ingress rule:

```
kubectl get ingress http-svc -o yaml
```

Check that the NGINX configuration is updated using kubectl or the status page:

```
$ kubectl exec nginx-ingress-controller-v1ppm cat /etc/nginx/nginx.conf
```

```
....
upstream default-http-svc-x-80 {
    least_conn;
    server 10.2.92.2:8080 max_fails=5 fail_timeout=30;

}
....
```



(binary image file added, 59 KiB; not shown)

@@ -0,0 +1,128 @@
# Deploying the Nginx Ingress controller

This example aims to demonstrate the deployment of an nginx ingress controller and use a ConfigMap to enable the [nginx vts module](https://github.com/vozlt/nginx-module-vts) to export metrics in prometheus format.

# vts-metrics

Vts-metrics export NGINX metrics. To deploy all the files simply run `kubectl apply -f nginx`. A deployment and service will be
created which already has a `prometheus.io/scrape: 'true'` annotation and, if you added
the recommended Prometheus service-endpoint scraping [configuration](https://raw.githubusercontent.com/prometheus/prometheus/master/documentation/examples/prometheus-kubernetes.yml),
Prometheus will scrape it automatically and you can start using the generated metrics right away.

## Default Backend

The default backend is a Service capable of handling all url paths and hosts the
nginx controller doesn't understand. This most basic implementation just returns
a 404 page:

```console
$ kubectl apply -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created

$ kubectl -n kube-system get po
NAME                                    READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-qgwdd   1/1       Running   0          28s
```

## Custom configuration

```console
$ cat nginx-vts-metrics-conf.yaml
apiVersion: v1
data:
  enable-vts-status: "true"
kind: ConfigMap
metadata:
  name: nginx-vts-metrics-conf
  namespace: kube-system
```

```console
$ kubectl create -f nginx-vts-metrics-conf.yaml
```

## Controller

You can deploy the controller as follows:

```console
$ kubectl apply -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created

$ kubectl -n kube-system get po
NAME                                       READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-qgwdd      1/1       Running   0          2m
nginx-ingress-controller-873061567-4n3k2   1/1       Running   0          42s
```

## Result
Check whether the ingress controller successfully generated the NGINX vts status:

```console
$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf|grep vhost_traffic_status_display
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
```

### NGINX vts dashboard
The vts dashboard provides real time metrics.



Because the vts port is not yet exposed, you should forward the controller's port to see it.

```console
$ kubectl port-forward $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) -n kube-system 18080
```

Now open the url [http://localhost:18080/nginx_status](http://localhost:18080/nginx_status) in your browser.

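The same data is also available as machine-readable output (a sketch; the `/format/json` suffix assumes the vts module's default display routes):

```console
$ curl http://localhost:18080/nginx_status/format/json
```
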
### Prometheus metrics output
NGINX Ingress controller already has a parser to convert vts metrics to Prometheus format. It exports prometheus metrics to the address `:10254/metrics`.

```console
$ kubectl exec -ti -n kube-system $(kubectl get pods --selector=k8s-app=nginx-ingress-controller -n kube-system --output=jsonpath={.items..metadata.name}) curl localhost:10254/metrics
ingress_controller_ssl_expire_time_seconds{host="foo.bar.com"} -6.21355968e+10
# HELP ingress_controller_success Cumulative number of Ingress controller reload operations
# TYPE ingress_controller_success counter
ingress_controller_success{count="reloads"} 3
# HELP nginx_bytes_total Nginx bytes count
# TYPE nginx_bytes_total counter
nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="*"} 3708
nginx_bytes_total{direction="in",ingress_class="nginx",namespace="",server_zone="_"} 3708
nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="*"} 5256
nginx_bytes_total{direction="out",ingress_class="nginx",namespace="",server_zone="_"} 5256
```

### Customize metrics

The default [vts vhost key](https://github.com/vozlt/nginx-module-vts#vhost_traffic_status_filter_by_set_key) is `$geoip_country_code country::*`, which exposes metrics grouped by server and country code. The example below shows how to have metrics grouped by server and server path.



## NGINX custom configuration ( http level )

```
apiVersion: v1
kind: ConfigMap
data:
  enable-vts-status: "true"
  vts-default-filter-key: "$server_name"
...
```

## Customize ingress

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    ingress.kubernetes.io/vts-filter-key: $uri $server_name
  name: ingress
```

## Result

![vts dashboard](imgs/vts-dashboard.png)

@@ -0,0 +1,51 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    k8s-app: default-http-backend
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/google_containers/defaultbackend:1.0
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: default-http-backend

(3 binary image files added, not shown: 969 KiB, 451 KiB, 244 KiB)

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
  name: nginx-ingress-controller-service
  namespace: kube-system
  labels:
    k8s-app: nginx-ingress-controller-service
  annotations:
    prometheus.io/port: '10254'
    prometheus.io/scrape: 'true'
spec:
  ports:
  - name: http-metrics
    port: 8080
    targetPort: 10254
    protocol: TCP
  selector:
    k8s-app: nginx-ingress-controller

@@ -0,0 +1,56 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-controller
  namespace: kube-system
  annotations:
    prometheus.io/port: "10254"
    prometheus.io/scrape: "true"
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-controller
    spec:
      # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
      # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
      # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
      # like with kubeadm
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --configmap=$(POD_NAMESPACE)/nginx-vts-metrics-conf

@@ -0,0 +1,7 @@
apiVersion: v1
data:
  enable-vts-status: "true"
kind: ConfigMap
metadata:
  name: nginx-vts-metrics-conf
  namespace: kube-system

docs/examples/customization/external-auth-headers/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
all: push

TAG=0.1
PREFIX?=electroma/ingress-demo-
ARCH?=amd64
GOLANG_VERSION=1.8
TEMP_DIR:=$(shell mktemp -d)

build: clean
	CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) go build -o authsvc/authsvc authsvc/authsvc.go
	CGO_ENABLED=0 GOOS=linux GOARCH=$(ARCH) go build -o echosvc/echosvc echosvc/echosvc.go

container: build
	docker build --pull -t $(PREFIX)authsvc-$(ARCH):$(TAG) authsvc
	docker build --pull -t $(PREFIX)echosvc-$(ARCH):$(TAG) echosvc

push: container
	docker push $(PREFIX)authsvc-$(ARCH):$(TAG)
	docker push $(PREFIX)echosvc-$(ARCH):$(TAG)

clean:
	rm -f authsvc/authsvc echosvc/echosvc

docs/examples/customization/external-auth-headers/README.md (new file, 138 lines)
@@ -0,0 +1,138 @@
# External authentication, authentication service response headers propagation

This example demonstrates propagation of selected authentication service response headers
to the backend service.

Sample configuration includes:

* Sample authentication service producing several response headers
  * Authentication logic is based on HTTP header: requests with header `User` containing string `internal` are considered authenticated
  * After successful authentication the service generates response headers `UserID` and `UserRole`
* Sample echo service displaying header information
* Two ingress objects pointing to echo service
  * Public, which allows access from unauthenticated users
  * Private, which allows access from authenticated users only

You can deploy the controller as follows:

```console
$ kubectl create -f deploy/
deployment "demo-auth-service" created
service "demo-auth-service" created
ingress "demo-auth-service" created
deployment "default-http-backend" created
service "default-http-backend" created
deployment "demo-echo-service" created
service "demo-echo-service" created
ingress "public-demo-echo-service" created
ingress "secure-demo-echo-service" created
deployment "nginx-ingress-controller" created

$ kubectl get po
NAME                                    READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-vv0hm   1/1       Running   0          29s
demo-auth-service-2769076528-7g9mh      1/1       Running   0          30s
demo-echo-service-3636052215-3vw8c      1/1       Running   0          29s

$ kubectl get ing
NAME                       HOSTS                                 ADDRESS   PORTS     AGE
public-demo-echo-service   public-demo-echo-service.kube.local             80        1m
secure-demo-echo-service   secure-demo-echo-service.kube.local             80        1m
```

Test 1: public service with no auth header

```
$ curl -H 'Host: public-demo-echo-service.kube.local' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
> GET / HTTP/1.1
> Host: public-demo-echo-service.kube.local
> User-Agent: curl/7.43.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Server: nginx/1.11.10
< Date: Mon, 13 Mar 2017 20:19:21 GMT
< Content-Type: text/plain; charset=utf-8
< Content-Length: 20
< Connection: keep-alive
<
* Connection #0 to host 192.168.99.100 left intact
UserID: , UserRole:
```

Test 2: secure service with no auth header

```
$ curl -H 'Host: secure-demo-echo-service.kube.local' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
> GET / HTTP/1.1
> Host: secure-demo-echo-service.kube.local
> User-Agent: curl/7.43.0
> Accept: */*
>
< HTTP/1.1 403 Forbidden
< Server: nginx/1.11.10
< Date: Mon, 13 Mar 2017 20:18:48 GMT
< Content-Type: text/html
< Content-Length: 170
< Connection: keep-alive
<
<html>
<head><title>403 Forbidden</title></head>
<body bgcolor="white">
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.11.10</center>
</body>
</html>
* Connection #0 to host 192.168.99.100 left intact
```

Test 3: public service with valid auth header

```
$ curl -H 'Host: public-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
> GET / HTTP/1.1
> Host: public-demo-echo-service.kube.local
> User-Agent: curl/7.43.0
> Accept: */*
> User:internal
>
< HTTP/1.1 200 OK
< Server: nginx/1.11.10
< Date: Mon, 13 Mar 2017 20:19:59 GMT
< Content-Type: text/plain; charset=utf-8
< Content-Length: 44
< Connection: keep-alive
<
* Connection #0 to host 192.168.99.100 left intact
UserID: 1443635317331776148, UserRole: admin
```

Test 4: secure service with valid auth header

```
$ curl -H 'Host: secure-demo-echo-service.kube.local' -H 'User:internal' -v 192.168.99.100
* Rebuilt URL to: 192.168.99.100/
*   Trying 192.168.99.100...
* Connected to 192.168.99.100 (192.168.99.100) port 80 (#0)
> GET / HTTP/1.1
> Host: secure-demo-echo-service.kube.local
> User-Agent: curl/7.43.0
> Accept: */*
> User:internal
>
< HTTP/1.1 200 OK
< Server: nginx/1.11.10
< Date: Mon, 13 Mar 2017 20:17:23 GMT
< Content-Type: text/plain; charset=utf-8
< Content-Length: 43
< Connection: keep-alive
<
* Connection #0 to host 192.168.99.100 left intact
UserID: 605394647632969758, UserRole: admin
```

@@ -0,0 +1,5 @@
FROM alpine:3.5
MAINTAINER Roman Safronov <electroma@gmail.com>
COPY authsvc /
EXPOSE 8080
ENTRYPOINT ["/authsvc"]

@@ -0,0 +1,33 @@
package main

import (
	"fmt"
	"math/rand"
	"net/http"
	"strconv"
	"strings"
)

// Sample authentication service returning several HTTP headers in response
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.Header.Get("User"), "internal") {
			w.Header().Add("UserID", strconv.Itoa(rand.Int()))
			w.Header().Add("UserRole", "admin")
			w.Header().Add("Other", "not used")
			fmt.Fprint(w, "ok")
		} else {
			rc := http.StatusForbidden
			if c := r.URL.Query().Get("code"); len(c) > 0 {
				c, _ := strconv.Atoi(c)
				if c > 0 && c < 600 {
					rc = c
				}
			}

			w.WriteHeader(rc)
			fmt.Fprint(w, "unauthorized")
		}
	})
	http.ListenAndServe(":8080", nil)
}

@@ -0,0 +1,41 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: demo-auth-service
  labels:
    k8s-app: demo-auth-service
  namespace: default
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: demo-auth-service
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: auth-service
        image: electroma/ingress-demo-authsvc-amd64:0.1
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: demo-auth-service
  labels:
    k8s-app: demo-auth-service
  namespace: default
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: demo-auth-service

@@ -0,0 +1,48 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    k8s-app: default-http-backend
  namespace: default
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        image: gcr.io/google_containers/defaultbackend:1.0
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: default
  labels:
    k8s-app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: default-http-backend

@@ -0,0 +1,77 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: demo-echo-service
  labels:
    k8s-app: demo-echo-service
  namespace: default
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: demo-echo-service
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: echo-service
        image: electroma/ingress-demo-echosvc-amd64:0.1
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: demo-echo-service
  labels:
    k8s-app: demo-echo-service
  namespace: default
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: demo-echo-service
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: public-demo-echo-service
  annotations:
    ingress.kubernetes.io/auth-url: http://demo-auth-service.default.svc.cluster.local?code=200
    ingress.kubernetes.io/auth-response-headers: UserID, UserRole
  namespace: default
spec:
  rules:
  - host: public-demo-echo-service.kube.local
    http:
      paths:
      - backend:
          serviceName: demo-echo-service
          servicePort: 80
        path: /
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: secure-demo-echo-service
  annotations:
    ingress.kubernetes.io/auth-url: http://demo-auth-service.default.svc.cluster.local
    ingress.kubernetes.io/auth-response-headers: UserID, UserRole
  namespace: default
spec:
  rules:
  - host: secure-demo-echo-service.kube.local
    http:
      paths:
      - backend:
          serviceName: demo-echo-service
          servicePort: 80
        path: /

@@ -0,0 +1,33 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-controller
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-controller
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.2
        name: nginx-ingress-controller
        ports:
        - containerPort: 80
          hostPort: 80
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend

@@ -0,0 +1,5 @@
FROM alpine:3.5
MAINTAINER Roman Safronov <electroma@gmail.com>
COPY echosvc /
EXPOSE 8080
ENTRYPOINT ["/echosvc"]

@@ -0,0 +1,16 @@
package main

import (
	"fmt"
	"net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "UserID: %s, UserRole: %s", r.Header.Get("UserID"), r.Header.Get("UserRole"))
}

// Sample "echo" service displaying UserID and UserRole HTTP request headers
func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil)
}

docs/examples/customization/ssl-dh-param/README.md (new file, 79 lines)
@@ -0,0 +1,79 @@
# Deploying the Nginx Ingress controller

This example aims to demonstrate the deployment of an nginx ingress controller and
use a ConfigMap to configure a custom Diffie-Hellman parameters file to help with
"Perfect Forward Secrecy".

## Default Backend

The default backend is a Service capable of handling all url paths and hosts the
nginx controller doesn't understand. This most basic implementation just returns
a 404 page:

```console
$ kubectl apply -f default-backend.yaml
deployment "default-http-backend" created
service "default-http-backend" created

$ kubectl -n kube-system get po
NAME                                    READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-qgwdd   1/1       Running   0          28s
```

## Custom configuration

```console
$ cat nginx-load-balancer-conf.yaml
apiVersion: v1
data:
  ssl-dh-param: "kube-system/lb-dhparam"
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
```

```console
$ kubectl create -f nginx-load-balancer-conf.yaml
```

## Custom DH parameters secret

```console
$> openssl dhparam 1024 2> /dev/null | base64
LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ...
```

```console
$ cat ssl-dh-param.yaml
apiVersion: v1
data:
  dhparam.pem: "LS0tLS1CRUdJTiBESCBQQVJBTUVURVJ..."
kind: Secret
type: Opaque
metadata:
  name: lb-dhparam
  namespace: kube-system
```

```console
$ kubectl create -f ssl-dh-param.yaml
```

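Alternatively (a sketch, assuming the generated parameters are saved to a local `dhparam.pem` file), you can let `kubectl` handle the base64 encoding for you:

```console
$ openssl dhparam -out dhparam.pem 1024
$ kubectl create secret generic lb-dhparam --from-file=dhparam.pem -n kube-system
```
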
## Controller

You can deploy the controller as follows:

```console
$ kubectl apply -f nginx-ingress-controller.yaml
deployment "nginx-ingress-controller" created

$ kubectl -n kube-system get po
NAME                                       READY     STATUS    RESTARTS   AGE
default-http-backend-2657704409-qgwdd      1/1       Running   0          2m
nginx-ingress-controller-873061567-4n3k2   1/1       Running   0          42s
```

## Test

Check that the contents of the ConfigMap are present in the nginx.conf file using:

`kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system cat /etc/nginx/nginx.conf`
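
As a shortcut (a sketch, using the same hypothetical pod name), the custom parameters should show up as an `ssl_dhparam` directive:

```console
$ kubectl exec nginx-ingress-controller-873061567-4n3k2 -n kube-system -- cat /etc/nginx/nginx.conf | grep ssl_dhparam
```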

@@ -0,0 +1,51 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: default-http-backend
  labels:
    k8s-app: default-http-backend
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: default-http-backend
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
        # 1. It serves a 404 page at /
        # 2. It serves 200 on a /healthz endpoint
        image: gcr.io/google_containers/defaultbackend:1.0
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
        ports:
        - containerPort: 8080
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
  name: default-http-backend
  namespace: kube-system
  labels:
    k8s-app: default-http-backend
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: default-http-backend

@@ -0,0 +1,53 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  labels:
    k8s-app: nginx-ingress-controller
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: nginx-ingress-controller
    spec:
      # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration
      # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host
      # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used
      # like with kubeadm
      # hostNetwork: true
      terminationGracePeriodSeconds: 60
      containers:
      - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.15
        name: nginx-ingress-controller
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10254
            scheme: HTTP
          initialDelaySeconds: 10
          timeoutSeconds: 1
        ports:
        - containerPort: 80
          hostPort: 80
        - containerPort: 443
          hostPort: 443
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - /nginx-ingress-controller
        - --default-backend-service=$(POD_NAMESPACE)/default-http-backend
        - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf

@@ -0,0 +1,7 @@
apiVersion: v1
data:
  ssl-dh-param: "kube-system/lb-dhparam"
kind: ConfigMap
metadata:
  name: nginx-load-balancer-conf
  namespace: kube-system

@@ -0,0 +1,8 @@
apiVersion: v1
data:
  dhparam.pem: "...base64 encoded data..."
kind: Secret
type: Opaque
metadata:
  name: lb-dhparam
  namespace: kube-system