git mv Ingress ingress
This commit is contained in:
parent
34b949c134
commit
3da4e74e5a
2185 changed files with 754743 additions and 0 deletions
138
controllers/README.md
Normal file
138
controllers/README.md
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
# Ingress Controllers
|
||||
|
||||
Configuring a webserver or loadbalancer is harder than it should be. Most webserver configuration files are very similar. There are some applications that have weird little quirks that tend to throw a wrench in things, but for the most part you can apply the same logic to them and achieve a desired result. The Ingress resource embodies this idea, and an Ingress controller is meant to handle all the quirks associated with a specific "class" of Ingress (be it a single instance of a loadbalancer, or a more complicated setup of frontends that provide GSLB, DDoS protection etc).
|
||||
|
||||
## What is an Ingress Controller?
|
||||
|
||||
An Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the ApiServer's `/ingresses` endpoint for updates to the [Ingress resource](https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/ingress.md). Its job is to satisfy requests for ingress.
|
||||
|
||||
## Writing an Ingress Controller
|
||||
|
||||
Writing an Ingress controller is simple. By way of example, the [nginx controller](nginx-alpha) does the following:
|
||||
* Poll until apiserver reports a new Ingress
|
||||
* Write the nginx config file based on a [go text/template](https://golang.org/pkg/text/template/)
|
||||
* Reload nginx
|
||||
|
||||
Pay attention to how it denormalizes the Kubernetes Ingress object into an nginx config:
|
||||
```go
|
||||
const (
|
||||
nginxConf = `
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
http {
|
||||
{{range $ing := .Items}}
|
||||
{{range $rule := $ing.Spec.Rules}}
|
||||
server {
|
||||
listen 80;
|
||||
server_name {{$rule.Host}};
|
||||
resolver 127.0.0.1;
|
||||
{{ range $path := $rule.HTTP.Paths }}
|
||||
location {{$path.Path}} {
|
||||
proxy_set_header Host $host;
|
||||
proxy_pass http://{{$path.Backend.ServiceName}}.{{$ing.Namespace}}.svc.cluster.local:{{$path.Backend.ServicePort}};
|
||||
}{{end}}
|
||||
}{{end}}{{end}}
|
||||
}`
|
||||
)
|
||||
```
|
||||
|
||||
You can take a similar approach to denormalize the Ingress to a [haproxy config](https://github.com/kubernetes/contrib/blob/master/service-loadbalancer/template.cfg) or use it to configure a cloud loadbalancer such as a [GCE L7](https://github.com/kubernetes/contrib/blob/master/Ingress/controllers/gce/README.md).
|
||||
|
||||
And here is the Ingress controller's control loop:
|
||||
|
||||
```go
|
||||
for {
|
||||
rateLimiter.Accept()
|
||||
ingresses, err := ingClient.List(labels.Everything(), fields.Everything())
|
||||
if err != nil || reflect.DeepEqual(ingresses.Items, known.Items) {
|
||||
continue
|
||||
}
|
||||
if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
|
||||
log.Fatalf("Failed to open %v: %v", nginxConf, err)
|
||||
} else if err := tmpl.Execute(w, ingresses); err != nil {
|
||||
log.Fatalf("Failed to write template %v", err)
|
||||
}
|
||||
shellOut("nginx -s reload")
|
||||
}
|
||||
```
|
||||
|
||||
All this is doing is:
|
||||
* List Ingresses, optionally you can watch for changes (see [GCE Ingress controller](https://github.com/kubernetes/contrib/blob/master/Ingress/controllers/gce/controller.go) for an example)
|
||||
* Executes the template and writes results to `/etc/nginx/nginx.conf`
|
||||
* Reloads nginx
|
||||
|
||||
You can deploy this controller to a Kubernetes cluster by [creating an RC](nginx-alpha/rc.yaml). After doing so, if you were to create an Ingress such as:
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: test
|
||||
spec:
|
||||
rules:
|
||||
- host: foo.bar.com
|
||||
http:
|
||||
paths:
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: fooSvc
|
||||
servicePort: 80
|
||||
- host: bar.baz.com
|
||||
http:
|
||||
paths:
|
||||
- path: /bar
|
||||
backend:
|
||||
serviceName: barSvc
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
Where `fooSvc` and `barSvc` are 2 services running in your Kubernetes cluster. The controller would satisfy the Ingress by writing a configuration file to /etc/nginx/nginx.conf:
|
||||
```nginx
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
http {
|
||||
server {
|
||||
listen 80;
|
||||
server_name foo.bar.com;
|
||||
resolver 127.0.0.1;
|
||||
|
||||
location /foo {
|
||||
proxy_pass http://fooSvc;
|
||||
}
|
||||
}
|
||||
server {
|
||||
listen 80;
|
||||
server_name bar.baz.com;
|
||||
resolver 127.0.0.1;
|
||||
|
||||
location /bar {
|
||||
proxy_pass http://barSvc;
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
And you can reach the `/foo` and `/bar` endpoints on the publicIP of the VM the nginx-ingress pod landed on.
|
||||
```
|
||||
$ kubectl get pods -o wide
|
||||
NAME READY STATUS RESTARTS AGE NODE
|
||||
nginx-ingress-tk7dl 1/1 Running 0 3m e2e-test-beeps-minion-15p3
|
||||
|
||||
$ kubectl get nodes e2e-test-beeps-minion-15p3 -o yaml | grep -i externalip -B 1
|
||||
- address: 104.197.203.179
|
||||
type: ExternalIP
|
||||
|
||||
$ curl --resolve foo.bar.com:80:104.197.203.179 foo.bar.com/foo
|
||||
```
|
||||
|
||||
## Future work
|
||||
|
||||
This section can also bear the title "why anyone would want to write an Ingress controller instead of directly configuring Services". There is more to Ingress than webserver configuration. *Real* HA usually involves the configuration of gateways and packet forwarding devices, which most cloud providers allow you to do through an API. See the GCE Loadbalancer Controller, which is deployed as a [cluster addon](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/cluster-loadbalancing/glbc) in GCE and GKE clusters for more advanced Ingress configuration examples. Post 1.1 the Ingress resource will support at least the following:
|
||||
* TLS options (edge, passthrough, SNI etc)
|
||||
* L4 and L7 loadbalancing (it currently only supports HTTP rules)
|
||||
* Ingress Rules that are not limited to a simple path regex (eg: redirect rules, session persistence)
|
||||
|
||||
And is expected to be the way one configures the "frontends" that handle user traffic for a Kubernetes cluster.
|
||||
|
||||
|
||||
34
controllers/gce/Dockerfile
Normal file
34
controllers/gce/Dockerfile
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
# Copyright 2015 The Kubernetes Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# TODO: use radial/busyboxplus:curl or alpine instead
|
||||
FROM ubuntu:14.04
|
||||
MAINTAINER Prashanth B <beeps@google.com>
|
||||
|
||||
# so apt-get doesn't complain
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
RUN sed -i 's/^exit 101/exit 0/' /usr/sbin/policy-rc.d
|
||||
|
||||
# TODO: Move to using haproxy:1.5 image instead. Honestly,
|
||||
# that image isn't much smaller and the convenience of having
|
||||
# an ubuntu container for dev purposes trumps the tiny amounts
|
||||
# of disk and bandwidth we'd save in doing so.
|
||||
RUN \
|
||||
apt-get update && \
|
||||
apt-get install -y ca-certificates && \
|
||||
apt-get install -y curl && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ADD glbc glbc
|
||||
ENTRYPOINT ["/glbc"]
|
||||
17
controllers/gce/Makefile
Normal file
17
controllers/gce/Makefile
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
all: push
|
||||
|
||||
# 0.0 shouldn't clobber any released builds
|
||||
TAG = 0.6.0
|
||||
PREFIX = gcr.io/google_containers/glbc
|
||||
|
||||
server:
|
||||
CGO_ENABLED=0 GOOS=linux godep go build -a -installsuffix cgo -ldflags '-w' -o glbc *.go
|
||||
|
||||
container: server
|
||||
docker build -t $(PREFIX):$(TAG) .
|
||||
|
||||
push: container
|
||||
gcloud docker push $(PREFIX):$(TAG)
|
||||
|
||||
clean:
|
||||
rm -f glbc
|
||||
448
controllers/gce/README.md
Normal file
448
controllers/gce/README.md
Normal file
|
|
@ -0,0 +1,448 @@
|
|||
# GLBC
|
||||
|
||||
GLBC is a GCE L7 load balancer controller that manages external loadbalancers configured through the Kubernetes Ingress API.
|
||||
|
||||
## Disclaimer
|
||||
- This is a **work in progress**.
|
||||
- It relies on an experimental Kubernetes resource.
|
||||
- The loadbalancer controller pod is not aware of your GCE quota.
|
||||
|
||||
## Overview
|
||||
|
||||
__A reminder on GCE L7__: Google Compute Engine does not have a single resource that represents a L7 loadbalancer. When a user request comes in, it is first handled by the global forwarding rule, which sends the traffic to an HTTP proxy service that sends the traffic to a URL map that parses the URL to see which backend service will handle the request. Each backend service is assigned a set of virtual machine instances grouped into instance groups.
|
||||
|
||||
__A reminder on Services__: A Kubernetes Service defines a set of pods and a means by which to access them, such as single stable IP address and corresponding DNS name. This IP defaults to a cluster VIP in a private address range. You can direct ingress traffic to a particular Service by setting its `Type` to NodePort or LoadBalancer. NodePort opens up a port on *every* node in your cluster and proxies traffic to the endpoints of your service, while LoadBalancer allocates an L4 cloud loadbalancer.
|
||||
|
||||
### L7 Load balancing on Kubernetes
|
||||
|
||||
To achieve L7 loadbalancing through Kubernetes, we employ a resource called `Ingress`. The Ingress is consumed by this loadbalancer controller, which creates the following GCE resource graph:
|
||||
|
||||
[Global Forwarding Rule](https://cloud.google.com/compute/docs/load-balancing/http/global-forwarding-rules) -> [TargetHttpProxy](https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) -> [Url Map](https://cloud.google.com/compute/docs/load-balancing/http/url-map) -> [Backend Service](https://cloud.google.com/compute/docs/load-balancing/http/backend-service) -> [Instance Group](https://cloud.google.com/compute/docs/instance-groups/)
|
||||
|
||||
The controller (glbc) manages the lifecycle of each component in the graph. It uses the Kubernetes resources as a spec for the desired state, and the GCE cloud resources as the observed state, and drives the observed to the desired. If an edge is disconnected, it fixes it. Each Ingress translates to a new GCE L7, and the rules on the Ingress become paths in the GCE Url Map. This allows you to route traffic to various backend Kubernetes Services through a single public IP, which is in contrast to `Type=LoadBalancer`, which allocates a public IP *per* Kubernetes Service. For this to work, the Kubernetes Service *must* have Type=NodePort.
|
||||
|
||||
### The Ingress
|
||||
|
||||
An Ingress in Kubernetes is a REST object, similar to a Service. A minimal Ingress might look like:
|
||||
|
||||
```yaml
|
||||
01. apiVersion: extensions/v1beta1
|
||||
02. kind: Ingress
|
||||
03. metadata:
|
||||
04. name: hostlessendpoint
|
||||
05. spec:
|
||||
06. rules:
|
||||
07. - http:
|
||||
08. paths:
|
||||
09. - path: /hostless
|
||||
10. backend:
|
||||
11. serviceName: test
|
||||
12. servicePort: 80
|
||||
```
|
||||
|
||||
POSTing this to the Kubernetes API server would result in glbc creating a GCE L7 that routes all traffic sent to `http://ip-of-loadbalancer/hostless` to :80 of the service named `test`. If the service doesn't exist yet, or doesn't have a nodePort, glbc will allocate an IP and wait till it does. Once the Service shows up, it will create the required path rules to route traffic to it.
|
||||
|
||||
__Lines 1-4__: Resource metadata used to tag GCE resources. For example, if you go to the console you would see a url map called: k8-fw-default-hostlessendpoint, where default is the namespace and hostlessendpoint is the name of the resource. The Kubernetes API server ensures that namespace/name is unique so there will never be any collisions.
|
||||
|
||||
__Lines 5-7__: Ingress Spec has all the information needed to configure a GCE L7. Most importantly, it contains a list of `rules`. A rule can take many forms, but the only rule relevant to glbc is the `http` rule.
|
||||
|
||||
__Lines 8-9__: Each http rule contains the following information: A host (eg: foo.bar.com, defaults to `*` in this example), a list of paths (eg: `/hostless`) each of which has an associated backend (`test:80`). Both the `host` and `path` must match the content of an incoming request before the L7 directs traffic to the `backend`.
|
||||
|
||||
__Lines 10-12__: A `backend` is a service:port combination. It selects a group of pods capable of servicing traffic sent to the path specified in the parent rule.
|
||||
|
||||
__Global Parameters__: For the sake of simplicity the example Ingress has no global parameters. However, one can specify a default backend (see examples below) in the absence of which requests that don't match a path in the spec are sent to the default backend of glbc. Though glbc doesn't support HTTPS yet, security configs would also be global.
|
||||
|
||||
|
||||
## Load Balancer Management
|
||||
|
||||
You can manage a GCE L7 by creating/updating/deleting the associated Kubernetes Ingress.
|
||||
|
||||
### Creation
|
||||
|
||||
Before you can start creating Ingress you need to start up glbc. We can use the rc.yaml in this directory:
|
||||
```shell
|
||||
$ kubectl create -f rc.yaml
|
||||
replicationcontroller "glbc" created
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
glbc-6m6b6 2/2 Running 0 21s
|
||||
|
||||
```
|
||||
|
||||
A couple of things to note about this controller:
|
||||
* It needs a service with a node port to use as the default backend. This is the backend that's used when an Ingress does not specify the default.
|
||||
* It has an intentionally long terminationGracePeriod, this is only required with the --delete-all-on-quit flag (see [Deletion](#deletion))
|
||||
* Don't start 2 instances of the controller in a single cluster, they will fight each other.
|
||||
|
||||
The loadbalancer controller will watch for Services, Nodes and Ingress. Nodes already exist (the nodes in your cluster). We need to create the other 2. You can do so using the ingress-app.yaml in this directory.
|
||||
|
||||
A couple of things to note about the Ingress:
|
||||
* It creates a Replication Controller for a simple echoserver application, with 1 replica.
|
||||
* It creates 3 services for the same application pod: echoheaders[x, y, default]
|
||||
* It creates an Ingress with 2 hostnames and 3 endpoints (foo.bar.com{/foo} and bar.baz.com{/foo, /bar}) that access the given service
|
||||
|
||||
```shell
|
||||
$ kubectl create -f ingress-app.yaml
|
||||
$ kubectl get svc
|
||||
NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE
|
||||
echoheadersdefault 10.0.43.119 nodes 80/TCP app=echoheaders 16m
|
||||
echoheadersx 10.0.126.10 nodes 80/TCP app=echoheaders 16m
|
||||
echoheadersy 10.0.134.238 nodes 80/TCP app=echoheaders 16m
|
||||
Kubernetes 10.0.0.1 <none> 443/TCP <none> 21h
|
||||
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
echomap - echoheadersdefault:80
|
||||
foo.bar.com
|
||||
/foo echoheadersx:80
|
||||
bar.baz.com
|
||||
/bar echoheadersy:80
|
||||
/foo echoheadersx:80
|
||||
```
|
||||
|
||||
You can tail the logs of the controller to observe its progress:
|
||||
```
|
||||
$ kubectl logs --follow glbc-6m6b6 l7-lb-controller
|
||||
I1005 22:11:26.731845 1 instances.go:48] Creating instance group k8-ig-foo
|
||||
I1005 22:11:34.360689 1 controller.go:152] Created new loadbalancer controller
|
||||
I1005 22:11:34.360737 1 controller.go:172] Starting loadbalancer controller
|
||||
I1005 22:11:34.380757 1 controller.go:206] Syncing default/echomap
|
||||
I1005 22:11:34.380763 1 loadbalancer.go:134] Syncing loadbalancers [default/echomap]
|
||||
I1005 22:11:34.380810 1 loadbalancer.go:100] Creating l7 default-echomap
|
||||
I1005 22:11:34.385161 1 utils.go:83] Syncing e2e-test-beeps-minion-ugv1
|
||||
...
|
||||
```
|
||||
|
||||
When it's done, it will update the status of the Ingress with the ip of the L7 it created:
|
||||
```shell
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
echomap - echoheadersdefault:80 107.178.254.239
|
||||
foo.bar.com
|
||||
/foo echoheadersx:80
|
||||
bar.baz.com
|
||||
/bar echoheadersy:80
|
||||
/foo echoheadersx:80
|
||||
```
|
||||
|
||||
Go to your GCE console and confirm that the following resources have been created through the HTTPLoadbalancing panel:
|
||||
* A Global Forwarding Rule
|
||||
* An UrlMap
|
||||
* A TargetHTTPProxy
|
||||
* BackendServices (one for each Kubernetes nodePort service)
|
||||
* An Instance Group (with ports corresponding to the BackendServices)
|
||||
|
||||
The HTTPLoadBalancing panel will also show you if your backends have responded to the health checks, wait till they do. This can take a few minutes. If you see `Health status will display here once configuration is complete.` the L7 is still bootstrapping. Wait till you have `Healthy instances: X`. Even though the GCE L7 is driven by our controller, which notices the Kubernetes health checks of a pod, we still need to wait on the first GCE L7 health check to complete. Once your backends are up and healthy:
|
||||
|
||||
```shell
|
||||
$ curl --resolve foo.bar.com:80:107.178.254.239 http://foo.bar.com/foo
|
||||
CLIENT VALUES:
|
||||
client_address=('10.240.29.196', 56401) (10.240.29.196)
|
||||
command=GET
|
||||
path=/echoheadersx
|
||||
real path=/echoheadersx
|
||||
query=
|
||||
request_version=HTTP/1.1
|
||||
|
||||
SERVER VALUES:
|
||||
server_version=BaseHTTP/0.6
|
||||
sys_version=Python/3.4.3
|
||||
protocol_version=HTTP/1.0
|
||||
|
||||
HEADERS RECEIVED:
|
||||
Accept=*/*
|
||||
Connection=Keep-Alive
|
||||
Host=107.178.254.239
|
||||
User-Agent=curl/7.35.0
|
||||
Via=1.1 google
|
||||
X-Forwarded-For=216.239.45.73, 107.178.254.239
|
||||
X-Forwarded-Proto=http
|
||||
```
|
||||
|
||||
You can also edit `/etc/hosts` instead of using `--resolve`.
|
||||
|
||||
#### Updates
|
||||
|
||||
Say you don't want a default backend and you'd like to allow all traffic hitting your loadbalancer at /foo to reach your echoheaders backend service, not just the traffic for foo.bar.com. You can modify the Ingress Spec:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /foo
|
||||
..
|
||||
```
|
||||
|
||||
and replace the existing Ingress (ignore errors about replacing the Service, we're using the same .yaml file but we only care about the Ingress):
|
||||
|
||||
```
|
||||
$ kubectl replace -f ingress-app.yaml
|
||||
ingress "echomap" replaced
|
||||
|
||||
$ curl http://107.178.254.239/foo
|
||||
CLIENT VALUES:
|
||||
client_address=('10.240.143.179', 59546) (10.240.143.179)
|
||||
command=GET
|
||||
path=/foo
|
||||
real path=/foo
|
||||
...
|
||||
|
||||
$ curl http://107.178.254.239/
|
||||
<pre>
|
||||
INTRODUCTION
|
||||
============
|
||||
This is an nginx webserver for simple loadbalancer testing. It works well
|
||||
for me but it might not have some of the features you want. If you would
|
||||
...
|
||||
```
|
||||
|
||||
A couple of things to note about this particular update:
|
||||
* An Ingress without a default backend inherits the backend of the Ingress controller.
|
||||
* An IngressRule without a host gets the wildcard. This is controller specific, some loadbalancer controllers do not respect anything but a DNS subdomain as the host. You *cannot* set the host to a regex.
|
||||
* You never want to delete then re-create an Ingress, as it will result in the controller tearing down and recreating the loadbalancer.
|
||||
|
||||
__Unexpected updates__: Since glbc constantly runs a control loop it won't allow you to break links that black hole traffic. An easy link to break is the url map itself, but you can also disconnect a target proxy from the urlmap, or remove an instance from the instance group (note this is different from *deleting* the instance, the loadbalancer controller will not recreate it if you do so). Modify one of the url links in the map to point to another backend through the GCE Control Panel UI, and wait till the controller sync (this happens as frequently as you tell it to, via the --resync-period flag). The same goes for the Kubernetes side of things, the API server will validate against obviously bad updates, but if you relink an Ingress so it points to the wrong backends the controller will blindly follow.
|
||||
|
||||
### Paths
|
||||
|
||||
Till now, our examples were simplified in that they hit an endpoint with a catch-all path regex. Most real world backends have subresources. Let's create service to test how the loadbalancer handles paths:
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: nginxtest
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginxtest
|
||||
spec:
|
||||
containers:
|
||||
- name: nginxtest
|
||||
image: bprashanth/nginxtest:1.0
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginxtest
|
||||
labels:
|
||||
app: nginxtest
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: nginxtest
|
||||
```
|
||||
|
||||
Running kubectl create against this manifest will give you a service with multiple endpoints:
|
||||
```shell
|
||||
$ kubectl get svc nginxtest -o yaml | grep -i nodeport:
|
||||
nodePort: 30404
|
||||
$ curl nodeip:30404/
|
||||
ENDPOINTS
|
||||
=========
|
||||
<a href="hostname">hostname</a>: An endpoint to query the hostname.
|
||||
<a href="stress">stress</a>: An endpoint to stress the host.
|
||||
<a href="fs/index.html">fs</a>: A file system for static content.
|
||||
|
||||
```
|
||||
You can put the nodeip:port into your browser and play around with the endpoints so you're familiar with what to expect. We will test the `/hostname` and `/fs/files/nginx.html` endpoints. Modify/create your Ingress:
|
||||
```yaml
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginxtest-ingress
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /hostname
|
||||
backend:
|
||||
serviceName: nginxtest
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
And check the endpoint (you will have to wait till the update takes effect, this could be a few minutes):
|
||||
```shell
|
||||
$ kubectl replace -f ingress.yaml
|
||||
$ curl loadbalancerip/hostname
|
||||
nginx-tester-pod-name
|
||||
```
|
||||
|
||||
Note what just happened, the endpoint exposes /hostname, and the loadbalancer forwarded the entire matching url to the endpoint. This means if you had '/foo' in the Ingress and tried accessing /hostname, your endpoint would've received /foo/hostname and not known how to route it. Now update the Ingress to access static content via the /fs endpoint:
|
||||
```
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginxtest-ingress
|
||||
spec:
|
||||
rules:
|
||||
- http:
|
||||
paths:
|
||||
- path: /fs/*
|
||||
backend:
|
||||
serviceName: nginxtest
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
As before, wait a while for the update to take effect, and try accessing `loadbalancerip/fs/files/nginx.html`.
|
||||
|
||||
#### Deletion
|
||||
|
||||
Most production loadbalancers live as long as the nodes in the cluster and are torn down when the nodes are destroyed. That said, there are plenty of use cases for deleting an Ingress, deleting a loadbalancer controller, or just purging external loadbalancer resources altogether. Deleting a loadbalancer controller pod will not affect the loadbalancers themselves, this way your backends won't suffer a loss of availability if the scheduler pre-empts your controller pod. Deleting a single loadbalancer is as easy as deleting an Ingress via kubectl:
|
||||
```shell
|
||||
$ kubectl delete ing echomap
|
||||
$ kubectl logs --follow glbc-6m6b6 l7-lb-controller
|
||||
I1007 00:25:45.099429 1 loadbalancer.go:144] Deleting lb default-echomap
|
||||
I1007 00:25:45.099432 1 loadbalancer.go:437] Deleting global forwarding rule k8-fw-default-echomap
|
||||
I1007 00:25:54.885823 1 loadbalancer.go:444] Deleting target proxy k8-tp-default-echomap
|
||||
I1007 00:25:58.446941 1 loadbalancer.go:451] Deleting url map k8-um-default-echomap
|
||||
I1007 00:26:02.043065 1 backends.go:176] Deleting backends []
|
||||
I1007 00:26:02.043188 1 backends.go:134] Deleting backend k8-be-30301
|
||||
I1007 00:26:05.591140 1 backends.go:134] Deleting backend k8-be-30284
|
||||
I1007 00:26:09.159016 1 controller.go:232] Finished syncing default/echomap
|
||||
```
|
||||
Note that it takes ~30 seconds to purge cloud resources, the API calls to create and delete are a one time cost. GCE BackendServices are ref-counted and deleted by the controller as you delete Kubernetes Ingress'. This is not sufficient for cleanup, because you might have deleted the Ingress while glbc was down, in which case it would leak cloud resources. You can delete the glbc and purge cloud resources in 2 more ways:
|
||||
|
||||
__The dev/test way__: If you want to delete everything in the cloud when the loadbalancer controller pod dies, start it with the --delete-all-on-quit flag. When a pod is killed it's first sent a SIGTERM, followed by a grace period (set to 10minutes for loadbalancer controllers), followed by a SIGKILL. The controller pod uses this time to delete cloud resources. Be careful with --delete-all-on-quit, because if you're running a production glbc and the scheduler re-schedules your pod for some reason, it will result in a loss of availability. You can do this because your rc.yaml has:
|
||||
```yaml
|
||||
args:
|
||||
# auto quit requires a high termination grace period.
|
||||
- --delete-all-on-quit=true
|
||||
```
|
||||
|
||||
So simply delete the replication controller:
|
||||
```shell
|
||||
$ kubectl get rc glbc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS AGE
|
||||
glbc default-http-backend gcr.io/google_containers/defaultbackend:1.0 k8s-app=glbc,version=v0.5 1 2m
|
||||
l7-lb-controller gcr.io/google_containers/glbc:0.5
|
||||
|
||||
$ kubectl delete rc glbc
|
||||
replicationcontroller "glbc" deleted
|
||||
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
glbc-6m6b6 1/1 Terminating 0 13m
|
||||
```
|
||||
|
||||
__The prod way__: If you didn't start the controller with `--delete-all-on-quit`, you can execute a GET on the `/delete-all-and-quit` endpoint. This endpoint is deliberately not exported.
|
||||
|
||||
```
|
||||
$ kubectl exec -it glbc-6m6b6 -- curl http://localhost:8081/delete-all-and-quit
|
||||
..Hangs till quit is done..
|
||||
|
||||
$ kubectl logs glbc-6m6b6 --follow
|
||||
I1007 00:26:09.159016 1 controller.go:232] Finished syncing default/echomap
|
||||
I1007 00:29:30.321419 1 controller.go:192] Shutting down controller queues.
|
||||
I1007 00:29:30.321970 1 controller.go:199] Shutting down cluster manager.
|
||||
I1007 00:29:30.321574 1 controller.go:178] Shutting down Loadbalancer Controller
|
||||
I1007 00:29:30.322378 1 main.go:160] Handled quit, awaiting pod deletion.
|
||||
I1007 00:29:30.321977 1 loadbalancer.go:154] Creating loadbalancers []
|
||||
I1007 00:29:30.322617 1 loadbalancer.go:192] Loadbalancer pool shutdown.
|
||||
I1007 00:29:30.322622 1 backends.go:176] Deleting backends []
|
||||
I1007 00:30:00.322528 1 main.go:160] Handled quit, awaiting pod deletion.
|
||||
I1007 00:30:30.322751 1 main.go:160] Handled quit, awaiting pod deletion
|
||||
```
|
||||
|
||||
You just instructed the loadbalancer controller to quit, however if it had done so, the replication controller would've just created another pod, so it waits around till you delete the rc.
|
||||
|
||||
#### Health checks
|
||||
|
||||
Currently, all service backends must respond with a 200 on '/'. The content does not matter. If they fail to do so they will be deemed unhealthy by the GCE L7. This limitation is because there are 2 sets of health checks:
|
||||
* From the kubernetes endpoints, taking the form of liveness/readiness probes
|
||||
* From the GCE L7, which periodically pings '/'
|
||||
We really want (1) to control the health of an instance but (2) is a GCE requirement. Ideally, we would point (2) at (1), but we still need (2) for pods that don't have a defined health check. This will probably get resolved when Ingress grows up.
|
||||
|
||||
## Troubleshooting:
|
||||
|
||||
This controller is complicated because it exposes a tangled set of external resources as a single logical abstraction. It's recommended that you are at least *aware* of how one creates a GCE L7 [without a kubernetes Ingress](https://cloud.google.com/container-engine/docs/tutorials/http-balancer). If weird things happen, here are some basic debugging guidelines:
|
||||
|
||||
* Check loadbalancer controller pod logs via kubectl
|
||||
A typical sign of trouble is repeated retries in the logs:
|
||||
```shell
|
||||
I1006 18:58:53.451869 1 loadbalancer.go:268] Forwarding rule k8-fw-default-echomap already exists
|
||||
I1006 18:58:53.451955 1 backends.go:162] Syncing backends [30301 30284 30301]
|
||||
I1006 18:58:53.451998 1 backends.go:134] Deleting backend k8-be-30302
|
||||
E1006 18:58:57.029253 1 utils.go:71] Requeuing default/echomap, err googleapi: Error 400: The backendService resource 'projects/Kubernetesdev/global/backendServices/k8-be-30302' is already being used by 'projects/Kubernetesdev/global/urlMaps/k8-um-default-echomap'
|
||||
I1006 18:58:57.029336 1 utils.go:83] Syncing default/echomap
|
||||
```
|
||||
|
||||
This could be a bug or quota limitation. In the case of the former, please head over to slack or github.
|
||||
|
||||
* If you see a GET hanging, followed by a 502 with the following response:
|
||||
|
||||
```
|
||||
<html><head>
|
||||
<meta http-equiv="content-type" content="text/html;charset=utf-8">
|
||||
<title>502 Server Error</title>
|
||||
</head>
|
||||
<body text=#000000 bgcolor=#ffffff>
|
||||
<h1>Error: Server Error</h1>
|
||||
<h2>The server encountered a temporary error and could not complete your request.<p>Please try again in 30 seconds.</h2>
|
||||
<h2></h2>
|
||||
</body></html>
|
||||
```
|
||||
The loadbalancer is probably bootstrapping itself.
|
||||
|
||||
* If a GET responds with a 404 and the following response:
|
||||
```
|
||||
<a href=//www.google.com/><span id=logo aria-label=Google></span></a>
|
||||
<p><b>404.</b> <ins>That’s an error.</ins>
|
||||
<p>The requested URL <code>/hostless</code> was not found on this server. <ins>That’s all we know.</ins>
|
||||
```
|
||||
It means you have lost your IP somehow, or just typed in the wrong IP.
|
||||
|
||||
* If you see requests taking an abnormal amount of time, run the echoheaders pod and look for the client address
|
||||
```shell
|
||||
CLIENT VALUES:
|
||||
client_address=('10.240.29.196', 56401) (10.240.29.196)
|
||||
```
|
||||
|
||||
Then head over to the GCE node with internal ip 10.240.29.196 and check that the [Service is functioning](https://github.com/kubernetes/kubernetes/blob/release-1.0/docs/user-guide/debugging-services.md) as expected. Remember that the GCE L7 is routing you through the NodePort service, and try to trace back.
|
||||
|
||||
* Check if you can access the backend service directly via nodeip:nodeport
|
||||
* Check the GCE console
|
||||
* Make sure you only have a single loadbalancer controller running
|
||||
* Make sure the initial GCE health checks have passed
|
||||
* A crash loop looks like:
|
||||
```shell
|
||||
$ kubectl get pods
|
||||
glbc-fjtlq 0/1 CrashLoopBackOff 17 1h
|
||||
```
|
||||
If you hit that it means the controller isn't even starting. Re-check your input flags, especially the required ones.
|
||||
|
||||
## GCELBC Implementation Details
|
||||
|
||||
For the curious, here is a high level overview of how the GCE LoadBalancer controller manages cloud resources.
|
||||
|
||||
The controller manages cloud resources through a notion of pools. Each pool is the representation of the last known state of a logical cloud resource. Pools are periodically synced with the desired state, as reflected by the Kubernetes api. When you create a new Ingress, the following happens:
|
||||
* Create BackendServices for each Kubernetes backend in the Ingress, through the backend pool.
|
||||
* Add nodePorts for each BackendService to an Instance Group with all the instances in your cluster, through the instance pool.
|
||||
* Create a UrlMap, TargetHttpProxy, Global Forwarding Rule through the loadbalancer pool.
|
||||
* Update the loadbalancer's urlmap according to the Ingress.
|
||||
|
||||
Periodically, each pool checks that it has a valid connection to the next hop in the above resource graph. So for example, the backend pool will check that each backend is connected to the instance group and that the node ports match, the instance group will check that all the Kubernetes nodes are a part of the instance group, and so on. Since Backends are a limited resource, they're shared (well, everything is limited by your quota, this applies doubly to backend services). This means you can setup N Ingress' exposing M services through different paths and the controller will only create M backends. When all the Ingress' are deleted, the backend pool GCs the backend.
|
||||
|
||||
## Wishlist:
|
||||
|
||||
* E2e, integration tests
|
||||
* Better events
|
||||
* Detect leaked resources even if the Ingress has been deleted when the controller isn't around
|
||||
* Specify health checks (currently we just rely on kubernetes service/pod liveness probes and force pods to have a `/` endpoint that responds with 200 for GCE)
|
||||
* Alleviate the NodePort requirement for Service Type=LoadBalancer.
|
||||
* Async pool management of backends/L7s etc
|
||||
* Retry back-off when GCE Quota is done
|
||||
* GCE Quota integration
|
||||
* HTTP support as the Ingress grows
|
||||
* More aggressive resource sharing
|
||||
|
||||
[]()
|
||||
242
controllers/gce/backends/backends.go
Normal file
242
controllers/gce/backends/backends.go
Normal file
|
|
@ -0,0 +1,242 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
|
||||
"github.com/golang/glog"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/contrib/ingress/controllers/gce/instances"
|
||||
"k8s.io/contrib/ingress/controllers/gce/storage"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// Backends implements BackendPool.
|
||||
type Backends struct {
|
||||
cloud BackendServices
|
||||
nodePool instances.NodePool
|
||||
healthChecker healthchecks.HealthChecker
|
||||
snapshotter storage.Snapshotter
|
||||
namer utils.Namer
|
||||
}
|
||||
|
||||
// portKey formats a node port as the string key used by the snapshotter.
func portKey(port int64) string {
	// strconv.FormatInt is the direct idiom for integer formatting and
	// avoids fmt's reflection-based formatting machinery.
	return strconv.FormatInt(port, 10)
}
|
||||
|
||||
// NewBackendPool returns a new backend pool.
// - cloud: implements BackendServices and syncs backends with a cloud provider
// - healthChecker: manages the HTTP health checks attached to each backend.
// - nodePool: implements NodePool, used to create/delete new instance groups.
// - namer: produces the cluster-scoped names used for cloud resources.
// The pool starts with an empty in-memory snapshot.
func NewBackendPool(
	cloud BackendServices,
	healthChecker healthchecks.HealthChecker,
	nodePool instances.NodePool, namer utils.Namer) *Backends {
	return &Backends{
		cloud:         cloud,
		nodePool:      nodePool,
		snapshotter:   storage.NewInMemoryPool(),
		healthChecker: healthChecker,
		namer:         namer,
	}
}
|
||||
|
||||
// Get returns the backend service for the given node port, as reported by
// the cloud. On success the result is also recorded in the snapshotter so
// GC can account for it later.
func (b *Backends) Get(port int64) (*compute.BackendService, error) {
	be, err := b.cloud.GetBackendService(b.namer.BeName(port))
	if err != nil {
		return nil, err
	}
	b.snapshotter.Add(portKey(port), be)
	return be, nil
}
|
||||
|
||||
// create provisions a health check and a backend service for the given named
// port, pointing the new backend service at the supplied instance group.
// It returns the backend service as read back from the cloud (via Get, which
// also records it in the snapshotter).
func (b *Backends) create(ig *compute.InstanceGroup, namedPort *compute.NamedPort, name string) (*compute.BackendService, error) {
	// Create a new health check
	if err := b.healthChecker.Add(namedPort.Port, ""); err != nil {
		return nil, err
	}
	hc, err := b.healthChecker.Get(namedPort.Port)
	if err != nil {
		return nil, err
	}
	// Create a new backend
	backend := &compute.BackendService{
		Name:     name,
		Protocol: "HTTP",
		Backends: []*compute.Backend{
			{
				Group: ig.SelfLink,
			},
		},
		// Api expects one, means little to kubernetes.
		HealthChecks: []string{hc.SelfLink},
		Port:         namedPort.Port,
		PortName:     namedPort.Name,
	}
	if err := b.cloud.CreateBackendService(backend); err != nil {
		return nil, err
	}
	return b.Get(namedPort.Port)
}
|
||||
|
||||
// Add will get or create a Backend for the given port.
// It first ensures the shared instance group exposes the port, then creates
// the backend service if it doesn't exist, and finally repairs the
// backend -> instance group link (edge hop) for pre-existing backends.
func (b *Backends) Add(port int64) error {
	// We must track the port even if creating the backend failed, because
	// we might've created a health-check for it.
	be := &compute.BackendService{}
	defer func() { b.snapshotter.Add(portKey(port), be) }()

	ig, namedPort, err := b.nodePool.AddInstanceGroup(b.namer.IGName(), port)
	if err != nil {
		return err
	}
	// A lookup error here just means the backend doesn't exist yet; the
	// error is deliberately discarded and be == nil triggers creation.
	be, _ = b.Get(port)
	if be == nil {
		glog.Infof("Creating backend for instance group %v port %v named port %v",
			ig.Name, port, namedPort)
		be, err = b.create(ig, namedPort, b.namer.BeName(port))
		if err != nil {
			return err
		}
	}
	if err := b.edgeHop(be, ig); err != nil {
		return err
	}
	return err
}
|
||||
|
||||
// Delete deletes the Backend for the given port, along with its health check.
// A 404 from the cloud is treated as success, and the port is removed from
// the snapshotter only when deletion succeeded (or the resource was absent).
func (b *Backends) Delete(port int64) (err error) {
	name := b.namer.BeName(port)
	glog.Infof("Deleting backend %v", name)
	// The deferred closure rewrites the named return value: a NotFound is
	// downgraded to success before the snapshot entry is dropped.
	defer func() {
		if utils.IsHTTPErrorCode(err, http.StatusNotFound) {
			err = nil
		}
		if err == nil {
			b.snapshotter.Delete(portKey(port))
		}
	}()
	// Try deleting health checks even if a backend is not found.
	if err = b.cloud.DeleteBackendService(name); err != nil &&
		!utils.IsHTTPErrorCode(err, http.StatusNotFound) {
		return err
	}
	if err = b.healthChecker.Delete(port); err != nil &&
		!utils.IsHTTPErrorCode(err, http.StatusNotFound) {
		return err
	}
	return nil
}
|
||||
|
||||
// List lists all backend services known to the cloud provider.
func (b *Backends) List() (*compute.BackendServiceList, error) {
	// TODO: for consistency with the rest of this sub-package this method
	// should return a list of backend ports.
	return b.cloud.ListBackendServices()
}
|
||||
|
||||
// edgeHop checks the links of the given backend by executing an edge hop.
// It fixes broken links: unless the backend points at exactly the given
// instance group (and nothing else), its backend list is rewritten to the
// single correct group and pushed to the cloud.
func (b *Backends) edgeHop(be *compute.BackendService, ig *compute.InstanceGroup) error {
	if len(be.Backends) == 1 &&
		utils.CompareLinks(be.Backends[0].Group, ig.SelfLink) {
		return nil
	}
	glog.Infof("Backend %v has a broken edge, adding link to %v",
		be.Name, ig.Name)
	be.Backends = []*compute.Backend{
		{Group: ig.SelfLink},
	}
	if err := b.cloud.UpdateBackendService(be); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// Sync syncs backend services corresponding to ports in the given list.
|
||||
func (b *Backends) Sync(svcNodePorts []int64) error {
|
||||
glog.V(3).Infof("Sync: backends %v", svcNodePorts)
|
||||
|
||||
// create backends for new ports, perform an edge hop for existing ports
|
||||
for _, port := range svcNodePorts {
|
||||
if err := b.Add(port); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GC garbage collects backend services whose ports are NOT in the given
// list, based on the last snapshot. When the desired list is empty, the
// shared instance group is deleted as well.
func (b *Backends) GC(svcNodePorts []int64) error {
	knownPorts := sets.NewString()
	for _, port := range svcNodePorts {
		knownPorts.Insert(portKey(port))
	}
	// Walk the snapshot and delete every backend whose port is unwanted.
	pool := b.snapshotter.Snapshot()
	for port := range pool {
		p, err := strconv.Atoi(port)
		if err != nil {
			return err
		}
		nodePort := int64(p)
		if knownPorts.Has(portKey(nodePort)) {
			continue
		}
		glog.V(3).Infof("GCing backend for port %v", p)
		if err := b.Delete(nodePort); err != nil {
			return err
		}
	}
	if len(svcNodePorts) == 0 {
		glog.Infof("Deleting instance group %v", b.namer.IGName())
		if err := b.nodePool.DeleteInstanceGroup(b.namer.IGName()); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// Shutdown deletes all backends and the default backend.
|
||||
// This will fail if one of the backends is being used by another resource.
|
||||
func (b *Backends) Shutdown() error {
|
||||
if err := b.GC([]int64{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status returns the status of the given backend by name.
|
||||
func (b *Backends) Status(name string) string {
|
||||
backend, err := b.cloud.GetBackendService(name)
|
||||
if err != nil {
|
||||
return "Unknown"
|
||||
}
|
||||
// TODO: Include port, ip in the status, since it's in the health info.
|
||||
hs, err := b.cloud.GetHealth(name, backend.Backends[0].Group)
|
||||
if err != nil || len(hs.HealthStatus) == 0 || hs.HealthStatus[0] == nil {
|
||||
return "Unknown"
|
||||
}
|
||||
// TODO: State transition are important, not just the latest.
|
||||
return hs.HealthStatus[0].HealthState
|
||||
}
|
||||
126
controllers/gce/backends/backends_test.go
Normal file
126
controllers/gce/backends/backends_test.go
Normal file
|
|
@ -0,0 +1,126 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/contrib/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/contrib/ingress/controllers/gce/instances"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
// newBackendPool wires a BackendPool to the supplied fake cloud services,
// using fake health checks and a single-zone ("default-zone") node pool.
func newBackendPool(f BackendServices, fakeIGs instances.InstanceGroups) BackendPool {
	namer := utils.Namer{}
	return NewBackendPool(
		f,
		healthchecks.NewHealthChecker(healthchecks.NewFakeHealthChecks(), "/", namer),
		instances.NewNodePool(fakeIGs, "default-zone"), namer)
}
|
||||
|
||||
func TestBackendPoolAdd(t *testing.T) {
|
||||
f := NewFakeBackendServices()
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool := newBackendPool(f, fakeIGs)
|
||||
namer := utils.Namer{}
|
||||
|
||||
// Add a backend for a port, then re-add the same port and
|
||||
// make sure it corrects a broken link from the backend to
|
||||
// the instance group.
|
||||
nodePort := int64(8080)
|
||||
pool.Add(nodePort)
|
||||
beName := namer.BeName(nodePort)
|
||||
|
||||
// Check that the new backend has the right port
|
||||
be, err := f.GetBackendService(beName)
|
||||
if err != nil {
|
||||
t.Fatalf("Did not find expected backend %v", beName)
|
||||
}
|
||||
if be.Port != nodePort {
|
||||
t.Fatalf("Backend %v has wrong port %v, expected %v", be.Name, be.Port, nodePort)
|
||||
}
|
||||
// Check that the instance group has the new port
|
||||
var found bool
|
||||
for _, port := range fakeIGs.Ports {
|
||||
if port == nodePort {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Fatalf("Port %v not added to instance group", nodePort)
|
||||
}
|
||||
|
||||
// Mess up the link between backend service and instance group.
|
||||
// This simulates a user doing foolish things through the UI.
|
||||
f.calls = []int{}
|
||||
be, err = f.GetBackendService(beName)
|
||||
be.Backends[0].Group = "test edge hop"
|
||||
f.UpdateBackendService(be)
|
||||
|
||||
pool.Add(nodePort)
|
||||
for _, call := range f.calls {
|
||||
if call == utils.Create {
|
||||
t.Fatalf("Unexpected create for existing backend service")
|
||||
}
|
||||
}
|
||||
gotBackend, _ := f.GetBackendService(beName)
|
||||
gotGroup, _ := fakeIGs.GetInstanceGroup(namer.IGName(), "default-zone")
|
||||
if gotBackend.Backends[0].Group != gotGroup.SelfLink {
|
||||
t.Fatalf(
|
||||
"Broken instance group link: %v %v",
|
||||
gotBackend.Backends[0].Group,
|
||||
gotGroup.SelfLink)
|
||||
}
|
||||
}
|
||||
|
||||
// TestBackendPoolSync verifies that Sync creates backends for all desired
// ports and that a subsequent GC removes backends for undesired ports.
func TestBackendPoolSync(t *testing.T) {

	// Call sync on a backend pool with a list of ports, make sure the pool
	// creates/deletes required ports.
	svcNodePorts := []int64{81, 82, 83}
	f := NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	pool := newBackendPool(f, fakeIGs)
	pool.Add(81)
	pool.Add(90)
	pool.Sync(svcNodePorts)
	pool.GC(svcNodePorts)
	// Port 90 is not in the desired list, so GC must have removed it.
	if _, err := pool.Get(90); err == nil {
		t.Fatalf("Did not expect to find port 90")
	}
	// All desired ports must still exist after Sync + GC.
	for _, port := range svcNodePorts {
		if _, err := pool.Get(port); err != nil {
			t.Fatalf("Expected to find port %v", port)
		}
	}

}
|
||||
|
||||
func TestBackendPoolShutdown(t *testing.T) {
|
||||
f := NewFakeBackendServices()
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
pool := newBackendPool(f, fakeIGs)
|
||||
namer := utils.Namer{}
|
||||
|
||||
pool.Add(80)
|
||||
pool.Shutdown()
|
||||
if _, err := f.GetBackendService(namer.BeName(80)); err == nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
|
||||
}
|
||||
145
controllers/gce/backends/fakes.go
Normal file
145
controllers/gce/backends/fakes.go
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// NewFakeBackendServices creates a new fake backend services manager.
func NewFakeBackendServices() *FakeBackendServices {
	return &FakeBackendServices{
		backendServices: []*compute.BackendService{},
	}
}

// FakeBackendServices fakes out GCE backend services.
type FakeBackendServices struct {
	backendServices []*compute.BackendService // in-memory "cloud" state
	calls           []int                     // ordered log of utils.Get/Create/Delete/Update calls
}

// GetBackendService fakes getting a backend service from the cloud.
// It returns an error when no service with the given name exists.
func (f *FakeBackendServices) GetBackendService(name string) (*compute.BackendService, error) {
	f.calls = append(f.calls, utils.Get)
	for i := range f.backendServices {
		if name == f.backendServices[i].Name {
			return f.backendServices[i], nil
		}
	}
	return nil, fmt.Errorf("Backend service %v not found", name)
}

// CreateBackendService fakes backend service creation.
// The SelfLink is faked as the service's name.
func (f *FakeBackendServices) CreateBackendService(be *compute.BackendService) error {
	f.calls = append(f.calls, utils.Create)
	be.SelfLink = be.Name
	f.backendServices = append(f.backendServices, be)
	return nil
}

// DeleteBackendService fakes backend service deletion.
// Deleting a nonexistent service is a silent no-op.
func (f *FakeBackendServices) DeleteBackendService(name string) error {
	f.calls = append(f.calls, utils.Delete)
	newBackends := []*compute.BackendService{}
	for i := range f.backendServices {
		if name != f.backendServices[i].Name {
			newBackends = append(newBackends, f.backendServices[i])
		}
	}
	f.backendServices = newBackends
	return nil
}

// ListBackendServices fakes backend service listing.
func (f *FakeBackendServices) ListBackendServices() (*compute.BackendServiceList, error) {
	return &compute.BackendServiceList{Items: f.backendServices}, nil
}

// UpdateBackendService fakes updating a backend service.
// Services that don't already exist are silently ignored.
func (f *FakeBackendServices) UpdateBackendService(be *compute.BackendService) error {
	f.calls = append(f.calls, utils.Update)
	for i := range f.backendServices {
		if f.backendServices[i].Name == be.Name {
			f.backendServices[i] = be
		}
	}
	return nil
}

// GetHealth fakes getting backend service health: every existing service
// reports a single HEALTHY status on its configured port.
func (f *FakeBackendServices) GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error) {
	be, err := f.GetBackendService(name)
	if err != nil {
		return nil, err
	}
	states := []*compute.HealthStatus{
		{
			HealthState: "HEALTHY",
			IpAddress:   "",
			Port:        be.Port,
		},
	}
	return &compute.BackendServiceGroupHealth{
		HealthStatus: states}, nil
}
|
||||
|
||||
// NewFakeHealthChecks returns a health check fake.
func NewFakeHealthChecks() *FakeHealthChecks {
	return &FakeHealthChecks{hc: []*compute.HttpHealthCheck{}}
}

// FakeHealthChecks fakes out health checks.
type FakeHealthChecks struct {
	hc []*compute.HttpHealthCheck // in-memory health check state
}

// CreateHttpHealthCheck fakes health check creation.
func (f *FakeHealthChecks) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
	f.hc = append(f.hc, hc)
	return nil
}

// GetHttpHealthCheck fakes getting a http health check by name.
// It returns an error when no health check with that name exists.
func (f *FakeHealthChecks) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) {
	for _, h := range f.hc {
		if h.Name == name {
			return h, nil
		}
	}
	return nil, fmt.Errorf("Health check %v not found.", name)
}

// DeleteHttpHealthCheck fakes deleting a http health check.
// Deleting a missing check returns an error.
func (f *FakeHealthChecks) DeleteHttpHealthCheck(name string) error {
	healthChecks := []*compute.HttpHealthCheck{}
	exists := false
	for _, h := range f.hc {
		if h.Name == name {
			exists = true
			continue
		}
		healthChecks = append(healthChecks, h)
	}
	if !exists {
		return fmt.Errorf("Failed to find health check %v", name)
	}
	f.hc = healthChecks
	return nil
}
|
||||
58
controllers/gce/backends/interfaces.go
Normal file
58
controllers/gce/backends/interfaces.go
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package backends
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// BackendPool is an interface to manage a pool of kubernetes nodePort services
// as gce backendServices, and sync them through the BackendServices interface.
type BackendPool interface {
	// Add ensures a backend service exists for the given node port.
	Add(port int64) error
	// Get returns the backend service for the given node port.
	Get(port int64) (*compute.BackendService, error)
	// Delete removes the backend service for the given node port.
	Delete(port int64) error
	// Sync ensures backend services exist for all the given node ports.
	Sync(ports []int64) error
	// GC removes backend services for ports not in the given list.
	GC(ports []int64) error
	// Shutdown deletes all managed backend services.
	Shutdown() error
	// Status reports the health state of the named backend service.
	Status(name string) string
	// List lists all backend services.
	List() (*compute.BackendServiceList, error)
}

// BackendServices is an interface for managing gce backend services.
type BackendServices interface {
	GetBackendService(name string) (*compute.BackendService, error)
	UpdateBackendService(bg *compute.BackendService) error
	CreateBackendService(bg *compute.BackendService) error
	DeleteBackendService(name string) error
	ListBackendServices() (*compute.BackendServiceList, error)
	GetHealth(name, instanceGroupLink string) (*compute.BackendServiceGroupHealth, error)
}

// SingleHealthCheck is an interface to manage a single GCE health check.
type SingleHealthCheck interface {
	CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error
	DeleteHttpHealthCheck(name string) error
	GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error)
}

// HealthChecker is an interface to manage cloud HTTPHealthChecks.
type HealthChecker interface {
	// Add creates a health check for the given port and request path.
	Add(port int64, path string) error
	// Delete removes the health check for the given port.
	Delete(port int64) error
	// Get returns the health check for the given port.
	Get(port int64) (*compute.HttpHealthCheck, error)
}
|
||||
173
controllers/gce/controller/cluster_manager.go
Normal file
173
controllers/gce/controller/cluster_manager.go
Normal file
|
|
@ -0,0 +1,173 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/contrib/ingress/controllers/gce/backends"
|
||||
"k8s.io/contrib/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/contrib/ingress/controllers/gce/instances"
|
||||
"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
)
|
||||
|
||||
const (
	// Defaults for L7 serving: frontend port and health check request path.
	defaultPort            = 80
	defaultHealthCheckPath = "/"

	// A single instance-group is created per cluster manager.
	// Tagged with the name of the controller.
	instanceGroupPrefix = "k8s-ig"

	// A backend is created per nodePort, tagged with the nodeport.
	// This allows sharing of backends across loadbalancers.
	backendPrefix = "k8s-be"

	// A single target proxy/urlmap/forwarding rule is created per loadbalancer.
	// Tagged with the namespace/name of the Ingress.
	targetProxyPrefix    = "k8s-tp"
	forwardingRulePrefix = "k8s-fw"
	urlMapPrefix         = "k8s-um"

	// Used in the test RunServer method to denote a delete request.
	deleteType = "del"

	// port 0 is used as a signal for port not found/no such port etc.
	invalidPort = 0

	// Names longer than this are truncated, because of GCE restrictions.
	nameLenLimit = 62
)

// ClusterManager manages cluster resource pools.
type ClusterManager struct {
	ClusterNamer           utils.Namer                    // cluster-scoped resource naming
	defaultBackendNodePort int64                          // node port of the default (404) backend service
	instancePool           instances.NodePool             // shared instance group
	backendPool            backends.BackendPool           // per-nodePort backend services
	l7Pool                 loadbalancers.LoadBalancerPool // per-Ingress L7 resources
}
|
||||
|
||||
// IsHealthy returns an error if the cluster manager is unhealthy, i.e. the
// cloud client cannot list backend services.
func (c *ClusterManager) IsHealthy() (err error) {
	// TODO: Expand on this, for now we just want to detect when the GCE client
	// is broken.
	_, err = c.backendPool.List()
	return
}
|
||||
|
||||
// shutdown tears down all cloud resources managed by this cluster manager:
// L7 loadbalancers first, then backends.
func (c *ClusterManager) shutdown() error {
	if err := c.l7Pool.Shutdown(); err != nil {
		return err
	}
	// The backend pool will also delete instance groups.
	return c.backendPool.Shutdown()
}
|
||||
|
||||
// Checkpoint performs a checkpoint with the cloud.
// - lbs are the L7 loadbalancers we wish to exist. If they already
//   exist, they should not have any broken links between say, a UrlMap and
//   TargetHttpProxy.
// - nodeNames are the names of nodes we wish to add to all loadbalancer
//   instance groups.
// - nodePorts are the ports for which we require BackendServices. Each of
//   these ports must also be opened on the corresponding Instance Group.
// If in performing the checkpoint the cluster manager runs out of quota, a
// googleapi 403 is returned.
func (c *ClusterManager) Checkpoint(lbs []*loadbalancers.L7RuntimeInfo, nodeNames []string, nodePorts []int64) error {
	// Backends and instances are synced before loadbalancers — presumably
	// because the L7 resources reference them (cf. the GC ordering note).
	if err := c.backendPool.Sync(nodePorts); err != nil {
		return err
	}
	if err := c.instancePool.Sync(nodeNames); err != nil {
		return err
	}
	if err := c.l7Pool.Sync(lbs); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// GC garbage collects unused resources.
// - lbNames are the names of L7 loadbalancers we wish to exist. Those not in
//   this list are removed from the cloud.
// - nodePorts are the ports for which we want BackendServies. BackendServices
//   for ports not in this list are deleted.
// This method ignores googleapi 404 errors (StatusNotFound).
func (c *ClusterManager) GC(lbNames []string, nodePorts []int64) error {

	// On GC:
	// * Loadbalancers need to get deleted before backends.
	// * Backends are refcounted in a shared pool.
	// * We always want to GC backends even if there was an error in GCing
	//   loadbalancers, because the next Sync could rely on the GC for quota.
	// * There are at least 2 cases for backend GC:
	//   1. The loadbalancer has been deleted.
	//   2. An update to the url map drops the refcount of a backend. This can
	//      happen when an Ingress is updated, if we don't GC after the update
	//      we'll leak the backend.

	// Both pools are GCed unconditionally; the loadbalancer error (if any)
	// takes precedence in the return value.
	lbErr := c.l7Pool.GC(lbNames)
	beErr := c.backendPool.GC(nodePorts)
	if lbErr != nil {
		return lbErr
	}
	if beErr != nil {
		return beErr
	}
	return nil
}
|
||||
|
||||
func defaultInstanceGroupName(clusterName string) string {
|
||||
return fmt.Sprintf("%v-%v", instanceGroupPrefix, clusterName)
|
||||
}
|
||||
|
||||
// NewClusterManager creates a cluster manager for shared resources.
// - name: is the name used to tag cluster wide shared resources. This is the
//   string passed to glbc via --gce-cluster-name.
// - defaultBackendNodePort: is the node port of glbc's default backend. This is
//   the kubernetes Service that serves the 404 page if no urls match.
// - defaultHealthCheckPath: is the default path used for L7 health checks, eg: "/healthz"
func NewClusterManager(
	name string,
	defaultBackendNodePort int64,
	defaultHealthCheckPath string) (*ClusterManager, error) {

	// This controller only runs on GCE; fetch the provider's GCE client.
	cloudInterface, err := cloudprovider.GetCloudProvider("gce", nil)
	if err != nil {
		return nil, err
	}
	cloud := cloudInterface.(*gce.GCECloud)
	cluster := ClusterManager{ClusterNamer: utils.Namer{name}}
	zone, err := cloud.GetZone()
	if err != nil {
		return nil, err
	}
	cluster.instancePool = instances.NewNodePool(cloud, zone.FailureDomain)
	healthChecker := healthchecks.NewHealthChecker(cloud, defaultHealthCheckPath, cluster.ClusterNamer)
	cluster.backendPool = backends.NewBackendPool(
		cloud, healthChecker, cluster.instancePool, cluster.ClusterNamer)
	// The default backend gets its own pool with a fixed "/healthz" health
	// check path, independent of defaultHealthCheckPath.
	defaultBackendHealthChecker := healthchecks.NewHealthChecker(cloud, "/healthz", cluster.ClusterNamer)
	defaultBackendPool := backends.NewBackendPool(
		cloud, defaultBackendHealthChecker, cluster.instancePool, cluster.ClusterNamer)
	cluster.defaultBackendNodePort = defaultBackendNodePort
	cluster.l7Pool = loadbalancers.NewLoadBalancerPool(
		cloud, defaultBackendPool, defaultBackendNodePort, cluster.ClusterNamer)
	return &cluster, nil
}
|
||||
435
controllers/gce/controller/controller.go
Normal file
435
controllers/gce/controller/controller.go
Normal file
|
|
@ -0,0 +1,435 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
var (
	// keyFunc derives the cache key for watched objects.
	keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc

	// DefaultClusterUID is the uid to use for clusters resources created by an
	// L7 controller created without specifying the --cluster-uid flag.
	DefaultClusterUID = ""
)
|
||||
|
||||
// LoadBalancerController watches the kubernetes api and adds/removes services
// from the loadbalancer, via loadBalancerConfig.
type LoadBalancerController struct {
	client              *client.Client
	ingController       *framework.Controller
	nodeController      *framework.Controller
	svcController       *framework.Controller
	ingLister           StoreToIngressLister
	nodeLister          cache.StoreToNodeLister
	svcLister           cache.StoreToServiceLister
	CloudClusterManager *ClusterManager
	recorder            record.EventRecorder
	nodeQueue           *taskQueue // work queue driving node syncs
	ingQueue            *taskQueue // work queue driving ingress syncs
	tr                  *GCETranslator
	stopCh              chan struct{}
	// stopLock is used to enforce only a single call to Stop is active.
	// Needed because we allow stopping through an http endpoint and
	// allowing concurrent stoppers leads to stack traces.
	stopLock sync.Mutex
	shutdown bool
}
|
||||
|
||||
// NewLoadBalancerController creates a controller for gce loadbalancers.
// - kubeClient: A kubernetes REST client.
// - clusterManager: A ClusterManager capable of creating all cloud resources
//   required for L7 loadbalancing.
// - resyncPeriod: Watchers relist from the Kubernetes API server this often.
func NewLoadBalancerController(kubeClient *client.Client, clusterManager *ClusterManager, resyncPeriod time.Duration, namespace string) (*LoadBalancerController, error) {
	// Events are both logged and written back to the apiserver so they show
	// up in `kubectl describe ingress`.
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))

	lbc := LoadBalancerController{
		client:              kubeClient,
		CloudClusterManager: clusterManager,
		stopCh:              make(chan struct{}),
		recorder: eventBroadcaster.NewRecorder(
			api.EventSource{Component: "loadbalancer-controller"}),
	}
	// Queues must exist before the handlers below capture them.
	lbc.nodeQueue = NewTaskQueue(lbc.syncNodes)
	lbc.ingQueue = NewTaskQueue(lbc.sync)

	// Ingress watch handlers
	pathHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			addIng := obj.(*extensions.Ingress)
			lbc.recorder.Eventf(addIng, api.EventTypeNormal, "ADD", fmt.Sprintf("%s/%s", addIng.Namespace, addIng.Name))
			lbc.ingQueue.enqueue(obj)
		},
		DeleteFunc: lbc.ingQueue.enqueue,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				glog.V(3).Infof("Ingress %v changed, syncing",
					cur.(*extensions.Ingress).Name)
			}
			// Enqueue unconditionally: periodic resyncs deliver unchanged
			// objects but still need to re-verify cloud state.
			lbc.ingQueue.enqueue(cur)
		},
	}
	lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc:  ingressListFunc(lbc.client, namespace),
			WatchFunc: ingressWatchFunc(lbc.client, namespace),
		},
		&extensions.Ingress{}, resyncPeriod, pathHandlers)

	// Service watch handlers. Service changes only matter insofar as they
	// affect an Ingress, so they feed the ingress queue indirectly.
	svcHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc: lbc.enqueueIngressForService,
		UpdateFunc: func(old, cur interface{}) {
			if !reflect.DeepEqual(old, cur) {
				lbc.enqueueIngressForService(cur)
			}
		},
		// Ingress deletes matter, service deletes don't.
	}

	lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
		cache.NewListWatchFromClient(
			lbc.client, "services", namespace, fields.Everything()),
		&api.Service{}, resyncPeriod, svcHandlers)

	nodeHandlers := framework.ResourceEventHandlerFuncs{
		AddFunc:    lbc.nodeQueue.enqueue,
		DeleteFunc: lbc.nodeQueue.enqueue,
		// Nodes are updated every 10s and we don't care, so no update handler.
	}

	// Node watch handlers. Nodes are cluster-scoped, hence the hand-rolled
	// list/watch instead of a namespaced ListWatch. Resync period is 0; the
	// node queue performs periodic reconciliation instead.
	lbc.nodeLister.Store, lbc.nodeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts api.ListOptions) (runtime.Object, error) {
				return lbc.client.Get().
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Do().
					Get()
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return lbc.client.Get().
					Prefix("watch").
					Resource("nodes").
					FieldsSelectorParam(fields.Everything()).
					Param("resourceVersion", options.ResourceVersion).Watch()
			},
		},
		&api.Node{}, 0, nodeHandlers)

	lbc.tr = &GCETranslator{&lbc}
	glog.V(3).Infof("Created new loadbalancer controller")

	return &lbc, nil
}
|
||||
|
||||
func ingressListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
|
||||
return func(opts api.ListOptions) (runtime.Object, error) {
|
||||
return c.Extensions().Ingress(ns).List(opts)
|
||||
}
|
||||
}
|
||||
|
||||
func ingressWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
|
||||
return func(options api.ListOptions) (watch.Interface, error) {
|
||||
return c.Extensions().Ingress(ns).Watch(options)
|
||||
}
|
||||
}
|
||||
|
||||
// enqueueIngressForService enqueues all the Ingress' for a Service.
|
||||
func (lbc *LoadBalancerController) enqueueIngressForService(obj interface{}) {
|
||||
svc := obj.(*api.Service)
|
||||
ings, err := lbc.ingLister.GetServiceIngress(svc)
|
||||
if err != nil {
|
||||
glog.V(5).Infof("ignoring service %v: %v", svc.Name, err)
|
||||
return
|
||||
}
|
||||
for _, ing := range ings {
|
||||
lbc.ingQueue.enqueue(&ing)
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the loadbalancer controller.
// It launches all informers and queue workers, then blocks until Stop
// closes stopCh.
func (lbc *LoadBalancerController) Run() {
	glog.Infof("Starting loadbalancer controller")
	// Informers keep the local listers in sync with the apiserver.
	go lbc.ingController.Run(lbc.stopCh)
	go lbc.nodeController.Run(lbc.stopCh)
	go lbc.svcController.Run(lbc.stopCh)
	// Workers drain the queues, invoking lbc.sync / lbc.syncNodes.
	go lbc.ingQueue.run(time.Second, lbc.stopCh)
	go lbc.nodeQueue.run(time.Second, lbc.stopCh)
	// Block until shutdown is requested.
	<-lbc.stopCh
	glog.Infof("Shutting down Loadbalancer Controller")
}
|
||||
|
||||
// Stop stops the loadbalancer controller. It also deletes cluster resources
// if deleteAll is true.
// Safe to call more than once; only the first call drains the queues.
func (lbc *LoadBalancerController) Stop(deleteAll bool) error {
	// Stop is invoked from the http endpoint.
	lbc.stopLock.Lock()
	defer lbc.stopLock.Unlock()

	// Only try draining the workqueue if we haven't already.
	if !lbc.shutdown {
		// Closing stopCh unblocks Run and stops informers and workers.
		close(lbc.stopCh)
		glog.Infof("Shutting down controller queues.")
		lbc.ingQueue.shutdown()
		lbc.nodeQueue.shutdown()
		lbc.shutdown = true
	}

	// Deleting shared cluster resources is idempotent.
	if deleteAll {
		glog.Infof("Shutting down cluster manager.")
		return lbc.CloudClusterManager.shutdown()
	}
	return nil
}
|
||||
|
||||
// sync manages Ingress create/updates/deletes.
|
||||
func (lbc *LoadBalancerController) sync(key string) {
|
||||
glog.V(3).Infof("Syncing %v", key)
|
||||
|
||||
paths, err := lbc.ingLister.List()
|
||||
if err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
nodePorts := lbc.tr.toNodePorts(&paths)
|
||||
lbNames := lbc.ingLister.Store.ListKeys()
|
||||
lbs, _ := lbc.ListRuntimeInfo()
|
||||
nodeNames, err := lbc.getReadyNodeNames()
|
||||
if err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
obj, ingExists, err := lbc.ingLister.Store.GetByKey(key)
|
||||
if err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
|
||||
// This performs a 2 phase checkpoint with the cloud:
|
||||
// * Phase 1 creates/verifies resources are as expected. At the end of a
|
||||
// successful checkpoint we know that existing L7s are WAI, and the L7
|
||||
// for the Ingress associated with "key" is ready for a UrlMap update.
|
||||
// If this encounters an error, eg for quota reasons, we want to invoke
|
||||
// Phase 2 right away and retry checkpointing.
|
||||
// * Phase 2 performs GC by refcounting shared resources. This needs to
|
||||
// happen periodically whether or not stage 1 fails. At the end of a
|
||||
// successful GC we know that there are no dangling cloud resources that
|
||||
// don't have an associated Kubernetes Ingress/Service/Endpoint.
|
||||
|
||||
defer func() {
|
||||
if err := lbc.CloudClusterManager.GC(lbNames, nodePorts); err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
}
|
||||
glog.V(3).Infof("Finished syncing %v", key)
|
||||
}()
|
||||
|
||||
if err := lbc.CloudClusterManager.Checkpoint(lbs, nodeNames, nodePorts); err != nil {
|
||||
// TODO: Implement proper backoff for the queue.
|
||||
eventMsg := "GCE"
|
||||
if utils.IsHTTPErrorCode(err, http.StatusForbidden) {
|
||||
eventMsg += " :Quota"
|
||||
}
|
||||
if ingExists {
|
||||
lbc.recorder.Eventf(obj.(*extensions.Ingress), api.EventTypeWarning, eventMsg, err.Error())
|
||||
} else {
|
||||
err = fmt.Errorf("%v Error: %v", eventMsg, err)
|
||||
}
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
|
||||
if !ingExists {
|
||||
return
|
||||
}
|
||||
// Update the UrlMap of the single loadbalancer that came through the watch.
|
||||
l7, err := lbc.CloudClusterManager.l7Pool.Get(key)
|
||||
if err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
|
||||
ing := *obj.(*extensions.Ingress)
|
||||
if urlMap, err := lbc.tr.toUrlMap(&ing); err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
} else if err := l7.UpdateUrlMap(urlMap); err != nil {
|
||||
lbc.recorder.Eventf(&ing, api.EventTypeWarning, "UrlMap", err.Error())
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
} else if lbc.updateIngressStatus(l7, ing); err != nil {
|
||||
lbc.recorder.Eventf(&ing, api.EventTypeWarning, "Status", err.Error())
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// updateIngressStatus updates the IP and annotations of a loadbalancer.
|
||||
// The annotations are parsed by kubectl describe.
|
||||
func (lbc *LoadBalancerController) updateIngressStatus(l7 *loadbalancers.L7, ing extensions.Ingress) error {
|
||||
ingClient := lbc.client.Extensions().Ingress(ing.Namespace)
|
||||
|
||||
// Update IP through update/status endpoint
|
||||
ip := l7.GetIP()
|
||||
currIng, err := ingClient.Get(ing.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currIng.Status = extensions.IngressStatus{
|
||||
LoadBalancer: api.LoadBalancerStatus{
|
||||
Ingress: []api.LoadBalancerIngress{
|
||||
{IP: ip},
|
||||
},
|
||||
},
|
||||
}
|
||||
lbIPs := ing.Status.LoadBalancer.Ingress
|
||||
if len(lbIPs) == 0 && ip != "" || lbIPs[0].IP != ip {
|
||||
// TODO: If this update fails it's probably resource version related,
|
||||
// which means it's advantageous to retry right away vs requeuing.
|
||||
glog.Infof("Updating loadbalancer %v/%v with IP %v", ing.Namespace, ing.Name, ip)
|
||||
if _, err := ingClient.UpdateStatus(currIng); err != nil {
|
||||
return err
|
||||
}
|
||||
lbc.recorder.Eventf(currIng, api.EventTypeNormal, "CREATE", "ip: %v", ip)
|
||||
}
|
||||
|
||||
// Update annotations through /update endpoint
|
||||
currIng, err = ingClient.Get(ing.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currIng.Annotations = loadbalancers.GetLBAnnotations(l7, currIng.Annotations, lbc.CloudClusterManager.backendPool)
|
||||
if !reflect.DeepEqual(ing.Annotations, currIng.Annotations) {
|
||||
glog.V(3).Infof("Updating annotations of %v/%v", ing.Namespace, ing.Name)
|
||||
if _, err := ingClient.Update(currIng); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListRuntimeInfo lists L7RuntimeInfo as understood by the loadbalancer module.
// Per-Ingress failures are logged and skipped/degraded rather than failing
// the whole listing; the error return is currently always nil.
func (lbc *LoadBalancerController) ListRuntimeInfo() (lbs []*loadbalancers.L7RuntimeInfo, err error) {
	for _, m := range lbc.ingLister.Store.List() {
		ing := m.(*extensions.Ingress)
		k, err := keyFunc(ing)
		if err != nil {
			// An unkeyable Ingress is skipped entirely.
			glog.Warningf("Cannot get key for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
			continue
		}
		tls, err := lbc.loadSecrets(ing)
		if err != nil {
			// Cert failure is non-fatal: the Ingress is still listed with a
			// nil TLS so the HTTP side keeps working.
			glog.Warningf("Cannot get certs for Ingress %v/%v: %v", ing.Namespace, ing.Name, err)
		}
		lbs = append(lbs, &loadbalancers.L7RuntimeInfo{
			Name:      k,
			TLS:       tls,
			AllowHTTP: ingAnnotations(ing.ObjectMeta.Annotations).allowHTTP(),
		})
	}
	return lbs, nil
}
|
||||
|
||||
func (lbc *LoadBalancerController) loadSecrets(ing *extensions.Ingress) (*loadbalancers.TLSCerts, error) {
|
||||
if len(ing.Spec.TLS) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
// GCE L7s currently only support a single cert.
|
||||
if len(ing.Spec.TLS) > 1 {
|
||||
glog.Warningf("Ignoring %d certs and taking the first for ingress %v/%v",
|
||||
len(ing.Spec.TLS)-1, ing.Namespace, ing.Name)
|
||||
}
|
||||
secretName := ing.Spec.TLS[0].SecretName
|
||||
// TODO: Replace this for a secret watcher.
|
||||
glog.V(3).Infof("Retrieving secret for ing %v with name %v", ing.Name, secretName)
|
||||
secret, err := lbc.client.Secrets(ing.Namespace).Get(secretName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert, ok := secret.Data[api.TLSCertKey]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Secret %v has no private key", secretName)
|
||||
}
|
||||
key, ok := secret.Data[api.TLSPrivateKeyKey]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Secret %v has no cert", secretName)
|
||||
}
|
||||
// TODO: Validate certificate with hostnames in ingress?
|
||||
return &loadbalancers.TLSCerts{Key: string(key), Cert: string(cert)}, nil
|
||||
}
|
||||
|
||||
// syncNodes manages the syncing of kubernetes nodes to gce instance groups.
|
||||
// The instancegroups are referenced by loadbalancer backends.
|
||||
func (lbc *LoadBalancerController) syncNodes(key string) {
|
||||
nodeNames, err := lbc.getReadyNodeNames()
|
||||
if err != nil {
|
||||
lbc.nodeQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
if err := lbc.CloudClusterManager.instancePool.Sync(nodeNames); err != nil {
|
||||
lbc.nodeQueue.requeue(key, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func nodeReady(node api.Node) bool {
|
||||
for ix := range node.Status.Conditions {
|
||||
condition := &node.Status.Conditions[ix]
|
||||
if condition.Type == api.NodeReady {
|
||||
return condition.Status == api.ConditionTrue
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getReadyNodeNames returns names of schedulable, ready nodes from the node lister.
|
||||
func (lbc *LoadBalancerController) getReadyNodeNames() ([]string, error) {
|
||||
nodeNames := []string{}
|
||||
nodes, err := lbc.nodeLister.NodeCondition(nodeReady).List()
|
||||
if err != nil {
|
||||
return nodeNames, err
|
||||
}
|
||||
for _, n := range nodes.Items {
|
||||
if n.Spec.Unschedulable {
|
||||
continue
|
||||
}
|
||||
nodeNames = append(nodeNames, n.Name)
|
||||
}
|
||||
return nodeNames, nil
|
||||
}
|
||||
375
controllers/gce/controller/controller_test.go
Normal file
375
controllers/gce/controller/controller_test.go
Normal file
|
|
@ -0,0 +1,375 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
)
|
||||
|
||||
const testClusterName = "testcluster"
|
||||
|
||||
var (
|
||||
testPathMap = map[string]string{"/foo": defaultBackendName(testClusterName)}
|
||||
testIPManager = testIP{}
|
||||
)
|
||||
|
||||
// TODO: Use utils.Namer instead of this function.
|
||||
func defaultBackendName(clusterName string) string {
|
||||
return fmt.Sprintf("%v-%v", backendPrefix, clusterName)
|
||||
}
|
||||
|
||||
// newLoadBalancerController create a loadbalancer controller.
// The REST client points at masterUrl but tests never hit it: objects are
// injected straight into the listers via addIngress.
func newLoadBalancerController(t *testing.T, cm *fakeClusterManager, masterUrl string) *LoadBalancerController {
	client := client.NewOrDie(&client.Config{Host: masterUrl, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	lb, err := NewLoadBalancerController(client, cm.ClusterManager, 1*time.Second, api.NamespaceAll)
	if err != nil {
		t.Fatalf("%v", err)
	}
	return lb
}
|
||||
|
||||
// toHTTPIngressPaths converts the given pathMap to a list of HTTPIngressPaths.
|
||||
func toHTTPIngressPaths(pathMap map[string]string) []extensions.HTTPIngressPath {
|
||||
httpPaths := []extensions.HTTPIngressPath{}
|
||||
for path, backend := range pathMap {
|
||||
httpPaths = append(httpPaths, extensions.HTTPIngressPath{
|
||||
Path: path,
|
||||
Backend: extensions.IngressBackend{
|
||||
ServiceName: backend,
|
||||
ServicePort: testBackendPort,
|
||||
},
|
||||
})
|
||||
}
|
||||
return httpPaths
|
||||
}
|
||||
|
||||
// toIngressRules converts the given ingressRule map to a list of IngressRules.
|
||||
func toIngressRules(hostRules map[string]utils.FakeIngressRuleValueMap) []extensions.IngressRule {
|
||||
rules := []extensions.IngressRule{}
|
||||
for host, pathMap := range hostRules {
|
||||
rules = append(rules, extensions.IngressRule{
|
||||
Host: host,
|
||||
IngressRuleValue: extensions.IngressRuleValue{
|
||||
HTTP: &extensions.HTTPIngressRuleValue{
|
||||
Paths: toHTTPIngressPaths(pathMap),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
return rules
|
||||
}
|
||||
|
||||
// newIngress returns a new Ingress with the given path map.
// The Ingress gets a random UUID name (so repeated calls never collide in
// the store), the cluster's default backend, and a pre-populated fake
// loadbalancer IP in its status.
func newIngress(hostRules map[string]utils.FakeIngressRuleValueMap) *extensions.Ingress {
	return &extensions.Ingress{
		ObjectMeta: api.ObjectMeta{
			Name:      fmt.Sprintf("%v", util.NewUUID()),
			Namespace: api.NamespaceNone,
		},
		Spec: extensions.IngressSpec{
			// Default backend receives any traffic not matched by Rules.
			Backend: &extensions.IngressBackend{
				ServiceName: defaultBackendName(testClusterName),
				ServicePort: testBackendPort,
			},
			Rules: toIngressRules(hostRules),
		},
		Status: extensions.IngressStatus{
			LoadBalancer: api.LoadBalancerStatus{
				Ingress: []api.LoadBalancerIngress{
					// Unique fake IP as if a loadbalancer already existed.
					{IP: testIPManager.ip()},
				},
			},
		},
	}
}
|
||||
|
||||
// validIngress returns a valid Ingress.
|
||||
func validIngress() *extensions.Ingress {
|
||||
return newIngress(map[string]utils.FakeIngressRuleValueMap{
|
||||
"foo.bar.com": testPathMap,
|
||||
})
|
||||
}
|
||||
|
||||
// getKey returns the key for an ingress.
|
||||
func getKey(ing *extensions.Ingress, t *testing.T) string {
|
||||
key, err := keyFunc(ing)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error getting key for Ingress %v: %v", ing.Name, err)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// nodePortManager is a helper to allocate ports to services and
// remember the allocations.
type nodePortManager struct {
	// portMap maps service name -> allocated node port.
	portMap map[string]int
	// Ports are drawn from the half-open range [start, end).
	start int
	end   int
	// namer derives GCE backend names from node ports.
	namer utils.Namer
}
|
||||
|
||||
// randPort generated pseudo random port numbers.
|
||||
func (p *nodePortManager) getNodePort(svcName string) int {
|
||||
if port, ok := p.portMap[svcName]; ok {
|
||||
return port
|
||||
}
|
||||
p.portMap[svcName] = rand.Intn(p.end-p.start) + p.start
|
||||
return p.portMap[svcName]
|
||||
}
|
||||
|
||||
// toNodePortSvcNames converts all service names in the given map to gce node
|
||||
// port names, eg foo -> k8-be-<foo nodeport>
|
||||
func (p *nodePortManager) toNodePortSvcNames(inputMap map[string]utils.FakeIngressRuleValueMap) map[string]utils.FakeIngressRuleValueMap {
|
||||
expectedMap := map[string]utils.FakeIngressRuleValueMap{}
|
||||
for host, rules := range inputMap {
|
||||
ruleMap := utils.FakeIngressRuleValueMap{}
|
||||
for path, svc := range rules {
|
||||
ruleMap[path] = p.namer.BeName(int64(p.portMap[svc]))
|
||||
}
|
||||
expectedMap[host] = ruleMap
|
||||
}
|
||||
return expectedMap
|
||||
}
|
||||
|
||||
func newPortManager(st, end int) *nodePortManager {
|
||||
return &nodePortManager{map[string]int{}, st, end, utils.Namer{}}
|
||||
}
|
||||
|
||||
// addIngress adds an ingress to the loadbalancer controllers ingress store. If
// a nodePortManager is supplied, it also adds all backends to the service store
// with a nodePort acquired through it.
func addIngress(lbc *LoadBalancerController, ing *extensions.Ingress, pm *nodePortManager) {
	lbc.ingLister.Store.Add(ing)
	if pm == nil {
		return
	}
	// Create a backing Service for every backend referenced by the Ingress.
	for _, rule := range ing.Spec.Rules {
		for _, path := range rule.HTTP.Paths {
			svc := &api.Service{
				ObjectMeta: api.ObjectMeta{
					Name:      path.Backend.ServiceName,
					Namespace: ing.Namespace,
				},
			}
			// Mirror the backend's port spec: numeric backend ports map to
			// the service Port, named ports map to the port Name.
			var svcPort api.ServicePort
			switch path.Backend.ServicePort.Type {
			case intstr.Int:
				svcPort = api.ServicePort{Port: int(path.Backend.ServicePort.IntVal)}
			default:
				svcPort = api.ServicePort{Name: path.Backend.ServicePort.StrVal}
			}
			// Allocate (or reuse) a node port for this service.
			svcPort.NodePort = pm.getNodePort(path.Backend.ServiceName)
			svc.Spec.Ports = []api.ServicePort{svcPort}
			lbc.svcLister.Store.Add(svc)
		}
	}
}
|
||||
|
||||
func TestLbCreateDelete(t *testing.T) {
|
||||
cm := NewFakeClusterManager(DefaultClusterUID)
|
||||
lbc := newLoadBalancerController(t, cm, "")
|
||||
inputMap1 := map[string]utils.FakeIngressRuleValueMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": "foo1svc",
|
||||
"/foo2": "foo2svc",
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": "bar1svc",
|
||||
"/bar2": "bar2svc",
|
||||
},
|
||||
}
|
||||
inputMap2 := map[string]utils.FakeIngressRuleValueMap{
|
||||
"baz.foobar.com": {
|
||||
"/foo": "foo1svc",
|
||||
"/bar": "bar1svc",
|
||||
},
|
||||
}
|
||||
pm := newPortManager(1, 65536)
|
||||
ings := []*extensions.Ingress{}
|
||||
for _, m := range []map[string]utils.FakeIngressRuleValueMap{inputMap1, inputMap2} {
|
||||
newIng := newIngress(m)
|
||||
addIngress(lbc, newIng, pm)
|
||||
ingStoreKey := getKey(newIng, t)
|
||||
lbc.sync(ingStoreKey)
|
||||
l7, err := cm.l7Pool.Get(ingStoreKey)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(m))
|
||||
ings = append(ings, newIng)
|
||||
}
|
||||
lbc.ingLister.Store.Delete(ings[0])
|
||||
lbc.sync(getKey(ings[0], t))
|
||||
|
||||
// BackendServices associated with ports of deleted Ingress' should get gc'd
|
||||
// when the Ingress is deleted, regardless of the service. At the same time
|
||||
// we shouldn't pull shared backends out from existing loadbalancers.
|
||||
unexpected := []int{pm.portMap["foo2svc"], pm.portMap["bar2svc"]}
|
||||
expected := []int{pm.portMap["foo1svc"], pm.portMap["bar1svc"]}
|
||||
|
||||
for _, port := range expected {
|
||||
if _, err := cm.backendPool.Get(int64(port)); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
for _, port := range unexpected {
|
||||
if be, err := cm.backendPool.Get(int64(port)); err == nil {
|
||||
t.Fatalf("Found backend %+v for port %v", be, port)
|
||||
}
|
||||
}
|
||||
lbc.ingLister.Store.Delete(ings[1])
|
||||
lbc.sync(getKey(ings[1], t))
|
||||
|
||||
// No cluster resources (except the defaults used by the cluster manager)
|
||||
// should exist at this point.
|
||||
for _, port := range expected {
|
||||
if be, err := cm.backendPool.Get(int64(port)); err == nil {
|
||||
t.Fatalf("Found backend %+v for port %v", be, port)
|
||||
}
|
||||
}
|
||||
if len(cm.fakeLbs.Fw) != 0 || len(cm.fakeLbs.Um) != 0 || len(cm.fakeLbs.Tp) != 0 {
|
||||
t.Fatalf("Loadbalancer leaked resources")
|
||||
}
|
||||
for _, lbName := range []string{getKey(ings[0], t), getKey(ings[1], t)} {
|
||||
if l7, err := cm.l7Pool.Get(lbName); err == nil {
|
||||
t.Fatalf("Found unexpected loadbalandcer %+v: %v", l7, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestLbFaultyUpdate verifies that sync repairs a url map that was mutated
// behind the controller's back.
func TestLbFaultyUpdate(t *testing.T) {
	cm := NewFakeClusterManager(DefaultClusterUID)
	lbc := newLoadBalancerController(t, cm, "")
	inputMap := map[string]utils.FakeIngressRuleValueMap{
		"foo.example.com": {
			"/foo1": "foo1svc",
			"/foo2": "foo2svc",
		},
		"bar.example.com": {
			"/bar1": "bar1svc",
			"/bar2": "bar2svc",
		},
	}
	ing := newIngress(inputMap)
	pm := newPortManager(1, 65536)
	addIngress(lbc, ing, pm)

	// First sync establishes the expected url map.
	ingStoreKey := getKey(ing, t)
	lbc.sync(ingStoreKey)
	l7, err := cm.l7Pool.Get(ingStoreKey)
	if err != nil {
		t.Fatalf("%v", err)
	}
	cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(inputMap))

	// Change the urlmap directly through the lb pool, resync, and
	// make sure the controller corrects it.
	l7.UpdateUrlMap(utils.GCEURLMap{
		"foo.example.com": {
			"/foo1": &compute.BackendService{SelfLink: "foo2svc"},
		},
	})

	lbc.sync(ingStoreKey)
	cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(inputMap))
}
|
||||
|
||||
// TestLbDefaulting verifies that an Ingress with empty host and path gets
// the GCE default host/path in its url map.
func TestLbDefaulting(t *testing.T) {
	cm := NewFakeClusterManager(DefaultClusterUID)
	lbc := newLoadBalancerController(t, cm, "")
	// Make sure the controller plugs in the default values accepted by GCE.
	ing := newIngress(map[string]utils.FakeIngressRuleValueMap{"": {"": "foo1svc"}})
	pm := newPortManager(1, 65536)
	addIngress(lbc, ing, pm)

	ingStoreKey := getKey(ing, t)
	lbc.sync(ingStoreKey)
	l7, err := cm.l7Pool.Get(ingStoreKey)
	if err != nil {
		t.Fatalf("%v", err)
	}
	// Empty host/path should have been rewritten to the GCE defaults.
	expectedMap := map[string]utils.FakeIngressRuleValueMap{loadbalancers.DefaultHost: {loadbalancers.DefaultPath: "foo1svc"}}
	cm.fakeLbs.CheckURLMap(t, l7, pm.toNodePortSvcNames(expectedMap))
}
|
||||
|
||||
// TestLbNoService verifies that an Ingress referencing a not-yet-existing
// Service still gets a loadbalancer, and that the url map is completed once
// the Service shows up.
func TestLbNoService(t *testing.T) {
	cm := NewFakeClusterManager(DefaultClusterUID)
	lbc := newLoadBalancerController(t, cm, "")
	inputMap := map[string]utils.FakeIngressRuleValueMap{
		"foo.example.com": {
			"/foo1": "foo1svc",
		},
	}
	ing := newIngress(inputMap)
	ing.Spec.Backend.ServiceName = "foo1svc"
	ingStoreKey := getKey(ing, t)

	// Adds ingress to store, but doesn't create an associated service.
	// This will still create the associated loadbalancer, it will just
	// have empty rules. The rules will get corrected when the service
	// pops up.
	addIngress(lbc, ing, nil)
	lbc.sync(ingStoreKey)

	l7, err := cm.l7Pool.Get(ingStoreKey)
	if err != nil {
		t.Fatalf("%v", err)
	}

	// Creates the service, next sync should have complete url map.
	pm := newPortManager(1, 65536)
	addIngress(lbc, ing, pm)
	// Simulate the service watch handler firing for the new service; this
	// should enqueue the Ingress that references it.
	lbc.enqueueIngressForService(&api.Service{
		ObjectMeta: api.ObjectMeta{
			Name:      "foo1svc",
			Namespace: ing.Namespace,
		},
	})
	// TODO: This will hang if the previous step failed to insert into queue
	key, _ := lbc.ingQueue.queue.Get()
	lbc.sync(key.(string))

	// The default backend also resolves to foo1svc now.
	inputMap[utils.DefaultBackendKey] = map[string]string{
		utils.DefaultBackendKey: "foo1svc",
	}
	expectedMap := pm.toNodePortSvcNames(inputMap)
	cm.fakeLbs.CheckURLMap(t, l7, expectedMap)
}
|
||||
|
||||
// testIP hands out unique fake IP addresses for test Ingress statuses.
type testIP struct {
	// start is the last final octet handed out; incremented by each ip() call.
	start int
}
|
||||
|
||||
func (t *testIP) ip() string {
|
||||
t.start++
|
||||
return fmt.Sprintf("0.0.0.%v", t.start)
|
||||
}
|
||||
|
||||
// TODO: Test lb status update when annotation stabilize
|
||||
52
controllers/gce/controller/doc.go
Normal file
52
controllers/gce/controller/doc.go
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This is the structure of the gce l7 controller:
|
||||
// apiserver <-> controller ---> pools --> cloud
|
||||
// | |
|
||||
// |-> Ingress |-> backends
|
||||
// |-> Services | |-> health checks
|
||||
// |-> Nodes |
|
||||
// |-> instance groups
|
||||
// | |-> port per backend
|
||||
// |
|
||||
// |-> loadbalancers
|
||||
// |-> http proxy
|
||||
// |-> forwarding rule
|
||||
// |-> urlmap
|
||||
// * apiserver: kubernetes api server.
|
||||
// * controller: gce l7 controller, watches apiserver and interacts
|
||||
// with sync pools. The controller doesn't know anything about the cloud.
|
||||
// Communication between the controller and pools is 1 way.
|
||||
// * pool: the controller tells each pool about desired state by inserting
|
||||
// into shared memory store. The pools sync this with the cloud. Pools are
|
||||
// also responsible for periodically checking the edge links between various
|
||||
// cloud resources.
|
||||
//
|
||||
// A note on sync pools: this package has 3 sync pools: for node, instances and
|
||||
// loadbalancer resources. A sync pool is meant to record all creates/deletes
|
||||
// performed by a controller and periodically verify that links are not broken.
|
||||
// For example, the controller might create a backend via backendPool.Add(),
|
||||
// the backend pool remembers this and continuously verifies that the backend
|
||||
// is connected to the right instance group, and that the instance group has
|
||||
// the right ports open.
|
||||
//
|
||||
// A note on naming convention: per golang style guide for Initialisms, Http
|
||||
// should be HTTP and Url should be URL, however because these interfaces
|
||||
// must match their siblings in the Kubernetes cloud provider, which are in turn
|
||||
// consistent with GCE compute API, there might be inconsistencies.
|
||||
|
||||
package controller
|
||||
70
controllers/gce/controller/fakes.go
Normal file
70
controllers/gce/controller/fakes.go
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"k8s.io/contrib/ingress/controllers/gce/backends"
|
||||
"k8s.io/contrib/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/contrib/ingress/controllers/gce/instances"
|
||||
"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
const (
|
||||
testDefaultBeNodePort = int64(3000)
|
||||
defaultZone = "default-zone"
|
||||
)
|
||||
|
||||
var testBackendPort = intstr.IntOrString{Type: intstr.Int, IntVal: 80}
|
||||
|
||||
// ClusterManager fake
type fakeClusterManager struct {
	*ClusterManager
	// The fakes backing the embedded ClusterManager's pools, retained so
	// tests can inspect the state those pools created.
	fakeLbs      *loadbalancers.FakeLoadBalancers
	fakeBackends *backends.FakeBackendServices
	fakeIGs      *instances.FakeInstanceGroups
}
|
||||
|
||||
// NewFakeClusterManager creates a new fake ClusterManager.
// It wires the pools together the same way the real cluster manager does
// (backends reference instance groups and health checks; the l7 pool
// references the backend pool), but on top of in-memory fakes.
func NewFakeClusterManager(clusterName string) *fakeClusterManager {
	fakeLbs := loadbalancers.NewFakeLoadBalancers(clusterName)
	fakeBackends := backends.NewFakeBackendServices()
	fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
	fakeHCs := healthchecks.NewFakeHealthChecks()
	// NOTE(review): unkeyed struct literal — assumes the cluster name is
	// Namer's first field; confirm against utils.Namer and prefer a keyed
	// literal.
	namer := utils.Namer{clusterName}
	nodePool := instances.NewNodePool(fakeIGs, defaultZone)
	healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
	backendPool := backends.NewBackendPool(
		fakeBackends,
		healthChecker, nodePool, namer)
	l7Pool := loadbalancers.NewLoadBalancerPool(
		fakeLbs,
		// TODO: change this
		backendPool,
		testDefaultBeNodePort,
		namer,
	)
	cm := &ClusterManager{
		ClusterNamer: namer,
		instancePool: nodePool,
		backendPool:  backendPool,
		l7Pool:       l7Pool,
	}
	return &fakeClusterManager{cm, fakeLbs, fakeBackends, fakeIGs}
}
|
||||
306
controllers/gce/controller/utils.go
Normal file
306
controllers/gce/controller/utils.go
Normal file
|
|
@ -0,0 +1,306 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/loadbalancers"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/util/workqueue"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// allowHTTPKey is the Ingress annotation controlling plain-HTTP exposure.
const allowHTTPKey = "kubernetes.io/ingress.allowHTTP"

// ingAnnotations represents Ingress annotations.
// It wraps the raw annotation map with typed accessors.
type ingAnnotations map[string]string

// allowHTTP reports whether the Ingress permits plain HTTP.
// Defaults to true when the annotation is absent or unparseable.
func (ing ingAnnotations) allowHTTP() bool {
	raw, present := ing[allowHTTPKey]
	if !present {
		return true
	}
	parsed, parseErr := strconv.ParseBool(raw)
	if parseErr != nil {
		return true
	}
	return parsed
}
|
||||
|
||||
// errorNodePortNotFound is an implementation of error.
// It is returned when an Ingress backend cannot be resolved to a service
// nodeport; callers use a type assertion on it to treat the failure as
// recoverable (see toUrlMap).
type errorNodePortNotFound struct {
	// backend is the Ingress backend whose lookup failed.
	backend extensions.IngressBackend
	// origErr is the underlying lookup error, if any.
	origErr error
}

// Error satisfies the error interface.
func (e errorNodePortNotFound) Error() string {
	return fmt.Sprintf("Could not find nodeport for backend %+v: %v",
		e.backend, e.origErr)
}
|
||||
|
||||
// taskQueue manages a work queue through an independent worker that
// invokes the given sync function for every work item inserted.
type taskQueue struct {
	// queue is the work queue the worker polls
	queue *workqueue.Type
	// sync is called for each item in the queue
	sync func(string)
	// workerDone is closed when the worker exits
	workerDone chan struct{}
}
|
||||
|
||||
// run drives a single worker that drains the queue, restarting it every
// period until stopCh is closed.
func (t *taskQueue) run(period time.Duration, stopCh <-chan struct{}) {
	wait.Until(t.worker, period, stopCh)
}
|
||||
|
||||
// enqueue enqueues ns/name of the given api object in the task queue.
|
||||
func (t *taskQueue) enqueue(obj interface{}) {
|
||||
key, err := keyFunc(obj)
|
||||
if err != nil {
|
||||
glog.Infof("Couldn't get key for object %+v: %v", obj, err)
|
||||
return
|
||||
}
|
||||
t.queue.Add(key)
|
||||
}
|
||||
|
||||
// requeue logs the sync failure and adds the key back to the work queue
// so the item is retried.
func (t *taskQueue) requeue(key string, err error) {
	glog.Errorf("Requeuing %v, err %v", key, err)
	t.queue.Add(key)
}
|
||||
|
||||
// worker processes work in the queue through sync.
// It loops until the queue reports shutdown, then ACKs by closing
// workerDone so shutdown() can return.
func (t *taskQueue) worker() {
	for {
		key, quit := t.queue.Get()
		if quit {
			close(t.workerDone)
			return
		}
		glog.V(3).Infof("Syncing %v", key)
		t.sync(key.(string))
		// Done tells the queue that processing of this key has finished.
		t.queue.Done(key)
	}
}
|
||||
|
||||
// shutdown shuts down the work queue and waits for the worker to ACK
// (the worker closes workerDone after observing the shutdown).
func (t *taskQueue) shutdown() {
	t.queue.ShutDown()
	<-t.workerDone
}
|
||||
|
||||
// NewTaskQueue creates a new task queue with the given sync function.
|
||||
// The sync function is called for every element inserted into the queue.
|
||||
func NewTaskQueue(syncFn func(string)) *taskQueue {
|
||||
return &taskQueue{
|
||||
queue: workqueue.New(),
|
||||
sync: syncFn,
|
||||
workerDone: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// compareLinks returns true if the 2 self links are equal.
// Empty links never match, since an unset SelfLink carries no identity.
func compareLinks(l1, l2 string) bool {
	// TODO: These can be partial links
	if l1 == "" {
		return false
	}
	return l1 == l2
}
|
||||
|
||||
// StoreToIngressLister makes a Store that lists Ingress.
// It embeds the underlying cache.Store and layers typed listing helpers
// on top of it.
// TODO: Move this to cache/listers post 1.1.
type StoreToIngressLister struct {
	cache.Store
}
|
||||
|
||||
// List lists all Ingress' in the store.
|
||||
func (s *StoreToIngressLister) List() (ing extensions.IngressList, err error) {
|
||||
for _, m := range s.Store.List() {
|
||||
ing.Items = append(ing.Items, *(m.(*extensions.Ingress)))
|
||||
}
|
||||
return ing, nil
|
||||
}
|
||||
|
||||
// GetServiceIngress gets all the Ingress' that have rules pointing to a service.
|
||||
// Note that this ignores services without the right nodePorts.
|
||||
func (s *StoreToIngressLister) GetServiceIngress(svc *api.Service) (ings []extensions.Ingress, err error) {
|
||||
for _, m := range s.Store.List() {
|
||||
ing := *m.(*extensions.Ingress)
|
||||
if ing.Namespace != svc.Namespace {
|
||||
continue
|
||||
}
|
||||
for _, rules := range ing.Spec.Rules {
|
||||
if rules.IngressRuleValue.HTTP == nil {
|
||||
continue
|
||||
}
|
||||
for _, p := range rules.IngressRuleValue.HTTP.Paths {
|
||||
if p.Backend.ServiceName == svc.Name {
|
||||
ings = append(ings, ing)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(ings) == 0 {
|
||||
err = fmt.Errorf("No ingress for service %v", svc.Name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GCETranslator helps with kubernetes -> gce api conversion.
// It embeds the LoadBalancerController to reach its listers and the
// cloud cluster manager.
type GCETranslator struct {
	*LoadBalancerController
}
|
||||
|
||||
// toUrlMap converts an ingress to a map of subdomain: url-regex: gce backend.
// Rules without an HTTP section are skipped with an error log. A backend
// whose service has no nodeport yet is logged and skipped; a backend with
// no GCE backend service aborts the whole conversion so the caller keeps
// requeuing the ingress.
func (t *GCETranslator) toUrlMap(ing *extensions.Ingress) (utils.GCEURLMap, error) {
	hostPathBackend := utils.GCEURLMap{}
	for _, rule := range ing.Spec.Rules {
		if rule.HTTP == nil {
			glog.Errorf("Ignoring non http Ingress rule")
			continue
		}
		pathToBackend := map[string]*compute.BackendService{}
		for _, p := range rule.HTTP.Paths {
			backend, err := t.toGCEBackend(&p.Backend, ing.Namespace)
			if err != nil {
				// If a service doesn't have a nodeport we can still forward traffic
				// to all other services under the assumption that the user will
				// modify nodeport.
				if _, ok := err.(errorNodePortNotFound); ok {
					glog.Infof("%v", err)
					continue
				}

				// If a service doesn't have a backend, there's nothing the user
				// can do to correct this (the admin might've limited quota).
				// So keep requeuing the l7 till all backends exist.
				return utils.GCEURLMap{}, err
			}
			// The Ingress spec defines empty path as catch-all, so if a user
			// asks for a single host and multiple empty paths, all traffic is
			// sent to one of the last backend in the rules list.
			path := p.Path
			if path == "" {
				path = loadbalancers.DefaultPath
			}
			pathToBackend[path] = backend
		}
		// If multiple hostless rule sets are specified, last one wins
		host := rule.Host
		if host == "" {
			host = loadbalancers.DefaultHost
		}
		hostPathBackend[host] = pathToBackend
	}
	// NOTE(review): the default-backend lookup error is deliberately
	// discarded; a missing default backend leaves the map's default unset.
	defaultBackend, _ := t.toGCEBackend(ing.Spec.Backend, ing.Namespace)
	hostPathBackend.PutDefaultBackend(defaultBackend)
	return hostPathBackend, nil
}
|
||||
|
||||
func (t *GCETranslator) toGCEBackend(be *extensions.IngressBackend, ns string) (*compute.BackendService, error) {
|
||||
if be == nil {
|
||||
return nil, nil
|
||||
}
|
||||
port, err := t.getServiceNodePort(*be, ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
backend, err := t.CloudClusterManager.backendPool.Get(int64(port))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"No GCE backend exists for port %v, kube backend %+v", port, be)
|
||||
}
|
||||
return backend, nil
|
||||
}
|
||||
|
||||
// getServiceNodePort looks in the svc store for a matching service:port,
|
||||
// and returns the nodeport.
|
||||
func (t *GCETranslator) getServiceNodePort(be extensions.IngressBackend, namespace string) (int, error) {
|
||||
obj, exists, err := t.svcLister.Store.Get(
|
||||
&api.Service{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: be.ServiceName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
})
|
||||
if !exists {
|
||||
return invalidPort, errorNodePortNotFound{be, fmt.Errorf(
|
||||
"Service %v/%v not found in store", namespace, be.ServiceName)}
|
||||
}
|
||||
if err != nil {
|
||||
return invalidPort, errorNodePortNotFound{be, err}
|
||||
}
|
||||
var nodePort int
|
||||
for _, p := range obj.(*api.Service).Spec.Ports {
|
||||
switch be.ServicePort.Type {
|
||||
case intstr.Int:
|
||||
if p.Port == int(be.ServicePort.IntVal) {
|
||||
nodePort = p.NodePort
|
||||
break
|
||||
}
|
||||
default:
|
||||
if p.Name == be.ServicePort.StrVal {
|
||||
nodePort = p.NodePort
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if nodePort != invalidPort {
|
||||
return nodePort, nil
|
||||
}
|
||||
return invalidPort, errorNodePortNotFound{be, fmt.Errorf(
|
||||
"Could not find matching nodeport from service.")}
|
||||
}
|
||||
|
||||
// toNodePorts converts a pathlist to a flat list of nodeports.
|
||||
func (t *GCETranslator) toNodePorts(ings *extensions.IngressList) []int64 {
|
||||
knownPorts := []int64{}
|
||||
for _, ing := range ings.Items {
|
||||
defaultBackend := ing.Spec.Backend
|
||||
if defaultBackend != nil {
|
||||
port, err := t.getServiceNodePort(*defaultBackend, ing.Namespace)
|
||||
if err != nil {
|
||||
glog.Infof("%v", err)
|
||||
} else {
|
||||
knownPorts = append(knownPorts, int64(port))
|
||||
}
|
||||
}
|
||||
for _, rule := range ing.Spec.Rules {
|
||||
if rule.HTTP == nil {
|
||||
glog.Errorf("Ignoring non http Ingress rule.")
|
||||
continue
|
||||
}
|
||||
for _, path := range rule.HTTP.Paths {
|
||||
port, err := t.getServiceNodePort(path.Backend, ing.Namespace)
|
||||
if err != nil {
|
||||
glog.Infof("%v", err)
|
||||
continue
|
||||
}
|
||||
knownPorts = append(knownPorts, int64(port))
|
||||
}
|
||||
}
|
||||
}
|
||||
return knownPorts
|
||||
}
|
||||
67
controllers/gce/healthchecks/fakes.go
Normal file
67
controllers/gce/healthchecks/fakes.go
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthchecks
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// NewFakeHealthChecks returns a new FakeHealthChecks.
|
||||
func NewFakeHealthChecks() *FakeHealthChecks {
|
||||
return &FakeHealthChecks{hc: []*compute.HttpHealthCheck{}}
|
||||
}
|
||||
|
||||
// FakeHealthChecks fakes out health checks.
type FakeHealthChecks struct {
	// hc holds every created check, in creation order.
	hc []*compute.HttpHealthCheck
}
|
||||
|
||||
// CreateHttpHealthCheck fakes out http health check creation.
// The check is recorded in memory; creation never fails.
func (f *FakeHealthChecks) CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error {
	f.hc = append(f.hc, hc)
	return nil
}
|
||||
|
||||
// GetHttpHealthCheck fakes out getting a http health check from the cloud.
|
||||
func (f *FakeHealthChecks) GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error) {
|
||||
for _, h := range f.hc {
|
||||
if h.Name == name {
|
||||
return h, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Health check %v not found.", name)
|
||||
}
|
||||
|
||||
// DeleteHttpHealthCheck fakes out deleting a http health check.
|
||||
func (f *FakeHealthChecks) DeleteHttpHealthCheck(name string) error {
|
||||
healthChecks := []*compute.HttpHealthCheck{}
|
||||
exists := false
|
||||
for _, h := range f.hc {
|
||||
if h.Name == name {
|
||||
exists = true
|
||||
continue
|
||||
}
|
||||
healthChecks = append(healthChecks, h)
|
||||
}
|
||||
if !exists {
|
||||
return fmt.Errorf("Failed to find health check %v", name)
|
||||
}
|
||||
f.hc = healthChecks
|
||||
return nil
|
||||
}
|
||||
89
controllers/gce/healthchecks/healthchecks.go
Normal file
89
controllers/gce/healthchecks/healthchecks.go
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthchecks
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// HealthChecks manages health checks.
type HealthChecks struct {
	// cloud performs the actual health-check API calls.
	cloud SingleHealthCheck
	// defaultPath is the request path used when Add is given "".
	defaultPath string
	// namer derives a check's name from its port.
	namer utils.Namer
}
|
||||
|
||||
// NewHealthChecker creates a new health checker.
|
||||
// cloud: the cloud object implementing SingleHealthCheck.
|
||||
// defaultHealthCheckPath: is the HTTP path to use for health checks.
|
||||
func NewHealthChecker(cloud SingleHealthCheck, defaultHealthCheckPath string, namer utils.Namer) HealthChecker {
|
||||
return &HealthChecks{cloud, defaultHealthCheckPath, namer}
|
||||
}
|
||||
|
||||
// Add adds a healthcheck if one for the same port doesn't already exist.
// A non-empty path overrides the default request path. The error from Get
// is deliberately discarded: any lookup failure is treated as "no existing
// check" and triggers creation.
func (h *HealthChecks) Add(port int64, path string) error {
	hc, _ := h.Get(port)
	name := h.namer.BeName(port)
	if path == "" {
		path = h.defaultPath
	}
	if hc == nil {
		glog.Infof("Creating health check %v", name)
		if err := h.cloud.CreateHttpHealthCheck(
			&compute.HttpHealthCheck{
				Name:        name,
				Port:        port,
				RequestPath: path,
				Description: "Default kubernetes L7 Loadbalancing health check.",
				// How often to health check.
				CheckIntervalSec: 1,
				// How long to wait before claiming failure of a health check.
				TimeoutSec: 1,
				// Number of healthchecks to pass for a vm to be deemed healthy.
				HealthyThreshold: 1,
				// Number of healthchecks to fail before the vm is deemed unhealthy.
				UnhealthyThreshold: 10,
			}); err != nil {
			return err
		}
	} else {
		// TODO: Does this health check need an edge hop?
		glog.Infof("Health check %v already exists", hc.Name)
	}
	return nil
}
|
||||
|
||||
// Delete deletes the health check by port.
|
||||
func (h *HealthChecks) Delete(port int64) error {
|
||||
name := h.namer.BeName(port)
|
||||
glog.Infof("Deleting health check %v", name)
|
||||
if err := h.cloud.DeleteHttpHealthCheck(h.namer.BeName(port)); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the given health check.
// The check is looked up by the name the namer derives from the port.
func (h *HealthChecks) Get(port int64) (*compute.HttpHealthCheck, error) {
	return h.cloud.GetHttpHealthCheck(h.namer.BeName(port))
}
|
||||
35
controllers/gce/healthchecks/interfaces.go
Normal file
35
controllers/gce/healthchecks/interfaces.go
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package healthchecks
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// SingleHealthCheck is an interface to manage a single GCE health check.
type SingleHealthCheck interface {
	// CreateHttpHealthCheck creates the given health check in the cloud.
	CreateHttpHealthCheck(hc *compute.HttpHealthCheck) error
	// DeleteHttpHealthCheck deletes the health check with the given name.
	DeleteHttpHealthCheck(name string) error
	// GetHttpHealthCheck retrieves the health check with the given name.
	GetHttpHealthCheck(name string) (*compute.HttpHealthCheck, error)
}
|
||||
|
||||
// HealthChecker is an interface to manage cloud HTTPHealthChecks.
// Implementations key checks by nodeport (see HealthChecks).
type HealthChecker interface {
	// Add ensures a check exists for the port; "" selects the default path.
	Add(port int64, path string) error
	// Delete removes the check for the port.
	Delete(port int64) error
	// Get returns the check for the port.
	Get(port int64) (*compute.HttpHealthCheck, error)
}
|
||||
102
controllers/gce/ingress-app.yaml
Normal file
102
controllers/gce/ingress-app.yaml
Normal file
|
|
@ -0,0 +1,102 @@
|
|||
# This Service writes the HTTP request headers out to the response. Access it
|
||||
# through its NodePort, LoadBalancer or Ingress endpoint.
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersx
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30301
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersdefault
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30302
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheadersy
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30284
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
# This is a replication controller for the endpoint that services the 3
|
||||
# Services above.
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: echoheaders
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
containers:
|
||||
- name: echoheaders
|
||||
image: bprashanth/echoserver:0.0
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
---
|
||||
# This is the Ingress resource that creates a HTTP Loadbalancer configured
|
||||
# according to the Ingress rules.
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: echomap
|
||||
spec:
|
||||
backend:
|
||||
serviceName: echoheadersdefault
|
||||
servicePort: 80
|
||||
rules:
|
||||
- host: foo.bar.com
|
||||
http:
|
||||
paths:
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
- host: bar.baz.com
|
||||
http:
|
||||
paths:
|
||||
- path: /bar
|
||||
backend:
|
||||
serviceName: echoheadersy
|
||||
servicePort: 80
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheadersx
|
||||
servicePort: 80
|
||||
|
||||
127
controllers/gce/instances/fakes.go
Normal file
127
controllers/gce/instances/fakes.go
Normal file
|
|
@ -0,0 +1,127 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
// NewFakeInstanceGroups creates a new FakeInstanceGroups.
|
||||
func NewFakeInstanceGroups(nodes sets.String) *FakeInstanceGroups {
|
||||
return &FakeInstanceGroups{
|
||||
instances: nodes,
|
||||
listResult: getInstanceList(nodes),
|
||||
namer: utils.Namer{},
|
||||
}
|
||||
}
|
||||
|
||||
// InstanceGroup fakes

// FakeInstanceGroups fakes out the instance groups api.
type FakeInstanceGroups struct {
	// instances is the flat set of instance names across all groups.
	instances sets.String
	// instanceGroups records every group created.
	instanceGroups []*compute.InstanceGroup
	// Ports records every port added via AddPortToInstanceGroup.
	Ports []int64
	// getResult appears unused by the methods visible here.
	// NOTE(review): confirm whether it can be removed.
	getResult *compute.InstanceGroup
	// listResult is returned verbatim by ListInstancesInInstanceGroup.
	listResult *compute.InstanceGroupsListInstances
	// calls records utils.* op codes of API calls, for test assertions.
	calls []int
	// namer derives named-port names.
	namer utils.Namer
}
|
||||
|
||||
// GetInstanceGroup fakes getting an instance group from the cloud.
|
||||
func (f *FakeInstanceGroups) GetInstanceGroup(name, zone string) (*compute.InstanceGroup, error) {
|
||||
f.calls = append(f.calls, utils.Get)
|
||||
for _, ig := range f.instanceGroups {
|
||||
if ig.Name == name {
|
||||
return ig, nil
|
||||
}
|
||||
}
|
||||
// TODO: Return googleapi 404 error
|
||||
return nil, fmt.Errorf("Instance group %v not found", name)
|
||||
}
|
||||
|
||||
// CreateInstanceGroup fakes instance group creation.
|
||||
func (f *FakeInstanceGroups) CreateInstanceGroup(name, zone string) (*compute.InstanceGroup, error) {
|
||||
newGroup := &compute.InstanceGroup{Name: name, SelfLink: name}
|
||||
f.instanceGroups = append(f.instanceGroups, newGroup)
|
||||
return newGroup, nil
|
||||
}
|
||||
|
||||
// DeleteInstanceGroup fakes instance group deletion.
|
||||
func (f *FakeInstanceGroups) DeleteInstanceGroup(name, zone string) error {
|
||||
newGroups := []*compute.InstanceGroup{}
|
||||
found := false
|
||||
for _, ig := range f.instanceGroups {
|
||||
if ig.Name == name {
|
||||
found = true
|
||||
continue
|
||||
}
|
||||
newGroups = append(newGroups, ig)
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("Instance Group %v not found", name)
|
||||
}
|
||||
f.instanceGroups = newGroups
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListInstancesInInstanceGroup fakes listing instances in an instance group.
// The canned listResult is returned regardless of name, zone or state.
func (f *FakeInstanceGroups) ListInstancesInInstanceGroup(name, zone string, state string) (*compute.InstanceGroupsListInstances, error) {
	return f.listResult, nil
}
|
||||
|
||||
// AddInstancesToInstanceGroup fakes adding instances to an instance group.
// The op code is recorded and the names added to the flat instance set.
func (f *FakeInstanceGroups) AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error {
	f.calls = append(f.calls, utils.AddInstances)
	f.instances.Insert(instanceNames...)
	return nil
}
}
|
||||
|
||||
// RemoveInstancesFromInstanceGroup fakes removing instances from an instance group.
// The op code is recorded and the names removed from the flat instance set.
func (f *FakeInstanceGroups) RemoveInstancesFromInstanceGroup(name, zone string, instanceNames []string) error {
	f.calls = append(f.calls, utils.RemoveInstances)
	f.instances.Delete(instanceNames...)
	return nil
}
}
|
||||
|
||||
// AddPortToInstanceGroup fakes adding ports to an Instance Group.
// The port is recorded and a NamedPort with the namer-derived name returned.
func (f *FakeInstanceGroups) AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error) {
	f.Ports = append(f.Ports, port)
	return &compute.NamedPort{Name: f.namer.BeName(port), Port: port}, nil
}
|
||||
|
||||
// getInstanceList returns an instance list based on the given names.
|
||||
// The names cannot contain a '.', the real gce api validates against this.
|
||||
func getInstanceList(nodeNames sets.String) *compute.InstanceGroupsListInstances {
|
||||
instanceNames := nodeNames.List()
|
||||
computeInstances := []*compute.InstanceWithNamedPorts{}
|
||||
for _, name := range instanceNames {
|
||||
instanceLink := fmt.Sprintf(
|
||||
"https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
|
||||
"project", "zone", name)
|
||||
computeInstances = append(
|
||||
computeInstances, &compute.InstanceWithNamedPorts{
|
||||
Instance: instanceLink})
|
||||
}
|
||||
return &compute.InstanceGroupsListInstances{
|
||||
Items: computeInstances,
|
||||
}
|
||||
}
|
||||
165
controllers/gce/instances/instances.go
Normal file
165
controllers/gce/instances/instances.go
Normal file
|
|
@ -0,0 +1,165 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/storage"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (
	// allInstances is the state string required by the gce library to
	// list all instances.
	allInstances = "ALL"
)
|
||||
|
||||
// Instances implements NodePool.
// It syncs Kubernetes nodes with members of cloud instance groups within
// a single zone.
type Instances struct {
	// cloud is the instance-groups API used for all group operations.
	cloud InstanceGroups
	// zone is the only zone this pool operates in.
	zone string
	// snapshotter remembers every group touched so Sync knows which
	// groups to reconcile.
	snapshotter storage.Snapshotter
}
|
||||
|
||||
// NewNodePool creates a new node pool.
|
||||
// - cloud: implements InstanceGroups, used to sync Kubernetes nodes with
|
||||
// members of the cloud InstanceGroup.
|
||||
func NewNodePool(cloud InstanceGroups, zone string) NodePool {
|
||||
glog.V(3).Infof("NodePool is only aware of instances in zone %v", zone)
|
||||
return &Instances{cloud, zone, storage.NewInMemoryPool()}
|
||||
}
|
||||
|
||||
// AddInstanceGroup creates or gets an instance group if it doesn't exist
// and adds the given port to it.
// The error from Get is deliberately discarded: any lookup failure is
// treated as "group missing" and triggers creation.
func (i *Instances) AddInstanceGroup(name string, port int64) (*compute.InstanceGroup, *compute.NamedPort, error) {
	ig, _ := i.Get(name)
	if ig == nil {
		glog.Infof("Creating instance group %v", name)
		var err error
		ig, err = i.cloud.CreateInstanceGroup(name, i.zone)
		if err != nil {
			return nil, nil, err
		}
	} else {
		glog.V(3).Infof("Instance group already exists %v", name)
	}
	// defer evaluates its args here, so the created/fetched group is
	// snapshotted even if the port addition below fails.
	defer i.snapshotter.Add(name, ig)
	namedPort, err := i.cloud.AddPortToInstanceGroup(ig, port)
	if err != nil {
		return nil, nil, err
	}

	return ig, namedPort, nil
}
|
||||
|
||||
// DeleteInstanceGroup deletes the given IG by name.
// The snapshot entry is removed (via defer) even when the cloud delete
// fails, so Sync stops tracking the group either way.
func (i *Instances) DeleteInstanceGroup(name string) error {
	defer i.snapshotter.Delete(name)
	return i.cloud.DeleteInstanceGroup(name, i.zone)
}
|
||||
|
||||
func (i *Instances) list(name string) (sets.String, error) {
|
||||
nodeNames := sets.NewString()
|
||||
instances, err := i.cloud.ListInstancesInInstanceGroup(
|
||||
name, i.zone, allInstances)
|
||||
if err != nil {
|
||||
return nodeNames, err
|
||||
}
|
||||
for _, ins := range instances.Items {
|
||||
// TODO: If round trips weren't so slow one would be inclided
|
||||
// to GetInstance using this url and get the name.
|
||||
parts := strings.Split(ins.Instance, "/")
|
||||
nodeNames.Insert(parts[len(parts)-1])
|
||||
}
|
||||
return nodeNames, nil
|
||||
}
|
||||
|
||||
// Get returns the Instance Group by name.
|
||||
func (i *Instances) Get(name string) (*compute.InstanceGroup, error) {
|
||||
ig, err := i.cloud.GetInstanceGroup(name, i.zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i.snapshotter.Add(name, ig)
|
||||
return ig, nil
|
||||
}
|
||||
|
||||
// Add adds the given instances to the Instance Group.
// The call is delegated straight to the cloud in this pool's zone.
func (i *Instances) Add(groupName string, names []string) error {
	glog.V(3).Infof("Adding nodes %v to %v", names, groupName)
	return i.cloud.AddInstancesToInstanceGroup(groupName, i.zone, names)
}
|
||||
|
||||
// Remove removes the given instances from the Instance Group.
// The call is delegated straight to the cloud in this pool's zone.
func (i *Instances) Remove(groupName string, names []string) error {
	glog.V(3).Infof("Removing nodes %v from %v", names, groupName)
	return i.cloud.RemoveInstancesFromInstanceGroup(groupName, i.zone, names)
}
|
||||
|
||||
// Sync syncs kubernetes instances with the instances in the instance group.
|
||||
func (i *Instances) Sync(nodes []string) (err error) {
|
||||
glog.V(3).Infof("Syncing nodes %v", nodes)
|
||||
|
||||
defer func() {
|
||||
// The node pool is only responsible for syncing nodes to instance
|
||||
// groups. It never creates/deletes, so if an instance groups is
|
||||
// not found there's nothing it can do about it anyway. Most cases
|
||||
// this will happen because the backend pool has deleted the instance
|
||||
// group, however if it happens because a user deletes the IG by mistake
|
||||
// we should just wait till the backend pool fixes it.
|
||||
if utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
glog.Infof("Node pool encountered a 404, ignoring: %v", err)
|
||||
err = nil
|
||||
}
|
||||
}()
|
||||
|
||||
pool := i.snapshotter.Snapshot()
|
||||
for name := range pool {
|
||||
gceNodes := sets.NewString()
|
||||
gceNodes, err = i.list(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
kubeNodes := sets.NewString(nodes...)
|
||||
|
||||
// A node deleted via kubernetes could still exist as a gce vm. We don't
|
||||
// want to route requests to it. Similarly, a node added to kubernetes
|
||||
// needs to get added to the instance group so we do route requests to it.
|
||||
|
||||
removeNodes := gceNodes.Difference(kubeNodes).List()
|
||||
addNodes := kubeNodes.Difference(gceNodes).List()
|
||||
if len(removeNodes) != 0 {
|
||||
if err = i.Remove(
|
||||
name, gceNodes.Difference(kubeNodes).List()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(addNodes) != 0 {
|
||||
if err = i.Add(
|
||||
name, kubeNodes.Difference(gceNodes).List()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
75
controllers/gce/instances/instances_test.go
Normal file
75
controllers/gce/instances/instances_test.go
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
const defaultZone = "default-zone"
|
||||
|
||||
// TestNodePoolSync verifies that NodePool.Sync reconciles instance-group
// membership against the given kubernetes node list: stale VMs are removed,
// missing nodes are added, and a sync with no differences issues no cloud
// calls at all.
func TestNodePoolSync(t *testing.T) {
	f := NewFakeInstanceGroups(sets.NewString(
		[]string{"n1", "n2"}...))
	pool := NewNodePool(f, defaultZone)
	pool.AddInstanceGroup("test", 80)

	// KubeNodes: n1
	// GCENodes: n1, n2
	// Remove n2 from the instance group.

	f.calls = []int{}
	kubeNodes := sets.NewString([]string{"n1"}...)
	pool.Sync(kubeNodes.List())
	// After sync the fake's membership must exactly match the kube node set.
	if f.instances.Len() != kubeNodes.Len() || !kubeNodes.IsSuperset(f.instances) {
		t.Fatalf("%v != %v", kubeNodes, f.instances)
	}

	// KubeNodes: n1, n2
	// GCENodes: n1
	// Try to add n2 to the instance group.

	f = NewFakeInstanceGroups(sets.NewString([]string{"n1"}...))
	pool = NewNodePool(f, defaultZone)
	pool.AddInstanceGroup("test", 80)

	f.calls = []int{}
	kubeNodes = sets.NewString([]string{"n1", "n2"}...)
	pool.Sync(kubeNodes.List())
	if f.instances.Len() != kubeNodes.Len() ||
		!kubeNodes.IsSuperset(f.instances) {
		t.Fatalf("%v != %v", kubeNodes, f.instances)
	}

	// KubeNodes: n1, n2
	// GCENodes: n1, n2
	// Do nothing.

	f = NewFakeInstanceGroups(sets.NewString([]string{"n1", "n2"}...))
	pool = NewNodePool(f, defaultZone)
	pool.AddInstanceGroup("test", 80)

	f.calls = []int{}
	kubeNodes = sets.NewString([]string{"n1", "n2"}...)
	pool.Sync(kubeNodes.List())
	// The fake records every cloud mutation in f.calls; a no-op sync must
	// leave it empty.
	if len(f.calls) != 0 {
		t.Fatalf(
			"Did not expect any calls, got %+v", f.calls)
	}
}
|
||||
47
controllers/gce/instances/interfaces.go
Normal file
47
controllers/gce/instances/interfaces.go
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package instances
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// NodePool is an interface to manage a pool of kubernetes nodes synced with vm instances in the cloud
// through the InstanceGroups interface.
type NodePool interface {
	// AddInstanceGroup creates an instance group exposing the given port.
	AddInstanceGroup(name string, port int64) (*compute.InstanceGroup, *compute.NamedPort, error)
	// DeleteInstanceGroup deletes the named instance group.
	DeleteInstanceGroup(name string) error

	// TODO: Refactor for modularity
	// Add adds the given nodes to the named instance group.
	Add(groupName string, nodeNames []string) error
	// Remove removes the given nodes from the named instance group.
	Remove(groupName string, nodeNames []string) error
	// Sync reconciles membership of all known instance groups against nodeNames.
	Sync(nodeNames []string) error
	// Get returns the named instance group.
	Get(name string) (*compute.InstanceGroup, error)
}

// InstanceGroups is an interface for managing gce instances groups, and the instances therein.
type InstanceGroups interface {
	GetInstanceGroup(name, zone string) (*compute.InstanceGroup, error)
	CreateInstanceGroup(name, zone string) (*compute.InstanceGroup, error)
	DeleteInstanceGroup(name, zone string) error

	// TODO: Refactor for modularity.
	ListInstancesInInstanceGroup(name, zone string, state string) (*compute.InstanceGroupsListInstances, error)
	AddInstancesToInstanceGroup(name, zone string, instanceNames []string) error
	RemoveInstancesFromInstanceGroup(name, zone string, instanceName []string) error
	AddPortToInstanceGroup(ig *compute.InstanceGroup, port int64) (*compute.NamedPort, error)
}
|
||||
438
controllers/gce/loadbalancers/fakes.go
Normal file
438
controllers/gce/loadbalancers/fakes.go
Normal file
|
|
@ -0,0 +1,438 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
)
|
||||
|
||||
// testIPManager hands out sequential fake IP addresses for the test package.
var testIPManager = testIP{}

// testIP generates fake IPv4 addresses of the form 0.0.0.N, where N is a
// monotonically increasing counter.
type testIP struct {
	start int
}

// ip returns the next fake address in the sequence.
func (t *testIP) ip() string {
	t.start++
	return fmt.Sprintf("0.0.0.%v", t.start)
}

// Loadbalancer fakes

// FakeLoadBalancers is a type that fakes out the loadbalancer interface.
type FakeLoadBalancers struct {
	Fw    []*compute.ForwardingRule   // global forwarding rules
	Um    []*compute.UrlMap           // url maps
	Tp    []*compute.TargetHttpProxy  // http target proxies
	Tps   []*compute.TargetHttpsProxy // https target proxies
	IP    []*compute.Address          // reserved static IPs
	Certs []*compute.SslCertificate   // ssl certificates
	name  string                      // name woven into resource names/selfLinks
}
|
||||
|
||||
// TODO: There is some duplication between these functions and the name mungers in
|
||||
// loadbalancer file.
|
||||
func (f *FakeLoadBalancers) fwName(https bool) string {
|
||||
if https {
|
||||
return fmt.Sprintf("%v-%v", httpsForwardingRulePrefix, f.name)
|
||||
}
|
||||
return fmt.Sprintf("%v-%v", forwardingRulePrefix, f.name)
|
||||
}
|
||||
|
||||
func (f *FakeLoadBalancers) umName() string {
|
||||
return fmt.Sprintf("%v-%v", urlMapPrefix, f.name)
|
||||
}
|
||||
|
||||
func (f *FakeLoadBalancers) tpName(https bool) string {
|
||||
if https {
|
||||
return fmt.Sprintf("%v-%v", targetHTTPSProxyPrefix, f.name)
|
||||
}
|
||||
return fmt.Sprintf("%v-%v", targetProxyPrefix, f.name)
|
||||
}
|
||||
|
||||
// String is the string method for FakeLoadBalancers.
|
||||
func (f *FakeLoadBalancers) String() string {
|
||||
msg := fmt.Sprintf(
|
||||
"Loadbalancer %v,\nforwarding rules:\n", f.name)
|
||||
for _, fw := range f.Fw {
|
||||
msg += fmt.Sprintf("\t%v\n", fw.Name)
|
||||
}
|
||||
msg += fmt.Sprintf("Target proxies\n")
|
||||
for _, tp := range f.Tp {
|
||||
msg += fmt.Sprintf("\t%v\n", tp.Name)
|
||||
}
|
||||
msg += fmt.Sprintf("UrlMaps\n")
|
||||
for _, um := range f.Um {
|
||||
msg += fmt.Sprintf("%v\n", um.Name)
|
||||
msg += fmt.Sprintf("\tHost Rules:\n")
|
||||
for _, hostRule := range um.HostRules {
|
||||
msg += fmt.Sprintf("\t\t%v\n", hostRule)
|
||||
}
|
||||
msg += fmt.Sprintf("\tPath Matcher:\n")
|
||||
for _, pathMatcher := range um.PathMatchers {
|
||||
msg += fmt.Sprintf("\t\t%v\n", pathMatcher.Name)
|
||||
for _, pathRule := range pathMatcher.PathRules {
|
||||
msg += fmt.Sprintf("\t\t\t%+v\n", pathRule)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// Forwarding Rule fakes
|
||||
|
||||
// GetGlobalForwardingRule returns a fake forwarding rule.
|
||||
func (f *FakeLoadBalancers) GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error) {
|
||||
for i := range f.Fw {
|
||||
if f.Fw[i].Name == name {
|
||||
return f.Fw[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Forwarding rule %v not found", name)
|
||||
}
|
||||
|
||||
// CreateGlobalForwardingRule fakes forwarding rule creation.
|
||||
func (f *FakeLoadBalancers) CreateGlobalForwardingRule(proxyLink, ip, name, portRange string) (*compute.ForwardingRule, error) {
|
||||
if ip == "" {
|
||||
ip = fmt.Sprintf(testIPManager.ip())
|
||||
}
|
||||
rule := &compute.ForwardingRule{
|
||||
Name: name,
|
||||
IPAddress: ip,
|
||||
Target: proxyLink,
|
||||
PortRange: portRange,
|
||||
IPProtocol: "TCP",
|
||||
SelfLink: name,
|
||||
}
|
||||
f.Fw = append(f.Fw, rule)
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// SetProxyForGlobalForwardingRule fakes setting a global forwarding rule.
|
||||
func (f *FakeLoadBalancers) SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxyLink string) error {
|
||||
for i := range f.Fw {
|
||||
if f.Fw[i].Name == fw.Name {
|
||||
f.Fw[i].Target = proxyLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteGlobalForwardingRule fakes deleting a global forwarding rule.
|
||||
func (f *FakeLoadBalancers) DeleteGlobalForwardingRule(name string) error {
|
||||
fw := []*compute.ForwardingRule{}
|
||||
for i := range f.Fw {
|
||||
if f.Fw[i].Name != name {
|
||||
fw = append(fw, f.Fw[i])
|
||||
}
|
||||
}
|
||||
f.Fw = fw
|
||||
return nil
|
||||
}
|
||||
|
||||
// UrlMaps fakes
|
||||
|
||||
// GetUrlMap fakes getting url maps from the cloud.
|
||||
func (f *FakeLoadBalancers) GetUrlMap(name string) (*compute.UrlMap, error) {
|
||||
for i := range f.Um {
|
||||
if f.Um[i].Name == name {
|
||||
return f.Um[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Url Map %v not found", name)
|
||||
}
|
||||
|
||||
// CreateUrlMap fakes url-map creation.
|
||||
func (f *FakeLoadBalancers) CreateUrlMap(backend *compute.BackendService, name string) (*compute.UrlMap, error) {
|
||||
urlMap := &compute.UrlMap{
|
||||
Name: name,
|
||||
DefaultService: backend.SelfLink,
|
||||
SelfLink: f.umName(),
|
||||
}
|
||||
f.Um = append(f.Um, urlMap)
|
||||
return urlMap, nil
|
||||
}
|
||||
|
||||
// UpdateUrlMap fakes updating url-maps.
|
||||
func (f *FakeLoadBalancers) UpdateUrlMap(urlMap *compute.UrlMap) (*compute.UrlMap, error) {
|
||||
for i := range f.Um {
|
||||
if f.Um[i].Name == urlMap.Name {
|
||||
f.Um[i] = urlMap
|
||||
return urlMap, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// DeleteUrlMap fakes url-map deletion.
|
||||
func (f *FakeLoadBalancers) DeleteUrlMap(name string) error {
|
||||
um := []*compute.UrlMap{}
|
||||
for i := range f.Um {
|
||||
if f.Um[i].Name != name {
|
||||
um = append(um, f.Um[i])
|
||||
}
|
||||
}
|
||||
f.Um = um
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetProxies fakes
|
||||
|
||||
// GetTargetHttpProxy fakes getting target http proxies from the cloud.
|
||||
func (f *FakeLoadBalancers) GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error) {
|
||||
for i := range f.Tp {
|
||||
if f.Tp[i].Name == name {
|
||||
return f.Tp[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Targetproxy %v not found", name)
|
||||
}
|
||||
|
||||
// CreateTargetHttpProxy fakes creating a target http proxy.
|
||||
func (f *FakeLoadBalancers) CreateTargetHttpProxy(urlMap *compute.UrlMap, name string) (*compute.TargetHttpProxy, error) {
|
||||
proxy := &compute.TargetHttpProxy{
|
||||
Name: name,
|
||||
UrlMap: urlMap.SelfLink,
|
||||
SelfLink: name,
|
||||
}
|
||||
f.Tp = append(f.Tp, proxy)
|
||||
return proxy, nil
|
||||
}
|
||||
|
||||
// DeleteTargetHttpProxy fakes deleting a target http proxy.
|
||||
func (f *FakeLoadBalancers) DeleteTargetHttpProxy(name string) error {
|
||||
tp := []*compute.TargetHttpProxy{}
|
||||
for i := range f.Tp {
|
||||
if f.Tp[i].Name != name {
|
||||
tp = append(tp, f.Tp[i])
|
||||
}
|
||||
}
|
||||
f.Tp = tp
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetUrlMapForTargetHttpProxy fakes setting an url-map for a target http proxy.
|
||||
func (f *FakeLoadBalancers) SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error {
|
||||
for i := range f.Tp {
|
||||
if f.Tp[i].Name == proxy.Name {
|
||||
f.Tp[i].UrlMap = urlMap.SelfLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TargetHttpsProxy fakes
|
||||
|
||||
// GetTargetHttpsProxy fakes getting target http proxies from the cloud.
|
||||
func (f *FakeLoadBalancers) GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error) {
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name == name {
|
||||
return f.Tps[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Targetproxy %v not found", name)
|
||||
}
|
||||
|
||||
// CreateTargetHttpsProxy fakes creating a target http proxy.
|
||||
func (f *FakeLoadBalancers) CreateTargetHttpsProxy(urlMap *compute.UrlMap, cert *compute.SslCertificate, name string) (*compute.TargetHttpsProxy, error) {
|
||||
proxy := &compute.TargetHttpsProxy{
|
||||
Name: name,
|
||||
UrlMap: urlMap.SelfLink,
|
||||
SslCertificates: []string{cert.SelfLink},
|
||||
SelfLink: name,
|
||||
}
|
||||
f.Tps = append(f.Tps, proxy)
|
||||
return proxy, nil
|
||||
}
|
||||
|
||||
// DeleteTargetHttpsProxy fakes deleting a target http proxy.
|
||||
func (f *FakeLoadBalancers) DeleteTargetHttpsProxy(name string) error {
|
||||
tp := []*compute.TargetHttpsProxy{}
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name != name {
|
||||
tp = append(tp, f.Tps[i])
|
||||
}
|
||||
}
|
||||
f.Tps = tp
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetUrlMapForTargetHttpsProxy fakes setting an url-map for a target http proxy.
|
||||
func (f *FakeLoadBalancers) SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error {
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name == proxy.Name {
|
||||
f.Tps[i].UrlMap = urlMap.SelfLink
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetSslCertificateForTargetHttpsProxy fakes out setting certificates.
|
||||
func (f *FakeLoadBalancers) SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, SSLCert *compute.SslCertificate) error {
|
||||
found := false
|
||||
for i := range f.Tps {
|
||||
if f.Tps[i].Name == proxy.Name {
|
||||
f.Tps[i].SslCertificates = []string{SSLCert.SelfLink}
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("Failed to find proxy %v", proxy.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UrlMap fakes

// CheckURLMap checks the URL map.
//
// It asserts that l7's url map routes exactly the host/path -> service
// entries in expectedMap and nothing else. expectedMap is consumed: every
// matched entry is deleted from it, and any leftovers at the end fail the
// test. The special utils.DefaultBackendKey entry, if present, names the
// expected default backend for the map and each path matcher.
func (f *FakeLoadBalancers) CheckURLMap(t *testing.T, l7 *L7, expectedMap map[string]utils.FakeIngressRuleValueMap) {
	um, err := f.GetUrlMap(l7.um.Name)
	if err != nil || um == nil {
		t.Fatalf("%v", err)
	}
	// Check the default backend
	var d string
	if h, ok := expectedMap[utils.DefaultBackendKey]; ok {
		if d, ok = h[utils.DefaultBackendKey]; ok {
			delete(h, utils.DefaultBackendKey)
		}
		delete(expectedMap, utils.DefaultBackendKey)
	}
	// The urlmap should have a default backend, and each path matcher.
	if d != "" && l7.um.DefaultService != d {
		t.Fatalf("Expected default backend %v found %v",
			d, l7.um.DefaultService)
	}

	for _, matcher := range l7.um.PathMatchers {
		var hostname string
		// There's a 1:1 mapping between pathmatchers and hosts
		for _, hostRule := range l7.um.HostRules {
			if matcher.Name == hostRule.PathMatcher {
				if len(hostRule.Hosts) != 1 {
					t.Fatalf("Unexpected hosts in hostrules %+v", hostRule)
				}
				if d != "" && matcher.DefaultService != d {
					t.Fatalf("Expected default backend %v found %v",
						d, matcher.DefaultService)
				}
				hostname = hostRule.Hosts[0]
				break
			}
		}
		// These are all pathrules for a single host, found above
		for _, rule := range matcher.PathRules {
			if len(rule.Paths) != 1 {
				t.Fatalf("Unexpected rule in pathrules %+v", rule)
			}
			pathRule := rule.Paths[0]
			if hostMap, ok := expectedMap[hostname]; !ok {
				t.Fatalf("Expected map for host %v: %v", hostname, hostMap)
			} else if svc, ok := expectedMap[hostname][pathRule]; !ok {
				t.Fatalf("Expected rule %v in host map", pathRule)
			} else if svc != rule.Service {
				t.Fatalf("Expected service %v found %v", svc, rule.Service)
			}
			// Remove matched entries so leftovers can be detected below.
			delete(expectedMap[hostname], pathRule)
			if len(expectedMap[hostname]) == 0 {
				delete(expectedMap, hostname)
			}
		}
	}
	if len(expectedMap) != 0 {
		t.Fatalf("Untranslated entries %+v", expectedMap)
	}
}
|
||||
|
||||
// Static IP fakes
|
||||
|
||||
// ReserveGlobalStaticIP fakes out static IP reservation.
|
||||
func (f *FakeLoadBalancers) ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error) {
|
||||
ip := &compute.Address{
|
||||
Name: name,
|
||||
Address: IPAddress,
|
||||
}
|
||||
f.IP = append(f.IP, ip)
|
||||
return ip, nil
|
||||
}
|
||||
|
||||
// GetGlobalStaticIP fakes out static IP retrieval.
|
||||
func (f *FakeLoadBalancers) GetGlobalStaticIP(name string) (*compute.Address, error) {
|
||||
for i := range f.IP {
|
||||
if f.IP[i].Name == name {
|
||||
return f.IP[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Static IP %v not found", name)
|
||||
}
|
||||
|
||||
// DeleteGlobalStaticIP fakes out static IP deletion.
|
||||
func (f *FakeLoadBalancers) DeleteGlobalStaticIP(name string) error {
|
||||
ip := []*compute.Address{}
|
||||
for i := range f.IP {
|
||||
if f.IP[i].Name != name {
|
||||
ip = append(ip, f.IP[i])
|
||||
}
|
||||
}
|
||||
f.IP = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
// SslCertificate fakes
|
||||
|
||||
// GetSslCertificate fakes out getting ssl certs.
|
||||
func (f *FakeLoadBalancers) GetSslCertificate(name string) (*compute.SslCertificate, error) {
|
||||
for i := range f.Certs {
|
||||
if f.Certs[i].Name == name {
|
||||
return f.Certs[i], nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Cert %v not found", name)
|
||||
}
|
||||
|
||||
// CreateSslCertificate fakes out certificate creation.
|
||||
func (f *FakeLoadBalancers) CreateSslCertificate(cert *compute.SslCertificate) (*compute.SslCertificate, error) {
|
||||
cert.SelfLink = cert.Name
|
||||
f.Certs = append(f.Certs, cert)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// DeleteSslCertificate fakes out certificate deletion.
|
||||
func (f *FakeLoadBalancers) DeleteSslCertificate(name string) error {
|
||||
certs := []*compute.SslCertificate{}
|
||||
for i := range f.Certs {
|
||||
if f.Certs[i].Name != name {
|
||||
certs = append(certs, f.Certs[i])
|
||||
}
|
||||
}
|
||||
f.Certs = certs
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFakeLoadBalancers creates a fake cloud client. Name is the name
// inserted into the selfLink of the associated resources for testing.
// eg: forwardingRule.SelfLink == k8s-fw-name.
func NewFakeLoadBalancers(name string) *FakeLoadBalancers {
	return &FakeLoadBalancers{
		// Only Fw is pre-allocated; the other resource slices grow on demand.
		Fw:   []*compute.ForwardingRule{},
		name: name,
	}
}
|
||||
74
controllers/gce/loadbalancers/interfaces.go
Normal file
74
controllers/gce/loadbalancers/interfaces.go
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
)
|
||||
|
||||
// LoadBalancers is an interface for managing all the gce resources needed by L7
// loadbalancers. We don't have individual pools for each of these resources
// because none of them are usable (or acquirable) stand-alone, unlike backends
// and instance groups. The dependency graph:
// ForwardingRule -> UrlMaps -> TargetProxies
type LoadBalancers interface {
	// Forwarding Rules
	GetGlobalForwardingRule(name string) (*compute.ForwardingRule, error)
	CreateGlobalForwardingRule(proxyLink, ip, name, portRange string) (*compute.ForwardingRule, error)
	DeleteGlobalForwardingRule(name string) error
	SetProxyForGlobalForwardingRule(fw *compute.ForwardingRule, proxy string) error

	// UrlMaps
	GetUrlMap(name string) (*compute.UrlMap, error)
	CreateUrlMap(backend *compute.BackendService, name string) (*compute.UrlMap, error)
	UpdateUrlMap(urlMap *compute.UrlMap) (*compute.UrlMap, error)
	DeleteUrlMap(name string) error

	// TargetProxies
	GetTargetHttpProxy(name string) (*compute.TargetHttpProxy, error)
	CreateTargetHttpProxy(urlMap *compute.UrlMap, name string) (*compute.TargetHttpProxy, error)
	DeleteTargetHttpProxy(name string) error
	SetUrlMapForTargetHttpProxy(proxy *compute.TargetHttpProxy, urlMap *compute.UrlMap) error

	// TargetHttpsProxies
	GetTargetHttpsProxy(name string) (*compute.TargetHttpsProxy, error)
	CreateTargetHttpsProxy(urlMap *compute.UrlMap, SSLCerts *compute.SslCertificate, name string) (*compute.TargetHttpsProxy, error)
	DeleteTargetHttpsProxy(name string) error
	SetUrlMapForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, urlMap *compute.UrlMap) error
	SetSslCertificateForTargetHttpsProxy(proxy *compute.TargetHttpsProxy, SSLCerts *compute.SslCertificate) error

	// SslCertificates
	GetSslCertificate(name string) (*compute.SslCertificate, error)
	CreateSslCertificate(certs *compute.SslCertificate) (*compute.SslCertificate, error)
	DeleteSslCertificate(name string) error

	// Static IP
	ReserveGlobalStaticIP(name, IPAddress string) (*compute.Address, error)
	GetGlobalStaticIP(name string) (*compute.Address, error)
	DeleteGlobalStaticIP(name string) error
}

// LoadBalancerPool is an interface to manage the cloud resources associated
// with a gce loadbalancer.
type LoadBalancerPool interface {
	// Get returns the loadbalancer with the given name.
	Get(name string) (*L7, error)
	// Add gets or creates a loadbalancer from runtime info.
	Add(ri *L7RuntimeInfo) error
	// Delete tears down and forgets the named loadbalancer.
	Delete(name string) error
	// Sync reconciles the pool against the given runtime infos.
	Sync(ri []*L7RuntimeInfo) error
	// GC deletes loadbalancers not in the given name list.
	GC(names []string) error
	// Shutdown deletes every loadbalancer in the pool.
	Shutdown() error
}
|
||||
789
controllers/gce/loadbalancers/loadbalancers.go
Normal file
789
controllers/gce/loadbalancers/loadbalancers.go
Normal file
|
|
@ -0,0 +1,789 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/backends"
|
||||
"k8s.io/contrib/ingress/controllers/gce/storage"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
const (

	// The gce api uses the name of a path rule to match a host rule.
	hostRulePrefix = "host"

	// DefaultHost is the host used if none is specified. It is a valid value
	// for the "Host" field recognized by GCE.
	DefaultHost = "*"

	// DefaultPath is the path used if none is specified. It is a valid path
	// recognized by GCE.
	DefaultPath = "/*"

	// A single target proxy/urlmap/forwarding rule is created per loadbalancer.
	// Tagged with the namespace/name of the Ingress.
	targetProxyPrefix         = "k8s-tp"
	targetHTTPSProxyPrefix    = "k8s-tps"
	sslCertPrefix             = "k8s-ssl"
	forwardingRulePrefix      = "k8s-fw"
	httpsForwardingRulePrefix = "k8s-fws"
	urlMapPrefix              = "k8s-um"
	httpDefaultPortRange      = "80-80"
	httpsDefaultPortRange     = "443-443"
)

// L7s implements LoadBalancerPool.
type L7s struct {
	// cloud manages the underlying GCE resources.
	cloud LoadBalancers
	// snapshotter tracks the L7s known to this pool.
	snapshotter storage.Snapshotter
	// TODO: Remove this field and always ask the BackendPool using the NodePort.
	glbcDefaultBackend *compute.BackendService
	// defaultBackendPool manages the shared default BackendService.
	defaultBackendPool backends.BackendPool
	// defaultBackendNodePort is the nodePort of the default backend service.
	defaultBackendNodePort int64
	// namer derives cloud resource names from Ingress names.
	namer utils.Namer
}
|
||||
|
||||
// NewLoadBalancerPool returns a new loadbalancer pool.
|
||||
// - cloud: implements LoadBalancers. Used to sync L7 loadbalancer resources
|
||||
// with the cloud.
|
||||
// - defaultBackendPool: a BackendPool used to manage the GCE BackendService for
|
||||
// the default backend.
|
||||
// - defaultBackendNodePort: The nodePort of the Kubernetes service representing
|
||||
// the default backend.
|
||||
func NewLoadBalancerPool(
|
||||
cloud LoadBalancers,
|
||||
defaultBackendPool backends.BackendPool,
|
||||
defaultBackendNodePort int64, namer utils.Namer) LoadBalancerPool {
|
||||
return &L7s{cloud, storage.NewInMemoryPool(), nil, defaultBackendPool, defaultBackendNodePort, namer}
|
||||
}
|
||||
|
||||
// create builds an *L7 for the given runtime info, lazily materializing the
// shared default backend first. It does not create any cloud resources for
// the L7 itself; that happens in edgeHop via Add.
func (l *L7s) create(ri *L7RuntimeInfo) (*L7, error) {
	// Lazily create a default backend so we don't tax users who don't care
	// about Ingress by consuming 1 of their 3 GCE BackendServices. This
	// BackendService is deleted when there are no more Ingresses, either
	// through Sync or Shutdown.
	if l.glbcDefaultBackend == nil {
		err := l.defaultBackendPool.Add(l.defaultBackendNodePort)
		if err != nil {
			return nil, err
		}
		l.glbcDefaultBackend, err = l.defaultBackendPool.Get(l.defaultBackendNodePort)
		if err != nil {
			return nil, err
		}
	}
	return &L7{
		runtimeInfo:        ri,
		Name:               l.namer.LBName(ri.Name),
		cloud:              l.cloud,
		glbcDefaultBackend: l.glbcDefaultBackend,
		namer:              l.namer,
		sslCert:            nil,
	}, nil
}
|
||||
|
||||
// Get returns the loadbalancer by name.
|
||||
func (l *L7s) Get(name string) (*L7, error) {
|
||||
name = l.namer.LBName(name)
|
||||
lb, exists := l.snapshotter.Get(name)
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("Loadbalancer %v not in pool", name)
|
||||
}
|
||||
return lb.(*L7), nil
|
||||
}
|
||||
|
||||
// Add gets or creates a loadbalancer.
// If the loadbalancer already exists, it checks that its edges are valid.
func (l *L7s) Add(ri *L7RuntimeInfo) (err error) {
	name := l.namer.LBName(ri.Name)

	// NOTE(review): Get applies LBName to its argument again, so the namer
	// runs twice on ri.Name here — this assumes LBName is idempotent; verify.
	lb, _ := l.Get(name)
	if lb == nil {
		glog.Infof("Creating l7 %v", name)
		lb, err = l.create(ri)
		if err != nil {
			return err
		}
	}
	// Add the lb to the pool, in case we create an UrlMap but run out
	// of quota in creating the ForwardingRule we still need to cleanup
	// the UrlMap during GC.
	defer l.snapshotter.Add(name, lb)

	// Why edge hop for the create?
	// The loadbalancer is a fictitious resource, it doesn't exist in gce. To
	// make it exist we need to create a collection of gce resources, done
	// through the edge hop.
	if err := lb.edgeHop(); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// Delete deletes a loadbalancer by name.
|
||||
func (l *L7s) Delete(name string) error {
|
||||
name = l.namer.LBName(name)
|
||||
lb, err := l.Get(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.Infof("Deleting lb %v", name)
|
||||
if err := lb.Cleanup(); err != nil {
|
||||
return err
|
||||
}
|
||||
l.snapshotter.Delete(name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sync loadbalancers with the given runtime info from the controller.
// The shared default backend is synced first, each runtime info is added
// (or edge-hopped), and the default backend is torn down when the desired
// set of loadbalancers is empty.
func (l *L7s) Sync(lbs []*L7RuntimeInfo) error {
	glog.V(3).Infof("Creating loadbalancers %+v", lbs)

	// The default backend is completely managed by the l7 pool.
	// This includes recreating it if it's deleted, or fixing broken links.
	if err := l.defaultBackendPool.Sync([]int64{l.defaultBackendNodePort}); err != nil {
		return err
	}
	// create new loadbalancers, perform an edge hop for existing
	for _, ri := range lbs {
		if err := l.Add(ri); err != nil {
			return err
		}
	}
	// Tear down the default backend when there are no more loadbalancers
	// because the cluster could go down anytime and we'd leak it otherwise.
	if len(lbs) == 0 {
		if err := l.defaultBackendPool.Delete(l.defaultBackendNodePort); err != nil {
			return err
		}
		l.glbcDefaultBackend = nil
	}
	return nil
}
|
||||
|
||||
// GC garbage collects loadbalancers not in the input list.
|
||||
func (l *L7s) GC(names []string) error {
|
||||
knownLoadBalancers := sets.NewString()
|
||||
for _, n := range names {
|
||||
knownLoadBalancers.Insert(l.namer.LBName(n))
|
||||
}
|
||||
pool := l.snapshotter.Snapshot()
|
||||
|
||||
// Delete unknown loadbalancers
|
||||
for name := range pool {
|
||||
if knownLoadBalancers.Has(name) {
|
||||
continue
|
||||
}
|
||||
glog.V(3).Infof("GCing loadbalancer %v", name)
|
||||
if err := l.Delete(name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown deletes every loadbalancer in the pool (by GCing against an empty
// name list) and then shuts down the shared default backend pool.
func (l *L7s) Shutdown() error {
	if err := l.GC([]string{}); err != nil {
		return err
	}
	if err := l.defaultBackendPool.Shutdown(); err != nil {
		return err
	}
	glog.Infof("Loadbalancer pool shutdown.")
	return nil
}
|
||||
|
||||
// TLSCerts encapsulates .pem encoded TLS information.
type TLSCerts struct {
	// Key is private key.
	Key string
	// Cert is a public key.
	Cert string
	// Chain is a certificate chain.
	Chain string
}

// L7RuntimeInfo is info passed to this module from the controller runtime.
type L7RuntimeInfo struct {
	// Name is the name of a loadbalancer.
	Name string
	// IP is the desired ip of the loadbalancer, eg from a staticIP.
	IP string
	// TLS are the tls certs to use in termination.
	TLS *TLSCerts
	// AllowHTTP will not setup :80, if TLS is nil and AllowHTTP is set,
	// no loadbalancer is created.
	AllowHTTP bool
}
|
||||
|
||||
// L7 represents a single L7 loadbalancer.
|
||||
type L7 struct {
|
||||
Name string
|
||||
// runtimeInfo is non-cloudprovider information passed from the controller.
|
||||
runtimeInfo *L7RuntimeInfo
|
||||
// cloud is an interface to manage loadbalancers in the GCE cloud.
|
||||
cloud LoadBalancers
|
||||
// um is the UrlMap associated with this L7.
|
||||
um *compute.UrlMap
|
||||
// tp is the TargetHTTPProxy associated with this L7.
|
||||
tp *compute.TargetHttpProxy
|
||||
// tps is the TargetHTTPSProxy associated with this L7.
|
||||
tps *compute.TargetHttpsProxy
|
||||
// fw is the GlobalForwardingRule that points to the TargetHTTPProxy.
|
||||
fw *compute.ForwardingRule
|
||||
// fws is the GlobalForwardingRule that points to the TargetHTTPSProxy.
|
||||
fws *compute.ForwardingRule
|
||||
// ip is the static-ip associated with both GlobalForwardingRules.
|
||||
ip *compute.Address
|
||||
// sslCert is the ssl cert associated with the targetHTTPSProxy.
|
||||
// TODO: Make this a custom type that contains crt+key
|
||||
sslCert *compute.SslCertificate
|
||||
// glbcDefaultBacked is the backend to use if no path rules match.
|
||||
// TODO: Expose this to users.
|
||||
glbcDefaultBackend *compute.BackendService
|
||||
// namer is used to compute names of the various sub-components of an L7.
|
||||
namer utils.Namer
|
||||
}
|
||||
|
||||
func (l *L7) checkUrlMap(backend *compute.BackendService) (err error) {
|
||||
if l.glbcDefaultBackend == nil {
|
||||
return fmt.Errorf("Cannot create urlmap without default backend.")
|
||||
}
|
||||
urlMapName := l.namer.Truncate(fmt.Sprintf("%v-%v", urlMapPrefix, l.Name))
|
||||
urlMap, _ := l.cloud.GetUrlMap(urlMapName)
|
||||
if urlMap != nil {
|
||||
glog.V(3).Infof("Url map %v already exists", urlMap.Name)
|
||||
l.um = urlMap
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.Infof("Creating url map %v for backend %v", urlMapName, l.glbcDefaultBackend.Name)
|
||||
urlMap, err = l.cloud.CreateUrlMap(l.glbcDefaultBackend, urlMapName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.um = urlMap
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) checkProxy() (err error) {
|
||||
if l.um == nil {
|
||||
return fmt.Errorf("Cannot create proxy without urlmap.")
|
||||
}
|
||||
proxyName := l.namer.Truncate(fmt.Sprintf("%v-%v", targetProxyPrefix, l.Name))
|
||||
proxy, _ := l.cloud.GetTargetHttpProxy(proxyName)
|
||||
if proxy == nil {
|
||||
glog.Infof("Creating new http proxy for urlmap %v", l.um.Name)
|
||||
proxy, err = l.cloud.CreateTargetHttpProxy(l.um, proxyName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.tp = proxy
|
||||
return nil
|
||||
}
|
||||
if !utils.CompareLinks(proxy.UrlMap, l.um.SelfLink) {
|
||||
glog.Infof("Proxy %v has the wrong url map, setting %v overwriting %v",
|
||||
proxy.Name, l.um, proxy.UrlMap)
|
||||
if err := l.cloud.SetUrlMapForTargetHttpProxy(proxy, l.um); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.tp = proxy
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) checkSSLCert() (err error) {
|
||||
// TODO: Currently, GCE only supports a single certificate per static IP
|
||||
// so we don't need to bother with disambiguation. Naming the cert after
|
||||
// the loadbalancer is a simplification.
|
||||
certName := l.namer.Truncate(fmt.Sprintf("%v-%v", sslCertPrefix, l.Name))
|
||||
cert, _ := l.cloud.GetSslCertificate(certName)
|
||||
if cert == nil {
|
||||
glog.Infof("Creating new sslCertificates %v for %v", l.Name, certName)
|
||||
cert, err = l.cloud.CreateSslCertificate(&compute.SslCertificate{
|
||||
Name: certName,
|
||||
Certificate: l.runtimeInfo.TLS.Cert,
|
||||
PrivateKey: l.runtimeInfo.TLS.Key,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.sslCert = cert
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) checkHttpsProxy() (err error) {
|
||||
if l.sslCert == nil {
|
||||
glog.V(3).Infof("No SSL certificates for %v, will not create HTTPS proxy.", l.Name)
|
||||
return nil
|
||||
}
|
||||
if l.um == nil {
|
||||
return fmt.Errorf("No UrlMap for %v, will not create HTTPS proxy.", l.Name)
|
||||
}
|
||||
proxyName := l.namer.Truncate(fmt.Sprintf("%v-%v", targetHTTPSProxyPrefix, l.Name))
|
||||
proxy, _ := l.cloud.GetTargetHttpsProxy(proxyName)
|
||||
if proxy == nil {
|
||||
glog.Infof("Creating new https proxy for urlmap %v", l.um.Name)
|
||||
proxy, err = l.cloud.CreateTargetHttpsProxy(l.um, l.sslCert, proxyName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.tps = proxy
|
||||
return nil
|
||||
}
|
||||
if !utils.CompareLinks(proxy.UrlMap, l.um.SelfLink) {
|
||||
glog.Infof("Https proxy %v has the wrong url map, setting %v overwriting %v",
|
||||
proxy.Name, l.um, proxy.UrlMap)
|
||||
if err := l.cloud.SetUrlMapForTargetHttpsProxy(proxy, l.um); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cert := proxy.SslCertificates[0]
|
||||
if !utils.CompareLinks(cert, l.sslCert.SelfLink) {
|
||||
glog.Infof("Https proxy %v has the wrong ssl certs, setting %v overwriting %v",
|
||||
proxy.Name, l.sslCert.SelfLink, cert)
|
||||
if err := l.cloud.SetSslCertificateForTargetHttpsProxy(proxy, l.sslCert); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
glog.V(3).Infof("Created target https proxy %v", proxy.Name)
|
||||
l.tps = proxy
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) checkForwardingRule(name, proxyLink, ip, portRange string) (fw *compute.ForwardingRule, err error) {
|
||||
fw, _ = l.cloud.GetGlobalForwardingRule(name)
|
||||
if fw != nil && (ip != "" && fw.IPAddress != ip || fw.PortRange != portRange) {
|
||||
glog.Warningf("Recreating forwarding rule %v(%v), so it has %v(%v)",
|
||||
fw.IPAddress, fw.PortRange, ip, portRange)
|
||||
if err := l.cloud.DeleteGlobalForwardingRule(name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
fw = nil
|
||||
}
|
||||
if fw == nil {
|
||||
parts := strings.Split(proxyLink, "/")
|
||||
glog.Infof("Creating forwarding rule for proxy %v and ip %v:%v", parts[len(parts)-1:], ip, portRange)
|
||||
fw, err = l.cloud.CreateGlobalForwardingRule(proxyLink, ip, name, portRange)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// TODO: If the port range and protocol don't match, recreate the rule
|
||||
if utils.CompareLinks(fw.Target, proxyLink) {
|
||||
glog.V(3).Infof("Forwarding rule %v already exists", fw.Name)
|
||||
} else {
|
||||
glog.Infof("Forwarding rule %v has the wrong proxy, setting %v overwriting %v",
|
||||
fw.Name, fw.Target, proxyLink)
|
||||
if err := l.cloud.SetProxyForGlobalForwardingRule(fw, proxyLink); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return fw, nil
|
||||
}
|
||||
|
||||
func (l *L7) checkHttpForwardingRule() (err error) {
|
||||
if l.tp == nil {
|
||||
return fmt.Errorf("Cannot create forwarding rule without proxy.")
|
||||
}
|
||||
var address string
|
||||
if l.ip != nil {
|
||||
address = l.ip.Address
|
||||
}
|
||||
name := l.namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, l.Name))
|
||||
fw, err := l.checkForwardingRule(name, l.tp.SelfLink, address, httpDefaultPortRange)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.fw = fw
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) checkHttpsForwardingRule() (err error) {
|
||||
if l.tps == nil {
|
||||
glog.V(3).Infof("No https target proxy for %v, not created https forwarding rule", l.Name)
|
||||
return nil
|
||||
}
|
||||
var address string
|
||||
if l.ip != nil {
|
||||
address = l.ip.Address
|
||||
}
|
||||
name := l.namer.Truncate(fmt.Sprintf("%v-%v", httpsForwardingRulePrefix, l.Name))
|
||||
fws, err := l.checkForwardingRule(name, l.tps.SelfLink, address, httpsDefaultPortRange)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.fws = fws
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) checkStaticIP() (err error) {
|
||||
if l.fw == nil || l.fw.IPAddress == "" {
|
||||
return fmt.Errorf("Will not create static IP without a forwarding rule.")
|
||||
}
|
||||
staticIPName := l.namer.Truncate(fmt.Sprintf("%v-%v", forwardingRulePrefix, l.Name))
|
||||
ip, _ := l.cloud.GetGlobalStaticIP(staticIPName)
|
||||
if ip == nil {
|
||||
glog.Infof("Creating static ip %v", staticIPName)
|
||||
ip, err = l.cloud.ReserveGlobalStaticIP(staticIPName, l.fw.IPAddress)
|
||||
if err != nil {
|
||||
if utils.IsHTTPErrorCode(err, http.StatusConflict) ||
|
||||
utils.IsHTTPErrorCode(err, http.StatusBadRequest) {
|
||||
glog.V(3).Infof("IP %v(%v) is already reserved, assuming it is OK to use.",
|
||||
l.fw.IPAddress, staticIPName)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.ip = ip
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) edgeHop() error {
|
||||
if err := l.checkUrlMap(l.glbcDefaultBackend); err != nil {
|
||||
return err
|
||||
}
|
||||
if l.runtimeInfo.AllowHTTP {
|
||||
if err := l.edgeHopHttp(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Defer promoting an emphemral to a static IP till it's really needed.
|
||||
if l.runtimeInfo.AllowHTTP && l.runtimeInfo.TLS != nil {
|
||||
if err := l.checkStaticIP(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.runtimeInfo.TLS != nil {
|
||||
if err := l.edgeHopHttps(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) edgeHopHttp() error {
|
||||
if err := l.checkProxy(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.checkHttpForwardingRule(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *L7) edgeHopHttps() error {
|
||||
if err := l.checkSSLCert(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.checkHttpsProxy(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.checkHttpsForwardingRule(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetIP returns the ip associated with the forwarding rule for this l7.
|
||||
func (l *L7) GetIP() string {
|
||||
if l.fw != nil {
|
||||
return l.fw.IPAddress
|
||||
}
|
||||
if l.fws != nil {
|
||||
return l.fws.IPAddress
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// getNameForPathMatcher returns a name for a pathMatcher based on the given host rule.
|
||||
// The host rule can be a regex, the path matcher name used to associate the 2 cannot.
|
||||
func getNameForPathMatcher(hostRule string) string {
|
||||
hasher := md5.New()
|
||||
hasher.Write([]byte(hostRule))
|
||||
return fmt.Sprintf("%v%v", hostRulePrefix, hex.EncodeToString(hasher.Sum(nil)))
|
||||
}
|
||||
|
||||
// UpdateUrlMap translates the given hostname: endpoint->port mapping into a gce url map.
|
||||
//
|
||||
// HostRule: Conceptually contains all PathRules for a given host.
|
||||
// PathMatcher: Associates a path rule with a host rule. Mostly an optimization.
|
||||
// PathRule: Maps a single path regex to a backend.
|
||||
//
|
||||
// The GCE url map allows multiple hosts to share url->backend mappings without duplication, eg:
|
||||
// Host: foo(PathMatcher1), bar(PathMatcher1,2)
|
||||
// PathMatcher1:
|
||||
// /a -> b1
|
||||
// /b -> b2
|
||||
// PathMatcher2:
|
||||
// /c -> b1
|
||||
// This leads to a lot of complexity in the common case, where all we want is a mapping of
|
||||
// host->{/path: backend}.
|
||||
//
|
||||
// Consider some alternatives:
|
||||
// 1. Using a single backend per PathMatcher:
|
||||
// Host: foo(PathMatcher1,3) bar(PathMatcher1,2,3)
|
||||
// PathMatcher1:
|
||||
// /a -> b1
|
||||
// PathMatcher2:
|
||||
// /c -> b1
|
||||
// PathMatcher3:
|
||||
// /b -> b2
|
||||
// 2. Using a single host per PathMatcher:
|
||||
// Host: foo(PathMatcher1)
|
||||
// PathMatcher1:
|
||||
// /a -> b1
|
||||
// /b -> b2
|
||||
// Host: bar(PathMatcher2)
|
||||
// PathMatcher2:
|
||||
// /a -> b1
|
||||
// /b -> b2
|
||||
// /c -> b1
|
||||
// In the context of kubernetes services, 2 makes more sense, because we
|
||||
// rarely want to lookup backends (service:nodeport). When a service is
|
||||
// deleted, we need to find all host PathMatchers that have the backend
|
||||
// and remove the mapping. When a new path is added to a host (happens
|
||||
// more frequently than service deletion) we just need to lookup the 1
|
||||
// pathmatcher of the host.
|
||||
func (l *L7) UpdateUrlMap(ingressRules utils.GCEURLMap) error {
|
||||
if l.um == nil {
|
||||
return fmt.Errorf("Cannot add url without an urlmap.")
|
||||
}
|
||||
glog.V(3).Infof("Updating urlmap for l7 %v", l.Name)
|
||||
|
||||
// All UrlMaps must have a default backend. If the Ingress has a default
|
||||
// backend, it applies to all host rules as well as to the urlmap itself.
|
||||
// If it doesn't the urlmap might have a stale default, so replace it with
|
||||
// glbc's default backend.
|
||||
defaultBackend := ingressRules.GetDefaultBackend()
|
||||
if defaultBackend != nil {
|
||||
l.um.DefaultService = defaultBackend.SelfLink
|
||||
} else {
|
||||
l.um.DefaultService = l.glbcDefaultBackend.SelfLink
|
||||
}
|
||||
glog.V(3).Infof("Updating url map %+v", ingressRules)
|
||||
|
||||
for hostname, urlToBackend := range ingressRules {
|
||||
// Find the hostrule
|
||||
// Find the path matcher
|
||||
// Add all given endpoint:backends to pathRules in path matcher
|
||||
var hostRule *compute.HostRule
|
||||
pmName := getNameForPathMatcher(hostname)
|
||||
for _, hr := range l.um.HostRules {
|
||||
// TODO: Hostnames must be exact match?
|
||||
if hr.Hosts[0] == hostname {
|
||||
hostRule = hr
|
||||
break
|
||||
}
|
||||
}
|
||||
if hostRule == nil {
|
||||
// This is a new host
|
||||
hostRule = &compute.HostRule{
|
||||
Hosts: []string{hostname},
|
||||
PathMatcher: pmName,
|
||||
}
|
||||
// Why not just clobber existing host rules?
|
||||
// Because we can have multiple loadbalancers point to a single
|
||||
// gce url map when we have IngressClaims.
|
||||
l.um.HostRules = append(l.um.HostRules, hostRule)
|
||||
}
|
||||
var pathMatcher *compute.PathMatcher
|
||||
for _, pm := range l.um.PathMatchers {
|
||||
if pm.Name == hostRule.PathMatcher {
|
||||
pathMatcher = pm
|
||||
break
|
||||
}
|
||||
}
|
||||
if pathMatcher == nil {
|
||||
// This is a dangling or new host
|
||||
pathMatcher = &compute.PathMatcher{Name: pmName}
|
||||
l.um.PathMatchers = append(l.um.PathMatchers, pathMatcher)
|
||||
}
|
||||
pathMatcher.DefaultService = l.um.DefaultService
|
||||
|
||||
// TODO: Every update replaces the entire path map. This will need to
|
||||
// change when we allow joining. Right now we call a single method
|
||||
// to verify current == desired and add new url mappings.
|
||||
pathMatcher.PathRules = []*compute.PathRule{}
|
||||
|
||||
// Longest prefix wins. For equal rules, first hit wins, i.e the second
|
||||
// /foo rule when the first is deleted.
|
||||
for expr, be := range urlToBackend {
|
||||
pathMatcher.PathRules = append(
|
||||
pathMatcher.PathRules, &compute.PathRule{Paths: []string{expr}, Service: be.SelfLink})
|
||||
}
|
||||
}
|
||||
um, err := l.cloud.UpdateUrlMap(l.um)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.um = um
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleanup deletes resources specific to this l7 in the right order.
|
||||
// forwarding rule -> target proxy -> url map
|
||||
// This leaves backends and health checks, which are shared across loadbalancers.
|
||||
func (l *L7) Cleanup() error {
|
||||
if l.fw != nil {
|
||||
glog.Infof("Deleting global forwarding rule %v", l.fw.Name)
|
||||
if err := l.cloud.DeleteGlobalForwardingRule(l.fw.Name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.fw = nil
|
||||
}
|
||||
if l.fws != nil {
|
||||
glog.Infof("Deleting global forwarding rule %v", l.fws.Name)
|
||||
if err := l.cloud.DeleteGlobalForwardingRule(l.fws.Name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
l.fws = nil
|
||||
}
|
||||
}
|
||||
if l.ip != nil {
|
||||
glog.Infof("Deleting static IP %v(%v)", l.ip.Name, l.ip.Address)
|
||||
if err := l.cloud.DeleteGlobalStaticIP(l.ip.Name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
l.ip = nil
|
||||
}
|
||||
}
|
||||
if l.tps != nil {
|
||||
glog.Infof("Deleting target https proxy %v", l.tps.Name)
|
||||
if err := l.cloud.DeleteTargetHttpsProxy(l.tps.Name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.tps = nil
|
||||
}
|
||||
if l.sslCert != nil {
|
||||
glog.Infof("Deleting sslcert %v", l.sslCert.Name)
|
||||
if err := l.cloud.DeleteSslCertificate(l.sslCert.Name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.sslCert = nil
|
||||
}
|
||||
if l.tp != nil {
|
||||
glog.Infof("Deleting target http proxy %v", l.tp.Name)
|
||||
if err := l.cloud.DeleteTargetHttpProxy(l.tp.Name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.tp = nil
|
||||
}
|
||||
if l.um != nil {
|
||||
glog.Infof("Deleting url map %v", l.um.Name)
|
||||
if err := l.cloud.DeleteUrlMap(l.um.Name); err != nil {
|
||||
if !utils.IsHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.um = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBackendNames returns the names of backends in this L7 urlmap.
|
||||
func (l *L7) getBackendNames() []string {
|
||||
if l.um == nil {
|
||||
return []string{}
|
||||
}
|
||||
beNames := sets.NewString()
|
||||
for _, pathMatcher := range l.um.PathMatchers {
|
||||
for _, pathRule := range pathMatcher.PathRules {
|
||||
// This is gross, but the urlmap only has links to backend services.
|
||||
parts := strings.Split(pathRule.Service, "/")
|
||||
name := parts[len(parts)-1]
|
||||
if name != "" {
|
||||
beNames.Insert(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
// The default Service recorded in the urlMap is a link to the backend.
|
||||
// Note that this can either be user specified, or the L7 controller's
|
||||
// global default.
|
||||
parts := strings.Split(l.um.DefaultService, "/")
|
||||
defaultBackendName := parts[len(parts)-1]
|
||||
if defaultBackendName != "" {
|
||||
beNames.Insert(defaultBackendName)
|
||||
}
|
||||
return beNames.List()
|
||||
}
|
||||
|
||||
// GetLBAnnotations returns the annotations of an l7. This includes it's current status.
|
||||
func GetLBAnnotations(l7 *L7, existing map[string]string, backendPool backends.BackendPool) map[string]string {
|
||||
if existing == nil {
|
||||
existing = map[string]string{}
|
||||
}
|
||||
backends := l7.getBackendNames()
|
||||
backendState := map[string]string{}
|
||||
for _, beName := range backends {
|
||||
backendState[beName] = backendPool.Status(beName)
|
||||
}
|
||||
jsonBackendState := "Unknown"
|
||||
b, err := json.Marshal(backendState)
|
||||
if err == nil {
|
||||
jsonBackendState = string(b)
|
||||
}
|
||||
existing[fmt.Sprintf("%v/url-map", utils.K8sAnnotationPrefix)] = l7.um.Name
|
||||
// Forwarding rule and target proxy might not exist if allowHTTP == false
|
||||
if l7.fw != nil {
|
||||
existing[fmt.Sprintf("%v/forwarding-rule", utils.K8sAnnotationPrefix)] = l7.fw.Name
|
||||
}
|
||||
if l7.tp != nil {
|
||||
existing[fmt.Sprintf("%v/target-proxy", utils.K8sAnnotationPrefix)] = l7.tp.Name
|
||||
}
|
||||
// HTTPs resources might not exist if TLS == nil
|
||||
if l7.fws != nil {
|
||||
existing[fmt.Sprintf("%v/https-forwarding-rule", utils.K8sAnnotationPrefix)] = l7.fws.Name
|
||||
}
|
||||
if l7.tps != nil {
|
||||
existing[fmt.Sprintf("%v/https-target-proxy", utils.K8sAnnotationPrefix)] = l7.tps.Name
|
||||
}
|
||||
if l7.ip != nil {
|
||||
existing[fmt.Sprintf("%v/static-ip", utils.K8sAnnotationPrefix)] = l7.ip.Name
|
||||
}
|
||||
// TODO: We really want to know *when* a backend flipped states.
|
||||
existing[fmt.Sprintf("%v/backends", utils.K8sAnnotationPrefix)] = jsonBackendState
|
||||
return existing
|
||||
}
|
||||
189
controllers/gce/loadbalancers/loadbalancers_test.go
Normal file
189
controllers/gce/loadbalancers/loadbalancers_test.go
Normal file
|
|
@ -0,0 +1,189 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loadbalancers
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"k8s.io/contrib/ingress/controllers/gce/backends"
|
||||
"k8s.io/contrib/ingress/controllers/gce/healthchecks"
|
||||
"k8s.io/contrib/ingress/controllers/gce/instances"
|
||||
"k8s.io/contrib/ingress/controllers/gce/utils"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
|
||||
const (
	// Node port assigned to the fake default backend in these tests.
	testDefaultBeNodePort = int64(3000)
	// Zone used for fake instance groups.
	defaultZone = "default-zone"
)
|
||||
|
||||
func newFakeLoadBalancerPool(f LoadBalancers, t *testing.T) LoadBalancerPool {
|
||||
fakeBackends := backends.NewFakeBackendServices()
|
||||
fakeIGs := instances.NewFakeInstanceGroups(sets.NewString())
|
||||
fakeHCs := healthchecks.NewFakeHealthChecks()
|
||||
namer := utils.Namer{}
|
||||
healthChecker := healthchecks.NewHealthChecker(fakeHCs, "/", namer)
|
||||
backendPool := backends.NewBackendPool(
|
||||
fakeBackends, healthChecker, instances.NewNodePool(fakeIGs, defaultZone), namer)
|
||||
return NewLoadBalancerPool(f, backendPool, testDefaultBeNodePort, namer)
|
||||
}
|
||||
|
||||
func TestCreateHTTPLoadBalancer(t *testing.T) {
|
||||
// This should NOT create the forwarding rule and target proxy
|
||||
// associated with the HTTPS branch of this loadbalancer.
|
||||
lbInfo := &L7RuntimeInfo{Name: "test", AllowHTTP: true}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Add(lbInfo)
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tp, err := f.GetTargetHttpProxy(f.tpName(false))
|
||||
if err != nil || tp.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fw, err := f.GetGlobalForwardingRule(f.fwName(false))
|
||||
if err != nil || fw.Target != tp.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateHTTPSLoadBalancer(t *testing.T) {
|
||||
// This should NOT create the forwarding rule and target proxy
|
||||
// associated with the HTTP branch of this loadbalancer.
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: false,
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Add(lbInfo)
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tps, err := f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil || tps.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fws, err := f.GetGlobalForwardingRule(f.fwName(true))
|
||||
if err != nil || fws.Target != tps.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateBothLoadBalancers(t *testing.T) {
|
||||
// This should create 2 forwarding rules and target proxies
|
||||
// but they should use the same urlmap, and have the same
|
||||
// static ip.
|
||||
lbInfo := &L7RuntimeInfo{
|
||||
Name: "test",
|
||||
AllowHTTP: true,
|
||||
TLS: &TLSCerts{Key: "key", Cert: "cert"},
|
||||
}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Add(lbInfo)
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil || l7 == nil {
|
||||
t.Fatalf("Expected l7 not created")
|
||||
}
|
||||
um, err := f.GetUrlMap(f.umName())
|
||||
if err != nil ||
|
||||
um.DefaultService != pool.(*L7s).glbcDefaultBackend.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tps, err := f.GetTargetHttpsProxy(f.tpName(true))
|
||||
if err != nil || tps.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
tp, err := f.GetTargetHttpProxy(f.tpName(false))
|
||||
if err != nil || tp.UrlMap != um.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fws, err := f.GetGlobalForwardingRule(f.fwName(true))
|
||||
if err != nil || fws.Target != tps.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
fw, err := f.GetGlobalForwardingRule(f.fwName(false))
|
||||
if err != nil || fw.Target != tp.SelfLink {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
ip, err := f.GetGlobalStaticIP(f.fwName(false))
|
||||
if err != nil || ip.Address != fw.IPAddress || ip.Address != fws.IPAddress {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateUrlMap(t *testing.T) {
|
||||
um1 := utils.GCEURLMap{
|
||||
"bar.example.com": {
|
||||
"/bar2": &compute.BackendService{SelfLink: "bar2svc"},
|
||||
},
|
||||
}
|
||||
um2 := utils.GCEURLMap{
|
||||
"foo.example.com": {
|
||||
"/foo1": &compute.BackendService{SelfLink: "foo1svc"},
|
||||
"/foo2": &compute.BackendService{SelfLink: "foo2svc"},
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": &compute.BackendService{SelfLink: "bar1svc"},
|
||||
},
|
||||
}
|
||||
um2.PutDefaultBackend(&compute.BackendService{SelfLink: "default"})
|
||||
|
||||
lbInfo := &L7RuntimeInfo{Name: "test", AllowHTTP: true}
|
||||
f := NewFakeLoadBalancers(lbInfo.Name)
|
||||
pool := newFakeLoadBalancerPool(f, t)
|
||||
pool.Add(lbInfo)
|
||||
l7, err := pool.Get(lbInfo.Name)
|
||||
if err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
for _, ir := range []utils.GCEURLMap{um1, um2} {
|
||||
if err := l7.UpdateUrlMap(ir); err != nil {
|
||||
t.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
// The final map doesn't contain /bar2
|
||||
expectedMap := map[string]utils.FakeIngressRuleValueMap{
|
||||
utils.DefaultBackendKey: {
|
||||
utils.DefaultBackendKey: "default",
|
||||
},
|
||||
"foo.example.com": {
|
||||
"/foo1": "foo1svc",
|
||||
"/foo2": "foo2svc",
|
||||
},
|
||||
"bar.example.com": {
|
||||
"/bar1": "bar1svc",
|
||||
},
|
||||
}
|
||||
f.CheckURLMap(t, l7, expectedMap)
|
||||
}
|
||||
245
controllers/gce/main.go
Normal file
245
controllers/gce/main.go
Normal file
|
|
@ -0,0 +1,245 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
go_flag "flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
flag "github.com/spf13/pflag"
|
||||
"k8s.io/contrib/ingress/controllers/gce/controller"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||||
"k8s.io/kubernetes/pkg/util/wait"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Entrypoint of GLBC. Example invocation:
|
||||
// 1. In a pod:
|
||||
// glbc --delete-all-on-quit
|
||||
// 2. Dry run (on localhost):
|
||||
// $ kubectl proxy --api-prefix="/"
|
||||
// $ glbc --proxy="http://localhost:proxyport"
|
||||
|
||||
const (
	// lbApiPort is the port on which the loadbalancer controller serves a
	// minimal api (/healthz, /delete-all-and-quit etc).
	lbApiPort = 8081

	// A delimiter used for clarity in naming GCE resources.
	clusterNameDelimiter = "--"

	// Arbitrarily chosen alphanumeric character to use in constructing resource
	// names, eg: to avoid cases where we end up with a name ending in '-'.
	alphaNumericChar = "0"

	// Current docker image version. Only used in debug logging.
	imageVersion = "glbc:0.6.0"
)
|
||||
|
||||
var (
|
||||
flags = flag.NewFlagSet(
|
||||
`gclb: gclb --runngin-in-cluster=false --default-backend-node-port=123`,
|
||||
flag.ExitOnError)
|
||||
|
||||
proxyUrl = flags.String("proxy", "",
|
||||
`If specified, the controller assumes a kubctl proxy server is running on the
|
||||
given url and creates a proxy client and fake cluster manager. Results are
|
||||
printed to stdout and no changes are made to your cluster. This flag is for
|
||||
testing.`)
|
||||
|
||||
clusterName = flags.String("cluster-uid", controller.DefaultClusterUID,
|
||||
`Optional, used to tag cluster wide, shared loadbalancer resources such
|
||||
as instance groups. Use this flag if you'd like to continue using the
|
||||
same resources across a pod restart. Note that this does not need to
|
||||
match the name of you Kubernetes cluster, it's just an arbitrary name
|
||||
used to tag/lookup cloud resources.`)
|
||||
|
||||
inCluster = flags.Bool("running-in-cluster", true,
|
||||
`Optional, if this controller is running in a kubernetes cluster, use the
|
||||
pod secrets for creating a Kubernetes client.`)
|
||||
|
||||
resyncPeriod = flags.Duration("sync-period", 30*time.Second,
|
||||
`Relist and confirm cloud resources this often.`)
|
||||
|
||||
deleteAllOnQuit = flags.Bool("delete-all-on-quit", false,
|
||||
`If true, the controller will delete all Ingress and the associated
|
||||
external cloud resources as it's shutting down. Mostly used for
|
||||
testing. In normal environments the controller should only delete
|
||||
a loadbalancer if the associated Ingress is deleted.`)
|
||||
|
||||
defaultSvc = flags.String("default-backend-service", "kube-system/default-http-backend",
|
||||
`Service used to serve a 404 page for the default backend. Takes the form
|
||||
namespace/name. The controller uses the first node port of this Service for
|
||||
the default backend.`)
|
||||
|
||||
healthCheckPath = flags.String("health-check-path", "/",
|
||||
`Path used to health-check a backend service. All Services must serve
|
||||
a 200 page on this path. Currently this is only configurable globally.`)
|
||||
|
||||
watchNamespace = flags.String("watch-namespace", api.NamespaceAll,
|
||||
`Namespace to watch for Ingress/Services/Endpoints.`)
|
||||
|
||||
verbose = flags.Bool("verbose", false,
|
||||
`If true, logs are displayed at V(4), otherwise V(2).`)
|
||||
)
|
||||
|
||||
func registerHandlers(lbc *controller.LoadBalancerController) {
|
||||
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := lbc.CloudClusterManager.IsHealthy(); err != nil {
|
||||
w.WriteHeader(500)
|
||||
w.Write([]byte(fmt.Sprintf("Cluster unhealthy: %v", err)))
|
||||
return
|
||||
}
|
||||
w.WriteHeader(200)
|
||||
w.Write([]byte("ok"))
|
||||
})
|
||||
http.HandleFunc("/delete-all-and-quit", func(w http.ResponseWriter, r *http.Request) {
|
||||
// TODO: Retry failures during shutdown.
|
||||
lbc.Stop(true)
|
||||
})
|
||||
|
||||
glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%v", lbApiPort), nil))
|
||||
}
|
||||
|
||||
func handleSigterm(lbc *controller.LoadBalancerController, deleteAll bool) {
|
||||
// Multiple SIGTERMs will get dropped
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, syscall.SIGTERM)
|
||||
<-signalChan
|
||||
glog.Infof("Received SIGTERM, shutting down")
|
||||
|
||||
// TODO: Better retires than relying on restartPolicy.
|
||||
exitCode := 0
|
||||
if err := lbc.Stop(deleteAll); err != nil {
|
||||
glog.Infof("Error during shutdown %v", err)
|
||||
exitCode = 1
|
||||
}
|
||||
glog.Infof("Exiting with %v", exitCode)
|
||||
os.Exit(exitCode)
|
||||
}
|
||||
|
||||
// main function for GLBC.
|
||||
func main() {
|
||||
// TODO: Add a healthz endpoint
|
||||
var kubeClient *client.Client
|
||||
var err error
|
||||
var clusterManager *controller.ClusterManager
|
||||
flags.Parse(os.Args)
|
||||
clientConfig := kubectl_util.DefaultClientConfig(flags)
|
||||
|
||||
// Set glog verbosity levels
|
||||
if *verbose {
|
||||
go_flag.Lookup("logtostderr").Value.Set("true")
|
||||
go_flag.Set("v", "4")
|
||||
}
|
||||
glog.Infof("Starting GLBC image: %v", imageVersion)
|
||||
if *defaultSvc == "" {
|
||||
glog.Fatalf("Please specify --default-backend")
|
||||
}
|
||||
|
||||
if *proxyUrl != "" {
|
||||
// Create proxy kubeclient
|
||||
kubeClient = client.NewOrDie(&client.Config{
|
||||
Host: *proxyUrl,
|
||||
ContentConfig: client.ContentConfig{GroupVersion: &unversioned.GroupVersion{Version: "v1"}},
|
||||
})
|
||||
} else {
|
||||
// Create kubeclient
|
||||
if *inCluster {
|
||||
if kubeClient, err = client.NewInCluster(); err != nil {
|
||||
glog.Fatalf("Failed to create client: %v.", err)
|
||||
}
|
||||
} else {
|
||||
config, err := clientConfig.ClientConfig()
|
||||
if err != nil {
|
||||
glog.Fatalf("error connecting to the client: %v", err)
|
||||
}
|
||||
kubeClient, err = client.New(config)
|
||||
}
|
||||
}
|
||||
// Wait for the default backend Service. There's no pretty way to do this.
|
||||
parts := strings.Split(*defaultSvc, "/")
|
||||
if len(parts) != 2 {
|
||||
glog.Fatalf("Default backend should take the form namespace/name: %v",
|
||||
*defaultSvc)
|
||||
}
|
||||
defaultBackendNodePort, err := getNodePort(kubeClient, parts[0], parts[1])
|
||||
if err != nil {
|
||||
glog.Fatalf("Could not configure default backend %v: %v",
|
||||
*defaultSvc, err)
|
||||
}
|
||||
|
||||
if *proxyUrl == "" && *inCluster {
|
||||
// Create cluster manager
|
||||
clusterManager, err = controller.NewClusterManager(
|
||||
*clusterName, defaultBackendNodePort, *healthCheckPath)
|
||||
if err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
} else {
|
||||
// Create fake cluster manager
|
||||
clusterManager = controller.NewFakeClusterManager(*clusterName).ClusterManager
|
||||
}
|
||||
|
||||
// Start loadbalancer controller
|
||||
lbc, err := controller.NewLoadBalancerController(kubeClient, clusterManager, *resyncPeriod, *watchNamespace)
|
||||
if err != nil {
|
||||
glog.Fatalf("%v", err)
|
||||
}
|
||||
if clusterManager.ClusterNamer.ClusterName != "" {
|
||||
glog.V(3).Infof("Cluster name %+v", clusterManager.ClusterNamer.ClusterName)
|
||||
}
|
||||
go registerHandlers(lbc)
|
||||
go handleSigterm(lbc, *deleteAllOnQuit)
|
||||
|
||||
lbc.Run()
|
||||
for {
|
||||
glog.Infof("Handled quit, awaiting pod deletion.")
|
||||
time.Sleep(30 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
// getNodePort waits for the Service, and returns it's first node port.
|
||||
func getNodePort(client *client.Client, ns, name string) (nodePort int64, err error) {
|
||||
var svc *api.Service
|
||||
glog.V(3).Infof("Waiting for %v/%v", ns, name)
|
||||
wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) {
|
||||
svc, err = client.Services(ns).Get(name)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
for _, p := range svc.Spec.Ports {
|
||||
if p.NodePort != 0 {
|
||||
nodePort = int64(p.NodePort)
|
||||
glog.V(3).Infof("Node port %v", nodePort)
|
||||
break
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return
|
||||
}
|
||||
82
controllers/gce/rc.yaml
Normal file
82
controllers/gce/rc.yaml
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
# This must match the --default-backend-service argument of the l7 lb
|
||||
# controller and is required because GCE mandates a default backend.
|
||||
name: default-http-backend
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
spec:
|
||||
# The default backend must be of type NodePort.
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
k8s-app: glbc
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: l7-lb-controller
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.5.2
|
||||
spec:
|
||||
# There should never be more than 1 controller alive simultaneously.
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: glbc
|
||||
version: v0.5.2
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: glbc
|
||||
version: v0.5.2
|
||||
name: glbc
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 600
|
||||
containers:
|
||||
- name: default-http-backend
|
||||
# Any image is permissable as long as:
|
||||
# 1. It serves a 404 page at /
|
||||
# 2. It serves 200 on a /healthz endpoint
|
||||
image: gcr.io/google_containers/defaultbackend:1.0
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
- image: gcr.io/google_containers/glbc:0.5.2
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
name: l7-lb-controller
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
args:
|
||||
- --default-backend-service=default/default-http-backend
|
||||
- --sync-period=300s
|
||||
30
controllers/gce/storage/doc.go
Normal file
30
controllers/gce/storage/doc.go
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Storage backends used by the Ingress controller.
|
||||
// Ingress controllers require their own storage for the following reasons:
|
||||
// 1. There is only so much information we can pack into 64 chars allowed
|
||||
// by GCE for resource names.
|
||||
// 2. An Ingress controller cannot assume total control over a project, in
|
||||
// fact in a majority of cases (ubernetes, tests, multiple gke clusters in
|
||||
// same project) there *will* be multiple controllers in a project.
|
||||
// 3. If the Ingress controller pod is killed, an Ingress is deleted while
|
||||
// the pod is down, and then the controller is re-scheduled on another node,
|
||||
// it will leak resources. Note that this will happen today because
|
||||
// the only implemented storage backend is InMemoryPool.
|
||||
// 4. Listing from cloudproviders is really slow.
|
||||
|
||||
package storage
|
||||
53
controllers/gce/storage/pools.go
Normal file
53
controllers/gce/storage/pools.go
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
)
|
||||
|
||||
// Snapshotter is an interface capable of providing a consistent snapshot of
|
||||
// the underlying storage implementation of a pool. It does not guarantee
|
||||
// thread safety of snapshots, so they should be treated as read only unless
|
||||
// the implementation specifies otherwise.
|
||||
type Snapshotter interface {
|
||||
Snapshot() map[string]interface{}
|
||||
cache.ThreadSafeStore
|
||||
}
|
||||
|
||||
// InMemoryPool is used as a cache for cluster resource pools.
|
||||
type InMemoryPool struct {
|
||||
cache.ThreadSafeStore
|
||||
}
|
||||
|
||||
// Snapshot returns a read only copy of the k:v pairs in the store.
|
||||
// Caller beware: Violates traditional snapshot guarantees.
|
||||
func (p *InMemoryPool) Snapshot() map[string]interface{} {
|
||||
snap := map[string]interface{}{}
|
||||
for _, key := range p.ListKeys() {
|
||||
if item, ok := p.Get(key); ok {
|
||||
snap[key] = item
|
||||
}
|
||||
}
|
||||
return snap
|
||||
}
|
||||
|
||||
// NewInMemoryPool creates an InMemoryPool.
|
||||
func NewInMemoryPool() *InMemoryPool {
|
||||
return &InMemoryPool{
|
||||
cache.NewThreadSafeStore(cache.Indexers{}, cache.Indices{})}
|
||||
}
|
||||
21
controllers/gce/utils/doc.go
Normal file
21
controllers/gce/utils/doc.go
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// utils contains odd structs, constants etc that don't fit cleanly into any
|
||||
// sub-module because they're shared. Ideally this module wouldn't exist, but
|
||||
// sharing these odd bits reduces margin for error.
|
||||
|
||||
package utils
|
||||
178
controllers/gce/utils/utils.go
Normal file
178
controllers/gce/utils/utils.go
Normal file
|
|
@ -0,0 +1,178 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
compute "google.golang.org/api/compute/v1"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
const (
|
||||
// Add used to record additions in a sync pool.
|
||||
Add = iota
|
||||
// Remove used to record removals from a sync pool.
|
||||
Remove
|
||||
// Sync used to record syncs of a sync pool.
|
||||
Sync
|
||||
// Get used to record Get from a sync pool.
|
||||
Get
|
||||
// Create used to recrod creations in a sync pool.
|
||||
Create
|
||||
// Update used to record updates in a sync pool.
|
||||
Update
|
||||
// Delete used to record deltions from a sync pool.
|
||||
Delete
|
||||
// AddInstances used to record a call to AddInstances.
|
||||
AddInstances
|
||||
// RemoveInstances used to record a call to RemoveInstances.
|
||||
RemoveInstances
|
||||
|
||||
// This allows sharing of backends across loadbalancers.
|
||||
backendPrefix = "k8s-be"
|
||||
|
||||
// Prefix used for instance groups involved in L7 balancing.
|
||||
igPrefix = "k8s-ig"
|
||||
|
||||
// A delimiter used for clarity in naming GCE resources.
|
||||
clusterNameDelimiter = "--"
|
||||
|
||||
// Arbitrarily chosen alphanumeric character to use in constructing resource
|
||||
// names, eg: to avoid cases where we end up with a name ending in '-'.
|
||||
alphaNumericChar = "0"
|
||||
|
||||
// Names longer than this are truncated, because of GCE restrictions.
|
||||
nameLenLimit = 62
|
||||
|
||||
// DefaultBackendKey is the key used to transmit the defaultBackend through
|
||||
// a urlmap. It's not a valid subdomain, and it is a catch all path.
|
||||
// TODO: Find a better way to transmit this, once we've decided on default
|
||||
// backend semantics (i.e do we want a default per host, per lb etc).
|
||||
DefaultBackendKey = "DefaultBackend"
|
||||
|
||||
// K8sAnnotationPrefix is the prefix used in annotations used to record
|
||||
// debug information in the Ingress annotations.
|
||||
K8sAnnotationPrefix = "ingress.kubernetes.io"
|
||||
)
|
||||
|
||||
// Namer handles centralized naming for the cluster.
|
||||
type Namer struct {
|
||||
ClusterName string
|
||||
}
|
||||
|
||||
// Truncate truncates the given key to a GCE length limit.
|
||||
func (n *Namer) Truncate(key string) string {
|
||||
if len(key) > nameLenLimit {
|
||||
// GCE requires names to end with an albhanumeric, but allows characters
|
||||
// like '-', so make sure the trucated name ends legally.
|
||||
return fmt.Sprintf("%v%v", key[:nameLenLimit], alphaNumericChar)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
func (n *Namer) decorateName(name string) string {
|
||||
if n.ClusterName == "" {
|
||||
return name
|
||||
}
|
||||
return n.Truncate(fmt.Sprintf("%v%v%v", name, clusterNameDelimiter, n.ClusterName))
|
||||
}
|
||||
|
||||
// BeName constructs the name for a backend.
|
||||
func (n *Namer) BeName(port int64) string {
|
||||
return n.decorateName(fmt.Sprintf("%v-%d", backendPrefix, port))
|
||||
}
|
||||
|
||||
// IGName constructs the name for an Instance Group.
|
||||
func (n *Namer) IGName() string {
|
||||
// Currently all ports are added to a single instance group.
|
||||
return n.decorateName(igPrefix)
|
||||
}
|
||||
|
||||
// LBName constructs a loadbalancer name from the given key. The key is usually
|
||||
// the namespace/name of a Kubernetes Ingress.
|
||||
func (n *Namer) LBName(key string) string {
|
||||
// TODO: Pipe the clusterName through, for now it saves code churn to just
|
||||
// grab it globally, especially since we haven't decided how to handle
|
||||
// namespace conflicts in the Ubernetes context.
|
||||
parts := strings.Split(key, clusterNameDelimiter)
|
||||
scrubbedName := strings.Replace(key, "/", "-", -1)
|
||||
if n.ClusterName == "" || parts[len(parts)-1] == n.ClusterName {
|
||||
return scrubbedName
|
||||
}
|
||||
return n.Truncate(fmt.Sprintf("%v%v%v", scrubbedName, clusterNameDelimiter, n.ClusterName))
|
||||
}
|
||||
|
||||
// GCEURLMap is a nested map of hostname->path regex->backend
|
||||
type GCEURLMap map[string]map[string]*compute.BackendService
|
||||
|
||||
// GetDefaultBackend performs a destructive read and returns the default
|
||||
// backend of the urlmap.
|
||||
func (g GCEURLMap) GetDefaultBackend() *compute.BackendService {
|
||||
var d *compute.BackendService
|
||||
var exists bool
|
||||
if h, ok := g[DefaultBackendKey]; ok {
|
||||
if d, exists = h[DefaultBackendKey]; exists {
|
||||
delete(h, DefaultBackendKey)
|
||||
}
|
||||
delete(g, DefaultBackendKey)
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// String implements the string interface for the GCEURLMap.
|
||||
func (g GCEURLMap) String() string {
|
||||
msg := ""
|
||||
for host, um := range g {
|
||||
msg += fmt.Sprintf("%v\n", host)
|
||||
for url, be := range um {
|
||||
msg += fmt.Sprintf("\t%v: ", url)
|
||||
if be == nil {
|
||||
msg += fmt.Sprintf("No backend\n")
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v\n", be.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// PutDefaultBackend performs a destructive write replacing the
|
||||
// default backend of the url map with the given backend.
|
||||
func (g GCEURLMap) PutDefaultBackend(d *compute.BackendService) {
|
||||
g[DefaultBackendKey] = map[string]*compute.BackendService{
|
||||
DefaultBackendKey: d,
|
||||
}
|
||||
}
|
||||
|
||||
// IsHTTPErrorCode checks if the given error matches the given HTTP Error code.
|
||||
// For this to work the error must be a googleapi Error.
|
||||
func IsHTTPErrorCode(err error, code int) bool {
|
||||
apiErr, ok := err.(*googleapi.Error)
|
||||
return ok && apiErr.Code == code
|
||||
}
|
||||
|
||||
// CompareLinks returns true if the 2 self links are equal.
|
||||
func CompareLinks(l1, l2 string) bool {
|
||||
// TODO: These can be partial links
|
||||
return l1 == l2 && l1 != ""
|
||||
}
|
||||
|
||||
// FakeIngressRuleValueMap is a convenience type used by multiple submodules
|
||||
// that share the same testing methods.
|
||||
type FakeIngressRuleValueMap map[string]string
|
||||
18
controllers/nginx-alpha/Dockerfile
Normal file
18
controllers/nginx-alpha/Dockerfile
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
# Copyright 2015 The Kubernetes Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM gcr.io/google_containers/nginx
|
||||
COPY controller /
|
||||
COPY default.conf /etc/nginx/nginx.conf
|
||||
CMD ["/controller"]
|
||||
17
controllers/nginx-alpha/Makefile
Normal file
17
controllers/nginx-alpha/Makefile
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
all: push
|
||||
|
||||
# 0.0 shouldn't clobber any release builds
|
||||
TAG = 0.0
|
||||
PREFIX = gcr.io/google_containers/nginx-ingress
|
||||
|
||||
controller: controller.go
|
||||
CGO_ENABLED=0 GOOS=linux godep go build -a -installsuffix cgo -ldflags '-w' -o controller ./controller.go
|
||||
|
||||
container: controller
|
||||
docker build -t $(PREFIX):$(TAG) .
|
||||
|
||||
push: container
|
||||
gcloud docker push $(PREFIX):$(TAG)
|
||||
|
||||
clean:
|
||||
rm -f controller
|
||||
116
controllers/nginx-alpha/README.md
Normal file
116
controllers/nginx-alpha/README.md
Normal file
|
|
@ -0,0 +1,116 @@
|
|||
# Nginx Ingress Controller
|
||||
|
||||
This is a simple nginx Ingress controller. Expect it to grow up. See [Ingress controller documentation](../README.md) for details on how it works.
|
||||
|
||||
## Deploying the controller
|
||||
|
||||
Deploying the controller is as easy as creating the RC in this directory. Having done so you can test it with the following echoheaders application:
|
||||
|
||||
```yaml
|
||||
# 3 Services for the 3 endpoints of the Ingress
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheaders-x
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30301
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheaders-default
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30302
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: echoheaders-y
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
nodePort: 30284
|
||||
targetPort: 8080
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app: echoheaders
|
||||
---
|
||||
# A single RC matching all Services
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: echoheaders
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: echoheaders
|
||||
spec:
|
||||
containers:
|
||||
- name: echoheaders
|
||||
image: gcr.io/google_containers/echoserver:1.0
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
---
|
||||
# An Ingress with 2 hosts and 3 endpoints
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: echomap
|
||||
spec:
|
||||
rules:
|
||||
- host: foo.bar.com
|
||||
http:
|
||||
paths:
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheaders-x
|
||||
servicePort: 80
|
||||
- host: bar.baz.com
|
||||
http:
|
||||
paths:
|
||||
- path: /bar
|
||||
backend:
|
||||
serviceName: echoheaders-y
|
||||
servicePort: 80
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheaders-x
|
||||
servicePort: 80
|
||||
```
|
||||
You should be able to access the Services on the public IP of the node the nginx pod lands on.
|
||||
|
||||
## Wishlist
|
||||
|
||||
* SSL/TLS
|
||||
* Production ready options
|
||||
* Dynamic adding backends
|
||||
* Varied loadbalancing algorithms
|
||||
|
||||
... this list goes on. If you feel you know nginx better than I do, please contribute.
|
||||
|
||||
95
controllers/nginx-alpha/controller.go
Normal file
95
controllers/nginx-alpha/controller.go
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"text/template"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
nginxConf = `
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
http {
|
||||
# http://nginx.org/en/docs/http/ngx_http_core_module.html
|
||||
types_hash_max_size 2048;
|
||||
server_names_hash_max_size 512;
|
||||
server_names_hash_bucket_size 64;
|
||||
|
||||
{{range $ing := .Items}}
|
||||
{{range $rule := $ing.Spec.Rules}}
|
||||
server {
|
||||
listen 80;
|
||||
server_name {{$rule.Host}};
|
||||
{{ range $path := $rule.HTTP.Paths }}
|
||||
location {{$path.Path}} {
|
||||
proxy_set_header Host $host;
|
||||
proxy_pass http://{{$path.Backend.ServiceName}}.{{$ing.Namespace}}.svc.cluster.local:{{$path.Backend.ServicePort}};
|
||||
}{{end}}
|
||||
}{{end}}{{end}}
|
||||
}`
|
||||
)
|
||||
|
||||
func shellOut(cmd string) {
|
||||
out, err := exec.Command("sh", "-c", cmd).CombinedOutput()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to execute %v: %v, err: %v", cmd, string(out), err)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
var ingClient client.IngressInterface
|
||||
if kubeClient, err := client.NewInCluster(); err != nil {
|
||||
log.Fatalf("Failed to create client: %v.", err)
|
||||
} else {
|
||||
ingClient = kubeClient.Extensions().Ingress(api.NamespaceAll)
|
||||
}
|
||||
tmpl, _ := template.New("nginx").Parse(nginxConf)
|
||||
rateLimiter := util.NewTokenBucketRateLimiter(0.1, 1)
|
||||
known := &extensions.IngressList{}
|
||||
|
||||
// Controller loop
|
||||
shellOut("nginx")
|
||||
for {
|
||||
rateLimiter.Accept()
|
||||
ingresses, err := ingClient.List(api.ListOptions{})
|
||||
if err != nil {
|
||||
log.Printf("Error retrieving ingresses: %v", err)
|
||||
continue
|
||||
}
|
||||
if reflect.DeepEqual(ingresses.Items, known.Items) {
|
||||
continue
|
||||
}
|
||||
known = ingresses
|
||||
if w, err := os.Create("/etc/nginx/nginx.conf"); err != nil {
|
||||
log.Fatalf("Failed to open %v: %v", nginxConf, err)
|
||||
} else if err := tmpl.Execute(w, ingresses); err != nil {
|
||||
log.Fatalf("Failed to write template %v", err)
|
||||
}
|
||||
shellOut("nginx -s reload")
|
||||
}
|
||||
}
|
||||
4
controllers/nginx-alpha/default.conf
Normal file
4
controllers/nginx-alpha/default.conf
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
# A very simple nginx configuration file that forces nginx to start as a daemon.
|
||||
events {}
|
||||
http {}
|
||||
daemon on;
|
||||
22
controllers/nginx-alpha/rc.yaml
Normal file
22
controllers/nginx-alpha/rc.yaml
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: nginx-ingress
|
||||
labels:
|
||||
app: nginx-ingress
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
app: nginx-ingress
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx-ingress
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/nginx-ingress:0.1
|
||||
imagePullPolicy: Always
|
||||
name: nginx
|
||||
ports:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
25
controllers/nginx-third-party/Dockerfile
vendored
Normal file
25
controllers/nginx-third-party/Dockerfile
vendored
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
# Copyright 2015 The Kubernetes Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM gcr.io/google_containers/nginx-slim:0.3
|
||||
|
||||
COPY nginx-third-party-lb /
|
||||
COPY nginx.tmpl /
|
||||
COPY default.conf /etc/nginx/nginx.conf
|
||||
|
||||
COPY lua /etc/nginx/lua/
|
||||
|
||||
WORKDIR /
|
||||
|
||||
CMD ["/nginx-third-party-lb"]
|
||||
17
controllers/nginx-third-party/Makefile
vendored
Normal file
17
controllers/nginx-third-party/Makefile
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
all: push
|
||||
|
||||
# 0.0 shouldn't clobber any release builds
|
||||
TAG = 0.3
|
||||
PREFIX = gcr.io/google_containers/nginx-third-party
|
||||
|
||||
controller: controller.go clean
|
||||
CGO_ENABLED=0 GOOS=linux godep go build -a -installsuffix cgo -ldflags '-w' -o nginx-third-party-lb
|
||||
|
||||
container: controller
|
||||
docker build -t $(PREFIX):$(TAG) .
|
||||
|
||||
push: container
|
||||
gcloud docker push $(PREFIX):$(TAG)
|
||||
|
||||
clean:
|
||||
rm -f nginx-third-party-lb
|
||||
327
controllers/nginx-third-party/README.md
vendored
Normal file
327
controllers/nginx-third-party/README.md
vendored
Normal file
|
|
@ -0,0 +1,327 @@
|
|||
# Nginx Ingress Controller
|
||||
|
||||
This is a nginx Ingress controller that uses [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md) to store the nginx configuration. See [Ingress controller documentation](../README.md) for details on how it works.
|
||||
|
||||
|
||||
## What it provides?
|
||||
|
||||
- Ingress controller
|
||||
- nginx 1.9.x with [lua-nginx-module](https://github.com/openresty/lua-nginx-module)
|
||||
- SSL support
|
||||
- custom ssl_dhparam (optional). Just mount a secret with a file named `dhparam.pem`.
|
||||
- support for TCP services (flag `--tcp-services`)
|
||||
- custom nginx configuration using [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md)
|
||||
- custom error pages. Using the flag `--custom-error-service` is possible to use a custom compatible [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) image [nginx-error-server](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) that provides an additional `/errors` route that returns custom content for a particular error code. **This is completely optional**
|
||||
|
||||
|
||||
## Requirements
|
||||
- default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) (or a custom compatible image)
|
||||
- DNS must be operational and able to resolve default-http-backend.default.svc.cluster.local
|
||||
|
||||
## SSL
|
||||
|
||||
Please follow [test.sh](https://github.com/bprashanth/Ingress/blob/master/examples/sni/nginx/test.sh) as a guide on how to generate secrets containing SSL certificates. The name of the secret can be different than the name of the certificate.
|
||||
|
||||
Currently Ingress does not support HTTPS. To bypass this the controller will check if there's a certificate for the the host in `Spec.Rules.Host` checking for a certificate in each of the mounted secrets. If exists it will create a nginx server listening in the port 443.
|
||||
|
||||
## Examples:
|
||||
|
||||
First we need to deploy some application to publish. To keep this simple we will use the [echoheaders app]() that just returns information about the http request as output
|
||||
```
|
||||
kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.0 --replicas=1 --port=8080
|
||||
```
|
||||
|
||||
Now we expose the same application in two different services (so we can create different Ingress rules)
|
||||
```
|
||||
kubectl expose rc echoheaders --port=80 --target-port=8080 --name=echoheaders-x
|
||||
kubectl expose rc echoheaders --port=80 --target-port=8080 --name=echoheaders-y
|
||||
```
|
||||
|
||||
Next we create a couple of Ingress rules
|
||||
```
|
||||
kubectl create -f examples/ingress.yaml
|
||||
```
|
||||
|
||||
we check that ingress rules are defined:
|
||||
```
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
echomap -
|
||||
foo.bar.com
|
||||
/foo echoheaders-x:80
|
||||
bar.baz.com
|
||||
/bar echoheaders-y:80
|
||||
/foo echoheaders-x:80
|
||||
```
|
||||
|
||||
Before the deploy of nginx we need a default backend [404-server](https://github.com/kubernetes/contrib/tree/master/404-server) (or a compatible custom image)
|
||||
```
|
||||
kubectl create -f examples/default-backend.yaml
|
||||
kubectl expose rc default-http-backend --port=80 --target-port=8080 --name=default-http-backend
|
||||
```
|
||||
|
||||
# Default configuration
|
||||
|
||||
The last step is the deploy of nginx Ingress rc (from the examples directory)
|
||||
```
|
||||
kubectl create -f examples/rc-default.yaml
|
||||
```
|
||||
|
||||
To test if evertyhing is working correctly:
|
||||
|
||||
`curl -v http://<node IP address>:80/foo -H 'Host: foo.bar.com'`
|
||||
|
||||
You should see an output similar to
|
||||
```
|
||||
* Trying 172.17.4.99...
|
||||
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
|
||||
> GET /foo HTTP/1.1
|
||||
> Host: foo.bar.com
|
||||
> User-Agent: curl/7.43.0
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Server: nginx/1.9.8
|
||||
< Date: Tue, 15 Dec 2015 13:45:13 GMT
|
||||
< Content-Type: text/plain
|
||||
< Transfer-Encoding: chunked
|
||||
< Connection: keep-alive
|
||||
< Vary: Accept-Encoding
|
||||
<
|
||||
CLIENT VALUES:
|
||||
client_address=10.2.84.43
|
||||
command=GET
|
||||
real path=/foo
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://foo.bar.com:8080/foo
|
||||
|
||||
SERVER VALUES:
|
||||
server_version=nginx: 1.9.7 - lua: 9019
|
||||
|
||||
HEADERS RECEIVED:
|
||||
accept=*/*
|
||||
connection=close
|
||||
host=foo.bar.com
|
||||
user-agent=curl/7.43.0
|
||||
x-forwarded-for=172.17.4.1
|
||||
x-forwarded-host=foo.bar.com
|
||||
x-forwarded-server=foo.bar.com
|
||||
x-real-ip=172.17.4.1
|
||||
BODY:
|
||||
* Connection #0 to host 172.17.4.99 left intact
|
||||
```
|
||||
|
||||
If we try to get a non exising route like `/foobar` we should see
|
||||
```
|
||||
$ curl -v 172.17.4.99/foobar -H 'Host: foo.bar.com'
|
||||
* Trying 172.17.4.99...
|
||||
* Connected to 172.17.4.99 (172.17.4.99) port 80 (#0)
|
||||
> GET /foobar HTTP/1.1
|
||||
> Host: foo.bar.com
|
||||
> User-Agent: curl/7.43.0
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 404 Not Found
|
||||
< Server: nginx/1.9.8
|
||||
< Date: Tue, 15 Dec 2015 13:48:18 GMT
|
||||
< Content-Type: text/html
|
||||
< Transfer-Encoding: chunked
|
||||
< Connection: keep-alive
|
||||
< Vary: Accept-Encoding
|
||||
<
|
||||
default backend - 404
|
||||
* Connection #0 to host 172.17.4.99 left intact
|
||||
```
|
||||
|
||||
(this test checked that the default backend is properly working)
|
||||
|
||||
*Replacing the default backend with a custom one we can change the default error pages provided by nginx*
|
||||
|
||||
# Exposing TCP services
|
||||
|
||||
First we need to remove the running
|
||||
```
|
||||
kubectl delete rc nginx-ingress-3rdpartycfg
|
||||
```
|
||||
|
||||
```
|
||||
kubectl create -f examples/rc-tcp.yaml
|
||||
```
|
||||
|
||||
Now we add the annotation to the replication controller that indicates with services should be exposed as TCP:
|
||||
The annotation key is `nginx-ingress.kubernetes.io/tcpservices`. You can expose more than one service using comma as separator.
|
||||
Each service must contain the namespace, service name and port to be use as public port
|
||||
|
||||
```
|
||||
kubectl annotate rc nginx-ingress-3rdpartycfg "nginx-ingress.kubernetes.io/tcpservices=default/echoheaders-x:9000"
|
||||
```
|
||||
|
||||
*Note:* the only reason to remove and create a new rc is that we cannot open new ports dynamically once the pod is running.
|
||||
|
||||
|
||||
Once we run the `kubectl annotate` command nginx will reload.
|
||||
|
||||
Now we can test the new service:
|
||||
```
|
||||
$ (sleep 1; echo "GET / HTTP/1.1"; echo "Host: 172.17.4.99:9000"; echo;echo;sleep 2) | telnet 172.17.4.99 9000
|
||||
|
||||
Trying 172.17.4.99...
|
||||
Connected to 172.17.4.99.
|
||||
Escape character is '^]'.
|
||||
HTTP/1.1 200 OK
|
||||
Server: nginx/1.9.7
|
||||
Date: Tue, 15 Dec 2015 14:46:28 GMT
|
||||
Content-Type: text/plain
|
||||
Transfer-Encoding: chunked
|
||||
Connection: keep-alive
|
||||
|
||||
f
|
||||
CLIENT VALUES:
|
||||
|
||||
1a
|
||||
client_address=10.2.84.45
|
||||
|
||||
c
|
||||
command=GET
|
||||
|
||||
c
|
||||
real path=/
|
||||
|
||||
a
|
||||
query=nil
|
||||
|
||||
14
|
||||
request_version=1.1
|
||||
|
||||
25
|
||||
request_uri=http://172.17.4.99:8080/
|
||||
|
||||
1
|
||||
|
||||
|
||||
f
|
||||
SERVER VALUES:
|
||||
|
||||
28
|
||||
server_version=nginx: 1.9.7 - lua: 9019
|
||||
|
||||
1
|
||||
|
||||
|
||||
12
|
||||
HEADERS RECEIVED:
|
||||
|
||||
16
|
||||
host=172.17.4.99:9000
|
||||
|
||||
6
|
||||
BODY:
|
||||
|
||||
14
|
||||
-no body in request-
|
||||
0
|
||||
```
|
||||
|
||||
## SSL
|
||||
|
||||
Currently Ingress rules does not contains SSL definitions. In order to support SSL in nginx this controller uses secrets mounted inside the directory `/etc/nginx-ssl` to detect if some Ingress rule contains a host for which it is possible the creation of an SSL server.
|
||||
|
||||
First create a secret containing the ssl certificate and key. This example creates the certificate and the secret (json):
|
||||
|
||||
`SECRET_NAME=secret-echoheaders-1 HOSTS=foo.bar.com ./examples/certs.sh`
|
||||
|
||||
Create the secret:
|
||||
```
|
||||
kubectl create -f secret-secret-echoheaders-1-foo.bar.com.json
|
||||
```
|
||||
|
||||
Check if the secret was created:
|
||||
```
|
||||
$ kubectl get secrets
|
||||
NAME TYPE DATA AGE
|
||||
secret-echoheaders-1 Opaque 2 9m
|
||||
```
|
||||
|
||||
|
||||
Like before we need to remove the running nginx rc
|
||||
```
|
||||
kubectl delete rc nginx-ingress-3rdpartycfg
|
||||
```
|
||||
|
||||
Next create a new rc that uses the secret
|
||||
```
|
||||
kubectl create -f examples/rc-ssl.yaml
|
||||
```
|
||||
|
||||
*Note:* this example uses a self signed certificate.
|
||||
|
||||
Example output:
|
||||
```
|
||||
$ curl -v https://172.17.4.99/foo -H 'Host: bar.baz.com' -k
|
||||
* Trying 172.17.4.99...
|
||||
* Connected to 172.17.4.99 (172.17.4.99) port 4444 (#0)
|
||||
* TLS 1.2 connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
|
||||
* Server certificate: foo.bar.com
|
||||
> GET /foo HTTP/1.1
|
||||
> Host: bar.baz.com
|
||||
> User-Agent: curl/7.43.0
|
||||
> Accept: */*
|
||||
>
|
||||
< HTTP/1.1 200 OK
|
||||
< Server: nginx/1.9.8
|
||||
< Date: Thu, 17 Dec 2015 14:57:03 GMT
|
||||
< Content-Type: text/plain
|
||||
< Transfer-Encoding: chunked
|
||||
< Connection: keep-alive
|
||||
< Vary: Accept-Encoding
|
||||
<
|
||||
CLIENT VALUES:
|
||||
client_address=10.2.84.34
|
||||
command=GET
|
||||
real path=/foo
|
||||
query=nil
|
||||
request_version=1.1
|
||||
request_uri=http://bar.baz.com:8080/foo
|
||||
|
||||
SERVER VALUES:
|
||||
server_version=nginx: 1.9.7 - lua: 9019
|
||||
|
||||
HEADERS RECEIVED:
|
||||
accept=*/*
|
||||
connection=close
|
||||
host=bar.baz.com
|
||||
user-agent=curl/7.43.0
|
||||
x-forwarded-for=172.17.4.1
|
||||
x-forwarded-host=bar.baz.com
|
||||
x-forwarded-server=bar.baz.com
|
||||
x-real-ip=172.17.4.1
|
||||
BODY:
|
||||
* Connection #0 to host 172.17.4.99 left intact
|
||||
-no body in request-
|
||||
```
|
||||
|
||||
|
||||
## Custom errors
|
||||
|
||||
The default backend provides a way to customize the default 404 page. This helps but sometimes is not enough.
|
||||
Using the flag `--custom-error-service` is possible to use an image that must be 404 compatible and provide the route /error
|
||||
[Here](https://github.com/aledbf/contrib/tree/nginx-debug-server/Ingress/images/nginx-error-server) there is an example of the the image
|
||||
|
||||
The route `/error` expects two arguments: code and format
|
||||
* code defines the wich error code is expected to be returned (502,503,etc.)
|
||||
* format the format that should be returned For instance /error?code=504&format=json or /error?code=502&format=html
|
||||
|
||||
Using a volume pointing to `/var/www/html` directory is possible to use a custom error
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Problems encountered during [1.2.0-alpha7 deployment](https://github.com/kubernetes/kubernetes/blob/master/docs/getting-started-guides/docker.md):
|
||||
* make setup-files.sh file in hypercube does not provide 10.0.0.1 IP to make-ca-certs, resulting in CA certs that are issued to the external cluster IP address rather then 10.0.0.1 -> this results in nginx-third-party-lb appearing to get stuck at "Utils.go:177 - Waiting for default/default-http-backend" in the docker logs. Kubernetes will eventually kill the container before nginx-third-party-lb times out with a message indicating that the CA certificate issuer is invalid (wrong ip), to verify this add zeros to the end of initialDelaySeconds and timeoutSeconds and reload the RC, and docker will log this error before kubernetes kills the container.
|
||||
* To fix the above, setup-files.sh must be patched before the cluster is inited (refer to https://github.com/kubernetes/kubernetes/pull/21504)
|
||||
* if once the nginx-third-party-lb starts, its docker log spams this message continously "utils.go:(line #)] Requeuing default/echomap, err Post http://127.0.0.1:8080/update-ingress: dial tcp 127.0.0.1:8080: getsockopt: connection refused", it means that the container is unable to use DNS to resolve the service address, DNS autoconfigure is broken on 1.2.0-alpha7 (refer again to https://github.com/kubernetes/kubernetes/pull/21504 for fixes)
|
||||
|
||||
## TODO:
|
||||
- multiple SSL certificates
|
||||
- custom nginx configuration using [ConfigMap](https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/configmap.md)
|
||||
|
||||
333
controllers/nginx-third-party/controller.go
vendored
Normal file
333
controllers/nginx-third-party/controller.go
vendored
Normal file
|
|
@ -0,0 +1,333 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
|
||||
"k8s.io/contrib/ingress/controllers/nginx-third-party/nginx"
|
||||
)
|
||||
|
||||
const (
|
||||
// Name of the default config map that contains the configuration for nginx.
|
||||
// Takes the form namespace/name.
|
||||
// If the annotation does not exists the controller will create a new annotation with the default
|
||||
// configuration.
|
||||
lbConfigName = "lbconfig"
|
||||
|
||||
// If you have pure tcp services or https services that need L3 routing, you
|
||||
// must specify them by name. Note that you are responsible for:
|
||||
// 1. Making sure there is no collision between the service ports of these services.
|
||||
// - You can have multiple <mysql svc name>:3306 specifications in this map, and as
|
||||
// long as the service ports of your mysql service don't clash, you'll get
|
||||
// loadbalancing for each one.
|
||||
// 2. Exposing the service ports as node ports on a pod.
|
||||
// 3. Adding firewall rules so these ports can ingress traffic.
|
||||
|
||||
// Comma separated list of tcp/https
|
||||
// namespace/serviceName:portToExport pairings. This assumes you've opened up the right
|
||||
// hostPorts for each service that serves ingress traffic. Te value of portToExport indicates the
|
||||
// port to listen inside nginx, not the port of the service.
|
||||
lbTcpServices = "tcpservices"
|
||||
|
||||
k8sAnnotationPrefix = "nginx-ingress.kubernetes.io"
|
||||
)
|
||||
|
||||
var (
|
||||
keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc
|
||||
)
|
||||
|
||||
// loadBalancerController watches the kubernetes api and adds/removes services
|
||||
// from the loadbalancer
|
||||
type loadBalancerController struct {
|
||||
client *client.Client
|
||||
ingController *framework.Controller
|
||||
configController *framework.Controller
|
||||
ingLister StoreToIngressLister
|
||||
configLister StoreToConfigMapLister
|
||||
recorder record.EventRecorder
|
||||
ingQueue *taskQueue
|
||||
configQueue *taskQueue
|
||||
stopCh chan struct{}
|
||||
ngx *nginx.NginxManager
|
||||
lbInfo *lbInfo
|
||||
// stopLock is used to enforce only a single call to Stop is active.
|
||||
// Needed because we allow stopping through an http endpoint and
|
||||
// allowing concurrent stoppers leads to stack traces.
|
||||
stopLock sync.Mutex
|
||||
shutdown bool
|
||||
}
|
||||
|
||||
type annotations map[string]string
|
||||
|
||||
func (a annotations) getNginxConfig() (string, bool) {
|
||||
val, ok := a[fmt.Sprintf("%v/%v", k8sAnnotationPrefix, lbConfigName)]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
func (a annotations) getTcpServices() (string, bool) {
|
||||
val, ok := a[fmt.Sprintf("%v/%v", k8sAnnotationPrefix, lbTcpServices)]
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// NewLoadBalancerController creates a controller for nginx loadbalancer
|
||||
func NewLoadBalancerController(kubeClient *client.Client, resyncPeriod time.Duration, defaultSvc, customErrorSvc nginx.Service, namespace string, lbInfo *lbInfo) (*loadBalancerController, error) {
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
|
||||
|
||||
lbc := loadBalancerController{
|
||||
client: kubeClient,
|
||||
stopCh: make(chan struct{}),
|
||||
recorder: eventBroadcaster.NewRecorder(
|
||||
api.EventSource{Component: "nginx-lb-controller"}),
|
||||
lbInfo: lbInfo,
|
||||
}
|
||||
lbc.ingQueue = NewTaskQueue(lbc.syncIngress)
|
||||
lbc.configQueue = NewTaskQueue(lbc.syncConfig)
|
||||
|
||||
lbc.ngx = nginx.NewManager(kubeClient, defaultSvc, customErrorSvc)
|
||||
|
||||
// Ingress watch handlers
|
||||
pathHandlers := framework.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
addIng := obj.(*extensions.Ingress)
|
||||
lbc.recorder.Eventf(addIng, api.EventTypeNormal, "ADD", fmt.Sprintf("Adding ingress %s/%s", addIng.Namespace, addIng.Name))
|
||||
lbc.ingQueue.enqueue(obj)
|
||||
},
|
||||
DeleteFunc: lbc.ingQueue.enqueue,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
if !reflect.DeepEqual(old, cur) {
|
||||
glog.V(2).Infof("Ingress %v changed, syncing", cur.(*extensions.Ingress).Name)
|
||||
}
|
||||
lbc.ingQueue.enqueue(cur)
|
||||
},
|
||||
}
|
||||
lbc.ingLister.Store, lbc.ingController = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: ingressListFunc(lbc.client, namespace),
|
||||
WatchFunc: ingressWatchFunc(lbc.client, namespace),
|
||||
},
|
||||
&extensions.Ingress{}, resyncPeriod, pathHandlers)
|
||||
|
||||
// Config watch handlers
|
||||
configHandlers := framework.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
lbc.configQueue.enqueue(obj)
|
||||
},
|
||||
DeleteFunc: lbc.configQueue.enqueue,
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
if !reflect.DeepEqual(old, cur) {
|
||||
glog.V(2).Infof("nginx rc changed, syncing")
|
||||
lbc.configQueue.enqueue(cur)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
lbc.configLister.Store, lbc.configController = framework.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(api.ListOptions) (runtime.Object, error) {
|
||||
rc, err := kubeClient.ReplicationControllers(lbInfo.RCNamespace).Get(lbInfo.RCName)
|
||||
return &api.ReplicationControllerList{
|
||||
Items: []api.ReplicationController{*rc},
|
||||
}, err
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labels.SelectorFromSet(labels.Set{"name": lbInfo.RCName})
|
||||
return kubeClient.ReplicationControllers(lbInfo.RCNamespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.ReplicationController{}, resyncPeriod, configHandlers)
|
||||
|
||||
return &lbc, nil
|
||||
}
|
||||
|
||||
func ingressListFunc(c *client.Client, ns string) func(api.ListOptions) (runtime.Object, error) {
|
||||
return func(opts api.ListOptions) (runtime.Object, error) {
|
||||
return c.Extensions().Ingress(ns).List(opts)
|
||||
}
|
||||
}
|
||||
|
||||
func ingressWatchFunc(c *client.Client, ns string) func(options api.ListOptions) (watch.Interface, error) {
|
||||
return func(options api.ListOptions) (watch.Interface, error) {
|
||||
return c.Extensions().Ingress(ns).Watch(options)
|
||||
}
|
||||
}
|
||||
|
||||
// syncIngress manages Ingress create/updates/deletes.
|
||||
func (lbc *loadBalancerController) syncIngress(key string) {
|
||||
glog.V(2).Infof("Syncing Ingress %v", key)
|
||||
|
||||
obj, ingExists, err := lbc.ingLister.Store.GetByKey(key)
|
||||
if err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
|
||||
if !ingExists {
|
||||
glog.Errorf("Ingress not found: %v", key)
|
||||
return
|
||||
}
|
||||
|
||||
// this means some Ingress rule changed. There is no need to reload nginx but
|
||||
// we need to update the rules to use invoking "POST /update-ingress" with the
|
||||
// list of Ingress rules
|
||||
ingList := lbc.ingLister.Store.List()
|
||||
if err := lbc.ngx.SyncIngress(ingList); err != nil {
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
|
||||
ing := *obj.(*extensions.Ingress)
|
||||
if err := lbc.updateIngressStatus(ing); err != nil {
|
||||
lbc.recorder.Eventf(&ing, api.EventTypeWarning, "Status", err.Error())
|
||||
lbc.ingQueue.requeue(key, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// syncConfig manages changes in nginx configuration.
|
||||
func (lbc *loadBalancerController) syncConfig(key string) {
|
||||
// we only need to sync the nginx rc
|
||||
if key != fmt.Sprintf("%v/%v", lbc.lbInfo.RCNamespace, lbc.lbInfo.RCName) {
|
||||
return
|
||||
}
|
||||
|
||||
obj, configExists, err := lbc.configLister.Store.GetByKey(key)
|
||||
if err != nil {
|
||||
lbc.configQueue.requeue(key, err)
|
||||
return
|
||||
}
|
||||
|
||||
if !configExists {
|
||||
glog.Errorf("Configutation not found: %v", key)
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Syncing config %v", key)
|
||||
|
||||
rc := *obj.(*api.ReplicationController)
|
||||
ngxCfgAnn, _ := annotations(rc.Annotations).getNginxConfig()
|
||||
tcpSvcAnn, _ := annotations(rc.Annotations).getTcpServices()
|
||||
|
||||
ngxConfig, err := lbc.ngx.ReadConfig(ngxCfgAnn)
|
||||
if err != nil {
|
||||
glog.Warningf("%v", err)
|
||||
}
|
||||
|
||||
// TODO: tcp services can change (new item in the annotation list)
|
||||
// TODO: skip get everytime
|
||||
tcpServices := getTcpServices(lbc.client, tcpSvcAnn)
|
||||
lbc.ngx.Reload(ngxConfig, tcpServices)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// updateIngressStatus updates the IP and annotations of a loadbalancer.
|
||||
// The annotations are parsed by kubectl describe.
|
||||
func (lbc *loadBalancerController) updateIngressStatus(ing extensions.Ingress) error {
|
||||
ingClient := lbc.client.Extensions().Ingress(ing.Namespace)
|
||||
|
||||
ip := lbc.lbInfo.PodIP
|
||||
currIng, err := ingClient.Get(ing.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currIng.Status = extensions.IngressStatus{
|
||||
LoadBalancer: api.LoadBalancerStatus{
|
||||
Ingress: []api.LoadBalancerIngress{
|
||||
{IP: ip},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
glog.Infof("Updating loadbalancer %v/%v with IP %v", ing.Namespace, ing.Name, ip)
|
||||
lbc.recorder.Eventf(currIng, api.EventTypeNormal, "CREATE", "ip: %v", ip)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lbc *loadBalancerController) registerHandlers() {
|
||||
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
if err := lbc.ngx.IsHealthy(); err != nil {
|
||||
w.WriteHeader(500)
|
||||
w.Write([]byte("nginx error"))
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(200)
|
||||
w.Write([]byte("ok"))
|
||||
})
|
||||
|
||||
http.HandleFunc("/stop", func(w http.ResponseWriter, r *http.Request) {
|
||||
lbc.Stop()
|
||||
})
|
||||
|
||||
glog.Fatalf(fmt.Sprintf("%v", http.ListenAndServe(fmt.Sprintf(":%v", *healthzPort), nil)))
|
||||
}
|
||||
|
||||
// Stop stops the loadbalancer controller.
|
||||
func (lbc *loadBalancerController) Stop() {
|
||||
// Stop is invoked from the http endpoint.
|
||||
lbc.stopLock.Lock()
|
||||
defer lbc.stopLock.Unlock()
|
||||
|
||||
// Only try draining the workqueue if we haven't already.
|
||||
if !lbc.shutdown {
|
||||
close(lbc.stopCh)
|
||||
glog.Infof("Shutting down controller queues")
|
||||
lbc.ingQueue.shutdown()
|
||||
lbc.configQueue.shutdown()
|
||||
lbc.shutdown = true
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the loadbalancer controller.
|
||||
func (lbc *loadBalancerController) Run() {
|
||||
glog.Infof("Starting nginx loadbalancer controller")
|
||||
go lbc.ngx.Start()
|
||||
go lbc.registerHandlers()
|
||||
|
||||
go lbc.configController.Run(lbc.stopCh)
|
||||
go lbc.configQueue.run(time.Second, lbc.stopCh)
|
||||
|
||||
// Initial nginx configuration.
|
||||
lbc.syncConfig(lbc.lbInfo.RCName)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
go lbc.ingController.Run(lbc.stopCh)
|
||||
go lbc.ingQueue.run(time.Second, lbc.stopCh)
|
||||
|
||||
<-lbc.stopCh
|
||||
glog.Infof("Shutting down nginx loadbalancer controller")
|
||||
}
|
||||
6
controllers/nginx-third-party/default.conf
vendored
Normal file
6
controllers/nginx-third-party/default.conf
vendored
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
# A very simple nginx configuration file that forces nginx to start.
|
||||
pid /run/nginx.pid;
|
||||
|
||||
events {}
|
||||
http {}
|
||||
daemon off;
|
||||
71
controllers/nginx-third-party/examples/certs.sh
vendored
Executable file
71
controllers/nginx-third-party/examples/certs.sh
vendored
Executable file
|
|
@ -0,0 +1,71 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# This test is for dev purposes.
|
||||
|
||||
set -e
|
||||
|
||||
SECRET_NAME=${SECRET_NAME:-ssl-secret}
|
||||
# Name of the app in the .yaml
|
||||
APP=${APP:-nginxsni}
|
||||
# SNI hostnames
|
||||
HOSTS=${HOSTS:-foo.bar.com}
|
||||
# Should the test build and push the container via make push?
|
||||
PUSH=${PUSH:-false}
|
||||
|
||||
# makeCerts makes certificates applying the given hostnames as CNAMEs
|
||||
# $1 Name of the app that will use this secret, applied as a app= label
|
||||
# $2... hostnames as described below
|
||||
# Eg: makeCerts nginxsni nginx1 nginx2 nginx3
|
||||
# Will generate nginx{1,2,3}.crt,.key,.json file in cwd. It's upto the caller
|
||||
# to execute kubectl -f on the json file. The secret will have a label of
|
||||
# app=nginxsni, so you can delete it via the cleanup function.
|
||||
function makeCerts {
|
||||
local label=$1
|
||||
shift
|
||||
for h in ${@}; do
|
||||
if [ ! -f $h.json ] || [ ! -f $h.crt ] || [ ! -f $h.key ]; then
|
||||
printf "\nCreating new secrets for $h, will take ~30s\n\n"
|
||||
local cert=$h.crt key=$h.key host=$h secret=$h.json cname=$h
|
||||
if [ $h == "wildcard" ]; then
|
||||
cname=*.$h.com
|
||||
fi
|
||||
|
||||
# Generate crt and key
|
||||
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
|
||||
-keyout "${key}" -out "${cert}" -subj "/CN=${cname}/O=${cname}"
|
||||
fi
|
||||
|
||||
cat <<EOF > secret-$SECRET_NAME-$h.json
|
||||
{
|
||||
"kind": "Secret",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "$SECRET_NAME"
|
||||
},
|
||||
"data": {
|
||||
"$h.crt": "$(cat ./$h.crt | base64)",
|
||||
"$h.key": "$(cat ./$h.key | base64)"
|
||||
}
|
||||
}
|
||||
|
||||
EOF
|
||||
|
||||
done
|
||||
}
|
||||
|
||||
makeCerts ${APP} ${HOSTS[*]}
|
||||
36
controllers/nginx-third-party/examples/default-backend.yaml
vendored
Normal file
36
controllers/nginx-third-party/examples/default-backend.yaml
vendored
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: default-http-backend
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
app: default-http-backend
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: default-http-backend
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 600
|
||||
containers:
|
||||
- name: default-http-backend
|
||||
# Any image is permissable as long as:
|
||||
# 1. It serves a 404 page at /
|
||||
# 2. It serves 200 on a /healthz endpoint
|
||||
image: gcr.io/google_containers/defaultbackend:1.0
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
35
controllers/nginx-third-party/examples/dhparam.sh
vendored
Executable file
35
controllers/nginx-third-party/examples/dhparam.sh
vendored
Executable file
|
|
@ -0,0 +1,35 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# https://www.openssl.org/docs/manmaster/apps/dhparam.html
|
||||
# this command generates a key used to get "Perfect Forward Secrecy" in nginx
|
||||
# https://wiki.mozilla.org/Security/Server_Side_TLS#DHE_handshake_and_dhparam
|
||||
openssl dhparam -out dhparam.pem 4096
|
||||
|
||||
cat <<EOF > dhparam-example.yaml
|
||||
{
|
||||
"kind": "Secret",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "dhparam-example"
|
||||
},
|
||||
"data": {
|
||||
"dhparam.pem": "$(cat ./dhparam.pem | base64)"
|
||||
}
|
||||
}
|
||||
|
||||
EOF
|
||||
25
controllers/nginx-third-party/examples/ingress.yaml
vendored
Normal file
25
controllers/nginx-third-party/examples/ingress.yaml
vendored
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
# An Ingress with 2 hosts and 3 endpoints
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: echomap
|
||||
spec:
|
||||
rules:
|
||||
- host: foo.bar.com
|
||||
http:
|
||||
paths:
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheaders-x
|
||||
servicePort: 80
|
||||
- host: bar.baz.com
|
||||
http:
|
||||
paths:
|
||||
- path: /bar
|
||||
backend:
|
||||
serviceName: echoheaders-y
|
||||
servicePort: 80
|
||||
- path: /foo
|
||||
backend:
|
||||
serviceName: echoheaders-x
|
||||
servicePort: 80
|
||||
53
controllers/nginx-third-party/examples/rc-default.yaml
vendored
Normal file
53
controllers/nginx-third-party/examples/rc-default.yaml
vendored
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: nginx-ingress-3rdpartycfg
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: nginx-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
name: nginx-ingress-lb
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/nginx-third-party:0.3
|
||||
name: nginx-ingress-lb
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10249
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
# use downward API
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
- containerPort: 443
|
||||
hostPort: 4444
|
||||
# we expose 8080 to access nginx stats in url /nginx-status
|
||||
# this is optional
|
||||
- containerPort: 8080
|
||||
hostPort: 8081
|
||||
args:
|
||||
- /nginx-third-party-lb
|
||||
- --default-backend-service=default/default-http-backend
|
||||
75
controllers/nginx-third-party/examples/rc-full.yaml
vendored
Normal file
75
controllers/nginx-third-party/examples/rc-full.yaml
vendored
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: nginx-ingress-3rdpartycfg
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: nginx-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
name: nginx-ingress-lb
|
||||
spec:
|
||||
# A secret for each nginx host that requires SSL. These secrets need to
|
||||
# exist before hand, see README.
|
||||
# The secret must contains 2 variables: cert and key.
|
||||
# Follow this https://github.com/bprashanth/Ingress/blob/master/examples/sni/nginx/test.sh
|
||||
# as a guide on how to generate secrets containing SSL certificates.
|
||||
volumes:
|
||||
- name: secret-echoheaders-1
|
||||
secret:
|
||||
secretName: echoheaders
|
||||
- name: dhparam-example
|
||||
secret:
|
||||
secretName: dhparam-example
|
||||
containers:
|
||||
- image: gcr.io/google_containers/nginx-third-party:0.3
|
||||
name: nginx-ingress-lb
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10249
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
# use downward API
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
- containerPort: 443
|
||||
hostPort: 4444
|
||||
- containerPort: 8080
|
||||
hostPort: 9000
|
||||
volumeMounts:
|
||||
- mountPath: /etc/nginx-ssl/secret-echoheaders-1
|
||||
name: secret-echoheaders-1
|
||||
- mountPath: /etc/nginx-ssl/dhparam
|
||||
name: dhparam-example
|
||||
# the flags tcp-services is required because Ingress do not support TCP rules
|
||||
# if no namespace is specified "default" is used. Example: nodefaultns/example-go:8080
|
||||
# containerPort 8080 is mapped to 9000 in the node.
|
||||
args:
|
||||
- /nginx-third-party-lb
|
||||
- --tcp-services=default/example-go:8080
|
||||
- --default-backend-service=default/default-http-backend
|
||||
- --custom-error-service=default/default-error-backend
|
||||
|
||||
66
controllers/nginx-third-party/examples/rc-ssl.yaml
vendored
Normal file
66
controllers/nginx-third-party/examples/rc-ssl.yaml
vendored
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: nginx-ingress-3rdpartycfg
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: nginx-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
name: nginx-ingress-lb
|
||||
spec:
|
||||
# A secret for each nginx host that requires SSL. These secrets need to
|
||||
# exist beforehand; see the README.
|
||||
# Follow this https://github.com/kubernetes/contrib/tree/master/Ingress/controllers/nginx-third-party/examples/certs.sh
|
||||
# as a guide on how to generate secrets containing SSL certificates.
|
||||
volumes:
|
||||
- name: secret-echoheaders-1
|
||||
secret:
|
||||
secretName: secret-echoheaders-1
|
||||
containers:
|
||||
- image: gcr.io/google_containers/nginx-third-party:0.3
|
||||
name: nginx-ingress-lb
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10249
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
# use downward API
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
- containerPort: 443
|
||||
hostPort: 4444
|
||||
- containerPort: 8080
|
||||
hostPort: 9000
|
||||
# the mountpoints for the SSL secrets must be a /etc/nginx-ssl subdirectory
|
||||
volumeMounts:
|
||||
- mountPath: /etc/nginx-ssl/secret-echoheaders-1
|
||||
name: secret-echoheaders-1
|
||||
# to configure ssl_dhparam http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam
|
||||
# use the dhparam.sh file to generate and mount a secret that containing the key dhparam.pem or
|
||||
# create a configuration with the content of dhparam.pem in the field sslDHParam.
|
||||
args:
|
||||
- /nginx-third-party-lb
|
||||
- --default-backend-service=default/default-http-backend
|
||||
57
controllers/nginx-third-party/examples/rc-tcp.yaml
vendored
Normal file
57
controllers/nginx-third-party/examples/rc-tcp.yaml
vendored
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: nginx-ingress-3rdpartycfg
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
k8s-app: nginx-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: nginx-ingress-lb
|
||||
name: nginx-ingress-lb
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/nginx-third-party:0.3
|
||||
name: nginx-ingress-lb
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10249
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
# use downward API
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- containerPort: 80
|
||||
hostPort: 80
|
||||
- containerPort: 443
|
||||
hostPort: 4444
|
||||
# we expose 8080 to access nginx stats in url /nginx-status
|
||||
# this is optional
|
||||
- containerPort: 8080
|
||||
hostPort: 8081
|
||||
# service echoheaders as TCP service default/echoheaders:9000
|
||||
# 9000 indicates the port used to expose the service
|
||||
- containerPort: 9000
|
||||
hostPort: 9000
|
||||
args:
|
||||
- /nginx-third-party-lb
|
||||
- --default-backend-service=default/default-http-backend
|
||||
44
controllers/nginx-third-party/lua/dynamic-ssl.lua
vendored
Normal file
44
controllers/nginx-third-party/lua/dynamic-ssl.lua
vendored
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
-- dynamic-ssl.lua: certificate_by_lua* handler that selects an SSL
-- certificate/key pair per connection, keyed by the SNI server name (or
-- by the literal server IP address when the client sent no SNI).
-- Files are expected at <ssl_base_directory>/<name>.cert and .key.
local ssl = require "ngx.ssl"
local ssl_base_directory = "/etc/nginx/nginx-ssl"

local server_name = ssl.server_name()
local addr, addrtyp, err = ssl.raw_server_addr()
local byte = string.byte

-- Drop the certificate/key pair inherited from the nginx config so the
-- pair loaded below is the only one presented.
ssl.clear_certs()

-- Check for SNI request.
if server_name == nil then
    ngx.log(ngx.INFO, "SNI Not present - performing IP lookup")
    -- Set server name as IP address.
    -- NOTE(review): assumes addr is a 4-byte IPv4 address — confirm IPv6
    -- connections cannot reach this handler.
    server_name = string.format("%d.%d.%d.%d", byte(addr, 1), byte(addr, 2), byte(addr, 3), byte(addr, 4))
    ngx.log(ngx.INFO, "IP Address: ", server_name)
end

-- Set certificate paths.
-- (was: `key_path` leaked as a global — missing `local`)
local cert_path = ssl_base_directory .. "/" .. server_name .. ".cert"
local key_path = ssl_base_directory .. "/" .. server_name .. ".key"

-- Attempt to retrieve and set certificate for request.
-- (was: assert(io.open(...)) — a missing file aborted the handshake with
-- a raw Lua error instead of a logged failure)
local cert_file, cert_open_err = io.open(cert_path)
if not cert_file then
    ngx.log(ngx.ERR, "failed to open cert file ", cert_path, ": ", cert_open_err)
    return
end
local cert_data = cert_file:read("*a")
cert_file:close()

local ok, err = ssl.set_der_cert(cert_data)
if not ok then
    ngx.log(ngx.ERR, "failed to set DER cert: ", err)
    return
end

-- Attempt to retrieve and set key for request.
local key_file, key_open_err = io.open(key_path)
if not key_file then
    ngx.log(ngx.ERR, "failed to open key file ", key_path, ": ", key_open_err)
    return
end
local pkey_data = key_file:read("*a")
key_file:close()

local ok, err = ssl.set_der_priv_key(pkey_data)
if not ok then
    ngx.log(ngx.ERR, "failed to set DER key: ", err)
    return
end
|
||||
50
controllers/nginx-third-party/lua/error_page.lua
vendored
Normal file
50
controllers/nginx-third-party/lua/error_page.lua
vendored
Normal file
|
|
@ -0,0 +1,50 @@
|
|||
http = require "resty.http"
|
||||
|
||||
-- Fetch the root page of the backend at `page` and relay its body to the
-- client with HTTP status `status`.
function openURL(status, page)
    local client = http.new()

    local response, request_err = client:request_uri(page, {
        method = "GET",
        path = "/"
    })

    if not response then
        ngx.log(ngx.ERR, request_err)
        ngx.exit(500)
    end

    ngx.status = tonumber(status)
    ngx.header["Content-Type"] = ngx.var.httpReturnType or "text/plain"

    local cookie = ngx.var.http_cookie
    if cookie then
        ngx.header["Cookie"] = cookie
    end

    ngx.say(response.body)
end
|
||||
|
||||
|
||||
-- Relay a custom error page to the client.
-- Calls the backend at `page` with /error?code=<status>&format=<accept>
-- so it can render a body matching the client's Accept header, then
-- echoes that body back with HTTP status `status`.
function openCustomErrorURL(status, page)
    local httpc = http.new()

    -- was: `data = {}` — an accidental global shared across requests;
    -- keep it local to this call.
    local data = {}
    data["code"] = status
    data["format"] = ngx.var.httpAccept
    local params = "/error?"..ngx.encode_args(data)
    local res, err = httpc:request_uri(page, {
        path = params,
        method = "GET"
    })

    if not res then
        ngx.log(ngx.ERR, err)
        ngx.exit(500)
    end

    ngx.status = tonumber(status)
    ngx.header["Content-Type"] = ngx.var.httpReturnType or "text/plain"
    -- Echo the client's cookies so session cookies survive the error page.
    if ngx.var.http_cookie then
        ngx.header["Cookie"] = ngx.var.http_cookie
    end

    ngx.say(res.body)
end
|
||||
229
controllers/nginx-third-party/lua/ingress.lua
vendored
Normal file
229
controllers/nginx-third-party/lua/ingress.lua
vendored
Normal file
|
|
@ -0,0 +1,229 @@
|
|||
local _M = {}
|
||||
|
||||
local cjson = require "cjson"
|
||||
local trie = require "trie"
|
||||
local http = require "resty.http"
|
||||
local cache = require "resty.dns.cache"
|
||||
local os = require "os"
|
||||
|
||||
local encode = cjson.encode
|
||||
local decode = cjson.decode
|
||||
|
||||
local table_concat = table.concat
|
||||
|
||||
local trie_get = trie.get
|
||||
local match = string.match
|
||||
local gsub = string.gsub
|
||||
local lower = string.lower
|
||||
|
||||
|
||||
-- we "cache" the config local to each worker
|
||||
local ingressConfig = nil
|
||||
|
||||
local cluster_domain = "cluster.local"
|
||||
|
||||
local def_backend = nil
|
||||
|
||||
local custom_error = nil
|
||||
|
||||
local dns_cache_options = nil
|
||||
|
||||
-- Load the JSON-encoded routing table from the "ingress" shared dict,
-- decode it, and cache the decoded value in the worker-local
-- `ingressConfig` upvalue.
-- Returns the decoded table, or nil plus an error string when no config
-- has been pushed yet.
-- NOTE(review): get_stale() is used and the `stale` flag is ignored, so
-- an expired value is still served — confirm this is intentional.
function get_ingressConfig(ngx)
    local d = ngx.shared["ingress"]
    local value, flags, stale = d:get_stale("ingressConfig")
    if not value then
        -- nothing we can do
        return nil, "config not set"
    end
    ingressConfig = decode(value)
    return ingressConfig, nil
end
|
||||
|
||||
-- Warm this worker's cached copy of the Ingress config.
-- Failures (e.g. no config pushed yet) are logged and otherwise ignored.
function worker_cache_config(ngx)
    local _, load_err = get_ingressConfig(ngx)
    if not load_err then
        return
    end
    ngx.log(ngx.ERR, "unable to get ingressConfig: ", load_err)
end
|
||||
|
||||
-- Content handler: pick the upstream for the current request by exact
-- host match plus longest-prefix path lookup in the trie, optionally
-- resolving the backend's service DNS name through the local DNS cache,
-- and publish the result via ngx.var.upstream_host / upstream_port.
function _M.content(ngx)
    local host = ngx.var.host

    -- Strip off any port. BUGFIX: the previous pattern "^(.+):?" never
    -- stripped anything — the greedy ".+" consumed the ":port" suffix
    -- because the trailing ":?" happily matched the empty string.
    -- "[^:]+" stops at the first colon.
    local h = match(host, "^([^:]+)")
    if h then
        host = h
    end

    host = lower(host)

    local config, err = get_ingressConfig(ngx)
    if err then
        ngx.log(ngx.ERR, "unable to get config: ", err)
        return ngx.exit(503)
    end

    -- this assumes we only allow exact host matches
    local paths = config[host]
    if not paths then
        ngx.log(ngx.ERR, "No server for host "..host.." returning 404")
        if custom_error then
            openCustomErrorURL(404, custom_error)
        else
            openURL(404, def_backend)
        end
        return
    end

    -- Longest-prefix match of the request path against this host's trie.
    local backend = trie_get(paths, ngx.var.uri)

    if not backend then
        ngx.log(ngx.ERR, "No server for host "..host.." and path "..ngx.var.uri.." returning 404")
        if custom_error then
            openCustomErrorURL(404, custom_error)
        else
            openURL(404, def_backend)
        end
        return
    end

    local address = backend.host
    ngx.var.upstream_port = backend.port or 80

    if dns_cache_options then
        local dns = cache.new(dns_cache_options)
        -- qtype 1 == A record
        local answer, err, stale = dns:query(address, { qtype = 1 })
        if err or (not answer) then
            -- prefer a stale answer over none at all
            answer = stale or nil
        end
        if answer and answer[1] then
            local ans = answer[1]
            if ans.address then
                address = ans.address
            end
        else
            -- fall through with the unresolved name; nginx's resolver
            -- directive gets a chance at it
            ngx.log(ngx.ERR, "dns failed for ", address, " with ", err, " => ", encode(answer or ""))
        end
    end

    ngx.var.upstream_host = address
    return
end
|
||||
|
||||
-- Per-worker initialisation hook. Intentionally empty: the config is
-- loaded lazily from the shared dict on first request instead.
function _M.init_worker(ngx)
end
|
||||
|
||||
-- Initialise module-level settings from `options`:
--   def_backend  : URL of the default (404) backend
--   custom_error : URL of the custom error backend, if any
--   resolvers    : space-separated nameserver list; presence enables the
--                  DNS cache (only the first entry is used)
function _M.init(ngx, options)
    def_backend = options.def_backend
    custom_error = options.custom_error

    -- try to create a dns cache
    local resolver_list = options.resolvers
    if not resolver_list then
        return
    end

    cache.init_cache(512)
    local nameservers = trie.strsplit(" ", resolver_list)
    dns_cache_options = {
        dict = "dns_cache",
        negative_ttl = nil,
        max_stale = 900,
        normalise_ttl = false,
        resolver = {
            nameservers = { nameservers[1] }
        }
    }
end
|
||||
|
||||
-- Dump the current routing config as JSON. This is the raw in-memory
-- structure (tries included), intended for debugging.
function _M.config(ngx)
    ngx.header.content_type = "application/json"
    ngx.print(encode({ ingress = ingressConfig }))
end
|
||||
|
||||
-- HTTP handler: rebuild the routing table from a POSTed JSON array of
-- Kubernetes Ingress objects. The table maps host -> trie of paths ->
-- { host = <svc>.<ns>.svc.<cluster domain>, port = <servicePort> }.
-- On success it is stored (JSON-encoded) in the "ingress" shared dict
-- and mirrored into this worker's cache.
function _M.update_ingress(ngx)
    ngx.header.content_type = "application/json"

    if ngx.req.get_method() ~= "POST" then
        ngx.print(encode({
            message = "only POST request"
        }))
        ngx.exit(400)
        return
    end

    ngx.req.read_body()
    local data = ngx.req.get_body_data()
    -- BUGFIX: cjson.decode raises on malformed JSON, so the old
    -- `if not val` guard was unreachable and bad input produced a raw
    -- Lua error. Trap it and answer with an explicit 400 instead.
    local decode_ok, val = pcall(decode, data)

    if not decode_ok or not val then
        ngx.log(ngx.ERR, "failed to decode body")
        ngx.print(encode({
            message = "Error updating Ingress rules: invalid JSON body"
        }))
        return ngx.exit(400)
    end

    -- BUGFIX: was `config = {}` — an accidental global.
    local config = {}

    for _, ingress in ipairs(val) do
        local namespace = ingress.metadata.namespace

        local spec = ingress.spec
        -- we do not allow default ingress backends right now.
        for _, rule in ipairs(spec.rules) do
            local host = rule.host
            local paths = config[host]
            if not paths then
                paths = trie.new()
                config[host] = paths
            end
            rule.http = rule.http or { paths = {}}
            for _, path in ipairs(rule.http.paths) do
                -- Service DNS name: <service>.<namespace>.svc.<domain>
                local hostname = table_concat(
                    {
                        path.backend.serviceName,
                        namespace,
                        "svc",
                        cluster_domain
                    }, ".")
                local backend = {
                    host = hostname,
                    port = path.backend.servicePort
                }

                paths:add(path.path, backend)
            end
        end
    end

    local d = ngx.shared["ingress"]
    local ok, err, _ = d:set("ingressConfig", encode(config))
    if not ok then
        ngx.log(ngx.ERR, "Error: "..err)
        local res = encode({
            message = "Error updating Ingress rules: "..err
        })
        ngx.print(res)
        return ngx.exit(500)
    end

    -- refresh this worker's cached copy immediately
    ingressConfig = config

    local res = encode({
        message = "Ingress rules updated"
    })
    ngx.print(res)
end
|
||||
|
||||
return _M
|
||||
78
controllers/nginx-third-party/lua/trie.lua
vendored
Normal file
78
controllers/nginx-third-party/lua/trie.lua
vendored
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
-- Simple trie for URLs.
-- Keys are "/"-separated paths; each node keeps its payload under the
-- reserved key `__value`, and lookups return the value of the deepest
-- matching prefix.

local _M = {}

local mt = {
    __index = _M
}

-- http://lua-users.org/wiki/SplitJoin
local strfind, tinsert, strsub = string.find, table.insert, string.sub

-- Split `text` on `delimiter` (interpreted as a Lua pattern) and return
-- the list of pieces, including empty ones.
function _M.strsplit(delimiter, text)
    local pieces = {}
    local cursor = 1
    repeat
        local s, e = strfind(text, delimiter, cursor)
        if s then -- found?
            tinsert(pieces, strsub(text, cursor, s - 1))
            cursor = e + 1
        else
            tinsert(pieces, strsub(text, cursor))
        end
    until not s
    return pieces
end

local strsplit = _M.strsplit

-- Create an empty trie.
function _M.new()
    return setmetatable({}, mt)
end

-- Insert `val` at path `key`.
function _M.add(t, key, val)
    -- "/" would split into two empty segments; treat it as the single
    -- empty segment (the root child) instead.
    local segments
    if key == "/" then
        segments = { "" }
    else
        segments = strsplit("/", key)
    end

    local node = t
    for _, seg in ipairs(segments) do
        if not node[seg] then
            node[seg] = {}
        end
        node = node[seg]
    end
    node.__value = val
end

-- Longest-prefix lookup: walk `key` segment by segment and return the
-- value of the deepest node that carries one (may be nil).
function _M.get(t, key)
    local segments = strsplit("/", key)

    local node = t
    -- this may be nil
    local found = t.__value
    for _, seg in ipairs(segments) do
        local child = node[seg]
        if not child then
            break
        end
        node = child
        if node.__value then
            found = node.__value
        end
    end

    -- may be nil
    return found
end

return _M
|
||||
2
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/.gitignore
vendored
Normal file
2
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
t/servroot/
|
||||
t/error.log
|
||||
20
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/LICENSE.txt
vendored
Normal file
20
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/LICENSE.txt
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Hamish Forbes
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
23
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/Makefile
vendored
Normal file
23
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/Makefile
vendored
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
OPENRESTY_PREFIX=/usr/local/openresty
|
||||
|
||||
PREFIX ?= /usr/local
|
||||
LUA_INCLUDE_DIR ?= $(PREFIX)/include
|
||||
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
|
||||
INSTALL ?= install
|
||||
TEST_FILE ?= t
|
||||
|
||||
.PHONY: all test leak
|
||||
|
||||
all: ;
|
||||
|
||||
|
||||
install: all
|
||||
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/dns
|
||||
|
||||
leak: all
|
||||
TEST_NGINX_CHECK_LEAK=1 TEST_NGINX_NO_SHUFFLE=1 PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r $(TEST_FILE)
|
||||
|
||||
test: all
|
||||
TEST_NGINX_NO_SHUFFLE=1 PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r $(TEST_FILE)
|
||||
util/lua-releng.pl
|
||||
|
||||
111
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/README.md
vendored
Normal file
111
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/README.md
vendored
Normal file
|
|
@ -0,0 +1,111 @@
|
|||
# lua-resty-dns-cache
|
||||
|
||||
A wrapper for [lua-resty-dns](https://github.com/openresty/lua-resty-dns) to cache responses based on record TTLs.
|
||||
|
||||
Uses [lua-resty-lrucache](https://github.com/openresty/lua-resty-lrucache) and [ngx.shared.DICT](https://github.com/openresty/lua-nginx-module#ngxshareddict) to provide a 2 level cache.
|
||||
|
||||
Can repopulate cache in the background while returning stale answers.
|
||||
|
||||
# Overview
|
||||
|
||||
```lua
|
||||
lua_shared_dict dns_cache 1m;
|
||||
|
||||
init_by_lua '
|
||||
require("resty.dns.cache").init_cache(200)
|
||||
';
|
||||
|
||||
server {
|
||||
|
||||
listen 80;
|
||||
server_name dns_cache;
|
||||
|
||||
location / {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
negative_ttl = 30,
|
||||
max_stale = 300,
|
||||
resolver = {
|
||||
nameservers = {"123.123.123.123"}
|
||||
}
|
||||
})
|
||||
|
||||
local host = ngx.req.get_uri_args()["host"] or "www.google.com"
|
||||
|
||||
local answer, err, stale = dns:query(host)
|
||||
if err then
|
||||
if stale then
|
||||
ngx.header["Warning"] = "110: Response is stale"
|
||||
answer = stale
|
||||
ngx.log(ngx.ERR, err)
|
||||
else
|
||||
ngx.status = 500
|
||||
ngx.say(err)
|
||||
return ngx.exit(ngx.status)
|
||||
end
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
# Methods
|
||||
### init_cache
|
||||
`syntax: ok, err = dns_cache.init_cache(max_items?)`
|
||||
|
||||
Creates a global lrucache object for caching responses.
|
||||
|
||||
Accepts an optional `max_items` argument, defaults to 200 entries.
|
||||
|
||||
Calling this repeatedly will reset the LRU cache
|
||||
|
||||
### initted
|
||||
`syntax: ok = dns_cache.initted()`
|
||||
|
||||
Returns `true` if LRU Cache has been initialised
|
||||
|
||||
### new
|
||||
`syntax: ok, err = dns_cache.new(opts)`
|
||||
|
||||
Returns a new DNS cache instance. Returns `nil` and a string on error
|
||||
|
||||
Accepts a table of options, if no shared dictionary is provided only lrucache is used.
|
||||
|
||||
* `dict` - Name of the [ngx.shared.DICT](https://github.com/openresty/lua-nginx-module#ngxshareddict) to use for cache.
|
||||
* `resolver` - Table of options passed to [lua-resty-dns](https://github.com/openresty/lua-resty-dns#new). Defaults to using Google DNS.
|
||||
* `normalise_ttl` - Boolean. Reduces TTL in cached answers to account for cached time. Defaults to `true`.
|
||||
* `negative_ttl` - Time in seconds to cache negative / error responses. `nil` or `false` disables caching negative responses. Defaults to `false`
|
||||
* `minimise_ttl` - Boolean. Set cache TTL based on the shortest DNS TTL in all responses rather than the first response. Defaults to `false`
|
||||
* `max_stale` - Number of seconds past expiry to return stale content rather than querying. Stale hits will trigger a non-blocking background query to repopulate cache.
|
||||
|
||||
|
||||
### query
|
||||
`syntax: answers, err, stale = c:query(name, opts?)`
|
||||
|
||||
Passes through to lua-resty-dns' [query](https://github.com/openresty/lua-resty-dns#query) method.
|
||||
|
||||
Returns an extra `stale` variable containing stale data if a resolver cannot be contacted.
|
||||
|
||||
### tcp_query
|
||||
`syntax: answers, err, stale = c:tcp_query(name, opts?)`
|
||||
|
||||
Passes through to lua-resty-dns' [tcp_query](https://github.com/openresty/lua-resty-dns#tcp_query) method.
|
||||
|
||||
Returns an extra `stale` variable containing stale data if a resolver cannot be contacted.
|
||||
|
||||
### set_timeout
|
||||
`syntax: c:set_timeout(time)`
|
||||
|
||||
Passes through to lua-resty-dns' [set_timeout](https://github.com/openresty/lua-resty-dns#set_timeout) method.
|
||||
|
||||
## Constants
|
||||
lua-resty-dns' [constants](https://github.com/openresty/lua-resty-dns#constants) are accessible on the `resty.dns.cache` object too.
|
||||
|
||||
## TODO
|
||||
* Cap'n'proto serialisation
|
||||
|
||||
449
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/lib/resty/dns/cache.lua
vendored
Normal file
449
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/lib/resty/dns/cache.lua
vendored
Normal file
|
|
@ -0,0 +1,449 @@
|
|||
local ngx_log = ngx.log
|
||||
local ngx_DEBUG = ngx.DEBUG
|
||||
local ngx_ERR = ngx.ERR
|
||||
local ngx_shared = ngx.shared
|
||||
local ngx_time = ngx.time
|
||||
local resty_resolver = require "resty.dns.resolver"
|
||||
local resty_lrucache = require "resty.lrucache"
|
||||
local cjson = require "cjson"
|
||||
local json_encode = cjson.encode
|
||||
local json_decode = cjson.decode
|
||||
local tbl_concat = table.concat
|
||||
local tonumber = tonumber
|
||||
local _ngx_timer_at = ngx.timer.at
|
||||
local ngx_worker_pid = ngx.worker.pid
|
||||
|
||||
-- Wrapper around ngx.timer.at that logs scheduling failures instead of
-- letting them pass silently, then returns the results unchanged.
local function ngx_timer_at(delay, func, ...)
    local ok, err = _ngx_timer_at(delay, func, ...)
    if not ok then
        ngx_log(ngx_ERR, "Timer Error: ", err)
    end
    return ok, err
end
|
||||
|
||||
|
||||
-- Debug logger: tables are JSON-encoded before logging; varargs are
-- passed straight through to ngx.log.
local debug_log = function(msg, ...)
    if type(msg) == 'table' then
        local ok, json = pcall(json_encode, msg)
        if ok then
            msg = json
        else
            -- on pcall failure `json` holds the encoder's error message;
            -- the raw table still reaches ngx.log below
            ngx_log(ngx_ERR, json)
        end
    end
    ngx_log(ngx_DEBUG, msg, ...)
end
|
||||
|
||||
local _M = {
|
||||
_VERSION = '0.01',
|
||||
TYPE_A = resty_resolver.TYPE_A,
|
||||
TYPE_NS = resty_resolver.TYPE_NS,
|
||||
TYPE_CNAME = resty_resolver.TYPE_CNAME,
|
||||
TYPE_PTR = resty_resolver.TYPE_PTR,
|
||||
TYPE_MX = resty_resolver.TYPE_MX,
|
||||
TYPE_TXT = resty_resolver.TYPE_TXT,
|
||||
TYPE_AAAA = resty_resolver.TYPE_AAAA,
|
||||
TYPE_SRV = resty_resolver.TYPE_SRV,
|
||||
TYPE_SPF = resty_resolver.TYPE_SPF,
|
||||
CLASS_IN = resty_resolver.CLASS_IN
|
||||
}
|
||||
|
||||
local DEBUG = false
|
||||
|
||||
local mt = { __index = _M }
|
||||
|
||||
local lru_cache_defaults = {200}
|
||||
local resolver_defaults = {
|
||||
nameservers = {"8.8.8.8", "8.8.4.4"}
|
||||
}
|
||||
|
||||
-- Global lrucache instance
|
||||
local lru_cache
|
||||
local max_items = 200
|
||||
|
||||
-- Create (or reset) the module-global LRU cache shared by all instances.
-- `size` optionally overrides the maximum item count (default 200; the
-- override is remembered for subsequent calls).
-- Returns true on success, or nil plus an error message.
function _M.init_cache(size)
    if size then max_items = size end
    local err
    if DEBUG then debug_log("Initialising lru cache with ", max_items, " max items") end
    lru_cache, err = resty_lrucache.new(max_items)
    if not lru_cache then
        return nil, err
    end
    return true
end
|
||||
|
||||
|
||||
-- True once init_cache() has successfully created the global LRU cache.
function _M.initted()
    if lru_cache then return true end
    return false
end
|
||||
|
||||
|
||||
-- Create a DNS cache instance.
-- opts (all optional):
--   normalise_ttl (bool, default true)  - shrink cached TTLs by time spent in cache
--   minimise_ttl  (bool, default false) - cache for the shortest TTL in the answer
--   negative_ttl  (number|false)        - seconds to cache failed lookups
--   max_stale     (number, default 0)   - seconds past expiry to serve stale data
--   resolver      (table)               - passed to resty.dns.resolver:new;
--                                         defaults to Google public DNS
--   dict          (string)              - name of a lua_shared_dict used as the
--                                         second cache level
-- Returns the instance, or nil plus the resolver's error message.
function _M.new(opts)
    local self, err = { opts = opts}, nil
    opts = opts or {}

    -- Set defaults
    -- (explicit `~= nil` checks so a caller can pass `false`)
    if opts.normalise_ttl ~= nil then self.normalise_ttl = opts.normalise_ttl else self.normalise_ttl = true end
    if opts.minimise_ttl ~= nil then self.minimise_ttl = opts.minimise_ttl else self.minimise_ttl = false end
    if opts.negative_ttl ~= nil then
        self.negative_ttl = tonumber(opts.negative_ttl)
    else
        self.negative_ttl = false
    end
    if opts.max_stale ~= nil then
        self.max_stale = tonumber(opts.max_stale)
    else
        self.max_stale = 0
    end

    opts.resolver = opts.resolver or resolver_defaults
    self.resolver, err = resty_resolver:new(opts.resolver)
    if not self.resolver then
        return nil, err
    end

    if opts.dict then
        self.dict = ngx_shared[opts.dict]
    end
    return setmetatable(self, mt)
end
|
||||
|
||||
|
||||
-- Empty both cache levels: recreates the LRU cache and marks every key
-- in the shared dict expired; with `hard` the dict's memory is also
-- reclaimed immediately via flush_expired().
-- NOTE(review): init_cache() resets the module-global LRU cache, so this
-- flushes it for every instance, not just `self` — confirm intended.
function _M.flush(self, hard)
    local ok, err = self.init_cache()
    if not ok then
        ngx_log(ngx_ERR, err)
    end
    if self.dict then
        if DEBUG then debug_log("Flushing dictionary") end
        self.dict:flush_all()
        if hard then
            local flushed = self.dict:flush_expired()
            if DEBUG then debug_log("Flushed ", flushed, " keys from memory") end
        end
    end
end
||||
|
||||
|
||||
-- Toggle module-wide debug logging (affects all instances).
function _M._debug(flag)
    DEBUG = flag
end
|
||||
|
||||
|
||||
--- Set the socket timeout on the underlying resty.dns.resolver instance.
-- Arguments are passed straight through to resolver:set_timeout().
function _M.set_timeout(self, ...)
    return self.resolver:set_timeout(...)
end
|
||||
|
||||
|
||||
--- Find the smallest TTL across all records in a DNS answer.
-- @param answer array of resolver answer records (each with a `ttl` field)
-- @return lowest ttl found, or nil for an empty answer
local function minimise_ttl(answer)
    if DEBUG then debug_log('Minimising TTL') end
    local lowest
    for i = 1, #answer do
        local record = answer[i]
        if DEBUG then debug_log('TTL ', record.name, ': ', record.ttl) end
        if lowest == nil or record.ttl < lowest then
            lowest = record.ttl
        end
    end
    return lowest
end
|
||||
|
||||
|
||||
--- Age the TTLs in a cached entry.
-- Subtracts the time elapsed since the entry was last stamped (data.now) from
-- every record's TTL, then re-stamps data.now with the current time. No-op
-- when the instance was created with normalise_ttl = false.
-- @param self cache instance
-- @param data cached entry ({ answer = ..., now = ... })
-- @return the same data table, possibly modified in place
local function normalise_ttl(self, data)
    if not self.normalise_ttl then
        return data
    end

    local now = ngx_time()
    local elapsed = now - data.now
    if DEBUG then debug_log("Normalising TTL, diff: ", elapsed) end
    for _, record in ipairs(data.answer) do
        if DEBUG then debug_log("Old: ", record.ttl, ", new: ", record.ttl - elapsed) end
        record.ttl = record.ttl - elapsed
    end
    data.now = now

    return data
end
|
||||
|
||||
|
||||
--- Fetch a cached answer for `key`, checking the LRU cache then the shared dict.
--
-- Return convention:
--   fresh data            -> data
--   only stale data found -> nil, stale_data
--   full miss             -> nil
-- Returned entries have their TTLs aged via normalise_ttl(). Entries are
-- demoted to stale by their absolute `expires` stamp, because the underlying
-- cache TTL is deliberately extended by max_stale (see cache_set).
local function cache_get(self, key)
    -- Try local LRU cache first
    local data, lru_stale
    if lru_cache then
        data, lru_stale = lru_cache:get(key)
        -- Set stale if should have expired
        if data and data.expires <= ngx_time() then
            lru_stale = data
            data = nil
        end
        if data then
            if DEBUG then
                debug_log('lru_cache HIT: ', key)
                debug_log(data)
            end
            return normalise_ttl(self, data)
        elseif DEBUG then
            debug_log('lru_cache MISS: ', key)
        end
    end

    -- lru_cache miss, try shared dict
    local dict = self.dict
    if dict then
        local data, flags, stale = dict:get_stale(key)
        -- Set stale if should have expired
        if data then
            data = json_decode(data)
            if data.expires <= ngx_time() then
                stale = true
            end
        end

        -- Dict data is stale, prefer stale LRU data (avoids the decode round trip)
        if stale and lru_stale then
            if DEBUG then
                debug_log('lru_cache STALE: ', key)
                debug_log(lru_stale)
            end
            return nil, normalise_ttl(self, lru_stale)
        end

        -- Definitely no lru data, going to have to try shared dict
        if not data then
            -- Full MISS on dict, return nil
            if DEBUG then debug_log('shared_dict MISS: ', key) end
            return nil
        end

        -- Return nil and dict cache if its stale
        if stale then
            if DEBUG then debug_log('shared_dict STALE: ', key) end
            return nil, normalise_ttl(self, data)
        end

        -- Fresh HIT from dict, repopulate the lru_cache
        if DEBUG then debug_log('shared_dict HIT: ', key) end
        if lru_cache then
            local ttl = data.expires - ngx_time()
            if DEBUG then debug_log('lru_cache SET: ', key, ' ', ttl) end
            lru_cache:set(key, data, ttl)
        end
        return normalise_ttl(self, data)
    elseif lru_stale then
        -- Return lru stale if no dict configured
        if DEBUG then
            debug_log('lru_cache STALE: ', key)
            debug_log(lru_stale)
        end
        return nil, normalise_ttl(self, lru_stale)
    end

    -- Fixed: the original condition read `not lru_cache or dict`, i.e.
    -- `(not lru_cache) or dict`, which only behaved correctly because every
    -- `dict` branch above returns early. Log only when neither layer exists.
    if not (lru_cache or dict) then
        ngx_log(ngx_ERR, "No cache defined")
    end
end
|
||||
|
||||
|
||||
--- Store an answer in both cache layers under `key`.
-- The entry records an absolute expiry (entry.expires) while the underlying
-- cache TTL is extended by max_stale, so stale answers stay retrievable.
-- Answers with a nil or 0 TTL are never cached.
local function cache_set(self, key, answer, ttl)
    -- Don't cache records with 0 TTL
    if ttl == nil or ttl == 0 then
        return
    end

    -- Calculate absolute expiry - used to populate lru_cache from shared_dict
    local now = ngx_time()
    local entry = {
        answer  = answer,
        now     = now,
        queried = now,
        expires = now + ttl,
    }

    -- Extend cache expiry if using stale
    local store_ttl = ttl
    if self.max_stale then
        store_ttl = store_ttl + self.max_stale
    end

    -- Set lru cache
    if lru_cache then
        if DEBUG then debug_log('lru_cache SET: ', key, ' ', store_ttl) end
        lru_cache:set(key, entry, store_ttl)
    end

    -- Set dict cache
    local dict = self.dict
    if dict then
        if DEBUG then debug_log('shared_dict SET: ', key, ' ', store_ttl) end
        local ok, err, forcible = dict:set(key, json_encode(entry), store_ttl)
        if not ok then
            ngx_log(ngx_ERR, 'shared_dict ERR: ', err)
        end
        if forcible then
            ngx_log(ngx_DEBUG, 'shared_dict full')
        end
    end
end
|
||||
|
||||
|
||||
--- Perform an actual DNS lookup via the given resolver method.
-- @param resolver resty.dns.resolver instance (passed as the method receiver)
-- @param query_func resolver.query or resolver.tcp_query
-- @param host name to resolve
-- @param opts resolver query options
-- @return answers table, or nil/false and an error message
local function _resolve(resolver, query_func, host, opts)
    if DEBUG then debug_log('Querying: ', host) end

    local answers, err = query_func(resolver, host, opts)
    if not answers then
        return answers, err
    end

    if DEBUG then debug_log(answers) end
    return answers
end
|
||||
|
||||
|
||||
--- Build the cache key for a host / query-type pair, e.g. "example.com|1".
local function cache_key(host, qtype)
    return host .. '|' .. qtype
end
|
||||
|
||||
|
||||
--- Try to take the per-key repopulation lock.
-- dict:add() succeeds for exactly one caller; the lock auto-expires after
-- 30s in case the holding worker dies before releasing it.
local function get_repopulate_lock(dict, host, qtype)
    local lock_key = cache_key(host, qtype or 1) .. '|lock'
    if DEBUG then debug_log("Locking '", lock_key, "' for ", 30, "s: ", ngx_worker_pid()) end
    return dict:add(lock_key, ngx_worker_pid(), 30)
end
|
||||
|
||||
|
||||
--- Release the per-key repopulation lock, but only if this worker holds it.
local function release_repopulate_lock(dict, host, qtype)
    local lock_key = cache_key(host, qtype or 1) .. '|lock'
    local holder = dict:get(lock_key)
    if DEBUG then debug_log("Releasing '", lock_key, "' for ", ngx_worker_pid(), " from ", holder) end
    if holder == ngx_worker_pid() then
        dict:delete(lock_key)
    else
        ngx_log(ngx_DEBUG, "couldnt release lock")
    end
end
|
||||
|
||||
|
||||
local _query
|
||||
|
||||
--- Timer callback: refresh a cache entry in the background.
-- Runs outside the request that scheduled it, so a fresh resolver must be
-- created - cosockets cannot be shared across contexts.
local function _repopulate(premature, self, host, opts, tcp)
    -- Timer fired during worker shutdown; nothing to do
    if premature then return end

    if DEBUG then debug_log("Repopulating '", host, "'") end

    -- Create a new resolver instance, cannot share sockets
    local err
    self.resolver, err = resty_resolver:new(self.opts.resolver)
    if err then
        ngx_log(ngx_ERR, err)
        return nil
    end

    -- Do not use stale when repopulating
    _query(self, host, opts, tcp, true)
end
|
||||
|
||||
|
||||
--- Schedule a background repopulation of `host`, at most once per expiry.
-- A shared-dict lock closes the window between the cached key expiring and
-- the background query completing, during which concurrent requests could
-- otherwise trigger duplicate repopulate timers.
local function repopulate(self, host, opts, tcp)
    local locked, lock_err = get_repopulate_lock(self.dict, host, opts.qtype)
    if locked then
        if DEBUG then debug_log("Attempting to repopulate '", host, "'") end
        local ok, err = ngx_timer_at(0, _repopulate, self, host, opts, tcp)
        if not ok then
            -- Fixed: the timer error was previously swallowed; log it and
            -- release the lock so another request can retry
            ngx_log(ngx_ERR, err)
            release_repopulate_lock(self.dict, host, opts.qtype)
        end
    elseif lock_err == "exists" then
        -- Another request/worker already holds the lock
        if DEBUG then debug_log("Lock not acquired") end
        return
    else
        -- Fixed: was ngx.log(ngx.ERR, err), inconsistent with the localised
        -- ngx_log/ngx_ERR aliases used throughout this module
        ngx_log(ngx_ERR, lock_err)
    end
end
|
||||
|
||||
|
||||
--- Core lookup: cache check, optional stale serving, live resolve, cache fill.
--
-- Returns, mirroring resty.dns.resolver plus a third "stale" value:
--   answer                    on a fresh cache hit or successful resolve
--   nil, nil, stale_answer    when serving stale within max_stale (background
--                             repopulate has been scheduled)
--   nil, err[, stale_answer]  when resolution failed
--
-- `repopulating` is true when invoked from the background timer; in that mode
-- the repopulate lock must be released on every exit path and stale data is
-- never served.
_query = function(self, host, opts, tcp, repopulating)
    -- Build cache key
    opts = opts or {}
    -- qtype defaults to 1 (A record), matching the resolver's default
    local key = cache_key(host, opts.qtype or 1)

    -- Check caches
    local answer
    local data, stale = cache_get(self, key)
    if data then
        -- Shouldn't get a cache hit when repopulating but better safe than sorry
        if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end
        answer = data.answer
        -- Don't return negative cache hits if negative_ttl is off in this instance
        -- (a negative entry may have been cached by another instance sharing the dict)
        if not answer.errcode or self.negative_ttl then
            return answer
        end
    end

    -- No fresh cache entry, return stale if within max_stale and trigger background repopulate
    if stale and not repopulating and self.max_stale > 0
        and (ngx_time() - stale.expires) < self.max_stale then
        if DEBUG then debug_log('max_stale ', self.max_stale) end
        repopulate(self, host, opts, tcp)
        if DEBUG then debug_log('Returning STALE: ', key) end
        return nil, nil, stale.answer
    end

    -- Try to resolve over UDP, or TCP when requested
    local resolver = self.resolver
    local query_func = resolver.query
    if tcp then
        query_func = resolver.tcp_query
    end

    local answer, err = _resolve(resolver, query_func, host, opts)
    if not answer then
        -- Couldn't resolve, return potential stale response with error msg
        if DEBUG then
            debug_log('Resolver error ', key, ': ', err)
            if stale then debug_log('Returning STALE: ', key) end
        end
        if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end
        if stale then stale = stale.answer end
        return nil, err, stale
    end

    local ttl

    -- Cache server errors for negative_cache seconds
    if answer.errcode then
        if self.negative_ttl then
            ttl = self.negative_ttl
        else
            -- Negative caching disabled: pass the error answer through uncached
            if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end
            return answer
        end
    else
        -- Cache for the lowest TTL in the chain of responses...
        if self.minimise_ttl then
            ttl = minimise_ttl(answer)
        elseif answer[1] then
            -- ... or just the first one
            ttl = answer[1].ttl or nil
        end
    end

    -- Set cache (no-op when ttl is nil or 0)
    cache_set(self, key, answer, ttl)

    if repopulating then release_repopulate_lock(self.dict, host, opts.qtype) end

    return answer
end
|
||||
|
||||
|
||||
--- Resolve `host` over UDP, using the cache.
-- See _query() for the answer / error / stale return convention.
function _M.query(self, host, opts)
    return _query(self, host, opts, false)
end
|
||||
|
||||
|
||||
--- Resolve `host` over TCP, using the cache.
-- See _query() for the answer / error / stale return convention.
function _M.tcp_query(self, host, opts)
    return _query(self, host, opts, true)
end
|
||||
|
||||
|
||||
return _M
|
||||
233
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/01-sanity.t
vendored
Normal file
233
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/01-sanity.t
vendored
Normal file
|
|
@ -0,0 +1,233 @@
|
|||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * 24;
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
};
|
||||
|
||||
no_long_string();
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Load module without errors.
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
echo "OK";
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
OK
|
||||
|
||||
|
||||
=== TEST 2: Can init cache - defaults
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
ngx.say(DNS_Cache.initted())
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
true
|
||||
|
||||
=== TEST 3: Can init cache - user config
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache(300)
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
ngx.say(DNS_Cache.initted())
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
true
|
||||
|
||||
=== TEST 4: Can init new instance - defaults
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache(300)
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new()
|
||||
if dns then
|
||||
ngx.say("OK")
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
OK
|
||||
|
||||
=== TEST 5: Can init new instance - user config
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache(300)
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
negative_ttl = 10,
|
||||
resolver = { nameservers = {"10.10.10.10"} }
|
||||
})
|
||||
if dns then
|
||||
ngx.say("OK")
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
OK
|
||||
|
||||
=== TEST 6: Resty DNS errors are passed through
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache(300)
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
resolver = { }
|
||||
})
|
||||
if dns then
|
||||
ngx.say("OK")
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
no nameservers specified
|
||||
|
||||
=== TEST 7: Can create instance with shared dict
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
ngx.say(DNS_Cache.initted())
|
||||
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache"
|
||||
})
|
||||
if dns then
|
||||
ngx.say("OK")
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
true
|
||||
OK
|
||||
|
||||
=== TEST 8: Can create instance with shared dict and no lru_cache
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
}
|
||||
--- config
|
||||
location /sanity {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
ngx.say(DNS_Cache.initted())
|
||||
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache"
|
||||
})
|
||||
if dns then
|
||||
ngx.say("OK")
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /sanity
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
false
|
||||
OK
|
||||
195
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/02-resolve.t
vendored
Normal file
195
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/02-resolve.t
vendored
Normal file
|
|
@ -0,0 +1,195 @@
|
|||
|
||||
use lib 't';
|
||||
use TestDNS;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * 12;
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
};
|
||||
|
||||
no_long_string();
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Can resolve with lru + dict
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {
|
||||
nameservers = { {"127.0.0.1", "1953"} }
|
||||
}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
|
||||
|
||||
=== TEST 2: Can resolve with lru only
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
resolver = {
|
||||
nameservers = { {"127.0.0.1", "1953"} }
|
||||
}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
|
||||
|
||||
=== TEST 3: Can resolve with dict only
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {
|
||||
nameservers = { {"127.0.0.1", "1953"} }
|
||||
}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
|
||||
|
||||
=== TEST 4: Can resolve with no cache, error thrown
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
resolver = {
|
||||
nameservers = { {"127.0.0.1", "1953"} }
|
||||
}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
No cache defined
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
|
||||
|
||||
873
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/03-cache.t
vendored
Normal file
873
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/03-cache.t
vendored
Normal file
|
|
@ -0,0 +1,873 @@
|
|||
use lib 't';
|
||||
use TestDNS;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * 47;
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
lua_socket_log_errors off;
|
||||
};
|
||||
|
||||
no_long_string();
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Response comes from cache on second hit
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache HIT
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
|
||||
|
||||
=== TEST 2: Response comes from dict on miss
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache() -- reset cache
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache MISS
|
||||
shared_dict HIT
|
||||
lru_cache SET
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456}]
|
||||
|
||||
|
||||
=== TEST 3: Stale response from lru served if resolver down
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_sleep 2;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if stale then
|
||||
answer = stale
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache MISS
|
||||
lru_cache STALE
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
|
||||
|
||||
|
||||
=== TEST 4: Stale response from dict served if resolver down
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_sleep 2;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
|
||||
})
|
||||
DNS_Cache.init_cache() -- reset cache
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if stale then
|
||||
answer = stale
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache MISS
|
||||
shared_dict STALE
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
|
||||
|
||||
=== TEST 5: Stale response from lru served if resolver down, no dict
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_sleep 2;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if stale then
|
||||
answer = stale
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache MISS
|
||||
lru_cache STALE
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
|
||||
|
||||
=== TEST 6: Stale response from dict served if resolver down, no lru
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_sleep 2;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1954"}}, retrans = 1, timeout = 100}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if stale then
|
||||
answer = stale
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
shared_dict STALE
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":-1}]
|
||||
|
||||
=== TEST 7: TTLs are reduced
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_sleep 2;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 100}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(answer)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 10 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":8}]
|
||||
|
||||
=== TEST 8: TTL reduction can be disabled
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_sleep 2;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
normalise_ttl = false,
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 100}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(answer)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 10 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":10}]
|
||||
|
||||
=== TEST 9: Negative responses are not cached by default
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns._debug(true)
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
rcode => 3,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
SET
|
||||
--- response_body
|
||||
{"errcode":3,"errstr":"name error"}
|
||||
|
||||
|
||||
=== TEST 10: Negative responses can be cached
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
negative_ttl = 10,
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
negative_ttl = 10,
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
rcode => 3,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache HIT
|
||||
--- response_body
|
||||
{"errcode":3,"errstr":"name error"}
|
||||
|
||||
=== TEST 11: Cached negative responses are not returned by default
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
echo_location /_t;
|
||||
echo_location /_t2;
|
||||
}
|
||||
location /_t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
negative_ttl = 10,
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns._debug(true)
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
';
|
||||
}
|
||||
location /_t2 {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1954"}, retrans = 1, timeout = 100}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
rcode => 3,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache SET
|
||||
lru_cache HIT
|
||||
--- response_body
|
||||
null
|
||||
|
||||
=== TEST 12: Cache TTL can be minimised
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
minimise_ttl = true,
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [
|
||||
{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 },
|
||||
{ name => "l.www.google.com", ipv6 => "::1", ttl => 10 },
|
||||
],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache SET: www.google.com|1 10
|
||||
shared_dict SET: www.google.com|1 10
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456},{"address":"0:0:0:0:0:0:0:1","type":28,"class":1,"name":"l.www.google.com","ttl":10}]
|
||||
|
||||
=== TEST 13: Cache TTLs not minimised by default
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}}
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [
|
||||
{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 123456 },
|
||||
{ name => "l.www.google.com", ipv6 => "::1", ttl => 10 },
|
||||
],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
lru_cache SET: www.google.com|1 123456
|
||||
shared_dict SET: www.google.com|1 123456
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":123456},{"address":"0:0:0:0:0:0:0:1","type":28,"class":1,"name":"l.www.google.com","ttl":10}]
|
||||
275
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/04-repopulate.t
vendored
Normal file
275
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/04-repopulate.t
vendored
Normal file
|
|
@ -0,0 +1,275 @@
|
|||
use lib 't';
|
||||
use TestDNS;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * 17;
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
lua_socket_log_errors off;
|
||||
};
|
||||
|
||||
no_long_string();
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Query is triggered when cache is expired
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}},
|
||||
max_stale = 10
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
dns._debug(true)
|
||||
|
||||
-- Sleep beyond response TTL
|
||||
ngx.sleep(1.1)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
if stale then
|
||||
answer = stale
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
end
|
||||
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
ngx.sleep(0.1)
|
||||
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
Returning STALE
|
||||
Attempting to repopulate 'www.google.com'
|
||||
Repopulating 'www.google.com'
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":1}]
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":0}]
|
||||
|
||||
=== TEST 2: Query is not triggered when cache expires and max_stale is disabled
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 50 },
|
||||
max_stale = 0
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
|
||||
dns._debug(true)
|
||||
|
||||
-- Sleep beyond response TTL
|
||||
ngx.sleep(1.1)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
if stale then
|
||||
answer = stale
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
end
|
||||
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
ngx.sleep(0.1)
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
Attempting to repopulate 'www.google.com'
|
||||
Repopulating 'www.google.com'
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":0}]
|
||||
|
||||
|
||||
=== TEST 3: Repopulate ignores max_stale
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 50 },
|
||||
max_stale = 10,
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
dns._debug(true)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
|
||||
-- Sleep beyond response TTL
|
||||
ngx.sleep(1.1)
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
if stale then
|
||||
answer = stale
|
||||
else
|
||||
ngx.say(err)
|
||||
end
|
||||
end
|
||||
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
|
||||
ngx.sleep(0.1)
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- error_log
|
||||
Repopulating 'www.google.com'
|
||||
Querying: www.google.com
|
||||
Resolver error
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":0}]
|
||||
|
||||
=== TEST 4: Multiple queries only trigger 1 repopulate timer
|
||||
--- http_config eval
|
||||
"$::HttpConfig"
|
||||
. q{
|
||||
lua_shared_dict dns_cache 1m;
|
||||
init_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
DNS_Cache.init_cache()
|
||||
';
|
||||
}
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local DNS_Cache = require("resty.dns.cache")
|
||||
local dns, err = DNS_Cache.new({
|
||||
dict = "dns_cache",
|
||||
resolver = {nameservers = {{"127.0.0.1", "1953"}}, retrans = 1, timeout = 50 },
|
||||
repopulate = true,
|
||||
})
|
||||
if not dns then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns.resolver._id = 125
|
||||
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
dns._debug(true)
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
local answer, err, stale = dns:query("www.google.com", { qtype = dns.TYPE_A })
|
||||
if not answer then
|
||||
ngx.say(err)
|
||||
end
|
||||
|
||||
local cjson = require"cjson"
|
||||
ngx.say(cjson.encode(answer))
|
||||
';
|
||||
}
|
||||
--- udp_listen: 1953
|
||||
--- udp_reply dns
|
||||
{
|
||||
id => 125,
|
||||
opcode => 0,
|
||||
qname => 'www.google.com',
|
||||
answer => [{ name => "www.google.com", ipv4 => "127.0.0.1", ttl => 1 }],
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- no_error_log
|
||||
Attempting to repopulate www.google.com
|
||||
--- response_body
|
||||
[{"address":"127.0.0.1","type":1,"class":1,"name":"www.google.com","ttl":1}]
|
||||
271
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/TestDNS.pm
vendored
Normal file
271
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/t/TestDNS.pm
vendored
Normal file
|
|
@ -0,0 +1,271 @@
|
|||
package TestDNS;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use 5.010001;
|
||||
use Test::Nginx::Socket::Lua -Base;
|
||||
#use JSON::XS;
|
||||
|
||||
use constant {
|
||||
TYPE_A => 1,
|
||||
TYPE_TXT => 16,
|
||||
TYPE_CNAME => 5,
|
||||
TYPE_AAAA => 28,
|
||||
CLASS_INTERNET => 1,
|
||||
};
|
||||
|
||||
sub encode_name ($);
|
||||
sub encode_ipv4 ($);
|
||||
sub encode_ipv6 ($);
|
||||
sub gen_dns_reply ($$);
|
||||
|
||||
sub Test::Base::Filter::dns {
|
||||
my ($self, $code) = @_;
|
||||
|
||||
my $args = $self->current_arguments;
|
||||
#warn "args: $args";
|
||||
if (defined $args && $args ne 'tcp' && $args ne 'udp') {
|
||||
die "Invalid argument to the \"dns\" filter: $args\n";
|
||||
}
|
||||
|
||||
my $mode = $args // 'udp';
|
||||
|
||||
my $block = $self->current_block;
|
||||
|
||||
my $pointer_spec = $block->dns_pointers;
|
||||
my @pointers;
|
||||
if (defined $pointer_spec) {
|
||||
my @loops = split /\s*,\s*/, $pointer_spec;
|
||||
for my $loop (@loops) {
|
||||
my @nodes = split /\s*=>\s*/, $loop;
|
||||
my $prev;
|
||||
for my $n (@nodes) {
|
||||
if ($n !~ /^\d+$/ || $n == 0) {
|
||||
die "bad name ID in the --- dns_pointers: $n\n";
|
||||
}
|
||||
|
||||
if (!defined $prev) {
|
||||
$prev = $n;
|
||||
next;
|
||||
}
|
||||
|
||||
$pointers[$prev] = $n;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
my $input = eval $code;
|
||||
if ($@) {
|
||||
die "failed to evaluate code $code: $@\n";
|
||||
}
|
||||
|
||||
if (!ref $input) {
|
||||
return $input;
|
||||
}
|
||||
|
||||
if (ref $input eq 'ARRAY') {
|
||||
my @replies;
|
||||
for my $t (@$input) {
|
||||
push @replies, gen_dns_reply($t, $mode);
|
||||
}
|
||||
|
||||
return \@replies;
|
||||
}
|
||||
|
||||
if (ref $input eq 'HASH') {
|
||||
return gen_dns_reply($input, $mode);
|
||||
}
|
||||
|
||||
return $input;
|
||||
}
|
||||
|
||||
sub gen_dns_reply ($$) {
|
||||
my ($t, $mode) = @_;
|
||||
|
||||
my @raw_names;
|
||||
push @raw_names, \($t->{qname});
|
||||
|
||||
my $answers = $t->{answer} // [];
|
||||
if (!ref $answers) {
|
||||
$answers = [$answers];
|
||||
}
|
||||
|
||||
for my $ans (@$answers) {
|
||||
push @raw_names, \($ans->{name});
|
||||
if (defined $ans->{cname}) {
|
||||
push @raw_names, \($ans->{cname});
|
||||
}
|
||||
}
|
||||
|
||||
for my $rname (@raw_names) {
|
||||
$$rname = encode_name($$rname // "");
|
||||
}
|
||||
|
||||
my $qname = $t->{qname};
|
||||
|
||||
my $s = '';
|
||||
|
||||
my $id = $t->{id} // 0;
|
||||
|
||||
$s .= pack("n", $id);
|
||||
#warn "id: ", length($s), " ", encode_json([$s]);
|
||||
|
||||
my $qr = $t->{qr} // 1;
|
||||
|
||||
my $opcode = $t->{opcode} // 0;
|
||||
|
||||
my $aa = $t->{aa} // 0;
|
||||
|
||||
my $tc = $t->{tc} // 0;
|
||||
my $rd = $t->{rd} // 1;
|
||||
my $ra = $t->{ra} // 1;
|
||||
my $rcode = $t->{rcode} // 0;
|
||||
|
||||
my $flags = ($qr << 15) + ($opcode << 11) + ($aa << 10) + ($tc << 9) + ($rd << 8) + ($ra << 7) + $rcode;
|
||||
#warn sprintf("flags: %b", $flags);
|
||||
|
||||
$flags = pack("n", $flags);
|
||||
$s .= $flags;
|
||||
|
||||
#warn "flags: ", length($flags), " ", encode_json([$flags]);
|
||||
|
||||
my $qdcount = $t->{qdcount} // 1;
|
||||
my $ancount = $t->{ancount} // scalar @$answers;
|
||||
my $nscount = 0;
|
||||
my $arcount = 0;
|
||||
|
||||
$s .= pack("nnnn", $qdcount, $ancount, $nscount, $arcount);
|
||||
|
||||
#warn "qname: ", length($qname), " ", encode_json([$qname]);
|
||||
|
||||
$s .= $qname;
|
||||
|
||||
my $qs_type = $t->{qtype} // TYPE_A;
|
||||
my $qs_class = $t->{qclass} // CLASS_INTERNET;
|
||||
|
||||
$s .= pack("nn", $qs_type, $qs_class);
|
||||
|
||||
for my $ans (@$answers) {
|
||||
my $name = $ans->{name};
|
||||
my $type = $ans->{type};
|
||||
my $class = $ans->{class};
|
||||
my $ttl = $ans->{ttl};
|
||||
my $rdlength = $ans->{rdlength};
|
||||
my $rddata = $ans->{rddata};
|
||||
|
||||
my $ipv4 = $ans->{ipv4};
|
||||
if (defined $ipv4) {
|
||||
my ($data, $len) = encode_ipv4($ipv4);
|
||||
$rddata //= $data;
|
||||
$rdlength //= $len;
|
||||
$type //= TYPE_A;
|
||||
$class //= CLASS_INTERNET;
|
||||
}
|
||||
|
||||
my $ipv6 = $ans->{ipv6};
|
||||
if (defined $ipv6) {
|
||||
my ($data, $len) = encode_ipv6($ipv6);
|
||||
$rddata //= $data;
|
||||
$rdlength //= $len;
|
||||
$type //= TYPE_AAAA;
|
||||
$class //= CLASS_INTERNET;
|
||||
}
|
||||
|
||||
my $cname = $ans->{cname};
|
||||
if (defined $cname) {
|
||||
$rddata //= $cname;
|
||||
$rdlength //= length $rddata;
|
||||
$type //= TYPE_CNAME;
|
||||
$class //= CLASS_INTERNET;
|
||||
}
|
||||
|
||||
my $txt = $ans->{txt};
|
||||
if (defined $txt) {
|
||||
$rddata //= $txt;
|
||||
$rdlength //= length $rddata;
|
||||
$type //= TYPE_TXT;
|
||||
$class //= CLASS_INTERNET;
|
||||
}
|
||||
|
||||
$type //= 0;
|
||||
$class //= 0;
|
||||
$ttl //= 0;
|
||||
|
||||
#warn "rdlength: $rdlength, rddata: ", encode_json([$rddata]), "\n";
|
||||
|
||||
$s .= $name . pack("nnNn", $type, $class, $ttl, $rdlength) . $rddata;
|
||||
}
|
||||
|
||||
if ($mode eq 'tcp') {
|
||||
return pack("n", length($s)) . $s;
|
||||
}
|
||||
|
||||
return $s;
|
||||
}
|
||||
|
||||
sub encode_ipv4 ($) {
|
||||
my $txt = shift;
|
||||
my @bytes = split /\./, $txt;
|
||||
return pack("CCCC", @bytes), 4;
|
||||
}
|
||||
|
||||
sub encode_ipv6 ($) {
|
||||
my $txt = shift;
|
||||
my @groups = split /:/, $txt;
|
||||
my $nils = 0;
|
||||
my $nonnils = 0;
|
||||
for my $g (@groups) {
|
||||
if ($g eq '') {
|
||||
$nils++;
|
||||
} else {
|
||||
$nonnils++;
|
||||
$g = hex($g);
|
||||
}
|
||||
}
|
||||
|
||||
my $total = $nils + $nonnils;
|
||||
if ($total > 8 ) {
|
||||
die "Invalid IPv6 address: too many groups: $total: $txt";
|
||||
}
|
||||
|
||||
if ($nils) {
|
||||
my $found = 0;
|
||||
my @new_groups;
|
||||
for my $g (@groups) {
|
||||
if ($g eq '') {
|
||||
if ($found) {
|
||||
next;
|
||||
}
|
||||
|
||||
for (1 .. 8 - $nonnils) {
|
||||
push @new_groups, 0;
|
||||
}
|
||||
|
||||
$found = 1;
|
||||
|
||||
} else {
|
||||
push @new_groups, $g;
|
||||
}
|
||||
}
|
||||
|
||||
@groups = @new_groups;
|
||||
}
|
||||
|
||||
if (@groups != 8) {
|
||||
die "Invalid IPv6 address: $txt: @groups\n";
|
||||
}
|
||||
|
||||
#warn "IPv6 groups: @groups";
|
||||
|
||||
return pack("nnnnnnnn", @groups), 16;
|
||||
}
|
||||
|
||||
sub encode_name ($) {
|
||||
my $name = shift;
|
||||
$name =~ s/([^.]+)\.?/chr(length($1)) . $1/ge;
|
||||
$name .= "\0";
|
||||
return $name;
|
||||
}
|
||||
|
||||
1
|
||||
32
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/util/lua-releng.pl
vendored
Executable file
32
controllers/nginx-third-party/lua/vendor/lua-resty-dns-cache/util/lua-releng.pl
vendored
Executable file
|
|
@ -0,0 +1,32 @@
|
|||
#!/usr/bin/env perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub file_contains ($$);
|
||||
|
||||
my $version;
|
||||
for my $file (map glob, qw{ *.lua lib/*.lua lib/*/*.lua lib/*/*/*.lua lib/*/*/*/*.lua lib/*/*/*/*/*.lua }) {
|
||||
|
||||
|
||||
print "Checking use of Lua global variables in file $file ...\n";
|
||||
system("luac -p -l $file | grep ETGLOBAL | grep -vE 'require|type|tostring|error|ngx|ndk|jit|setmetatable|getmetatable|string|table|io|os|print|tonumber|math|pcall|xpcall|unpack|pairs|ipairs|assert|module|package|coroutine|[gs]etfenv|next|rawget|rawset|rawlen'");
|
||||
file_contains($file, "attempt to write to undeclared variable");
|
||||
#system("grep -H -n -E --color '.{81}' $file");
|
||||
}
|
||||
|
||||
sub file_contains ($$) {
|
||||
my ($file, $regex) = @_;
|
||||
open my $in, $file
|
||||
or die "Cannot open $file fo reading: $!\n";
|
||||
my $content = do { local $/; <$in> };
|
||||
close $in;
|
||||
#print "$content";
|
||||
return scalar ($content =~ /$regex/);
|
||||
}
|
||||
|
||||
if (-d 't') {
|
||||
for my $file (map glob, qw{ t/*.t t/*/*.t t/*/*/*.t }) {
|
||||
system(qq{grep -H -n --color -E '\\--- ?(ONLY|LAST)' $file});
|
||||
}
|
||||
}
|
||||
1
controllers/nginx-third-party/lua/vendor/lua-resty-dns/.gitattributes
vendored
Normal file
1
controllers/nginx-third-party/lua/vendor/lua-resty-dns/.gitattributes
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
*.t linguist-language=Text
|
||||
10
controllers/nginx-third-party/lua/vendor/lua-resty-dns/.gitignore
vendored
Normal file
10
controllers/nginx-third-party/lua/vendor/lua-resty-dns/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
go
|
||||
t/servroot/
|
||||
reindex
|
||||
nginx
|
||||
ctags
|
||||
tags
|
||||
a.lua
|
||||
18
controllers/nginx-third-party/lua/vendor/lua-resty-dns/Makefile
vendored
Normal file
18
controllers/nginx-third-party/lua/vendor/lua-resty-dns/Makefile
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
OPENRESTY_PREFIX=/usr/local/openresty
|
||||
|
||||
PREFIX ?= /usr/local
|
||||
LUA_INCLUDE_DIR ?= $(PREFIX)/include
|
||||
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
|
||||
INSTALL ?= install
|
||||
|
||||
.PHONY: all test install
|
||||
|
||||
all: ;
|
||||
|
||||
install: all
|
||||
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/dns
|
||||
$(INSTALL) lib/resty/dns/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/dns/
|
||||
|
||||
test: all
|
||||
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t
|
||||
|
||||
404
controllers/nginx-third-party/lua/vendor/lua-resty-dns/README.markdown
vendored
Normal file
404
controllers/nginx-third-party/lua/vendor/lua-resty-dns/README.markdown
vendored
Normal file
|
|
@ -0,0 +1,404 @@
|
|||
Name
|
||||
====
|
||||
|
||||
lua-resty-dns - Lua DNS resolver for the ngx_lua based on the cosocket API
|
||||
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
* [Name](#name)
|
||||
* [Status](#status)
|
||||
* [Description](#description)
|
||||
* [Synopsis](#synopsis)
|
||||
* [Methods](#methods)
|
||||
* [new](#new)
|
||||
* [query](#query)
|
||||
* [tcp_query](#tcp_query)
|
||||
* [set_timeout](#set_timeout)
|
||||
* [compress_ipv6_addr](#compress_ipv6_addr)
|
||||
* [Constants](#constants)
|
||||
* [TYPE_A](#type_a)
|
||||
* [TYPE_NS](#type_ns)
|
||||
* [TYPE_CNAME](#type_cname)
|
||||
* [TYPE_PTR](#type_ptr)
|
||||
* [TYPE_MX](#type_mx)
|
||||
* [TYPE_TXT](#type_txt)
|
||||
* [TYPE_AAAA](#type_aaaa)
|
||||
* [TYPE_SRV](#type_srv)
|
||||
* [TYPE_SPF](#type_spf)
|
||||
* [CLASS_IN](#class_in)
|
||||
* [Automatic Error Logging](#automatic-error-logging)
|
||||
* [Limitations](#limitations)
|
||||
* [TODO](#todo)
|
||||
* [Author](#author)
|
||||
* [Copyright and License](#copyright-and-license)
|
||||
* [See Also](#see-also)
|
||||
|
||||
Status
|
||||
======
|
||||
|
||||
This library is considered production ready.
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
This Lua library provies a DNS resolver for the ngx_lua nginx module:
|
||||
|
||||
http://wiki.nginx.org/HttpLuaModule
|
||||
|
||||
This Lua library takes advantage of ngx_lua's cosocket API, which ensures
|
||||
100% nonblocking behavior.
|
||||
|
||||
Note that at least [ngx_lua 0.5.12](https://github.com/chaoslawful/lua-nginx-module/tags) or [ngx_openresty 1.2.1.11](http://openresty.org/#Download) is required.
|
||||
|
||||
Also, the [bit library](http://bitop.luajit.org/) is also required. If you're using LuaJIT 2.0 with ngx_lua, then the `bit` library is already available by default.
|
||||
|
||||
Note that, this library is bundled and enabled by default in the [ngx_openresty bundle](http://openresty.org/).
|
||||
|
||||
Synopsis
|
||||
========
|
||||
|
||||
```lua
|
||||
lua_package_path "/path/to/lua-resty-dns/lib/?.lua;;";
|
||||
|
||||
server {
|
||||
location = /dns {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
local r, err = resolver:new{
|
||||
nameservers = {"8.8.8.8", {"8.8.4.4", 53} },
|
||||
retrans = 5, -- 5 retransmissions on receive timeout
|
||||
timeout = 2000, -- 2 sec
|
||||
}
|
||||
|
||||
if not r then
|
||||
ngx.say("failed to instantiate the resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local answers, err = r:query("www.google.com")
|
||||
if not answers then
|
||||
ngx.say("failed to query the DNS server: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
if answers.errcode then
|
||||
ngx.say("server returned error code: ", answers.errcode,
|
||||
": ", answers.errstr)
|
||||
end
|
||||
|
||||
for i, ans in ipairs(answers) do
|
||||
ngx.say(ans.name, " ", ans.address or ans.cname,
|
||||
" type:", ans.type, " class:", ans.class,
|
||||
" ttl:", ans.ttl)
|
||||
end
|
||||
';
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Methods
|
||||
=======
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
new
|
||||
---
|
||||
`syntax: r, err = class:new(opts)`
|
||||
|
||||
Creates a dns.resolver object. Returns `nil` and an message string on error.
|
||||
|
||||
It accepts a `opts` table argument. The following options are supported:
|
||||
|
||||
* `nameservers`
|
||||
|
||||
a list of nameservers to be used. Each nameserver entry can be either a single hostname string or a table holding both the hostname string and the port number. The nameserver is picked up by a simple round-robin algorithm for each `query` method call. This option is required.
|
||||
* `retrans`
|
||||
|
||||
the total number of times of retransmitting the DNS request when receiving a DNS response times out according to the `timeout` setting. Default to `5` times. When trying to retransmit the query, the next nameserver according to the round-robin algorithm will be picked up.
|
||||
* `timeout`
|
||||
|
||||
the time in milliseconds for waiting for the respond for a single attempt of request transmition. note that this is ''not'' the maximal total waiting time before giving up, the maximal total waiting time can be calculated by the expression `timeout x retrans`. The `timeout` setting can also be changed by calling the `set_timeout` method. The default `timeout` setting is 2000 milliseconds, or 2 seconds.
|
||||
* `no_recurse`
|
||||
|
||||
a boolean flag controls whether to disable the "recursion desired" (RD) flag in the UDP request. Default to `false`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
query
|
||||
-----
|
||||
`syntax: answers, err = r:query(name, options?)`
|
||||
|
||||
Performs a DNS standard query to the nameservers specified by the `new` method,
|
||||
and returns all the answer records in an array-like Lua table. In case of errors, it will
|
||||
return `nil` and a string describing the error instead.
|
||||
|
||||
If the server returns a non-zero error code, the fields `errcode` and `errstr` will be set accordingly in the Lua table returned.
|
||||
|
||||
Each entry in the `answers` returned table value is also a hash-like Lua table
|
||||
which usually takes some of the following fields:
|
||||
|
||||
* `name`
|
||||
|
||||
The resource record name.
|
||||
* `type`
|
||||
|
||||
The current resource record type, possible values are `1` (`TYPE_A`), `5` (`TYPE_CNAME`), `28` (`TYPE_AAAA`), and any other values allowed by RFC 1035.
|
||||
* `address`
|
||||
|
||||
The IPv4 or IPv6 address in their textual representations when the resource record type is either `1` (`TYPE_A`) or `28` (`TYPE_AAAA`), respectively. Secussesive 16-bit zero groups in IPv6 addresses will not be compressed by default, if you want that, you need to call the `compress_ipv6_addr` static method instead.
|
||||
* `cname`
|
||||
|
||||
The (decoded) record data value for `CNAME` resource records. Only present for `CNAME` records.
|
||||
* `ttl`
|
||||
|
||||
The time-to-live (TTL) value in seconds for the current resource record.
|
||||
* `class`
|
||||
|
||||
The current resource record class, possible values are `1` (`CLASS_IN`) or any other values allowed by RFC 1035.
|
||||
* `preference`
|
||||
|
||||
The preference integer number for `MX` resource records. Only present for `MX` type records.
|
||||
* `exchange`
|
||||
|
||||
The exchange domain name for `MX` resource records. Only present for `MX` type records.
|
||||
* `nsdname`
|
||||
|
||||
A domain-name which specifies a host which should be authoritative for the specified class and domain. Usually present for `NS` type records.
|
||||
* `rdata`
|
||||
|
||||
The raw resource data (RDATA) for resource records that are not recognized.
|
||||
* `txt`
|
||||
|
||||
The record value for `TXT` records. When there is only one character string in this record, then this field takes a single Lua string. Otherwise this field takes a Lua table holding all the strings.
|
||||
* `ptrdname`
|
||||
|
||||
The record value for `PTR` records.
|
||||
|
||||
This method also takes an optional `options` argument table, which takes the following fields:
|
||||
|
||||
* `qtype`
|
||||
|
||||
The type of the question. Possible values are `1` (`TYPE_A`), `5` (`TYPE_CNAME`), `28` (`TYPE_AAAA`), or any other QTYPE value specified by RFC 1035 and RFC 3596. Default to `1` (`TYPE_A`).
|
||||
|
||||
When data truncation happens, the resolver will automatically retry using the TCP transport mode
|
||||
to query the current nameserver. All TCP connections are short lived.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
tcp_query
|
||||
---------
|
||||
`syntax: answers, err = r:tcp_query(name, options?)`
|
||||
|
||||
Just like the `query` method, but enforce the TCP transport mode instead of UDP.
|
||||
|
||||
All TCP connections are short lived.
|
||||
|
||||
Here is an example:
|
||||
|
||||
```lua
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{
|
||||
nameservers = { "8.8.8.8" }
|
||||
}
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:tcp_query("www.google.com", { qtype = r.TYPE_A })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
```
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
set_timeout
|
||||
-----------
|
||||
`syntax: r:set_timeout(time)`
|
||||
|
||||
Overrides the current `timeout` setting by the `time` argument in milliseconds for all the nameserver peers.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
compress_ipv6_addr
|
||||
------------------
|
||||
`syntax: compressed = resty.dns.resolver.compress_ipv6_addr(address)`
|
||||
|
||||
Compresses the successive 16-bit zero groups in the textual format of the IPv6 address.
|
||||
|
||||
For example,
|
||||
|
||||
```lua
|
||||
local resolver = require "resty.dns.resolver"
|
||||
local compress = resolver.compress_ipv6_addr
|
||||
local new_addr = compress("FF01:0:0:0:0:0:0:101")
|
||||
```
|
||||
|
||||
will yield `FF01::101` in the `new_addr` return value.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Constants
|
||||
=========
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_A
|
||||
------
|
||||
|
||||
The `A` resource record type, equal to the decimal number `1`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_NS
|
||||
-------
|
||||
|
||||
The `NS` resource record type, equal to the decimal number `2`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_CNAME
|
||||
----------
|
||||
|
||||
The `CNAME` resource record type, equal to the decimal number `5`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_PTR
|
||||
--------
|
||||
|
||||
The `PTR` resource record type, equal to the decimal number `12`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_MX
|
||||
-------
|
||||
|
||||
The `MX` resource record type, equal to the decimal number `15`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_TXT
|
||||
--------
|
||||
|
||||
The `TXT` resource record type, equal to the decimal number `16`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_AAAA
|
||||
---------
|
||||
`syntax: typ = r.TYPE_AAAA`
|
||||
|
||||
The `AAAA` resource record type, equal to the decimal number `28`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_SRV
|
||||
---------
|
||||
`syntax: typ = r.TYPE_SRV`
|
||||
|
||||
The `SRV` resource record type, equal to the decimal number `33`.
|
||||
|
||||
See RFC 2782 for details.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TYPE_SPF
|
||||
---------
|
||||
`syntax: typ = r.TYPE_SPF`
|
||||
|
||||
The `SPF` resource record type, equal to the decimal number `99`.
|
||||
|
||||
See RFC 4408 for details.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
CLASS_IN
|
||||
--------
|
||||
`syntax: class = r.CLASS_IN`
|
||||
|
||||
The `Internet` resource record type, equal to the decimal number `1`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Automatic Error Logging
|
||||
=======================
|
||||
|
||||
By default the underlying [ngx_lua](http://wiki.nginx.org/HttpLuaModule) module
|
||||
does error logging when socket errors happen. If you are already doing proper error
|
||||
handling in your own Lua code, then you are recommended to disable this automatic error logging by turning off [ngx_lua](http://wiki.nginx.org/HttpLuaModule)'s [lua_socket_log_errors](http://wiki.nginx.org/HttpLuaModule#lua_socket_log_errors) directive, that is,
|
||||
|
||||
```nginx
|
||||
lua_socket_log_errors off;
|
||||
```
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Limitations
|
||||
===========
|
||||
|
||||
* This library cannot be used in code contexts like set_by_lua*, log_by_lua*, and
|
||||
header_filter_by_lua* where the ngx_lua cosocket API is not available.
|
||||
* The `resty.dns.resolver` object instance cannot be stored in a Lua variable at the Lua module level,
|
||||
because it will then be shared by all the concurrent requests handled by the same nginx
|
||||
worker process (see
|
||||
http://wiki.nginx.org/HttpLuaModule#Data_Sharing_within_an_Nginx_Worker ) and
|
||||
result in bad race conditions when concurrent requests are trying to use the same `resty.dns.resolver` instance.
|
||||
You should always initiate `resty.dns.resolver` objects in function local
|
||||
variables or in the `ngx.ctx` table. These places all have their own data copies for
|
||||
each request.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TODO
|
||||
====
|
||||
|
||||
* Concurrent (or parallel) query mode
|
||||
* Better support for other resource record types like `TLSA`.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Author
|
||||
======
|
||||
|
||||
Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, CloudFlare Inc.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Copyright and License
|
||||
=====================
|
||||
|
||||
This module is licensed under the BSD license.
|
||||
|
||||
Copyright (C) 2012-2014, by Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, CloudFlare Inc.
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
See Also
|
||||
========
|
||||
* the ngx_lua module: http://wiki.nginx.org/HttpLuaModule
|
||||
* the [lua-resty-memcached](https://github.com/agentzh/lua-resty-memcached) library.
|
||||
* the [lua-resty-redis](https://github.com/agentzh/lua-resty-redis) library.
|
||||
* the [lua-resty-mysql](https://github.com/agentzh/lua-resty-mysql) library.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
803
controllers/nginx-third-party/lua/vendor/lua-resty-dns/lib/resty/dns/resolver.lua
vendored
Normal file
803
controllers/nginx-third-party/lua/vendor/lua-resty-dns/lib/resty/dns/resolver.lua
vendored
Normal file
|
|
@ -0,0 +1,803 @@
|
|||
-- Copyright (C) Yichun Zhang (agentzh)
|
||||
|
||||
|
||||
-- local socket = require "socket"
|
||||
local bit = require "bit"
|
||||
local udp = ngx.socket.udp
|
||||
local rand = math.random
|
||||
local char = string.char
|
||||
local byte = string.byte
|
||||
local find = string.find
|
||||
local gsub = string.gsub
|
||||
local sub = string.sub
|
||||
local format = string.format
|
||||
local band = bit.band
|
||||
local rshift = bit.rshift
|
||||
local lshift = bit.lshift
|
||||
local insert = table.insert
|
||||
local concat = table.concat
|
||||
local re_sub = ngx.re.sub
|
||||
local tcp = ngx.socket.tcp
|
||||
local log = ngx.log
|
||||
local DEBUG = ngx.DEBUG
|
||||
local randomseed = math.randomseed
|
||||
local ngx_time = ngx.time
|
||||
local setmetatable = setmetatable
|
||||
local type = type
|
||||
|
||||
|
||||
local DOT_CHAR = byte(".")
|
||||
|
||||
|
||||
local TYPE_A = 1
|
||||
local TYPE_NS = 2
|
||||
local TYPE_CNAME = 5
|
||||
local TYPE_PTR = 12
|
||||
local TYPE_MX = 15
|
||||
local TYPE_TXT = 16
|
||||
local TYPE_AAAA = 28
|
||||
local TYPE_SRV = 33
|
||||
local TYPE_SPF = 99
|
||||
|
||||
local CLASS_IN = 1
|
||||
|
||||
|
||||
local _M = {
|
||||
_VERSION = '0.14',
|
||||
TYPE_A = TYPE_A,
|
||||
TYPE_NS = TYPE_NS,
|
||||
TYPE_CNAME = TYPE_CNAME,
|
||||
TYPE_PTR = TYPE_PTR,
|
||||
TYPE_MX = TYPE_MX,
|
||||
TYPE_TXT = TYPE_TXT,
|
||||
TYPE_AAAA = TYPE_AAAA,
|
||||
TYPE_SRV = TYPE_SRV,
|
||||
TYPE_SPF = TYPE_SPF,
|
||||
CLASS_IN = CLASS_IN,
|
||||
}
|
||||
|
||||
|
||||
local resolver_errstrs = {
|
||||
"format error", -- 1
|
||||
"server failure", -- 2
|
||||
"name error", -- 3
|
||||
"not implemented", -- 4
|
||||
"refused", -- 5
|
||||
}
|
||||
|
||||
|
||||
local mt = { __index = _M }
|
||||
|
||||
|
||||
function _M.new(class, opts)
|
||||
if not opts then
|
||||
return nil, "no options table specified"
|
||||
end
|
||||
|
||||
local servers = opts.nameservers
|
||||
if not servers or #servers == 0 then
|
||||
return nil, "no nameservers specified"
|
||||
end
|
||||
|
||||
local timeout = opts.timeout or 2000 -- default 2 sec
|
||||
|
||||
local n = #servers
|
||||
|
||||
local socks = {}
|
||||
|
||||
for i = 1, n do
|
||||
local server = servers[i]
|
||||
local sock, err = udp()
|
||||
if not sock then
|
||||
return nil, "failed to create udp socket: " .. err
|
||||
end
|
||||
|
||||
local host, port
|
||||
if type(server) == 'table' then
|
||||
host = server[1]
|
||||
port = server[2] or 53
|
||||
|
||||
else
|
||||
host = server
|
||||
port = 53
|
||||
servers[i] = {host, port}
|
||||
end
|
||||
|
||||
local ok, err = sock:setpeername(host, port)
|
||||
if not ok then
|
||||
return nil, "failed to set peer name: " .. err
|
||||
end
|
||||
|
||||
sock:settimeout(timeout)
|
||||
|
||||
insert(socks, sock)
|
||||
end
|
||||
|
||||
local tcp_sock, err = tcp()
|
||||
if not tcp_sock then
|
||||
return nil, "failed to create tcp socket: " .. err
|
||||
end
|
||||
|
||||
tcp_sock:settimeout(timeout)
|
||||
|
||||
return setmetatable(
|
||||
{ cur = rand(1, n), socks = socks,
|
||||
tcp_sock = tcp_sock,
|
||||
servers = servers,
|
||||
retrans = opts.retrans or 5,
|
||||
no_recurse = opts.no_recurse,
|
||||
}, mt)
|
||||
end
|
||||
|
||||
|
||||
local function pick_sock(self, socks)
|
||||
local cur = self.cur
|
||||
|
||||
if cur == #socks then
|
||||
self.cur = 1
|
||||
else
|
||||
self.cur = cur + 1
|
||||
end
|
||||
|
||||
return socks[cur]
|
||||
end
|
||||
|
||||
|
||||
local function _get_cur_server(self)
|
||||
local cur = self.cur
|
||||
|
||||
local servers = self.servers
|
||||
|
||||
if cur == 1 then
|
||||
return servers[#servers]
|
||||
end
|
||||
|
||||
return servers[cur - 1]
|
||||
end
|
||||
|
||||
|
||||
function _M.set_timeout(self, timeout)
|
||||
local socks = self.socks
|
||||
if not socks then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
for i = 1, #socks do
|
||||
local sock = socks[i]
|
||||
sock:settimeout(timeout)
|
||||
end
|
||||
|
||||
local tcp_sock = self.tcp_sock
|
||||
if not tcp_sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
tcp_sock:settimeout(timeout)
|
||||
end
|
||||
|
||||
|
||||
local function _encode_name(s)
|
||||
return char(#s) .. s
|
||||
end
|
||||
|
||||
|
||||
local function _decode_name(buf, pos)
|
||||
local labels = {}
|
||||
local nptrs = 0
|
||||
local p = pos
|
||||
while nptrs < 128 do
|
||||
local fst = byte(buf, p)
|
||||
|
||||
if not fst then
|
||||
return nil, 'truncated';
|
||||
end
|
||||
|
||||
-- print("fst at ", p, ": ", fst)
|
||||
|
||||
if fst == 0 then
|
||||
if nptrs == 0 then
|
||||
pos = pos + 1
|
||||
end
|
||||
break
|
||||
end
|
||||
|
||||
if band(fst, 0xc0) ~= 0 then
|
||||
-- being a pointer
|
||||
if nptrs == 0 then
|
||||
pos = pos + 2
|
||||
end
|
||||
|
||||
nptrs = nptrs + 1
|
||||
|
||||
local snd = byte(buf, p + 1)
|
||||
if not snd then
|
||||
return nil, 'truncated'
|
||||
end
|
||||
|
||||
p = lshift(band(fst, 0x3f), 8) + snd + 1
|
||||
|
||||
-- print("resolving ptr ", p, ": ", byte(buf, p))
|
||||
|
||||
else
|
||||
-- being a label
|
||||
local label = sub(buf, p + 1, p + fst)
|
||||
insert(labels, label)
|
||||
|
||||
-- print("resolved label ", label)
|
||||
|
||||
p = p + fst + 1
|
||||
|
||||
if nptrs == 0 then
|
||||
pos = p
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return concat(labels, "."), pos
|
||||
end
|
||||
|
||||
|
||||
local function _build_request(qname, id, no_recurse, opts)
|
||||
local qtype
|
||||
|
||||
if opts then
|
||||
qtype = opts.qtype
|
||||
end
|
||||
|
||||
if not qtype then
|
||||
qtype = 1 -- A record
|
||||
end
|
||||
|
||||
local ident_hi = char(rshift(id, 8))
|
||||
local ident_lo = char(band(id, 0xff))
|
||||
|
||||
local flags
|
||||
if no_recurse then
|
||||
-- print("found no recurse")
|
||||
flags = "\0\0"
|
||||
else
|
||||
flags = "\1\0"
|
||||
end
|
||||
|
||||
local nqs = "\0\1"
|
||||
local nan = "\0\0"
|
||||
local nns = "\0\0"
|
||||
local nar = "\0\0"
|
||||
local typ = "\0" .. char(qtype)
|
||||
local class = "\0\1" -- the Internet class
|
||||
|
||||
if byte(qname, 1) == DOT_CHAR then
|
||||
return nil, "bad name"
|
||||
end
|
||||
|
||||
local name = gsub(qname, "([^.]+)%.?", _encode_name) .. '\0'
|
||||
|
||||
return {
|
||||
ident_hi, ident_lo, flags, nqs, nan, nns, nar,
|
||||
name, typ, class
|
||||
}
|
||||
end
|
||||
|
||||
|
||||
local function parse_response(buf, id)
|
||||
local n = #buf
|
||||
if n < 12 then
|
||||
return nil, 'truncated';
|
||||
end
|
||||
|
||||
-- header layout: ident flags nqs nan nns nar
|
||||
|
||||
local ident_hi = byte(buf, 1)
|
||||
local ident_lo = byte(buf, 2)
|
||||
local ans_id = lshift(ident_hi, 8) + ident_lo
|
||||
|
||||
-- print("id: ", id, ", ans id: ", ans_id)
|
||||
|
||||
if ans_id ~= id then
|
||||
-- identifier mismatch and throw it away
|
||||
log(DEBUG, "id mismatch in the DNS reply: ", ans_id, " ~= ", id)
|
||||
return nil, "id mismatch"
|
||||
end
|
||||
|
||||
local flags_hi = byte(buf, 3)
|
||||
local flags_lo = byte(buf, 4)
|
||||
local flags = lshift(flags_hi, 8) + flags_lo
|
||||
|
||||
-- print(format("flags: 0x%x", flags))
|
||||
|
||||
if band(flags, 0x8000) == 0 then
|
||||
return nil, format("bad QR flag in the DNS response")
|
||||
end
|
||||
|
||||
if band(flags, 0x200) ~= 0 then
|
||||
return nil, "truncated"
|
||||
end
|
||||
|
||||
local code = band(flags, 0x7f)
|
||||
|
||||
-- print(format("code: %d", code))
|
||||
|
||||
local nqs_hi = byte(buf, 5)
|
||||
local nqs_lo = byte(buf, 6)
|
||||
local nqs = lshift(nqs_hi, 8) + nqs_lo
|
||||
|
||||
-- print("nqs: ", nqs)
|
||||
|
||||
if nqs ~= 1 then
|
||||
return nil, format("bad number of questions in DNS response: %d", nqs)
|
||||
end
|
||||
|
||||
local nan_hi = byte(buf, 7)
|
||||
local nan_lo = byte(buf, 8)
|
||||
local nan = lshift(nan_hi, 8) + nan_lo
|
||||
|
||||
-- print("nan: ", nan)
|
||||
|
||||
-- skip the question part
|
||||
|
||||
local ans_qname, pos = _decode_name(buf, 13)
|
||||
if not ans_qname then
|
||||
return nil, pos
|
||||
end
|
||||
|
||||
-- print("qname in reply: ", ans_qname)
|
||||
|
||||
-- print("question: ", sub(buf, 13, pos))
|
||||
|
||||
if pos + 3 + nan * 12 > n then
|
||||
-- print(format("%d > %d", pos + 3 + nan * 12, n))
|
||||
return nil, 'truncated';
|
||||
end
|
||||
|
||||
-- question section layout: qname qtype(2) qclass(2)
|
||||
|
||||
local type_hi = byte(buf, pos)
|
||||
local type_lo = byte(buf, pos + 1)
|
||||
local ans_type = lshift(type_hi, 8) + type_lo
|
||||
|
||||
-- print("ans qtype: ", ans_type)
|
||||
|
||||
local class_hi = byte(buf, pos + 2)
|
||||
local class_lo = byte(buf, pos + 3)
|
||||
local qclass = lshift(class_hi, 8) + class_lo
|
||||
|
||||
-- print("ans qclass: ", qclass)
|
||||
|
||||
if qclass ~= 1 then
|
||||
return nil, format("unknown query class %d in DNS response", qclass)
|
||||
end
|
||||
|
||||
pos = pos + 4
|
||||
|
||||
local answers = {}
|
||||
|
||||
if code ~= 0 then
|
||||
answers.errcode = code
|
||||
answers.errstr = resolver_errstrs[code] or "unknown"
|
||||
end
|
||||
|
||||
for i = 1, nan do
|
||||
-- print(format("ans %d: qtype:%d qclass:%d", i, qtype, qclass))
|
||||
|
||||
local ans = {}
|
||||
insert(answers, ans)
|
||||
|
||||
local name
|
||||
name, pos = _decode_name(buf, pos)
|
||||
if not name then
|
||||
return nil, pos
|
||||
end
|
||||
|
||||
ans.name = name
|
||||
|
||||
-- print("name: ", name)
|
||||
|
||||
type_hi = byte(buf, pos)
|
||||
type_lo = byte(buf, pos + 1)
|
||||
local typ = lshift(type_hi, 8) + type_lo
|
||||
|
||||
ans.type = typ
|
||||
|
||||
-- print("type: ", typ)
|
||||
|
||||
class_hi = byte(buf, pos + 2)
|
||||
class_lo = byte(buf, pos + 3)
|
||||
local class = lshift(class_hi, 8) + class_lo
|
||||
|
||||
ans.class = class
|
||||
|
||||
-- print("class: ", class)
|
||||
|
||||
local ttl_bytes = { byte(buf, pos + 4, pos + 7) }
|
||||
|
||||
-- print("ttl bytes: ", concat(ttl_bytes, " "))
|
||||
|
||||
local ttl = lshift(ttl_bytes[1], 24) + lshift(ttl_bytes[2], 16)
|
||||
+ lshift(ttl_bytes[3], 8) + ttl_bytes[4]
|
||||
|
||||
-- print("ttl: ", ttl)
|
||||
|
||||
ans.ttl = ttl
|
||||
|
||||
local len_hi = byte(buf, pos + 8)
|
||||
local len_lo = byte(buf, pos + 9)
|
||||
local len = lshift(len_hi, 8) + len_lo
|
||||
|
||||
-- print("record len: ", len)
|
||||
|
||||
pos = pos + 10
|
||||
|
||||
if typ == TYPE_A then
|
||||
|
||||
if len ~= 4 then
|
||||
return nil, "bad A record value length: " .. len
|
||||
end
|
||||
|
||||
local addr_bytes = { byte(buf, pos, pos + 3) }
|
||||
local addr = concat(addr_bytes, ".")
|
||||
-- print("ipv4 address: ", addr)
|
||||
|
||||
ans.address = addr
|
||||
|
||||
pos = pos + 4
|
||||
|
||||
elseif typ == TYPE_CNAME then
|
||||
|
||||
local cname, p = _decode_name(buf, pos)
|
||||
if not cname then
|
||||
return nil, pos
|
||||
end
|
||||
|
||||
if p - pos ~= len then
|
||||
return nil, format("bad cname record length: %d ~= %d",
|
||||
p - pos, len)
|
||||
end
|
||||
|
||||
pos = p
|
||||
|
||||
-- print("cname: ", cname)
|
||||
|
||||
ans.cname = cname
|
||||
|
||||
elseif typ == TYPE_AAAA then
|
||||
|
||||
if len ~= 16 then
|
||||
return nil, "bad AAAA record value length: " .. len
|
||||
end
|
||||
|
||||
local addr_bytes = { byte(buf, pos, pos + 15) }
|
||||
local flds = {}
|
||||
local comp_begin, comp_end
|
||||
for i = 1, 16, 2 do
|
||||
local a = addr_bytes[i]
|
||||
local b = addr_bytes[i + 1]
|
||||
if a == 0 then
|
||||
insert(flds, format("%x", b))
|
||||
|
||||
else
|
||||
insert(flds, format("%x%02x", a, b))
|
||||
end
|
||||
end
|
||||
|
||||
-- we do not compress the IPv6 addresses by default
|
||||
-- due to performance considerations
|
||||
|
||||
ans.address = concat(flds, ":")
|
||||
|
||||
pos = pos + 16
|
||||
|
||||
elseif typ == TYPE_MX then
|
||||
|
||||
-- print("len = ", len)
|
||||
|
||||
if len < 3 then
|
||||
return nil, "bad MX record value length: " .. len
|
||||
end
|
||||
|
||||
local pref_hi = byte(buf, pos)
|
||||
local pref_lo = byte(buf, pos + 1)
|
||||
|
||||
ans.preference = lshift(pref_hi, 8) + pref_lo
|
||||
|
||||
local host, p = _decode_name(buf, pos + 2)
|
||||
if not host then
|
||||
return nil, pos
|
||||
end
|
||||
|
||||
if p - pos ~= len then
|
||||
return nil, format("bad cname record length: %d ~= %d",
|
||||
p - pos, len)
|
||||
end
|
||||
|
||||
ans.exchange = host
|
||||
|
||||
pos = p
|
||||
|
||||
elseif typ == TYPE_SRV then
|
||||
if len < 7 then
|
||||
return nil, "bad SRV record value length: " .. len
|
||||
end
|
||||
|
||||
local prio_hi = byte(buf, pos)
|
||||
local prio_lo = byte(buf, pos + 1)
|
||||
ans.priority = lshift(prio_hi, 8) + prio_lo
|
||||
|
||||
local weight_hi = byte(buf, pos + 2)
|
||||
local weight_lo = byte(buf, pos + 3)
|
||||
ans.weight = lshift(weight_hi, 8) + weight_lo
|
||||
|
||||
local port_hi = byte(buf, pos + 4)
|
||||
local port_lo = byte(buf, pos + 5)
|
||||
ans.port = lshift(port_hi, 8) + port_lo
|
||||
|
||||
local name, p = _decode_name(buf, pos + 6)
|
||||
if not name then
|
||||
return nil, pos
|
||||
end
|
||||
|
||||
if p - pos ~= len then
|
||||
return nil, format("bad srv record length: %d ~= %d",
|
||||
p - pos, len)
|
||||
end
|
||||
|
||||
ans.target = name
|
||||
|
||||
pos = p
|
||||
|
||||
elseif typ == TYPE_NS then
|
||||
|
||||
local name, p = _decode_name(buf, pos)
|
||||
if not name then
|
||||
return nil, pos
|
||||
end
|
||||
|
||||
if p - pos ~= len then
|
||||
return nil, format("bad cname record length: %d ~= %d",
|
||||
p - pos, len)
|
||||
end
|
||||
|
||||
pos = p
|
||||
|
||||
-- print("name: ", name)
|
||||
|
||||
ans.nsdname = name
|
||||
|
||||
elseif typ == TYPE_TXT or typ == TYPE_SPF then
|
||||
|
||||
local key = (typ == TYPE_TXT) and "txt" or "spf"
|
||||
|
||||
local slen = byte(buf, pos)
|
||||
if slen + 1 > len then
|
||||
-- truncate the over-run TXT record data
|
||||
slen = len
|
||||
end
|
||||
|
||||
-- print("slen: ", len)
|
||||
|
||||
local val = sub(buf, pos + 1, pos + slen)
|
||||
local last = pos + len
|
||||
pos = pos + slen + 1
|
||||
|
||||
if pos < last then
|
||||
-- more strings to be processed
|
||||
-- this code path is usually cold, so we do not
|
||||
-- merge the following loop on this code path
|
||||
-- with the processing logic above.
|
||||
|
||||
val = {val}
|
||||
local idx = 2
|
||||
repeat
|
||||
local slen = byte(buf, pos)
|
||||
if pos + slen + 1 > last then
|
||||
-- truncate the over-run TXT record data
|
||||
slen = last - pos - 1
|
||||
end
|
||||
|
||||
val[idx] = sub(buf, pos + 1, pos + slen)
|
||||
idx = idx + 1
|
||||
pos = pos + slen + 1
|
||||
|
||||
until pos >= last
|
||||
end
|
||||
|
||||
ans[key] = val
|
||||
|
||||
elseif typ == TYPE_PTR then
|
||||
|
||||
local name, p = _decode_name(buf, pos)
|
||||
if not name then
|
||||
return nil, pos
|
||||
end
|
||||
|
||||
if p - pos ~= len then
|
||||
return nil, format("bad cname record length: %d ~= %d",
|
||||
p - pos, len)
|
||||
end
|
||||
|
||||
pos = p
|
||||
|
||||
-- print("name: ", name)
|
||||
|
||||
ans.ptrdname = name
|
||||
|
||||
else
|
||||
-- for unknown types, just forward the raw value
|
||||
|
||||
ans.rdata = sub(buf, pos, pos + len - 1)
|
||||
pos = pos + len
|
||||
end
|
||||
end
|
||||
|
||||
return answers
|
||||
end
|
||||
|
||||
|
||||
local function _gen_id(self)
|
||||
local id = self._id -- for regression testing
|
||||
if id then
|
||||
return id
|
||||
end
|
||||
return rand(0, 65535) -- two bytes
|
||||
end
|
||||
|
||||
|
||||
local function _tcp_query(self, query, id)
|
||||
local sock = self.tcp_sock
|
||||
if not sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
log(DEBUG, "query the TCP server due to reply truncation")
|
||||
|
||||
local server = _get_cur_server(self)
|
||||
|
||||
local ok, err = sock:connect(server[1], server[2])
|
||||
if not ok then
|
||||
return nil, "failed to connect to TCP server "
|
||||
.. concat(server, ":") .. ": " .. err
|
||||
end
|
||||
|
||||
query = concat(query, "")
|
||||
local len = #query
|
||||
|
||||
local len_hi = char(rshift(len, 8))
|
||||
local len_lo = char(band(len, 0xff))
|
||||
|
||||
local bytes, err = sock:send({len_hi, len_lo, query})
|
||||
if not bytes then
|
||||
return nil, "failed to send query to TCP server "
|
||||
.. concat(server, ":") .. ": " .. err
|
||||
end
|
||||
|
||||
local buf, err = sock:receive(2)
|
||||
if not buf then
|
||||
return nil, "failed to receive the reply length field from TCP server "
|
||||
.. concat(server, ":") .. ": " .. err
|
||||
end
|
||||
|
||||
local len_hi = byte(buf, 1)
|
||||
local len_lo = byte(buf, 2)
|
||||
local len = lshift(len_hi, 8) + len_lo
|
||||
|
||||
-- print("tcp message len: ", len)
|
||||
|
||||
buf, err = sock:receive(len)
|
||||
if not buf then
|
||||
return nil, "failed to receive the reply message body from TCP server "
|
||||
.. concat(server, ":") .. ": " .. err
|
||||
end
|
||||
|
||||
local answers, err = parse_response(buf, id)
|
||||
if not answers then
|
||||
return nil, "failed to parse the reply from the TCP server "
|
||||
.. concat(server, ":") .. ": " .. err
|
||||
end
|
||||
|
||||
sock:close()
|
||||
|
||||
return answers
|
||||
end
|
||||
|
||||
|
||||
-- Resolve `qname` over TCP directly, bypassing the UDP fast path.
-- `opts` is passed through to the request builder. Returns the parsed
-- answers table, or nil plus an error message.
function _M.tcp_query(self, qname, opts)
    if not self.socks then
        return nil, "not initialized"
    end

    -- pick_sock selects the current upstream server (defined earlier
    -- in this file); called here so _tcp_query targets the right one
    pick_sock(self, self.socks)

    local id = _gen_id(self)

    local req, err = _build_request(qname, id, self.no_recurse, opts)
    if not req then
        return nil, err
    end

    return _tcp_query(self, req, id)
end
|
||||
|
||||
|
||||
-- Resolve `qname` over UDP with retransmission. Retries up to
-- self.retrans times, rotating through the configured nameservers via
-- pick_sock. Falls back to TCP automatically when a reply is truncated.
-- Returns the parsed answers table, or nil plus an error message.
function _M.query(self, qname, opts)
    local socks = self.socks
    if not socks then
        return nil, "not initialized"
    end

    local id = _gen_id(self)

    local query, err = _build_request(qname, id, self.no_recurse, opts)
    if not query then
        return nil, err
    end

    -- local cjson = require "cjson"
    -- print("query: ", cjson.encode(concat(query, "")))

    local retrans = self.retrans

    -- print("retrans: ", retrans)

    for i = 1, retrans do
        local sock = pick_sock(self, socks)

        local ok, err = sock:send(query)
        if not ok then
            local server = _get_cur_server(self)
            return nil, "failed to send request to UDP server "
                .. concat(server, ":") .. ": " .. err
        end

        local buf, err

        -- drain up to 128 datagrams: replies whose ID does not match
        -- ours (e.g. stale responses to an earlier timed-out query)
        -- are skipped rather than treated as errors
        for j = 1, 128 do
            buf, err = sock:receive(4096)

            if err then
                break
            end

            if buf then
                local answers
                answers, err = parse_response(buf, id)
                if not answers then
                    if err == "truncated" then
                        -- TC bit set: the full answer only fits over TCP
                        return _tcp_query(self, query, id)
                    end

                    if err ~= "id mismatch" then
                        return nil, err
                    end

                    -- retry receiving when err == "id mismatch"
                else
                    return answers
                end
            end
        end

        -- only a receive timeout (before the last attempt) triggers a
        -- retransmission; any other error is reported immediately
        if err ~= "timeout" or i == retrans then
            local server = _get_cur_server(self)
            return nil, "failed to receive reply from UDP server "
                .. concat(server, ":") .. ": " .. err
        end
    end

    -- impossible to reach here
end
|
||||
|
||||
|
||||
-- Compress a fully expanded IPv6 address with the "::" shorthand.
-- NOTE: ngx.re.sub replaces only the FIRST run of zero groups matched
-- by the alternation, which is not necessarily the longest run (see
-- the "7:25:0:0:0:3:0:0" -> "7:25::3:0:0" case in the test suite), so
-- the result is valid but not always the canonical RFC 5952 form.
function _M.compress_ipv6_addr(addr)
    local addr = re_sub(addr, "^(0:)+|(:0)+$|:(0:)+", "::", "jo")
    if addr == "::0" then
        -- the all-zero address collapses to "::0" above; normalize it
        addr = "::"
    end

    return addr
end
|
||||
|
||||
|
||||
-- Seed the PRNG (used by _gen_id for query IDs) once at load time.
-- NOTE(review): ngx_time() has only second granularity, so workers
-- started within the same second share a seed -- confirm acceptable.
randomseed(ngx_time())


return _M
|
||||
271
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/TestDNS.pm
vendored
Normal file
271
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/TestDNS.pm
vendored
Normal file
|
|
@ -0,0 +1,271 @@
|
|||
package TestDNS;
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use 5.010001;
|
||||
use Test::Nginx::Socket::Lua -Base;
|
||||
#use JSON::XS;
|
||||
|
||||
use constant {
|
||||
TYPE_A => 1,
|
||||
TYPE_TXT => 16,
|
||||
TYPE_CNAME => 5,
|
||||
TYPE_AAAA => 28,
|
||||
CLASS_INTERNET => 1,
|
||||
};
|
||||
|
||||
sub encode_name ($);
|
||||
sub encode_ipv4 ($);
|
||||
sub encode_ipv6 ($);
|
||||
sub gen_dns_reply ($$);
|
||||
|
||||
# Test::Base filter: evaluate the Perl code in a test section and turn
# the resulting spec (hash, or array of hashes) into raw DNS reply
# packet(s) via gen_dns_reply. The optional filter argument selects the
# framing: 'udp' (default, bare message) or 'tcp' (length-prefixed).
sub Test::Base::Filter::dns {
    my ($self, $code) = @_;

    my $args = $self->current_arguments;
    #warn "args: $args";
    if (defined $args && $args ne 'tcp' && $args ne 'udp') {
        die "Invalid argument to the \"dns\" filter: $args\n";
    }

    my $mode = $args // 'udp';

    my $block = $self->current_block;

    # parse the "--- dns_pointers" spec of the form "1 => 2, 3 => 4"
    # into a sparse array mapping name IDs to the IDs they point at.
    # NOTE(review): @pointers is built but not referenced again in this
    # sub -- presumably consumed by name-compression logic elsewhere;
    # confirm before removing.
    my $pointer_spec = $block->dns_pointers;
    my @pointers;
    if (defined $pointer_spec) {
        my @loops = split /\s*,\s*/, $pointer_spec;
        for my $loop (@loops) {
            my @nodes = split /\s*=>\s*/, $loop;
            my $prev;
            for my $n (@nodes) {
                if ($n !~ /^\d+$/ || $n == 0) {
                    die "bad name ID in the --- dns_pointers: $n\n";
                }

                if (!defined $prev) {
                    $prev = $n;
                    next;
                }

                $pointers[$prev] = $n;
            }
        }
    }

    # the section body is Perl source describing the reply
    my $input = eval $code;
    if ($@) {
        die "failed to evaluate code $code: $@\n";
    }

    # plain scalars (e.g. raw byte strings) pass through untouched
    if (!ref $input) {
        return $input;
    }

    if (ref $input eq 'ARRAY') {
        my @replies;
        for my $t (@$input) {
            push @replies, gen_dns_reply($t, $mode);
        }

        return \@replies;
    }

    if (ref $input eq 'HASH') {
        return gen_dns_reply($input, $mode);
    }

    return $input;
}
|
||||
|
||||
# Serialize one test spec hash into a raw DNS reply message.
# $t describes the header fields, question and answer sections;
# $mode is 'udp' (bare message) or 'tcp' (2-byte length prefix added).
sub gen_dns_reply ($$) {
    my ($t, $mode) = @_;

    # collect references to every raw domain name so they can all be
    # rewritten to wire format (length-prefixed labels) in one pass
    my @raw_names;
    push @raw_names, \($t->{qname});

    my $answers = $t->{answer} // [];
    if (!ref $answers) {
        $answers = [$answers];
    }

    for my $ans (@$answers) {
        push @raw_names, \($ans->{name});
        if (defined $ans->{cname}) {
            push @raw_names, \($ans->{cname});
        }
    }

    for my $rname (@raw_names) {
        $$rname = encode_name($$rname // "");
    }

    my $qname = $t->{qname};

    my $s = '';

    my $id = $t->{id} // 0;

    $s .= pack("n", $id);
    #warn "id: ", length($s), " ", encode_json([$s]);

    # header flag bits (RFC 1035 s4.1.1); the defaults model a normal
    # recursive answer: QR=1, RD=1, RA=1, everything else 0
    my $qr = $t->{qr} // 1;

    my $opcode = $t->{opcode} // 0;

    my $aa = $t->{aa} // 0;

    my $tc = $t->{tc} // 0;
    my $rd = $t->{rd} // 1;
    my $ra = $t->{ra} // 1;
    my $rcode = $t->{rcode} // 0;

    my $flags = ($qr << 15) + ($opcode << 11) + ($aa << 10) + ($tc << 9) + ($rd << 8) + ($ra << 7) + $rcode;
    #warn sprintf("flags: %b", $flags);

    $flags = pack("n", $flags);
    $s .= $flags;

    #warn "flags: ", length($flags), " ", encode_json([$flags]);

    # section counts; authority/additional sections are never generated
    my $qdcount = $t->{qdcount} // 1;
    my $ancount = $t->{ancount} // scalar @$answers;
    my $nscount = 0;
    my $arcount = 0;

    $s .= pack("nnnn", $qdcount, $ancount, $nscount, $arcount);

    #warn "qname: ", length($qname), " ", encode_json([$qname]);

    $s .= $qname;

    my $qs_type = $t->{qtype} // TYPE_A;
    my $qs_class = $t->{qclass} // CLASS_INTERNET;

    $s .= pack("nn", $qs_type, $qs_class);

    # answer records: rdata/type/class may be given explicitly or be
    # derived from one of the ipv4/ipv6/cname/txt shorthand keys
    for my $ans (@$answers) {
        my $name = $ans->{name};
        my $type = $ans->{type};
        my $class = $ans->{class};
        my $ttl = $ans->{ttl};
        my $rdlength = $ans->{rdlength};
        my $rddata = $ans->{rddata};

        my $ipv4 = $ans->{ipv4};
        if (defined $ipv4) {
            my ($data, $len) = encode_ipv4($ipv4);
            $rddata //= $data;
            $rdlength //= $len;
            $type //= TYPE_A;
            $class //= CLASS_INTERNET;
        }

        my $ipv6 = $ans->{ipv6};
        if (defined $ipv6) {
            my ($data, $len) = encode_ipv6($ipv6);
            $rddata //= $data;
            $rdlength //= $len;
            $type //= TYPE_AAAA;
            $class //= CLASS_INTERNET;
        }

        my $cname = $ans->{cname};
        if (defined $cname) {
            # $cname was already wire-encoded in the first pass above
            $rddata //= $cname;
            $rdlength //= length $rddata;
            $type //= TYPE_CNAME;
            $class //= CLASS_INTERNET;
        }

        my $txt = $ans->{txt};
        if (defined $txt) {
            # NOTE(review): the raw text is used as rdata without the
            # one-byte character-string length prefix -- presumably
            # deliberate for malformed-record tests; confirm.
            $rddata //= $txt;
            $rdlength //= length $rddata;
            $type //= TYPE_TXT;
            $class //= CLASS_INTERNET;
        }

        $type //= 0;
        $class //= 0;
        $ttl //= 0;

        #warn "rdlength: $rdlength, rddata: ", encode_json([$rddata]), "\n";

        $s .= $name . pack("nnNn", $type, $class, $ttl, $rdlength) . $rddata;
    }

    if ($mode eq 'tcp') {
        # DNS over TCP frames each message with a 2-byte length prefix
        return pack("n", length($s)) . $s;
    }

    return $s;
}
|
||||
|
||||
# Pack a dotted-quad IPv4 address ("a.b.c.d") into its 4-byte wire
# form. Returns the packed data followed by its length (always 4).
sub encode_ipv4 ($) {
    my ($txt) = @_;
    return pack("CCCC", split /\./, $txt), 4;
}
|
||||
|
||||
# Pack a textual IPv6 address into its 16-byte wire form, expanding
# the "::" zero-run shorthand. Returns (packed data, 16). Dies on
# malformed input (too many or too few groups).
sub encode_ipv6 ($) {
    my $txt = shift;
    my @groups = split /:/, $txt;
    my $nils = 0;       # empty groups produced by "::"
    my $nonnils = 0;    # explicit hex groups
    for my $g (@groups) {
        if ($g eq '') {
            $nils++;
        } else {
            $nonnils++;
            $g = hex($g);   # $g aliases the element: converts in place
        }
    }

    my $total = $nils + $nonnils;
    if ($total > 8 ) {
        die "Invalid IPv6 address: too many groups: $total: $txt";
    }

    # expand the first run of empty groups into enough zero groups to
    # reach 8 in total; any further empty groups are dropped
    if ($nils) {
        my $found = 0;
        my @new_groups;
        for my $g (@groups) {
            if ($g eq '') {
                if ($found) {
                    next;
                }

                for (1 .. 8 - $nonnils) {
                    push @new_groups, 0;
                }

                $found = 1;

            } else {
                push @new_groups, $g;
            }
        }

        @groups = @new_groups;
    }

    if (@groups != 8) {
        die "Invalid IPv6 address: $txt: @groups\n";
    }

    #warn "IPv6 groups: @groups";

    return pack("nnnnnnnn", @groups), 16;
}
|
||||
|
||||
# Convert a dotted domain name to DNS wire format: each label is
# prefixed with its one-byte length and the whole name is terminated
# by a zero byte (e.g. "a.bc" -> "\x01a\x02bc\x00").
sub encode_name ($) {
    my ($name) = @_;
    my $wire = $name;
    $wire =~ s/([^.]+)\.?/chr(length($1)) . $1/ge;
    return $wire . "\0";
}
|
||||
|
||||
1
|
||||
89
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/lib/ljson.lua
vendored
Normal file
89
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/lib/ljson.lua
vendored
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
local ngx_null = ngx.null
local tostring = tostring
local byte = string.byte
local gsub = string.gsub
local sort = table.sort
local pairs = pairs
local ipairs = ipairs
local concat = table.concat


local ok, new_tab = pcall(require, "table.new")
if not ok then
    new_tab = function (narr, nrec) return {} end
end


-- ljson: a tiny JSON encoder for the test suite. Unlike cjson it
-- emits object keys in sorted order, so the output is deterministic.
local _M = {}


-- characters that must be escaped inside a JSON string
local metachars = {
    ['\t'] = '\\t',
    ["\\"] = "\\\\",
    ['"'] = '\\"',
    ['\r'] = '\\r',
    ['\n'] = '\\n',
}


local function encode_str(s)
    -- XXX we will rewrite this when string.buffer is implemented
    -- in LuaJIT 2.1 because string.gsub cannot be JIT compiled.
    return gsub(s, '["\\\r\n\t]', metachars)
end


-- Return the element count when t is a proper sequence (pairs yields
-- exactly the keys 1..n in order); nil otherwise.
local function is_arr(t)
    local want = 1
    for key in pairs(t) do
        if key ~= want then
            return nil
        end
        want = want + 1
    end
    return want - 1
end


local encode

-- Recursively encode a Lua value as JSON text. nil and ngx.null map
-- to "null"; unsupported types become a quoted "<type>" placeholder.
encode = function (v)
    if v == nil or v == ngx_null then
        return "null"
    end

    local typ = type(v)

    if typ == 'string' then
        return '"' .. encode_str(v) .. '"'
    end

    if typ == 'number' or typ == 'boolean' then
        return tostring(v)
    end

    if typ ~= 'table' then
        return '"<' .. typ .. '>"'
    end

    local n = is_arr(v)
    if n then
        local parts = new_tab(n, 0)
        for idx, item in ipairs(v) do
            parts[idx] = encode(item)
        end
        return "[" .. concat(parts, ",") .. "]"
    end

    -- hash part: sort the keys so the output is reproducible
    local keys = {}
    local count = 0
    for key in pairs(v) do
        count = count + 1
        keys[count] = key
    end
    sort(keys)

    local parts = new_tab(0, count)
    local i = 0
    for _, key in ipairs(keys) do
        i = i + 1
        parts[i] = encode(key) .. ":" .. encode(v[key])
    end
    return "{" .. concat(parts, ",") .. "}"
end
_M.encode = encode


return _M
|
||||
1423
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/mock.t
vendored
Normal file
1423
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/mock.t
vendored
Normal file
File diff suppressed because it is too large
Load diff
502
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/sanity.t
vendored
Normal file
502
controllers/nginx-third-party/lua/vendor/lua-resty-dns/t/sanity.t
vendored
Normal file
|
|
@ -0,0 +1,502 @@
|
|||
# vim:set ft= ts=4 sw=4 et:

use Test::Nginx::Socket::Lua;
use Cwd qw(cwd);

# run every test block twice to catch state leaking between requests
repeat_each(2);

plan tests => repeat_each() * (3 * blocks());

my $pwd = cwd();

# expose the library under lib/ and the test helpers under t/lib/ to
# the embedded Lua snippets below
our $HttpConfig = qq{
    lua_package_path "$pwd/t/lib/?.lua;$pwd/lib/?.lua;;";
    lua_package_cpath "/usr/local/openresty-debug/lualib/?.so;/usr/local/openresty/lualib/?.so;;";
};

# default upstream resolver; override via the environment when offline
$ENV{TEST_NGINX_RESOLVER} ||= '8.8.8.8';

no_long_string();

run_tests();

__DATA__
|
||||
|
||||
=== TEST 1: A records
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("www.google.com", { qtype = r.TYPE_A })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[.*?"address":"(?:\d{1,3}\.){3}\d+".*?\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 2: CNAME records
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("www.yahoo.com", { qtype = r.TYPE_CNAME })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[.*?"cname":"[-_a-z0-9.]+".*?\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 3: AAAA records
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("www.google.com", { qtype = r.TYPE_AAAA })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[.*?"address":"[a-fA-F0-9]*(?::[a-fA-F0-9]*)+".*?\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 4: compress ipv6 addr
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local c = resolver.compress_ipv6_addr
|
||||
|
||||
ngx.say(c("1080:0:0:0:8:800:200C:417A"))
|
||||
ngx.say(c("FF01:0:0:0:0:0:0:101"))
|
||||
ngx.say(c("0:0:0:0:0:0:0:1"))
|
||||
ngx.say(c("1:5:0:0:0:0:0:0"))
|
||||
ngx.say(c("7:25:0:0:0:3:0:0"))
|
||||
ngx.say(c("0:0:0:0:0:0:0:0"))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body
|
||||
1080::8:800:200C:417A
|
||||
FF01::101
|
||||
::1
|
||||
1:5::
|
||||
7:25::3:0:0
|
||||
::
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 5: A records (TCP)
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:tcp_query("www.google.com", { qtype = r.TYPE_A })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[.*?"address":"(?:\d{1,3}\.){3}\d+".*?\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 6: MX records
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("gmail.com", { qtype = r.TYPE_MX })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[\{.*?"preference":\d+,.*?"exchange":"[^"]+".*?\}\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 7: NS records
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("agentzh.org", { qtype = r.TYPE_NS })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[\{.*?"nsdname":"[^"]+".*?\}\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 8: TXT query (no ans)
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("agentzh.org", { qtype = r.TYPE_TXT })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body
|
||||
records: {}
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- timeout: 10
|
||||
|
||||
|
||||
|
||||
=== TEST 9: TXT query (with ans)
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("gmail.com", { qtype = r.TYPE_TXT })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[\{.*?"txt":"v=spf\d+\s[^"]+".*?\}\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 10: PTR query
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("4.4.8.8.in-addr.arpa", { qtype = r.TYPE_PTR })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[\{.*?"ptrdname":"google-public-dns-b\.google\.com".*?\}\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 11: domains with a trailing dot
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("www.google.com.", { qtype = r.TYPE_A })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[.*?"address":"(?:\d{1,3}\.){3}\d+".*?\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 12: domains with a leading dot
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query(".www.google.com", { qtype = r.TYPE_A })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body
|
||||
failed to query: bad name
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 13: SRV records for XMPP
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("_xmpp-client._tcp.jabber.org", { qtype = r.TYPE_SRV })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ljson = require "ljson"
|
||||
ngx.say("records: ", ljson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[(?:{"class":1,"name":"_xmpp-client._tcp.jabber.org","port":\d+,"priority":\d+,"target":"[\w.]+\.jabber.org","ttl":\d+,"type":33,"weight":\d+},?)+\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 14: SPF query (with ans)
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("linkedin.com", { qtype = r.TYPE_SPF })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body_like chop
|
||||
^records: \[\{.*?"spf":"v=spf\d+\s[^"]+".*?\}\]$
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
|
||||
=== TEST 15: SPF query (no ans)
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location /t {
|
||||
content_by_lua '
|
||||
local resolver = require "resty.dns.resolver"
|
||||
|
||||
local r, err = resolver:new{ nameservers = { "$TEST_NGINX_RESOLVER" } }
|
||||
if not r then
|
||||
ngx.say("failed to instantiate resolver: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local ans, err = r:query("agentzh.org", { qtype = r.TYPE_SPF })
|
||||
if not ans then
|
||||
ngx.say("failed to query: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
local cjson = require "cjson"
|
||||
ngx.say("records: ", cjson.encode(ans))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /t
|
||||
--- response_body
|
||||
records: {}
|
||||
--- no_error_log
|
||||
[error]
|
||||
--- timeout: 10
|
||||
|
||||
549
controllers/nginx-third-party/lua/vendor/lua-resty-dns/valgrind.suppress
vendored
Normal file
549
controllers/nginx-third-party/lua/vendor/lua-resty-dns/valgrind.suppress
vendored
Normal file
|
|
@ -0,0 +1,549 @@
|
|||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:lj_str_new
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Param
|
||||
write(buf)
|
||||
fun:__write_nocancel
|
||||
fun:ngx_log_error_core
|
||||
fun:ngx_resolver_read_response
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:ngx_sprintf_num
|
||||
fun:ngx_vslprintf
|
||||
fun:ngx_log_error_core
|
||||
fun:ngx_resolver_read_response
|
||||
fun:ngx_epoll_process_events
|
||||
fun:ngx_process_events_and_timers
|
||||
fun:ngx_single_process_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Addr1
|
||||
fun:ngx_vslprintf
|
||||
fun:ngx_snprintf
|
||||
fun:ngx_sock_ntop
|
||||
fun:ngx_event_accept
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Param
|
||||
write(buf)
|
||||
fun:__write_nocancel
|
||||
fun:ngx_log_error_core
|
||||
fun:ngx_resolver_read_response
|
||||
fun:ngx_event_process_posted
|
||||
fun:ngx_process_events_and_timers
|
||||
fun:ngx_single_process_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:ngx_sprintf_num
|
||||
fun:ngx_vslprintf
|
||||
fun:ngx_log_error_core
|
||||
fun:ngx_resolver_read_response
|
||||
fun:ngx_event_process_posted
|
||||
fun:ngx_process_events_and_timers
|
||||
fun:ngx_single_process_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:lj_str_new
|
||||
fun:lua_pushlstring
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
obj:*
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:lj_str_new
|
||||
fun:lua_pushlstring
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:ngx_http_lua_ndk_set_var_get
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:lj_str_new
|
||||
fun:lua_getfield
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:lj_str_new
|
||||
fun:lua_setfield
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:ngx_http_variables_init_vars
|
||||
fun:ngx_http_block
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:ngx_conf_parse
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
exp-sgcheck:SorG
|
||||
fun:ngx_vslprintf
|
||||
fun:ngx_log_error_core
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_calloc
|
||||
fun:ngx_event_process_init
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_malloc
|
||||
fun:ngx_pcalloc
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Addr4
|
||||
fun:lj_str_new
|
||||
fun:lua_setfield
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Addr4
|
||||
fun:lj_str_new
|
||||
fun:lua_getfield
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:(below main)
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Param
|
||||
epoll_ctl(event)
|
||||
fun:epoll_ctl
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_event_process_init
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:ngx_conf_flush_files
|
||||
fun:ngx_single_process_cycle
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:memcpy
|
||||
fun:ngx_vslprintf
|
||||
fun:ngx_log_error_core
|
||||
fun:ngx_http_charset_header_filter
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:memalign
|
||||
fun:posix_memalign
|
||||
fun:ngx_memalign
|
||||
fun:ngx_pcalloc
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Addr4
|
||||
fun:lj_str_new
|
||||
fun:lua_pushlstring
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:lj_str_new
|
||||
fun:lj_str_fromnum
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:lj_str_new
|
||||
fun:lua_pushlstring
|
||||
}
|
||||
{
|
||||
<false_alarm_due_to_u32_alignment_in_luajit2>
|
||||
Memcheck:Addr4
|
||||
fun:lj_str_new
|
||||
fun:lua_setfield
|
||||
fun:ngx_http_lua_cache_store_code
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:lj_str_new
|
||||
fun:lua_getfield
|
||||
fun:ngx_http_lua_cache_load_code
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:lj_str_new
|
||||
fun:lua_setfield
|
||||
fun:ngx_http_lua_cache_store_code
|
||||
}
|
||||
{
|
||||
<false_alarm_due_to_u32_alignment_in_luajit2>
|
||||
Memcheck:Addr4
|
||||
fun:lj_str_new
|
||||
fun:lua_getfield
|
||||
fun:ngx_http_lua_cache_load_code
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Param
|
||||
socketcall.setsockopt(optval)
|
||||
fun:setsockopt
|
||||
fun:drizzle_state_connect
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_pool_cleanup_add
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_pnalloc
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:ngx_conf_flush_files
|
||||
fun:ngx_single_process_cycle
|
||||
fun:main
|
||||
}
|
||||
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_pcalloc
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_malloc
|
||||
fun:ngx_palloc_large
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_create_pool
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_malloc
|
||||
fun:ngx_palloc
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_malloc
|
||||
fun:ngx_pnalloc
|
||||
}
|
||||
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_array_push
|
||||
fun:ngx_http_get_variable_index
|
||||
fun:ngx_http_memc_add_variable
|
||||
fun:ngx_http_memc_init
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_event_process_init
|
||||
fun:ngx_single_process_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_crc32_table_init
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_event_process_init
|
||||
fun:ngx_worker_process_init
|
||||
fun:ngx_worker_process_cycle
|
||||
fun:ngx_spawn_process
|
||||
fun:ngx_start_worker_processes
|
||||
fun:ngx_master_process_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_pcalloc
|
||||
fun:ngx_hash_init
|
||||
fun:ngx_http_variables_init_vars
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_pcalloc
|
||||
fun:ngx_http_upstream_drizzle_create_srv_conf
|
||||
fun:ngx_http_upstream
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_pcalloc
|
||||
fun:ngx_hash_keys_array_init
|
||||
fun:ngx_http_variables_add_core_vars
|
||||
fun:ngx_http_core_preconfiguration
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_array_push
|
||||
fun:ngx_hash_add_key
|
||||
fun:ngx_http_add_variable
|
||||
fun:ngx_http_echo_add_variables
|
||||
fun:ngx_http_echo_handler_init
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_pcalloc
|
||||
fun:ngx_http_upstream_drizzle_create_srv_conf
|
||||
fun:ngx_http_core_server
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_pcalloc
|
||||
fun:ngx_http_upstream_drizzle_create_srv_conf
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_array_push
|
||||
fun:ngx_hash_add_key
|
||||
fun:ngx_http_variables_add_core_vars
|
||||
fun:ngx_http_core_preconfiguration
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_pcalloc
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_hash_init
|
||||
fun:ngx_http_upstream_init_main_conf
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_pcalloc
|
||||
fun:ngx_http_drizzle_keepalive_init
|
||||
fun:ngx_http_upstream_drizzle_init
|
||||
fun:ngx_http_upstream_init_main_conf
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:malloc
|
||||
fun:ngx_alloc
|
||||
fun:ngx_palloc_large
|
||||
fun:ngx_palloc
|
||||
fun:ngx_hash_init
|
||||
fun:ngx_http_variables_init_vars
|
||||
fun:ngx_http_block
|
||||
fun:ngx_conf_parse
|
||||
fun:ngx_init_cycle
|
||||
fun:main
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:memalign
|
||||
fun:posix_memalign
|
||||
fun:ngx_memalign
|
||||
fun:ngx_create_pool
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Leak
|
||||
fun:memalign
|
||||
fun:posix_memalign
|
||||
fun:ngx_memalign
|
||||
fun:ngx_palloc_block
|
||||
fun:ngx_palloc
|
||||
}
|
||||
{
|
||||
<insert_a_suppression_name_here>
|
||||
Memcheck:Cond
|
||||
fun:index
|
||||
fun:expand_dynamic_string_token
|
||||
fun:_dl_map_object
|
||||
fun:map_doit
|
||||
fun:_dl_catch_error
|
||||
fun:do_preload
|
||||
fun:dl_main
|
||||
fun:_dl_sysdep_start
|
||||
fun:_dl_start
|
||||
}
|
||||
2
controllers/nginx-third-party/lua/vendor/lua-resty-http/.gitignore
vendored
Normal file
2
controllers/nginx-third-party/lua/vendor/lua-resty-http/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
t/servroot/
|
||||
t/error.log
|
||||
23
controllers/nginx-third-party/lua/vendor/lua-resty-http/LICENSE
vendored
Normal file
23
controllers/nginx-third-party/lua/vendor/lua-resty-http/LICENSE
vendored
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
Copyright (c) 2013, James Hurst
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
20
controllers/nginx-third-party/lua/vendor/lua-resty-http/Makefile
vendored
Normal file
20
controllers/nginx-third-party/lua/vendor/lua-resty-http/Makefile
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
OPENRESTY_PREFIX=/usr/local/openresty
|
||||
|
||||
PREFIX ?= /usr/local
|
||||
LUA_INCLUDE_DIR ?= $(PREFIX)/include
|
||||
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
|
||||
INSTALL ?= install
|
||||
TEST_FILE ?= t
|
||||
|
||||
.PHONY: all test install
|
||||
|
||||
all: ;
|
||||
|
||||
install: all
|
||||
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty
|
||||
$(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/
|
||||
|
||||
test: all
|
||||
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH TEST_NGINX_NO_SHUFFLE=1 prove -I../test-nginx/lib -r $(TEST_FILE)
|
||||
util/lua-releng
|
||||
|
||||
422
controllers/nginx-third-party/lua/vendor/lua-resty-http/README.md
vendored
Normal file
422
controllers/nginx-third-party/lua/vendor/lua-resty-http/README.md
vendored
Normal file
|
|
@ -0,0 +1,422 @@
|
|||
# lua-resty-http
|
||||
|
||||
Lua HTTP client cosocket driver for [OpenResty](http://openresty.org/) / [ngx_lua](https://github.com/chaoslawful/lua-nginx-module).
|
||||
|
||||
# Status
|
||||
|
||||
Ready for testing. Probably production ready in most cases, though not yet proven in the wild. Please check the issues list and let me know if you have any problems / questions.
|
||||
|
||||
# Features
|
||||
|
||||
* HTTP 1.0 and 1.1
|
||||
* Streaming interface to reading bodies using coroutines, for predictable memory usage in Lua land.
|
||||
* Alternative simple interface for singleshot requests without manual connection step.
|
||||
* Headers treated case insensitively.
|
||||
* Chunked transfer encoding.
|
||||
* Keepalive.
|
||||
* Pipelining.
|
||||
* Trailers.
|
||||
|
||||
|
||||
# API
|
||||
|
||||
* [new](#name)
|
||||
* [connect](#connect)
|
||||
* [set_timeout](#set_timeout)
|
||||
* [ssl_handshake](#ssl_handshake)
|
||||
* [set_keepalive](#set_keepalive)
|
||||
* [get_reused_times](#get_reused_times)
|
||||
* [close](#close)
|
||||
* [request](#request)
|
||||
* [request_uri](#request_uri)
|
||||
* [request_pipeline](#request_pipeline)
|
||||
* [Response](#response)
|
||||
* [body_reader](#resbody_reader)
|
||||
* [read_body](#resread_body)
|
||||
* [read_trailes](#resread_trailers)
|
||||
* [Proxy](#proxy)
|
||||
* [proxy_request](#proxy_request)
|
||||
* [proxy_response](#proxy_response)
|
||||
* [Utility](#utility)
|
||||
* [parse_uri](#parse_uri)
|
||||
* [get_client_body_reader](#get_client_body_reader)
|
||||
|
||||
|
||||
## Synopsis
|
||||
|
||||
```` lua
|
||||
lua_package_path "/path/to/lua-resty-http/lib/?.lua;;";
|
||||
|
||||
server {
|
||||
|
||||
|
||||
location /simpleinterface {
|
||||
resolver 8.8.8.8; # use Google's open DNS server for an example
|
||||
|
||||
content_by_lua '
|
||||
|
||||
-- For simple singleshot requests, use the URI interface.
|
||||
local httpc = http.new()
|
||||
local res, err = httpc:request_uri("http://example.com/helloworld", {
|
||||
method = "POST",
|
||||
body = "a=1&b=2",
|
||||
headers = {
|
||||
["Content-Type"] = "application/x-www-form-urlencoded",
|
||||
}
|
||||
})
|
||||
|
||||
if not res then
|
||||
ngx.say("failed to request: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
-- In this simple form, there is no manual connection step, so the body is read
|
||||
-- all in one go, including any trailers, and the connection closed or keptalive
|
||||
-- for you.
|
||||
|
||||
ngx.status = res.status
|
||||
|
||||
for k,v in pairs(res.headers) do
|
||||
--
|
||||
end
|
||||
|
||||
ngx.say(res.body)
|
||||
';
|
||||
}
|
||||
|
||||
|
||||
location /genericinterface {
|
||||
content_by_lua '
|
||||
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
|
||||
-- The generic form gives us more control. We must connect manually.
|
||||
httpc:set_timeout(500)
|
||||
httpc:connect("127.0.0.1", 80)
|
||||
|
||||
-- And request using a path, rather than a full URI.
|
||||
local res, err = httpc:request{
|
||||
path = "/helloworld",
|
||||
headers = {
|
||||
["Host"] = "example.com",
|
||||
},
|
||||
}
|
||||
|
||||
if not res then
|
||||
ngx.say("failed to request: ", err)
|
||||
return
|
||||
end
|
||||
|
||||
-- Now we can use the body_reader iterator, to stream the body according to our desired chunk size.
|
||||
local reader = res.body_reader
|
||||
|
||||
repeat
|
||||
local chunk, err = reader(8192)
|
||||
if err then
|
||||
ngx.log(ngx.ERR, err)
|
||||
break
|
||||
end
|
||||
|
||||
if chunk then
|
||||
-- process
|
||||
end
|
||||
until not chunk
|
||||
|
||||
local ok, err = httpc:set_keepalive()
|
||||
if not ok then
|
||||
ngx.say("failed to set keepalive: ", err)
|
||||
return
|
||||
end
|
||||
';
|
||||
}
|
||||
}
|
||||
````
|
||||
|
||||
# Connection
|
||||
|
||||
## new
|
||||
|
||||
`syntax: httpc = http.new()`
|
||||
|
||||
Creates the http object. In case of failures, returns `nil` and a string describing the error.
|
||||
|
||||
## connect
|
||||
|
||||
`syntax: ok, err = httpc:connect(host, port, options_table?)`
|
||||
|
||||
`syntax: ok, err = httpc:connect("unix:/path/to/unix.sock", options_table?)`
|
||||
|
||||
Attempts to connect to the web server.
|
||||
|
||||
Before actually resolving the host name and connecting to the remote backend, this method will always look up the connection pool for matched idle connections created by previous calls of this method.
|
||||
|
||||
An optional Lua table can be specified as the last argument to this method to specify various connect options:
|
||||
|
||||
* `pool`
|
||||
: Specifies a custom name for the connection pool being used. If omitted, then the connection pool name will be generated from the string template `<host>:<port>` or `<unix-socket-path>`.
|
||||
|
||||
## set_timeout
|
||||
|
||||
`syntax: httpc:set_timeout(time)`
|
||||
|
||||
Sets the timeout (in ms) protection for subsequent operations, including the `connect` method.
|
||||
|
||||
## ssl_handshake
|
||||
|
||||
`syntax: session, err = httpc:ssl_handshake(session, host, verify)`
|
||||
|
||||
Performs an SSL handshake on the TCP connection, only availble in ngx_lua > v0.9.11
|
||||
|
||||
See docs for [ngx.socket.tcp](https://github.com/openresty/lua-nginx-module#ngxsockettcp) for details.
|
||||
|
||||
## set_keepalive
|
||||
|
||||
`syntax: ok, err = httpc:set_keepalive(max_idle_timeout, pool_size)`
|
||||
|
||||
Attempts to puts the current connection into the ngx_lua cosocket connection pool.
|
||||
|
||||
You can specify the max idle timeout (in ms) when the connection is in the pool and the maximal size of the pool every nginx worker process.
|
||||
|
||||
Only call this method in the place you would have called the `close` method instead. Calling this method will immediately turn the current http object into the `closed` state. Any subsequent operations other than `connect()` on the current objet will return the `closed` error.
|
||||
|
||||
Note that calling this instead of `close` is "safe" in that it will conditionally close depending on the type of request. Specifically, a `1.0` request without `Connection: Keep-Alive` will be closed, as will a `1.1` request with `Connection: Close`.
|
||||
|
||||
In case of success, returns `1`. In case of errors, returns `nil, err`. In the case where the conneciton is conditionally closed as described above, returns `2` and the error string `connection must be closed`.
|
||||
|
||||
## get_reused_times
|
||||
|
||||
`syntax: times, err = httpc:get_reused_times()`
|
||||
|
||||
This method returns the (successfully) reused times for the current connection. In case of error, it returns `nil` and a string describing the error.
|
||||
|
||||
If the current connection does not come from the built-in connection pool, then this method always returns `0`, that is, the connection has never been reused (yet). If the connection comes from the connection pool, then the return value is always non-zero. So this method can also be used to determine if the current connection comes from the pool.
|
||||
|
||||
## close
|
||||
|
||||
`syntax: ok, err = http:close()`
|
||||
|
||||
Closes the current connection and returns the status.
|
||||
|
||||
In case of success, returns `1`. In case of errors, returns `nil` with a string describing the error.
|
||||
|
||||
|
||||
# Requesting
|
||||
|
||||
## request
|
||||
|
||||
`syntax: res, err = httpc:request(params)`
|
||||
|
||||
Returns a `res` table or `nil` and an error message.
|
||||
|
||||
The `params` table accepts the following fields:
|
||||
|
||||
* `version` The HTTP version number, currently supporting 1.0 or 1.1.
|
||||
* `method` The HTTP method string.
|
||||
* `path` The path string.
|
||||
* `headers` A table of request headers.
|
||||
* `body` The request body as a string, or an iterator function (see [get_client_body_reader](#get_client_body_reader)).
|
||||
* `ssl_verify` Verify SSL cert matches hostname
|
||||
|
||||
When the request is successful, `res` will contain the following fields:
|
||||
|
||||
* `status` The status code.
|
||||
* `headers` A table of headers. Multiple headers with the same field name will be presented as a table of values.
|
||||
* `has_body` A boolean flag indicating if there is a body to be read.
|
||||
* `body_reader` An iterator function for reading the body in a streaming fashion.
|
||||
* `read_body` A method to read the entire body into a string.
|
||||
* `read_trailers` A method to merge any trailers underneath the headers, after reading the body.
|
||||
|
||||
## request_uri
|
||||
|
||||
`syntax: res, err = httpc:request_uri(uri, params)`
|
||||
|
||||
The simple interface. Options supplied in the `params` table are the same as in the generic interface, and will override components found in the uri itself.
|
||||
|
||||
In this mode, there is no need to connect manually first. The connection is made on your behalf, suiting cases where you simply need to grab a URI without too much hassle.
|
||||
|
||||
Additionally there is no ability to stream the response body in this mode. If the request is successful, `res` will contain the following fields:
|
||||
|
||||
* `status` The status code.
|
||||
* `headers` A table of headers.
|
||||
* `body` The response body as a string.
|
||||
|
||||
|
||||
## request_pipeline
|
||||
|
||||
`syntax: responses, err = httpc:request_pipeline(params)`
|
||||
|
||||
This method works as per the [request](#request) method above, but `params` is instead a table of param tables. Each request is sent in order, and `responses` is returned as a table of response handles. For example:
|
||||
|
||||
```lua
|
||||
local responses = httpc:request_pipeline{
|
||||
{
|
||||
path = "/b",
|
||||
},
|
||||
{
|
||||
path = "/c",
|
||||
},
|
||||
{
|
||||
path = "/d",
|
||||
}
|
||||
}
|
||||
|
||||
for i,r in ipairs(responses) do
|
||||
if r.status then
|
||||
ngx.say(r.status)
|
||||
ngx.say(r:read_body())
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
Due to the nature of pipelining, no responses are actually read until you attempt to use the response fields (status / headers etc). And since the responses are read off in order, you must read the entire body (and any trailers if you have them), before attempting to read the next response.
|
||||
|
||||
Note this doesn't preclude the use of the streaming response body reader. Responses can still be streamed, so long as the entire body is streamed before attempting to access the next response.
|
||||
|
||||
Be sure to test at least one field (such as status) before trying to use the others, in case a socket read error has occurred.
|
||||
|
||||
# Response
|
||||
|
||||
## res.body_reader
|
||||
|
||||
The `body_reader` iterator can be used to stream the response body in chunk sizes of your choosing, as follows:
|
||||
|
||||
````lua
|
||||
local reader = res.body_reader
|
||||
|
||||
repeat
|
||||
local chunk, err = reader(8192)
|
||||
if err then
|
||||
ngx.log(ngx.ERR, err)
|
||||
break
|
||||
end
|
||||
|
||||
if chunk then
|
||||
-- process
|
||||
end
|
||||
until not chunk
|
||||
````
|
||||
|
||||
If the reader is called with no arguments, the behaviour depends on the type of connection. If the response is encoded as chunked, then the iterator will return the chunks as they arrive. If not, it will simply return the entire body.
|
||||
|
||||
Note that the size provided is actually a **maximum** size. So in the chunked transfer case, you may get chunks smaller than the size you ask, as a remainder of the actual HTTP chunks.
|
||||
|
||||
## res:read_body
|
||||
|
||||
`syntax: body, err = res:read_body()`
|
||||
|
||||
Reads the entire body into a local string.
|
||||
|
||||
|
||||
## res:read_trailers
|
||||
|
||||
`syntax: res:read_trailers()`
|
||||
|
||||
This merges any trailers underneath the `res.headers` table itself. Must be called after reading the body.
|
||||
|
||||
|
||||
# Proxy
|
||||
|
||||
There are two convenience methods for when one simply wishes to proxy the current request to the connected upstream, and safely send it downstream to the client, as a reverse proxy. A complete example:
|
||||
|
||||
```lua
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
|
||||
httpc:set_timeout(500)
|
||||
local ok, err = httpc:connect(HOST, PORT)
|
||||
|
||||
if not ok then
|
||||
ngx.log(ngx.ERR, err)
|
||||
return
|
||||
end
|
||||
|
||||
httpc:set_timeout(2000)
|
||||
httpc:proxy_response(httpc:proxy_request())
|
||||
httpc:set_keepalive()
|
||||
```
|
||||
|
||||
|
||||
## proxy_request
|
||||
|
||||
`syntax: local res, err = httpc:proxy_request(request_body_chunk_size?)`
|
||||
|
||||
Performs a request using the current client request arguments, effectively proxying to the connected upstream. The request body will be read in a streaming fashion, according to `request_body_chunk_size` (see [documentation on the client body reader](#get_client_body_reader) below).
|
||||
|
||||
|
||||
## proxy_response
|
||||
|
||||
`syntax: httpc:proxy_response(res, chunksize?)`
|
||||
|
||||
Sets the current response based on the given `res`. Ensures that hop-by-hop headers are not sent downstream, and will read the response according to `chunksize` (see [documentation on the body reader](#resbody_reader) above).
|
||||
|
||||
|
||||
# Utility
|
||||
|
||||
## parse_uri
|
||||
|
||||
`syntax: local scheme, host, port, path = unpack(httpc:parse_uri(uri))`
|
||||
|
||||
This is a convenience function allowing one to more easily use the generic interface, when the input data is a URI.
|
||||
|
||||
|
||||
## get_client_body_reader
|
||||
|
||||
`syntax: reader, err = httpc:get_client_body_reader(chunksize?, sock?)`
|
||||
|
||||
Returns an iterator function which can be used to read the downstream client request body in a streaming fashion. You may also specify an optional default chunksize (default is `65536`), or an already established socket in
|
||||
place of the client request.
|
||||
|
||||
Example:
|
||||
|
||||
```lua
|
||||
local req_reader = httpc:get_client_body_reader()
|
||||
|
||||
repeat
|
||||
local chunk, err = req_reader(8192)
|
||||
if err then
|
||||
ngx.log(ngx.ERR, err)
|
||||
break
|
||||
end
|
||||
|
||||
if chunk then
|
||||
-- process
|
||||
end
|
||||
until not chunk
|
||||
```
|
||||
|
||||
This iterator can also be used as the value for the body field in request params, allowing one to stream the request body into a proxied upstream request.
|
||||
|
||||
```lua
|
||||
local client_body_reader, err = httpc:get_client_body_reader()
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/helloworld",
|
||||
body = client_body_reader,
|
||||
}
|
||||
```
|
||||
|
||||
If `sock` is specified,
|
||||
|
||||
# Author
|
||||
|
||||
James Hurst <james@pintsized.co.uk>
|
||||
|
||||
Originally started life based on https://github.com/bakins/lua-resty-http-simple. Cosocket docs and implementation borrowed from the other lua-resty-* cosocket modules.
|
||||
|
||||
|
||||
# Licence
|
||||
|
||||
This module is licensed under the 2-clause BSD license.
|
||||
|
||||
Copyright (c) 2013, James Hurst <james@pintsized.co.uk>
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
814
controllers/nginx-third-party/lua/vendor/lua-resty-http/lib/resty/http.lua
vendored
Normal file
814
controllers/nginx-third-party/lua/vendor/lua-resty-http/lib/resty/http.lua
vendored
Normal file
|
|
@ -0,0 +1,814 @@
|
|||
local http_headers = require "resty.http_headers"
|
||||
|
||||
local ngx_socket_tcp = ngx.socket.tcp
|
||||
local ngx_req = ngx.req
|
||||
local ngx_req_socket = ngx_req.socket
|
||||
local ngx_req_get_headers = ngx_req.get_headers
|
||||
local ngx_req_get_method = ngx_req.get_method
|
||||
local str_gmatch = string.gmatch
|
||||
local str_lower = string.lower
|
||||
local str_upper = string.upper
|
||||
local str_find = string.find
|
||||
local str_sub = string.sub
|
||||
local str_gsub = string.gsub
|
||||
local tbl_concat = table.concat
|
||||
local tbl_insert = table.insert
|
||||
local ngx_encode_args = ngx.encode_args
|
||||
local ngx_re_match = ngx.re.match
|
||||
local ngx_re_gsub = ngx.re.gsub
|
||||
local ngx_log = ngx.log
|
||||
local ngx_DEBUG = ngx.DEBUG
|
||||
local ngx_ERR = ngx.ERR
|
||||
local ngx_NOTICE = ngx.NOTICE
|
||||
local ngx_var = ngx.var
|
||||
local co_yield = coroutine.yield
|
||||
local co_create = coroutine.create
|
||||
local co_status = coroutine.status
|
||||
local co_resume = coroutine.resume
|
||||
|
||||
|
||||
-- http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1
|
||||
local HOP_BY_HOP_HEADERS = {
|
||||
["connection"] = true,
|
||||
["keep-alive"] = true,
|
||||
["proxy-authenticate"] = true,
|
||||
["proxy-authorization"] = true,
|
||||
["te"] = true,
|
||||
["trailers"] = true,
|
||||
["transfer-encoding"] = true,
|
||||
["upgrade"] = true,
|
||||
["content-length"] = true, -- Not strictly hop-by-hop, but Nginx will deal
|
||||
-- with this (may send chunked for example).
|
||||
}
|
||||
|
||||
|
||||
-- Reimplemented coroutine.wrap, returning "nil, err" if the coroutine cannot
|
||||
-- be resumed. This protects user code from inifite loops when doing things like
|
||||
-- repeat
|
||||
-- local chunk, err = res.body_reader()
|
||||
-- if chunk then -- <-- This could be a string msg in the core wrap function.
|
||||
-- ...
|
||||
-- end
|
||||
-- until not chunk
|
||||
local co_wrap = function(func)
|
||||
local co = co_create(func)
|
||||
if not co then
|
||||
return nil, "could not create coroutine"
|
||||
else
|
||||
return function(...)
|
||||
if co_status(co) == "suspended" then
|
||||
return select(2, co_resume(co, ...))
|
||||
else
|
||||
return nil, "can't resume a " .. co_status(co) .. " coroutine"
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
local _M = {
|
||||
_VERSION = '0.06',
|
||||
}
|
||||
_M._USER_AGENT = "lua-resty-http/" .. _M._VERSION .. " (Lua) ngx_lua/" .. ngx.config.ngx_lua_version
|
||||
|
||||
local mt = { __index = _M }
|
||||
|
||||
|
||||
local HTTP = {
|
||||
[1.0] = " HTTP/1.0\r\n",
|
||||
[1.1] = " HTTP/1.1\r\n",
|
||||
}
|
||||
|
||||
local DEFAULT_PARAMS = {
|
||||
method = "GET",
|
||||
path = "/",
|
||||
version = 1.1,
|
||||
}
|
||||
|
||||
|
||||
function _M.new(self)
|
||||
local sock, err = ngx_socket_tcp()
|
||||
if not sock then
|
||||
return nil, err
|
||||
end
|
||||
return setmetatable({ sock = sock, keepalive = true }, mt)
|
||||
end
|
||||
|
||||
|
||||
function _M.set_timeout(self, timeout)
|
||||
local sock = self.sock
|
||||
if not sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
return sock:settimeout(timeout)
|
||||
end
|
||||
|
||||
|
||||
function _M.ssl_handshake(self, ...)
|
||||
local sock = self.sock
|
||||
if not sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
return sock:sslhandshake(...)
|
||||
end
|
||||
|
||||
|
||||
function _M.connect(self, ...)
|
||||
local sock = self.sock
|
||||
if not sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
self.host = select(1, ...)
|
||||
self.keepalive = true
|
||||
|
||||
return sock:connect(...)
|
||||
end
|
||||
|
||||
|
||||
function _M.set_keepalive(self, ...)
|
||||
local sock = self.sock
|
||||
if not sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
if self.keepalive == true then
|
||||
return sock:setkeepalive(...)
|
||||
else
|
||||
-- The server said we must close the connection, so we cannot setkeepalive.
|
||||
-- If close() succeeds we return 2 instead of 1, to differentiate between
|
||||
-- a normal setkeepalive() failure and an intentional close().
|
||||
local res, err = sock:close()
|
||||
if res then
|
||||
return 2, "connection must be closed"
|
||||
else
|
||||
return res, err
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
function _M.get_reused_times(self)
|
||||
local sock = self.sock
|
||||
if not sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
return sock:getreusedtimes()
|
||||
end
|
||||
|
||||
|
||||
function _M.close(self)
|
||||
local sock = self.sock
|
||||
if not sock then
|
||||
return nil, "not initialized"
|
||||
end
|
||||
|
||||
return sock:close()
|
||||
end
|
||||
|
||||
|
||||
local function _should_receive_body(method, code)
|
||||
if method == "HEAD" then return nil end
|
||||
if code == 204 or code == 304 then return nil end
|
||||
if code >= 100 and code < 200 then return nil end
|
||||
return true
|
||||
end
|
||||
|
||||
|
||||
-- Parses uri into its component parts, returned as a table:
--   { scheme, host, port, path }
-- The port is always a number (443 for https, 80 for http when absent) and
-- the path defaults to "/". Returns nil plus an error message on failure.
function _M.parse_uri(self, uri)
    -- BUGFIX: the previous pattern used "http[s]*", which also matched
    -- invalid schemes such as "httpss"; "https?" accepts exactly "http"
    -- or "https".
    local m, err = ngx_re_match(uri, [[^(https?)://([^:/]+)(?::(\d+))?(.*)]],
        "jo")

    if not m then
        if err then
            return nil, "failed to match the uri: " .. err
        end

        return nil, "bad uri"
    else
        if m[3] then
            -- Normalise an explicit port (captured as a string) to a
            -- number, consistent with the defaulted values below.
            m[3] = tonumber(m[3])
        else
            if m[1] == "https" then
                m[3] = 443
            else
                m[3] = 80
            end
        end
        if not m[4] then m[4] = "/" end
        return m, nil
    end
end
|
||||
|
||||
|
||||
-- Serialises a request params table into a raw HTTP request string:
-- request line, header lines and the terminating blank line (no body).
local function _format_request(params)
    local version = params.version
    local headers = params.headers or {}

    -- A query may be given pre-encoded as a string, or as a table of
    -- arguments which is URL-encoded and prefixed with "?" here.
    local query = params.query or ""
    if query then
        if type(query) == "table" then
            query = "?" .. ngx_encode_args(query)
        end
    end

    -- Initialize request
    local req = {
        str_upper(params.method),
        " ",
        params.path,
        query,
        HTTP[version],
        -- Pre-allocate slots for minimum headers and carriage return.
        true,
        true,
        true,
    }
    local c = 6 -- req table index it's faster to do this inline vs table.insert

    -- Append headers
    for key, values in pairs(headers) do
        -- A header may hold a single value or a list of values; treat
        -- both uniformly as a list.
        if type(values) ~= "table" then
            values = {values}
        end

        key = tostring(key)
        for _, value in pairs(values) do
            req[c] = key .. ": " .. tostring(value) .. "\r\n"
            c = c + 1
        end
    end

    -- Close headers
    req[c] = "\r\n"

    return tbl_concat(req)
end
|
||||
|
||||
|
||||
-- Reads the status line (e.g. "HTTP/1.1 200 OK") from the socket and
-- returns the numeric status code and HTTP version (e.g. 200, 1.1) by
-- fixed position, or nil, nil, err on a read failure.
local function _receive_status(sock)
    local line, err = sock:receive("*l")
    if line == nil then
        return nil, nil, err
    end

    local status = tonumber(str_sub(line, 10, 12))
    local version = tonumber(str_sub(line, 6, 8))
    return status, version
end
|
||||
|
||||
|
||||
|
||||
-- Reads response headers from the socket until the blank line that ends
-- the header section. Repeated header names are collected into a table of
-- values. Returns a case-insensitive headers table (see http_headers.new),
-- or nil, err on read failure.
local function _receive_headers(sock)
    local headers = http_headers.new()

    repeat
        local line, err = sock:receive("*l")
        if not line then
            return nil, err
        end

        -- "Name: value"; whitespace around the separator is tolerated.
        for key, val in str_gmatch(line, "([^:%s]+):%s*(.+)") do
            if headers[key] then
                -- Duplicate header name: promote the value to a list.
                if type(headers[key]) ~= "table" then
                    headers[key] = { headers[key] }
                end
                tbl_insert(headers[key], tostring(val))
            else
                headers[key] = tostring(val)
            end
        end
    until str_find(line, "^%s*$")

    return headers, nil
end
|
||||
|
||||
|
||||
-- Returns a coroutine-wrapped iterator over a chunked transfer-encoded
-- body. Each call yields up to max_chunk_size bytes; the caller may pass a
-- new buffer size on each resume (default_chunk_size applies otherwise,
-- and nil means "yield whole chunks as sent"). Read errors are yielded as
-- nil, err; iteration ends after the zero-length terminating chunk.
local function _chunked_body_reader(sock, default_chunk_size)
    return co_wrap(function(max_chunk_size)
        local max_chunk_size = max_chunk_size or default_chunk_size
        -- Bytes left unread in the current wire chunk when it is larger
        -- than the caller's buffer size.
        local remaining = 0
        local length

        repeat
            -- If we still have data on this chunk
            if max_chunk_size and remaining > 0 then

                if remaining > max_chunk_size then
                    -- Consume up to max_chunk_size
                    length = max_chunk_size
                    remaining = remaining - max_chunk_size
                else
                    -- Consume all remaining
                    length = remaining
                    remaining = 0
                end
            else -- This is a fresh chunk

                -- Receive the chunk size (a hex number on its own line).
                local str, err = sock:receive("*l")
                if not str then
                    co_yield(nil, err)
                end

                length = tonumber(str, 16)

                if not length then
                    co_yield(nil, "unable to read chunksize")
                end

                if max_chunk_size and length > max_chunk_size then
                    -- Consume up to max_chunk_size
                    remaining = length - max_chunk_size
                    length = max_chunk_size
                end
            end

            if length > 0 then
                local str, err = sock:receive(length)
                if not str then
                    co_yield(nil, err)
                end

                -- The caller may supply a new buffer size on resume.
                max_chunk_size = co_yield(str) or default_chunk_size

                -- If we're finished with this chunk, read the carriage return.
                if remaining == 0 then
                    sock:receive(2) -- read \r\n
                end
            else
                -- Read the last (zero length) chunk's carriage return
                sock:receive(2) -- read \r\n
            end

        until length == 0
    end)
end
|
||||
|
||||
|
||||
-- Returns a coroutine-wrapped iterator over a non-chunked body. Behaviour
-- depends on what is known:
--   * no content_length, buffer size given: stream until the connection
--     closes (HTTP 1.0 style);
--   * no content_length, no buffer size: yield the whole body at once;
--   * content_length, no buffer size: one yield of exactly that length;
--   * content_length and buffer size: stream in buffered pieces.
-- The caller may pass a new buffer size on each resume; a negative size
-- means "unbuffered" and a nil size aborts the stream with a logged error.
local function _body_reader(sock, content_length, default_chunk_size)
    return co_wrap(function(max_chunk_size)
        local max_chunk_size = max_chunk_size or default_chunk_size

        if not content_length and max_chunk_size then
            -- We have no length, but wish to stream.
            -- HTTP 1.0 with no length will close connection, so read chunks to the end.
            repeat
                local str, err, partial = sock:receive(max_chunk_size)
                if not str and err == "closed" then
                    -- Connection closed: hand back whatever partial data
                    -- arrived along with the error.
                    max_chunk_size = tonumber(co_yield(partial, err) or default_chunk_size)
                end

                max_chunk_size = tonumber(co_yield(str) or default_chunk_size)
                if max_chunk_size and max_chunk_size < 0 then max_chunk_size = nil end

                if not max_chunk_size then
                    ngx_log(ngx_ERR, "Buffer size not specified, bailing")
                    break
                end
            until not str

        elseif not content_length then
            -- We have no length but don't wish to stream.
            -- HTTP 1.0 with no length will close connection, so read to the end.
            co_yield(sock:receive("*a"))

        elseif not max_chunk_size then
            -- We have a length and potentially keep-alive, but want everything.
            co_yield(sock:receive(content_length))

        else
            -- We have a length and potentially a keep-alive, and wish to stream
            -- the response.
            local received = 0
            repeat
                local length = max_chunk_size
                -- Clamp the final read so we never consume bytes beyond
                -- this response (the connection may be kept alive).
                if received + length > content_length then
                    length = content_length - received
                end

                if length > 0 then
                    local str, err = sock:receive(length)
                    if not str then
                        max_chunk_size = tonumber(co_yield(nil, err) or default_chunk_size)
                    end
                    received = received + length

                    max_chunk_size = tonumber(co_yield(str) or default_chunk_size)
                    if max_chunk_size and max_chunk_size < 0 then max_chunk_size = nil end

                    if not max_chunk_size then
                        ngx_log(ngx_ERR, "Buffer size not specified, bailing")
                        break
                    end
                end

            until length == 0
        end
    end)
end
|
||||
|
||||
|
||||
-- Placeholder body reader used for responses that can have no body
-- (e.g. HEAD, 204, 304). Always returns nil.
local function _no_body_reader()
    return nil
end
|
||||
|
||||
|
||||
-- Drains the response's body_reader and returns the whole body as a single
-- string. On a read error, returns nil, err plus whatever data had been
-- gathered before the failure. Returns nil, "no body to be read" when the
-- response cannot have a body (most likely HEAD or 304 etc).
local function _read_body(res)
    local reader = res.body_reader
    if reader == nil then
        return nil, "no body to be read"
    end

    local buf = {}
    local n = 0

    while true do
        local chunk, err = reader()
        if err then
            return nil, err, tbl_concat(buf) -- Return any data so far.
        end
        if chunk == nil then
            break
        end
        n = n + 1
        buf[n] = chunk
    end

    return tbl_concat(buf)
end
|
||||
|
||||
|
||||
-- Returns a coroutine-wrapped reader for the trailer section (headers sent
-- after a chunked body), parsed with the same logic as normal headers.
local function _trailer_reader(sock)
    return co_wrap(function()
        co_yield(_receive_headers(sock))
    end)
end
|
||||
|
||||
|
||||
-- Reads any trailers for the response and merges them into res.headers via
-- an __index metatable, so trailer values resolve through normal header
-- lookups. Returns nil, "no trailers" when the response announced none.
local function _read_trailers(res)
    local reader = res.trailer_reader
    if not reader then
        return nil, "no trailers"
    end

    local trailers = reader()
    setmetatable(res.headers, { __index = trailers })
end
|
||||
|
||||
|
||||
-- Writes the request body to the socket. The body may be a string, or an
-- iterator function returning successive chunks (and optionally err /
-- partial data) until it returns nil. Returns true on success, or
-- nil, err (plus any partial data from an iterator) on failure.
local function _send_body(sock, body)
    if type(body) == 'function' then
        repeat
            local chunk, err, partial = body()

            if chunk then
                local ok,err = sock:send(chunk)

                if not ok then
                    return nil, err
                end
            elseif err ~= nil then
                -- The iterator itself failed; pass back its error and any
                -- partial data it produced.
                return nil, err, partial
            end

        until chunk == nil
    elseif body ~= nil then
        local bytes, err = sock:send(body)

        if not bytes then
            return nil, err
        end
    end
    return true, nil
end
|
||||
|
||||
|
||||
-- Handles the "Expect: 100-continue" handshake. Reads the interim status
-- line; if the server answered 100 Continue, consumes the following blank
-- line and sends the request body. Otherwise the status read here is the
-- real response status. Returns status, version, err (or nil, err).
local function _handle_continue(sock, body)
    local status, version, err = _receive_status(sock)
    if not status then
        return nil, err
    end

    -- Only send body if we receive a 100 Continue
    if status == 100 then
        local ok, err = sock:receive("*l") -- Read carriage return
        if not ok then
            return nil, err
        end
        _send_body(sock, body)
    end
    return status, version, err
end
|
||||
|
||||
|
||||
-- Formats and sends the request described by params over the connected
-- socket. Fills in DEFAULT_PARAMS and minimal headers (Content-Length for
-- string bodies, Host, User-Agent, and Connection: Keep-Alive for HTTP
-- 1.0). The body is sent immediately unless "Expect: 100-continue" is
-- set, in which case sending is deferred to read_response(). Returns
-- true, or nil, err (, partial body data) on failure.
function _M.send_request(self, params)
    -- Apply defaults
    setmetatable(params, { __index = DEFAULT_PARAMS })

    local sock = self.sock
    local body = params.body
    local headers = http_headers.new()

    local params_headers = params.headers
    if params_headers then
        -- We assign one by one so that the metatable can handle case insensitivity
        -- for us. You can blame the spec for this inefficiency.
        for k,v in pairs(params_headers) do
            headers[k] = v
        end
    end

    -- Ensure minimal headers are set
    if type(body) == 'string' and not headers["Content-Length"] then
        headers["Content-Length"] = #body
    end
    if not headers["Host"] then
        headers["Host"] = self.host
    end
    if not headers["User-Agent"] then
        headers["User-Agent"] = _M._USER_AGENT
    end
    if params.version == 1.0 and not headers["Connection"] then
        headers["Connection"] = "Keep-Alive"
    end

    params.headers = headers

    -- Format and send request
    local req = _format_request(params)
    ngx_log(ngx_DEBUG, "\n", req)
    local bytes, err = sock:send(req)

    if not bytes then
        return nil, err
    end

    -- Send the request body, unless we expect: continue, in which case
    -- we handle this as part of reading the response.
    if headers["Expect"] ~= "100-continue" then
        local ok, err, partial = _send_body(sock, body)
        if not ok then
            return nil, err, partial
        end
    end

    return true
end
|
||||
|
||||
|
||||
-- Reads a response from the socket for a previously-sent request. Handles
-- the Expect: 100-continue handshake if the request used it, parses the
-- status line and headers, updates self.keepalive from the Connection
-- header / protocol version, and attaches the appropriate body reader
-- (chunked / fixed-length / absent) plus an optional trailer reader.
-- Returns a res table { status, headers, has_body, body_reader, read_body,
-- trailer_reader, read_trailers }, or nil, err.
function _M.read_response(self, params)
    local sock = self.sock

    local status, version, err

    -- If we expect: continue, we need to handle this, sending the body if allowed.
    -- If we don't get 100 back, then status is the actual status.
    if params.headers["Expect"] == "100-continue" then
        local _status, _version, _err = _handle_continue(sock, params.body)
        if not _status then
            return nil, _err
        elseif _status ~= 100 then
            status, version, err = _status, _version, _err
        end
    end

    -- Just read the status as normal.
    if not status then
        status, version, err = _receive_status(sock)
        if not status then
            return nil, err
        end
    end


    local res_headers, err = _receive_headers(sock)
    if not res_headers then
        return nil, err
    end

    -- Determine if we should keepalive or not.
    -- pcall guards str_lower against a nil/table Connection header value.
    local ok, connection = pcall(str_lower, res_headers["Connection"])
    if ok then
        if (version == 1.1 and connection == "close") or
            (version == 1.0 and connection ~= "keep-alive") then
            self.keepalive = false
        end
    end

    local body_reader = _no_body_reader
    local trailer_reader, err = nil, nil
    local has_body = false

    -- Receive the body_reader
    if _should_receive_body(params.method, status) then
        local ok, encoding = pcall(str_lower, res_headers["Transfer-Encoding"])
        if ok and version == 1.1 and encoding == "chunked" then
            body_reader, err = _chunked_body_reader(sock)
            has_body = true
        else

            local ok, length = pcall(tonumber, res_headers["Content-Length"])
            if ok then
                body_reader, err = _body_reader(sock, length)
                has_body = true
            end
        end
    end

    if res_headers["Trailer"] then
        trailer_reader, err = _trailer_reader(sock)
    end

    if err then
        return nil, err
    else
        return {
            status = status,
            headers = res_headers,
            has_body = has_body,
            body_reader = body_reader,
            read_body = _read_body,
            trailer_reader = trailer_reader,
            read_trailers = _read_trailers,
        }
    end
end
|
||||
|
||||
|
||||
-- Single-shot request helper over an established connection: sends the
-- request, then reads and returns the response (or nil, err if sending
-- failed).
function _M.request(self, params)
    local ok, err = self:send_request(params)
    if not ok then
        return ok, err
    end
    return self:read_response(params)
end
|
||||
|
||||
|
||||
-- Pipelines several requests over the connection: all requests are sent up
-- front, then a list of lazy response tables is returned. Each response is
-- actually read from the socket (in request order) the first time one of
-- its fields is accessed. Requests using "Expect: 100-continue" cannot be
-- pipelined. Returns the responses list, or nil, err.
function _M.request_pipeline(self, requests)
    for i, params in ipairs(requests) do
        if params.headers and params.headers["Expect"] == "100-continue" then
            return nil, "Cannot pipeline request specifying Expect: 100-continue"
        end

        local res, err = self:send_request(params)
        if not res then
            return res, err
        end
    end

    local responses = {}
    for i, params in ipairs(requests) do
        responses[i] = setmetatable({
            params = params,
            response_read = false,
        }, {
            -- Read each actual response lazily, at the point the user tries
            -- to access any of the fields.
            __index = function(t, k)
                local res, err
                if t.response_read == false then
                    res, err = _M.read_response(self, t.params)
                    t.response_read = true

                    if not res then
                        ngx_log(ngx_ERR, err)
                    else
                        -- Copy the fields onto t so future accesses no
                        -- longer go through this metamethod.
                        for rk, rv in pairs(res) do
                            t[rk] = rv
                        end
                    end
                end
                return rawget(t, k)
            end,
        })
    end
    return responses
end
|
||||
|
||||
|
||||
-- Simple single-shot interface: parses uri, connects (performing an SSL
-- handshake for https, honouring params.ssl_verify), issues the request,
-- reads the entire body into res.body, and returns the connection to the
-- keepalive pool. Returns res, or nil, err on failure.
function _M.request_uri(self, uri, params)
    if not params then params = {} end

    local parsed_uri, err = self:parse_uri(uri)
    if not parsed_uri then
        return nil, err
    end

    local scheme, host, port, path = unpack(parsed_uri)
    if not params.path then params.path = path end

    local c, err = self:connect(host, port)
    if not c then
        return nil, err
    end

    if scheme == "https" then
        local verify = true
        if params.ssl_verify == false then
            verify = false
        end
        local ok, err = self:ssl_handshake(nil, host, verify)
        if not ok then
            return nil, err
        end
    end

    local res, err = self:request(params)
    if not res then
        return nil, err
    end

    -- BUGFIX: bodiless responses (e.g. to HEAD requests, or 204/304)
    -- previously caused a spurious "no body to be read" failure here.
    -- Only attempt to read a body when the response can have one.
    if res.has_body then
        local body, err = res:read_body()
        if not body then
            return nil, err
        end

        res.body = body
    end

    local ok, err = self:set_keepalive()
    if not ok then
        ngx_log(ngx_ERR, err)
    end

    return res, nil
end
|
||||
|
||||
|
||||
-- Returns an iterator over the downstream client's request body (for
-- proxying), reading from the request socket. chunksize defaults to 65536
-- bytes. Returns nil when there is no request body, or nil, err on
-- failure. The sock argument is an override used mainly for testing;
-- normally ngx.req.socket() is used.
function _M.get_client_body_reader(self, chunksize, sock)
    local chunksize = chunksize or 65536
    if not sock then
        local ok, err
        -- pcall: ngx_req_socket raises outside a request context.
        ok, sock, err = pcall(ngx_req_socket)

        if not ok then
            return nil, sock -- pcall err
        end

        if not sock then
            if err == "no body" then
                return nil
            else
                return nil, err
            end
        end
    end

    local headers = ngx_req_get_headers()
    local length = headers.content_length
    local encoding = headers.transfer_encoding
    if length then
        return _body_reader(sock, tonumber(length), chunksize)
    elseif encoding and str_lower(encoding) == 'chunked' then
        -- Not yet supported by ngx_lua but should just work...
        return _chunked_body_reader(sock, chunksize)
    else
        return nil
    end
end
|
||||
|
||||
|
||||
-- Proxies the current downstream client request upstream: reuses its
-- method, uri (with literal whitespace percent-escaped) plus query string,
-- headers, and a streaming reader over its body.
function _M.proxy_request(self, chunksize)
    return self:request{
        method = ngx_req_get_method(),
        path = ngx_re_gsub(ngx_var.uri, "\\s", "%20", "jo") .. ngx_var.is_args .. (ngx_var.query_string or ""),
        body = self:get_client_body_reader(chunksize),
        headers = ngx_req_get_headers(),
    }
end
|
||||
|
||||
|
||||
-- Streams an upstream response back to the downstream client: copies the
-- status and all end-to-end headers, then prints the body chunk by chunk
-- using the response's body_reader.
function _M.proxy_response(self, response, chunksize)
    if not response then
        ngx_log(ngx_ERR, "no response provided")
        return
    end

    ngx.status = response.status

    -- Filter out hop-by-hop headers
    for k,v in pairs(response.headers) do
        if not HOP_BY_HOP_HEADERS[str_lower(k)] then
            ngx.header[k] = v
        end
    end

    local reader = response.body_reader
    repeat
        local chunk, err = reader(chunksize)
        if err then
            ngx_log(ngx_ERR, err)
            break
        end

        if chunk then
            ngx.print(chunk)
        end
    until not chunk
end
|
||||
|
||||
|
||||
return _M
|
||||
62
controllers/nginx-third-party/lua/vendor/lua-resty-http/lib/resty/http_headers.lua
vendored
Normal file
62
controllers/nginx-third-party/lua/vendor/lua-resty-http/lib/resty/http_headers.lua
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
local rawget, rawset, setmetatable =
|
||||
rawget, rawset, setmetatable
|
||||
|
||||
local str_gsub = string.gsub
|
||||
local str_lower = string.lower
|
||||
|
||||
|
||||
local _M = {
|
||||
_VERSION = '0.01',
|
||||
}
|
||||
|
||||
|
||||
-- Returns an empty headers table with internalised case normalisation.
|
||||
-- Supports the same cases as in ngx_lua:
|
||||
--
|
||||
-- headers.content_length
|
||||
-- headers["content-length"]
|
||||
-- headers["Content-Length"]
|
||||
function _M.new(self)
    local mt = {
        -- Maps a normalised (lowercase, hyphenated) key to the key's
        -- original, human-cased spelling as first stored.
        normalised = {},
    }


    -- Look up first by the exact key given; fall back to the normalised
    -- mapping so any casing / underscore variant resolves to the same slot.
    mt.__index = function(t, k)
        local k_hyphened = str_gsub(k, "_", "-")
        local matched = rawget(t, k)
        if matched then
            return matched
        else
            local k_normalised = str_lower(k_hyphened)
            return rawget(t, mt.normalised[k_normalised])
        end
    end


    -- First check the normalised table. If there's no match (first time) add an entry for
    -- our current case in the normalised table. This is to preserve the human (prettier) case
    -- instead of outputting lowercased header names.
    --
    -- If there's a match, we're being updated, just with a different case for the key. We use
    -- the normalised table to give us the original key, and perform a rawset().
    mt.__newindex = function(t, k, v)
        -- we support underscore syntax, so always hyphenate.
        local k_hyphened = str_gsub(k, "_", "-")

        -- lowercase hyphenated is "normalised"
        local k_normalised = str_lower(k_hyphened)

        if not mt.normalised[k_normalised] then
            mt.normalised[k_normalised] = k_hyphened
            rawset(t, k_hyphened, v)
        else
            rawset(t, mt.normalised[k_normalised], v)
        end
    end

    return setmetatable({}, mt)
end
|
||||
|
||||
|
||||
return _M
|
||||
33
controllers/nginx-third-party/lua/vendor/lua-resty-http/lua-resty-http-0.06-0.rockspec
vendored
Normal file
33
controllers/nginx-third-party/lua/vendor/lua-resty-http/lua-resty-http-0.06-0.rockspec
vendored
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
package = "lua-resty-http"
|
||||
version = "0.06-0"
|
||||
source = {
|
||||
url = "git://github.com/pintsized/lua-resty-http",
|
||||
tag = "v0.06"
|
||||
}
|
||||
description = {
|
||||
summary = "Lua HTTP client cosocket driver for OpenResty / ngx_lua.",
|
||||
detailed = [[
|
||||
Features an HTTP 1.0 and 1.1 streaming interface to reading
|
||||
bodies using coroutines, for predictable memory usage in Lua
|
||||
land. Alternative simple interface for singleshot requests
|
||||
without manual connection step. Supports chunked transfer
|
||||
encoding, keepalive, pipelining, and trailers. Headers are
|
||||
treated case insensitively. Probably production ready in most
|
||||
cases, though not yet proven in the wild.
|
||||
Recommended by the OpenResty maintainer as a long-term
|
||||
replacement for internal requests through ngx.location.capture.
|
||||
]],
|
||||
homepage = "https://github.com/pintsized/lua-resty-http",
|
||||
license = "2-clause BSD",
|
||||
maintainer = "James Hurst <james@pintsized.co.uk>"
|
||||
}
|
||||
dependencies = {
|
||||
"lua >= 5.1",
|
||||
}
|
||||
build = {
|
||||
type = "builtin",
|
||||
modules = {
|
||||
["resty.http"] = "lib/resty/http.lua",
|
||||
["resty.http_headers"] = "lib/resty/http_headers.lua"
|
||||
}
|
||||
}
|
||||
231
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/01-basic.t
vendored
Normal file
231
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/01-basic.t
vendored
Normal file
|
|
@ -0,0 +1,231 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4) + 1;
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
error_log logs/error.log debug;
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Simple default get.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
ngx.status = res.status
|
||||
ngx.print(res:read_body())
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
echo "OK";
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: HTTP 1.0
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
version = 1.0,
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
ngx.status = res.status
|
||||
ngx.print(res:read_body())
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
echo "OK";
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 3: Status code
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
ngx.status = res.status
|
||||
ngx.print(res:read_body())
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.status = 404
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
OK
|
||||
--- error_code: 404
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 4: Response headers
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
ngx.status = res.status
|
||||
ngx.say(res.headers["X-Test"])
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.header["X-Test"] = "x-value"
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
x-value
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 5: Query
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
query = {
|
||||
a = 1,
|
||||
b = 2,
|
||||
},
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
ngx.status = res.status
|
||||
|
||||
for k,v in pairs(res.headers) do
|
||||
ngx.header[k] = v
|
||||
end
|
||||
|
||||
ngx.print(res:read_body())
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
for k,v in pairs(ngx.req.get_uri_args()) do
|
||||
ngx.header["X-Header-" .. string.upper(k)] = v
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_headers
|
||||
X-Header-A: 1
|
||||
X-Header-B: 2
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 7: HEAD has no body.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
method = "HEAD",
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
|
||||
if body then
|
||||
ngx.print(body)
|
||||
end
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
echo "OK";
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
158
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/02-chunked.t
vendored
Normal file
158
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/02-chunked.t
vendored
Normal file
|
|
@ -0,0 +1,158 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Non chunked.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
|
||||
ngx.say(#body)
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
chunked_transfer_encoding off;
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
32768
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: Chunked. The number of chunks received when no max size is given proves the response was in fact chunked.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
local c = 1
|
||||
repeat
|
||||
local chunk, err = res.body_reader()
|
||||
if chunk then
|
||||
chunks[c] = chunk
|
||||
c = c + 1
|
||||
end
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
|
||||
ngx.say(#body)
|
||||
ngx.say(#chunks)
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
65536
|
||||
2
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 3: Chunked using read_body method.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
|
||||
ngx.say(#body)
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
65536
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
185
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/03-requestbody.t
vendored
Normal file
185
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/03-requestbody.t
vendored
Normal file
|
|
@ -0,0 +1,185 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: POST form-urlencoded
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
body = "a=1&b=2&c=3",
|
||||
path = "/b",
|
||||
headers = {
|
||||
["Content-Type"] = "application/x-www-form-urlencoded",
|
||||
}
|
||||
}
|
||||
|
||||
ngx.say(res:read_body())
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.req.read_body()
|
||||
local args = ngx.req.get_post_args()
|
||||
ngx.say("a: ", args.a)
|
||||
ngx.say("b: ", args.b)
|
||||
ngx.print("c: ", args.c)
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
a: 1
|
||||
b: 2
|
||||
c: 3
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: POST form-urlencoded 1.0
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
method = "POST",
|
||||
body = "a=1&b=2&c=3",
|
||||
path = "/b",
|
||||
headers = {
|
||||
["Content-Type"] = "application/x-www-form-urlencoded",
|
||||
},
|
||||
version = 1.0,
|
||||
}
|
||||
|
||||
ngx.say(res:read_body())
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.req.read_body()
|
||||
local args = ngx.req.get_post_args()
|
||||
ngx.say(ngx.req.get_method())
|
||||
ngx.say("a: ", args.a)
|
||||
ngx.say("b: ", args.b)
|
||||
ngx.print("c: ", args.c)
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
POST
|
||||
a: 1
|
||||
b: 2
|
||||
c: 3
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 3: 100 Continue does not end requset
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
body = "a=1&b=2&c=3",
|
||||
path = "/b",
|
||||
headers = {
|
||||
["Expect"] = "100-continue",
|
||||
["Content-Type"] = "application/x-www-form-urlencoded",
|
||||
}
|
||||
}
|
||||
ngx.say(res.status)
|
||||
ngx.say(res:read_body())
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.req.read_body()
|
||||
local args = ngx.req.get_post_args()
|
||||
ngx.say("a: ", args.a)
|
||||
ngx.say("b: ", args.b)
|
||||
ngx.print("c: ", args.c)
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
200
|
||||
a: 1
|
||||
b: 2
|
||||
c: 3
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
=== TEST 4: Return non-100 status to user
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
headers = {
|
||||
["Expect"] = "100-continue",
|
||||
["Content-Type"] = "application/x-www-form-urlencoded",
|
||||
}
|
||||
}
|
||||
if not res then
|
||||
ngx.say(err)
|
||||
end
|
||||
ngx.say(res.status)
|
||||
ngx.say(res:read_body())
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
return 417 "Expectation Failed";
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
417
|
||||
Expectation Failed
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
151
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/04-trailers.t
vendored
Normal file
151
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/04-trailers.t
vendored
Normal file
|
|
@ -0,0 +1,151 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Trailers. Check Content-MD5 generated after the body is sent matches up.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
headers = {
|
||||
["TE"] = "trailers",
|
||||
}
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
local hash = ngx.md5(body)
|
||||
res:read_trailers()
|
||||
|
||||
if res.headers["Content-MD5"] == hash then
|
||||
ngx.say("OK")
|
||||
else
|
||||
ngx.say(res.headers["Content-MD5"])
|
||||
end
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
-- We use the raw socket to compose a response, since OpenResty
|
||||
-- doesnt support trailers natively.
|
||||
|
||||
ngx.req.read_body()
|
||||
local sock, err = ngx.req.socket(true)
|
||||
if not sock then
|
||||
ngx.say(err)
|
||||
end
|
||||
|
||||
local res = {}
|
||||
table.insert(res, "HTTP/1.1 200 OK")
|
||||
table.insert(res, "Date: " .. ngx.http_time(ngx.time()))
|
||||
table.insert(res, "Transfer-Encoding: chunked")
|
||||
table.insert(res, "Trailer: Content-MD5")
|
||||
table.insert(res, "")
|
||||
|
||||
local body = "Hello, World"
|
||||
|
||||
table.insert(res, string.format("%x", #body))
|
||||
table.insert(res, body)
|
||||
table.insert(res, "0")
|
||||
table.insert(res, "")
|
||||
|
||||
table.insert(res, "Content-MD5: " .. ngx.md5(body))
|
||||
|
||||
table.insert(res, "")
|
||||
table.insert(res, "")
|
||||
sock:send(table.concat(res, "\\r\\n"))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: Advertised trailer does not exist, handled gracefully.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
headers = {
|
||||
["TE"] = "trailers",
|
||||
}
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
local hash = ngx.md5(body)
|
||||
res:read_trailers()
|
||||
|
||||
ngx.say("OK")
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
-- We use the raw socket to compose a response, since OpenResty
|
||||
-- doesnt support trailers natively.
|
||||
|
||||
ngx.req.read_body()
|
||||
local sock, err = ngx.req.socket(true)
|
||||
if not sock then
|
||||
ngx.say(err)
|
||||
end
|
||||
|
||||
local res = {}
|
||||
table.insert(res, "HTTP/1.1 200 OK")
|
||||
table.insert(res, "Date: " .. ngx.http_time(ngx.time()))
|
||||
table.insert(res, "Transfer-Encoding: chunked")
|
||||
table.insert(res, "Trailer: Content-MD5")
|
||||
table.insert(res, "")
|
||||
|
||||
local body = "Hello, World"
|
||||
|
||||
table.insert(res, string.format("%x", #body))
|
||||
table.insert(res, body)
|
||||
table.insert(res, "0")
|
||||
|
||||
table.insert(res, "")
|
||||
table.insert(res, "")
|
||||
sock:send(table.concat(res, "\\r\\n"))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
566
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/05-stream.t
vendored
Normal file
566
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/05-stream.t
vendored
Normal file
|
|
@ -0,0 +1,566 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4) - 1;
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Chunked streaming body reader returns the right content length.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
repeat
|
||||
local chunk = res.body_reader()
|
||||
if chunk then
|
||||
table.insert(chunks, chunk)
|
||||
end
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
ngx.say(#body)
|
||||
ngx.say(res.headers["Transfer-Encoding"])
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
32768
|
||||
chunked
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: Non-Chunked streaming body reader returns the right content length.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
repeat
|
||||
local chunk = res.body_reader()
|
||||
if chunk then
|
||||
table.insert(chunks, chunk)
|
||||
end
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
ngx.say(#body)
|
||||
ngx.say(res.headers["Transfer-Encoding"])
|
||||
ngx.say(#chunks)
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
chunked_transfer_encoding off;
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
32768
|
||||
nil
|
||||
1
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2b: Non-Chunked streaming body reader, buffer size becomes nil
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
local buffer_size = 16384
|
||||
repeat
|
||||
local chunk = res.body_reader(buffer_size)
|
||||
if chunk then
|
||||
table.insert(chunks, chunk)
|
||||
end
|
||||
|
||||
buffer_size = nil
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
ngx.say(res.headers["Transfer-Encoding"])
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
chunked_transfer_encoding off;
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
nil
|
||||
--- error_log
|
||||
Buffer size not specified, bailing
|
||||
|
||||
|
||||
=== TEST 3: HTTP 1.0 body reader with no max size returns the right content length.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
version = 1.0,
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
repeat
|
||||
local chunk = res.body_reader()
|
||||
if chunk then
|
||||
table.insert(chunks, chunk)
|
||||
end
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
ngx.say(#body)
|
||||
ngx.say(res.headers["Transfer-Encoding"])
|
||||
ngx.say(#chunks)
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
chunked_transfer_encoding off;
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
32768
|
||||
nil
|
||||
1
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 4: HTTP 1.0 body reader with max chunk size returns the right content length.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
version = 1.0,
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
local size = 8192
|
||||
repeat
|
||||
local chunk = res.body_reader(size)
|
||||
if chunk then
|
||||
table.insert(chunks, chunk)
|
||||
end
|
||||
size = size + size
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
ngx.say(#body)
|
||||
ngx.say(res.headers["Transfer-Encoding"])
|
||||
ngx.say(#chunks)
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
chunked_transfer_encoding off;
|
||||
content_by_lua '
|
||||
local len = 32769
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
32769
|
||||
nil
|
||||
3
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 4b: HTTP 1.0 body reader with no content length, stream works as expected.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
version = 1.0,
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
local size = 8192
|
||||
repeat
|
||||
local chunk = res.body_reader(size)
|
||||
if chunk then
|
||||
table.insert(chunks, chunk)
|
||||
end
|
||||
size = size + size
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
ngx.say(#body)
|
||||
ngx.say(#chunks)
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.req.read_body()
|
||||
local sock, err = ngx.req.socket(true)
|
||||
if not sock then
|
||||
ngx.say(err)
|
||||
end
|
||||
|
||||
local res = {}
|
||||
table.insert(res, "HTTP/1.0 200 OK")
|
||||
table.insert(res, "Date: " .. ngx.http_time(ngx.time()))
|
||||
table.insert(res, "")
|
||||
|
||||
local len = 32769
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
table.insert(res, table.concat(t))
|
||||
sock:send(table.concat(res, "\\r\\n"))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
32769
|
||||
3
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 5: Chunked streaming body reader with max chunk size returns the right content length.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
}
|
||||
|
||||
local chunks = {}
|
||||
local size = 8192
|
||||
repeat
|
||||
local chunk = res.body_reader(size)
|
||||
if chunk then
|
||||
table.insert(chunks, chunk)
|
||||
end
|
||||
size = size + size
|
||||
until not chunk
|
||||
|
||||
local body = table.concat(chunks)
|
||||
ngx.say(#body)
|
||||
ngx.say(res.headers["Transfer-Encoding"])
|
||||
ngx.say(#chunks)
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
local len = 32768
|
||||
local t = {}
|
||||
for i=1,len do
|
||||
t[i] = 0
|
||||
end
|
||||
ngx.print(table.concat(t))
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
32768
|
||||
chunked
|
||||
3
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 6: Request reader correctly reads body
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
lua_need_request_body off;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
|
||||
local reader, err = httpc:get_client_body_reader(8192)
|
||||
|
||||
repeat
|
||||
local chunk, err = reader()
|
||||
if chunk then
|
||||
ngx.print(chunk)
|
||||
end
|
||||
until chunk == nil
|
||||
|
||||
';
|
||||
}
|
||||
|
||||
--- request
|
||||
POST /a
|
||||
foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz
|
||||
--- response_body: foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
=== TEST 7: Request reader correctly reads body in chunks
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
lua_need_request_body off;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
|
||||
local reader, err = httpc:get_client_body_reader(64)
|
||||
|
||||
local chunks = 0
|
||||
repeat
|
||||
chunks = chunks +1
|
||||
local chunk, err = reader()
|
||||
if chunk then
|
||||
ngx.print(chunk)
|
||||
end
|
||||
until chunk == nil
|
||||
ngx.say("\\n"..chunks)
|
||||
';
|
||||
}
|
||||
|
||||
--- request
|
||||
POST /a
|
||||
foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz
|
||||
--- response_body
|
||||
foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz
|
||||
3
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 8: Request reader passes into client
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
lua_need_request_body off;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local reader, err = httpc:get_client_body_reader(64)
|
||||
|
||||
local res, err = httpc:request{
|
||||
method = POST,
|
||||
path = "/b",
|
||||
body = reader,
|
||||
headers = ngx.req.get_headers(100, true),
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
ngx.say(body)
|
||||
httpc:close()
|
||||
|
||||
';
|
||||
}
|
||||
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.req.read_body()
|
||||
local body, err = ngx.req.get_body_data()
|
||||
ngx.print(body)
|
||||
';
|
||||
}
|
||||
|
||||
--- request
|
||||
POST /a
|
||||
foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz
|
||||
--- response_body
|
||||
foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 9: Body reader is a function returning nil when no body is present.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
method = "HEAD",
|
||||
}
|
||||
|
||||
repeat
|
||||
local chunk = res.body_reader()
|
||||
until not chunk
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.exit(200)
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 10: Issue a notice (but do not error) if trying to read the request body in a subrequest
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
echo_location /b;
|
||||
}
|
||||
location = /b {
|
||||
lua_need_request_body off;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
|
||||
local reader, err = httpc:get_client_body_reader(8192)
|
||||
if not reader then
|
||||
ngx.log(ngx.NOTICE, err)
|
||||
return
|
||||
end
|
||||
|
||||
repeat
|
||||
local chunk, err = reader()
|
||||
if chunk then
|
||||
ngx.print(chunk)
|
||||
end
|
||||
until chunk == nil
|
||||
';
|
||||
}
|
||||
|
||||
--- request
|
||||
POST /a
|
||||
foobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbazfoobarbaz
|
||||
--- response_body:
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
--- error_log
|
||||
attempt to read the request body in a subrequest
|
||||
145
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/06-simpleinterface.t
vendored
Normal file
145
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/06-simpleinterface.t
vendored
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4) + 6;
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
error_log logs/error.log debug;
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Simple URI interface
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
local res, err = httpc:request_uri("http://127.0.0.1:"..ngx.var.server_port.."/b?a=1&b=2")
|
||||
|
||||
if not res then
|
||||
ngx.log(ngx.ERR, err)
|
||||
end
|
||||
ngx.status = res.status
|
||||
|
||||
ngx.header["X-Header-A"] = res.headers["X-Header-A"]
|
||||
ngx.header["X-Header-B"] = res.headers["X-Header-B"]
|
||||
|
||||
ngx.print(res.body)
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
for k,v in pairs(ngx.req.get_uri_args()) do
|
||||
ngx.header["X-Header-" .. string.upper(k)] = v
|
||||
end
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_headers
|
||||
X-Header-A: 1
|
||||
X-Header-B: 2
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: Simple URI interface HTTP 1.0
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
local res, err = httpc:request_uri(
|
||||
"http://127.0.0.1:"..ngx.var.server_port.."/b?a=1&b=2", {
|
||||
}
|
||||
)
|
||||
|
||||
ngx.status = res.status
|
||||
|
||||
ngx.header["X-Header-A"] = res.headers["X-Header-A"]
|
||||
ngx.header["X-Header-B"] = res.headers["X-Header-B"]
|
||||
|
||||
ngx.print(res.body)
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
for k,v in pairs(ngx.req.get_uri_args()) do
|
||||
ngx.header["X-Header-" .. string.upper(k)] = v
|
||||
end
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_headers
|
||||
X-Header-A: 1
|
||||
X-Header-B: 2
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 3 Simple URI interface, params override
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
local res, err = httpc:request_uri(
|
||||
"http://127.0.0.1:"..ngx.var.server_port.."/b?a=1&b=2", {
|
||||
path = "/c",
|
||||
query = {
|
||||
a = 2,
|
||||
b = 3,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
ngx.status = res.status
|
||||
|
||||
ngx.header["X-Header-A"] = res.headers["X-Header-A"]
|
||||
ngx.header["X-Header-B"] = res.headers["X-Header-B"]
|
||||
|
||||
ngx.print(res.body)
|
||||
';
|
||||
}
|
||||
location = /c {
|
||||
content_by_lua '
|
||||
for k,v in pairs(ngx.req.get_uri_args()) do
|
||||
ngx.header["X-Header-" .. string.upper(k)] = v
|
||||
end
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_headers
|
||||
X-Header-A: 2
|
||||
X-Header-B: 3
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
182
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/07-keepalive.t
vendored
Normal file
182
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/07-keepalive.t
vendored
Normal file
|
|
@ -0,0 +1,182 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
error_log logs/error.log debug;
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1 Simple interface, Connection: Keep-alive. Test the connection is reused.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
local res, err = httpc:request_uri(
|
||||
"http://127.0.0.1:"..ngx.var.server_port.."/b", {
|
||||
}
|
||||
)
|
||||
|
||||
ngx.say(res.headers["Connection"])
|
||||
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
ngx.say(httpc:get_reused_times())
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
keep-alive
|
||||
1
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2 Simple interface, Connection: close, test we don't try to keepalive, but also that subsequent connections can keepalive.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
local res, err = httpc:request_uri(
|
||||
"http://127.0.0.1:"..ngx.var.server_port.."/b", {
|
||||
version = 1.0,
|
||||
headers = {
|
||||
["Connection"] = "close",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
ngx.say(httpc:get_reused_times())
|
||||
|
||||
httpc:set_keepalive()
|
||||
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
ngx.say(httpc:get_reused_times())
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
0
|
||||
1
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 3 Generic interface, Connection: Keep-alive. Test the connection is reused.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
|
||||
ngx.say(res.headers["Connection"])
|
||||
ngx.say(httpc:set_keepalive())
|
||||
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
ngx.say(httpc:get_reused_times())
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
keep-alive
|
||||
1
|
||||
1
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 4 Generic interface, Connection: Close. Test we don't try to keepalive, but also that subsequent connections can keepalive.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
version = 1.0,
|
||||
headers = {
|
||||
["Connection"] = "Close",
|
||||
},
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
local body = res:read_body()
|
||||
|
||||
ngx.say(res.headers["Connection"])
|
||||
local r, e = httpc:set_keepalive()
|
||||
ngx.say(r)
|
||||
ngx.say(e)
|
||||
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
ngx.say(httpc:get_reused_times())
|
||||
|
||||
httpc:set_keepalive()
|
||||
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
ngx.say(httpc:get_reused_times())
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
close
|
||||
2
|
||||
connection must be closed
|
||||
0
|
||||
1
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
143
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/08-pipeline.t
vendored
Normal file
143
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/08-pipeline.t
vendored
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
error_log logs/error.log debug;
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1 Test that pipelined reqests can be read correctly.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local responses = httpc:request_pipeline{
|
||||
{
|
||||
path = "/b",
|
||||
},
|
||||
{
|
||||
path = "/c",
|
||||
},
|
||||
{
|
||||
path = "/d",
|
||||
}
|
||||
}
|
||||
|
||||
for i,r in ipairs(responses) do
|
||||
if r.status then
|
||||
ngx.say(r.status)
|
||||
ngx.say(r.headers["X-Res"])
|
||||
ngx.say(r:read_body())
|
||||
end
|
||||
end
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.status = 200
|
||||
ngx.header["X-Res"] = "B"
|
||||
ngx.print("B")
|
||||
';
|
||||
}
|
||||
location = /c {
|
||||
content_by_lua '
|
||||
ngx.status = 404
|
||||
ngx.header["X-Res"] = "C"
|
||||
ngx.print("C")
|
||||
';
|
||||
}
|
||||
location = /d {
|
||||
content_by_lua '
|
||||
ngx.status = 200
|
||||
ngx.header["X-Res"] = "D"
|
||||
ngx.print("D")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
200
|
||||
B
|
||||
B
|
||||
404
|
||||
C
|
||||
C
|
||||
200
|
||||
D
|
||||
D
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: Test we can handle timeouts on reading the pipelined requests.
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
httpc:set_timeout(1)
|
||||
|
||||
local responses = httpc:request_pipeline{
|
||||
{
|
||||
path = "/b",
|
||||
},
|
||||
{
|
||||
path = "/c",
|
||||
},
|
||||
}
|
||||
|
||||
for i,r in ipairs(responses) do
|
||||
if r.status then
|
||||
ngx.say(r.status)
|
||||
ngx.say(r.headers["X-Res"])
|
||||
ngx.say(r:read_body())
|
||||
end
|
||||
end
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.status = 200
|
||||
ngx.header["X-Res"] = "B"
|
||||
ngx.print("B")
|
||||
';
|
||||
}
|
||||
location = /c {
|
||||
content_by_lua '
|
||||
ngx.status = 404
|
||||
ngx.header["X-Res"] = "C"
|
||||
ngx.sleep(1)
|
||||
ngx.print("C")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
200
|
||||
B
|
||||
B
|
||||
--- no_error_log
|
||||
[warn]
|
||||
--- error_log eval
|
||||
[qr/timeout/]
|
||||
59
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/09-ssl.t
vendored
Normal file
59
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/09-ssl.t
vendored
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
error_log logs/error.log debug;
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: parse_uri returns port 443 for https URIs
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
local parsed = httpc:parse_uri("https://www.google.com/foobar")
|
||||
ngx.say(parsed[3])
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
443
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
=== TEST 2: parse_uri returns port 80 for http URIs
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
local parsed = httpc:parse_uri("http://www.google.com/foobar")
|
||||
ngx.say(parsed[3])
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
80
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
57
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/10-clientbodyreader.t
vendored
Normal file
57
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/10-clientbodyreader.t
vendored
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Issue a notice (but do not error) if trying to read the request body in a subrequest
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
echo_location /b;
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/c",
|
||||
headers = {
|
||||
["Content-Type"] = "application/x-www-form-urlencoded",
|
||||
}
|
||||
}
|
||||
if not res then
|
||||
ngx.say(err)
|
||||
end
|
||||
ngx.print(res:read_body())
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location /c {
|
||||
echo "OK";
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
OK
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
152
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/11-proxy.t
vendored
Normal file
152
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/11-proxy.t
vendored
Normal file
|
|
@ -0,0 +1,152 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 5);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
error_log logs/error.log debug;
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Proxy GET request and response
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a_prx {
|
||||
rewrite ^(.*)_prx$ $1 break;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
httpc:proxy_response(httpc:proxy_request())
|
||||
httpc:set_keepalive()
|
||||
';
|
||||
}
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
ngx.status = 200
|
||||
ngx.header["X-Test"] = "foo"
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a_prx
|
||||
--- response_body
|
||||
OK
|
||||
--- response_headers
|
||||
X-Test: foo
|
||||
--- error_code: 200
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: Proxy POST request and response
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a_prx {
|
||||
rewrite ^(.*)_prx$ $1 break;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
httpc:proxy_response(httpc:proxy_request())
|
||||
httpc:set_keepalive()
|
||||
';
|
||||
}
|
||||
location = /a {
|
||||
lua_need_request_body on;
|
||||
content_by_lua '
|
||||
ngx.status = 404
|
||||
ngx.header["X-Test"] = "foo"
|
||||
local args, err = ngx.req.get_post_args()
|
||||
ngx.say(args["foo"])
|
||||
ngx.say(args["hello"])
|
||||
';
|
||||
}
|
||||
--- request
|
||||
POST /a_prx
|
||||
foo=bar&hello=world
|
||||
--- response_body
|
||||
bar
|
||||
world
|
||||
--- response_headers
|
||||
X-Test: foo
|
||||
--- error_code: 404
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 3: Proxy multiple headers
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a_prx {
|
||||
rewrite ^(.*)_prx$ $1 break;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
httpc:proxy_response(httpc:proxy_request())
|
||||
httpc:set_keepalive()
|
||||
';
|
||||
}
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
ngx.status = 200
|
||||
ngx.header["Set-Cookie"] = { "cookie1", "cookie2" }
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a_prx
|
||||
--- response_body
|
||||
OK
|
||||
--- raw_response_headers_like: .*Set-Cookie: cookie1\r\nSet-Cookie: cookie2\r\n
|
||||
--- error_code: 200
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 4: Proxy still works with spaces in URI
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = "/a_ b_prx" {
|
||||
rewrite ^(.*)_prx$ $1 break;
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
httpc:proxy_response(httpc:proxy_request())
|
||||
httpc:set_keepalive()
|
||||
';
|
||||
}
|
||||
location = "/a_ b" {
|
||||
content_by_lua '
|
||||
ngx.status = 200
|
||||
ngx.header["X-Test"] = "foo"
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a_%20b_prx
|
||||
--- response_body
|
||||
OK
|
||||
--- response_headers
|
||||
X-Test: foo
|
||||
--- error_code: 200
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
160
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/12-case_insensitive_headers.t
vendored
Normal file
160
controllers/nginx-third-party/lua/vendor/lua-resty-http/t/12-case_insensitive_headers.t
vendored
Normal file
|
|
@ -0,0 +1,160 @@
|
|||
# vim:set ft= ts=4 sw=4 et:
|
||||
|
||||
use Test::Nginx::Socket;
|
||||
use Cwd qw(cwd);
|
||||
|
||||
plan tests => repeat_each() * (blocks() * 4);
|
||||
|
||||
my $pwd = cwd();
|
||||
|
||||
our $HttpConfig = qq{
|
||||
lua_package_path "$pwd/lib/?.lua;;";
|
||||
error_log logs/error.log debug;
|
||||
};
|
||||
|
||||
$ENV{TEST_NGINX_RESOLVER} = '8.8.8.8';
|
||||
|
||||
no_long_string();
|
||||
#no_diff();
|
||||
|
||||
run_tests();
|
||||
|
||||
__DATA__
|
||||
=== TEST 1: Test header normalisation
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http_headers = require "resty.http_headers"
|
||||
|
||||
local headers = http_headers.new()
|
||||
|
||||
headers.x_a_header = "a"
|
||||
headers["x-b-header"] = "b"
|
||||
headers["X-C-Header"] = "c"
|
||||
headers["X_d-HEAder"] = "d"
|
||||
|
||||
ngx.say(headers["X-A-Header"])
|
||||
ngx.say(headers.x_b_header)
|
||||
|
||||
for k,v in pairs(headers) do
|
||||
ngx.say(k, ": ", v)
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
a
|
||||
b
|
||||
x-b-header: b
|
||||
x-a-header: a
|
||||
X-d-HEAder: d
|
||||
X-C-Header: c
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 2: Test headers can be accessed in all cases
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b"
|
||||
}
|
||||
|
||||
ngx.status = res.status
|
||||
ngx.say(res.headers["X-Foo-Header"])
|
||||
ngx.say(res.headers["x-fOo-heaDeR"])
|
||||
ngx.say(res.headers.x_foo_header)
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.header["X-Foo-Header"] = "bar"
|
||||
ngx.say("OK")
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
bar
|
||||
bar
|
||||
bar
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
|
||||
|
||||
=== TEST 3: Test request headers are normalised
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http = require "resty.http"
|
||||
local httpc = http.new()
|
||||
httpc:connect("127.0.0.1", ngx.var.server_port)
|
||||
|
||||
local res, err = httpc:request{
|
||||
path = "/b",
|
||||
headers = {
|
||||
["uSeR-AgENT"] = "test_user_agent",
|
||||
x_foo = "bar",
|
||||
},
|
||||
}
|
||||
|
||||
ngx.status = res.status
|
||||
ngx.print(res:read_body())
|
||||
|
||||
httpc:close()
|
||||
';
|
||||
}
|
||||
location = /b {
|
||||
content_by_lua '
|
||||
ngx.say(ngx.req.get_headers()["User-Agent"])
|
||||
ngx.say(ngx.req.get_headers()["X-Foo"])
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_body
|
||||
test_user_agent
|
||||
bar
|
||||
--- no_error_log
|
||||
[error]
|
||||
|
||||
|
||||
=== TEST 4: Test that headers remain unique
|
||||
--- http_config eval: $::HttpConfig
|
||||
--- config
|
||||
location = /a {
|
||||
content_by_lua '
|
||||
local http_headers = require "resty.http_headers"
|
||||
|
||||
local headers = http_headers.new()
|
||||
|
||||
headers["x-a-header"] = "a"
|
||||
headers["X-A-HEAder"] = "b"
|
||||
|
||||
for k,v in pairs(headers) do
|
||||
ngx.log(ngx.DEBUG, k, ": ", v)
|
||||
ngx.header[k] = v
|
||||
end
|
||||
';
|
||||
}
|
||||
--- request
|
||||
GET /a
|
||||
--- response_headers
|
||||
x-a-header: b
|
||||
--- no_error_log
|
||||
[error]
|
||||
[warn]
|
||||
[warn]
|
||||
63
controllers/nginx-third-party/lua/vendor/lua-resty-http/util/lua-releng
vendored
Executable file
63
controllers/nginx-third-party/lua/vendor/lua-resty-http/util/lua-releng
vendored
Executable file
|
|
@ -0,0 +1,63 @@
|
|||
#!/usr/bin/env perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
sub file_contains ($$);
|
||||
|
||||
my $version;
|
||||
for my $file (map glob, qw{ *.lua lib/*.lua lib/*/*.lua lib/*/*/*.lua lib/*/*/*/*.lua lib/*/*/*/*/*.lua }) {
|
||||
# Check the sanity of each .lua file
|
||||
open my $in, $file or
|
||||
die "ERROR: Can't open $file for reading: $!\n";
|
||||
my $found_ver;
|
||||
while (<$in>) {
|
||||
my ($ver, $skipping);
|
||||
if (/(?x) (?:_VERSION) \s* = .*? ([\d\.]*\d+) (.*? SKIP)?/) {
|
||||
my $orig_ver = $ver = $1;
|
||||
$found_ver = 1;
|
||||
# $skipping = $2;
|
||||
$ver =~ s{^(\d+)\.(\d{3})(\d{3})$}{join '.', int($1), int($2), int($3)}e;
|
||||
warn "$file: $orig_ver ($ver)\n";
|
||||
|
||||
} elsif (/(?x) (?:_VERSION) \s* = \s* ([a-zA-Z_]\S*)/) {
|
||||
warn "$file: $1\n";
|
||||
$found_ver = 1;
|
||||
last;
|
||||
}
|
||||
|
||||
if ($ver and $version and !$skipping) {
|
||||
if ($version ne $ver) {
|
||||
# die "$file: $ver != $version\n";
|
||||
}
|
||||
} elsif ($ver and !$version) {
|
||||
$version = $ver;
|
||||
}
|
||||
}
|
||||
if (!$found_ver) {
|
||||
warn "WARNING: No \"_VERSION\" or \"version\" field found in `$file`.\n";
|
||||
}
|
||||
close $in;
|
||||
|
||||
print "Checking use of Lua global variables in file $file ...\n";
|
||||
system("luac -p -l $file | grep ETGLOBAL | grep -vE 'require|type|tostring|error|ngx|ndk|jit|setmetatable|getmetatable|string|table|io|os|print|tonumber|math|pcall|xpcall|unpack|pairs|ipairs|assert|module|package|coroutine|[gs]etfenv|next|select|rawset|rawget|debug'");
|
||||
#file_contains($file, "attempt to write to undeclared variable");
|
||||
system("grep -H -n -E --color '.{120}' $file");
|
||||
}
|
||||
|
||||
sub file_contains ($$) {
|
||||
my ($file, $regex) = @_;
|
||||
open my $in, $file
|
||||
or die "Cannot open $file fo reading: $!\n";
|
||||
my $content = do { local $/; <$in> };
|
||||
close $in;
|
||||
#print "$content";
|
||||
return scalar ($content =~ /$regex/);
|
||||
}
|
||||
|
||||
if (-d 't') {
|
||||
for my $file (map glob, qw{ t/*.t t/*/*.t t/*/*/*.t }) {
|
||||
system(qq{grep -H -n --color -E '\\--- ?(ONLY|LAST)' $file});
|
||||
}
|
||||
}
|
||||
|
||||
10
controllers/nginx-third-party/lua/vendor/lua-resty-lock/.gitignore
vendored
Normal file
10
controllers/nginx-third-party/lua/vendor/lua-resty-lock/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
go
|
||||
t/servroot/
|
||||
reindex
|
||||
nginx
|
||||
ctags
|
||||
tags
|
||||
a.lua
|
||||
18
controllers/nginx-third-party/lua/vendor/lua-resty-lock/Makefile
vendored
Normal file
18
controllers/nginx-third-party/lua/vendor/lua-resty-lock/Makefile
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
OPENRESTY_PREFIX=/usr/local/openresty
|
||||
|
||||
PREFIX ?= /usr/local
|
||||
LUA_INCLUDE_DIR ?= $(PREFIX)/include
|
||||
LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION)
|
||||
INSTALL ?= install
|
||||
|
||||
.PHONY: all test install
|
||||
|
||||
all: ;
|
||||
|
||||
install: all
|
||||
$(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/
|
||||
$(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/
|
||||
|
||||
test: all
|
||||
PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t
|
||||
|
||||
376
controllers/nginx-third-party/lua/vendor/lua-resty-lock/README.markdown
vendored
Normal file
376
controllers/nginx-third-party/lua/vendor/lua-resty-lock/README.markdown
vendored
Normal file
|
|
@ -0,0 +1,376 @@
|
|||
Name
|
||||
====
|
||||
|
||||
lua-resty-lock - Simple shm-based nonblocking lock API
|
||||
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
* [Name](#name)
|
||||
* [Status](#status)
|
||||
* [Synopsis](#synopsis)
|
||||
* [Description](#description)
|
||||
* [Methods](#methods)
|
||||
* [new](#new)
|
||||
* [lock](#lock)
|
||||
* [unlock](#unlock)
|
||||
* [For Multiple Lua Light Threads](#for-multiple-lua-light-threads)
|
||||
* [For Cache Locks](#for-cache-locks)
|
||||
* [Prerequisites](#prerequisites)
|
||||
* [Installation](#installation)
|
||||
* [TODO](#todo)
|
||||
* [Community](#community)
|
||||
* [English Mailing List](#english-mailing-list)
|
||||
* [Chinese Mailing List](#chinese-mailing-list)
|
||||
* [Bugs and Patches](#bugs-and-patches)
|
||||
* [Author](#author)
|
||||
* [Copyright and License](#copyright-and-license)
|
||||
* [See Also](#see-also)
|
||||
|
||||
Status
|
||||
======
|
||||
|
||||
This library is still under early development and is production ready.
|
||||
|
||||
Synopsis
|
||||
========
|
||||
|
||||
```lua
|
||||
# nginx.conf
|
||||
|
||||
http {
|
||||
# you do not need the following line if you are using the
|
||||
# ngx_openresty bundle:
|
||||
lua_package_path "/path/to/lua-resty-lock/lib/?.lua;;";
|
||||
|
||||
lua_shared_dict my_locks 100k;
|
||||
|
||||
server {
|
||||
...
|
||||
|
||||
location = /t {
|
||||
content_by_lua '
|
||||
local lock = require "resty.lock"
|
||||
for i = 1, 2 do
|
||||
local lock = lock:new("my_locks")
|
||||
|
||||
local elapsed, err = lock:lock("my_key")
|
||||
ngx.say("lock: ", elapsed, ", ", err)
|
||||
|
||||
local ok, err = lock:unlock()
|
||||
if not ok then
|
||||
ngx.say("failed to unlock: ", err)
|
||||
end
|
||||
ngx.say("unlock: ", ok)
|
||||
end
|
||||
';
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
This library implements a simple mutex lock in a similar way to ngx_proxy module's [proxy_cache_lock directive](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock).
|
||||
|
||||
Under the hood, this library uses [ngx_lua](https://github.com/chaoslawful/lua-nginx-module) module's shared memory dictionaries. The lock waiting is nonblocking because we use stepwise [ngx.sleep](https://github.com/chaoslawful/lua-nginx-module#ngxsleep) to poll the lock periodically.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Methods
|
||||
=======
|
||||
|
||||
To load this library,
|
||||
|
||||
1. you need to specify this library's path in ngx_lua's [lua_package_path](https://github.com/chaoslawful/lua-nginx-module#lua_package_path) directive. For example, `lua_package_path "/path/to/lua-resty-lock/lib/?.lua;;";`.
|
||||
2. you use `require` to load the library into a local Lua variable:
|
||||
|
||||
```lua
|
||||
local lock = require "resty.lock"
|
||||
```
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
new
|
||||
---
|
||||
`syntax: obj = lock:new(dict_name)`
|
||||
|
||||
`syntax: obj = lock:new(dict_name, opts)`
|
||||
|
||||
Creates a new lock object instance by specifying the shared dictionary name (created by [lua_shared_dict](http://https://github.com/chaoslawful/lua-nginx-module#lua_shared_dict)) and an optional options table `opts`.
|
||||
|
||||
The options table accepts the following options:
|
||||
|
||||
* `exptime`
|
||||
Specifies expiration time (in seconds) for the lock entry in the shared memory dictionary. You can specify up to `0.001` seconds. Default to 30 (seconds). Even if the invoker does not call `unlock` or the object holding the lock is not GC'd, the lock will be released after this time. So deadlock won't happen even when the worker process holding the lock crashes.
|
||||
* `timeout`
|
||||
Specifies the maximal waiting time (in seconds) for the [lock](#lock) method calls on the current object instance. You can specify up to `0.001` seconds. Default to 5 (seconds). This option value cannot be bigger than `exptime`. This timeout is to prevent a [lock](#lock) method call from waiting forever.
|
||||
You can specify `0` to make the [lock](#lock) method return immediately without waiting if it cannot acquire the lock right away.
|
||||
* `step`
|
||||
Specifies the initial step (in seconds) of sleeping when waiting for the lock. Default to `0.001` (seconds). When the [lock](#lock) method is waiting on a busy lock, it sleeps by steps. The step size is increased by a ratio (specified by the `ratio` option) until reaching the step size limit (specified by the `max_step` option).
|
||||
* `ratio`
|
||||
Specifies the step increasing ratio. Default to 2, that is, the step size doubles at each waiting iteration.
|
||||
* `max_step`
|
||||
Specifies the maximal step size (i.e., sleep interval, in seconds) allowed. See also the `step` and `ratio` options). Default to 0.5 (seconds).
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
lock
|
||||
----
|
||||
`syntax: elapsed, err = obj:lock(key)`
|
||||
|
||||
Tries to lock a key across all the Nginx worker processes in the current Nginx server instance. Different keys are different locks.
|
||||
|
||||
The length of the key string must not be larger than 65535 bytes.
|
||||
|
||||
Returns the waiting time (in seconds) if the lock is successfully acquired. Otherwise returns `nil` and a string describing the error.
|
||||
|
||||
The waiting time is not from the wallclock, but rather is from simply adding up all the waiting "steps". A nonzero `elapsed` return value indicates that someone else has just hold this lock. But a zero return value cannot gurantee that no one else has just acquired and released the lock.
|
||||
|
||||
When this method is waiting on fetching the lock, no operating system threads will be blocked and the current Lua "light thread" will be automatically yielded behind the scene.
|
||||
|
||||
It is strongly recommended to always call the [unlock()](#unlock) method to actively release the lock as soon as possible.
|
||||
|
||||
If the [unlock()](#unlock) method is never called after this method call, the lock will get released when
|
||||
1. the current `resty.lock` object instance is collected automatically by the Lua GC.
|
||||
2. the `exptime` for the lock entry is reached.
|
||||
|
||||
Common errors for this method call is
|
||||
* "timeout"
|
||||
: The timeout threshold specified by the `timeout` option of the [new](#new) method is exceeded.
|
||||
* "locked"
|
||||
: The current `resty.lock` object instance is already holding a lock (not necessarily of the same key).
|
||||
|
||||
Other possible errors are from ngx_lua's shared dictionary API.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
unlock
|
||||
------
|
||||
`syntax: ok, err = obj:unlock()`
|
||||
|
||||
Releases the lock held by the current `resty.lock` object instance.
|
||||
|
||||
Returns `1` on success. Returns `nil` and a string describing the error otherwise.
|
||||
|
||||
If you call `unlock` when no lock is currently held, the error "unlocked" will be returned.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
For Multiple Lua Light Threads
|
||||
==============================
|
||||
|
||||
It is always a bad idea to share a single `resty.lock` object instance across multiple ngx_lua "light threads" because the object itself is stateful and is vulnerable to race conditions. It is highly recommended to always allocate a separate `resty.lock` object instance for each "light thread" that needs one.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
For Cache Locks
|
||||
===============
|
||||
|
||||
One common use case for this library is avoid the so-called "dog-pile effect", that is, to limit concurrent backend queries for the same key when a cache miss happens. This usage is similar to the standard ngx_proxy module's [proxy_cache_lock](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_lock) directive.
|
||||
|
||||
The basic workflow for a cache lock is as follows:
|
||||
|
||||
1. Check the cache for a hit with the key. If a cache miss happens, proceed to step 2.
|
||||
2. Instantiate a `resty.lock` object, call the [lock](#lock) method on the key, and check the 1st return value, i.e., the lock waiting time. If it is `nil`, handle the error; otherwise proceed to step 3.
|
||||
3. Check the cache again for a hit. If it is still a miss, proceed to step 4; otherwise release the lock by calling [unlock](#unlock) and then return the cached value.
|
||||
4. Query the backend (the data source) for the value, put the result into the cache, and then release the lock currently held by calling [unlock](#unlock).
|
||||
|
||||
Below is a kinda complete code example that demonstrates the idea.
|
||||
|
||||
```lua
|
||||
local resty_lock = require "resty.lock"
|
||||
local cache = ngx.shared.my_cache
|
||||
|
||||
-- step 1:
|
||||
local val, err = cache:get(key)
|
||||
if val then
|
||||
ngx.say("result: ", val)
|
||||
return
|
||||
end
|
||||
|
||||
if err then
|
||||
return fail("failed to get key from shm: ", err)
|
||||
end
|
||||
|
||||
-- cache miss!
|
||||
-- step 2:
|
||||
local lock = resty_lock:new("my_locks")
|
||||
local elapsed, err = lock:lock(key)
|
||||
if not elapsed then
|
||||
return fail("failed to acquire the lock: ", err)
|
||||
end
|
||||
|
||||
-- lock successfully acquired!
|
||||
|
||||
-- step 3:
|
||||
-- someone might have already put the value into the cache
|
||||
-- so we check it here again:
|
||||
val, err = cache:get(key)
|
||||
if val then
|
||||
local ok, err = lock:unlock()
|
||||
if not ok then
|
||||
return fail("failed to unlock: ", err)
|
||||
end
|
||||
|
||||
ngx.say("result: ", val)
|
||||
return
|
||||
end
|
||||
|
||||
--- step 4:
|
||||
local val = fetch_redis(key)
|
||||
if not val then
|
||||
local ok, err = lock:unlock()
|
||||
if not ok then
|
||||
return fail("failed to unlock: ", err)
|
||||
end
|
||||
|
||||
-- FIXME: we should handle the backend miss more carefully
|
||||
-- here, like inserting a stub value into the cache.
|
||||
|
||||
ngx.say("no value found")
|
||||
return
|
||||
end
|
||||
|
||||
-- update the shm cache with the newly fetched value
|
||||
local ok, err = cache:set(key, val, 1)
|
||||
if not ok then
|
||||
local ok, err = lock:unlock()
|
||||
if not ok then
|
||||
return fail("failed to unlock: ", err)
|
||||
end
|
||||
|
||||
return fail("failed to update shm cache: ", err)
|
||||
end
|
||||
|
||||
local ok, err = lock:unlock()
|
||||
if not ok then
|
||||
return fail("failed to unlock: ", err)
|
||||
end
|
||||
|
||||
ngx.say("result: ", val)
|
||||
```
|
||||
|
||||
Here we assume that we use the ngx_lua shared memory dictionary to cache the Redis query results and we have the following configurations in `nginx.conf`:
|
||||
|
||||
```nginx
|
||||
# you may want to change the dictionary size for your cases.
|
||||
lua_shared_dict my_cache 10m;
|
||||
lua_shared_dict my_locks 1m;
|
||||
```
|
||||
|
||||
The `my_cache` dictionary is for the data cache while the `my_locks` dictionary is for `resty.lock` itself.
|
||||
|
||||
Several important things to note in the example above:
|
||||
|
||||
1. You need to release the lock as soon as possible, even when some other unrelated errors happen.
|
||||
2. You need to update the cache with the result got from the backend *before* releasing the lock so other threads already waiting on the lock can get cached value when they get the lock afterwards.
|
||||
3. When the backend returns no value at all, we should handle the case carefully by inserting some stub value into the cache.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
* [LuaJIT](http://luajit.org) 2.0+
|
||||
* [ngx_lua](https://github.com/chaoslawful/lua-nginx-module) 0.8.10+
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Installation
|
||||
============
|
||||
|
||||
It is recommended to use the latest [ngx_openresty bundle](http://openresty.org) directly where this library
|
||||
is bundled and enabled by default. At least ngx_openresty 1.4.2.9 is required. And you need to enable LuaJIT when building your ngx_openresty
|
||||
bundle by passing the `--with-luajit` option to its `./configure` script. No extra Nginx configuration is required.
|
||||
|
||||
If you want to use this library with your own Nginx build (with ngx_lua), then you need to
|
||||
ensure you are using at least ngx_lua 0.8.10. Also, You need to configure
|
||||
the [lua_package_path](https://github.com/chaoslawful/lua-nginx-module#lua_package_path) directive to
|
||||
add the path of your lua-resty-lock source tree to ngx_lua's Lua module search path, as in
|
||||
|
||||
```nginx
|
||||
# nginx.conf
|
||||
http {
|
||||
lua_package_path "/path/to/lua-resty-lock/lib/?.lua;;";
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
and then load the library in Lua:
|
||||
|
||||
```lua
|
||||
local lock = require "resty.lock"
|
||||
```
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
TODO
|
||||
====
|
||||
|
||||
* We should simplify the current implementation when LuaJIT 2.1 gets support for `__gc` metamethod on normal Lua tables. Right now we are using an FFI cdata and a ref/unref memo table to work around this, which is rather ugly and a bit inefficient.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Community
|
||||
=========
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
English Mailing List
|
||||
--------------------
|
||||
|
||||
The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Chinese Mailing List
|
||||
--------------------
|
||||
|
||||
The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Bugs and Patches
|
||||
================
|
||||
|
||||
Please report bugs or submit patches by
|
||||
|
||||
1. creating a ticket on the [GitHub Issue Tracker](http://github.com/openresty/lua-resty-lock/issues),
|
||||
1. or posting to the [OpenResty community](#community).
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Author
|
||||
======
|
||||
|
||||
Yichun "agentzh" Zhang (章亦春) <agentzh@gmail.com>, CloudFlare Inc.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
Copyright and License
|
||||
=====================
|
||||
|
||||
This module is licensed under the BSD license.
|
||||
|
||||
Copyright (C) 2013-2014, by Yichun "agentzh" Zhang, CloudFlare Inc.
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
See Also
|
||||
========
|
||||
* the ngx_lua module: https://github.com/chaoslawful/lua-nginx-module
|
||||
* OpenResty: http://openresty.org
|
||||
[Back to TOC](#table-of-contents)
|
||||
|
||||
208
controllers/nginx-third-party/lua/vendor/lua-resty-lock/lib/resty/lock.lua
vendored
Normal file
208
controllers/nginx-third-party/lua/vendor/lua-resty-lock/lib/resty/lock.lua
vendored
Normal file
|
|
@ -0,0 +1,208 @@
|
|||
-- Copyright (C) Yichun Zhang (agentzh)
|
||||
|
||||
|
||||
local ffi = require "ffi"
|
||||
local ffi_new = ffi.new
|
||||
local shared = ngx.shared
|
||||
local sleep = ngx.sleep
|
||||
local shdict_mt
|
||||
local debug = ngx.config.debug
|
||||
local setmetatable = setmetatable
|
||||
local getmetatable = getmetatable
|
||||
local tonumber = tonumber
|
||||
|
||||
|
||||
local _M = { _VERSION = '0.04' }
|
||||
local mt = { __index = _M }
|
||||
|
||||
|
||||
local FREE_LIST_REF = 0
|
||||
|
||||
-- FIXME: we don't need this when we have __gc metamethod support on Lua
|
||||
-- tables.
|
||||
local memo = {}
|
||||
if debug then _M.memo = memo end
|
||||
|
||||
|
||||
local function ref_obj(key)
|
||||
if key == nil then
|
||||
return -1
|
||||
end
|
||||
local ref = memo[FREE_LIST_REF]
|
||||
if ref and ref ~= 0 then
|
||||
memo[FREE_LIST_REF] = memo[ref]
|
||||
|
||||
else
|
||||
ref = #memo + 1
|
||||
end
|
||||
memo[ref] = key
|
||||
|
||||
-- print("ref key_id returned ", ref)
|
||||
return ref
|
||||
end
|
||||
if debug then _M.ref_obj = ref_obj end
|
||||
|
||||
|
||||
local function unref_obj(ref)
|
||||
if ref >= 0 then
|
||||
memo[ref] = memo[FREE_LIST_REF]
|
||||
memo[FREE_LIST_REF] = ref
|
||||
end
|
||||
end
|
||||
if debug then _M.unref_obj = unref_obj end
|
||||
|
||||
|
||||
local function gc_lock(cdata)
|
||||
local dict_id = tonumber(cdata.dict_id)
|
||||
local key_id = tonumber(cdata.key_id)
|
||||
|
||||
-- print("key_id: ", key_id, ", key: ", memo[key_id], "dict: ",
|
||||
-- type(memo[cdata.dict_id]))
|
||||
if key_id > 0 then
|
||||
local key = memo[key_id]
|
||||
unref_obj(key_id)
|
||||
local dict = memo[dict_id]
|
||||
-- print("dict.delete type: ", type(dict.delete))
|
||||
local ok, err = dict:delete(key)
|
||||
if not ok then
|
||||
ngx.log(ngx.ERR, 'failed to delete key "', key, '": ', err)
|
||||
end
|
||||
cdata.key_id = 0
|
||||
end
|
||||
|
||||
unref_obj(dict_id)
|
||||
end
|
||||
|
||||
|
||||
local ctype = ffi.metatype("struct { int key_id; int dict_id; }",
|
||||
{ __gc = gc_lock })
|
||||
|
||||
|
||||
function _M.new(_, dict_name, opts)
|
||||
local dict = shared[dict_name]
|
||||
if not dict then
|
||||
return nil, "dictionary not found"
|
||||
end
|
||||
local cdata = ffi_new(ctype)
|
||||
cdata.key_id = 0
|
||||
cdata.dict_id = ref_obj(dict)
|
||||
|
||||
local timeout, exptime, step, ratio, max_step
|
||||
if opts then
|
||||
timeout = opts.timeout
|
||||
exptime = opts.exptime
|
||||
step = opts.step
|
||||
ratio = opts.ratio
|
||||
max_step = opts.max_step
|
||||
end
|
||||
|
||||
if not exptime then
|
||||
exptime = 30
|
||||
end
|
||||
|
||||
if timeout and timeout > exptime then
|
||||
timeout = exptime
|
||||
end
|
||||
|
||||
local self = {
|
||||
cdata = cdata,
|
||||
dict = dict,
|
||||
timeout = timeout or 5,
|
||||
exptime = exptime,
|
||||
step = step or 0.001,
|
||||
ratio = ratio or 2,
|
||||
max_step = max_step or 0.5,
|
||||
}
|
||||
return setmetatable(self, mt)
|
||||
end
|
||||
|
||||
|
||||
-- Acquire the lock identified by `key`, waiting up to self.timeout seconds
-- with exponential backoff (step grows by self.ratio each round, clamped to
-- self.max_step and to the remaining timeout).
--
-- dict:add() only succeeds when the key is absent, which makes each attempt
-- an atomic test-and-set on the shared dict; the key auto-expires after
-- self.exptime so a crashed holder cannot hold the lock forever.
--
-- Returns the seconds spent waiting on success (0 means the lock was free),
-- or nil plus one of:
--   "nil key" - no key was given
--   "locked"  - this lock object already holds a key
--   "timeout" - the lock could not be acquired within self.timeout
--   any other error string reported by the shared dict
--
-- Fix over the previous revision: the acquisition sequence (add -> ref_obj
-- -> shdict_mt caching) was duplicated verbatim in the fast path and in the
-- retry loop; it is now a single loop whose first iteration attempts without
-- sleeping, preserving the exact attempt/sleep/backoff ordering and return
-- values of the original.
function _M.lock(self, key)
    if not key then
        return nil, "nil key"
    end

    local dict = self.dict
    local cdata = self.cdata
    if cdata.key_id > 0 then
        -- one lock object holds at most one key at a time
        return nil, "locked"
    end

    local exptime = self.exptime
    local step = self.step
    local ratio = self.ratio
    local timeout = self.timeout
    local max_step = self.max_step
    local elapsed = 0

    while true do
        local ok, err = dict:add(key, true, exptime)
        if ok then
            -- remember the key so unlock()/__gc can delete it later
            cdata.key_id = ref_obj(key)
            if not shdict_mt then
                -- cache the shdict metatable on first successful add
                shdict_mt = getmetatable(dict)
            end
            return elapsed
        end

        if err ~= "exists" then
            -- a real shdict error (e.g. "no memory"), not contention
            return nil, err
        end

        -- lock held by others: wait and retry, unless time is up
        if timeout <= 0 then
            break
        end

        if step > timeout then
            step = timeout
        end

        sleep(step)
        elapsed = elapsed + step
        timeout = timeout - step

        step = step * ratio
        if step <= 0 then
            -- guard against a non-positive ratio driving step to 0
            step = 0.001
        end
        if step > max_step then
            step = max_step
        end
    end

    return nil, "timeout"
end
|
||||
|
||||
|
||||
-- Release the lock held by this object by deleting its key from the shared
-- dict.  Returns 1 on success, or nil plus an error string ("unlocked" when
-- no lock is currently held, otherwise the shdict delete error).
function _M.unlock(self)
    local cdata = self.cdata

    local ref = tonumber(cdata.key_id)
    if ref <= 0 then
        return nil, "unlocked"
    end

    -- look the key up before returning its memo slot to the free list
    local key = memo[ref]
    unref_obj(ref)

    local ok, err = self.dict:delete(key)
    if not ok then
        return nil, err
    end

    cdata.key_id = 0
    return 1
end
|
||||
|
||||
|
||||
-- export the module table
return _M
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue