Rewrite echoheaders to use OpenResty's templating.
This automatically escapes things and makes the code easier to read. template.lua was slightly modified to not escape /, so it is vendored.
This commit is contained in:
parent
245e6b0b0e
commit
e5d5bda1ca
4 changed files with 573 additions and 61 deletions
|
|
@ -1,5 +1,5 @@
|
|||
events {
|
||||
worker_connections 1024;
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
env HOSTNAME;
|
||||
|
|
@ -9,73 +9,75 @@ env POD_NAMESPACE;
|
|||
env POD_IP;
|
||||
|
||||
http {
|
||||
default_type 'text/plain';
|
||||
# maximum allowed size of the client request body. By default this is 1m.
|
||||
# Request with bigger bodies nginx will return error code 413.
|
||||
# http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
|
||||
client_max_body_size 10m;
|
||||
default_type 'text/plain';
|
||||
# maximum allowed size of the client request body. By default this is 1m.
|
||||
# Request with bigger bodies nginx will return error code 413.
|
||||
# http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
|
||||
client_max_body_size 10m;
|
||||
|
||||
server {
|
||||
# please check the benefits of reuseport https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1
|
||||
# basically instructs to create an individual listening socket for each worker process (using the SO_REUSEPORT
|
||||
# socket option), allowing a kernel to distribute incoming connections between worker processes.
|
||||
listen 8080 default_server reuseport;
|
||||
init_by_lua_block {
|
||||
local template = require("template")
|
||||
-- template syntax documented here:
|
||||
-- https://github.com/bungle/lua-resty-template/blob/master/README.md
|
||||
tmpl = template.compile([[
|
||||
|
||||
# Replace '_' with your hostname.
|
||||
server_name _;
|
||||
|
||||
location / {
|
||||
lua_need_request_body on;
|
||||
content_by_lua_block {
|
||||
ngx.header["Server"] = "echoserver"
|
||||
Hostname: {{os.getenv("HOSTNAME") or "N/A"}}
|
||||
|
||||
ngx.say("")
|
||||
ngx.say("")
|
||||
ngx.say("Hostname: ", os.getenv("HOSTNAME") or "N/A")
|
||||
ngx.say("")
|
||||
Pod Information:
|
||||
{% if os.getenv("POD_NAME") then %}
|
||||
node name: {{os.getenv("NODE_NAME") or "N/A"}}
|
||||
pod name: {{os.getenv("POD_NAME") or "N/A"}}
|
||||
pod namespace: {{os.getenv("POD_NAMESPACE") or "N/A"}}
|
||||
pod IP: {{os.getenv("POD_IP") or "N/A"}}
|
||||
{% else %}
|
||||
-no pod information available-
|
||||
{% end %}
|
||||
|
||||
ngx.say("Pod Information:")
|
||||
if os.getenv("POD_NAME") then
|
||||
ngx.say("\tnode name:\t ", os.getenv("NODE_NAME") or "N/A")
|
||||
ngx.say("\tpod name:\t ", os.getenv("POD_NAME") or "N/A")
|
||||
ngx.say("\tpod namespace:\t ", os.getenv("POD_NAMESPACE") or "N/A")
|
||||
ngx.say("\tpod IP: \t ", os.getenv("POD_IP") or "N/A")
|
||||
else
|
||||
ngx.say("\t-no pod information available-")
|
||||
end
|
||||
Server values:
|
||||
server_version=nginx: {{ngx.var.nginx_version}} - lua: {{ngx.config.ngx_lua_version}}
|
||||
|
||||
ngx.say("")
|
||||
Request Information:
|
||||
client_address={{ngx.var.remote_addr}}
|
||||
method={{ngx.req.get_method()}}
|
||||
real path={{ngx.var.request_uri}}
|
||||
query={{ngx.var.query_string or ""}}
|
||||
request_version={{ngx.req.http_version()}}
|
||||
request_uri={{ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri}}
|
||||
|
||||
ngx.say("Server values:")
|
||||
ngx.say("\tserver_version=", "nginx: "..ngx.var.nginx_version.." - lua: "..ngx.config.ngx_lua_version)
|
||||
ngx.say("")
|
||||
Request Headers:
|
||||
{% for i, key in ipairs(keys) do %}
|
||||
{{key}}={{headers[key]}}
|
||||
{% end %}
|
||||
|
||||
ngx.say("Request Information:")
|
||||
ngx.say("\tclient_address=", ngx.var.remote_addr)
|
||||
ngx.say("\tmethod=", ngx.req.get_method())
|
||||
ngx.say("\treal path=", ngx.var.request_uri)
|
||||
ngx.say("\tquery=", ngx.var.query_string or "")
|
||||
ngx.say("\trequest_version=", ngx.req.http_version())
|
||||
ngx.say("\trequest_uri=", ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri)
|
||||
ngx.say("")
|
||||
Request Body:
|
||||
{{ngx.var.request_body or " -no body in request-"}}
|
||||
]])
|
||||
}
|
||||
|
||||
ngx.say("Request Headers:")
|
||||
local headers = ngx.req.get_headers()
|
||||
local keys = {}
|
||||
for key, val in pairs(headers) do
|
||||
table.insert(keys, key)
|
||||
end
|
||||
server {
|
||||
# please check the benefits of reuseport https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1
|
||||
# basically instructs to create an individual listening socket for each worker process (using the SO_REUSEPORT
|
||||
# socket option), allowing a kernel to distribute incoming connections between worker processes.
|
||||
listen 8080 default_server reuseport;
|
||||
|
||||
table.sort(keys)
|
||||
for i, key in ipairs(keys) do
|
||||
ngx.say("\t", key, "=", headers[key])
|
||||
end
|
||||
ngx.say("")
|
||||
# Replace '_' with your hostname.
|
||||
server_name _;
|
||||
|
||||
ngx.say("Request Body:")
|
||||
ngx.say(ngx.var.request_body or "\t-no body in request-");
|
||||
ngx.say("")
|
||||
}
|
||||
}
|
||||
}
|
||||
location / {
|
||||
lua_need_request_body on;
|
||||
content_by_lua_block {
|
||||
ngx.header["Server"] = "echoserver"
|
||||
|
||||
local headers = ngx.req.get_headers()
|
||||
local keys = {}
|
||||
for key, val in pairs(headers) do
|
||||
table.insert(keys, key)
|
||||
end
|
||||
table.sort(keys)
|
||||
|
||||
ngx.say(tmpl({os=os, ngx=ngx, keys=keys, headers=headers}))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue