Merge remote-tracking branch 'origin' into refactor-cert

This commit is contained in:
Henry Tran 2018-06-21 11:40:49 -04:00
commit 86def984a3
89 changed files with 4420 additions and 1800 deletions

View file

@ -20,15 +20,37 @@ WORKDIR /etc/nginx
RUN clean-install \
diffutils \
dumb-init
dumb-init \
libcap2-bin
COPY . /
RUN setcap cap_net_bind_service=+ep /usr/sbin/nginx \
&& setcap cap_net_bind_service=+ep /nginx-ingress-controller
RUN bash -eux -c ' \
writeDirs=( \
/etc/nginx \
/etc/ingress-controller/ssl \
/etc/ingress-controller/auth \
/var/log \
/var/log/nginx \
/opt/modsecurity/var/log \
/opt/modsecurity/var/upload \
/opt/modsecurity/var/audit \
); \
for dir in "${writeDirs[@]}"; do \
mkdir -p ${dir}; \
chown -R www-data.www-data ${dir}; \
done \
'
# Create symlinks to redirect nginx logs to stdout and stderr docker log collector
# This only works if nginx is started with CMD or ENTRYPOINT
RUN mkdir -p /var/log/nginx \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
RUN ln -sf /dev/stdout /var/log/nginx/access.log \
&& ln -sf /dev/stderr /var/log/nginx/error.log
COPY . /
USER www-data
ENTRYPOINT ["/usr/bin/dumb-init"]

View file

@ -1,6 +1,7 @@
local balancer_resty = require("balancer.resty")
local resty_chash = require("resty.chash")
local util = require("util")
local split = require("util.split")
local _M = balancer_resty:new({ factory = resty_chash, name = "chash" })
@ -15,7 +16,7 @@ end
function _M.balance(self)
local key = util.lua_ngx_var(self.hash_by)
local endpoint_string = self.instance:find(key)
return util.split_pair(endpoint_string, ":")
return split.split_pair(endpoint_string, ":")
end
return _M

View file

@ -7,6 +7,7 @@
local resty_lock = require("resty.lock")
local util = require("util")
local split = require("util.split")
local DECAY_TIME = 10 -- this value is in seconds
local LOCK_KEY = ":ewma_key"
@ -131,10 +132,10 @@ function _M.balance(self)
end
function _M.after_balance(_)
local response_time = tonumber(util.get_first_value(ngx.var.upstream_response_time)) or 0
local connect_time = tonumber(util.get_first_value(ngx.var.upstream_connect_time)) or 0
local response_time = tonumber(split.get_first_value(ngx.var.upstream_response_time)) or 0
local connect_time = tonumber(split.get_first_value(ngx.var.upstream_connect_time)) or 0
local rtt = connect_time + response_time
local upstream = util.get_first_value(ngx.var.upstream_addr)
local upstream = split.get_first_value(ngx.var.upstream_addr)
if util.is_blank(upstream) then
return

View file

@ -1,6 +1,7 @@
local balancer_resty = require("balancer.resty")
local resty_roundrobin = require("resty.roundrobin")
local util = require("util")
local split = require("util.split")
local _M = balancer_resty:new({ factory = resty_roundrobin, name = "round_robin" })
@ -14,7 +15,7 @@ end
function _M.balance(self)
local endpoint_string = self.instance:find()
return util.split_pair(endpoint_string, ":")
return split.split_pair(endpoint_string, ":")
end
return _M

View file

@ -1,6 +1,7 @@
local balancer_resty = require("balancer.resty")
local resty_chash = require("resty.chash")
local util = require("util")
local split = require("util.split")
local ck = require("resty.cookie")
local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" })
@ -74,7 +75,7 @@ end
function _M.balance(self)
local endpoint_string = sticky_endpoint_string(self)
return util.split_pair(endpoint_string, ":")
return split.split_pair(endpoint_string, ":")
end
return _M

View file

@ -0,0 +1,46 @@
-- Ships per-request NGINX stats to the Prometheus sidecar over a unix socket.
local socket = ngx.socket.tcp
local cjson = require('cjson')
local defer = require('util.defer')
local assert = assert

local _M = {}

-- Deliver one JSON payload over the unix domain socket; any step that
-- fails raises via assert.
local function send_data(payload)
  local conn = assert(socket())
  assert(conn:connect('unix:/tmp/prometheus-nginx.socket'))
  assert(conn:send(payload))
  assert(conn:close())
end

-- Snapshot the request's ngx.var values as a JSON string.
-- Unset string variables are reported as "-", unset/non-numeric numeric
-- variables as -1.
function _M.encode_nginx_stats()
  local vars = ngx.var
  local stats = {
    host = vars.host or "-",
    status = vars.status or "-",
    remoteAddr = vars.remote_addr or "-",
    realIpAddr = vars.realip_remote_addr or "-",
    remoteUser = vars.remote_user or "-",
    bytesSent = tonumber(vars.bytes_sent) or -1,
    protocol = vars.server_protocol or "-",
    method = vars.request_method or "-",
    uri = vars.uri or "-",
    requestLength = tonumber(vars.request_length) or -1,
    requestTime = tonumber(vars.request_time) or -1,
    upstreamName = vars.proxy_upstream_name or "-",
    upstreamIP = vars.upstream_addr or "-",
    upstreamResponseTime = tonumber(vars.upstream_response_time) or -1,
    upstreamStatus = vars.upstream_status or "-",
    namespace = vars.namespace or "-",
    ingress = vars.ingress_name or "-",
    service = vars.service_name or "-",
  }
  return cjson.encode(stats)
end

-- Encode the current stats and hand delivery off to a timer, since sockets
-- are not available in every request phase (e.g. the log phase).
function _M.call()
  local ok, err = defer.to_timer_phase(send_data, _M.encode_nginx_stats())
  if ok then
    return
  end
  ngx.log(ngx.ERR, "failed to defer send_data to timer phase: ", err)
end

return _M

View file

@ -0,0 +1,20 @@
package.path = "./rootfs/etc/nginx/lua/?.lua;./rootfs/etc/nginx/lua/test/mocks/?.lua;" .. package.path
_G._TEST = true
local defer = require('util.defer')
local _ngx = {
shared = {},
log = function(...) end,
get_phase = function() return "timer" end,
}
_G.ngx = _ngx
describe("Defer", function()
describe("to_timer_phase", function()
it("executes passed callback immediately if called on timer phase", function()
defer.counter = 0
defer.to_timer_phase(function() defer.counter = defer.counter + 1 end)
assert.equal(defer.counter, 1)
end)
end)
end)

View file

@ -0,0 +1,122 @@
package.path = "./rootfs/etc/nginx/lua/?.lua;./rootfs/etc/nginx/lua/test/mocks/?.lua;" .. package.path
_G._TEST = true
local cjson = require('cjson')
local function udp_mock()
return {
setpeername = function(...) return true end,
send = function(payload) return payload end,
close = function(...) return true end
}
end
local _ngx = {
shared = {},
log = function(...) end,
socket = {
udp = udp_mock
},
get_phase = function() return "timer" end,
var = {}
}
_G.ngx = _ngx
describe("Monitor", function()
local monitor = require("monitor")
describe("encode_nginx_stats()", function()
it("successfuly encodes the current stats of nginx to JSON", function()
local nginx_environment = {
host = "testshop.com",
status = "200",
remote_addr = "10.10.10.10",
realip_remote_addr = "5.5.5.5",
remote_user = "admin",
bytes_sent = "150",
server_protocol = "HTTP",
request_method = "GET",
uri = "/admin",
request_length = "300",
request_time = "60",
proxy_upstream_name = "test-upstream",
upstream_addr = "2.2.2.2",
upstream_response_time = "200",
upstream_status = "220",
namespace = "test-app-production",
ingress_name = "web-yml",
service_name = "test-app",
}
ngx.var = nginx_environment
local encode_nginx_stats = monitor.encode_nginx_stats
local encoded_json_stats = encode_nginx_stats()
local decoded_json_stats = cjson.decode(encoded_json_stats)
local expected_json_stats = {
host = "testshop.com",
status = "200",
remoteAddr = "10.10.10.10",
realIpAddr = "5.5.5.5",
remoteUser = "admin",
bytesSent = 150.0,
protocol = "HTTP",
method = "GET",
uri = "/admin",
requestLength = 300.0,
requestTime = 60.0,
upstreamName = "test-upstream",
upstreamIP = "2.2.2.2",
upstreamResponseTime = 200,
upstreamStatus = "220",
namespace = "test-app-production",
ingress = "web-yml",
service = "test-app",
}
assert.are.same(decoded_json_stats,expected_json_stats)
end)
it("replaces empty numeric keys with -1 and missing string keys with -", function()
local nginx_environment = {
remote_addr = "10.10.10.10",
realip_remote_addr = "5.5.5.5",
remote_user = "francisco",
server_protocol = "HTTP",
request_method = "GET",
uri = "/admin",
request_time = "60",
proxy_upstream_name = "test-upstream",
upstream_addr = "2.2.2.2",
upstream_response_time = "200",
upstream_status = "220",
ingress_name = "web-yml",
}
ngx.var = nginx_environment
local encode_nginx_stats = monitor.encode_nginx_stats
local encoded_json_stats = encode_nginx_stats()
local decoded_json_stats = cjson.decode(encoded_json_stats)
local expected_json_stats = {
host = "-",
status = "-",
remoteAddr = "10.10.10.10",
realIpAddr = "5.5.5.5",
remoteUser = "francisco",
bytesSent = -1,
protocol = "HTTP",
method = "GET",
uri = "/admin",
requestLength = -1,
requestTime = 60.0,
upstreamName = "test-upstream",
upstreamIP = "2.2.2.2",
upstreamResponseTime = 200,
upstreamStatus = "220",
namespace = "-",
ingress = "web-yml",
service = "-",
}
assert.are.same(decoded_json_stats,expected_json_stats)
end)
end)
end)

View file

@ -49,17 +49,6 @@ function _M.lua_ngx_var(ngx_var)
return ngx.var[var_name]
end
function _M.split_pair(pair, seperator)
local i = pair:find(seperator)
if i == nil then
return pair, nil
else
local name = pair:sub(1, i - 1)
local value = pair:sub(i + 1, -1)
return name, value
end
end
-- this implementation is taken from
-- https://web.archive.org/web/20131225070434/http://snippets.luacode.org/snippets/Deep_Comparison_of_Two_Values_3
-- and modified for use in this project
@ -88,30 +77,6 @@ function _M.is_blank(str)
return str == nil or string_len(str) == 0
end
-- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example
-- CAVEAT: nginx is giving out : instead of , so the docs are wrong
-- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr
-- 200 : 200 , ngx.var.upstream_status
-- 0.00 : 0.00, ngx.var.upstream_response_time
function _M.split_upstream_var(var)
if not var then
return nil, nil
end
local t = {}
for v in var:gmatch("[^%s|,]+") do
if v ~= ":" then
t[#t+1] = v
end
end
return t
end
function _M.get_first_value(var)
local t = _M.split_upstream_var(var) or {}
if #t == 0 then return nil end
return t[1]
end
-- this implementation is taken from:
-- https://github.com/luafun/luafun/blob/master/fun.lua#L33
-- SHA: 04c99f9c393e54a604adde4b25b794f48104e0d0
@ -130,4 +95,13 @@ local function deepcopy(orig)
end
_M.deepcopy = deepcopy
local function tablelength(T)
local count = 0
for _ in pairs(T) do
count = count + 1
end
return count
end
_M.tablelength = tablelength
return _M

View file

@ -0,0 +1,57 @@
local util = require("util")
local timer_started = false
local queue = {}
local MAX_QUEUE_SIZE = 10000
local _M = {}
local function flush_queue(premature)
-- TODO Investigate if we should actually still flush the queue when we're
-- shutting down.
if premature then return end
local current_queue = queue
queue = {}
timer_started = false
for _,v in ipairs(current_queue) do
v.func(unpack(v.args))
end
end
-- `to_timer_phase` will enqueue a function that will be executed in a timer
-- context, at a later point in time. The purpose is that some APIs (such as
-- sockets) are not available during some nginx request phases (such as the
-- logging phase), but are available for use in timers. There are no ordering
-- guarantees for when a function will be executed.
function _M.to_timer_phase(func, ...)
if ngx.get_phase() == "timer" then
func(...)
return true
end
if #queue >= MAX_QUEUE_SIZE then
ngx.log(ngx.ERR, "deferred timer queue full")
return nil, "deferred timer queue full"
end
table.insert(queue, { func = func, args = {...} })
if not timer_started then
local ok, err = ngx.timer.at(0, flush_queue)
if ok then
-- unfortunately this is to deal with tests - when running unit tests, we
-- dont actually run the timer, we call the function inline
if util.tablelength(queue) > 0 then
timer_started = true
end
else
local msg = "failed to create timer: " .. tostring(err)
ngx.log(ngx.ERR, msg)
return nil, msg
end
end
return true
end
return _M

View file

@ -0,0 +1,70 @@
local _M = {}
-- splits strings into host and port
local function parse_addr(addr)
local _, _, host, port = addr:find("([^:]+):([^:]+)")
if host and port then
return {host=host, port=port}
else
return nil, "error in parsing upstream address!"
end
end
function _M.get_first_value(var)
local t = _M.split_upstream_var(var) or {}
if #t == 0 then return nil end
return t[1]
end
function _M.split_pair(pair, seperator)
local i = pair:find(seperator)
if i == nil then
return pair, nil
else
local name = pair:sub(1, i - 1)
local value = pair:sub(i + 1, -1)
return name, value
end
end
-- http://nginx.org/en/docs/http/ngx_http_upstream_module.html#example
-- CAVEAT: nginx is giving out : instead of , so the docs are wrong
-- 127.0.0.1:26157 : 127.0.0.1:26157 , ngx.var.upstream_addr
-- 200 : 200 , ngx.var.upstream_status
-- 0.00 : 0.00, ngx.var.upstream_response_time
function _M.split_upstream_var(var)
if not var then
return nil, nil
end
local t = {}
for v in var:gmatch("[^%s|,]+") do
if v ~= ":" then
t[#t+1] = v
end
end
return t
end
-- Splits an NGINX $upstream_addr and returns an array of tables with a `host` and `port` key-value pair.
function _M.split_upstream_addr(addrs_str)
if not addrs_str then
return nil, nil
end
local addrs = _M.split_upstream_var(addrs_str)
local host_and_ports = {}
for _, v in ipairs(addrs) do
local a, err = parse_addr(v)
if err then
return nil, err
end
host_and_ports[#host_and_ports+1] = a
end
if #host_and_ports == 0 then
return nil, "no upstream addresses to parse!"
end
return host_and_ports
end
return _M

View file

@ -1,5 +1,5 @@
# A very simple nginx configuration file that forces nginx to start.
pid /run/nginx.pid;
pid /tmp/nginx.pid;
events {}
http {}

View file

@ -7,6 +7,11 @@
{{ $proxyHeaders := .ProxySetHeaders }}
{{ $addHeaders := .AddHeaders }}
# Configuration checksum: {{ $all.Cfg.Checksum }}
# setup custom paths that do not require root access
pid /tmp/nginx.pid;
{{ if $cfg.EnableModsecurity }}
load_module /etc/nginx/modules/ngx_http_modsecurity_module.so;
{{ end }}
@ -20,7 +25,6 @@ worker_processes {{ $cfg.WorkerProcesses }};
worker_cpu_affinity {{ $cfg.WorkerCpuAffinity }};
{{ end }}
pid /run/nginx.pid;
{{ if ne .MaxOpenFiles 0 }}
worker_rlimit_nofile {{ .MaxOpenFiles }};
{{ end }}
@ -67,6 +71,13 @@ http {
balancer = res
end
{{ end }}
ok, res = pcall(require, "monitor")
if not ok then
error("require failed: " .. tostring(res))
else
monitor = res
end
}
{{ if $all.DynamicConfigurationEnabled }}
@ -97,11 +108,6 @@ http {
geoip_proxy_recursive on;
{{ end }}
{{ if $cfg.EnableVtsStatus }}
vhost_traffic_status_zone shared:vhost_traffic_status:{{ $cfg.VtsStatusZoneSize }};
vhost_traffic_status_filter_by_set_key {{ $cfg.VtsDefaultFilterKey }};
{{ end }}
aio threads;
aio_write on;
@ -115,6 +121,10 @@ http {
keepalive_timeout {{ $cfg.KeepAlive }}s;
keepalive_requests {{ $cfg.KeepAliveRequests }};
client_body_temp_path /tmp/client-body;
fastcgi_temp_path /tmp/fastcgi-temp;
proxy_temp_path /tmp/proxy-temp;
client_header_buffer_size {{ $cfg.ClientHeaderBufferSize }};
client_header_timeout {{ $cfg.ClientHeaderTimeout }}s;
large_client_header_buffers {{ $cfg.LargeClientHeaderBuffers }};
@ -182,6 +192,7 @@ http {
# $namespace
# $ingress_name
# $service_name
# $service_port
log_format upstreaminfo {{ if $cfg.LogFormatEscapeJSON }}escape=json {{ end }}'{{ buildLogFormatUpstream $cfg }}';
{{/* map urls that should not appear in access.log */}}
@ -360,7 +371,7 @@ http {
{{ range $name, $upstream := $backends }}
{{ if eq $upstream.SessionAffinity.AffinityType "cookie" }}
upstream sticky-{{ $upstream.Name }} {
sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }} httponly;
sticky hash={{ $upstream.SessionAffinity.CookieSessionAffinity.Hash }} name={{ $upstream.SessionAffinity.CookieSessionAffinity.Name }}{{if eq (len $upstream.SessionAffinity.CookieSessionAffinity.Locations) 1 }}{{ range $locationName, $locationPaths := $upstream.SessionAffinity.CookieSessionAffinity.Locations }}{{ if eq (len $locationPaths) 1 }} path={{ index $locationPaths 0 }}{{ end }}{{ end }}{{ end }} httponly;
{{ if (gt $cfg.UpstreamKeepaliveConnections 0) }}
keepalive {{ $cfg.UpstreamKeepaliveConnections }};
@ -529,14 +540,8 @@ http {
opentracing off;
{{ end }}
{{ if $cfg.EnableVtsStatus }}
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
vhost_traffic_status_display_sum_key {{ $cfg.VtsSumKey }};
{{ else }}
access_log off;
stub_status on;
{{ end }}
}
{{ if $all.DynamicConfigurationEnabled }}
@ -593,7 +598,7 @@ stream {
{{ range $i, $tcpServer := .TCPBackends }}
upstream tcp-{{ $tcpServer.Port }}-{{ $tcpServer.Backend.Namespace }}-{{ $tcpServer.Backend.Name }}-{{ $tcpServer.Backend.Port }} {
{{ range $j, $endpoint := $tcpServer.Endpoints }}
server {{ $endpoint.Address }}:{{ $endpoint.Port }};
server {{ $endpoint.Address | formatIP }}:{{ $endpoint.Port }};
{{ end }}
}
server {
@ -622,7 +627,7 @@ stream {
{{ range $i, $udpServer := .UDPBackends }}
upstream udp-{{ $udpServer.Port }}-{{ $udpServer.Backend.Namespace }}-{{ $udpServer.Backend.Name }}-{{ $udpServer.Backend.Port }} {
{{ range $j, $endpoint := $udpServer.Endpoints }}
server {{ $endpoint.Address }}:{{ $endpoint.Port }};
server {{ $endpoint.Address | formatIP }}:{{ $endpoint.Port }};
{{ end }}
}
@ -663,6 +668,7 @@ stream {
proxy_set_header X-Namespace $namespace;
proxy_set_header X-Ingress-Name $ingress_name;
proxy_set_header X-Service-Name $service_name;
proxy_set_header X-Service-Port $service_port;
rewrite (.*) / break;
@ -833,6 +839,13 @@ stream {
{{ end }}
location {{ $path }} {
{{ $ing := (getIngressInformation $location.Ingress $location.Path) }}
set $namespace "{{ $ing.Namespace }}";
set $ingress_name "{{ $ing.Rule }}";
set $service_name "{{ $ing.Service }}";
set $service_port "{{ $location.Port }}";
set $location_path "{{ $location.Path }}";
{{ if not $all.DisableLua }}
rewrite_by_lua_block {
{{ if $all.DynamicConfigurationEnabled}}
@ -888,6 +901,8 @@ stream {
{{ if $all.DynamicConfigurationEnabled}}
balancer.log()
{{ end }}
monitor.call()
}
{{ end }}
@ -908,16 +923,8 @@ stream {
port_in_redirect {{ if $location.UsePortInRedirects }}on{{ else }}off{{ end }};
{{ if $all.Cfg.EnableVtsStatus }}{{ if $location.VtsFilterKey }} vhost_traffic_status_filter_by_set_key {{ $location.VtsFilterKey }};{{ end }}{{ end }}
set $proxy_upstream_name "{{ buildUpstreamName $server.Hostname $all.Backends $location $all.DynamicConfigurationEnabled }}";
{{ $ing := (getIngressInformation $location.Ingress $location.Path) }}
{{/* $ing.Metadata contains the Ingress metadata */}}
set $namespace "{{ $ing.Namespace }}";
set $ingress_name "{{ $ing.Rule }}";
set $service_name "{{ $ing.Service }}";
{{/* redirect to HTTPS can be achieved forcing the redirect or having a SSL Certificate configured for the server */}}
{{ if (or $location.Rewrite.ForceSSLRedirect (and (not (empty $server.SSLCert.PemFileName)) $location.Rewrite.SSLRedirect)) }}
{{ if not (isLocationInLocationList $location $all.Cfg.NoTLSRedirectLocations) }}
@ -1091,6 +1098,7 @@ stream {
proxy_set_header X-Namespace $namespace;
proxy_set_header X-Ingress-Name $ingress_name;
proxy_set_header X-Service-Name $service_name;
proxy_set_header X-Service-Port $service_port;
{{ end }}
{{ if not (empty $location.Backend) }}