Live Nginx (re)configuration without reloading (#2174)

This commit is contained in:
Elvin Efendi 2018-03-18 09:13:41 -04:00 committed by Manuel Alejandro de Brito Fontes
parent 41cefeb178
commit c90a4e811e
13 changed files with 759 additions and 114 deletions

View file

@ -0,0 +1,107 @@
local ngx_balancer = require("ngx.balancer")
local json = require("cjson")
local configuration = require("configuration")
local util = require("util")
local lrucache = require("resty.lrucache")
local resty_lock = require("resty.lock")
-- measured in seconds
-- for an Nginx worker to pick up the new list of upstream peers
-- it will take <the delay until controller POSTed the backend object to the Nginx endpoint> + BACKENDS_SYNC_INTERVAL
local BACKENDS_SYNC_INTERVAL = 1
-- key suffix used to serialize round-robin index updates per backend.
-- BUG FIX: was an accidental global (missing `local`); module-level
-- constants must not leak into _G.
local ROUND_ROBIN_LOCK_KEY = "round_robin"

-- shared dict holding the last-used endpoint index for each backend
local round_robin_state = ngx.shared.round_robin_state

local _M = {}

-- non-blocking lock (timeout = 0): contending workers fail fast instead
-- of waiting; exptime 0.1s guards against a worker dying while holding it
local round_robin_lock = resty_lock:new("locks", {timeout = 0, exptime = 0.1})

-- per-worker LRU cache of decoded backend objects, keyed by backend name
local backends, err = lrucache.new(1024)
if not backends then
  return error("failed to create the cache for backends: " .. (err or "unknown"))
end
-- Picks an endpoint (address, port) for the backend named by
-- $proxy_upstream_name, using round-robin over the backend's endpoints.
-- Round-robin position is kept in the round_robin_state shared dict so it
-- is shared across requests within this Nginx worker group.
local function balance()
  local backend_name = ngx.var.proxy_upstream_name
  local backend = backends:get(backend_name)

  -- lb_alg field does not exist for ingress.Backend struct for now, so lb_alg
  -- will always be round_robin
  local lb_alg = backend.lb_alg or "round_robin"

  if lb_alg == "ip_hash" then
    -- TODO(elvinefendi) implement me
    -- BUG FIX: Lua tables are 1-indexed; endpoints[0] is always nil and
    -- would crash with "attempt to index a nil value"
    return backend.endpoints[1].address, backend.endpoints[1].port
  end

  -- Round-Robin
  local lock_ok, lock_err = round_robin_lock:lock(backend_name .. ROUND_ROBIN_LOCK_KEY)
  if not lock_ok then
    -- lock is configured non-blocking; on contention fall through without
    -- the lock rather than failing the request, but record it
    ngx.log(ngx.WARN, "could not acquire balancer lock: " .. tostring(lock_err))
  end

  -- next() with the previously stored index walks the endpoints table;
  -- a nil result means we ran off the end (or the index was stale) and
  -- we wrap around to the first endpoint
  local last_index = round_robin_state:get(backend_name)
  local index, endpoint = next(backend.endpoints, last_index)
  if not index then
    index = 1
    endpoint = backend.endpoints[index]
  end
  round_robin_state:set(backend_name, index)

  if lock_ok then
    round_robin_lock:unlock(backend_name .. ROUND_ROBIN_LOCK_KEY)
  end

  return endpoint.address, endpoint.port
end
-- Stores a single decoded backend in the worker-local LRU cache and resets
-- its round-robin position, since the endpoint list may have changed.
local function sync_backend(backend)
  backends:set(backend.name, backend)

  -- also reset the respective balancer state since backend has changed
  round_robin_state:delete(backend.name)

  -- FIX: corrected "syncronization" typo in the log message
  ngx.log(ngx.INFO, "synchronization completed for: " .. backend.name)
end
-- Pulls the JSON-encoded backend list published by the controller and
-- refreshes the local cache for every backend whose definition changed.
local function sync_backends()
  local raw_backends = configuration.get_backends_data()
  if not raw_backends then
    return
  end

  local decode_ok, decoded = pcall(json.decode, raw_backends)
  if not decode_ok then
    ngx.log(ngx.ERR, "could not parse backends data: " .. tostring(decoded))
    return
  end

  for _, fresh_backend in pairs(decoded) do
    local cached = backends:get(fresh_backend.name)
    -- sync when the backend is new, or when its content differs from
    -- what we already have cached
    local is_stale = (cached == nil) or not util.deep_compare(cached, fresh_backend)
    if is_stale then
      sync_backend(fresh_backend)
    end
  end
end
-- Starts the periodic timer that keeps this worker's backend cache in sync
-- with the configuration published by the controller.
function _M.init_worker()
  -- BUG FIX: the original wrote `_, err = ...`, assigning to the global `_`
  -- and clobbering the module-level `err` from the lrucache setup; use
  -- fresh locals instead
  local ok, timer_err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends)
  if not ok then
    ngx.log(ngx.ERR, "error when setting up timer.every for sync_backends: " .. tostring(timer_err))
  end
end
-- Entry point for balancer_by_lua: chooses an endpoint via balance() and
-- points the current request at it, allowing one retry on failure.
function _M.call()
  ngx_balancer.set_more_tries(1)

  local address, port = balance()

  local ok, err = ngx_balancer.set_current_peer(address, port)
  if not ok then
    ngx.log(ngx.ERR, "error while setting current upstream peer to: " .. tostring(err))
    return
  end

  ngx.log(ngx.INFO, "current peer is set to " .. address .. ":" .. port)
end
return _M

View file

@ -0,0 +1,41 @@
-- this is the Lua representation of Configuration struct in internal/ingress/types.go
local configuration_data = ngx.shared.configuration_data
local _M = {}
-- Returns the raw JSON string of backends most recently POSTed by the
-- controller, straight from the shared dict (nil if never set).
-- NOTE: shared dict get() returns multiple values; the bare `return`
-- deliberately propagates all of them to the caller.
function _M.get_backends_data()
return configuration_data:get("backends")
end
-- HTTP handler for the /configuration/backends endpoint:
--   GET  -> returns the currently stored backends JSON
--   POST -> stores the request body as the new backends JSON
-- Any other method or path is rejected.
function _M.call()
  if ngx.var.request_method ~= "POST" and ngx.var.request_method ~= "GET" then
    ngx.status = ngx.HTTP_BAD_REQUEST
    ngx.print("Only POST and GET requests are allowed!")
    return
  end

  if ngx.var.request_uri ~= "/configuration/backends" then
    ngx.status = ngx.HTTP_NOT_FOUND
    ngx.print("Not found!")
    return
  end

  if ngx.var.request_method == "GET" then
    ngx.status = ngx.HTTP_OK
    ngx.print(_M.get_backends_data())
    return
  end

  ngx.req.read_body()

  -- BUG FIX: get_body_data() returns nil when the body exceeded
  -- client_body_buffer_size and was spooled to a temp file; the original
  -- would then set("backends", nil), silently wiping the configuration.
  -- Fall back to reading the body file in that case.
  local body_data = ngx.req.get_body_data()
  if not body_data then
    local body_file = ngx.req.get_body_file()
    if not body_file then
      ngx.log(ngx.ERR, "request body is missing")
      ngx.status = ngx.HTTP_BAD_REQUEST
      return
    end
    local file, open_err = io.open(body_file, "r")
    if not file then
      ngx.log(ngx.ERR, "could not open body temp file: " .. tostring(open_err))
      ngx.status = ngx.HTTP_BAD_REQUEST
      return
    end
    body_data = file:read("*a")
    file:close()
  end

  local success, err = configuration_data:set("backends", body_data)
  if not success then
    ngx.log(ngx.ERR, "error while saving configuration: " .. tostring(err))
    ngx.status = ngx.HTTP_BAD_REQUEST
    return
  end

  ngx.status = ngx.HTTP_CREATED
end
return _M

View file

@ -0,0 +1,27 @@
local _M = {}

-- this implementation is taken from
-- https://web.archive.org/web/20131225070434/http://snippets.luacode.org/snippets/Deep_Comparison_of_Two_Values_3
-- and modified for use in this project

-- Structurally compares two values. Non-table values are compared with ==;
-- tables are compared key-by-key, recursively. When ignore_mt is falsy and
-- t1 has an __eq metamethod, that metamethod decides equality instead.
-- Returns true when the values are (deeply) equal, false otherwise.
local function deep_compare(t1, t2, ignore_mt)
  local ty1 = type(t1)
  local ty2 = type(t2)
  if ty1 ~= ty2 then return false end

  -- non-table types can be directly compared
  if ty1 ~= 'table' and ty2 ~= 'table' then return t1 == t2 end

  -- as well as tables which have the metamethod __eq
  local mt = getmetatable(t1)
  if not ignore_mt and mt and mt.__eq then return t1 == t2 end

  for k1, v1 in pairs(t1) do
    local v2 = t2[k1]
    -- BUG FIX: propagate ignore_mt into recursive calls; the original
    -- dropped it, so nested tables were always compared with metamethods
    if v2 == nil or not deep_compare(v1, v2, ignore_mt) then return false end
  end

  -- only check for keys present in t2 but missing from t1; any key that
  -- exists in both was already fully compared by the loop above, so the
  -- original's recursive re-comparison here was redundant work
  for k2 in pairs(t2) do
    if t1[k2] == nil then return false end
  end

  return true
end
_M.deep_compare = deep_compare
return _M

View file

@ -36,6 +36,39 @@ events {
}
http {
lua_package_cpath "/usr/local/lib/lua/?.so;/usr/lib/x86_64-linux-gnu/lua/5.1/?.so;;";
lua_package_path "/etc/nginx/lua/?.lua;/etc/nginx/lua/vendor/?.lua;/usr/local/lib/lua/?.lua;;";
lua_shared_dict configuration_data 5M;
lua_shared_dict round_robin_state 1M;
lua_shared_dict locks 512k;
init_by_lua_block {
require("resty.core")
collectgarbage("collect")
-- init modules
local ok, res
ok, res = pcall(require, "configuration")
if not ok then
error("require failed: " .. tostring(res))
else
configuration = res
end
ok, res = pcall(require, "balancer")
if not ok then
error("require failed: " .. tostring(res))
else
balancer = res
end
}
init_worker_by_lua_block {
balancer.init_worker()
}
{{/* we use the value of the header X-Forwarded-For to be able to use the geo_ip module */}}
{{ if $cfg.UseProxyProtocol }}
real_ip_header proxy_protocol;
@ -308,6 +341,7 @@ http {
{{ $cfg.HTTPSnippet }}
{{ end }}
{{ if not $all.DynamicConfigurationEnabled }}
{{ range $name, $upstream := $backends }}
{{ if eq $upstream.SessionAffinity.AffinityType "cookie" }}
upstream sticky-{{ $upstream.Name }} {
@ -319,9 +353,7 @@ http {
{{ range $server := $upstream.Endpoints }}server {{ $server.Address | formatIP }}:{{ $server.Port }} max_fails={{ $server.MaxFails }} fail_timeout={{ $server.FailTimeout }};
{{ end }}
}
{{ end }}
upstream {{ $upstream.Name }} {
@ -334,8 +366,18 @@ http {
{{ range $server := $upstream.Endpoints }}server {{ $server.Address | formatIP }}:{{ $server.Port }} max_fails={{ $server.MaxFails }} fail_timeout={{ $server.FailTimeout }};
{{ end }}
}
{{ end }}
{{ end }}
upstream upstream_balancer {
server 0.0.0.1; # placeholder
balancer_by_lua_block {
balancer.call()
}
keepalive 1000;
}
{{/* build the maps that will be use to validate the Whitelist */}}
{{ range $index, $server := $servers }}
@ -452,12 +494,24 @@ http {
{{ end }}
}
location /configuration {
allow 127.0.0.1;
deny all;
content_by_lua_block {
configuration.call()
}
}
location / {
{{ if .CustomErrors }}
proxy_set_header X-Code 404;
{{ end }}
set $proxy_upstream_name "upstream-default-backend";
{{ if $all.DynamicConfigurationEnabled }}
proxy_pass http://upstream_balancer;
{{ else }}
proxy_pass http://upstream-default-backend;
{{ end }}
}
{{ template "CUSTOM_ERRORS" $all }}
@ -550,7 +604,12 @@ stream {
proxy_set_header X-Service-Name $service_name;
rewrite (.*) / break;
proxy_pass http://upstream-default-backend;
{{ if .DynamicConfigurationEnabled }}
proxy_pass http://upstream_balancer;
{{ else }}
proxy_pass http://upstream-default-backend;
{{ end }}
}
{{ end }}
{{ end }}
@ -887,7 +946,7 @@ stream {
{{ end }}
{{ if not (empty $location.Backend) }}
{{ buildProxyPass $server.Hostname $all.Backends $location }}
{{ buildProxyPass $server.Hostname $all.Backends $location $all.DynamicConfigurationEnabled }}
{{ if (or (eq $location.Proxy.ProxyRedirectFrom "default") (eq $location.Proxy.ProxyRedirectFrom "off")) }}
proxy_redirect {{ $location.Proxy.ProxyRedirectFrom }};
{{ else }}