Converted sticky session balancers into separate classes.
This commit is contained in:
parent
9170591185
commit
881e352d68
6 changed files with 174 additions and 177 deletions
|
|
@ -1,5 +1,3 @@
|
|||
local affinity_balanced = require("affinity.balanced")
|
||||
local affinity_persistent = require("affinity.persistent")
|
||||
local balancer_resty = require("balancer.resty")
|
||||
local util = require("util")
|
||||
local ck = require("resty.cookie")
|
||||
|
|
@ -10,48 +8,24 @@ local string_format = string.format
|
|||
local ngx_log = ngx.log
|
||||
local INFO = ngx.INFO
|
||||
|
||||
local _M = balancer_resty:new({ name = "sticky" })
|
||||
local _M = balancer_resty:new()
|
||||
local DEFAULT_COOKIE_NAME = "route"
|
||||
|
||||
|
||||
--- cookie_name returns the name of the session affinity cookie.
-- Falls back to DEFAULT_COOKIE_NAME when none is configured for the backend.
-- @treturn string The cookie name to use for session affinity.
function _M.cookie_name(self)
  local configured_name = self.cookie_session_affinity.name
  return configured_name or DEFAULT_COOKIE_NAME
end
|
||||
|
||||
--- init_affinity_mode resolves the affinity mode configured on the backend and
-- instantiates the matching mode-specific balancer.
-- @tparam table self The base sticky balancer the mode is recorded on.
-- @tparam table backend The backend configuration (sessionAffinityConfig.mode).
-- @treturn table A mode-specific sticky balancer instance.
local function init_affinity_mode(self, backend)
  local mode = backend["sessionAffinityConfig"]["mode"]

  -- default to 'balanced' for backwards compatibility with configs that
  -- predate the mode setting (missing or empty value); this single check
  -- replaces the redundant `or 'balanced'` + nil test the block had before
  if mode == nil or mode == '' then
    mode = 'balanced'
  end

  self.affinity_mode = mode

  if mode == 'persistent' then
    return affinity_persistent:new(self, backend)
  end

  -- every other value falls back to 'balanced'; warn on unknown modes
  if mode ~= 'balanced' then
    ngx.log(ngx.WARN, string.format("Invalid affinity mode '%s'! Using 'balanced' as a default.", mode))
  end

  return affinity_balanced:new(self, backend)
end
|
||||
|
||||
function _M.new(self, backend)
|
||||
function _M.new(self)
|
||||
local o = {
|
||||
instance = nil,
|
||||
affinity_mode = nil,
|
||||
traffic_shaping_policy = backend.trafficShapingPolicy,
|
||||
alternative_backends = backend.alternativeBackends,
|
||||
cookie_session_affinity = backend["sessionAffinityConfig"]["cookieSessionAffinity"]
|
||||
alternative_backends = nil,
|
||||
cookie_session_affinity = nil,
|
||||
traffic_shaping_policy = nil
|
||||
}
|
||||
|
||||
setmetatable(o, self)
|
||||
self.__index = self
|
||||
|
||||
return init_affinity_mode(o, backend)
|
||||
|
||||
return o
|
||||
end
|
||||
|
||||
function _M.get_cookie(self)
|
||||
|
|
@ -112,34 +86,8 @@ local function get_failed_upstreams()
|
|||
return indexed_upstream_addrs
|
||||
end
|
||||
|
||||
--- get_routing_key gets the current routing key from the cookie.
-- Abstract interface method: every affinity mode must override it.
-- @treturn string, string The routing key and an error message if an error occurred.
function _M.get_routing_key(self)
  -- reaching this base implementation means no affinity mode was initialized
  ngx.log(ngx.ERR, "[BUG] Failed to get routing key as no implementation has been provided!")
  return nil, nil
end
|
||||
|
||||
--- set_routing_key sets the current routing key on the cookie.
-- Abstract interface method: every affinity mode must override it.
-- @tparam string key The routing key to set on the cookie.
function _M.set_routing_key(self, key)
  -- reaching this base implementation means no affinity mode was initialized
  ngx.log(ngx.ERR, "[BUG] Failed to set routing key as no implementation has been provided!")
end
|
||||
|
||||
--- pick_new_upstream picks a new upstream while ignoring the given failed upstreams.
-- Abstract interface method: every affinity mode must override it.
-- @tparam {[string]=boolean} failed_upstreams A table of upstreams to ignore where the key is the endpoint and the value a boolean.
-- @treturn string, string The endpoint and its key.
function _M.pick_new_upstream(self, failed_upstreams)
  -- reaching this base implementation means no affinity mode was initialized
  ngx.log(ngx.ERR, "[BUG] Failed to pick new upstream as no implementation has been provided!")
  return nil, nil
end
|
||||
|
||||
local function should_set_cookie(self)
|
||||
|
||||
if self.cookie_session_affinity.locations and ngx.var.host then
|
||||
local locs = self.cookie_session_affinity.locations[ngx.var.host]
|
||||
if locs == nil then
|
||||
|
|
@ -193,19 +141,11 @@ function _M.balance(self)
|
|||
end
|
||||
|
||||
function _M.sync(self, backend)
|
||||
local changed = false
|
||||
|
||||
-- check and reinit affinity mode before syncing the balancer which will reinit the nodes
|
||||
if self.affinity_mode ~= backend.sessionAffinityConfig.mode then
|
||||
changed = true
|
||||
init_affinity_mode(self, backend)
|
||||
end
|
||||
|
||||
-- reload balancer nodes
|
||||
balancer_resty.sync(self, backend)
|
||||
|
||||
-- Reload the balancer if any of the annotations have changed.
|
||||
changed = changed or not util.deep_compare(
|
||||
local changed = not util.deep_compare(
|
||||
self.cookie_session_affinity,
|
||||
backend.sessionAffinityConfig.cookieSessionAffinity
|
||||
)
|
||||
|
|
@ -216,6 +156,8 @@ function _M.sync(self, backend)
|
|||
|
||||
ngx_log(INFO, string_format("[%s] nodes have changed for backend %s", self.name, backend.name))
|
||||
|
||||
self.traffic_shaping_policy = backend.trafficShapingPolicy
|
||||
self.alternative_backends = backend.alternativeBackends
|
||||
self.cookie_session_affinity = backend.sessionAffinityConfig.cookieSessionAffinity
|
||||
end
|
||||
|
||||
|
|
|
|||
57
rootfs/etc/nginx/lua/balancer/sticky_balanced.lua
Normal file
57
rootfs/etc/nginx/lua/balancer/sticky_balanced.lua
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
-- An affinity mode which makes sure connections are rebalanced when a deployment is scaled.
|
||||
-- The advantage of this mode is that the load on the pods will be redistributed.
|
||||
-- The drawback of this mode is that, when scaling up a deployment, roughly (n-c)/n users
|
||||
-- will lose their session, where c is the current number of pods and n is the new number of
|
||||
-- pods.
|
||||
--
|
||||
local balancer_sticky = require("balancer.sticky")
|
||||
local math = require("math")
|
||||
local resty_chash = require("resty.chash")
|
||||
local util = require("util")
|
||||
|
||||
local _M = balancer_sticky:new()
|
||||
|
||||
-- Consider the situation of N upstreams one of which is failing.
|
||||
-- Then the probability to obtain failing upstream after M iterations would be close to (1/N)**M.
|
||||
-- For the worst case (2 upstreams; 20 iterations) it would be ~10**(-6)
|
||||
-- which is much better than ~10**(-3) for 10 iterations.
|
||||
local MAX_UPSTREAM_CHECKS_COUNT = 20
|
||||
|
||||
--- new creates a balanced sticky balancer for the given backend.
-- Builds a consistent-hash ring over the backend's endpoints and registers
-- it as this balancer's instance before syncing node state.
-- @tparam table backend The backend configuration to build the balancer from.
-- @treturn table The initialized sticky_balanced balancer.
function _M.new(self, backend)
  local endpoints = util.get_nodes(backend.endpoints)

  local balancer = {
    name = "sticky_balanced",
    instance = resty_chash:new(endpoints)
  }

  setmetatable(balancer, self)
  self.__index = self

  balancer_sticky.sync(balancer, backend)

  return balancer
end
|
||||
|
||||
--- get_routing_key reads the routing key straight from the affinity cookie.
-- In balanced mode the cookie value IS the routing key; no decoding needed.
-- @treturn string, string The cookie value as routing key; error is always nil.
function _M.get_routing_key(self)
  local cookie_value = self:get_cookie()
  return cookie_value, nil
end
|
||||
|
||||
--- set_routing_key stores the given routing key directly in the affinity cookie.
-- @tparam string key The routing key to persist on the cookie.
function _M.set_routing_key(self, key)
  self:set_cookie(key)
end
|
||||
|
||||
--- pick_new_upstream searches for an upstream not present in the failed set.
-- Probes the consistent-hash ring with up to MAX_UPSTREAM_CHECKS_COUNT random
-- keys until a healthy endpoint is found.
-- @tparam {[string]=boolean} failed_upstreams Endpoints to skip (key = endpoint).
-- @treturn string, string The chosen endpoint and the hash key that selected it,
-- or nil, nil when no healthy endpoint was found within the probe budget.
function _M.pick_new_upstream(self, failed_upstreams)
  for attempt = 1, MAX_UPSTREAM_CHECKS_COUNT do
    -- randomize the key per attempt so each probe lands on a different ring slot
    local hash_key = string.format("%s.%s.%s", ngx.now() + attempt, ngx.worker.pid(), math.random(999999))
    local candidate = self.instance:find(hash_key)

    if not failed_upstreams[candidate] then
      return candidate, hash_key
    end
  end

  return nil, nil
end
|
||||
|
||||
return _M
|
||||
56
rootfs/etc/nginx/lua/balancer/sticky_persistent.lua
Normal file
56
rootfs/etc/nginx/lua/balancer/sticky_persistent.lua
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
-- An affinity mode which makes sure a session is always routed to the same endpoint.
|
||||
-- The advantage of this mode is that a user will never lose his session.
|
||||
-- The drawback of this mode is that when scaling up a deployment, sessions will not
|
||||
-- be rebalanced.
|
||||
--
|
||||
local balancer_sticky = require("balancer.sticky")
|
||||
local util = require("util")
|
||||
local util_nodemap = require("util.nodemap")
|
||||
|
||||
local _M = balancer_sticky:new()
|
||||
|
||||
--- new creates a persistent sticky balancer for the given backend.
-- Builds a hash-based node map over the backend's endpoints, salted with the
-- backend name so distinct backends map sessions differently.
-- @tparam table backend The backend configuration to build the balancer from.
-- @treturn table The initialized sticky_persistent balancer.
function _M.new(self, backend)
  local endpoints = util.get_nodes(backend.endpoints)
  local salt = backend["name"]

  local balancer = {
    name = "sticky_persistent",
    instance = util_nodemap:new(endpoints, salt)
  }

  setmetatable(balancer, self)
  self.__index = self

  balancer_sticky.sync(balancer, backend)

  return balancer
end
|
||||
|
||||
--- get_routing_key extracts the routing key from the session cookie.
-- The cookie value has the format <timestamp>.<worker-pid>.<routing-key>,
-- so the routing key is the segment after the last dot.
-- @treturn string, string The routing key and an error message if an error occurred.
function _M.get_routing_key(self)
  local cookie_value = self:get_cookie()

  if cookie_value then
    -- Lua patterns escape with '%', not '\': the previous pattern '[^\\.]+$'
    -- built the set [^\.], which wrongly excluded literal backslashes in
    -- addition to dots. '[^.]+$' matches everything after the last dot.
    local routing_key = string.match(cookie_value, '[^.]+$')

    if routing_key == nil then
      local err = string.format("Failed to extract routing key from cookie '%s'!", cookie_value)
      return nil, err
    end

    return routing_key, nil
  end

  -- no cookie present: not an error, just no key yet
  return nil, nil
end
|
||||
|
||||
--- set_routing_key encodes and stores the routing key in the session cookie.
-- The stored value has the format <timestamp>.<worker-pid>.<routing-key> so
-- the key can be recovered later by get_routing_key.
-- @tparam string key The routing key to persist.
function _M.set_routing_key(self, key)
  local cookie_value = string.format("%s.%s.%s", ngx.now(), ngx.worker.pid(), key)
  self:set_cookie(cookie_value)
end
|
||||
|
||||
--- pick_new_upstream picks a random upstream not present in the failed set.
-- Delegates to the node map; tail call forwards all of its return values.
-- @tparam {[string]=boolean} failed_upstreams Endpoints to exclude.
-- @treturn string, string The chosen endpoint and its key.
function _M.pick_new_upstream(self, failed_upstreams)
  local nodemap = self.instance
  return nodemap:random_except(failed_upstreams)
end
|
||||
|
||||
return _M
|
||||
Loading…
Add table
Add a link
Reference in a new issue