Clarify log messages in controller pkg

This commit is contained in:
Antoine Cotten 2018-06-13 20:15:45 +02:00
parent 55679aa268
commit 47bea35492
No known key found for this signature in database
GPG key ID: 94637E68D4A79DD0
6 changed files with 254 additions and 273 deletions

View file

@ -77,8 +77,6 @@ var (
)
// NewNGINXController creates a new NGINX Ingress controller.
// If the environment variable NGINX_BINARY exists it will be used
// as source for nginx commands
func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXController {
ngx := os.Getenv("NGINX_BINARY")
if ngx == "" {
@ -93,7 +91,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
h, err := dns.GetSystemNameServers()
if err != nil {
glog.Warningf("unexpected error reading system nameservers: %v", err)
glog.Warningf("Error reading system nameservers: %v", err)
}
n := &NGINXController{
@ -116,8 +114,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
fileSystem: fs,
// create an empty configuration.
runningConfig: &ingress.Configuration{},
runningConfig: new(ingress.Configuration),
Proxy: &TCPProxy{},
}
@ -153,7 +150,7 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
UseNodeInternalIP: config.UseNodeInternalIP,
})
} else {
glog.Warning("Update of ingress status is disabled (flag --update-status=false was specified)")
glog.Warning("Update of Ingress status is disabled (flag --update-status)")
}
onTemplateChange := func() {
@ -162,20 +159,20 @@ func NewNGINXController(config *Configuration, fs file.Filesystem) *NGINXControl
// this error is different from the rest because it must be clear why nginx is not working
glog.Errorf(`
-------------------------------------------------------------------------------
Error loading new template : %v
Error loading new template: %v
-------------------------------------------------------------------------------
`, err)
return
}
n.t = template
glog.Info("new NGINX template loaded")
glog.Info("New NGINX configuration template loaded.")
n.SetForceReload(true)
}
ngxTpl, err := ngx_template.NewTemplate(tmplPath, fs)
if err != nil {
glog.Fatalf("invalid NGINX template: %v", err)
glog.Fatalf("Invalid NGINX configuration template: %v", err)
}
n.t = ngxTpl
@ -187,7 +184,7 @@ Error loading new template : %v
_, err = watch.NewFileWatcher(tmplPath, onTemplateChange)
if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err)
glog.Fatalf("Error creating file watcher for %v: %v", tmplPath, err)
}
filesToWatch := []string{}
@ -205,16 +202,16 @@ Error loading new template : %v
})
if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err)
glog.Fatalf("Error creating file watchers: %v", err)
}
for _, f := range filesToWatch {
_, err = watch.NewFileWatcher(f, func() {
glog.Info("file %v changed. Reloading NGINX", f)
glog.Infof("File %v changed. Reloading NGINX", f)
n.SetForceReload(true)
})
if err != nil {
glog.Fatalf("unexpected error creating file watcher: %v", err)
glog.Fatalf("Error creating file watcher for %v: %v", f, err)
}
}
@ -223,7 +220,7 @@ Error loading new template : %v
return n
}
// NGINXController ...
// NGINXController describes a NGINX Ingress controller.
type NGINXController struct {
cfg *Configuration
@ -237,15 +234,15 @@ type NGINXController struct {
syncRateLimiter flowcontrol.RateLimiter
// stopLock is used to enforce only a single call to Stop is active.
// Needed because we allow stopping through an http endpoint and
// stopLock is used to enforce that only a single call to Stop is
// active at a given time. We allow stopping through an HTTP endpoint and
// allowing concurrent stoppers leads to stack traces.
stopLock *sync.Mutex
stopCh chan struct{}
updateCh *channels.RingChannel
// ngxErrCh channel used to detect errors with the nginx processes
// ngxErrCh is used to detect errors with the NGINX processes
ngxErrCh chan error
// runningConfig contains the running configuration in the Backend
@ -261,7 +258,6 @@ type NGINXController struct {
stats *statsCollector
statusModule statusModule
// returns true if IPV6 is enabled in the pod
isIPV6Enabled bool
isShuttingDown bool
@ -273,9 +269,9 @@ type NGINXController struct {
fileSystem filesystem.Filesystem
}
// Start start a new NGINX master process running in foreground.
// Start starts a new NGINX master process running in the foreground.
func (n *NGINXController) Start() {
glog.Infof("starting Ingress controller")
glog.Infof("Starting NGINX Ingress controller")
n.store.Run(n.stopCh)
@ -285,7 +281,7 @@ func (n *NGINXController) Start() {
cmd := exec.Command(n.binary, "-c", cfgPath)
// put nginx in another process group to prevent it
// put NGINX in another process group to prevent it
// to receive signals meant for the controller
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
@ -296,7 +292,7 @@ func (n *NGINXController) Start() {
n.setupSSLProxy()
}
glog.Info("starting NGINX process...")
glog.Info("Starting NGINX process")
n.start(cmd)
go n.syncQueue.Run(time.Second, n.stopCh)
@ -339,7 +335,7 @@ func (n *NGINXController) Start() {
n.syncQueue.Enqueue(evt.Obj)
} else {
glog.Warningf("unexpected event type received %T", event)
glog.Warningf("Unexpected event type received %T", event)
}
case <-n.stopCh:
break
@ -354,20 +350,19 @@ func (n *NGINXController) Stop() error {
n.stopLock.Lock()
defer n.stopLock.Unlock()
// Only try draining the workqueue if we haven't already.
if n.syncQueue.IsShuttingDown() {
return fmt.Errorf("shutdown already in progress")
}
glog.Infof("shutting down controller queues")
glog.Infof("Shutting down controller queues")
close(n.stopCh)
go n.syncQueue.Shutdown()
if n.syncStatus != nil {
n.syncStatus.Shutdown()
}
// Send stop signal to Nginx
glog.Info("stopping NGINX process...")
// send stop signal to NGINX
glog.Info("Stopping NGINX process")
cmd := exec.Command(n.binary, "-c", cfgPath, "-s", "quit")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
@ -376,7 +371,7 @@ func (n *NGINXController) Stop() error {
return err
}
// Wait for the Nginx process disappear
// wait for the NGINX process to terminate
timer := time.NewTicker(time.Second * 1)
for range timer.C {
if !process.IsNginxRunning() {
@ -393,7 +388,7 @@ func (n *NGINXController) start(cmd *exec.Cmd) {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Start(); err != nil {
glog.Fatalf("nginx error: %v", err)
glog.Fatalf("NGINX error: %v", err)
n.ngxErrCh <- err
return
}
@ -416,7 +411,7 @@ func (n NGINXController) DefaultEndpoint() ingress.Endpoint {
// running the command "nginx -t" using a temporary file.
func (n NGINXController) testTemplate(cfg []byte) error {
if len(cfg) == 0 {
return fmt.Errorf("invalid nginx configuration (empty)")
return fmt.Errorf("Invalid NGINX configuration (empty)")
}
tmpfile, err := ioutil.TempFile("", "nginx-cfg")
if err != nil {
@ -443,14 +438,10 @@ Error: %v
return nil
}
// OnUpdate is called periodically by syncQueue to keep the configuration in sync.
//
// 1. converts configmap configuration to custom configuration object
// 2. write the custom template (the complexity depends on the implementation)
// 3. write the configuration file
//
// returning nil implies the backend will be reloaded.
// if an error is returned means requeue the update
// OnUpdate is called by the synchronization loop whenever configuration
// changes were detected. The received backend Configuration is merged with the
// configuration ConfigMap before generating the final configuration file.
// Returns nil in case the backend was successfully reloaded.
func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
cfg := n.store.GetBackendConfiguration()
cfg.Resolver = n.resolver
@ -460,7 +451,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
for _, pb := range ingressCfg.PassthroughBackends {
svc := pb.Service
if svc == nil {
glog.Warningf("missing service for PassthroughBackends %v", pb.Backend)
glog.Warningf("Missing Service for SSL Passthrough backend %q", pb.Backend)
continue
}
port, err := strconv.Atoi(pb.Port.String())
@ -480,7 +471,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
}
}
//TODO: Allow PassthroughBackends to specify they support proxy-protocol
// TODO: Allow PassthroughBackends to specify they support proxy-protocol
servers = append(servers, &TCPServer{
Hostname: pb.Hostname,
IP: svc.Spec.ClusterIP,
@ -499,10 +490,10 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
n.setupMonitor(defaultStatusModule)
}
// NGINX cannot resize the hash tables used to store server names.
// For this reason we check if the defined size defined is correct
// for the FQDN defined in the ingress rules adjusting the value
// if is required.
// NGINX cannot resize the hash tables used to store server names. For
// this reason we check if the current size is correct for the host
// names defined in the Ingress rules and adjust the value if
// necessary.
// https://trac.nginx.org/nginx/ticket/352
// https://trac.nginx.org/nginx/ticket/631
var longestName int
@ -520,7 +511,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
} else {
n = fmt.Sprintf("www.%v", srv.Hostname)
}
glog.V(3).Infof("creating redirect from %v to %v", srv.Hostname, n)
glog.V(3).Infof("Creating redirect from %q to %q", srv.Hostname, n)
if _, ok := redirectServers[n]; !ok {
found := false
for _, esrv := range ingressCfg.Servers {
@ -537,24 +528,24 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
}
if cfg.ServerNameHashBucketSize == 0 {
nameHashBucketSize := nginxHashBucketSize(longestName)
glog.V(3).Infof("adjusting ServerNameHashBucketSize variable to %v", nameHashBucketSize)
glog.V(3).Infof("Adjusting ServerNameHashBucketSize variable to %d", nameHashBucketSize)
cfg.ServerNameHashBucketSize = nameHashBucketSize
}
serverNameHashMaxSize := nextPowerOf2(serverNameBytes)
if cfg.ServerNameHashMaxSize < serverNameHashMaxSize {
glog.V(3).Infof("adjusting ServerNameHashMaxSize variable to %v", serverNameHashMaxSize)
glog.V(3).Infof("Adjusting ServerNameHashMaxSize variable to %d", serverNameHashMaxSize)
cfg.ServerNameHashMaxSize = serverNameHashMaxSize
}
// the limit of open files is per worker process
// and we leave some room to avoid consuming all the FDs available
wp, err := strconv.Atoi(cfg.WorkerProcesses)
glog.V(3).Infof("number of worker processes: %v", wp)
glog.V(3).Infof("Number of worker processes: %d", wp)
if err != nil {
wp = 1
}
maxOpenFiles := (sysctlFSFileMax() / wp) - 1024
glog.V(2).Infof("maximum number of open file descriptors : %v", maxOpenFiles)
glog.V(2).Infof("Maximum number of open file descriptors: %d", maxOpenFiles)
if maxOpenFiles < 1024 {
// this means the value of RLIMIT_NOFILE is too low.
maxOpenFiles = 1024
@ -564,7 +555,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.ProxySetHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.ProxySetHeaders)
if err != nil {
glog.Warningf("unexpected error reading configmap %v: %v", cfg.ProxySetHeaders, err)
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.ProxySetHeaders, err)
}
setHeaders = cmap.Data
@ -574,7 +565,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if cfg.AddHeaders != "" {
cmap, err := n.store.GetConfigMap(cfg.AddHeaders)
if err != nil {
glog.Warningf("unexpected error reading configmap %v: %v", cfg.AddHeaders, err)
glog.Warningf("Error reading ConfigMap %q from local store: %v", cfg.AddHeaders, err)
}
addHeaders = cmap.Data
@ -586,7 +577,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
secret, err := n.store.GetSecret(secretName)
if err != nil {
glog.Warningf("unexpected error reading secret %v: %v", secretName, err)
glog.Warningf("Error reading Secret %q from local store: %v", secretName, err)
}
nsSecName := strings.Replace(secretName, "/", "-", -1)
@ -595,7 +586,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
if ok {
pemFileName, err := ssl.AddOrUpdateDHParam(nsSecName, dh, n.fileSystem)
if err != nil {
glog.Warningf("unexpected error adding or updating dhparam %v file: %v", nsSecName, err)
glog.Warningf("Error adding or updating dhparam file %v: %v", nsSecName, err)
} else {
sslDHParam = pemFileName
}
@ -652,16 +643,13 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return err
}
// executing diff can return exit code != 0
// TODO: executing diff can return exit code != 0
diffOutput, _ := exec.Command("diff", "-u", cfgPath, tmpfile.Name()).CombinedOutput()
glog.Infof("NGINX configuration diff\n")
glog.Infof("%v\n", string(diffOutput))
glog.Infof("NGINX configuration diff:\n%v", string(diffOutput))
// Do not use defer to remove the temporal file.
// This is helpful when there is an error in the
// temporal configuration (we can manually inspect the file).
// Only remove the file when no error occurred.
// we do not defer the deletion of temp files in order
// to keep them around for inspection in case of error
os.Remove(tmpfile.Name())
}
}
@ -679,9 +667,10 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error {
return nil
}
// nginxHashBucketSize computes the correct nginx hash_bucket_size for a hash with the given longest key
// nginxHashBucketSize computes the correct NGINX hash_bucket_size for a hash
// with the given longest key.
func nginxHashBucketSize(longestString int) int {
// See https://github.com/kubernetes/ingress-nginx/issues/623 for an explanation
// see https://github.com/kubernetes/ingress-nginx/issues/623 for an explanation
wordSize := 8 // Assume 64 bit CPU
n := longestString + 2
aligned := (n + wordSize - 1) & ^(wordSize - 1)
@ -708,7 +697,7 @@ func (n *NGINXController) setupSSLProxy() {
sslPort := n.cfg.ListenPorts.HTTPS
proxyPort := n.cfg.ListenPorts.SSLProxy
glog.Info("starting TLS proxy for SSL passthrough")
glog.Info("Starting TLS proxy for SSL Passthrough")
n.Proxy = &TCPProxy{
Default: &TCPServer{
Hostname: "localhost",
@ -725,32 +714,33 @@ func (n *NGINXController) setupSSLProxy() {
proxyList := &proxyproto.Listener{Listener: listener, ProxyHeaderTimeout: cfg.ProxyProtocolHeaderTimeout}
// start goroutine that accepts tcp connections in port 443
// accept TCP connections on the configured HTTPS port
go func() {
for {
var conn net.Conn
var err error
if n.store.GetBackendConfiguration().UseProxyProtocol {
// we need to wrap the listener in order to decode
// proxy protocol before handling the connection
// wrap the listener in order to decode Proxy
// Protocol before handling the connection
conn, err = proxyList.Accept()
} else {
conn, err = listener.Accept()
}
if err != nil {
glog.Warningf("unexpected error accepting tcp connection: %v", err)
glog.Warningf("Error accepting TCP connection: %v", err)
continue
}
glog.V(3).Infof("remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
glog.V(3).Infof("Handling connection from remote address %s to local %s", conn.RemoteAddr(), conn.LocalAddr())
go n.Proxy.Handle(conn)
}
}()
}
// IsDynamicConfigurationEnough decides if the new configuration changes can be dynamically applied without reloading
// IsDynamicConfigurationEnough returns whether a Configuration can be
// dynamically applied, without reloading the backend.
func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool {
copyOfRunningConfig := *n.runningConfig
copyOfPcfg := *pcfg
@ -761,8 +751,8 @@ func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configurati
return copyOfRunningConfig.Equal(&copyOfPcfg)
}
// configureDynamically JSON encodes new Backends and POSTs it to an internal HTTP endpoint
// that is handled by Lua
// configureDynamically encodes new Backends in JSON format and POSTs the
// payload to an internal HTTP endpoint handled by Lua.
func configureDynamically(pcfg *ingress.Configuration, port int) error {
backends := make([]*ingress.Backend, len(pcfg.Backends))
@ -796,7 +786,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error {
return err
}
glog.V(2).Infof("posting backends configuration: %s", buf)
glog.V(2).Infof("Posting backends configuration: %s", buf)
url := fmt.Sprintf("http://localhost:%d/configuration/backends", port)
resp, err := http.Post(url, "application/json", bytes.NewReader(buf))
@ -806,7 +796,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int) error {
defer func() {
if err := resp.Body.Close(); err != nil {
glog.Warningf("error while closing response body: \n%v", err)
glog.Warningf("Error while closing response body:\n%v", err)
}
}()