Merge pull request #3885 from aledbf/status

Refactor status update
Commit e0793650d0 by Kubernetes Prow Robot, 2019-03-13 08:19:36 -07:00 (committed by GitHub)
8 changed files with 260 additions and 127 deletions
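In short: leader election moves out of the status package and into the controller itself, the status syncer now runs only while this replica holds the election lock, and SSL-certificate expiration metrics are refreshed only by the leader. A minimal sketch of the API change implied by the hunks below (inferred from this diff; not the full interface of the status package):

// assumed shape, inferred from the diff below — not the complete interface
type Syncer interface {
	// Run starts status updates and returns when stopCh is closed,
	// i.e. when this replica loses the leader election.
	Run(stopCh chan struct{})
}

The old status.Sync interface exposed Run() with no arguments because the status package managed its own leader election; with this change the controller owns the election and hands the syncer a per-term stop channel.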


@@ -188,7 +188,11 @@ func (n *NGINXController) syncIngress(interface{}) error {
 		klog.Infof("Backend successfully reloaded.")
 		n.metricCollector.ConfigSuccess(hash, true)
 		n.metricCollector.IncReloadCount()
-		n.metricCollector.SetSSLExpireTime(servers)
+
+		if n.isLeader() {
+			klog.V(2).Infof("Updating ssl expiration metrics.")
+			n.metricCollector.SetSSLExpireTime(servers)
+		}
 	}
 
 	isFirstSync := n.runningConfig.Equal(&ingress.Configuration{})


@@ -31,6 +31,7 @@ import (
 	"strconv"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"syscall"
 	"text/template"
 	"time"
@@ -115,6 +116,7 @@ func NewNGINXController(config *Configuration, mc metric.Collector, fs file.File
 	if err != nil {
 		klog.Fatalf("unexpected error obtaining pod information: %v", err)
 	}
+	n.podInfo = pod
 
 	n.store = store.New(
 		config.EnableSSLChainCompletion,
@@ -132,15 +134,13 @@ func NewNGINXController(config *Configuration, mc metric.Collector, fs file.File
 		config.DisableCatchAll)
 
 	n.syncQueue = task.NewTaskQueue(n.syncIngress)
 
 	if config.UpdateStatus {
-		n.syncStatus = status.NewStatusSyncer(status.Config{
+		n.syncStatus = status.NewStatusSyncer(pod, status.Config{
 			Client:                 config.Client,
 			PublishService:         config.PublishService,
 			PublishStatusAddress:   config.PublishStatusAddress,
 			IngressLister:          n.store,
-			ElectionID:             config.ElectionID,
-			IngressClass:           class.IngressClass,
-			DefaultIngressClass:    class.DefaultClass,
 			UpdateStatusOnShutdown: config.UpdateStatusOnShutdown,
 			UseNodeInternalIP:      config.UseNodeInternalIP,
 		})
@@ -215,13 +215,15 @@ Error loading new template: %v
 // NGINXController describes a NGINX Ingress controller.
 type NGINXController struct {
+	podInfo *k8s.PodInfo
+
 	cfg *Configuration
 
 	recorder record.EventRecorder
 
 	syncQueue *task.Queue
 
-	syncStatus status.Sync
+	syncStatus status.Syncer
 
 	syncRateLimiter flowcontrol.RateLimiter
@@ -254,6 +256,8 @@ type NGINXController struct {
 	fileSystem filesystem.Filesystem
 
 	metricCollector metric.Collector
+
+	currentLeader uint32
 }
 
 // Start starts a new NGINX master process running in the foreground.
@@ -262,10 +266,35 @@ func (n *NGINXController) Start() {
 	n.store.Run(n.stopCh)
 
-	if n.syncStatus != nil {
-		go n.syncStatus.Run()
-	}
+	// we need to use the defined ingress class to allow multiple leaders
+	// in order to update information about ingress status
+	electionID := fmt.Sprintf("%v-%v", n.cfg.ElectionID, class.DefaultClass)
+	if class.IngressClass != "" {
+		electionID = fmt.Sprintf("%v-%v", n.cfg.ElectionID, class.IngressClass)
+	}
+
+	setupLeaderElection(&leaderElectionConfig{
+		Client:     n.cfg.Client,
+		ElectionID: electionID,
+		OnStartedLeading: func(stopCh chan struct{}) {
+			if n.syncStatus != nil {
+				go n.syncStatus.Run(stopCh)
+			}
+
+			n.setLeader(true)
+			n.metricCollector.OnStartedLeading(electionID)
+			// manually update SSL expiration metrics
+			// (to not wait for a reload)
+			n.metricCollector.SetSSLExpireTime(n.runningConfig.Servers)
+		},
+		OnStoppedLeading: func() {
+			n.setLeader(false)
+			n.metricCollector.OnStoppedLeading(electionID)
+		},
+		PodName:      n.podInfo.Name,
+		PodNamespace: n.podInfo.Namespace,
+	})
 
 	cmd := nginxExecCommand()
 
 	// put NGINX in another process group to prevent it
@@ -1099,3 +1128,15 @@ func buildRedirects(servers []*ingress.Server) []*redirect {
 	return redirectServers
 }
+
+func (n *NGINXController) setLeader(leader bool) {
+	var i uint32
+	if leader {
+		i = 1
+	}
+	atomic.StoreUint32(&n.currentLeader, i)
+}
+
+func (n *NGINXController) isLeader() bool {
+	return atomic.LoadUint32(&n.currentLeader) != 0
+}
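Two details worth noting in the nginx.go changes above: the leader flag is stored with sync/atomic because the leader-election callbacks set it from a different goroutine than the sync loop that reads it in syncIngress, and the election ID is suffixed with the ingress class so that controllers watching different classes each hold their own lock. A standalone sketch of that derivation (the literal values are illustrative; "ingress-controller-leader" stands in for the --election-id default and "nginx" for class.DefaultClass):

package main

import "fmt"

// electionID mirrors the suffixing logic in Start() above.
func electionID(base, defaultClass, ingressClass string) string {
	id := fmt.Sprintf("%v-%v", base, defaultClass)
	if ingressClass != "" {
		id = fmt.Sprintf("%v-%v", base, ingressClass)
	}
	return id
}

func main() {
	// a controller running with the default class
	fmt.Println(electionID("ingress-controller-leader", "nginx", ""))
	// a second controller handling the "internal" class gets its own lock
	fmt.Println(electionID("ingress-controller-leader", "nginx", "internal"))
}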


@@ -0,0 +1,123 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"os"
	"time"

	"k8s.io/klog"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
)

type leaderElectionConfig struct {
	PodName          string
	PodNamespace     string
	Client           clientset.Interface
	ElectionID       string
	OnStartedLeading func(chan struct{})
	OnStoppedLeading func()
}

func setupLeaderElection(config *leaderElectionConfig) {
	var elector *leaderelection.LeaderElector

	// start a new context
	ctx := context.Background()

	var cancelContext context.CancelFunc

	var newLeaderCtx = func(ctx context.Context) context.CancelFunc {
		// allow to cancel the context in case we stop being the leader
		leaderCtx, cancel := context.WithCancel(ctx)
		go elector.Run(leaderCtx)
		return cancel
	}

	var stopCh chan struct{}
	callbacks := leaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) {
			klog.V(2).Infof("I am the new leader")
			stopCh = make(chan struct{})

			if config.OnStartedLeading != nil {
				config.OnStartedLeading(stopCh)
			}
		},
		OnStoppedLeading: func() {
			klog.V(2).Info("I am not leader anymore")
			close(stopCh)

			// cancel the context
			cancelContext()
			cancelContext = newLeaderCtx(ctx)

			if config.OnStoppedLeading != nil {
				config.OnStoppedLeading()
			}
		},
		OnNewLeader: func(identity string) {
			klog.Infof("new leader elected: %v", identity)
		},
	}

	broadcaster := record.NewBroadcaster()
	hostname, _ := os.Hostname()

	recorder := broadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{
		Component: "ingress-leader-elector",
		Host:      hostname,
	})

	lock := resourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{Namespace: config.PodNamespace, Name: config.ElectionID},
		Client:        config.Client.CoreV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      config.PodName,
			EventRecorder: recorder,
		},
	}

	ttl := 30 * time.Second

	var err error
	elector, err = leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:          &lock,
		LeaseDuration: ttl,
		RenewDeadline: ttl / 2,
		RetryPeriod:   ttl / 4,
		Callbacks:     callbacks,
	})
	if err != nil {
		klog.Fatalf("unexpected error starting leader election: %v", err)
	}

	cancelContext = newLeaderCtx(ctx)
}
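setupLeaderElection returns as soon as the elector is launched (elector.Run is started on its own goroutine via newLeaderCtx), and when leadership is lost it closes the stop channel handed to OnStartedLeading and restarts the elector so the pod rejoins the election. A condensed usage sketch under assumed names (kubeClient, statusSyncer, podName and podNamespace are placeholders; it assumes the imports of this package plus the status package — the real wiring is in Start() above):

// sketch only — placeholder identifiers, not part of this commit
func startLeaderGatedWork(kubeClient clientset.Interface, statusSyncer status.Syncer, podName, podNamespace string) {
	setupLeaderElection(&leaderElectionConfig{
		Client:       kubeClient,
		ElectionID:   "ingress-controller-leader-nginx",
		PodName:      podName,
		PodNamespace: podNamespace,
		OnStartedLeading: func(stopCh chan struct{}) {
			// stopCh is closed by the OnStoppedLeading callback above,
			// so leader-only loops exit when the lock is lost
			go statusSyncer.Run(stopCh)
		},
		OnStoppedLeading: func() {
			klog.Info("no longer the leader; status updates stop")
		},
	})
}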