github.com/spotahome/redis-operator@v1.2.4/operator/redisfailover/factory.go (about)

     1  package redisfailover
     2  
import (
	"context"
	"fmt"
	"time"

	"github.com/spotahome/kooper/v2/controller"
	"github.com/spotahome/kooper/v2/controller/leaderelection"
	kooperlog "github.com/spotahome/kooper/v2/log"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"

	"github.com/spotahome/redis-operator/log"
	"github.com/spotahome/redis-operator/metrics"
	rfservice "github.com/spotahome/redis-operator/operator/redisfailover/service"
	"github.com/spotahome/redis-operator/service/k8s"
	"github.com/spotahome/redis-operator/service/redis"
)
    22  
const (
	// resync is the interval at which the controller re-lists all watched
	// RedisFailover resources to reconcile missed events.
	resync = 30 * time.Second
	// operatorName identifies this operator.
	operatorName = "redis-operator"
	// lockKey is the name of the lease used for leader election.
	lockKey = "redis-failover-lease"
)
    28  
    29  // New will create an operator that is responsible of managing all the required stuff
    30  // to create redis failovers.
    31  func New(cfg Config, k8sService k8s.Services, k8sClient kubernetes.Interface, lockNamespace string, redisClient redis.Client, kooperMetricsRecorder metrics.Recorder, logger log.Logger) (controller.Controller, error) {
    32  	// Create internal services.
    33  	rfService := rfservice.NewRedisFailoverKubeClient(k8sService, logger, kooperMetricsRecorder)
    34  	rfChecker := rfservice.NewRedisFailoverChecker(k8sService, redisClient, logger, kooperMetricsRecorder)
    35  	rfHealer := rfservice.NewRedisFailoverHealer(k8sService, redisClient, logger)
    36  
    37  	// Create the handlers.
    38  	rfHandler := NewRedisFailoverHandler(cfg, rfService, rfChecker, rfHealer, k8sService, kooperMetricsRecorder, logger)
    39  	rfRetriever := NewRedisFailoverRetriever(k8sService)
    40  
    41  	kooperLogger := kooperlogger{Logger: logger.WithField("operator", "redisfailover")}
    42  	// Leader election service.
    43  	leSVC, err := leaderelection.NewDefault(lockKey, lockNamespace, k8sClient, kooperLogger)
    44  	if err != nil {
    45  		return nil, err
    46  	}
    47  
    48  	// Create our controller.
    49  	return controller.New(&controller.Config{
    50  		Handler:           rfHandler,
    51  		Retriever:         rfRetriever,
    52  		LeaderElector:     leSVC,
    53  		MetricsRecorder:   kooperMetricsRecorder,
    54  		Logger:            kooperLogger,
    55  		Name:              "redisfailover",
    56  		ResyncInterval:    resync,
    57  		ConcurrentWorkers: cfg.Concurrency,
    58  	})
    59  }
    60  
    61  func NewRedisFailoverRetriever(cli k8s.Services) controller.Retriever {
    62  	return controller.MustRetrieverFromListerWatcher(&cache.ListWatch{
    63  		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
    64  			return cli.ListRedisFailovers(context.Background(), "", options)
    65  		},
    66  		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
    67  			return cli.WatchRedisFailovers(context.Background(), "", options)
    68  		},
    69  	})
    70  }
    71  
// kooperlogger adapts the operator's log.Logger to the kooper logging
// interface: the embedded Logger supplies the level methods, and WithKV
// (below) supplies structured key/value support.
type kooperlogger struct {
	log.Logger
}
    75  
    76  func (k kooperlogger) WithKV(kv kooperlog.KV) kooperlog.Logger {
    77  	return kooperlogger{Logger: k.Logger.WithFields(kv)}
    78  }