github.com/TykTechnologies/tyk@v2.9.5+incompatible/gateway/host_checker_manager_test.go

package gateway

import (
	"bytes"
	"net/http"
	"testing"
	"text/template"
	"time"

	"github.com/TykTechnologies/tyk/config"
	"github.com/TykTechnologies/tyk/storage"
	uuid "github.com/satori/go.uuid"
)

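// TestHostCheckerManagerInit verifies that Init generates an Id and initializes the
// unhealthyHostList and resetsInitiated structures.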
func TestHostCheckerManagerInit(t *testing.T) {
	ts := StartTest()
	defer ts.Close()

	hc := HostCheckerManager{}
	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)

	if hc.Id == "" {
		t.Error("HostCheckerManager should create an Id on Init")
	}
	if hc.unhealthyHostList == nil {
		t.Error("HostCheckerManager should initialize unhealthyHostList on Init")
	}
	if hc.resetsInitiated == nil {
		t.Error("HostCheckerManager should initialize resetsInitiated on Init")
	}
}

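// TestAmIPolling covers AmIPolling with no storage configured, with two managers
// competing in the same poller group, and with the default (empty) poller group.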
func TestAmIPolling(t *testing.T) {
	hc := HostCheckerManager{}

	polling := hc.AmIPolling()
	if polling {
		t.Error("AmIPolling should fail when storage is not configured")
	}

	// Test that when two host checker managers are active, only one takes control of the uptime checks.
	globalConf := config.Global()
	globalConf.UptimeTests.PollerGroup = "TEST"
	config.SetGlobal(globalConf)

	ts := StartTest()
	defer ts.Close()

	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)
	hc2 := HostCheckerManager{}
	hc2.Init(redisStorage)

	polling = hc.AmIPolling()
	pollingHc2 := hc2.AmIPolling()
	if !polling || pollingHc2 {
		t.Error("Only one HostCheckerManager should take control of the uptime checks")
	}

	// Test that the PollerCacheKey includes the poller_group.
	activeInstance, err := hc.store.GetKey("PollerActiveInstanceID.TEST")
	if err != nil {
		t.Error("PollerActiveInstanceID.TEST should exist in redis.", activeInstance)
	}
	if activeInstance != hc.Id {
		t.Error("PollerActiveInstanceID.TEST value should be hc.Id")
	}

	// Test that the PollerCacheKey does not include the poller_group by default.
	ResetTestConfig()
	emptyRedis()
	hc = HostCheckerManager{}

	redisStorage = &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)
	hc.AmIPolling()

	activeInstance, err = hc.store.GetKey("PollerActiveInstanceID")
	if err != nil {
		t.Error("PollerActiveInstanceID should exist in redis.", activeInstance)
	}
	if activeInstance != hc.Id {
		t.Error("PollerActiveInstanceID value should be hc.Id")
	}
}

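// TestGenerateCheckerId verifies that GenerateCheckerId produces a non-empty UUIDv4 Id.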
func TestGenerateCheckerId(t *testing.T) {
	hc := HostCheckerManager{}
	hc.GenerateCheckerId()
	if hc.Id == "" {
		t.Error("HostCheckerManager should generate an Id on GenerateCheckerId")
	}

	id, err := uuid.FromString(hc.Id)
	if err != nil || id.Version() != 4 {
		t.Error("HostCheckerManager should generate a UUIDv4 Id")
	}
}

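// TestCheckActivePollerLoop verifies that the poller loop claims the active poller key in Redis.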
func TestCheckActivePollerLoop(t *testing.T) {
	ts := StartTest()
	defer ts.Close()
	emptyRedis()

	hc := &HostCheckerManager{}
	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)
	// Defer stopping the CheckActivePollerLoop.
	defer func(hc *HostCheckerManager) {
		hc.stopLoop = true
	}(hc)

	go hc.CheckActivePollerLoop()

	found := false

	// Give the loop up to 5 attempts to claim the active poller key.
	for i := 0; i < 5; i++ {
		activeInstance, err := hc.store.GetKey("PollerActiveInstanceID")
		if activeInstance == hc.Id && err == nil {
			found = true
			break
		}
		// Brief pause between retries so the poller goroutine has time to claim the key.
		time.Sleep(200 * time.Millisecond)
	}

	if !found {
		t.Error("activeInstance should be hc.Id when the CheckActivePollerLoop is running")
	}
}

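// TestStartPoller verifies that StartPoller initializes the HostUptimeChecker.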
func TestStartPoller(t *testing.T) {
	hc := HostCheckerManager{}
	hc.StartPoller()

	if hc.checker == nil {
		t.Error("StartPoller should have initialized the HostUptimeChecker")
	}
}

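// TestRecordUptimeAnalytics records an uptime health report for a sample API and
// verifies that the analytics key is written to Redis.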
func TestRecordUptimeAnalytics(t *testing.T) {
	ts := StartTest()
	defer ts.Close()
	emptyRedis()

	hc := &HostCheckerManager{}
	redisStorage := &storage.RedisCluster{KeyPrefix: "host-checker:"}
	hc.Init(redisStorage)

	// Render the sample uptime-test API definition template and register the resulting spec,
	// so the APIID referenced by the health report can be resolved.
	specTmpl := template.Must(template.New("spec").Parse(sampleUptimeTestAPI))

	tmplData := struct {
		Host1, Host2 string
	}{
		testHttpFailureAny,
		testHttpFailureAny,
	}

	specBuf := &bytes.Buffer{}
	specTmpl.ExecuteTemplate(specBuf, specTmpl.Name(), &tmplData)

	spec := CreateDefinitionFromString(specBuf.String())
	spec.UptimeTests.Config.ExpireUptimeAnalyticsAfter = 30
	apisMu.Lock()
	apisByID = map[string]*APISpec{spec.APIID: spec}
	apisMu.Unlock()
	defer func() {
		apisMu.Lock()
		apisByID = make(map[string]*APISpec)
		apisMu.Unlock()
	}()

	// Record a successful health check for the registered API and verify the analytics key is written.
	hostData := HostData{
		CheckURL: "/test",
		Method:   http.MethodGet,
	}
	report := HostHealthReport{
		HostData:     hostData,
		ResponseCode: http.StatusOK,
		Latency:      10.00,
		IsTCPError:   false,
	}
	report.MetaData = make(map[string]string)
	report.MetaData[UnHealthyHostMetaDataAPIKey] = spec.APIID

	err := hc.RecordUptimeAnalytics(report)
	if err != nil {
		t.Error("RecordUptimeAnalytics shouldn't fail")
	}

	set, err := hc.store.Exists(UptimeAnalytics_KEYNAME)
	if err != nil || !set {
		t.Error("tyk-uptime-analytics should exist in redis.", err)
	}
}