zotregistry.io/zot@v1.4.4-0.20231124084042-02a8ed785457/pkg/exporter/api/controller_test.go

//go:build !metrics
// +build !metrics

package api_test

import (
	"context"
	"crypto/rand"
	"errors"
	"fmt"
	"math/big"
	"net/http"
	"strings"
	"sync"
	"testing"
	"time"

	jsoniter "github.com/json-iterator/go"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	. "github.com/smartystreets/goconvey/convey"
	"gopkg.in/resty.v1"

	zotapi "zotregistry.io/zot/pkg/api"
	zotcfg "zotregistry.io/zot/pkg/api/config"
	"zotregistry.io/zot/pkg/exporter/api"
	"zotregistry.io/zot/pkg/extensions/monitoring"
	. "zotregistry.io/zot/pkg/test/common"
)

// getRandomLatencyN returns a non-negative random duration strictly smaller than max nanoseconds.
func getRandomLatencyN(max int64) time.Duration {
	nBig, err := rand.Int(rand.Reader, big.NewInt(max))
	if err != nil {
		panic(err)
	}

	return time.Duration(nBig.Int64())
}

func getRandomLatency() time.Duration {
	return getRandomLatencyN(int64(2 * time.Minute)) // a random latency (in nanoseconds) that can be up to 2 minutes
}

func TestNew(t *testing.T) {
	Convey("Make a new controller", t, func() {
		config := api.DefaultConfig()
		So(config, ShouldNotBeNil)
		So(api.NewController(config), ShouldNotBeNil)
	})
}

// isChannelDrained reports whether no further metrics are pending on the channel.
func isChannelDrained(ch chan prometheus.Metric) bool {
	time.Sleep(SleepTime)
	select {
	case <-ch:
		return false
	default:
		return true
	}
}

// readDefaultMetrics consumes and verifies the default metrics (zot_up and zot_info)
// that the collector always emits first.
func readDefaultMetrics(collector *api.Collector, chMetric chan prometheus.Metric) {
	var metric dto.Metric

	pmMetric := <-chMetric
	So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_up"].String())

	err := pmMetric.Write(&metric)
	So(err, ShouldBeNil)
	So(*metric.Gauge.Value, ShouldEqual, 1)

	pmMetric = <-chMetric
	So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_info"].String())

	err = pmMetric.Write(&metric)
	So(err, ShouldBeNil)
	So(*metric.Gauge.Value, ShouldEqual, 0)
}

func TestNewExporter(t *testing.T) {
	Convey("Make an exporter controller", t, func() {
		exporterConfig := api.DefaultConfig()
		So(exporterConfig, ShouldNotBeNil)
		exporterPort := GetFreePort()
		serverPort := GetFreePort()
		exporterConfig.Exporter.Port = exporterPort
		exporterConfig.Exporter.Metrics.Path = strings.TrimPrefix(t.TempDir(), "/tmp/")
		exporterConfig.Server.Port = serverPort
		exporterController := api.NewController(exporterConfig)

		Convey("Start the zot exporter", func() {
			go func() {
				// this blocks
				exporterController.Run()
				So(nil, ShouldNotBeNil) // Fail the test in case zot exporter unexpectedly exits
			}()
			time.Sleep(SleepTime)

			collector := api.GetCollector(exporterController)
			chMetric := make(chan prometheus.Metric)

			Convey("When zot server not running", func() {
				go func() {
					// this blocks
					collector.Collect(chMetric)
				}()
				// Read from the channel expected values
				pm := <-chMetric
				So(pm.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_up"].String())

				var metric dto.Metric
				err := pm.Write(&metric)
				So(err, ShouldBeNil)
				So(*metric.Gauge.Value, ShouldEqual, 0) // "zot_up=0" means zot server is not running

				// Check that no more data was written to the channel
				So(isChannelDrained(chMetric), ShouldEqual, true)
			})
			Convey("When zot server is running", func() {
				serverConfig := zotcfg.New()
				So(serverConfig, ShouldNotBeNil)
				baseURL := fmt.Sprintf(BaseURL, serverPort)
				serverConfig.HTTP.Port = serverPort
				serverConfig.BinaryType = "minimal"
				serverController := zotapi.NewController(serverConfig)
				So(serverController, ShouldNotBeNil)

				dir := t.TempDir()
				serverController.Config.Storage.RootDirectory = dir
				go func(ctrl *zotapi.Controller) {
					if err := ctrl.Init(context.Background()); err != nil {
						panic(err)
					}

					// this blocks
					if err := ctrl.Run(context.Background()); !errors.Is(err, http.ErrServerClosed) {
						panic(err)
					}
				}(serverController)
				defer func(ctrl *zotapi.Controller) {
					_ = ctrl.Server.Shutdown(context.TODO())
				}(serverController)
				// wait till ready
				for {
					_, err := resty.R().Get(baseURL)
					if err == nil {
						break
					}
					time.Sleep(SleepTime)
				}

				// Side effect of calling this endpoint is that it will enable metrics
				resp, err := resty.R().Get(baseURL + "/metrics")
				So(resp, ShouldNotBeNil)
				So(err, ShouldBeNil)
				So(resp.StatusCode(), ShouldEqual, 200)

				Convey("Collecting data: default metrics", func() {
					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)
					So(isChannelDrained(chMetric), ShouldEqual, true)
				})

				Convey("Collecting data: Test init value & that increment works on Counters", func() {
					// Testing initial value of the counter to be 1 after first incrementation call
					monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_uploads_total"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					So(isChannelDrained(chMetric), ShouldEqual, true)

					// Testing that counter is incremented by 1
					monitoring.IncUploadCounter(serverController.Metrics, "testrepo")
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_uploads_total"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 2)

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test that concurrent Counter increment requests work properly", func() {
					nBig, err := rand.Int(rand.Reader, big.NewInt(1000))
					if err != nil {
						panic(err)
					}
					reqsSize := int(nBig.Int64())
					for i := 0; i < reqsSize; i++ {
						monitoring.IncDownloadCounter(serverController.Metrics, "dummyrepo")
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)
					pm := <-chMetric
					So(pm.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_repo_downloads_total"].String())

					var metric dto.Metric
					err = pm.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, reqsSize)

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Summaries", func() {
					// Testing initial value of the summary counter to be 1 after first observation call
					var latency1, latency2 time.Duration
					latency1 = getRandomLatency()
					monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blobs/dummydigest", latency1)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency1.Seconds())

					So(isChannelDrained(chMetric), ShouldEqual, true)

					// Testing that summary counter is incremented by 1 and summary sum is properly updated
					latency2 = getRandomLatency()
					monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/testrepo/blobs/dummydigest", latency2)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 2)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, (latency1.Seconds())+(latency2.Seconds()))

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test that concurrent Summary observation requests work properly", func() {
					var latencySum float64
					nBig, err := rand.Int(rand.Reader, big.NewInt(1000))
					if err != nil {
						panic(err)
					}
					reqsSize := int(nBig.Int64())
					for i := 0; i < reqsSize; i++ {
						latency := getRandomLatency()
						latencySum += latency.Seconds()
						monitoring.ObserveHTTPRepoLatency(serverController.Metrics, "/v2/dummyrepo/manifests/testreference", latency)
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_count"].String())

					var metric dto.Metric
					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, reqsSize)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_repo_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latencySum)

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Histogram buckets", func() {
					// Testing initial value of the histogram counter to be 1 after first observation call
					latency := getRandomLatency()
					monitoring.ObserveHTTPMethodLatency(serverController.Metrics, "GET", latency)
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency.Seconds())

					for _, fvalue := range monitoring.GetDefaultBuckets() {
						pmMetric = <-chMetric
						So(pmMetric.Desc().String(), ShouldEqual,
							collector.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())

						err = pmMetric.Write(&metric)
						So(err, ShouldBeNil)
						if latency.Seconds() < fvalue {
							So(*metric.Counter.Value, ShouldEqual, 1)
						} else {
							So(*metric.Counter.Value, ShouldEqual, 0)
						}
					}

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init value & that observe works on Histogram buckets (lock latency)", func() {
					// Testing initial value of the histogram counter to be 1 after first observation call
					latency := getRandomLatency()
					monitoring.ObserveStorageLockLatency(serverController.Metrics, latency, "/tmp/zot", "RWLock")
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_storage_lock_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, 1)

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_storage_lock_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latency.Seconds())

					for _, fvalue := range monitoring.GetBuckets("zot.storage.lock.latency.seconds") {
						pmMetric = <-chMetric
						So(pmMetric.Desc().String(), ShouldEqual,
							collector.MetricsDesc["zot_storage_lock_latency_seconds_bucket"].String())

						err = pmMetric.Write(&metric)
						So(err, ShouldBeNil)
						if latency.Seconds() < fvalue {
							So(*metric.Counter.Value, ShouldEqual, 1)
						} else {
							So(*metric.Counter.Value, ShouldEqual, 0)
						}
					}

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Collecting data: Test init Histogram buckets \n", func() {
					// Generate a random latency within each bucket and finally test
					// that "higher" rank bucket counter is incremented by 1
					var latencySum float64

					dBuckets := monitoring.GetDefaultBuckets()
					for index, fvalue := range dBuckets {
						var latency time.Duration
						if index == 0 {
							// first bucket value
							latency = getRandomLatencyN(int64(fvalue * float64(time.Second)))
						} else {
							pvalue := dBuckets[index-1] // previous bucket value
							latency = time.Duration(pvalue*float64(time.Second)) +
								getRandomLatencyN(int64(dBuckets[0]*float64(time.Second)))
						}
						latencySum += latency.Seconds()
						monitoring.ObserveHTTPMethodLatency(serverController.Metrics, "GET", latency)
					}
					time.Sleep(SleepTime)

					go func() {
						// this blocks
						collector.Collect(chMetric)
					}()
					readDefaultMetrics(collector, chMetric)

					pmMetric := <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual, collector.MetricsDesc["zot_http_method_latency_seconds_count"].String())

					var metric dto.Metric
					err := pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, len(dBuckets))

					pmMetric = <-chMetric
					So(pmMetric.Desc().String(), ShouldEqual,
						collector.MetricsDesc["zot_http_method_latency_seconds_sum"].String())

					err = pmMetric.Write(&metric)
					So(err, ShouldBeNil)
					So(*metric.Counter.Value, ShouldEqual, latencySum)

					for index := range dBuckets {
						pmMetric = <-chMetric
						So(pmMetric.Desc().String(), ShouldEqual,
							collector.MetricsDesc["zot_http_method_latency_seconds_bucket"].String())

						err = pmMetric.Write(&metric)
						So(err, ShouldBeNil)
						So(*metric.Counter.Value, ShouldEqual, index+1)
					}

					So(isChannelDrained(chMetric), ShouldEqual, true)
				})
				Convey("Negative testing: Send unknown metric type to MetricServer", func() {
					serverController.Metrics.SendMetric(getRandomLatency())
				})
				Convey("Concurrent metrics scrape", func() {
					var wg sync.WaitGroup

					nBig, err := rand.Int(rand.Reader, big.NewInt(100))
					if err != nil {
						panic(err)
					}
					workersSize := int(nBig.Int64())
					for i := 0; i < workersSize; i++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							m := serverController.Metrics.ReceiveMetrics()
							json := jsoniter.ConfigCompatibleWithStandardLibrary

							_, err := json.Marshal(m)
							if err != nil {
								exporterController.Log.Error().Err(err).Msg("Concurrent metrics scrape fail")
							}
						}()
					}
					wg.Wait()
				})
				Convey("Negative testing: Increment a counter that does not exist", func() {
					cv := monitoring.CounterValue{Name: "dummyName"}
					serverController.Metrics.SendMetric(cv)
				})
				Convey("Negative testing: Set a gauge for a metric with len(labelNames)!=len(knownLabelNames)", func() {
					gv := monitoring.GaugeValue{
						Name:       "zot.info",
						Value:      1,
						LabelNames: []string{"commit", "binaryType", "version"},
					}
					serverController.Metrics.SendMetric(gv)
				})
				Convey("Negative testing: Summary observe for a metric with labelNames!=knownLabelNames", func() {
					sv := monitoring.SummaryValue{
						Name:        "zot.repo.latency.seconds",
						LabelNames:  []string{"dummyRepoLabelName"},
						LabelValues: []string{"dummyrepo"},
					}
					serverController.Metrics.SendMetric(sv)
				})
				Convey("Negative testing: Histogram observe for a metric with len(labelNames)!=len(LabelValues)", func() {
					hv := monitoring.HistogramValue{
						Name:        "zot.method.latency.seconds",
						LabelNames:  []string{"method"},
						LabelValues: []string{"GET", "POST", "DELETE"},
					}
					serverController.Metrics.SendMetric(hv)
				})
				Convey("Negative testing: error in getting the size for a repo directory", func() {
					monitoring.SetStorageUsage(serverController.Metrics, "/tmp/zot", "dummyrepo")
				})
				Convey("Disabling metrics after idle timeout", func() {
					So(serverController.Metrics.IsEnabled(), ShouldEqual, true)
					time.Sleep(monitoring.GetMaxIdleScrapeInterval())
					So(serverController.Metrics.IsEnabled(), ShouldEqual, false)
				})
			})
		})
	})
}