storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/peer-rest-client.go

/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
	"math"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/dustin/go-humanize"
	"github.com/tinylib/msgp/msgp"

	"storj.io/minio/cmd/http"
	xhttp "storj.io/minio/cmd/http"
	"storj.io/minio/cmd/logger"
	"storj.io/minio/cmd/rest"
	"storj.io/minio/pkg/bandwidth"
	"storj.io/minio/pkg/event"
	"storj.io/minio/pkg/madmin"
	xnet "storj.io/minio/pkg/net"
	"storj.io/minio/pkg/trace"
)

// peerRESTClient is a client to talk to peer nodes.
type peerRESTClient struct {
	host       *xnet.Host
	restClient *rest.Client
}

// Wrapper to restClient.Call to handle network errors; in case of a network error the connection is marked
// disconnected permanently. The only way to restore the connection is at the xl-sets layer by
// xlsets.monitorAndConnectEndpoints() after verifying format.json.
func (client *peerRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
	return client.callWithContext(GlobalContext, method, values, body, length)
}

// Wrapper to restClient.Call to handle network errors; in case of a network error the connection is marked
// disconnected permanently. The only way to restore the connection is at the xl-sets layer by
// xlsets.monitorAndConnectEndpoints() after verifying format.json.
func (client *peerRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
	if values == nil {
		values = make(url.Values)
	}

	respBody, err = client.restClient.Call(ctx, method, values, body, length)
	if err == nil {
		return respBody, nil
	}

	return nil, err
}

// String provides a canonicalized representation of the node.
func (client *peerRESTClient) String() string {
	return client.host.String()
}

// IsOnline returns true if the peer client is online.
func (client *peerRESTClient) IsOnline() bool {
	return client.restClient.IsOnline()
}

// Close - marks the client as closed.
func (client *peerRESTClient) Close() error {
	client.restClient.Close()
	return nil
}

// GetLocks - fetch older locks for a remote node.
func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo, err error) {
	respBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)
	if err != nil {
		return
	}
	lockMap = map[string][]lockRequesterInfo{}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&lockMap)
	return lockMap, err
}

// ServerInfo - fetch server information for a remote node.
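// The peer replies with a gob-encoded madmin.ServerProperties payload which is
// decoded directly into the returned struct.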
func (client *peerRESTClient) ServerInfo() (info madmin.ServerProperties, err error) {
	respBody, err := client.call(peerRESTMethodServerInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

type networkOverloadedErr struct{}

var networkOverloaded networkOverloadedErr

func (n networkOverloadedErr) Error() string {
	return "network overloaded"
}

type nullReader struct{}

func (r *nullReader) Read(b []byte) (int, error) {
	return len(b), nil
}

func (client *peerRESTClient) doNetTest(ctx context.Context, dataSize int64, threadCount uint) (info madmin.NetPerfInfo, err error) {
	var mu sync.Mutex // mutex used to protect these slices in go-routines
	latencies := []float64{}
	throughputs := []float64{}

	buflimiter := make(chan struct{}, threadCount)
	errChan := make(chan error, threadCount)

	var totalTransferred int64

	// ensure enough samples to obtain normal distribution
	maxSamples := int(10 * threadCount)

	innerCtx, cancel := context.WithCancel(ctx)

	slowSamples := int32(0)
	maxSlowSamples := int32(maxSamples / 20)
	slowSample := func() {
		if atomic.LoadInt32(&slowSamples) > maxSlowSamples { // 5% of total
			return
		}
		if atomic.AddInt32(&slowSamples, 1) >= maxSlowSamples {
			errChan <- networkOverloaded
			cancel()
		}
	}

	var wg sync.WaitGroup
	finish := func() {
		<-buflimiter
		wg.Done()
	}

	for i := 0; i < maxSamples; i++ {
		select {
		case <-ctx.Done():
			return info, ctx.Err()
		case err = <-errChan:
		case buflimiter <- struct{}{}:
			wg.Add(1)

			if innerCtx.Err() != nil {
				finish()
				continue
			}

			go func(i int) {
				start := time.Now()
				before := atomic.LoadInt64(&totalTransferred)

				ctx, cancel := context.WithTimeout(innerCtx, 10*time.Second)
				defer cancel()

				progress := io.LimitReader(&nullReader{}, dataSize)

				// Turn off healthCheckFn for health tests to cater for higher load on the peers.
				clnt := newPeerRESTClient(client.host)
				clnt.restClient.HealthCheckFn = nil

				respBody, err := clnt.callWithContext(ctx, peerRESTMethodNetInfo, nil, progress, dataSize)
				if err != nil {
					if errors.Is(err, context.DeadlineExceeded) {
						slowSample()
						finish()
						return
					}

					errChan <- err
					finish()
					return
				}
				http.DrainBody(respBody)

				finish()
				atomic.AddInt64(&totalTransferred, dataSize)
				after := atomic.LoadInt64(&totalTransferred)
				end := time.Now()

				latency := end.Sub(start).Seconds()

				if latency > maxLatencyForSizeThreads(dataSize, threadCount) {
					slowSample()
				}

				/* Throughput = (total data transferred across all threads / time taken) */
				throughput := float64(after-before) / latency

				// Protect updating latencies and throughputs slices from
				// multiple go-routines.
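				// Both slices are appended under the same lock so each latency
				// sample stays paired with the throughput measured for it.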
				mu.Lock()
				latencies = append(latencies, latency)
				throughputs = append(throughputs, throughput)
				mu.Unlock()
			}(i)
		}
	}
	wg.Wait()

	if err != nil {
		return info, err
	}

	latency, throughput, err := xnet.ComputePerfStats(latencies, throughputs)
	info = madmin.NetPerfInfo{
		Latency:    latency,
		Throughput: throughput,
	}
	return info, err
}

func maxLatencyForSizeThreads(size int64, threadCount uint) float64 {
	Gbit100 := 12.5 * float64(humanize.GiByte)
	Gbit40 := 5.00 * float64(humanize.GiByte)
	Gbit25 := 3.25 * float64(humanize.GiByte)
	Gbit10 := 1.25 * float64(humanize.GiByte)
	// Gbit1 := 0.25 * float64(humanize.GiByte)

	// Given the current defaults, each combination of size/thread
	// is supposed to fully saturate the intended pipe when all threads are active,
	// i.e. if the test is performed in a perfectly controlled environment, i.e. without
	// CPU scheduling latencies and/or network jitters, then all threads working
	// simultaneously should result in each of them completing in 1s.
	//
	// In reality, I've assumed a normal distribution of latency with expected mean of 1s and min of 0s.
	// Then, 95% of threads should complete within 2 seconds (2 std. deviations from the mean). The 2s comes
	// from fitting the normal curve such that the mean is 1.
	//
	// i.e. we expect no more than 5% of threads to take longer than 2s to push the data.
	//
	// throughput | max latency
	//   100 Gbit | 2s
	//    40 Gbit | 2s
	//    25 Gbit | 2s
	//    10 Gbit | 2s
	//     1 Gbit | inf

	throughput := float64(size * int64(threadCount))
	if throughput >= Gbit100 {
		return 2.0
	} else if throughput >= Gbit40 {
		return 2.0
	} else if throughput >= Gbit25 {
		return 2.0
	} else if throughput >= Gbit10 {
		return 2.0
	}
	return math.MaxFloat64
}

// NetInfo - fetch Net information for a remote node.
func (client *peerRESTClient) NetInfo(ctx context.Context) (info madmin.NetPerfInfo, err error) {
	// 100 Gbit -> 256 MiB * 50 threads
	//  40 Gbit -> 256 MiB * 20 threads
	//  25 Gbit -> 128 MiB * 25 threads
	//  10 Gbit -> 128 MiB * 10 threads
	//   1 Gbit ->  64 MiB *  2 threads

	type step struct {
		size    int64
		threads uint
	}
	steps := []step{
		{ // 100 Gbit
			size:    256 * humanize.MiByte,
			threads: 50,
		},
		{ // 40 Gbit
			size:    256 * humanize.MiByte,
			threads: 20,
		},
		{ // 25 Gbit
			size:    128 * humanize.MiByte,
			threads: 25,
		},
		{ // 10 Gbit
			size:    128 * humanize.MiByte,
			threads: 10,
		},
		{ // 1 Gbit
			size:    64 * humanize.MiByte,
			threads: 2,
		},
	}

	for i := range steps {
		size := steps[i].size
		threads := steps[i].threads

		if info, err = client.doNetTest(ctx, size, threads); err != nil {
			if err == networkOverloaded {
				continue
			}

			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
				continue
			}
		}
		return info, err
	}
	return info, err
}

// DispatchNetInfo - dispatch other nodes to run Net info.
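// The reply is wrapped by waitForHTTPResponse, so decoding of the gob payload
// only starts once the remote node has finished running its own network test.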
func (client *peerRESTClient) DispatchNetInfo(ctx context.Context) (info madmin.ServerNetHealthInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodDispatchNetInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	waitReader, err := waitForHTTPResponse(respBody)
	if err != nil {
		return
	}
	err = gob.NewDecoder(waitReader).Decode(&info)
	return
}

// DriveInfo - fetch Drive information for a remote node.
func (client *peerRESTClient) DriveInfo(ctx context.Context) (info madmin.ServerDrivesInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodDriveInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// CPUInfo - fetch CPU information for a remote node.
func (client *peerRESTClient) CPUInfo(ctx context.Context) (info madmin.ServerCPUInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodCPUInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// DiskHwInfo - fetch Disk HW information for a remote node.
func (client *peerRESTClient) DiskHwInfo(ctx context.Context) (info madmin.ServerDiskHwInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodDiskHwInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// OsInfo - fetch OS information for a remote node.
func (client *peerRESTClient) OsInfo(ctx context.Context) (info madmin.ServerOsInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodOsInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// MemInfo - fetch Memory information for a remote node.
func (client *peerRESTClient) MemInfo(ctx context.Context) (info madmin.ServerMemInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodMemInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// ProcInfo - fetch Process information for a remote node.
func (client *peerRESTClient) ProcInfo(ctx context.Context) (info madmin.ServerProcInfo, err error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodProcInfo, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&info)
	return info, err
}

// StartProfiling - Issues profiling command on the peer node.
func (client *peerRESTClient) StartProfiling(profiler string) error {
	values := make(url.Values)
	values.Set(peerRESTProfiler, profiler)
	respBody, err := client.call(peerRESTMethodStartProfiling, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// DownloadProfileData - download profiled data from a remote node.
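// The response body is a gob-encoded map[string][]byte holding the collected profiles.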
func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err error) {
	respBody, err := client.call(peerRESTMethodDownloadProfilingData, nil, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	err = gob.NewDecoder(respBody).Decode(&data)
	return data, err
}

// GetBucketStats - load bucket statistics
func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error) {
	values := make(url.Values)
	values.Set(peerRESTBucket, bucket)
	respBody, err := client.call(peerRESTMethodGetBucketStats, values, nil, -1)
	if err != nil {
		return BucketStats{}, err
	}

	var bs BucketStats
	defer http.DrainBody(respBody)
	return bs, msgp.Decode(respBody, &bs)
}

// LoadBucketMetadata - load bucket metadata
func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
	values := make(url.Values)
	values.Set(peerRESTBucket, bucket)
	respBody, err := client.call(peerRESTMethodLoadBucketMetadata, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// DeleteBucketMetadata - Delete bucket metadata
func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
	values := make(url.Values)
	values.Set(peerRESTBucket, bucket)
	respBody, err := client.call(peerRESTMethodDeleteBucketMetadata, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// cycleServerBloomFilter will cycle the bloom filter to start recording to index y if not already.
// The response will contain a bloom filter starting at index x up to, but not including index y.
// If y is 0, the response will not update y, but return the currently recorded information
// from the current x to y-1.
func (client *peerRESTClient) cycleServerBloomFilter(ctx context.Context, req bloomFilterRequest) (*bloomFilterResponse, error) {
	var reader bytes.Buffer
	err := gob.NewEncoder(&reader).Encode(req)
	if err != nil {
		return nil, err
	}
	respBody, err := client.callWithContext(ctx, peerRESTMethodCycleBloom, nil, &reader, -1)
	if err != nil {
		return nil, err
	}
	var resp bloomFilterResponse
	defer http.DrainBody(respBody)
	return &resp, gob.NewDecoder(respBody).Decode(&resp)
}

// DeletePolicy - delete a specific canned policy.
func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTPolicy, policyName)

	respBody, err := client.call(peerRESTMethodDeletePolicy, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadPolicy - reload a specific canned policy.
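// The policy name is passed as a query parameter; a successful response carries
// no payload and is simply drained.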
func (client *peerRESTClient) LoadPolicy(policyName string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTPolicy, policyName)

	respBody, err := client.call(peerRESTMethodLoadPolicy, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadPolicyMapping - reload a specific policy mapping
func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, isGroup bool) error {
	values := make(url.Values)
	values.Set(peerRESTUserOrGroup, userOrGroup)
	if isGroup {
		values.Set(peerRESTIsGroup, "")
	}

	respBody, err := client.call(peerRESTMethodLoadPolicyMapping, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// DeleteUser - delete a specific user.
func (client *peerRESTClient) DeleteUser(accessKey string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)

	respBody, err := client.call(peerRESTMethodDeleteUser, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// DeleteServiceAccount - delete a specific service account.
func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)

	respBody, err := client.call(peerRESTMethodDeleteServiceAccount, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadUser - reload a specific user.
func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)
	values.Set(peerRESTUserTemp, strconv.FormatBool(temp))

	respBody, err := client.call(peerRESTMethodLoadUser, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadServiceAccount - reload a specific service account.
func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) {
	values := make(url.Values)
	values.Set(peerRESTUser, accessKey)

	respBody, err := client.call(peerRESTMethodLoadServiceAccount, values, nil, -1)
	if err != nil {
		return
	}
	defer http.DrainBody(respBody)
	return nil
}

// LoadGroup - send load group command to peers.
func (client *peerRESTClient) LoadGroup(group string) error {
	values := make(url.Values)
	values.Set(peerRESTGroup, group)
	respBody, err := client.call(peerRESTMethodLoadGroup, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

type serverUpdateInfo struct {
	URL         *url.URL
	Sha256Sum   []byte
	Time        time.Time
	ReleaseInfo string
}

// ServerUpdate - sends server update message to remote peers.
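// The update parameters are gob-encoded as a serverUpdateInfo value and sent in
// the request body.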
func (client *peerRESTClient) ServerUpdate(ctx context.Context, u *url.URL, sha256Sum []byte, lrTime time.Time, releaseInfo string) error {
	values := make(url.Values)
	var reader bytes.Buffer
	if err := gob.NewEncoder(&reader).Encode(serverUpdateInfo{
		URL:         u,
		Sha256Sum:   sha256Sum,
		Time:        lrTime,
		ReleaseInfo: releaseInfo,
	}); err != nil {
		return err
	}
	respBody, err := client.callWithContext(ctx, peerRESTMethodServerUpdate, values, &reader, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// SignalService - sends signal to peer nodes.
func (client *peerRESTClient) SignalService(sig serviceSignal) error {
	values := make(url.Values)
	values.Set(peerRESTSignal, strconv.Itoa(int(sig)))
	respBody, err := client.call(peerRESTMethodSignalService, values, nil, -1)
	if err != nil {
		return err
	}
	defer http.DrainBody(respBody)
	return nil
}

// BackgroundHealStatus - fetch background heal status from a remote node.
func (client *peerRESTClient) BackgroundHealStatus() (madmin.BgHealState, error) {
	respBody, err := client.call(peerRESTMethodBackgroundHealStatus, nil, nil, -1)
	if err != nil {
		return madmin.BgHealState{}, err
	}
	defer http.DrainBody(respBody)

	state := madmin.BgHealState{}
	err = gob.NewDecoder(respBody).Decode(&state)
	return state, err
}

// GetLocalDiskIDs - get a peer's local disks' IDs.
func (client *peerRESTClient) GetLocalDiskIDs(ctx context.Context) (diskIDs []string) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodGetLocalDiskIDs, nil, nil, -1)
	if err != nil {
		logger.LogIf(ctx, err)
		return nil
	}
	defer http.DrainBody(respBody)
	if err = gob.NewDecoder(respBody).Decode(&diskIDs); err != nil {
		logger.LogIf(ctx, err)
		return nil
	}
	return diskIDs
}

// GetMetacacheListing - get a new or existing metacache.
func (client *peerRESTClient) GetMetacacheListing(ctx context.Context, o listPathOptions) (*metacache, error) {
	var reader bytes.Buffer
	err := gob.NewEncoder(&reader).Encode(o)
	if err != nil {
		return nil, err
	}
	respBody, err := client.callWithContext(ctx, peerRESTMethodGetMetacacheListing, nil, &reader, int64(reader.Len()))
	if err != nil {
		logger.LogIf(ctx, err)
		return nil, err
	}
	var resp metacache
	defer http.DrainBody(respBody)
	return &resp, msgp.Decode(respBody, &resp)
}

// UpdateMetacacheListing - update an existing metacache; it will unconditionally be updated to the new state.
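// The request body carries the msgp-encoded metacache and the peer responds
// with its view of the updated metacache.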
func (client *peerRESTClient) UpdateMetacacheListing(ctx context.Context, m metacache) (metacache, error) {
	b, err := m.MarshalMsg(nil)
	if err != nil {
		return m, err
	}
	respBody, err := client.callWithContext(ctx, peerRESTMethodUpdateMetacacheListing, nil, bytes.NewBuffer(b), int64(len(b)))
	if err != nil {
		logger.LogIf(ctx, err)
		return m, err
	}
	defer http.DrainBody(respBody)
	var resp metacache
	return resp, msgp.Decode(respBody, &resp)
}

func (client *peerRESTClient) doTrace(traceCh chan interface{}, doneCh <-chan struct{}, traceOpts madmin.ServiceTraceOpts) {
	values := make(url.Values)
	values.Set(peerRESTTraceErr, strconv.FormatBool(traceOpts.OnlyErrors))
	values.Set(peerRESTTraceS3, strconv.FormatBool(traceOpts.S3))
	values.Set(peerRESTTraceStorage, strconv.FormatBool(traceOpts.Storage))
	values.Set(peerRESTTraceOS, strconv.FormatBool(traceOpts.OS))
	values.Set(peerRESTTraceInternal, strconv.FormatBool(traceOpts.Internal))
	values.Set(peerRESTTraceThreshold, traceOpts.Threshold.String())

	// To cancel the REST request in case doneCh gets closed.
	ctx, cancel := context.WithCancel(GlobalContext)

	cancelCh := make(chan struct{})
	defer close(cancelCh)
	go func() {
		select {
		case <-doneCh:
		case <-cancelCh:
			// There was an error in the REST request.
		}
		cancel()
	}()

	respBody, err := client.callWithContext(ctx, peerRESTMethodTrace, values, nil, -1)
	defer http.DrainBody(respBody)

	if err != nil {
		return
	}

	dec := gob.NewDecoder(respBody)
	for {
		var info trace.Info
		if err = dec.Decode(&info); err != nil {
			return
		}
		if len(info.NodeName) > 0 {
			select {
			case traceCh <- info:
			default:
				// Do not block on slow receivers.
			}
		}
	}
}

func (client *peerRESTClient) doListen(listenCh chan interface{}, doneCh <-chan struct{}, v url.Values) {
	// To cancel the REST request in case doneCh gets closed.
	ctx, cancel := context.WithCancel(GlobalContext)

	cancelCh := make(chan struct{})
	defer close(cancelCh)
	go func() {
		select {
		case <-doneCh:
		case <-cancelCh:
			// There was an error in the REST request.
		}
		cancel()
	}()

	respBody, err := client.callWithContext(ctx, peerRESTMethodListen, v, nil, -1)
	defer http.DrainBody(respBody)

	if err != nil {
		return
	}

	dec := gob.NewDecoder(respBody)
	for {
		var ev event.Event
		if err := dec.Decode(&ev); err != nil {
			return
		}
		if len(ev.EventVersion) > 0 {
			select {
			case listenCh <- ev:
			default:
				// Do not block on slow receivers.
			}
		}
	}
}

// Listen - listen on peers.
func (client *peerRESTClient) Listen(listenCh chan interface{}, doneCh <-chan struct{}, v url.Values) {
	go func() {
		for {
			client.doListen(listenCh, doneCh, v)
			select {
			case <-doneCh:
				return
			default:
				// There was an error in the REST request; retry after some time as the peer is probably down.
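				// A fixed back-off keeps the retry loop from hammering an unreachable peer.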
				time.Sleep(5 * time.Second)
			}
		}
	}()
}

// Trace - send http trace request to peer nodes
func (client *peerRESTClient) Trace(traceCh chan interface{}, doneCh <-chan struct{}, traceOpts madmin.ServiceTraceOpts) {
	go func() {
		for {
			client.doTrace(traceCh, doneCh, traceOpts)
			select {
			case <-doneCh:
				return
			default:
				// There was an error in the REST request; retry after some time as the peer is probably down.
				time.Sleep(5 * time.Second)
			}
		}
	}()
}

// ConsoleLog - sends request to peer nodes to get console logs
func (client *peerRESTClient) ConsoleLog(logCh chan interface{}, doneCh <-chan struct{}) {
	go func() {
		for {
			// get cancellation context to properly unsubscribe peers
			ctx, cancel := context.WithCancel(GlobalContext)
			respBody, err := client.callWithContext(ctx, peerRESTMethodLog, nil, nil, -1)
			if err != nil {
				// Retry the failed request.
				time.Sleep(5 * time.Second)
			} else {
				dec := gob.NewDecoder(respBody)

				go func() {
					<-doneCh
					cancel()
				}()

				for {
					var log madmin.LogInfo
					if err = dec.Decode(&log); err != nil {
						break
					}
					select {
					case logCh <- log:
					default:
					}
				}
			}

			select {
			case <-doneCh:
				cancel()
				http.DrainBody(respBody)
				return
			default:
				// There was an error in the REST request, retry.
			}
		}
	}()
}

// newPeerRestClients creates new peer clients.
// The two slices will point to the same clients,
// but 'all' will contain nil entry for local client.
// The 'all' slice will be in the same order across the cluster.
func newPeerRestClients(endpoints EndpointServerPools) (remote, all []*peerRESTClient) {
	if !globalIsDistErasure {
		// Only useful in distributed setups
		return nil, nil
	}
	hosts := endpoints.hostsSorted()
	remote = make([]*peerRESTClient, 0, len(hosts))
	all = make([]*peerRESTClient, len(hosts))
	for i, host := range hosts {
		if host == nil {
			continue
		}
		all[i] = newPeerRESTClient(host)
		remote = append(remote, all[i])
	}
	if len(all) != len(remote)+1 {
		logger.LogIf(context.Background(), fmt.Errorf("WARNING: Expected number of all hosts (%v) to be remote +1 (%v)", len(all), len(remote)))
	}
	return remote, all
}

// newPeerRESTClient returns a peer REST client for the given host.
func newPeerRESTClient(peer *xnet.Host) *peerRESTClient {
	scheme := "http"
	if GlobalIsTLS {
		scheme = "https"
	}

	serverURL := &url.URL{
		Scheme: scheme,
		Host:   peer.String(),
		Path:   peerRESTPath,
	}

	restClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
	// Use a separate client to avoid recursive calls.
	healthClient := rest.NewClient(serverURL, globalInternodeTransport, newAuthToken)
	healthClient.ExpectTimeouts = true

	// Construct a new health function.
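	// It issues a lightweight Health call on the dedicated healthClient and
	// reports the peer as offline only for network-level errors.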
	restClient.HealthCheckFn = func() bool {
		ctx, cancel := context.WithTimeout(context.Background(), restClient.HealthCheckTimeout)
		defer cancel()
		respBody, err := healthClient.Call(ctx, peerRESTMethodHealth, nil, nil, -1)
		xhttp.DrainBody(respBody)
		return !isNetworkError(err)
	}

	return &peerRESTClient{host: peer, restClient: restClient}
}

// MonitorBandwidth - fetch bandwidth monitoring details for the given buckets from a remote node.
func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []string) (*bandwidth.Report, error) {
	values := make(url.Values)
	values.Set(peerRESTBuckets, strings.Join(buckets, ","))
	respBody, err := client.callWithContext(ctx, peerRESTMethodGetBandwidth, values, nil, -1)
	if err != nil {
		return nil, err
	}
	defer http.DrainBody(respBody)

	dec := gob.NewDecoder(respBody)
	var bandwidthReport bandwidth.Report
	err = dec.Decode(&bandwidthReport)
	return &bandwidthReport, err
}

// GetPeerMetrics - fetch metrics from a remote node, streamed over the returned channel.
func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric, error) {
	respBody, err := client.callWithContext(ctx, peerRESTMethodGetPeerMetrics, nil, nil, -1)
	if err != nil {
		return nil, err
	}
	dec := gob.NewDecoder(respBody)
	ch := make(chan Metric)
	go func(ch chan<- Metric) {
		for {
			var metric Metric
			if err := dec.Decode(&metric); err != nil {
				http.DrainBody(respBody)
				close(ch)
				return
			}
			ch <- metric
		}
	}(ch)
	return ch, nil
}