github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/admin-handlers.go (about)

     1  // Copyright (c) 2015-2022 MinIO, Inc.
     2  //
     3  // This file is part of MinIO Object Storage stack
     4  //
     5  // This program is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Affero General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // This program is distributed in the hope that it will be useful
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13  // GNU Affero General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Affero General Public License
    16  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package cmd
    19  
    20  import (
    21  	"bytes"
    22  	"context"
    23  	crand "crypto/rand"
    24  	"crypto/rsa"
    25  	"crypto/subtle"
    26  	"crypto/x509"
    27  	"encoding/base64"
    28  	"encoding/json"
    29  	"encoding/pem"
    30  	"errors"
    31  	"fmt"
    32  	"hash/crc32"
    33  	"io"
    34  	"math"
    35  	"net/http"
    36  	"net/url"
    37  	"os"
    38  	"path"
    39  	"regexp"
    40  	"runtime"
    41  	"sort"
    42  	"strconv"
    43  	"strings"
    44  	"sync/atomic"
    45  	"time"
    46  
    47  	"github.com/dustin/go-humanize"
    48  	"github.com/klauspost/compress/zip"
    49  	"github.com/minio/madmin-go/v3"
    50  	"github.com/minio/madmin-go/v3/estream"
    51  	"github.com/minio/minio-go/v7/pkg/set"
    52  	"github.com/minio/minio/internal/dsync"
    53  	"github.com/minio/minio/internal/grid"
    54  	"github.com/minio/minio/internal/handlers"
    55  	xhttp "github.com/minio/minio/internal/http"
    56  	xioutil "github.com/minio/minio/internal/ioutil"
    57  	"github.com/minio/minio/internal/kms"
    58  	"github.com/minio/minio/internal/logger"
    59  	"github.com/minio/mux"
    60  	"github.com/minio/pkg/v2/logger/message/log"
    61  	xnet "github.com/minio/pkg/v2/net"
    62  	"github.com/minio/pkg/v2/policy"
    63  	"github.com/secure-io/sio-go"
    64  	"github.com/zeebo/xxh3"
    65  )
    66  
const (
	// maxEConfigJSONSize caps accepted config JSON payloads
	// (262272 = 256 KiB + 128 B of slack).
	maxEConfigJSONSize = 262272
	// kubernetesVersionEndpoint is the in-cluster Kubernetes API URL used
	// to probe the cluster version when running inside Kubernetes.
	kubernetesVersionEndpoint = "https://kubernetes.default.svc/version"
	// anonymizeParam/anonymizeStrict control anonymization of diagnostic
	// output; "strict" requests the most aggressive scrubbing.
	anonymizeParam  = "anonymize"
	anonymizeStrict = "strict"
)
    73  
// Only valid query params for mgmt admin APIs.
const (
	mgmtBucket      = "bucket"      // target bucket name
	mgmtPrefix      = "prefix"      // object-name prefix filter
	mgmtClientToken = "clientToken" // resumption token for long-running ops
	mgmtForceStart  = "forceStart"  // force (re)start of an operation
	mgmtForceStop   = "forceStop"   // force stop of a running operation
)
    82  
// ServerUpdateV2Handler - POST /minio/admin/v3/update?updateURL={updateURL}&type=2
// ----------
// updates all minio servers and restarts them gracefully.
func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerUpdateAdminAction)
	if objectAPI == nil {
		return
	}

	// In-place update is disallowed when explicitly disabled (e.g. in
	// containers) or when the running release time is unknown.
	if globalInplaceUpdateDisabled || currentReleaseTime.IsZero() {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	vars := mux.Vars(r)
	updateURL := vars["updateURL"]
	// dry-run reports per-node status without committing the new binary
	// or restarting any node.
	dryRun := r.Form.Get("dry-run") == "true"

	mode := getMinioMode()
	if updateURL == "" {
		// Default to the official release metadata URL (Windows has its own).
		updateURL = minioReleaseInfoURL
		if runtime.GOOS == globalWindowsOSName {
			updateURL = minioReleaseWindowsInfoURL
		}
	}

	u, err := url.Parse(updateURL)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Fetch and parse release metadata: checksum, release time, binary name.
	content, err := downloadReleaseURL(u, updateTimeout, mode)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	sha256Sum, lrTime, releaseInfo, err := parseReleaseData(content)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo
	// Download Binary Once
	binC, bin, err := downloadBinary(u, mode)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	updateStatus := madmin.ServerUpdateStatusV2{DryRun: dryRun}
	peerResults := make(map[string]madmin.ServerPeerUpdateStatus)

	local := globalLocalNodeName
	if local == "" {
		local = "127.0.0.1"
	}

	// Peer indexes whose binary verification failed; they are skipped during
	// the commit and restart phases below.
	failedClients := make(map[int]struct{})

	if globalIsDistErasure {
		// Push binary to other servers
		for idx, nerr := range globalNotificationSys.VerifyBinary(ctx, u, sha256Sum, releaseInfo, binC) {
			if nerr.Err != nil {
				peerResults[nerr.Host.String()] = madmin.ServerPeerUpdateStatus{
					Host:           nerr.Host.String(),
					Err:            nerr.Err.Error(),
					CurrentVersion: Version,
				}
				failedClients[idx] = struct{}{}
			} else {
				peerResults[nerr.Host.String()] = madmin.ServerPeerUpdateStatus{
					Host:           nerr.Host.String(),
					CurrentVersion: Version,
					UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
				}
			}
		}
	}

	// Verify the local binary only if the release is strictly newer than
	// the running one; otherwise record an "already latest" result.
	if lrTime.Sub(currentReleaseTime) > 0 {
		if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil {
			peerResults[local] = madmin.ServerPeerUpdateStatus{
				Host:           local,
				Err:            err.Error(),
				CurrentVersion: Version,
			}
		} else {
			peerResults[local] = madmin.ServerPeerUpdateStatus{
				Host:           local,
				CurrentVersion: Version,
				UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
			}
		}
	} else {
		peerResults[local] = madmin.ServerPeerUpdateStatus{
			Host:           local,
			Err:            fmt.Sprintf("server is already running the latest version: %s", Version),
			CurrentVersion: Version,
		}
	}

	if !dryRun {
		if globalIsDistErasure {
			// Commit the verified binary on all peers that verified OK.
			ng := WithNPeers(len(globalNotificationSys.peerClients))
			for idx, client := range globalNotificationSys.peerClients {
				_, ok := failedClients[idx]
				if ok {
					continue
				}
				client := client
				ng.Go(ctx, func() error {
					return client.CommitBinary(ctx)
				}, idx, *client.host)
			}

			for _, nerr := range ng.Wait() {
				if nerr.Err != nil {
					prs, ok := peerResults[nerr.Host.String()]
					if ok {
						prs.Err = nerr.Err.Error()
						peerResults[nerr.Host.String()] = prs
					} else {
						peerResults[nerr.Host.String()] = madmin.ServerPeerUpdateStatus{
							Host:           nerr.Host.String(),
							Err:            nerr.Err.Error(),
							CurrentVersion: Version,
							UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
						}
					}
				}
			}
		}
		// Commit locally only if local verification succeeded.
		prs := peerResults[local]
		if prs.Err == "" {
			if err = commitBinary(); err != nil {
				prs.Err = err.Error()
			}
			peerResults[local] = prs
		}
	}

	// Attach the local node's waiting-drive information to its result.
	prs, ok := peerResults[local]
	if ok {
		prs.WaitingDrives = waitingDrivesNode()
		peerResults[local] = prs
	}

	if globalIsDistErasure {
		// Notify all other MinIO peers signal service.
		ng := WithNPeers(len(globalNotificationSys.peerClients))
		for idx, client := range globalNotificationSys.peerClients {
			_, ok := failedClients[idx]
			if ok {
				continue
			}
			client := client
			ng.Go(ctx, func() error {
				// Restart only peers that actually moved to a new version.
				prs, ok := peerResults[client.String()]
				if ok && prs.CurrentVersion != prs.UpdatedVersion && prs.UpdatedVersion != "" {
					return client.SignalService(serviceRestart, "", dryRun)
				}
				return nil
			}, idx, *client.host)
		}

		for _, nerr := range ng.Wait() {
			if nerr.Err != nil {
				// Peers report hung drives as a JSON-encoded map inside the
				// error string; decode and record it when present.
				waitingDrives := map[string]madmin.DiskMetrics{}
				jerr := json.Unmarshal([]byte(nerr.Err.Error()), &waitingDrives)
				if jerr == nil {
					prs, ok := peerResults[nerr.Host.String()]
					if ok {
						prs.WaitingDrives = waitingDrives
						peerResults[nerr.Host.String()] = prs
					}
					continue
				}
			}
		}
	}

	for _, pr := range peerResults {
		updateStatus.Results = append(updateStatus.Results, pr)
	}

	// Marshal API response
	jsonBytes, err := json.Marshal(updateStatus)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, jsonBytes)

	// Restart the local node last, after the response has been written.
	if !dryRun {
		if lrTime.Sub(currentReleaseTime) > 0 {
			globalServiceSignalCh <- serviceRestart
		}
	}
}
   289  
// ServerUpdateHandler - POST /minio/admin/v3/update?updateURL={updateURL}
// ----------
// updates all minio servers and restarts them gracefully.
func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerUpdateAdminAction)
	if objectAPI == nil {
		return
	}

	if globalInplaceUpdateDisabled || currentReleaseTime.IsZero() {
		// if MINIO_UPDATE=off - inplace update is disabled, mostly in containers.
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
		return
	}

	vars := mux.Vars(r)
	updateURL := vars["updateURL"]
	mode := getMinioMode()
	if updateURL == "" {
		// Default to the official release metadata URL (Windows has its own).
		updateURL = minioReleaseInfoURL
		if runtime.GOOS == globalWindowsOSName {
			updateURL = minioReleaseWindowsInfoURL
		}
	}

	u, err := url.Parse(updateURL)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Fetch and parse release metadata: checksum, release time, binary name.
	content, err := downloadReleaseURL(u, updateTimeout, mode)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	sha256Sum, lrTime, releaseInfo, err := parseReleaseData(content)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Release is not newer than the running version: report "no change"
	// (CurrentVersion == UpdatedVersion) and return without updating.
	if lrTime.Sub(currentReleaseTime) <= 0 {
		updateStatus := madmin.ServerUpdateStatus{
			CurrentVersion: Version,
			UpdatedVersion: Version,
		}

		// Marshal API response
		jsonBytes, err := json.Marshal(updateStatus)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		writeSuccessResponseJSON(w, jsonBytes)
		return
	}

	u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo

	// Download Binary Once
	binC, bin, err := downloadBinary(u, mode)
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Push binary to other servers
	// Unlike the V2 handler, any peer verification failure aborts the
	// whole update with an error response.
	for _, nerr := range globalNotificationSys.VerifyBinary(ctx, u, sha256Sum, releaseInfo, binC) {
		if nerr.Err != nil {
			err := AdminError{
				Code:       AdminUpdateApplyFailure,
				Message:    nerr.Err.Error(),
				StatusCode: http.StatusInternalServerError,
			}
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	// Verify the local copy of the downloaded binary.
	err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin))
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Commit the verified binary on all peers; abort on first failure.
	for _, nerr := range globalNotificationSys.CommitBinary(ctx) {
		if nerr.Err != nil {
			err := AdminError{
				Code:       AdminUpdateApplyFailure,
				Message:    nerr.Err.Error(),
				StatusCode: http.StatusInternalServerError,
			}
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	// Commit the new binary locally.
	err = commitBinary()
	if err != nil {
		logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	updateStatus := madmin.ServerUpdateStatus{
		CurrentVersion: Version,
		UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout),
	}

	// Marshal API response
	jsonBytes, err := json.Marshal(updateStatus)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Respond before restarting so the client sees the result.
	writeSuccessResponseJSON(w, jsonBytes)

	// Notify all other MinIO peers signal service.
	for _, nerr := range globalNotificationSys.SignalService(serviceRestart) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	globalServiceSignalCh <- serviceRestart
}
   429  
   430  // ServiceHandler - POST /minio/admin/v3/service?action={action}
   431  // ----------
   432  // Supports following actions:
   433  // - restart (restarts all the MinIO instances in a setup)
   434  // - stop (stops all the MinIO instances in a setup)
   435  // - freeze (freezes all incoming S3 API calls)
   436  // - unfreeze (unfreezes previously frozen S3 API calls)
   437  func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) {
   438  	ctx := r.Context()
   439  
   440  	vars := mux.Vars(r)
   441  	action := vars["action"]
   442  
   443  	var serviceSig serviceSignal
   444  	switch madmin.ServiceAction(action) {
   445  	case madmin.ServiceActionRestart:
   446  		serviceSig = serviceRestart
   447  	case madmin.ServiceActionStop:
   448  		serviceSig = serviceStop
   449  	case madmin.ServiceActionFreeze:
   450  		serviceSig = serviceFreeze
   451  	case madmin.ServiceActionUnfreeze:
   452  		serviceSig = serviceUnFreeze
   453  	default:
   454  		logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
   455  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
   456  		return
   457  	}
   458  
   459  	var objectAPI ObjectLayer
   460  	switch serviceSig {
   461  	case serviceRestart:
   462  		objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceRestartAdminAction)
   463  	case serviceStop:
   464  		objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceStopAdminAction)
   465  	case serviceFreeze, serviceUnFreeze:
   466  		objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceFreezeAdminAction)
   467  	}
   468  	if objectAPI == nil {
   469  		return
   470  	}
   471  
   472  	// Notify all other MinIO peers signal service.
   473  	for _, nerr := range globalNotificationSys.SignalService(serviceSig) {
   474  		if nerr.Err != nil {
   475  			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
   476  			logger.LogIf(ctx, nerr.Err)
   477  		}
   478  	}
   479  
   480  	// Reply to the client before restarting, stopping MinIO server.
   481  	writeSuccessResponseHeadersOnly(w)
   482  
   483  	switch serviceSig {
   484  	case serviceFreeze:
   485  		freezeServices()
   486  	case serviceUnFreeze:
   487  		unfreezeServices()
   488  	case serviceRestart, serviceStop:
   489  		globalServiceSignalCh <- serviceSig
   490  	}
   491  }
   492  
// servicePeerResult is the per-node outcome reported by ServiceV2Handler.
type servicePeerResult struct {
	Host          string                        `json:"host"`                    // node address
	Err           string                        `json:"err,omitempty"`           // signal error, if any
	WaitingDrives map[string]madmin.DiskMetrics `json:"waitingDrives,omitempty"` // drives that appear hung on this node
}
   498  
// serviceResult is the aggregate response body of ServiceV2Handler.
type serviceResult struct {
	Action  madmin.ServiceAction `json:"action"`            // requested action (restart/stop/freeze/unfreeze)
	DryRun  bool                 `json:"dryRun"`            // true when no node was actually signaled
	Results []servicePeerResult  `json:"results,omitempty"` // one entry per node (local + peers)
}
   504  
// ServiceV2Handler - POST /minio/admin/v3/service?action={action}&type=2
// ----------
// Supports following actions:
// - restart (restarts all the MinIO instances in a setup)
// - stop (stops all the MinIO instances in a setup)
// - freeze (freezes all incoming S3 API calls)
// - unfreeze (unfreezes previously frozen S3 API calls)
//
// This newer API now returns back status per remote peer and local regarding
// if a "restart/stop" was successful or not. Service signal now supports
// a dry-run that helps skip the nodes that may have hung drives. By default
// restart/stop will ignore the servers that are hung on drives. You can use
// 'force' param to force restart even with hung drives if needed.
func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	vars := mux.Vars(r)
	action := vars["action"]
	// dry-run collects per-node status without actually signaling anyone.
	dryRun := r.Form.Get("dry-run") == "true"

	// Translate the madmin action into an internal service signal.
	var serviceSig serviceSignal
	act := madmin.ServiceAction(action)
	switch act {
	case madmin.ServiceActionRestart:
		serviceSig = serviceRestart
	case madmin.ServiceActionStop:
		serviceSig = serviceStop
	case madmin.ServiceActionFreeze:
		serviceSig = serviceFreeze
	case madmin.ServiceActionUnfreeze:
		serviceSig = serviceUnFreeze
	default:
		logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
		return
	}

	// Each signal is guarded by its own admin policy action.
	var objectAPI ObjectLayer
	switch serviceSig {
	case serviceRestart:
		objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceRestartAdminAction)
	case serviceStop:
		objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceStopAdminAction)
	case serviceFreeze, serviceUnFreeze:
		objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceFreezeAdminAction)
	}
	if objectAPI == nil {
		return
	}

	// Notify all other MinIO peers signal service.
	srvResult := serviceResult{Action: act, Results: []servicePeerResult{}}

	// Waiting-drive info is only relevant for restart/stop.
	process := act == madmin.ServiceActionRestart || act == madmin.ServiceActionStop
	if process {
		localhost := globalLocalNodeName
		if globalLocalNodeName == "" {
			localhost = "127.0.0.1"
		}
		waitingDrives := waitingDrivesNode()
		srvResult.Results = append(srvResult.Results, servicePeerResult{
			Host:          localhost,
			WaitingDrives: waitingDrives,
		})
	}

	if globalIsDistErasure {
		for _, nerr := range globalNotificationSys.SignalServiceV2(serviceSig, dryRun) {
			// Peers report hung drives as a JSON-encoded map inside the
			// error string; decode and record it when present.
			if nerr.Err != nil && process {
				waitingDrives := map[string]madmin.DiskMetrics{}
				jerr := json.Unmarshal([]byte(nerr.Err.Error()), &waitingDrives)
				if jerr == nil {
					srvResult.Results = append(srvResult.Results, servicePeerResult{
						Host:          nerr.Host.String(),
						WaitingDrives: waitingDrives,
					})
					continue
				}
			}
			errStr := ""
			if nerr.Err != nil {
				errStr = nerr.Err.Error()
			}
			srvResult.Results = append(srvResult.Results, servicePeerResult{
				Host: nerr.Host.String(),
				Err:  errStr,
			})
		}
	}

	srvResult.DryRun = dryRun

	buf, err := json.Marshal(srvResult)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Reply to the client before restarting, stopping MinIO server.
	writeSuccessResponseJSON(w, buf)

	// Apply the signal locally last; restart/stop is skipped on dry-run.
	switch serviceSig {
	case serviceFreeze:
		freezeServices()
	case serviceUnFreeze:
		unfreezeServices()
	case serviceRestart, serviceStop:
		if !dryRun {
			globalServiceSignalCh <- serviceSig
		}
	}
}
   617  
// ServerProperties holds some server information such as, version, region
// uptime, etc..
type ServerProperties struct {
	Uptime       int64    `json:"uptime"`       // server uptime in seconds
	Version      string   `json:"version"`      // MinIO release version string
	CommitID     string   `json:"commitID"`     // source commit the binary was built from
	DeploymentID string   `json:"deploymentID"` // unique deployment identifier
	Region       string   `json:"region"`       // configured server region
	SQSARN       []string `json:"sqsARN"`       // configured notification target ARNs
}
   628  
// serverConnStats holds transferred bytes from/to the server
type serverConnStats struct {
	internodeInputBytes  uint64 // bytes received over internode (peer) connections
	internodeOutputBytes uint64 // bytes sent over internode (peer) connections
	s3InputBytes         uint64 // bytes received from S3 API clients
	s3OutputBytes        uint64 // bytes sent to S3 API clients
}
   636  
// ServerHTTPAPIStats holds total number of HTTP operations from/to the server,
// including the average duration the call was spent.
type ServerHTTPAPIStats struct {
	// APIStats maps an API name to its call count.
	APIStats map[string]int `json:"apiStats"`
}
   642  
// ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time.
type ServerHTTPStats struct {
	S3RequestsInQueue      int32              `json:"s3RequestsInQueue"`      // requests currently queued
	S3RequestsIncoming     uint64             `json:"s3RequestsIncoming"`     // requests received so far
	CurrentS3Requests      ServerHTTPAPIStats `json:"currentS3Requests"`      // in-flight requests per API
	TotalS3Requests        ServerHTTPAPIStats `json:"totalS3Requests"`        // completed requests per API
	TotalS3Errors          ServerHTTPAPIStats `json:"totalS3Errors"`          // all errors per API
	TotalS35xxErrors       ServerHTTPAPIStats `json:"totalS35xxErrors"`       // 5xx errors per API
	TotalS34xxErrors       ServerHTTPAPIStats `json:"totalS34xxErrors"`       // 4xx errors per API
	TotalS3Canceled        ServerHTTPAPIStats `json:"totalS3Canceled"`        // client-canceled requests per API
	TotalS3RejectedAuth    uint64             `json:"totalS3RejectedAuth"`    // rejected: bad authentication
	TotalS3RejectedTime    uint64             `json:"totalS3RejectedTime"`    // rejected: clock skew / time issues
	TotalS3RejectedHeader  uint64             `json:"totalS3RejectedHeader"`  // rejected: invalid headers
	TotalS3RejectedInvalid uint64             `json:"totalS3RejectedInvalid"` // rejected: otherwise invalid requests
}
   659  
   660  // StorageInfoHandler - GET /minio/admin/v3/storageinfo
   661  // ----------
   662  // Get server information
   663  func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
   664  	ctx := r.Context()
   665  
   666  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.StorageInfoAdminAction)
   667  	if objectAPI == nil {
   668  		return
   669  	}
   670  
   671  	storageInfo := objectAPI.StorageInfo(ctx, true)
   672  
   673  	// Collect any disk healing.
   674  	healing, _ := getAggregatedBackgroundHealState(ctx, nil)
   675  	healDisks := make(map[string]struct{}, len(healing.HealDisks))
   676  	for _, disk := range healing.HealDisks {
   677  		healDisks[disk] = struct{}{}
   678  	}
   679  
   680  	// find all disks which belong to each respective endpoints
   681  	for i, disk := range storageInfo.Disks {
   682  		if _, ok := healDisks[disk.Endpoint]; ok {
   683  			storageInfo.Disks[i].Healing = true
   684  		}
   685  	}
   686  
   687  	// Marshal API response
   688  	jsonBytes, err := json.Marshal(storageInfo)
   689  	if err != nil {
   690  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
   691  		return
   692  	}
   693  
   694  	// Reply with storage information (across nodes in a
   695  	// distributed setup) as json.
   696  	writeSuccessResponseJSON(w, jsonBytes)
   697  }
   698  
// MetricsHandler - GET /minio/admin/v3/metrics
// ----------
// Get realtime server metrics
func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ServerInfoAdminAction)
	if objectAPI == nil {
		return
	}
	const defaultMetricsInterval = time.Second

	// interval: time between samples; anything invalid or < 1s falls back
	// to the one-second default.
	interval, err := time.ParseDuration(r.Form.Get("interval"))
	if err != nil || interval < time.Second {
		interval = defaultMetricsInterval
	}

	// n: number of samples to stream; unlimited (MaxInt32) when absent.
	n, err := strconv.Atoi(r.Form.Get("n"))
	if err != nil || n <= 0 {
		n = math.MaxInt32
	}

	// types: bitmask of metric categories; defaults to all.
	var types madmin.MetricType
	if t, _ := strconv.ParseUint(r.Form.Get("types"), 10, 64); t != 0 {
		types = madmin.MetricType(t)
	} else {
		types = madmin.MetricsAll
	}

	// Optional disk filter and per-disk breakdown flag.
	disks := strings.Split(r.Form.Get("disks"), ",")
	byDisk := strings.EqualFold(r.Form.Get("by-disk"), "true")
	var diskMap map[string]struct{}
	if len(disks) > 0 && disks[0] != "" {
		diskMap = make(map[string]struct{}, len(disks))
		for _, k := range disks {
			if k != "" {
				diskMap[k] = struct{}{}
			}
		}
	}
	jobID := r.Form.Get("by-jobID")

	// Optional host filter and per-host breakdown flag.
	hosts := strings.Split(r.Form.Get("hosts"), ",")
	byHost := strings.EqualFold(r.Form.Get("by-host"), "true")
	var hostMap map[string]struct{}
	if len(hosts) > 0 && hosts[0] != "" {
		hostMap = make(map[string]struct{}, len(hosts))
		for _, k := range hosts {
			if k != "" {
				hostMap[k] = struct{}{}
			}
		}
	}
	dID := r.Form.Get("by-depID")
	done := ctx.Done()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	w.Header().Set(xhttp.ContentType, string(mimeJSON))

	// Stream one JSON document per sample until n samples are sent or the
	// client disconnects.
	enc := json.NewEncoder(w)
	for n > 0 {
		var m madmin.RealtimeMetrics
		mLocal := collectLocalMetrics(types, collectMetricsOpts{
			hosts: hostMap,
			disks: diskMap,
			jobID: jobID,
			depID: dID,
		})
		m.Merge(&mLocal)
		// Allow half the interval for collecting remote...
		cctx, cancel := context.WithTimeout(ctx, interval/2)
		mRemote := collectRemoteMetrics(cctx, types, collectMetricsOpts{
			hosts: hostMap,
			disks: diskMap,
			jobID: jobID,
			depID: dID,
		})
		cancel()
		m.Merge(&mRemote)
		if !byHost {
			m.ByHost = nil
		}
		if !byDisk {
			m.ByDisk = nil
		}

		// Mark the last sample so clients know the stream is ending.
		m.Final = n <= 1

		// Marshal API response; an encode failure (client gone) ends the loop.
		if err := enc.Encode(&m); err != nil {
			n = 0
		}

		n--
		if n <= 0 {
			break
		}

		// Flush before waiting for next...
		w.(http.Flusher).Flush()

		select {
		case <-ticker.C:
		case <-done:
			return
		}
	}
}
   807  
   808  // DataUsageInfoHandler - GET /minio/admin/v3/datausage?capacity={true}
   809  // ----------
   810  // Get server/cluster data usage info
   811  func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
   812  	ctx := r.Context()
   813  
   814  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.DataUsageInfoAdminAction)
   815  	if objectAPI == nil {
   816  		return
   817  	}
   818  
   819  	dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
   820  	if err != nil {
   821  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
   822  		return
   823  	}
   824  
   825  	dataUsageInfoJSON, err := json.Marshal(dataUsageInfo)
   826  	if err != nil {
   827  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
   828  		return
   829  	}
   830  
   831  	// Get capacity info when asked.
   832  	if r.Form.Get("capacity") == "true" {
   833  		sinfo := objectAPI.StorageInfo(ctx, false)
   834  		dataUsageInfo.TotalCapacity = GetTotalUsableCapacity(sinfo.Disks, sinfo)
   835  		dataUsageInfo.TotalFreeCapacity = GetTotalUsableCapacityFree(sinfo.Disks, sinfo)
   836  		if dataUsageInfo.TotalCapacity > dataUsageInfo.TotalFreeCapacity {
   837  			dataUsageInfo.TotalUsedCapacity = dataUsageInfo.TotalCapacity - dataUsageInfo.TotalFreeCapacity
   838  		}
   839  	}
   840  
   841  	writeSuccessResponseJSON(w, dataUsageInfoJSON)
   842  }
   843  
   844  func lriToLockEntry(l lockRequesterInfo, now time.Time, resource, server string) *madmin.LockEntry {
   845  	entry := &madmin.LockEntry{
   846  		Timestamp:  l.Timestamp,
   847  		Elapsed:    now.Sub(l.Timestamp),
   848  		Resource:   resource,
   849  		ServerList: []string{server},
   850  		Source:     l.Source,
   851  		Owner:      l.Owner,
   852  		ID:         l.UID,
   853  		Quorum:     l.Quorum,
   854  	}
   855  	if l.Writer {
   856  		entry.Type = "WRITE"
   857  	} else {
   858  		entry.Type = "READ"
   859  	}
   860  	return entry
   861  }
   862  
   863  func topLockEntries(peerLocks []*PeerLocks, stale bool) madmin.LockEntries {
   864  	now := time.Now().UTC()
   865  	entryMap := make(map[string]*madmin.LockEntry)
   866  	toEntry := func(lri lockRequesterInfo) string {
   867  		return fmt.Sprintf("%s/%s", lri.Name, lri.UID)
   868  	}
   869  	for _, peerLock := range peerLocks {
   870  		if peerLock == nil {
   871  			continue
   872  		}
   873  		for k, v := range peerLock.Locks {
   874  			for _, lockReqInfo := range v {
   875  				if val, ok := entryMap[toEntry(lockReqInfo)]; ok {
   876  					val.ServerList = append(val.ServerList, peerLock.Addr)
   877  				} else {
   878  					entryMap[toEntry(lockReqInfo)] = lriToLockEntry(lockReqInfo, now, k, peerLock.Addr)
   879  				}
   880  			}
   881  		}
   882  	}
   883  	var lockEntries madmin.LockEntries
   884  	for _, v := range entryMap {
   885  		if stale {
   886  			lockEntries = append(lockEntries, *v)
   887  			continue
   888  		}
   889  		if len(v.ServerList) >= v.Quorum {
   890  			lockEntries = append(lockEntries, *v)
   891  		}
   892  	}
   893  	sort.Sort(lockEntries)
   894  	return lockEntries
   895  }
   896  
// PeerLocks holds server information result of one node
type PeerLocks struct {
	Addr  string                         // network address of the peer reporting these locks
	Locks map[string][]lockRequesterInfo // resource name -> requesters holding/waiting on it
}
   902  
   903  // ForceUnlockHandler force unlocks requested resource
   904  func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Request) {
   905  	ctx := r.Context()
   906  
   907  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ForceUnlockAdminAction)
   908  	if objectAPI == nil {
   909  		return
   910  	}
   911  
   912  	z, ok := objectAPI.(*erasureServerPools)
   913  	if !ok {
   914  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
   915  		return
   916  	}
   917  
   918  	vars := mux.Vars(r)
   919  
   920  	var args dsync.LockArgs
   921  	var lockers []dsync.NetLocker
   922  	for _, path := range strings.Split(vars["paths"], ",") {
   923  		if path == "" {
   924  			continue
   925  		}
   926  		args.Resources = append(args.Resources, path)
   927  	}
   928  
   929  	for _, lks := range z.serverPools[0].erasureLockers {
   930  		lockers = append(lockers, lks...)
   931  	}
   932  
   933  	for _, locker := range lockers {
   934  		locker.ForceUnlock(ctx, args)
   935  	}
   936  }
   937  
   938  // TopLocksHandler Get list of locks in use
   939  func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request) {
   940  	ctx := r.Context()
   941  
   942  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.TopLocksAdminAction)
   943  	if objectAPI == nil {
   944  		return
   945  	}
   946  
   947  	count := 10 // by default list only top 10 entries
   948  	if countStr := r.Form.Get("count"); countStr != "" {
   949  		var err error
   950  		count, err = strconv.Atoi(countStr)
   951  		if err != nil {
   952  			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
   953  			return
   954  		}
   955  	}
   956  	stale := r.Form.Get("stale") == "true" // list also stale locks
   957  
   958  	peerLocks := globalNotificationSys.GetLocks(ctx, r)
   959  
   960  	topLocks := topLockEntries(peerLocks, stale)
   961  
   962  	// Marshal API response upto requested count.
   963  	if len(topLocks) > count && count > 0 {
   964  		topLocks = topLocks[:count]
   965  	}
   966  
   967  	jsonBytes, err := json.Marshal(topLocks)
   968  	if err != nil {
   969  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
   970  		return
   971  	}
   972  
   973  	// Reply with storage information (across nodes in a
   974  	// distributed setup) as json.
   975  	writeSuccessResponseJSON(w, jsonBytes)
   976  }
   977  
// StartProfilingResult contains the status of the starting
// profiling action in a given server - deprecated API
type StartProfilingResult struct {
	NodeName string `json:"nodeName"` // server that attempted to start profiling
	Success  bool   `json:"success"`  // whether the profiler started on that node
	Error    string `json:"error"`    // error message when Success is false
}
   985  
// StartProfilingHandler - POST /minio/admin/v3/profiling/start?profilerType={profilerType}
// ----------
// Enable server profiling
func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ProfilingAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	if globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}

	// profilerType is a comma-separated list of profiler names.
	vars := mux.Vars(r)
	profiles := strings.Split(vars["profilerType"], ",")
	thisAddr, err := xnet.ParseHost(globalLocalNodeName)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// globalProfiler is shared state; hold its mutex for the whole
	// stop-then-start sequence below.
	globalProfilerMu.Lock()
	defer globalProfilerMu.Unlock()

	if globalProfiler == nil {
		globalProfiler = make(map[string]minioProfiler, 10)
	}

	// Stop profiler of all types if already running
	for k, v := range globalProfiler {
		for _, p := range profiles {
			if p == k {
				v.Stop()
				delete(globalProfiler, k)
			}
		}
	}

	// Start profiling on remote servers.
	var hostErrs []NotificationPeerErr
	for _, profiler := range profiles {
		hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(profiler)...)

		// Start profiling locally as well.
		prof, err := startProfiler(profiler)
		if err != nil {
			hostErrs = append(hostErrs, NotificationPeerErr{
				Host: *thisAddr,
				Err:  err,
			})
		} else {
			globalProfiler[profiler] = prof
			hostErrs = append(hostErrs, NotificationPeerErr{
				Host: *thisAddr,
			})
		}
	}

	// Convert per-host errors into this deprecated API's result records.
	var startProfilingResult []StartProfilingResult

	for _, nerr := range hostErrs {
		result := StartProfilingResult{NodeName: nerr.Host.String()}
		if nerr.Err != nil {
			result.Error = nerr.Err.Error()
		} else {
			result.Success = true
		}
		startProfilingResult = append(startProfilingResult, result)
	}

	// Create JSON result and send it to the client
	startProfilingResultInBytes, err := json.Marshal(startProfilingResult)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	writeSuccessResponseJSON(w, startProfilingResultInBytes)
}
  1070  
// ProfileHandler - POST /minio/admin/v3/profile/?profilerType={profilerType}
// ----------
// Enable server profiling
func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ProfilingAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	if globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return
	}
	// Comma-separated list of profiler types to run.
	profileStr := r.Form.Get("profilerType")
	profiles := strings.Split(profileStr, ",")
	// Profiling duration; defaults to one minute.
	duration := time.Minute
	if dstr := r.Form.Get("duration"); dstr != "" {
		var err error
		duration, err = time.ParseDuration(dstr)
		if err != nil {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
			return
		}
	}

	globalProfilerMu.Lock()
	if globalProfiler == nil {
		globalProfiler = make(map[string]minioProfiler, 10)
	}

	// Stop profiler of all types if already running
	for k, v := range globalProfiler {
		v.Stop()
		delete(globalProfiler, k)
	}

	// Start profiling on remote servers.
	for _, profiler := range profiles {
		globalNotificationSys.StartProfiling(profiler)

		// Start profiling locally as well.
		prof, err := startProfiler(profiler)
		if err == nil {
			globalProfiler[profiler] = prof
		}
	}
	globalProfilerMu.Unlock()

	timer := time.NewTimer(duration)
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			// Client went away: stop all running profilers and bail out.
			globalProfilerMu.Lock()
			defer globalProfilerMu.Unlock()
			for k, v := range globalProfiler {
				v.Stop()
				delete(globalProfiler, k)
			}
			return
		case <-timer.C:
			// Duration elapsed: stream the collected profile data from all
			// nodes back to the client as the response body.
			if !globalNotificationSys.DownloadProfilingData(ctx, w) {
				writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminProfilerNotEnabled), r.URL)
				return
			}
			return
		}
	}
}
  1144  
// dummyFileInfo represents a dummy representation of a profile data file
// present only in memory, it helps to generate the zip stream.
type dummyFileInfo struct {
	name    string      // file name as it should appear in the zip
	size    int64       // size in bytes
	mode    os.FileMode // permission bits
	modTime time.Time   // modification time
	isDir   bool        // whether the entry is a directory
	sys     interface{} // underlying data source, if any
}

// os.FileInfo implementation backed by the in-memory fields above.
func (f dummyFileInfo) Name() string       { return f.name }
func (f dummyFileInfo) Size() int64        { return f.size }
func (f dummyFileInfo) Mode() os.FileMode  { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool        { return f.isDir }
func (f dummyFileInfo) Sys() interface{}   { return f.sys }
  1162  
  1163  // DownloadProfilingHandler - POST /minio/admin/v3/profiling/download
  1164  // ----------
  1165  // Download profiling information of all nodes in a zip format - deprecated API
  1166  func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
  1167  	ctx := r.Context()
  1168  
  1169  	// Validate request signature.
  1170  	_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ProfilingAdminAction, "")
  1171  	if adminAPIErr != ErrNone {
  1172  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
  1173  		return
  1174  	}
  1175  
  1176  	if globalNotificationSys == nil {
  1177  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
  1178  		return
  1179  	}
  1180  
  1181  	if !globalNotificationSys.DownloadProfilingData(ctx, w) {
  1182  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminProfilerNotEnabled), r.URL)
  1183  		return
  1184  	}
  1185  }
  1186  
// healInitParams holds the parsed parameters of a heal init request.
type healInitParams struct {
	bucket, objPrefix     string          // target bucket and object prefix
	hs                    madmin.HealOpts // heal options decoded from the request body
	clientToken           string          // token of an existing heal sequence, if any
	forceStart, forceStop bool            // force-start/force-stop query flags
}
  1193  
  1194  // extractHealInitParams - Validates params for heal init API.
  1195  func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Reader) (hip healInitParams, err APIErrorCode) {
  1196  	hip.bucket = vars[mgmtBucket]
  1197  	hip.objPrefix = vars[mgmtPrefix]
  1198  
  1199  	if hip.bucket == "" {
  1200  		if hip.objPrefix != "" {
  1201  			// Bucket is required if object-prefix is given
  1202  			err = ErrHealMissingBucket
  1203  			return
  1204  		}
  1205  	} else if isReservedOrInvalidBucket(hip.bucket, false) {
  1206  		err = ErrInvalidBucketName
  1207  		return
  1208  	}
  1209  
  1210  	// empty prefix is valid.
  1211  	if !IsValidObjectPrefix(hip.objPrefix) {
  1212  		err = ErrInvalidObjectName
  1213  		return
  1214  	}
  1215  
  1216  	if len(qParams[mgmtClientToken]) > 0 {
  1217  		hip.clientToken = qParams[mgmtClientToken][0]
  1218  	}
  1219  	if _, ok := qParams[mgmtForceStart]; ok {
  1220  		hip.forceStart = true
  1221  	}
  1222  	if _, ok := qParams[mgmtForceStop]; ok {
  1223  		hip.forceStop = true
  1224  	}
  1225  
  1226  	// Invalid request conditions:
  1227  	//
  1228  	//   Cannot have both forceStart and forceStop in the same
  1229  	//   request; If clientToken is provided, request can only be
  1230  	//   to continue receiving logs, so it cannot be start or
  1231  	//   stop;
  1232  	if (hip.forceStart && hip.forceStop) ||
  1233  		(hip.clientToken != "" && (hip.forceStart || hip.forceStop)) {
  1234  		err = ErrInvalidRequest
  1235  		return
  1236  	}
  1237  
  1238  	// ignore body if clientToken is provided
  1239  	if hip.clientToken == "" {
  1240  		jerr := json.NewDecoder(r).Decode(&hip.hs)
  1241  		if jerr != nil {
  1242  			logger.LogIf(GlobalContext, jerr, logger.ErrorKind)
  1243  			err = ErrRequestBodyParse
  1244  			return
  1245  		}
  1246  	}
  1247  
  1248  	err = ErrNone
  1249  	return
  1250  }
  1251  
// HealHandler - POST /minio/admin/v3/heal/
// -----------
// Start heal processing and return heal status items.
//
// On a successful heal sequence start, a unique client token is
// returned. Subsequent requests to this endpoint providing the client
// token will receive heal status records from the running heal
// sequence.
//
// If no client token is provided, and a heal sequence is in progress
// an error is returned with information about the running heal
// sequence. However, if the force-start flag is provided, the server
// aborts the running heal sequence and starts a new one.
func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealAdminAction)
	if objectAPI == nil {
		return
	}

	// Validate bucket/prefix/token/force flags from path, query and body.
	hip, errCode := extractHealInitParams(mux.Vars(r), r.Form, r.Body)
	if errCode != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		return
	}

	// Analyze the heal token and route the request accordingly
	token, success := proxyRequestByToken(ctx, w, r, hip.clientToken)
	if success {
		return
	}
	hip.clientToken = token
	// if request was not successful, try this server locally if token
	// is not found the call will fail anyways. if token is empty
	// try this server to generate a new token.

	// healResp carries the outcome of an asynchronous heal start/stop.
	type healResp struct {
		respBytes []byte
		apiErr    APIError
		errBody   string
	}

	// Define a closure to start sending whitespace to client
	// after 10s unless a response item comes in
	keepConnLive := func(w http.ResponseWriter, r *http.Request, respCh chan healResp) {
		ticker := time.NewTicker(time.Second * 10)
		defer ticker.Stop()
		started := false
	forLoop:
		for {
			select {
			case <-r.Context().Done():
				return
			case <-ticker.C:
				if !started {
					// Start writing response to client
					started = true
					setCommonHeaders(w)
					setEventStreamHeaders(w)
					// Set 200 OK status
					w.WriteHeader(200)
				}
				// Send whitespace and keep connection open
				if _, err := w.Write([]byte(" ")); err != nil {
					return
				}
				w.(http.Flusher).Flush()
			case hr := <-respCh:
				switch hr.apiErr {
				case noError:
					if started {
						// Headers were already sent; append the payload raw.
						if _, err := w.Write(hr.respBytes); err != nil {
							return
						}
						w.(http.Flusher).Flush()
					} else {
						writeSuccessResponseJSON(w, hr.respBytes)
					}
				default:
					// Build the error document, honoring a custom message
					// body when one was supplied.
					var errorRespJSON []byte
					if hr.errBody == "" {
						errorRespJSON = encodeResponseJSON(getAPIErrorResponse(ctx, hr.apiErr,
							r.URL.Path, w.Header().Get(xhttp.AmzRequestID),
							w.Header().Get(xhttp.AmzRequestHostID)))
					} else {
						errorRespJSON = encodeResponseJSON(APIErrorResponse{
							Code:      hr.apiErr.Code,
							Message:   hr.errBody,
							Resource:  r.URL.Path,
							RequestID: w.Header().Get(xhttp.AmzRequestID),
							HostID:    globalDeploymentID(),
						})
					}
					if !started {
						setCommonHeaders(w)
						w.Header().Set(xhttp.ContentType, string(mimeJSON))
						w.WriteHeader(hr.apiErr.HTTPStatusCode)
					}
					if _, err := w.Write(errorRespJSON); err != nil {
						return
					}
					w.(http.Flusher).Flush()
				}
				break forLoop
			}
		}
	}

	healPath := pathJoin(hip.bucket, hip.objPrefix)
	if hip.clientToken == "" && !hip.forceStart && !hip.forceStop {
		// No token and no force flags: if a heal sequence is already
		// running on this path, return its token instead of starting anew.
		nh, exists := globalAllHealState.getHealSequence(healPath)
		if exists && !nh.hasEnded() && len(nh.currentStatus.Items) > 0 {
			clientToken := nh.clientToken
			if globalIsDistErasure {
				// Append this node's proxy index so follow-up requests can
				// be routed back to it.
				clientToken = fmt.Sprintf("%s:%d", nh.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
			}
			b, err := json.Marshal(madmin.HealStartSuccess{
				ClientToken:   clientToken,
				ClientAddress: nh.clientAddress,
				StartTime:     nh.startTime,
			})
			if err != nil {
				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
				return
			}
			// Client token not specified but a heal sequence exists on a path,
			// Send the token back to client.
			writeSuccessResponseJSON(w, b)
			return
		}
	}

	if hip.clientToken != "" && !hip.forceStart && !hip.forceStop {
		// Since clientToken is given, fetch heal status from running
		// heal sequence.
		respBytes, errCode := globalAllHealState.PopHealStatusJSON(
			healPath, hip.clientToken)
		if errCode != ErrNone {
			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL)
		} else {
			writeSuccessResponseJSON(w, respBytes)
		}
		return
	}

	// Stop or start the heal sequence asynchronously; the result is
	// delivered over respCh while keepConnLive keeps the client attached.
	respCh := make(chan healResp, 1)
	switch {
	case hip.forceStop:
		go func() {
			respBytes, apiErr := globalAllHealState.stopHealSequence(healPath)
			hr := healResp{respBytes: respBytes, apiErr: apiErr}
			respCh <- hr
		}()
	case hip.clientToken == "":
		nh := newHealSequence(GlobalContext, hip.bucket, hip.objPrefix, handlers.GetSourceIP(r), hip.hs, hip.forceStart)
		go func() {
			respBytes, apiErr, errMsg := globalAllHealState.LaunchNewHealSequence(nh, objectAPI)
			hr := healResp{respBytes, apiErr, errMsg}
			respCh <- hr
		}()
	}

	// Due to the force-starting functionality, the Launch
	// call above can take a long time - to keep the
	// connection alive, we start sending whitespace
	keepConnLive(w, r, respCh)
}
  1420  
  1421  // getAggregatedBackgroundHealState returns the heal state of disks.
  1422  // If no ObjectLayer is provided no set status is returned.
  1423  func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmin.BgHealState, error) {
  1424  	// Get local heal status first
  1425  	bgHealStates, ok := getLocalBackgroundHealStatus(ctx, o)
  1426  	if !ok {
  1427  		return bgHealStates, errServerNotInitialized
  1428  	}
  1429  
  1430  	if globalIsDistErasure {
  1431  		// Get heal status from other peers
  1432  		peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
  1433  		var errCount int
  1434  		for _, nerr := range nerrs {
  1435  			if nerr.Err != nil {
  1436  				logger.LogIf(ctx, nerr.Err)
  1437  				errCount++
  1438  			}
  1439  		}
  1440  		if errCount == len(nerrs) {
  1441  			return madmin.BgHealState{}, fmt.Errorf("all remote servers failed to report heal status, cluster is unhealthy")
  1442  		}
  1443  		bgHealStates.Merge(peersHealStates...)
  1444  	}
  1445  
  1446  	return bgHealStates, nil
  1447  }
  1448  
  1449  func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
  1450  	ctx := r.Context()
  1451  
  1452  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealAdminAction)
  1453  	if objectAPI == nil {
  1454  		return
  1455  	}
  1456  
  1457  	aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context(), objectAPI)
  1458  	if err != nil {
  1459  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
  1460  		return
  1461  	}
  1462  
  1463  	if err := json.NewEncoder(w).Encode(aggregateHealStateResult); err != nil {
  1464  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
  1465  		return
  1466  	}
  1467  }
  1468  
  1469  // SitePerfHandler -  measures network throughput between site replicated setups
  1470  func (a adminAPIHandlers) SitePerfHandler(w http.ResponseWriter, r *http.Request) {
  1471  	ctx := r.Context()
  1472  
  1473  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
  1474  	if objectAPI == nil {
  1475  		return
  1476  	}
  1477  
  1478  	if !globalSiteReplicationSys.isEnabled() {
  1479  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
  1480  		return
  1481  	}
  1482  
  1483  	nsLock := objectAPI.NewNSLock(minioMetaBucket, "site-net-perf")
  1484  	lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
  1485  	if err != nil {
  1486  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
  1487  		return
  1488  	}
  1489  	ctx = lkctx.Context()
  1490  	defer nsLock.Unlock(lkctx)
  1491  
  1492  	durationStr := r.Form.Get(peerRESTDuration)
  1493  	duration, err := time.ParseDuration(durationStr)
  1494  	if err != nil {
  1495  		duration = globalNetPerfMinDuration
  1496  	}
  1497  
  1498  	if duration < globalNetPerfMinDuration {
  1499  		// We need sample size of minimum 10 secs.
  1500  		duration = globalNetPerfMinDuration
  1501  	}
  1502  
  1503  	duration = duration.Round(time.Second)
  1504  
  1505  	results, err := globalSiteReplicationSys.Netperf(ctx, duration)
  1506  	if err != nil {
  1507  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
  1508  		return
  1509  	}
  1510  	enc := json.NewEncoder(w)
  1511  	if err := enc.Encode(results); err != nil {
  1512  		return
  1513  	}
  1514  }
  1515  
  1516  // ClientDevNullExtraTime - return extratime for last devnull
  1517  // [POST] /minio/admin/v3/speedtest/client/devnull/extratime
  1518  func (a adminAPIHandlers) ClientDevNullExtraTime(w http.ResponseWriter, r *http.Request) {
  1519  	ctx := r.Context()
  1520  
  1521  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.BandwidthMonitorAction)
  1522  	if objectAPI == nil {
  1523  		return
  1524  	}
  1525  
  1526  	enc := json.NewEncoder(w)
  1527  	if err := enc.Encode(madmin.ClientPerfExtraTime{TimeSpent: atomic.LoadInt64(&globalLastClientPerfExtraTime)}); err != nil {
  1528  		return
  1529  	}
  1530  }
  1531  
  1532  // ClientDevNull - everything goes to io.Discard
  1533  // [POST] /minio/admin/v3/speedtest/client/devnull
  1534  func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request) {
  1535  	ctx := r.Context()
  1536  
  1537  	timeStart := time.Now()
  1538  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.BandwidthMonitorAction)
  1539  	if objectAPI == nil {
  1540  		return
  1541  	}
  1542  
  1543  	nsLock := objectAPI.NewNSLock(minioMetaBucket, "client-perf")
  1544  	lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
  1545  	if err != nil {
  1546  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
  1547  		return
  1548  	}
  1549  	ctx = lkctx.Context()
  1550  	defer nsLock.Unlock(lkctx)
  1551  	timeEnd := time.Now()
  1552  
  1553  	atomic.SwapInt64(&globalLastClientPerfExtraTime, timeEnd.Sub(timeStart).Nanoseconds())
  1554  
  1555  	ctx, cancel := context.WithTimeout(ctx, madmin.MaxClientPerfTimeout)
  1556  	defer cancel()
  1557  	totalRx := int64(0)
  1558  	connectTime := time.Now()
  1559  	for {
  1560  		n, err := io.CopyN(xioutil.Discard, r.Body, 128*humanize.KiByte)
  1561  		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
  1562  			// would mean the network is not stable. Logging here will help in debugging network issues.
  1563  			if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
  1564  				logger.LogIf(ctx, err)
  1565  			}
  1566  		}
  1567  		totalRx += n
  1568  		if err != nil || ctx.Err() != nil || totalRx > 100*humanize.GiByte {
  1569  			break
  1570  		}
  1571  
  1572  	}
  1573  	w.WriteHeader(http.StatusOK)
  1574  }
  1575  
  1576  // NetperfHandler - perform mesh style network throughput test
  1577  func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
  1578  	ctx := r.Context()
  1579  
  1580  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
  1581  	if objectAPI == nil {
  1582  		return
  1583  	}
  1584  
  1585  	if !globalIsDistErasure {
  1586  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
  1587  		return
  1588  	}
  1589  
  1590  	nsLock := objectAPI.NewNSLock(minioMetaBucket, "netperf")
  1591  	lkctx, err := nsLock.GetLock(ctx, globalOperationTimeout)
  1592  	if err != nil {
  1593  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)), r.URL)
  1594  		return
  1595  	}
  1596  	ctx = lkctx.Context()
  1597  	defer nsLock.Unlock(lkctx)
  1598  
  1599  	durationStr := r.Form.Get(peerRESTDuration)
  1600  	duration, err := time.ParseDuration(durationStr)
  1601  	if err != nil {
  1602  		duration = globalNetPerfMinDuration
  1603  	}
  1604  
  1605  	if duration < globalNetPerfMinDuration {
  1606  		// We need sample size of minimum 10 secs.
  1607  		duration = globalNetPerfMinDuration
  1608  	}
  1609  
  1610  	duration = duration.Round(time.Second)
  1611  
  1612  	results := globalNotificationSys.Netperf(ctx, duration)
  1613  	enc := json.NewEncoder(w)
  1614  	if err := enc.Encode(madmin.NetperfResult{NodeResults: results}); err != nil {
  1615  		return
  1616  	}
  1617  }
  1618  
// ObjectSpeedTestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
	if objectAPI == nil {
		return
	}

	// Tunables supplied by the client; all are optional.
	sizeStr := r.Form.Get(peerRESTSize)
	durationStr := r.Form.Get(peerRESTDuration)
	concurrentStr := r.Form.Get(peerRESTConcurrent)
	storageClass := strings.TrimSpace(r.Form.Get(peerRESTStorageClass))
	customBucket := strings.TrimSpace(r.Form.Get(peerRESTBucket))
	autotune := r.Form.Get("autotune") == "true"
	noClear := r.Form.Get("noclear") == "true"
	enableSha256 := r.Form.Get("enableSha256") == "true"

	// Defaults: 64 MiB objects, 32 concurrent requests, 10s duration.
	size, err := strconv.Atoi(sizeStr)
	if err != nil {
		size = 64 * humanize.MiByte
	}

	concurrent, err := strconv.Atoi(concurrentStr)
	if err != nil {
		concurrent = 32
	}

	duration, err := time.ParseDuration(durationStr)
	if err != nil {
		duration = time.Second * 10
	}

	storageInfo := objectAPI.StorageInfo(ctx, false)

	// Refuse to run (or quietly disable autotune) when free capacity
	// is insufficient for the requested workload.
	sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune)
	if !sufficientCapacity {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
			Code:       "XMinioSpeedtestInsufficientCapacity",
			Message:    capacityErrMsg,
			StatusCode: http.StatusInsufficientStorage,
		}), r.URL)
		return
	}

	if autotune && !canAutotune {
		autotune = false
	}

	if customBucket == "" {
		customBucket = globalObjectPerfBucket

		bucketExists, err := makeObjectPerfBucket(ctx, objectAPI, customBucket)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
			return
		}

		// Only delete the bucket afterwards if we created it here.
		if !noClear && !bucketExists {
			defer deleteObjectPerfBucket(objectAPI)
		}
	}

	// Clean up the test objects unless the client asked to keep them.
	if !noClear {
		defer objectAPI.DeleteObject(ctx, customBucket, speedTest+SlashSeparator, ObjectOptions{
			DeletePrefix: true,
		})
	}

	// Freeze all incoming S3 API calls before running speedtest.
	globalNotificationSys.ServiceFreeze(ctx, true)

	// unfreeze all incoming S3 API calls after speedtest.
	defer globalNotificationSys.ServiceFreeze(ctx, false)

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	// Stream intermediate results to the client as a sequence of JSON
	// documents, re-sending the last (or an empty) result on every tick
	// to keep the connection alive while the test runs.
	enc := json.NewEncoder(w)
	ch := objectSpeedTest(ctx, speedTestOpts{
		objectSize:       size,
		concurrencyStart: concurrent,
		duration:         duration,
		autotune:         autotune,
		storageClass:     storageClass,
		bucketName:       customBucket,
		enableSha256:     enableSha256,
	})
	var prevResult madmin.SpeedTestResult
	for {
		select {
		case <-ctx.Done():
			return
		case <-keepAliveTicker.C:
			// if previous result is set keep writing the
			// previous result back to the client
			if prevResult.Version != "" {
				if err := enc.Encode(prevResult); err != nil {
					return
				}
			} else {
				// first result is not yet obtained, keep writing
				// empty entry to prevent client from disconnecting.
				if err := enc.Encode(madmin.SpeedTestResult{}); err != nil {
					return
				}
			}
			w.(http.Flusher).Flush()
		case result, ok := <-ch:
			if !ok {
				// Speedtest finished; the channel was closed.
				return
			}
			if err := enc.Encode(result); err != nil {
				return
			}
			prevResult = result
			w.(http.Flusher).Flush()
		}
	}
}
  1742  
  1743  func makeObjectPerfBucket(ctx context.Context, objectAPI ObjectLayer, bucketName string) (bucketExists bool, err error) {
  1744  	if err = objectAPI.MakeBucket(ctx, bucketName, MakeBucketOptions{VersioningEnabled: globalSiteReplicationSys.isEnabled()}); err != nil {
  1745  		if _, ok := err.(BucketExists); !ok {
  1746  			// Only BucketExists error can be ignored.
  1747  			return false, err
  1748  		}
  1749  		bucketExists = true
  1750  	}
  1751  
  1752  	if globalSiteReplicationSys.isEnabled() {
  1753  		configData := []byte(`<VersioningConfiguration><Status>Enabled</Status><ExcludedPrefixes><Prefix>speedtest/*</Prefix></ExcludedPrefixes></VersioningConfiguration>`)
  1754  		if _, err = globalBucketMetadataSys.Update(ctx, bucketName, bucketVersioningConfig, configData); err != nil {
  1755  			return false, err
  1756  		}
  1757  	}
  1758  
  1759  	return bucketExists, nil
  1760  }
  1761  
  1762  func deleteObjectPerfBucket(objectAPI ObjectLayer) {
  1763  	objectAPI.DeleteBucket(context.Background(), globalObjectPerfBucket, DeleteBucketOptions{
  1764  		Force:      true,
  1765  		SRDeleteOp: getSRBucketDeleteOp(globalSiteReplicationSys.isEnabled()),
  1766  	})
  1767  }
  1768  
  1769  func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo, concurrent int, size int, autotune bool) (bool, bool, string) {
  1770  	capacityNeeded := uint64(concurrent * size)
  1771  	capacity := GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo)
  1772  
  1773  	if capacity < capacityNeeded {
  1774  		return false, false, fmt.Sprintf("not enough usable space available to perform speedtest - expected %s, got %s",
  1775  			humanize.IBytes(capacityNeeded), humanize.IBytes(capacity))
  1776  	}
  1777  
  1778  	// Verify if we can employ autotune without running out of capacity,
  1779  	// if we do run out of capacity, make sure to turn-off autotuning
  1780  	// in such situations.
  1781  	if autotune {
  1782  		newConcurrent := concurrent + (concurrent+1)/2
  1783  		autoTunedCapacityNeeded := uint64(newConcurrent * size)
  1784  		if capacity < autoTunedCapacityNeeded {
  1785  			// Turn-off auto-tuning if next possible concurrency would reach beyond disk capacity.
  1786  			return true, false, ""
  1787  		}
  1788  	}
  1789  
  1790  	return true, autotune, ""
  1791  }
  1792  
  1793  // DriveSpeedtestHandler - reports throughput of drives available in the cluster
  1794  func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
  1795  	ctx := r.Context()
  1796  
  1797  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
  1798  	if objectAPI == nil {
  1799  		return
  1800  	}
  1801  
  1802  	// Freeze all incoming S3 API calls before running speedtest.
  1803  	globalNotificationSys.ServiceFreeze(ctx, true)
  1804  
  1805  	// unfreeze all incoming S3 API calls after speedtest.
  1806  	defer globalNotificationSys.ServiceFreeze(ctx, false)
  1807  
  1808  	serial := r.Form.Get("serial") == "true"
  1809  	blockSizeStr := r.Form.Get("blocksize")
  1810  	fileSizeStr := r.Form.Get("filesize")
  1811  
  1812  	blockSize, err := strconv.ParseUint(blockSizeStr, 10, 64)
  1813  	if err != nil {
  1814  		blockSize = 4 * humanize.MiByte // default value
  1815  	}
  1816  
  1817  	fileSize, err := strconv.ParseUint(fileSizeStr, 10, 64)
  1818  	if err != nil {
  1819  		fileSize = 1 * humanize.GiByte // default value
  1820  	}
  1821  
  1822  	opts := madmin.DriveSpeedTestOpts{
  1823  		Serial:    serial,
  1824  		BlockSize: blockSize,
  1825  		FileSize:  fileSize,
  1826  	}
  1827  
  1828  	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
  1829  	defer keepAliveTicker.Stop()
  1830  
  1831  	ch := globalNotificationSys.DriveSpeedTest(ctx, opts)
  1832  
  1833  	enc := json.NewEncoder(w)
  1834  	for {
  1835  		select {
  1836  		case <-ctx.Done():
  1837  			return
  1838  		case <-keepAliveTicker.C:
  1839  			// Write a blank entry to prevent client from disconnecting
  1840  			if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil {
  1841  				return
  1842  			}
  1843  			w.(http.Flusher).Flush()
  1844  		case result, ok := <-ch:
  1845  			if !ok {
  1846  				return
  1847  			}
  1848  			if err := enc.Encode(result); err != nil {
  1849  				return
  1850  			}
  1851  			w.(http.Flusher).Flush()
  1852  		}
  1853  	}
  1854  }
  1855  
// Admin API errors — error codes surfaced to clients when a server
// update operation fails (names suggest usage by the update handler;
// confirm at call sites).
const (
	// AdminUpdateUnexpectedFailure - the update failed for an unexpected reason.
	AdminUpdateUnexpectedFailure = "XMinioAdminUpdateUnexpectedFailure"
	// AdminUpdateURLNotReachable - the update URL could not be reached.
	AdminUpdateURLNotReachable   = "XMinioAdminUpdateURLNotReachable"
	// AdminUpdateApplyFailure - the downloaded update could not be applied.
	AdminUpdateApplyFailure      = "XMinioAdminUpdateApplyFailure"
)
  1862  
  1863  // Returns true if the madmin.TraceInfo should be traced,
  1864  // false if certain conditions are not met.
  1865  // - input entry is not of the type *madmin.TraceInfo*
  1866  // - errOnly entries are to be traced, not status code 2xx, 3xx.
  1867  // - madmin.TraceInfo type is asked by opts
  1868  func shouldTrace(trcInfo madmin.TraceInfo, opts madmin.ServiceTraceOpts) (shouldTrace bool) {
  1869  	// Reject all unwanted types.
  1870  	want := opts.TraceTypes()
  1871  	if !want.Contains(trcInfo.TraceType) {
  1872  		return false
  1873  	}
  1874  
  1875  	isHTTP := trcInfo.TraceType.Overlaps(madmin.TraceInternal|madmin.TraceS3) && trcInfo.HTTP != nil
  1876  
  1877  	// Check latency...
  1878  	if opts.Threshold > 0 && trcInfo.Duration < opts.Threshold {
  1879  		return false
  1880  	}
  1881  
  1882  	// Check internal path
  1883  	isInternal := isHTTP && HasPrefix(trcInfo.HTTP.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
  1884  	if isInternal && !opts.Internal {
  1885  		return false
  1886  	}
  1887  
  1888  	// Filter non-errors.
  1889  	if isHTTP && opts.OnlyErrors && trcInfo.HTTP.RespInfo.StatusCode < http.StatusBadRequest {
  1890  		return false
  1891  	}
  1892  
  1893  	return true
  1894  }
  1895  
  1896  func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err error) {
  1897  	if err := opts.ParseParams(r); err != nil {
  1898  		return opts, err
  1899  	}
  1900  	// Support deprecated 'all' query
  1901  	if r.Form.Get("all") == "true" {
  1902  		opts.S3 = true
  1903  		opts.Internal = true
  1904  		opts.Storage = true
  1905  		opts.OS = true
  1906  		// Older mc - cannot deal with more types...
  1907  	}
  1908  	return
  1909  }
  1910  
// TraceHandler - POST /minio/admin/v3/trace
// ----------
// The handler sends http trace to the connected HTTP client.
// Entries from this node and all peers are merged onto one channel and
// streamed as raw JSON until the client disconnects.
func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.TraceAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	traceOpts, err := extractTraceOptions(r)
	if err != nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}
	setEventStreamHeaders(w)

	// Trace Publisher and peer-trace-client uses nonblocking send and hence does not wait for slow receivers.
	// Keep 100k buffered channel.
	// If receiver cannot keep up with that we drop events.
	traceCh := make(chan []byte, 100000)
	peers, _ := newPeerRestClients(globalEndpoints)
	// Subscribe to locally generated traces; entries are filtered through
	// shouldTrace before being queued on traceCh.
	err = globalTrace.SubscribeJSON(traceOpts.TraceTypes(), traceCh, ctx.Done(), func(entry madmin.TraceInfo) bool {
		return shouldTrace(entry, traceOpts)
	}, nil)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Publish bootstrap events that have already occurred before client could subscribe.
	if traceOpts.TraceTypes().Contains(madmin.TraceBootstrap) {
		go globalBootstrapTracer.Publish(ctx, globalTrace)
	}

	// Also collect trace entries from every remote peer into traceCh.
	for _, peer := range peers {
		if peer == nil {
			continue
		}
		peer.Trace(ctx, traceCh, traceOpts)
	}

	keepAliveTicker := time.NewTicker(time.Second)
	defer keepAliveTicker.Stop()

	// Stream entries until the client disconnects or the context ends,
	// writing a single-space keep-alive when the stream is idle.
	for {
		select {
		case entry := <-traceCh:
			if _, err := w.Write(entry); err != nil {
				return
			}
			// Return the entry's buffer to the shared pool.
			grid.PutByteBuffer(entry)
			if len(traceCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
			}
		case <-keepAliveTicker.C:
			// Skip the keep-alive while entries are still queued.
			if len(traceCh) > 0 {
				continue
			}
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-ctx.Done():
			return
		}
	}
}
  1983  
// The ConsoleLogHandler handler sends console logs to the connected HTTP client.
// Local entries are JSON-encoded by a helper goroutine; peers push their
// already-encoded entries onto the same output channel.
func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConsoleLogAdminAction)
	if objectAPI == nil {
		return
	}
	// Optional node filter; empty means all nodes.
	node := r.Form.Get("node")
	// limit buffered console entries if client requested it.
	limitStr := r.Form.Get("limit")
	limitLines, err := strconv.Atoi(limitStr)
	if err != nil {
		// Default to the last 10 buffered lines.
		limitLines = 10
	}

	logKind := madmin.LogKind(strings.ToUpper(r.Form.Get("logType"))).LogMask()
	if logKind == 0 {
		logKind = madmin.LogMaskAll
	}

	// Avoid reusing tcp connection if read timeout is hit
	// This is needed to make r.Context().Done() work as
	// expected in case of read timeout
	w.Header().Set("Connection", "close")

	setEventStreamHeaders(w)

	// logCh receives local log entries; encodedCh carries JSON-encoded
	// entries from this node and from remote peers.
	logCh := make(chan log.Info, 1000)
	peers, _ := newPeerRestClients(globalEndpoints)
	encodedCh := make(chan []byte, 1000+len(peers)*1000)
	err = globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
	// Convert local entries to JSON
	go func() {
		var buf bytes.Buffer
		enc := json.NewEncoder(&buf)
		for {
			select {
			case <-ctx.Done():
				return
			case li := <-logCh:
				if !li.SendLog(node, logKind) {
					continue
				}
				buf.Reset()
				if err := enc.Encode(li); err != nil {
					continue
				}
				select {
				case <-ctx.Done():
					return
				// Copy into a pooled buffer; the writer loop below
				// returns it via grid.PutByteBuffer after sending.
				case encodedCh <- append(grid.GetByteBuffer()[:0], buf.Bytes()...):
				}
			}
		}
	}()

	// Collect from matching peers
	for _, peer := range peers {
		if peer == nil {
			continue
		}
		if node == "" || strings.EqualFold(peer.host.Name, node) {
			peer.ConsoleLog(ctx, logKind, encodedCh)
		}
	}

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()
	// Stream encoded entries until the client disconnects, writing a
	// single-space keep-alive when the stream is idle.
	for {
		select {
		case log, ok := <-encodedCh:
			if !ok {
				return
			}
			_, err = w.Write(log)
			if err != nil {
				return
			}
			// Return the pooled buffer allocated by the encoder goroutine.
			grid.PutByteBuffer(log)
			if len(logCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
			}
		case <-keepAliveTicker.C:
			// Skip the keep-alive while entries are still pending.
			if len(logCh) > 0 {
				continue
			}
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-ctx.Done():
			return
		}
	}
}
  2084  
  2085  // KMSCreateKeyHandler - POST /minio/admin/v3/kms/key/create?key-id=<master-key-id>
  2086  func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) {
  2087  	ctx := r.Context()
  2088  
  2089  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSCreateKeyAdminAction)
  2090  	if objectAPI == nil {
  2091  		return
  2092  	}
  2093  
  2094  	if GlobalKMS == nil {
  2095  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
  2096  		return
  2097  	}
  2098  
  2099  	if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
  2100  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
  2101  		return
  2102  	}
  2103  	writeSuccessResponseHeadersOnly(w)
  2104  }
  2105  
  2106  // KMSKeyStatusHandler - GET /minio/admin/v3/kms/status
  2107  func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) {
  2108  	ctx := r.Context()
  2109  
  2110  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSKeyStatusAdminAction)
  2111  	if objectAPI == nil {
  2112  		return
  2113  	}
  2114  
  2115  	if GlobalKMS == nil {
  2116  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
  2117  		return
  2118  	}
  2119  
  2120  	stat, err := GlobalKMS.Stat(ctx)
  2121  	if err != nil {
  2122  		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
  2123  		return
  2124  	}
  2125  
  2126  	status := madmin.KMSStatus{
  2127  		Name:         stat.Name,
  2128  		DefaultKeyID: stat.DefaultKey,
  2129  		Endpoints:    make(map[string]madmin.ItemState, len(stat.Endpoints)),
  2130  	}
  2131  	for _, endpoint := range stat.Endpoints {
  2132  		status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
  2133  	}
  2134  
  2135  	resp, err := json.Marshal(status)
  2136  	if err != nil {
  2137  		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
  2138  		return
  2139  	}
  2140  	writeSuccessResponseJSON(w, resp)
  2141  }
  2142  
  2143  // KMSKeyStatusHandler - GET /minio/admin/v3/kms/key/status?key-id=<master-key-id>
  2144  func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
  2145  	ctx := r.Context()
  2146  
  2147  	objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSKeyStatusAdminAction)
  2148  	if objectAPI == nil {
  2149  		return
  2150  	}
  2151  
  2152  	if GlobalKMS == nil {
  2153  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
  2154  		return
  2155  	}
  2156  
  2157  	stat, err := GlobalKMS.Stat(ctx)
  2158  	if err != nil {
  2159  		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
  2160  		return
  2161  	}
  2162  
  2163  	keyID := r.Form.Get("key-id")
  2164  	if keyID == "" {
  2165  		keyID = stat.DefaultKey
  2166  	}
  2167  	response := madmin.KMSKeyStatus{
  2168  		KeyID: keyID,
  2169  	}
  2170  
  2171  	kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
  2172  	// 1. Generate a new key using the KMS.
  2173  	key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
  2174  	if err != nil {
  2175  		response.EncryptionErr = err.Error()
  2176  		resp, err := json.Marshal(response)
  2177  		if err != nil {
  2178  			writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
  2179  			return
  2180  		}
  2181  		writeSuccessResponseJSON(w, resp)
  2182  		return
  2183  	}
  2184  
  2185  	// 2. Verify that we can indeed decrypt the (encrypted) key
  2186  	decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
  2187  	if err != nil {
  2188  		response.DecryptionErr = err.Error()
  2189  		resp, err := json.Marshal(response)
  2190  		if err != nil {
  2191  			writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
  2192  			return
  2193  		}
  2194  		writeSuccessResponseJSON(w, resp)
  2195  		return
  2196  	}
  2197  
  2198  	// 3. Compare generated key with decrypted key
  2199  	if subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1 {
  2200  		response.DecryptionErr = "The generated and the decrypted data key do not match"
  2201  		resp, err := json.Marshal(response)
  2202  		if err != nil {
  2203  			writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
  2204  			return
  2205  		}
  2206  		writeSuccessResponseJSON(w, resp)
  2207  		return
  2208  	}
  2209  
  2210  	resp, err := json.Marshal(response)
  2211  	if err != nil {
  2212  		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
  2213  		return
  2214  	}
  2215  	writeSuccessResponseJSON(w, resp)
  2216  }
  2217  
// getPoolsInfo aggregates per-erasure-set information (usage counts, raw
// capacity, healing state) from the given drives, keyed by pool index and
// then set index. It requires the object layer to be an initialized
// *erasureServerPools.
func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int]madmin.ErasureSetInfo, error) {
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		return nil, errServerNotInitialized
	}

	z, ok := objectAPI.(*erasureServerPools)
	if !ok {
		return nil, errServerNotInitialized
	}

	poolsInfo := make(map[int]map[int]madmin.ErasureSetInfo)
	for _, d := range allDisks {
		poolInfo, ok := poolsInfo[d.PoolIndex]
		if !ok {
			poolInfo = make(map[int]madmin.ErasureSetInfo)
		}
		erasureSet, ok := poolInfo[d.SetIndex]
		if !ok {
			// First drive seen for this set: seed it with usage numbers
			// from the set's data usage cache. Best-effort — a cache load
			// failure simply leaves the counts at zero.
			erasureSet.ID = d.SetIndex
			cache := dataUsageCache{}
			if err := cache.load(ctx, z.serverPools[d.PoolIndex].sets[d.SetIndex], dataUsageCacheName); err == nil {
				dataUsageInfo := cache.dui(dataUsageRoot, nil)
				erasureSet.ObjectsCount = dataUsageInfo.ObjectsTotalCount
				erasureSet.VersionsCount = dataUsageInfo.VersionsTotalCount
				erasureSet.DeleteMarkersCount = dataUsageInfo.DeleteMarkersTotalCount
				erasureSet.Usage = dataUsageInfo.ObjectsTotalSize
			}
		}
		// Accumulate per-drive stats into the set's totals.
		erasureSet.RawCapacity += d.TotalSpace
		erasureSet.RawUsage += d.UsedSpace
		if d.Healing {
			// NOTE(review): HealDisks is set to 1, not incremented, when
			// any drive in the set is healing — confirm whether a count
			// was intended.
			erasureSet.HealDisks = 1
		}
		poolInfo[d.SetIndex] = erasureSet
		poolsInfo[d.PoolIndex] = poolInfo
	}
	return poolsInfo, nil
}
  2257  
  2258  func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) madmin.InfoMessage {
  2259  	ldap := madmin.LDAP{}
  2260  	if globalIAMSys.LDAPConfig.Enabled() {
  2261  		ldapConn, err := globalIAMSys.LDAPConfig.LDAP.Connect()
  2262  		//nolint:gocritic
  2263  		if err != nil {
  2264  			ldap.Status = string(madmin.ItemOffline)
  2265  		} else if ldapConn == nil {
  2266  			ldap.Status = "Not Configured"
  2267  		} else {
  2268  			// Close ldap connection to avoid leaks.
  2269  			ldapConn.Close()
  2270  			ldap.Status = string(madmin.ItemOnline)
  2271  		}
  2272  	}
  2273  
  2274  	log, audit := fetchLoggerInfo(ctx)
  2275  
  2276  	// Get the notification target info
  2277  	notifyTarget := fetchLambdaInfo()
  2278  
  2279  	local := getLocalServerProperty(globalEndpoints, r, metrics)
  2280  	servers := globalNotificationSys.ServerInfo(metrics)
  2281  	servers = append(servers, local)
  2282  
  2283  	var poolsInfo map[int]map[int]madmin.ErasureSetInfo
  2284  	var backend madmin.ErasureBackend
  2285  
  2286  	mode := madmin.ItemInitializing
  2287  
  2288  	buckets := madmin.Buckets{}
  2289  	objects := madmin.Objects{}
  2290  	versions := madmin.Versions{}
  2291  	deleteMarkers := madmin.DeleteMarkers{}
  2292  	usage := madmin.Usage{}
  2293  
  2294  	objectAPI := newObjectLayerFn()
  2295  	if objectAPI != nil {
  2296  		mode = madmin.ItemOnline
  2297  
  2298  		// Load data usage
  2299  		dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
  2300  		if err == nil {
  2301  			buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount}
  2302  			objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount}
  2303  			versions = madmin.Versions{Count: dataUsageInfo.VersionsTotalCount}
  2304  			deleteMarkers = madmin.DeleteMarkers{Count: dataUsageInfo.DeleteMarkersTotalCount}
  2305  			usage = madmin.Usage{Size: dataUsageInfo.ObjectsTotalSize}
  2306  		} else {
  2307  			buckets = madmin.Buckets{Error: err.Error()}
  2308  			objects = madmin.Objects{Error: err.Error()}
  2309  			deleteMarkers = madmin.DeleteMarkers{Error: err.Error()}
  2310  			usage = madmin.Usage{Error: err.Error()}
  2311  		}
  2312  
  2313  		// Fetching the backend information
  2314  		backendInfo := objectAPI.BackendInfo()
  2315  		// Calculate the number of online/offline disks of all nodes
  2316  		var allDisks []madmin.Disk
  2317  		for _, s := range servers {
  2318  			allDisks = append(allDisks, s.Disks...)
  2319  		}
  2320  		onlineDisks, offlineDisks := getOnlineOfflineDisksStats(allDisks)
  2321  
  2322  		backend = madmin.ErasureBackend{
  2323  			Type:             madmin.ErasureType,
  2324  			OnlineDisks:      onlineDisks.Sum(),
  2325  			OfflineDisks:     offlineDisks.Sum(),
  2326  			StandardSCParity: backendInfo.StandardSCParity,
  2327  			RRSCParity:       backendInfo.RRSCParity,
  2328  			TotalSets:        backendInfo.TotalSets,
  2329  			DrivesPerSet:     backendInfo.DrivesPerSet,
  2330  		}
  2331  
  2332  		if pools {
  2333  			poolsInfo, _ = getPoolsInfo(ctx, allDisks)
  2334  		}
  2335  	}
  2336  
  2337  	domain := globalDomainNames
  2338  	services := madmin.Services{
  2339  		KMS:           fetchKMSStatus(),
  2340  		KMSStatus:     fetchKMSStatusV2(ctx),
  2341  		LDAP:          ldap,
  2342  		Logger:        log,
  2343  		Audit:         audit,
  2344  		Notifications: notifyTarget,
  2345  	}
  2346  
  2347  	return madmin.InfoMessage{
  2348  		Mode:          string(mode),
  2349  		Domain:        domain,
  2350  		Region:        globalSite.Region,
  2351  		SQSARN:        globalEventNotifier.GetARNList(false),
  2352  		DeploymentID:  globalDeploymentID(),
  2353  		Buckets:       buckets,
  2354  		Objects:       objects,
  2355  		Versions:      versions,
  2356  		DeleteMarkers: deleteMarkers,
  2357  		Usage:         usage,
  2358  		Services:      services,
  2359  		Backend:       backend,
  2360  		Servers:       servers,
  2361  		Pools:         poolsInfo,
  2362  	}
  2363  }
  2364  
  2365  func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
  2366  	ctx, cancel := context.WithCancel(dctx)
  2367  	defer cancel()
  2368  
  2369  	ki := madmin.KubernetesInfo{}
  2370  
  2371  	req, err := http.NewRequestWithContext(ctx, http.MethodGet, kubernetesVersionEndpoint, nil)
  2372  	if err != nil {
  2373  		ki.Error = err.Error()
  2374  		return ki
  2375  	}
  2376  
  2377  	client := &http.Client{
  2378  		Transport: globalHealthChkTransport,
  2379  		Timeout:   10 * time.Second,
  2380  	}
  2381  
  2382  	resp, err := client.Do(req)
  2383  	if err != nil {
  2384  		ki.Error = err.Error()
  2385  		return ki
  2386  	}
  2387  	defer resp.Body.Close()
  2388  	decoder := json.NewDecoder(resp.Body)
  2389  	if err := decoder.Decode(&ki); err != nil {
  2390  		ki.Error = err.Error()
  2391  	}
  2392  	return ki
  2393  }
  2394  
  2395  func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *url.Values, healthInfoCh chan madmin.HealthInfo, healthInfo madmin.HealthInfo) {
  2396  	hostAnonymizer := createHostAnonymizer()
  2397  
  2398  	anonParam := query.Get(anonymizeParam)
  2399  	// anonAddr - Anonymizes hosts in given input string
  2400  	// (only if the anonymize param is set to srict).
  2401  	anonAddr := func(addr string) string {
  2402  		if anonParam != anonymizeStrict {
  2403  			return addr
  2404  		}
  2405  		newAddr, found := hostAnonymizer[addr]
  2406  		if found {
  2407  			return newAddr
  2408  		}
  2409  
  2410  		// If we reach here, it means that the given addr doesn't contain any of the hosts.
  2411  		// Return it as is. Can happen for drive paths in non-distributed mode
  2412  		return addr
  2413  	}
  2414  
  2415  	// anonymizedAddr - Updated the addr of the node info with anonymized one
  2416  	anonymizeAddr := func(info madmin.NodeInfo) {
  2417  		info.SetAddr(anonAddr(info.GetAddr()))
  2418  	}
  2419  
  2420  	partialWrite := func(oinfo madmin.HealthInfo) {
  2421  		select {
  2422  		case healthInfoCh <- oinfo:
  2423  		case <-healthCtx.Done():
  2424  		}
  2425  	}
  2426  
  2427  	getAndWritePlatformInfo := func() {
  2428  		if IsKubernetes() {
  2429  			healthInfo.Sys.KubernetesInfo = getKubernetesInfo(healthCtx)
  2430  			partialWrite(healthInfo)
  2431  		}
  2432  	}
  2433  
  2434  	getAndWriteCPUs := func() {
  2435  		if query.Get("syscpu") == "true" {
  2436  			localCPUInfo := madmin.GetCPUs(healthCtx, globalLocalNodeName)
  2437  			anonymizeAddr(&localCPUInfo)
  2438  			healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, localCPUInfo)
  2439  
  2440  			peerCPUInfo := globalNotificationSys.GetCPUs(healthCtx)
  2441  			for _, cpuInfo := range peerCPUInfo {
  2442  				anonymizeAddr(&cpuInfo)
  2443  				healthInfo.Sys.CPUInfo = append(healthInfo.Sys.CPUInfo, cpuInfo)
  2444  			}
  2445  
  2446  			partialWrite(healthInfo)
  2447  		}
  2448  	}
  2449  
  2450  	getAndWritePartitions := func() {
  2451  		if query.Get("sysdrivehw") == "true" {
  2452  			localPartitions := madmin.GetPartitions(healthCtx, globalLocalNodeName)
  2453  			anonymizeAddr(&localPartitions)
  2454  			healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, localPartitions)
  2455  
  2456  			peerPartitions := globalNotificationSys.GetPartitions(healthCtx)
  2457  			for _, p := range peerPartitions {
  2458  				anonymizeAddr(&p)
  2459  				healthInfo.Sys.Partitions = append(healthInfo.Sys.Partitions, p)
  2460  			}
  2461  			partialWrite(healthInfo)
  2462  		}
  2463  	}
  2464  
  2465  	getAndWriteNetInfo := func() {
  2466  		if query.Get(string(madmin.HealthDataTypeSysNet)) == "true" {
  2467  			localNetInfo := madmin.GetNetInfo(globalLocalNodeName, globalInternodeInterface)
  2468  			healthInfo.Sys.NetInfo = append(healthInfo.Sys.NetInfo, localNetInfo)
  2469  
  2470  			peerNetInfos := globalNotificationSys.GetNetInfo(healthCtx)
  2471  			for _, n := range peerNetInfos {
  2472  				anonymizeAddr(&n)
  2473  				healthInfo.Sys.NetInfo = append(healthInfo.Sys.NetInfo, n)
  2474  			}
  2475  			partialWrite(healthInfo)
  2476  		}
  2477  	}
  2478  
  2479  	getAndWriteOSInfo := func() {
  2480  		if query.Get("sysosinfo") == "true" {
  2481  			localOSInfo := madmin.GetOSInfo(healthCtx, globalLocalNodeName)
  2482  			anonymizeAddr(&localOSInfo)
  2483  			healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, localOSInfo)
  2484  
  2485  			peerOSInfos := globalNotificationSys.GetOSInfo(healthCtx)
  2486  			for _, o := range peerOSInfos {
  2487  				anonymizeAddr(&o)
  2488  				healthInfo.Sys.OSInfo = append(healthInfo.Sys.OSInfo, o)
  2489  			}
  2490  			partialWrite(healthInfo)
  2491  		}
  2492  	}
  2493  
  2494  	getAndWriteMemInfo := func() {
  2495  		if query.Get("sysmem") == "true" {
  2496  			localMemInfo := madmin.GetMemInfo(healthCtx, globalLocalNodeName)
  2497  			anonymizeAddr(&localMemInfo)
  2498  			healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, localMemInfo)
  2499  
  2500  			peerMemInfos := globalNotificationSys.GetMemInfo(healthCtx)
  2501  			for _, m := range peerMemInfos {
  2502  				anonymizeAddr(&m)
  2503  				healthInfo.Sys.MemInfo = append(healthInfo.Sys.MemInfo, m)
  2504  			}
  2505  			partialWrite(healthInfo)
  2506  		}
  2507  	}
  2508  
  2509  	getAndWriteSysErrors := func() {
  2510  		if query.Get(string(madmin.HealthDataTypeSysErrors)) == "true" {
  2511  			localSysErrors := madmin.GetSysErrors(healthCtx, globalLocalNodeName)
  2512  			anonymizeAddr(&localSysErrors)
  2513  			healthInfo.Sys.SysErrs = append(healthInfo.Sys.SysErrs, localSysErrors)
  2514  			partialWrite(healthInfo)
  2515  
  2516  			peerSysErrs := globalNotificationSys.GetSysErrors(healthCtx)
  2517  			for _, se := range peerSysErrs {
  2518  				anonymizeAddr(&se)
  2519  				healthInfo.Sys.SysErrs = append(healthInfo.Sys.SysErrs, se)
  2520  			}
  2521  			partialWrite(healthInfo)
  2522  		}
  2523  	}
  2524  
  2525  	getAndWriteSysConfig := func() {
  2526  		if query.Get(string(madmin.HealthDataTypeSysConfig)) == "true" {
  2527  			localSysConfig := madmin.GetSysConfig(healthCtx, globalLocalNodeName)
  2528  			anonymizeAddr(&localSysConfig)
  2529  			healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, localSysConfig)
  2530  			partialWrite(healthInfo)
  2531  
  2532  			peerSysConfig := globalNotificationSys.GetSysConfig(healthCtx)
  2533  			for _, sc := range peerSysConfig {
  2534  				anonymizeAddr(&sc)
  2535  				healthInfo.Sys.SysConfig = append(healthInfo.Sys.SysConfig, sc)
  2536  			}
  2537  			partialWrite(healthInfo)
  2538  		}
  2539  	}
  2540  
  2541  	getAndWriteSysServices := func() {
  2542  		if query.Get(string(madmin.HealthDataTypeSysServices)) == "true" {
  2543  			localSysServices := madmin.GetSysServices(healthCtx, globalLocalNodeName)
  2544  			anonymizeAddr(&localSysServices)
  2545  			healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, localSysServices)
  2546  			partialWrite(healthInfo)
  2547  
  2548  			peerSysServices := globalNotificationSys.GetSysServices(healthCtx)
  2549  			for _, ss := range peerSysServices {
  2550  				anonymizeAddr(&ss)
  2551  				healthInfo.Sys.SysServices = append(healthInfo.Sys.SysServices, ss)
  2552  			}
  2553  			partialWrite(healthInfo)
  2554  		}
  2555  	}
  2556  
  2557  	// collect all realtime metrics except disk
  2558  	// disk metrics are already included under drive info of each server
  2559  	getRealtimeMetrics := func() *madmin.RealtimeMetrics {
  2560  		var m madmin.RealtimeMetrics
  2561  		var types madmin.MetricType = madmin.MetricsAll &^ madmin.MetricsDisk
  2562  		mLocal := collectLocalMetrics(types, collectMetricsOpts{})
  2563  		m.Merge(&mLocal)
  2564  		cctx, cancel := context.WithTimeout(healthCtx, time.Second/2)
  2565  		mRemote := collectRemoteMetrics(cctx, types, collectMetricsOpts{})
  2566  		cancel()
  2567  		m.Merge(&mRemote)
  2568  		for idx, host := range m.Hosts {
  2569  			m.Hosts[idx] = anonAddr(host)
  2570  		}
  2571  		for host, metrics := range m.ByHost {
  2572  			m.ByHost[anonAddr(host)] = metrics
  2573  			delete(m.ByHost, host)
  2574  		}
  2575  		return &m
  2576  	}
  2577  
  2578  	anonymizeCmdLine := func(cmdLine string) string {
  2579  		if anonParam != anonymizeStrict {
  2580  			return cmdLine
  2581  		}
  2582  
  2583  		if !globalIsDistErasure {
  2584  			// FS mode - single server - hard code to `server1`
  2585  			anonCmdLine := strings.ReplaceAll(cmdLine, globalLocalNodeName, "server1")
  2586  			if len(globalMinioConsoleHost) > 0 {
  2587  				anonCmdLine = strings.ReplaceAll(anonCmdLine, globalMinioConsoleHost, "server1")
  2588  			}
  2589  			return anonCmdLine
  2590  		}
  2591  
  2592  		// Server start command regex groups:
  2593  		// 1 - minio server
  2594  		// 2 - flags e.g. `--address :9000 --certs-dir /etc/minio/certs`
  2595  		// 3 - pool args e.g. `https://node{01...16}.domain/data/disk{001...204} https://node{17...32}.domain/data/disk{001...204}`
  2596  		re := regexp.MustCompile(`^(.*minio\s+server\s+)(--[^\s]+\s+[^\s]+\s+)*(.*)`)
  2597  
  2598  		// stays unchanged in the anonymized version
  2599  		cmdLineWithoutPools := re.ReplaceAllString(cmdLine, `$1$2`)
  2600  
  2601  		// to be anonymized
  2602  		poolsArgs := re.ReplaceAllString(cmdLine, `$3`)
  2603  		var anonPools []string
  2604  
  2605  		if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) {
  2606  			// No ellipses pattern. Anonymize host name from every pool arg
  2607  			pools := strings.Fields(poolsArgs)
  2608  			anonPools = make([]string, len(pools))
  2609  			for index, arg := range pools {
  2610  				anonPools[index] = anonAddr(arg)
  2611  			}
  2612  			return cmdLineWithoutPools + strings.Join(anonPools, " ")
  2613  		}
  2614  
  2615  		// Ellipses pattern in pool args. Regex groups:
  2616  		// 1 - server prefix
  2617  		// 2 - number sequence for servers
  2618  		// 3 - server suffix
  2619  		// 4 - drive prefix (starting with /)
  2620  		// 5 - number sequence for drives
  2621  		// 6 - drive suffix
  2622  		re = regexp.MustCompile(`([^\s^{]*)({\d+...\d+})?([^\s^{^/]*)(/[^\s^{]*)({\d+...\d+})?([^\s]*)`)
  2623  		poolsMatches := re.FindAllStringSubmatch(poolsArgs, -1)
  2624  
  2625  		anonPools = make([]string, len(poolsMatches))
  2626  		idxMap := map[int]string{
  2627  			1: "spfx",
  2628  			3: "ssfx",
  2629  		}
  2630  		for pi, poolsMatch := range poolsMatches {
  2631  			// Replace the server prefix/suffix with anonymized ones
  2632  			for idx, lbl := range idxMap {
  2633  				if len(poolsMatch[idx]) > 0 {
  2634  					poolsMatch[idx] = fmt.Sprintf("%s%d", lbl, crc32.ChecksumIEEE([]byte(poolsMatch[idx])))
  2635  				}
  2636  			}
  2637  
  2638  			// Remove the original pools args present at index 0
  2639  			anonPools[pi] = strings.Join(poolsMatch[1:], "")
  2640  		}
  2641  		return cmdLineWithoutPools + strings.Join(anonPools, " ")
  2642  	}
  2643  
  2644  	anonymizeProcInfo := func(p *madmin.ProcInfo) {
  2645  		p.CmdLine = anonymizeCmdLine(p.CmdLine)
  2646  		anonymizeAddr(p)
  2647  	}
  2648  
  2649  	getAndWriteProcInfo := func() {
  2650  		if query.Get("sysprocess") == "true" {
  2651  			localProcInfo := madmin.GetProcInfo(healthCtx, globalLocalNodeName)
  2652  			anonymizeProcInfo(&localProcInfo)
  2653  			healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, localProcInfo)
  2654  			peerProcInfos := globalNotificationSys.GetProcInfo(healthCtx)
  2655  			for _, p := range peerProcInfos {
  2656  				anonymizeProcInfo(&p)
  2657  				healthInfo.Sys.ProcInfo = append(healthInfo.Sys.ProcInfo, p)
  2658  			}
  2659  			partialWrite(healthInfo)
  2660  		}
  2661  	}
  2662  
  2663  	getAndWriteMinioConfig := func() {
  2664  		if query.Get("minioconfig") == "true" {
  2665  			config, err := readServerConfig(healthCtx, objectAPI, nil)
  2666  			if err != nil {
  2667  				healthInfo.Minio.Config = madmin.MinioConfig{
  2668  					Error: err.Error(),
  2669  				}
  2670  			} else {
  2671  				healthInfo.Minio.Config = madmin.MinioConfig{
  2672  					Config: config.RedactSensitiveInfo(),
  2673  				}
  2674  			}
  2675  			partialWrite(healthInfo)
  2676  		}
  2677  	}
  2678  
  2679  	anonymizeNetwork := func(network map[string]string) map[string]string {
  2680  		anonNetwork := map[string]string{}
  2681  		for endpoint, status := range network {
  2682  			anonEndpoint := anonAddr(endpoint)
  2683  			anonNetwork[anonEndpoint] = status
  2684  		}
  2685  		return anonNetwork
  2686  	}
  2687  
  2688  	anonymizeDrives := func(drives []madmin.Disk) []madmin.Disk {
  2689  		anonDrives := []madmin.Disk{}
  2690  		for _, drive := range drives {
  2691  			drive.Endpoint = anonAddr(drive.Endpoint)
  2692  			anonDrives = append(anonDrives, drive)
  2693  		}
  2694  		return anonDrives
  2695  	}
  2696  
  2697  	go func() {
  2698  		defer xioutil.SafeClose(healthInfoCh)
  2699  
  2700  		partialWrite(healthInfo) // Write first message with only version and deployment id populated
  2701  		getAndWritePlatformInfo()
  2702  		getAndWriteCPUs()
  2703  		getAndWritePartitions()
  2704  		getAndWriteNetInfo()
  2705  		getAndWriteOSInfo()
  2706  		getAndWriteMemInfo()
  2707  		getAndWriteProcInfo()
  2708  		getAndWriteMinioConfig()
  2709  		getAndWriteSysErrors()
  2710  		getAndWriteSysServices()
  2711  		getAndWriteSysConfig()
  2712  
  2713  		if query.Get("minioinfo") == "true" {
  2714  			infoMessage := getServerInfo(healthCtx, false, true, nil)
  2715  			servers := make([]madmin.ServerInfo, 0, len(infoMessage.Servers))
  2716  			for _, server := range infoMessage.Servers {
  2717  				anonEndpoint := anonAddr(server.Endpoint)
  2718  				servers = append(servers, madmin.ServerInfo{
  2719  					State:    server.State,
  2720  					Endpoint: anonEndpoint,
  2721  					Uptime:   server.Uptime,
  2722  					Version:  server.Version,
  2723  					CommitID: server.CommitID,
  2724  					Network:  anonymizeNetwork(server.Network),
  2725  					Drives:   anonymizeDrives(server.Disks),
  2726  					PoolNumber: func() int {
  2727  						if len(server.PoolNumbers) == 1 {
  2728  							return server.PoolNumbers[0]
  2729  						}
  2730  						return math.MaxInt // this indicates that its unset.
  2731  					}(),
  2732  					PoolNumbers: server.PoolNumbers,
  2733  					MemStats: madmin.MemStats{
  2734  						Alloc:      server.MemStats.Alloc,
  2735  						TotalAlloc: server.MemStats.TotalAlloc,
  2736  						Mallocs:    server.MemStats.Mallocs,
  2737  						Frees:      server.MemStats.Frees,
  2738  						HeapAlloc:  server.MemStats.HeapAlloc,
  2739  					},
  2740  					GoMaxProcs:     server.GoMaxProcs,
  2741  					NumCPU:         server.NumCPU,
  2742  					RuntimeVersion: server.RuntimeVersion,
  2743  					GCStats:        server.GCStats,
  2744  					MinioEnvVars:   server.MinioEnvVars,
  2745  				})
  2746  			}
  2747  
  2748  			tls := getTLSInfo()
  2749  			isK8s := IsKubernetes()
  2750  			isDocker := IsDocker()
  2751  			healthInfo.Minio.Info = madmin.MinioInfo{
  2752  				Mode:         infoMessage.Mode,
  2753  				Domain:       infoMessage.Domain,
  2754  				Region:       infoMessage.Region,
  2755  				SQSARN:       infoMessage.SQSARN,
  2756  				DeploymentID: infoMessage.DeploymentID,
  2757  				Buckets:      infoMessage.Buckets,
  2758  				Objects:      infoMessage.Objects,
  2759  				Usage:        infoMessage.Usage,
  2760  				Services:     infoMessage.Services,
  2761  				Backend:      infoMessage.Backend,
  2762  				Servers:      servers,
  2763  				TLS:          &tls,
  2764  				IsKubernetes: &isK8s,
  2765  				IsDocker:     &isDocker,
  2766  				Metrics:      getRealtimeMetrics(),
  2767  			}
  2768  			partialWrite(healthInfo)
  2769  		}
  2770  	}()
  2771  }
  2772  
// HealthInfoHandler - GET /minio/admin/v3/healthinfo
// ----------
// Get server health info
//
// Streams madmin.HealthInfo objects as JSON over an event stream: a first
// message with only version and deployment ID is written by the collector,
// followed by progressively richer partial updates as each diagnostic
// section completes (see fetchHealthInfo). While collection is running, a
// single space byte is written every 5s as a keep-alive.
func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
	if objectAPI == nil {
		return
	}

	query := r.Form
	healthInfoCh := make(chan madmin.HealthInfo)
	enc := json.NewEncoder(w)

	// Seed message: only timestamp, version and deployment ID are known
	// before collection starts.
	healthInfo := madmin.HealthInfo{
		TimeStamp: time.Now().UTC(),
		Version:   madmin.HealthInfoVersion,
		Minio: madmin.MinioHealthInfo{
			Info: madmin.MinioInfo{
				DeploymentID: globalDeploymentID(),
			},
		},
	}

	// errResp emits a single HealthInfo message whose Error field carries
	// the encoded API error response, then ends the stream.
	errResp := func(err error) {
		errorResponse := getAPIErrorResponse(ctx, toAdminAPIErr(ctx, err), r.URL.String(),
			w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID))
		encodedErrorResponse := encodeResponse(errorResponse)
		healthInfo.Error = string(encodedErrorResponse)
		logger.LogIf(ctx, enc.Encode(healthInfo))
	}

	deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
	if dstr := query.Get("deadline"); dstr != "" {
		var err error
		deadline, err = time.ParseDuration(dstr)
		if err != nil {
			errResp(err)
			return
		}
	}

	// Serialize health checks cluster-wide via a namespace lock.
	nsLock := objectAPI.NewNSLock(minioMetaBucket, "health-check-in-progress")
	lkctx, err := nsLock.GetLock(ctx, newDynamicTimeout(deadline, deadline))
	if err != nil { // returns a locked lock
		errResp(err)
		return
	}

	defer nsLock.Unlock(lkctx)
	healthCtx, healthCancel := context.WithTimeout(lkctx.Context(), deadline)
	defer healthCancel()

	// Collection runs asynchronously; partial results arrive on healthInfoCh.
	go fetchHealthInfo(healthCtx, objectAPI, &query, healthInfoCh, healthInfo)

	setCommonHeaders(w)
	setEventStreamHeaders(w)
	w.WriteHeader(http.StatusOK)

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for {
		select {
		case oinfo, ok := <-healthInfoCh:
			if !ok {
				// Channel closed by the collector: we are done.
				return
			}
			if err := enc.Encode(oinfo); err != nil {
				return
			}
			if len(healthInfoCh) == 0 {
				// Flush if nothing is queued
				w.(http.Flusher).Flush()
			}
		case <-ticker.C:
			// Keep-alive whitespace; harmless to a streaming JSON decoder.
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
			w.(http.Flusher).Flush()
		case <-healthCtx.Done():
			// Deadline elapsed or client disconnected.
			return
		}
	}
}
  2859  
  2860  func getTLSInfo() madmin.TLSInfo {
  2861  	tlsInfo := madmin.TLSInfo{
  2862  		TLSEnabled: globalIsTLS,
  2863  		Certs:      []madmin.TLSCert{},
  2864  	}
  2865  
  2866  	if globalIsTLS {
  2867  		for _, c := range globalPublicCerts {
  2868  			check := xxh3.Hash(c.RawIssuer)
  2869  			check ^= xxh3.Hash(c.RawSubjectPublicKeyInfo)
  2870  			// We XOR, so order doesn't matter.
  2871  			for _, v := range c.DNSNames {
  2872  				check ^= xxh3.HashString(v)
  2873  			}
  2874  			for _, v := range c.EmailAddresses {
  2875  				check ^= xxh3.HashString(v)
  2876  			}
  2877  			for _, v := range c.IPAddresses {
  2878  				check ^= xxh3.HashString(v.String())
  2879  			}
  2880  			for _, v := range c.URIs {
  2881  				check ^= xxh3.HashString(v.String())
  2882  			}
  2883  			tlsInfo.Certs = append(tlsInfo.Certs, madmin.TLSCert{
  2884  				PubKeyAlgo:    c.PublicKeyAlgorithm.String(),
  2885  				SignatureAlgo: c.SignatureAlgorithm.String(),
  2886  				NotBefore:     c.NotBefore,
  2887  				NotAfter:      c.NotAfter,
  2888  				Checksum:      strconv.FormatUint(check, 16),
  2889  			})
  2890  		}
  2891  	}
  2892  	return tlsInfo
  2893  }
  2894  
  2895  // ServerInfoHandler - GET /minio/admin/v3/info
  2896  // ----------
  2897  // Get server information
  2898  func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
  2899  	ctx := r.Context()
  2900  
  2901  	// Validate request signature.
  2902  	_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.ServerInfoAdminAction, "")
  2903  	if adminAPIErr != ErrNone {
  2904  		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
  2905  		return
  2906  	}
  2907  
  2908  	metrics := r.Form.Get(peerRESTMetrics) == "true"
  2909  
  2910  	// Marshal API response
  2911  	jsonBytes, err := json.Marshal(getServerInfo(ctx, true, metrics, r))
  2912  	if err != nil {
  2913  		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
  2914  		return
  2915  	}
  2916  
  2917  	// Reply with storage information (across nodes in a
  2918  	// distributed setup) as json.
  2919  	writeSuccessResponseJSON(w, jsonBytes)
  2920  }
  2921  
  2922  func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
  2923  	lambdaMap := make(map[string][]madmin.TargetIDStatus)
  2924  
  2925  	for _, tgt := range globalEventNotifier.Targets() {
  2926  		targetIDStatus := make(map[string]madmin.Status)
  2927  		active, _ := tgt.IsActive()
  2928  		targetID := tgt.ID()
  2929  		if active {
  2930  			targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOnline)}
  2931  		} else {
  2932  			targetIDStatus[targetID.ID] = madmin.Status{Status: string(madmin.ItemOffline)}
  2933  		}
  2934  		list := lambdaMap[targetID.Name]
  2935  		list = append(list, targetIDStatus)
  2936  		lambdaMap[targetID.Name] = list
  2937  	}
  2938  
  2939  	notify := make([]map[string][]madmin.TargetIDStatus, len(lambdaMap))
  2940  	counter := 0
  2941  	for key, value := range lambdaMap {
  2942  		v := make(map[string][]madmin.TargetIDStatus)
  2943  		v[key] = value
  2944  		notify[counter] = v
  2945  		counter++
  2946  	}
  2947  	return notify
  2948  }
  2949  
  2950  // fetchKMSStatus fetches KMS-related status information.
  2951  func fetchKMSStatus() madmin.KMS {
  2952  	kmsStat := madmin.KMS{}
  2953  	if GlobalKMS == nil {
  2954  		kmsStat.Status = "disabled"
  2955  		return kmsStat
  2956  	}
  2957  
  2958  	stat, err := GlobalKMS.Stat(context.Background())
  2959  	if err != nil {
  2960  		kmsStat.Status = string(madmin.ItemOffline)
  2961  		return kmsStat
  2962  	}
  2963  	if len(stat.Endpoints) == 0 {
  2964  		kmsStat.Status = stat.Name
  2965  		return kmsStat
  2966  	}
  2967  	kmsStat.Status = string(madmin.ItemOnline)
  2968  
  2969  	kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
  2970  	// 1. Generate a new key using the KMS.
  2971  	key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext)
  2972  	if err != nil {
  2973  		kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
  2974  	} else {
  2975  		kmsStat.Encrypt = "success"
  2976  	}
  2977  
  2978  	// 2. Verify that we can indeed decrypt the (encrypted) key
  2979  	decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
  2980  	switch {
  2981  	case err != nil:
  2982  		kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
  2983  	case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
  2984  		kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
  2985  	default:
  2986  		kmsStat.Decrypt = "success"
  2987  	}
  2988  	return kmsStat
  2989  }
  2990  
  2991  // fetchKMSStatusV2 fetches KMS-related status information for all instances
  2992  func fetchKMSStatusV2(ctx context.Context) []madmin.KMS {
  2993  	if GlobalKMS == nil {
  2994  		return []madmin.KMS{}
  2995  	}
  2996  
  2997  	results := GlobalKMS.Verify(ctx)
  2998  
  2999  	stats := []madmin.KMS{}
  3000  	for _, result := range results {
  3001  		stats = append(stats, madmin.KMS{
  3002  			Status:   result.Status,
  3003  			Endpoint: result.Endpoint,
  3004  			Encrypt:  result.Encrypt,
  3005  			Decrypt:  result.Decrypt,
  3006  			Version:  result.Version,
  3007  		})
  3008  	}
  3009  
  3010  	return stats
  3011  }
  3012  
  3013  func targetStatus(ctx context.Context, h logger.Target) madmin.Status {
  3014  	if h.IsOnline(ctx) {
  3015  		return madmin.Status{Status: string(madmin.ItemOnline)}
  3016  	}
  3017  	return madmin.Status{Status: string(madmin.ItemOffline)}
  3018  }
  3019  
  3020  // fetchLoggerDetails return log info
  3021  func fetchLoggerInfo(ctx context.Context) ([]madmin.Logger, []madmin.Audit) {
  3022  	var loggerInfo []madmin.Logger
  3023  	var auditloggerInfo []madmin.Audit
  3024  	for _, tgt := range logger.SystemTargets() {
  3025  		if tgt.Endpoint() != "" {
  3026  			loggerInfo = append(loggerInfo, madmin.Logger{tgt.String(): targetStatus(ctx, tgt)})
  3027  		}
  3028  	}
  3029  
  3030  	for _, tgt := range logger.AuditTargets() {
  3031  		if tgt.Endpoint() != "" {
  3032  			auditloggerInfo = append(auditloggerInfo, madmin.Audit{tgt.String(): targetStatus(ctx, tgt)})
  3033  		}
  3034  	}
  3035  
  3036  	return loggerInfo, auditloggerInfo
  3037  }
  3038  
  3039  func embedFileInZip(zipWriter *zip.Writer, name string, data []byte, fileMode os.FileMode) error {
  3040  	// Send profiling data to zip as file
  3041  	header, zerr := zip.FileInfoHeader(dummyFileInfo{
  3042  		name:    name,
  3043  		size:    int64(len(data)),
  3044  		mode:    fileMode,
  3045  		modTime: UTCNow(),
  3046  		isDir:   false,
  3047  		sys:     nil,
  3048  	})
  3049  	if zerr != nil {
  3050  		return zerr
  3051  	}
  3052  	header.Method = zip.Deflate
  3053  	zwriter, zerr := zipWriter.CreateHeader(header)
  3054  	if zerr != nil {
  3055  		return zerr
  3056  	}
  3057  	_, err := io.Copy(zwriter, bytes.NewReader(data))
  3058  	return err
  3059  }
  3060  
// getClusterMetaInfo gets information of the current cluster and
// returns it.
// This is not a critical function, and it is allowed
// to fail with a ten seconds timeout, returning nil.
func getClusterMetaInfo(ctx context.Context) []byte {
	objectAPI := newObjectLayerFn()
	if objectAPI == nil {
		// Object layer not initialized yet - nothing to report.
		return nil
	}

	// Add a ten seconds timeout because getting profiling data
	// is critical for debugging, in contrary to getting cluster info
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	resultCh := make(chan madmin.ClusterRegistrationInfo)

	// Collect cluster info in a goroutine so the select below can give
	// up when the timeout fires before collection finishes.
	go func() {
		defer xioutil.SafeClose(resultCh)

		ci := madmin.ClusterRegistrationInfo{}
		ci.Info.NoOfServerPools = len(globalEndpoints)
		ci.Info.NoOfServers = totalNodeCount()
		ci.Info.MinioVersion = Version

		si := objectAPI.StorageInfo(ctx, false)

		ci.Info.NoOfDrives = len(si.Disks)
		for _, disk := range si.Disks {
			ci.Info.TotalDriveSpace += disk.TotalSpace
			ci.Info.UsedDriveSpace += disk.UsedSpace
		}

		// Best-effort: on error the zero-valued usage info is reported.
		dataUsageInfo, _ := loadDataUsageFromBackend(ctx, objectAPI)

		ci.UsedCapacity = dataUsageInfo.ObjectsTotalSize
		ci.Info.NoOfBuckets = dataUsageInfo.BucketsCount
		ci.Info.NoOfObjects = dataUsageInfo.ObjectsTotalCount

		ci.DeploymentID = globalDeploymentID()
		ci.ClusterName = fmt.Sprintf("%d-servers-%d-disks-%s", ci.Info.NoOfServers, ci.Info.NoOfDrives, ci.Info.MinioVersion)

		select {
		case resultCh <- ci:
		case <-ctx.Done():
			// Receiver already gave up (timeout); avoid blocking forever.
			return
		}
	}()

	select {
	case <-ctx.Done():
		return nil
	case ci := <-resultCh:
		out, err := json.MarshalIndent(ci, "", "  ")
		if err != nil {
			logger.LogIf(ctx, err)
			return nil
		}
		return out
	}
}
  3122  
  3123  func bytesToPublicKey(pub []byte) (*rsa.PublicKey, error) {
  3124  	block, _ := pem.Decode(pub)
  3125  	if block != nil {
  3126  		pub = block.Bytes
  3127  	}
  3128  	key, err := x509.ParsePKCS1PublicKey(pub)
  3129  	if err != nil {
  3130  		return nil, err
  3131  	}
  3132  	return key, nil
  3133  }
  3134  
// getRawDataer provides an interface for getting raw FS files.
// Implementations stream the raw on-disk content of volume/file from each
// host/drive to fn; fn receives the reader plus the originating host,
// disk, filename and stat info for the entry.
type getRawDataer interface {
	GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error
}
  3139  
// InspectDataHandler - GET /minio/admin/v3/inspect-data
// ----------
// Download file from all nodes in a zip format
//
// The zip is always encrypted: either as an estream wrapped around the
// caller-supplied RSA public key (preferred), or - legacy mode - with a
// random AES-256-GCM key that is written in cleartext at the start of the
// response. Alongside the requested file, the cluster's format.json, the
// inspect arguments and a restart script are embedded into the archive.
func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	// Validate request signature.
	_, adminAPIErr := checkAdminRequestAuth(ctx, r, policy.InspectDataAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return
	}

	// Raw data access requires an object layer that implements getRawDataer.
	objLayer := newObjectLayerFn()
	o, ok := objLayer.(getRawDataer)
	if !ok {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
		return
	}

	if err := parseForm(r); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	volume := r.Form.Get("volume")
	if len(volume) == 0 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidBucketName), r.URL)
		return
	}
	file := r.Form.Get("file")
	if len(file) == 0 {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
		return
	}
	// Normalize OS-specific separators so the path check below is uniform.
	file = strings.ReplaceAll(file, string(os.PathSeparator), "/")

	// Reject attempts to traverse parent or absolute paths.
	if strings.Contains(file, "..") || strings.Contains(volume, "..") {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
		return
	}

	var publicKey *rsa.PublicKey

	publicKeyB64 := r.Form.Get("public-key")
	if publicKeyB64 != "" {
		publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKeyB64)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		publicKey, err = bytesToPublicKey(publicKeyBytes)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}

	// Write a version for making *incompatible* changes.
	// The AdminClient will reject any version it does not know.
	var inspectZipW *zip.Writer
	if publicKey != nil {
		// Preferred path: estream encrypted with the caller's public key.
		w.WriteHeader(200)
		stream := estream.NewWriter(w)
		defer stream.Close()

		// cluster.info is encrypted to the SUBNET admin key so only
		// MinIO support can read it.
		clusterKey, err := bytesToPublicKey(getSubnetAdminPublicKey())
		if err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		err = stream.AddKeyEncrypted(clusterKey)
		if err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		if b := getClusterMetaInfo(ctx); len(b) > 0 {
			w, err := stream.AddEncryptedStream("cluster.info", nil)
			if err != nil {
				logger.LogIf(ctx, err)
				return
			}
			// NOTE(review): Write/Close errors ignored here; best-effort.
			w.Write(b)
			w.Close()
		}

		// Add new key for inspect data.
		if err := stream.AddKeyEncrypted(publicKey); err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		encStream, err := stream.AddEncryptedStream("inspect.zip", nil)
		if err != nil {
			logger.LogIf(ctx, stream.AddError(err.Error()))
			return
		}
		defer encStream.Close()

		inspectZipW = zip.NewWriter(encStream)
		defer inspectZipW.Close()
	} else {
		// Legacy: Remove if we stop supporting inspection without public key.
		var key [32]byte
		// MUST use crypto/rand
		n, err := crand.Read(key[:])
		if err != nil || n != len(key) {
			logger.LogIf(ctx, err)
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}

		// Write a version for making *incompatible* changes.
		// The AdminClient will reject any version it does not know.
		if publicKey == nil { // NOTE(review): always true in this branch; kept as-is.
			w.Write([]byte{1})
			w.Write(key[:])
		}

		stream, err := sio.AES_256_GCM.Stream(key[:])
		if err != nil {
			logger.LogIf(ctx, err)
			return
		}
		// Zero nonce, we only use each key once, and 32 bytes is plenty.
		nonce := make([]byte, stream.NonceSize())
		encw := stream.EncryptWriter(w, nonce, nil)
		defer encw.Close()

		// Initialize a zip writer which will provide a zipped content
		// of profiling data of all nodes
		inspectZipW = zip.NewWriter(encw)
		defer inspectZipW.Close()

		if b := getClusterMetaInfo(ctx); len(b) > 0 {
			logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600))
		}
	}

	// rawDataFn zips one raw file from one host/drive; errors are logged
	// and swallowed so a single bad entry does not abort the collection.
	rawDataFn := func(r io.Reader, host, disk, filename string, si StatInfo) error {
		// Prefix host+disk
		filename = path.Join(host, disk, filename)
		if si.Dir {
			filename += "/"
			si.Size = 0
		}
		if si.Mode == 0 {
			// Not, set it to default.
			si.Mode = 0o600
		}
		if si.ModTime.IsZero() {
			// Set time to now.
			si.ModTime = time.Now()
		}
		header, zerr := zip.FileInfoHeader(dummyFileInfo{
			name:    filename,
			size:    si.Size,
			mode:    os.FileMode(si.Mode),
			modTime: si.ModTime,
			isDir:   si.Dir,
			sys:     nil,
		})
		if zerr != nil {
			logger.LogIf(ctx, zerr)
			return nil
		}
		header.Method = zip.Deflate
		zwriter, zerr := inspectZipW.CreateHeader(header)
		if zerr != nil {
			logger.LogIf(ctx, zerr)
			return nil
		}
		if _, err := io.Copy(zwriter, r); err != nil {
			logger.LogIf(ctx, err)
		}
		return nil
	}
	// errFileNotFound is expected for missing entries; only log other errors.
	err := o.GetRawData(ctx, volume, file, rawDataFn)
	if !errors.Is(err, errFileNotFound) {
		logger.LogIf(ctx, err)
	}

	// save the format.json as part of inspect by default
	if !(volume == minioMetaBucket && file == formatConfigFile) {
		err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
	}
	if !errors.Is(err, errFileNotFound) {
		logger.LogIf(ctx, err)
	}

	// save args passed to inspect command
	var sb bytes.Buffer
	fmt.Fprintf(&sb, "Inspect path: %s%s%s\n", volume, slashSeparator, file)
	sb.WriteString("Server command line args:")
	for _, pool := range globalEndpoints {
		sb.WriteString(" ")
		sb.WriteString(pool.CmdLine)
	}
	sb.WriteString("\n")
	logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))

	// scheme is substituted into the start script's sed expression below
	// to strip endpoint URLs down to local paths.
	scheme := "https"
	if !globalIsTLS {
		scheme = "http"
	}

	// save MinIO start script to inspect command
	var scrb bytes.Buffer
	fmt.Fprintf(&scrb, `#!/usr/bin/env bash

function main() {
	for file in $(ls -1); do
		dest_file=$(echo "$file" | cut -d ":" -f1)
		mv "$file" "$dest_file"
	done

	# Read content of inspect-input.txt
	MINIO_OPTS=$(grep "Server command line args" <./inspect-input.txt | sed "s/Server command line args: //g" | sed -r "s#%s:\/\/#\.\/#g")

	# Start MinIO instance using the options
	START_CMD="CI=on _MINIO_AUTO_DRIVE_HEALING=off minio server ${MINIO_OPTS} &"
	echo
	echo "Starting MinIO instance: ${START_CMD}"
	echo
	eval "$START_CMD"
	MINIO_SRVR_PID="$!"
	echo "MinIO Server PID: ${MINIO_SRVR_PID}"
	echo
	echo "Waiting for MinIO instance to get ready!"
	sleep 10
}

main "$@"`, scheme)
	logger.LogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
}
  3375  
  3376  func getSubnetAdminPublicKey() []byte {
  3377  	if globalIsCICD {
  3378  		return subnetAdminPublicKeyDev
  3379  	}
  3380  	return subnetAdminPublicKey
  3381  }
  3382  
  3383  func createHostAnonymizerForFSMode() map[string]string {
  3384  	hostAnonymizer := map[string]string{
  3385  		globalLocalNodeName: "server1",
  3386  	}
  3387  
  3388  	apiEndpoints := getAPIEndpoints()
  3389  	for _, ep := range apiEndpoints {
  3390  		if len(ep) == 0 {
  3391  			continue
  3392  		}
  3393  		if url, err := xnet.ParseHTTPURL(ep); err == nil {
  3394  			// In FS mode the drive names don't include the host.
  3395  			// So mapping just the host should be sufficient.
  3396  			hostAnonymizer[url.Host] = "server1"
  3397  		}
  3398  	}
  3399  	return hostAnonymizer
  3400  }
  3401  
  3402  // anonymizeHost - Add entries related to given endpoint in the host anonymizer map
  3403  // The health report data can contain the hostname in various forms e.g. host, host:port,
  3404  // host:port/drivepath, full url (http://host:port/drivepath)
  3405  // The anonymizer map will have mappings for all these variants for efficiently replacing
  3406  // any of these strings to the anonymized versions at the time of health report generation.
  3407  func anonymizeHost(hostAnonymizer map[string]string, endpoint Endpoint, poolNum int, srvrNum int) {
  3408  	if len(endpoint.Host) == 0 {
  3409  		return
  3410  	}
  3411  
  3412  	currentURL := endpoint.String()
  3413  
  3414  	// mapIfNotPresent - Maps the given key to the value only if the key is not present in the map
  3415  	mapIfNotPresent := func(m map[string]string, key string, val string) {
  3416  		_, found := m[key]
  3417  		if !found {
  3418  			m[key] = val
  3419  		}
  3420  	}
  3421  
  3422  	_, found := hostAnonymizer[currentURL]
  3423  	if !found {
  3424  		// In distributed setup, anonymized addr = 'poolNum.serverNum'
  3425  		newHost := fmt.Sprintf("pool%d.server%d", poolNum, srvrNum)
  3426  		schemePfx := endpoint.Scheme + "://"
  3427  
  3428  		// Hostname
  3429  		mapIfNotPresent(hostAnonymizer, endpoint.Hostname(), newHost)
  3430  
  3431  		newHostPort := newHost
  3432  		if len(endpoint.Port()) > 0 {
  3433  			// Host + port
  3434  			newHostPort = newHost + ":" + endpoint.Port()
  3435  			mapIfNotPresent(hostAnonymizer, endpoint.Host, newHostPort)
  3436  			mapIfNotPresent(hostAnonymizer, schemePfx+endpoint.Host, newHostPort)
  3437  		}
  3438  
  3439  		newHostPortPath := newHostPort
  3440  		if len(endpoint.Path) > 0 {
  3441  			// Host + port + path
  3442  			currentHostPortPath := endpoint.Host + endpoint.Path
  3443  			newHostPortPath = newHostPort + endpoint.Path
  3444  			mapIfNotPresent(hostAnonymizer, currentHostPortPath, newHostPortPath)
  3445  			mapIfNotPresent(hostAnonymizer, schemePfx+currentHostPortPath, newHostPortPath)
  3446  		}
  3447  
  3448  		// Full url
  3449  		hostAnonymizer[currentURL] = schemePfx + newHostPortPath
  3450  	}
  3451  }
  3452  
  3453  // createHostAnonymizer - Creates a map of various strings to corresponding anonymized names
  3454  func createHostAnonymizer() map[string]string {
  3455  	if !globalIsDistErasure {
  3456  		return createHostAnonymizerForFSMode()
  3457  	}
  3458  
  3459  	hostAnonymizer := map[string]string{}
  3460  	hosts := set.NewStringSet()
  3461  	srvrIdx := 0
  3462  
  3463  	for poolIdx, pool := range globalEndpoints {
  3464  		for _, endpoint := range pool.Endpoints {
  3465  			if !hosts.Contains(endpoint.Host) {
  3466  				hosts.Add(endpoint.Host)
  3467  				srvrIdx++
  3468  			}
  3469  			anonymizeHost(hostAnonymizer, endpoint, poolIdx+1, srvrIdx)
  3470  		}
  3471  	}
  3472  	return hostAnonymizer
  3473  }