storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/notification.go (about)

     1  /*
     2   * MinIO Cloud Storage, (C) 2018, 2019 MinIO, Inc.
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *
     8   *     http://www.apache.org/licenses/LICENSE-2.0
     9   *
    10   * Unless required by applicable law or agreed to in writing, software
    11   * distributed under the License is distributed on an "AS IS" BASIS,
    12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13   * See the License for the specific language governing permissions and
    14   * limitations under the License.
    15   */
    16  
    17  package cmd
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"encoding/json"
    23  	"errors"
    24  	"fmt"
    25  	"io"
    26  	"net/http"
    27  	"net/url"
    28  	"sort"
    29  	"strings"
    30  	"sync"
    31  	"time"
    32  
    33  	"github.com/cespare/xxhash/v2"
    34  	"github.com/klauspost/compress/zip"
    35  	"github.com/minio/minio-go/v7/pkg/set"
    36  	"github.com/willf/bloom"
    37  
    38  	"storj.io/minio/cmd/crypto"
    39  	xhttp "storj.io/minio/cmd/http"
    40  	"storj.io/minio/cmd/logger"
    41  	bandwidth "storj.io/minio/pkg/bandwidth"
    42  	bucketBandwidth "storj.io/minio/pkg/bucket/bandwidth"
    43  	"storj.io/minio/pkg/bucket/policy"
    44  	"storj.io/minio/pkg/event"
    45  	"storj.io/minio/pkg/madmin"
    46  	xnet "storj.io/minio/pkg/net"
    47  	"storj.io/minio/pkg/sync/errgroup"
    48  )
    49  
// NotificationSys - notification system.
//
// Holds the registered event targets, the per-bucket notification rules,
// and the peer REST clients used to broadcast calls across the cluster.
type NotificationSys struct {
	sync.RWMutex                                                          // guards bucketRulesMap and bucketRemoteTargetRulesMap
	targetList                 *event.TargetList                          // all registered notification targets
	targetResCh                chan event.TargetIDResult                  // per-target send results; drained by the goroutine started in Init
	bucketRulesMap             map[string]event.RulesMap                  // bucket name -> effective notification rules
	bucketRemoteTargetRulesMap map[string]map[event.TargetID]event.RulesMap // bucket name -> remote target -> rules
	peerClients                []*peerRESTClient // Excludes self
	allPeerClients             []*peerRESTClient // Includes nil client for self
}
    60  
    61  // GetARNList - returns available ARNs.
    62  func (sys *NotificationSys) GetARNList(onlyActive bool) []string {
    63  	arns := []string{}
    64  	if sys == nil {
    65  		return arns
    66  	}
    67  	region := globalServerRegion
    68  	for targetID, target := range sys.targetList.TargetMap() {
    69  		// httpclient target is part of ListenNotification
    70  		// which doesn't need to be listed as part of the ARN list
    71  		// This list is only meant for external targets, filter
    72  		// this out pro-actively.
    73  		if !strings.HasPrefix(targetID.ID, "httpclient+") {
    74  			if onlyActive && !target.HasQueueStore() {
    75  				if _, err := target.IsActive(); err != nil {
    76  					continue
    77  				}
    78  			}
    79  			arns = append(arns, targetID.ToARN(region).String())
    80  		}
    81  	}
    82  
    83  	return arns
    84  }
    85  
// NotificationPeerErr returns error associated for a remote peer.
//
// A zero Err means the call to that peer succeeded.
type NotificationPeerErr struct {
	Host xnet.Host // Remote host on which the rpc call was initiated
	Err  error     // Error returned by the remote peer for an rpc call
}
    91  
// A NotificationGroup is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero NotificationGroup is valid and does not cancel on error.
type NotificationGroup struct {
	wg   sync.WaitGroup        // tracks in-flight goroutines started via Go
	errs []NotificationPeerErr // one slot per peer, indexed by the Go() index argument
}
   100  
   101  // WithNPeers returns a new NotificationGroup with length of errs slice upto nerrs,
   102  // upon Wait() errors are returned collected from all tasks.
   103  func WithNPeers(nerrs int) *NotificationGroup {
   104  	return &NotificationGroup{errs: make([]NotificationPeerErr, nerrs)}
   105  }
   106  
// Wait blocks until all function calls from the Go method have returned, then
// returns the slice of errors from all function calls.
//
// The slice is indexed by the index passed to Go; entries for peers that were
// never scheduled remain zero-valued.
func (g *NotificationGroup) Wait() []NotificationPeerErr {
	g.wg.Wait()
	return g.errs
}
   113  
   114  // Go calls the given function in a new goroutine.
   115  //
   116  // The first call to return a non-nil error will be
   117  // collected in errs slice and returned by Wait().
   118  func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, addr xnet.Host) {
   119  	g.wg.Add(1)
   120  
   121  	go func() {
   122  		defer g.wg.Done()
   123  		g.errs[index] = NotificationPeerErr{
   124  			Host: addr,
   125  		}
   126  		for i := 0; i < 3; i++ {
   127  			if err := f(); err != nil {
   128  				g.errs[index].Err = err
   129  				// Last iteration log the error.
   130  				if i == 2 {
   131  					reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", addr.String())
   132  					ctx := logger.SetReqInfo(ctx, reqInfo)
   133  					logger.LogIf(ctx, err)
   134  				}
   135  				// Wait for one second and no need wait after last attempt.
   136  				if i < 2 {
   137  					time.Sleep(1 * time.Second)
   138  				}
   139  				continue
   140  			}
   141  			break
   142  		}
   143  	}()
   144  }
   145  
   146  // DeletePolicy - deletes policy across all peers.
   147  func (sys *NotificationSys) DeletePolicy(policyName string) []NotificationPeerErr {
   148  	ng := WithNPeers(len(sys.peerClients))
   149  	for idx, client := range sys.peerClients {
   150  		if client == nil {
   151  			continue
   152  		}
   153  		client := client
   154  		ng.Go(GlobalContext, func() error {
   155  			return client.DeletePolicy(policyName)
   156  		}, idx, *client.host)
   157  	}
   158  	return ng.Wait()
   159  }
   160  
   161  // LoadPolicy - reloads a specific modified policy across all peers
   162  func (sys *NotificationSys) LoadPolicy(policyName string) []NotificationPeerErr {
   163  	ng := WithNPeers(len(sys.peerClients))
   164  	for idx, client := range sys.peerClients {
   165  		if client == nil {
   166  			continue
   167  		}
   168  		client := client
   169  		ng.Go(GlobalContext, func() error {
   170  			return client.LoadPolicy(policyName)
   171  		}, idx, *client.host)
   172  	}
   173  	return ng.Wait()
   174  }
   175  
   176  // LoadPolicyMapping - reloads a policy mapping across all peers
   177  func (sys *NotificationSys) LoadPolicyMapping(userOrGroup string, isGroup bool) []NotificationPeerErr {
   178  	ng := WithNPeers(len(sys.peerClients))
   179  	for idx, client := range sys.peerClients {
   180  		if client == nil {
   181  			continue
   182  		}
   183  		client := client
   184  		ng.Go(GlobalContext, func() error {
   185  			return client.LoadPolicyMapping(userOrGroup, isGroup)
   186  		}, idx, *client.host)
   187  	}
   188  	return ng.Wait()
   189  }
   190  
   191  // DeleteUser - deletes a specific user across all peers
   192  func (sys *NotificationSys) DeleteUser(accessKey string) []NotificationPeerErr {
   193  	ng := WithNPeers(len(sys.peerClients))
   194  	for idx, client := range sys.peerClients {
   195  		if client == nil {
   196  			continue
   197  		}
   198  		client := client
   199  		ng.Go(GlobalContext, func() error {
   200  			return client.DeleteUser(accessKey)
   201  		}, idx, *client.host)
   202  	}
   203  	return ng.Wait()
   204  }
   205  
   206  // LoadUser - reloads a specific user across all peers
   207  func (sys *NotificationSys) LoadUser(accessKey string, temp bool) []NotificationPeerErr {
   208  	ng := WithNPeers(len(sys.peerClients))
   209  	for idx, client := range sys.peerClients {
   210  		if client == nil {
   211  			continue
   212  		}
   213  		client := client
   214  		ng.Go(GlobalContext, func() error {
   215  			return client.LoadUser(accessKey, temp)
   216  		}, idx, *client.host)
   217  	}
   218  	return ng.Wait()
   219  }
   220  
   221  // LoadGroup - loads a specific group on all peers.
   222  func (sys *NotificationSys) LoadGroup(group string) []NotificationPeerErr {
   223  	ng := WithNPeers(len(sys.peerClients))
   224  	for idx, client := range sys.peerClients {
   225  		if client == nil {
   226  			continue
   227  		}
   228  		client := client
   229  		ng.Go(GlobalContext, func() error { return client.LoadGroup(group) }, idx, *client.host)
   230  	}
   231  	return ng.Wait()
   232  }
   233  
   234  // DeleteServiceAccount - deletes a specific service account across all peers
   235  func (sys *NotificationSys) DeleteServiceAccount(accessKey string) []NotificationPeerErr {
   236  	ng := WithNPeers(len(sys.peerClients))
   237  	for idx, client := range sys.peerClients {
   238  		if client == nil {
   239  			continue
   240  		}
   241  		client := client
   242  		ng.Go(GlobalContext, func() error {
   243  			return client.DeleteServiceAccount(accessKey)
   244  		}, idx, *client.host)
   245  	}
   246  	return ng.Wait()
   247  }
   248  
   249  // LoadServiceAccount - reloads a specific service account across all peers
   250  func (sys *NotificationSys) LoadServiceAccount(accessKey string) []NotificationPeerErr {
   251  	ng := WithNPeers(len(sys.peerClients))
   252  	for idx, client := range sys.peerClients {
   253  		if client == nil {
   254  			continue
   255  		}
   256  		client := client
   257  		ng.Go(GlobalContext, func() error {
   258  			return client.LoadServiceAccount(accessKey)
   259  		}, idx, *client.host)
   260  	}
   261  	return ng.Wait()
   262  }
   263  
   264  // BackgroundHealStatus - returns background heal status of all peers
   265  func (sys *NotificationSys) BackgroundHealStatus() ([]madmin.BgHealState, []NotificationPeerErr) {
   266  	ng := WithNPeers(len(sys.peerClients))
   267  	states := make([]madmin.BgHealState, len(sys.peerClients))
   268  	for idx, client := range sys.peerClients {
   269  		if client == nil {
   270  			continue
   271  		}
   272  		idx := idx
   273  		client := client
   274  		ng.Go(GlobalContext, func() error {
   275  			st, err := client.BackgroundHealStatus()
   276  			if err != nil {
   277  				return err
   278  			}
   279  			states[idx] = st
   280  			return nil
   281  		}, idx, *client.host)
   282  	}
   283  
   284  	return states, ng.Wait()
   285  }
   286  
   287  // StartProfiling - start profiling on remote peers, by initiating a remote RPC.
   288  func (sys *NotificationSys) StartProfiling(profiler string) []NotificationPeerErr {
   289  	ng := WithNPeers(len(sys.peerClients))
   290  	for idx, client := range sys.peerClients {
   291  		if client == nil {
   292  			continue
   293  		}
   294  		client := client
   295  		ng.Go(GlobalContext, func() error {
   296  			return client.StartProfiling(profiler)
   297  		}, idx, *client.host)
   298  	}
   299  	return ng.Wait()
   300  }
   301  
   302  // DownloadProfilingData - download profiling data from all remote peers.
   303  func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io.Writer) bool {
   304  	profilingDataFound := false
   305  
   306  	// Initialize a zip writer which will provide a zipped content
   307  	// of profiling data of all nodes
   308  	zipWriter := zip.NewWriter(writer)
   309  	defer zipWriter.Close()
   310  
   311  	for _, client := range sys.peerClients {
   312  		if client == nil {
   313  			continue
   314  		}
   315  		data, err := client.DownloadProfileData()
   316  		if err != nil {
   317  			reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
   318  			ctx := logger.SetReqInfo(ctx, reqInfo)
   319  			logger.LogIf(ctx, err)
   320  			continue
   321  		}
   322  
   323  		profilingDataFound = true
   324  
   325  		for typ, data := range data {
   326  			// Send profiling data to zip as file
   327  			header, zerr := zip.FileInfoHeader(dummyFileInfo{
   328  				name:    fmt.Sprintf("profile-%s-%s", client.host.String(), typ),
   329  				size:    int64(len(data)),
   330  				mode:    0600,
   331  				modTime: UTCNow(),
   332  				isDir:   false,
   333  				sys:     nil,
   334  			})
   335  			if zerr != nil {
   336  				reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
   337  				ctx := logger.SetReqInfo(ctx, reqInfo)
   338  				logger.LogIf(ctx, zerr)
   339  				continue
   340  			}
   341  			header.Method = zip.Deflate
   342  			zwriter, zerr := zipWriter.CreateHeader(header)
   343  			if zerr != nil {
   344  				reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
   345  				ctx := logger.SetReqInfo(ctx, reqInfo)
   346  				logger.LogIf(ctx, zerr)
   347  				continue
   348  			}
   349  			if _, err = io.Copy(zwriter, bytes.NewReader(data)); err != nil {
   350  				reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String())
   351  				ctx := logger.SetReqInfo(ctx, reqInfo)
   352  				logger.LogIf(ctx, err)
   353  				continue
   354  			}
   355  		}
   356  	}
   357  
   358  	// Local host
   359  	thisAddr, err := xnet.ParseHost(globalLocalNodeName)
   360  	if err != nil {
   361  		logger.LogIf(ctx, err)
   362  		return profilingDataFound
   363  	}
   364  
   365  	data, err := getProfileData()
   366  	if err != nil {
   367  		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", thisAddr.String())
   368  		ctx := logger.SetReqInfo(ctx, reqInfo)
   369  		logger.LogIf(ctx, err)
   370  		return profilingDataFound
   371  	}
   372  
   373  	profilingDataFound = true
   374  
   375  	// Send profiling data to zip as file
   376  	for typ, data := range data {
   377  		header, zerr := zip.FileInfoHeader(dummyFileInfo{
   378  			name:    fmt.Sprintf("profile-%s-%s", thisAddr, typ),
   379  			size:    int64(len(data)),
   380  			mode:    0600,
   381  			modTime: UTCNow(),
   382  			isDir:   false,
   383  			sys:     nil,
   384  		})
   385  		if zerr != nil {
   386  			return profilingDataFound
   387  		}
   388  		header.Method = zip.Deflate
   389  
   390  		zwriter, zerr := zipWriter.CreateHeader(header)
   391  		if zerr != nil {
   392  			return profilingDataFound
   393  		}
   394  
   395  		if _, err = io.Copy(zwriter, bytes.NewReader(data)); err != nil {
   396  			return profilingDataFound
   397  		}
   398  	}
   399  
   400  	return profilingDataFound
   401  }
   402  
   403  // ServerUpdate - updates remote peers.
   404  func (sys *NotificationSys) ServerUpdate(ctx context.Context, u *url.URL, sha256Sum []byte, lrTime time.Time, releaseInfo string) []NotificationPeerErr {
   405  	ng := WithNPeers(len(sys.peerClients))
   406  	for idx, client := range sys.peerClients {
   407  		if client == nil {
   408  			continue
   409  		}
   410  		client := client
   411  		ng.Go(ctx, func() error {
   412  			return client.ServerUpdate(ctx, u, sha256Sum, lrTime, releaseInfo)
   413  		}, idx, *client.host)
   414  	}
   415  	return ng.Wait()
   416  }
   417  
   418  // SignalService - calls signal service RPC call on all peers.
   419  func (sys *NotificationSys) SignalService(sig serviceSignal) []NotificationPeerErr {
   420  	ng := WithNPeers(len(sys.peerClients))
   421  	for idx, client := range sys.peerClients {
   422  		if client == nil {
   423  			continue
   424  		}
   425  		client := client
   426  		ng.Go(GlobalContext, func() error {
   427  			return client.SignalService(sig)
   428  		}, idx, *client.host)
   429  	}
   430  	return ng.Wait()
   431  }
   432  
// updateBloomFilter will cycle all servers to the current index and
// return a merged bloom filter if a complete one can be retrieved.
//
// The local tracker is cycled first; every peer is then cycled in parallel
// and its filter merged into the local one. If the local filter is
// incomplete, or any peer errors or reports an incomplete filter, the
// merged result is discarded and (nil, nil) is returned.
func (sys *NotificationSys) updateBloomFilter(ctx context.Context, current uint64) (*bloomFilter, error) {
	var req = bloomFilterRequest{
		Current: current,
		Oldest:  current - dataUsageUpdateDirCycles,
	}
	// Guard against unsigned underflow during the first few cycles.
	if current < dataUsageUpdateDirCycles {
		req.Oldest = 0
	}

	// Load initial state from local...
	var bf *bloomFilter
	bfr, err := intDataUpdateTracker.cycleFilter(ctx, req)
	logger.LogIf(ctx, err)
	if err == nil && bfr.Complete {
		nbf := intDataUpdateTracker.newBloomFilter()
		bf = &nbf
		_, err = bf.ReadFrom(bytes.NewReader(bfr.Filter))
		logger.LogIf(ctx, err)
	}

	// bf is shared mutable state across all goroutines; mu guards it.
	var mu sync.Mutex
	g := errgroup.WithNErrs(len(sys.peerClients))
	for idx, client := range sys.peerClients {
		if client == nil {
			continue
		}
		client := client
		g.Go(func() error {
			serverBF, err := client.cycleServerBloomFilter(ctx, req)
			// NOTE(review): dead code - the `false &&` makes this debug
			// branch unreachable; presumably disabled intentionally.
			if false && intDataUpdateTracker.debug {
				b, _ := json.MarshalIndent(serverBF, "", "  ")
				logger.Info("Disk %v, Bloom filter: %v", client.host.Name, string(b))
			}
			// Keep lock while checking result.
			mu.Lock()
			defer mu.Unlock()

			// Any failure, incomplete peer filter, or an already-discarded
			// local filter invalidates the merged result.
			if err != nil || !serverBF.Complete || bf == nil {
				logger.LogOnceIf(ctx, err, fmt.Sprintf("host:%s, cycle:%d", client.host, current), client.cycleServerBloomFilter)
				bf = nil
				return nil
			}

			var tmp bloom.BloomFilter
			_, err = tmp.ReadFrom(bytes.NewReader(serverBF.Filter))
			if err != nil {
				logger.LogIf(ctx, err)
				bf = nil
				return nil
			}
			if bf.BloomFilter == nil {
				bf.BloomFilter = &tmp
			} else {
				err = bf.Merge(&tmp)
				if err != nil {
					logger.LogIf(ctx, err)
					bf = nil
					return nil
				}
			}
			return nil
		}, idx)
	}
	// Errors are never returned by the goroutines; failures are reflected
	// by bf being set to nil instead.
	g.Wait()
	return bf, nil
}
   501  
   502  // collectBloomFilter will collect bloom filters from all servers from the specified cycle.
   503  func (sys *NotificationSys) collectBloomFilter(ctx context.Context, from uint64) (*bloomFilter, error) {
   504  	var req = bloomFilterRequest{
   505  		Current: 0,
   506  		Oldest:  from,
   507  	}
   508  
   509  	// Load initial state from local...
   510  	var bf *bloomFilter
   511  	bfr, err := intDataUpdateTracker.cycleFilter(ctx, req)
   512  	logger.LogIf(ctx, err)
   513  	if err == nil && bfr.Complete {
   514  		nbf := intDataUpdateTracker.newBloomFilter()
   515  		bf = &nbf
   516  		_, err = bf.ReadFrom(bytes.NewReader(bfr.Filter))
   517  		logger.LogIf(ctx, err)
   518  	}
   519  	if !bfr.Complete {
   520  		// If local isn't complete just return early
   521  		return nil, nil
   522  	}
   523  
   524  	var mu sync.Mutex
   525  	g := errgroup.WithNErrs(len(sys.peerClients))
   526  	for idx, client := range sys.peerClients {
   527  		if client == nil {
   528  			continue
   529  		}
   530  		client := client
   531  		g.Go(func() error {
   532  			serverBF, err := client.cycleServerBloomFilter(ctx, req)
   533  			if false && intDataUpdateTracker.debug {
   534  				b, _ := json.MarshalIndent(serverBF, "", "  ")
   535  				logger.Info("Disk %v, Bloom filter: %v", client.host.Name, string(b))
   536  			}
   537  			// Keep lock while checking result.
   538  			mu.Lock()
   539  			defer mu.Unlock()
   540  
   541  			if err != nil || !serverBF.Complete || bf == nil {
   542  				logger.LogIf(ctx, err)
   543  				bf = nil
   544  				return nil
   545  			}
   546  
   547  			var tmp bloom.BloomFilter
   548  			_, err = tmp.ReadFrom(bytes.NewReader(serverBF.Filter))
   549  			if err != nil {
   550  				logger.LogIf(ctx, err)
   551  				bf = nil
   552  				return nil
   553  			}
   554  			if bf.BloomFilter == nil {
   555  				bf.BloomFilter = &tmp
   556  			} else {
   557  				err = bf.Merge(&tmp)
   558  				if err != nil {
   559  					logger.LogIf(ctx, err)
   560  					bf = nil
   561  					return nil
   562  				}
   563  			}
   564  			return nil
   565  		}, idx)
   566  	}
   567  	g.Wait()
   568  	return bf, nil
   569  }
   570  
   571  // findEarliestCleanBloomFilter will find the earliest bloom filter across the cluster
   572  // where the directory is clean.
   573  // Due to how objects are stored this can include object names.
   574  func (sys *NotificationSys) findEarliestCleanBloomFilter(ctx context.Context, dir string) uint64 {
   575  
   576  	// Load initial state from local...
   577  	current := intDataUpdateTracker.current()
   578  	best := intDataUpdateTracker.latestWithDir(dir)
   579  	if best == current {
   580  		// If the current is dirty no need to check others.
   581  		return current
   582  	}
   583  
   584  	var req = bloomFilterRequest{
   585  		Current:     0,
   586  		Oldest:      best,
   587  		OldestClean: dir,
   588  	}
   589  
   590  	var mu sync.Mutex
   591  	g := errgroup.WithNErrs(len(sys.peerClients))
   592  	for idx, client := range sys.peerClients {
   593  		if client == nil {
   594  			continue
   595  		}
   596  		client := client
   597  		g.Go(func() error {
   598  			serverBF, err := client.cycleServerBloomFilter(ctx, req)
   599  
   600  			// Keep lock while checking result.
   601  			mu.Lock()
   602  			defer mu.Unlock()
   603  
   604  			if err != nil {
   605  				// Error, don't assume clean.
   606  				best = current
   607  				logger.LogIf(ctx, err)
   608  				return nil
   609  			}
   610  			if serverBF.OldestIdx > best {
   611  				best = serverBF.OldestIdx
   612  			}
   613  			return nil
   614  		}, idx)
   615  	}
   616  	g.Wait()
   617  	return best
   618  }
   619  
// errPeerNotReachable - reported for a peer whose client slot is nil,
// i.e. no REST connection exists to that peer.
var errPeerNotReachable = errors.New("peer is not reachable")
   621  
   622  // GetLocks - makes GetLocks RPC call on all peers.
   623  func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*PeerLocks {
   624  	locksResp := make([]*PeerLocks, len(sys.peerClients))
   625  	g := errgroup.WithNErrs(len(sys.peerClients))
   626  	for index, client := range sys.peerClients {
   627  		index := index
   628  		g.Go(func() error {
   629  			if client == nil {
   630  				return errPeerNotReachable
   631  			}
   632  			serverLocksResp, err := sys.peerClients[index].GetLocks()
   633  			if err != nil {
   634  				return err
   635  			}
   636  			locksResp[index] = &PeerLocks{
   637  				Addr:  sys.peerClients[index].host.String(),
   638  				Locks: serverLocksResp,
   639  			}
   640  			return nil
   641  		}, index)
   642  	}
   643  	for index, err := range g.Wait() {
   644  		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress",
   645  			sys.peerClients[index].host.String())
   646  		ctx := logger.SetReqInfo(ctx, reqInfo)
   647  		logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String())
   648  	}
   649  	locksResp = append(locksResp, &PeerLocks{
   650  		Addr:  getHostName(r),
   651  		Locks: globalLockServer.DupLockMap(),
   652  	})
   653  	return locksResp
   654  }
   655  
   656  // LoadBucketMetadata - calls LoadBucketMetadata call on all peers
   657  func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName string) {
   658  	ng := WithNPeers(len(sys.peerClients))
   659  	for idx, client := range sys.peerClients {
   660  		if client == nil {
   661  			continue
   662  		}
   663  		client := client
   664  		ng.Go(ctx, func() error {
   665  			return client.LoadBucketMetadata(bucketName)
   666  		}, idx, *client.host)
   667  	}
   668  	for _, nErr := range ng.Wait() {
   669  		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
   670  		if nErr.Err != nil {
   671  			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
   672  		}
   673  	}
   674  }
   675  
   676  // DeleteBucketMetadata - calls DeleteBucketMetadata call on all peers
   677  func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName string) {
   678  	globalReplicationStats.Delete(bucketName)
   679  	globalBucketMetadataSys.Remove(bucketName)
   680  	if localMetacacheMgr != nil {
   681  		localMetacacheMgr.deleteBucketCache(bucketName)
   682  	}
   683  
   684  	ng := WithNPeers(len(sys.peerClients))
   685  	for idx, client := range sys.peerClients {
   686  		if client == nil {
   687  			continue
   688  		}
   689  		client := client
   690  		ng.Go(ctx, func() error {
   691  			return client.DeleteBucketMetadata(bucketName)
   692  		}, idx, *client.host)
   693  	}
   694  	for _, nErr := range ng.Wait() {
   695  		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
   696  		if nErr.Err != nil {
   697  			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
   698  		}
   699  	}
   700  }
   701  
   702  // GetClusterBucketStats - calls GetClusterBucketStats call on all peers for a cluster statistics view.
   703  func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketName string) []BucketStats {
   704  	ng := WithNPeers(len(sys.peerClients))
   705  	bucketStats := make([]BucketStats, len(sys.peerClients))
   706  	for index, client := range sys.peerClients {
   707  		index := index
   708  		client := client
   709  		ng.Go(ctx, func() error {
   710  			if client == nil {
   711  				return errPeerNotReachable
   712  			}
   713  			bs, err := client.GetBucketStats(bucketName)
   714  			if err != nil {
   715  				return err
   716  			}
   717  			bucketStats[index] = bs
   718  			return nil
   719  		}, index, *client.host)
   720  	}
   721  	for _, nErr := range ng.Wait() {
   722  		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String())
   723  		if nErr.Err != nil {
   724  			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err)
   725  		}
   726  	}
   727  	bucketStats = append(bucketStats, BucketStats{
   728  		ReplicationStats: globalReplicationStats.Get(bucketName),
   729  	})
   730  	return bucketStats
   731  }
   732  
   733  // Loads notification policies for all buckets into NotificationSys.
   734  func (sys *NotificationSys) load(buckets []BucketInfo) {
   735  	for _, bucket := range buckets {
   736  		ctx := logger.SetReqInfo(GlobalContext, &logger.ReqInfo{BucketName: bucket.Name})
   737  		config, err := globalBucketMetadataSys.GetNotificationConfig(bucket.Name)
   738  		if err != nil {
   739  			logger.LogIf(ctx, err)
   740  			continue
   741  		}
   742  		config.SetRegion(globalServerRegion)
   743  		if err = config.Validate(globalServerRegion, GlobalNotificationSys.targetList); err != nil {
   744  			if _, ok := err.(*event.ErrARNNotFound); !ok {
   745  				logger.LogIf(ctx, err)
   746  			}
   747  			continue
   748  		}
   749  		sys.AddRulesMap(bucket.Name, config.ToRulesMap())
   750  	}
   751  }
   752  
// Init - initializes notification system from notification.xml and listenxl.meta of all buckets.
//
// Registers config targets, starts a background goroutine that drains
// targetResCh (logging each distinct target failure once), and kicks off
// asynchronous loading of per-bucket rules so startup is not blocked.
func (sys *NotificationSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
	if objAPI == nil {
		return errServerNotInitialized
	}

	// In gateway mode, notifications are not supported - except NAS gateway.
	if GlobalIsGateway && !objAPI.IsNotificationSupported() {
		return nil
	}

	logger.LogIf(ctx, sys.targetList.Add(globalConfigTargetList.Targets()...))

	// NOTE(review): this drain goroutine has no shutdown path; it lives
	// for the process lifetime (ends only if targetResCh is closed).
	go func() {
		for res := range sys.targetResCh {
			if res.Err != nil {
				reqInfo := &logger.ReqInfo{}
				reqInfo.AppendTags("targetID", res.ID.Name)
				logger.LogOnceIf(logger.SetReqInfo(GlobalContext, reqInfo), res.Err, res.ID)
			}
		}
	}()

	// Load bucket notification rules asynchronously.
	go sys.load(buckets)
	return nil
}
   779  
   780  // AddRulesMap - adds rules map for bucket name.
   781  func (sys *NotificationSys) AddRulesMap(bucketName string, rulesMap event.RulesMap) {
   782  	sys.Lock()
   783  	defer sys.Unlock()
   784  
   785  	rulesMap = rulesMap.Clone()
   786  
   787  	for _, targetRulesMap := range sys.bucketRemoteTargetRulesMap[bucketName] {
   788  		rulesMap.Add(targetRulesMap)
   789  	}
   790  
   791  	// Do not add for an empty rulesMap.
   792  	if len(rulesMap) == 0 {
   793  		delete(sys.bucketRulesMap, bucketName)
   794  	} else {
   795  		sys.bucketRulesMap[bucketName] = rulesMap
   796  	}
   797  }
   798  
   799  // RemoveRulesMap - removes rules map for bucket name.
   800  func (sys *NotificationSys) RemoveRulesMap(bucketName string, rulesMap event.RulesMap) {
   801  	sys.Lock()
   802  	defer sys.Unlock()
   803  
   804  	sys.bucketRulesMap[bucketName].Remove(rulesMap)
   805  	if len(sys.bucketRulesMap[bucketName]) == 0 {
   806  		delete(sys.bucketRulesMap, bucketName)
   807  	}
   808  }
   809  
   810  // ConfiguredTargetIDs - returns list of configured target id's
   811  func (sys *NotificationSys) ConfiguredTargetIDs() []event.TargetID {
   812  	if sys == nil {
   813  		return nil
   814  	}
   815  
   816  	sys.RLock()
   817  	defer sys.RUnlock()
   818  
   819  	var targetIDs []event.TargetID
   820  	for _, rmap := range sys.bucketRulesMap {
   821  		for _, rules := range rmap {
   822  			for _, targetSet := range rules {
   823  				for id := range targetSet {
   824  					targetIDs = append(targetIDs, id)
   825  				}
   826  			}
   827  		}
   828  	}
   829  	// Filter out targets configured via env
   830  	var tIDs []event.TargetID
   831  	for _, targetID := range targetIDs {
   832  		if !globalEnvTargetList.Exists(targetID) {
   833  			tIDs = append(tIDs, targetID)
   834  		}
   835  	}
   836  	return tIDs
   837  }
   838  
   839  // RemoveNotification - removes all notification configuration for bucket name.
   840  func (sys *NotificationSys) RemoveNotification(bucketName string) {
   841  	sys.Lock()
   842  	defer sys.Unlock()
   843  
   844  	delete(sys.bucketRulesMap, bucketName)
   845  
   846  	targetIDSet := event.NewTargetIDSet()
   847  	for targetID := range sys.bucketRemoteTargetRulesMap[bucketName] {
   848  		targetIDSet[targetID] = struct{}{}
   849  		delete(sys.bucketRemoteTargetRulesMap[bucketName], targetID)
   850  	}
   851  	sys.targetList.Remove(targetIDSet)
   852  
   853  	delete(sys.bucketRemoteTargetRulesMap, bucketName)
   854  }
   855  
   856  // RemoveAllRemoteTargets - closes and removes all notification targets.
   857  func (sys *NotificationSys) RemoveAllRemoteTargets() {
   858  	sys.Lock()
   859  	defer sys.Unlock()
   860  
   861  	for _, targetMap := range sys.bucketRemoteTargetRulesMap {
   862  		targetIDSet := event.NewTargetIDSet()
   863  		for k := range targetMap {
   864  			targetIDSet[k] = struct{}{}
   865  		}
   866  		sys.targetList.Remove(targetIDSet)
   867  	}
   868  }
   869  
   870  // Send - sends event data to all matching targets.
   871  func (sys *NotificationSys) Send(args eventArgs) {
   872  	sys.RLock()
   873  	targetIDSet := sys.bucketRulesMap[args.BucketName].Match(args.EventName, args.Object.Name)
   874  	sys.RUnlock()
   875  
   876  	if len(targetIDSet) == 0 {
   877  		return
   878  	}
   879  
   880  	sys.targetList.Send(args.ToEvent(true), targetIDSet, sys.targetResCh)
   881  }
   882  
   883  // NetInfo - Net information
   884  func (sys *NotificationSys) NetInfo(ctx context.Context) madmin.ServerNetHealthInfo {
   885  	var sortedGlobalEndpoints []string
   886  
   887  	/*
   888  			Ensure that only untraversed links are visited by this server
   889  		        i.e. if net perf tests have been performed between a -> b, then do
   890  			not run it between b -> a
   891  
   892  		        The graph of tests looks like this
   893  
   894  		            a   b   c   d
   895  		        a | o | x | x | x |
   896  		        b | o | o | x | x |
   897  		        c | o | o | o | x |
   898  		        d | o | o | o | o |
   899  
   900  		        'x's should be tested, and 'o's should be skipped
   901  	*/
   902  
   903  	hostSet := set.NewStringSet()
   904  	for _, ez := range globalEndpoints {
   905  		for _, e := range ez.Endpoints {
   906  			if !hostSet.Contains(e.Host) {
   907  				sortedGlobalEndpoints = append(sortedGlobalEndpoints, e.Host)
   908  				hostSet.Add(e.Host)
   909  			}
   910  		}
   911  	}
   912  
   913  	sort.Strings(sortedGlobalEndpoints)
   914  	var remoteTargets []*peerRESTClient
   915  	search := func(host string) *peerRESTClient {
   916  		for index, client := range sys.peerClients {
   917  			if client == nil {
   918  				continue
   919  			}
   920  			if sys.peerClients[index].host.String() == host {
   921  				return client
   922  			}
   923  		}
   924  		return nil
   925  	}
   926  
   927  	for i := 0; i < len(sortedGlobalEndpoints); i++ {
   928  		if sortedGlobalEndpoints[i] != globalLocalNodeName {
   929  			continue
   930  		}
   931  		for j := 0; j < len(sortedGlobalEndpoints); j++ {
   932  			if j > i {
   933  				remoteTarget := search(sortedGlobalEndpoints[j])
   934  				if remoteTarget != nil {
   935  					remoteTargets = append(remoteTargets, remoteTarget)
   936  				}
   937  			}
   938  		}
   939  	}
   940  
   941  	netInfos := make([]madmin.NetPerfInfo, len(remoteTargets))
   942  
   943  	for index, client := range remoteTargets {
   944  		if client == nil {
   945  			continue
   946  		}
   947  		var err error
   948  		netInfos[index], err = client.NetInfo(ctx)
   949  
   950  		addr := client.host.String()
   951  		reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
   952  		ctx := logger.SetReqInfo(GlobalContext, reqInfo)
   953  		logger.LogIf(ctx, err)
   954  		netInfos[index].Addr = addr
   955  		if err != nil {
   956  			netInfos[index].Error = err.Error()
   957  		}
   958  	}
   959  	return madmin.ServerNetHealthInfo{
   960  		Net:  netInfos,
   961  		Addr: globalLocalNodeName,
   962  	}
   963  }
   964  
   965  // DispatchNetPerfInfo - Net perf information from other nodes
   966  func (sys *NotificationSys) DispatchNetPerfInfo(ctx context.Context) []madmin.ServerNetHealthInfo {
   967  	serverNetInfos := []madmin.ServerNetHealthInfo{}
   968  
   969  	for index, client := range sys.peerClients {
   970  		if client == nil {
   971  			continue
   972  		}
   973  		serverNetInfo, err := sys.peerClients[index].DispatchNetInfo(ctx)
   974  		if err != nil {
   975  			serverNetInfo.Addr = client.host.String()
   976  			serverNetInfo.Error = err.Error()
   977  		}
   978  		serverNetInfos = append(serverNetInfos, serverNetInfo)
   979  	}
   980  	return serverNetInfos
   981  }
   982  
   983  // DispatchNetPerfChan - Net perf information from other nodes
   984  func (sys *NotificationSys) DispatchNetPerfChan(ctx context.Context) chan madmin.ServerNetHealthInfo {
   985  	serverNetInfos := make(chan madmin.ServerNetHealthInfo)
   986  	wg := sync.WaitGroup{}
   987  
   988  	wg.Add(1)
   989  	go func() {
   990  		for _, client := range sys.peerClients {
   991  			if client == nil {
   992  				continue
   993  			}
   994  			serverNetInfo, err := client.DispatchNetInfo(ctx)
   995  			if err != nil {
   996  				serverNetInfo.Addr = client.host.String()
   997  				serverNetInfo.Error = err.Error()
   998  			}
   999  			serverNetInfos <- serverNetInfo
  1000  		}
  1001  		wg.Done()
  1002  	}()
  1003  
  1004  	go func() {
  1005  		wg.Wait()
  1006  		close(serverNetInfos)
  1007  	}()
  1008  
  1009  	return serverNetInfos
  1010  }
  1011  
  1012  // NetPerfParallelInfo - Performs Net parallel tests
  1013  func (sys *NotificationSys) NetPerfParallelInfo(ctx context.Context) madmin.ServerNetHealthInfo {
  1014  	netInfos := []madmin.NetPerfInfo{}
  1015  	wg := sync.WaitGroup{}
  1016  
  1017  	for index, client := range sys.peerClients {
  1018  		if client == nil {
  1019  			continue
  1020  		}
  1021  
  1022  		wg.Add(1)
  1023  		go func(index int) {
  1024  			netInfo, err := sys.peerClients[index].NetInfo(ctx)
  1025  			netInfo.Addr = sys.peerClients[index].host.String()
  1026  			if err != nil {
  1027  				netInfo.Error = err.Error()
  1028  			}
  1029  			netInfos = append(netInfos, netInfo)
  1030  			wg.Done()
  1031  		}(index)
  1032  	}
  1033  	wg.Wait()
  1034  	return madmin.ServerNetHealthInfo{
  1035  		Net:  netInfos,
  1036  		Addr: globalLocalNodeName,
  1037  	}
  1038  
  1039  }
  1040  
  1041  // DrivePerfInfo - Drive perf information
  1042  func (sys *NotificationSys) DrivePerfInfo(ctx context.Context) []madmin.ServerDrivesInfo {
  1043  	reply := make([]madmin.ServerDrivesInfo, len(sys.peerClients))
  1044  
  1045  	g := errgroup.WithNErrs(len(sys.peerClients))
  1046  	for index, client := range sys.peerClients {
  1047  		if client == nil {
  1048  			continue
  1049  		}
  1050  		index := index
  1051  		g.Go(func() error {
  1052  			var err error
  1053  			reply[index], err = sys.peerClients[index].DriveInfo(ctx)
  1054  			return err
  1055  		}, index)
  1056  	}
  1057  
  1058  	for index, err := range g.Wait() {
  1059  		if err != nil {
  1060  			addr := sys.peerClients[index].host.String()
  1061  			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
  1062  			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
  1063  			logger.LogIf(ctx, err)
  1064  			reply[index].Addr = addr
  1065  			reply[index].Error = err.Error()
  1066  		}
  1067  	}
  1068  	return reply
  1069  }
  1070  
  1071  // DrivePerfInfoChan - Drive perf information
  1072  func (sys *NotificationSys) DrivePerfInfoChan(ctx context.Context) chan madmin.ServerDrivesInfo {
  1073  	updateChan := make(chan madmin.ServerDrivesInfo)
  1074  	wg := sync.WaitGroup{}
  1075  
  1076  	for _, client := range sys.peerClients {
  1077  		if client == nil {
  1078  			continue
  1079  		}
  1080  		wg.Add(1)
  1081  		go func(client *peerRESTClient) {
  1082  			reply, err := client.DriveInfo(ctx)
  1083  
  1084  			addr := client.host.String()
  1085  			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
  1086  			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
  1087  			logger.LogIf(ctx, err)
  1088  
  1089  			reply.Addr = addr
  1090  			if err != nil {
  1091  				reply.Error = err.Error()
  1092  			}
  1093  
  1094  			updateChan <- reply
  1095  			wg.Done()
  1096  		}(client)
  1097  	}
  1098  
  1099  	go func() {
  1100  		wg.Wait()
  1101  		close(updateChan)
  1102  	}()
  1103  
  1104  	return updateChan
  1105  }
  1106  
  1107  // CPUInfo - CPU information
  1108  func (sys *NotificationSys) CPUInfo(ctx context.Context) []madmin.ServerCPUInfo {
  1109  	reply := make([]madmin.ServerCPUInfo, len(sys.peerClients))
  1110  
  1111  	g := errgroup.WithNErrs(len(sys.peerClients))
  1112  	for index, client := range sys.peerClients {
  1113  		if client == nil {
  1114  			continue
  1115  		}
  1116  		index := index
  1117  		g.Go(func() error {
  1118  			var err error
  1119  			reply[index], err = sys.peerClients[index].CPUInfo(ctx)
  1120  			return err
  1121  		}, index)
  1122  	}
  1123  
  1124  	for index, err := range g.Wait() {
  1125  		if err != nil {
  1126  			addr := sys.peerClients[index].host.String()
  1127  			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
  1128  			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
  1129  			logger.LogIf(ctx, err)
  1130  			reply[index].Addr = addr
  1131  			reply[index].Error = err.Error()
  1132  		}
  1133  	}
  1134  	return reply
  1135  }
  1136  
  1137  // DiskHwInfo - Disk HW information
  1138  func (sys *NotificationSys) DiskHwInfo(ctx context.Context) []madmin.ServerDiskHwInfo {
  1139  	reply := make([]madmin.ServerDiskHwInfo, len(sys.peerClients))
  1140  
  1141  	g := errgroup.WithNErrs(len(sys.peerClients))
  1142  	for index, client := range sys.peerClients {
  1143  		if client == nil {
  1144  			continue
  1145  		}
  1146  		index := index
  1147  		g.Go(func() error {
  1148  			var err error
  1149  			reply[index], err = sys.peerClients[index].DiskHwInfo(ctx)
  1150  			return err
  1151  		}, index)
  1152  	}
  1153  
  1154  	for index, err := range g.Wait() {
  1155  		if err != nil {
  1156  			addr := sys.peerClients[index].host.String()
  1157  			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
  1158  			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
  1159  			logger.LogIf(ctx, err)
  1160  			reply[index].Addr = addr
  1161  			reply[index].Error = err.Error()
  1162  		}
  1163  	}
  1164  	return reply
  1165  }
  1166  
  1167  // OsInfo - Os information
  1168  func (sys *NotificationSys) OsInfo(ctx context.Context) []madmin.ServerOsInfo {
  1169  	reply := make([]madmin.ServerOsInfo, len(sys.peerClients))
  1170  
  1171  	g := errgroup.WithNErrs(len(sys.peerClients))
  1172  	for index, client := range sys.peerClients {
  1173  		if client == nil {
  1174  			continue
  1175  		}
  1176  		index := index
  1177  		g.Go(func() error {
  1178  			var err error
  1179  			reply[index], err = sys.peerClients[index].OsInfo(ctx)
  1180  			return err
  1181  		}, index)
  1182  	}
  1183  
  1184  	for index, err := range g.Wait() {
  1185  		if err != nil {
  1186  			addr := sys.peerClients[index].host.String()
  1187  			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
  1188  			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
  1189  			logger.LogIf(ctx, err)
  1190  			reply[index].Addr = addr
  1191  			reply[index].Error = err.Error()
  1192  		}
  1193  	}
  1194  	return reply
  1195  }
  1196  
  1197  // MemInfo - Mem information
  1198  func (sys *NotificationSys) MemInfo(ctx context.Context) []madmin.ServerMemInfo {
  1199  	reply := make([]madmin.ServerMemInfo, len(sys.peerClients))
  1200  
  1201  	g := errgroup.WithNErrs(len(sys.peerClients))
  1202  	for index, client := range sys.peerClients {
  1203  		if client == nil {
  1204  			continue
  1205  		}
  1206  		index := index
  1207  		g.Go(func() error {
  1208  			var err error
  1209  			reply[index], err = sys.peerClients[index].MemInfo(ctx)
  1210  			return err
  1211  		}, index)
  1212  	}
  1213  
  1214  	for index, err := range g.Wait() {
  1215  		if err != nil {
  1216  			addr := sys.peerClients[index].host.String()
  1217  			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
  1218  			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
  1219  			logger.LogIf(ctx, err)
  1220  			reply[index].Addr = addr
  1221  			reply[index].Error = err.Error()
  1222  		}
  1223  	}
  1224  	return reply
  1225  }
  1226  
  1227  // ProcInfo - Process information
  1228  func (sys *NotificationSys) ProcInfo(ctx context.Context) []madmin.ServerProcInfo {
  1229  	reply := make([]madmin.ServerProcInfo, len(sys.peerClients))
  1230  
  1231  	g := errgroup.WithNErrs(len(sys.peerClients))
  1232  	for index, client := range sys.peerClients {
  1233  		if client == nil {
  1234  			continue
  1235  		}
  1236  		index := index
  1237  		g.Go(func() error {
  1238  			var err error
  1239  			reply[index], err = sys.peerClients[index].ProcInfo(ctx)
  1240  			return err
  1241  		}, index)
  1242  	}
  1243  
  1244  	for index, err := range g.Wait() {
  1245  		if err != nil {
  1246  			addr := sys.peerClients[index].host.String()
  1247  			reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr)
  1248  			ctx := logger.SetReqInfo(GlobalContext, reqInfo)
  1249  			logger.LogIf(ctx, err)
  1250  			reply[index].Addr = addr
  1251  			reply[index].Error = err.Error()
  1252  		}
  1253  	}
  1254  	return reply
  1255  }
  1256  
  1257  func getOfflineDisks(offlineHost string, endpoints EndpointServerPools) []madmin.Disk {
  1258  	var offlineDisks []madmin.Disk
  1259  	for _, pool := range endpoints {
  1260  		for _, ep := range pool.Endpoints {
  1261  			if offlineHost == ep.Host {
  1262  				offlineDisks = append(offlineDisks, madmin.Disk{
  1263  					Endpoint: ep.String(),
  1264  					State:    string(madmin.ItemOffline),
  1265  				})
  1266  			}
  1267  		}
  1268  	}
  1269  	return offlineDisks
  1270  }
  1271  
  1272  // ServerInfo - calls ServerInfo RPC call on all peers.
  1273  func (sys *NotificationSys) ServerInfo() []madmin.ServerProperties {
  1274  	reply := make([]madmin.ServerProperties, len(sys.peerClients))
  1275  	var wg sync.WaitGroup
  1276  	for i, client := range sys.peerClients {
  1277  		if client == nil {
  1278  			continue
  1279  		}
  1280  		wg.Add(1)
  1281  		go func(client *peerRESTClient, idx int) {
  1282  			defer wg.Done()
  1283  			info, err := client.ServerInfo()
  1284  			if err != nil {
  1285  				info.Endpoint = client.host.String()
  1286  				info.State = string(madmin.ItemOffline)
  1287  				info.Disks = getOfflineDisks(info.Endpoint, globalEndpoints)
  1288  			} else {
  1289  				info.State = string(madmin.ItemOnline)
  1290  			}
  1291  			reply[idx] = info
  1292  		}(client, i)
  1293  	}
  1294  	wg.Wait()
  1295  
  1296  	return reply
  1297  }
  1298  
  1299  // GetLocalDiskIDs - return disk ids of the local disks of the peers.
  1300  func (sys *NotificationSys) GetLocalDiskIDs(ctx context.Context) (localDiskIDs [][]string) {
  1301  	localDiskIDs = make([][]string, len(sys.peerClients))
  1302  	var wg sync.WaitGroup
  1303  	for idx, client := range sys.peerClients {
  1304  		if client == nil {
  1305  			continue
  1306  		}
  1307  		wg.Add(1)
  1308  		go func(idx int, client *peerRESTClient) {
  1309  			defer wg.Done()
  1310  			localDiskIDs[idx] = client.GetLocalDiskIDs(ctx)
  1311  		}(idx, client)
  1312  	}
  1313  	wg.Wait()
  1314  	return localDiskIDs
  1315  }
  1316  
  1317  // returns all the peers that are currently online.
  1318  func (sys *NotificationSys) getOnlinePeers() []*peerRESTClient {
  1319  	var peerClients []*peerRESTClient
  1320  	for _, peerClient := range sys.allPeerClients {
  1321  		if peerClient != nil && peerClient.IsOnline() {
  1322  			peerClients = append(peerClients, peerClient)
  1323  		}
  1324  	}
  1325  	return peerClients
  1326  }
  1327  
  1328  // restClientFromHash will return a deterministic peerRESTClient based on s.
  1329  // Will return nil if client is local.
  1330  func (sys *NotificationSys) restClientFromHash(s string) (client *peerRESTClient) {
  1331  	if len(sys.peerClients) == 0 {
  1332  		return nil
  1333  	}
  1334  	peerClients := sys.getOnlinePeers()
  1335  	if len(peerClients) == 0 {
  1336  		return nil
  1337  	}
  1338  	idx := xxhash.Sum64String(s) % uint64(len(peerClients))
  1339  	return peerClients[idx]
  1340  }
  1341  
  1342  // NewNotificationSys - creates new notification system object.
  1343  func NewNotificationSys(endpoints EndpointServerPools) *NotificationSys {
  1344  	// targetList/bucketRulesMap/bucketRemoteTargetRulesMap are populated by NotificationSys.Init()
  1345  	remote, all := newPeerRestClients(endpoints)
  1346  	return &NotificationSys{
  1347  		targetList:                 event.NewTargetList(),
  1348  		targetResCh:                make(chan event.TargetIDResult),
  1349  		bucketRulesMap:             make(map[string]event.RulesMap),
  1350  		bucketRemoteTargetRulesMap: make(map[string]map[event.TargetID]event.RulesMap),
  1351  		peerClients:                remote,
  1352  		allPeerClients:             all,
  1353  	}
  1354  }
  1355  
  1356  // GetPeerOnlineCount gets the count of online and offline nodes.
  1357  func GetPeerOnlineCount() (nodesOnline, nodesOffline int) {
  1358  	nodesOnline = 1 // Self is always online.
  1359  	nodesOffline = 0
  1360  	servers := GlobalNotificationSys.ServerInfo()
  1361  	for _, s := range servers {
  1362  		if s.State == string(madmin.ItemOnline) {
  1363  			nodesOnline++
  1364  			continue
  1365  		}
  1366  		nodesOffline++
  1367  	}
  1368  	return
  1369  }
  1370  
// eventArgs carries everything needed to build a bucket notification
// event (see ToEvent) for a single object operation.
type eventArgs struct {
	EventName    event.Name        // S3 event type (e.g. object created/removed)
	BucketName   string            // bucket the event applies to
	Object       ObjectInfo        // object metadata at the time of the event
	ReqParams    map[string]string // request parameters, e.g. "region", "principalId"
	RespElements map[string]string // response elements, e.g. "requestId", "content-length"
	Host         string            // originating host of the request
	UserAgent    string            // user agent of the originating request
}
  1380  
  1381  // ToEvent - converts to notification event.
  1382  func (args eventArgs) ToEvent(escape bool) event.Event {
  1383  	eventTime := UTCNow()
  1384  	uniqueID := fmt.Sprintf("%X", eventTime.UnixNano())
  1385  
  1386  	respElements := map[string]string{
  1387  		"x-amz-request-id":        args.RespElements["requestId"],
  1388  		"x-minio-origin-endpoint": globalMinioEndpoint, // MinIO specific custom elements.
  1389  	}
  1390  	// Add deployment as part of
  1391  	if globalDeploymentID != "" {
  1392  		respElements["x-minio-deployment-id"] = globalDeploymentID
  1393  	}
  1394  	if args.RespElements["content-length"] != "" {
  1395  		respElements["content-length"] = args.RespElements["content-length"]
  1396  	}
  1397  	keyName := args.Object.Name
  1398  	if escape {
  1399  		keyName = url.QueryEscape(args.Object.Name)
  1400  	}
  1401  	newEvent := event.Event{
  1402  		EventVersion:      "2.0",
  1403  		EventSource:       "minio:s3",
  1404  		AwsRegion:         args.ReqParams["region"],
  1405  		EventTime:         eventTime.Format(event.AMZTimeFormat),
  1406  		EventName:         args.EventName,
  1407  		UserIdentity:      event.Identity{PrincipalID: args.ReqParams["principalId"]},
  1408  		RequestParameters: args.ReqParams,
  1409  		ResponseElements:  respElements,
  1410  		S3: event.Metadata{
  1411  			SchemaVersion:   "1.0",
  1412  			ConfigurationID: "Config",
  1413  			Bucket: event.Bucket{
  1414  				Name:          args.BucketName,
  1415  				OwnerIdentity: event.Identity{PrincipalID: args.ReqParams["principalId"]},
  1416  				ARN:           policy.ResourceARNPrefix + args.BucketName,
  1417  			},
  1418  			Object: event.Object{
  1419  				Key:       keyName,
  1420  				VersionID: args.Object.VersionID,
  1421  				Sequencer: uniqueID,
  1422  			},
  1423  		},
  1424  		Source: event.Source{
  1425  			Host:      args.Host,
  1426  			UserAgent: args.UserAgent,
  1427  		},
  1428  	}
  1429  
  1430  	if args.EventName != event.ObjectRemovedDelete && args.EventName != event.ObjectRemovedDeleteMarkerCreated {
  1431  		newEvent.S3.Object.ETag = args.Object.ETag
  1432  		newEvent.S3.Object.Size = args.Object.Size
  1433  		newEvent.S3.Object.ContentType = args.Object.ContentType
  1434  		newEvent.S3.Object.UserMetadata = args.Object.UserDefined
  1435  	}
  1436  
  1437  	return newEvent
  1438  }
  1439  
  1440  func sendEvent(args eventArgs) {
  1441  	args.Object.Size, _ = args.Object.GetActualSize()
  1442  
  1443  	// avoid generating a notification for REPLICA creation event.
  1444  	if _, ok := args.ReqParams[xhttp.MinIOSourceReplicationRequest]; ok {
  1445  		return
  1446  	}
  1447  	// remove sensitive encryption entries in metadata.
  1448  	crypto.RemoveSensitiveEntries(args.Object.UserDefined)
  1449  	crypto.RemoveInternalEntries(args.Object.UserDefined)
  1450  
  1451  	// GlobalNotificationSys is not initialized in gateway mode.
  1452  	if GlobalNotificationSys == nil {
  1453  		return
  1454  	}
  1455  
  1456  	if globalHTTPListen.NumSubscribers() > 0 {
  1457  		globalHTTPListen.Publish(args.ToEvent(false))
  1458  	}
  1459  
  1460  	GlobalNotificationSys.Send(args)
  1461  }
  1462  
  1463  // GetBandwidthReports - gets the bandwidth report from all nodes including self.
  1464  func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ...string) bandwidth.Report {
  1465  	reports := make([]*bandwidth.Report, len(sys.peerClients))
  1466  	g := errgroup.WithNErrs(len(sys.peerClients))
  1467  	for index := range sys.peerClients {
  1468  		if sys.peerClients[index] == nil {
  1469  			continue
  1470  		}
  1471  		index := index
  1472  		g.Go(func() error {
  1473  			var err error
  1474  			reports[index], err = sys.peerClients[index].MonitorBandwidth(ctx, buckets)
  1475  			return err
  1476  		}, index)
  1477  	}
  1478  
  1479  	for index, err := range g.Wait() {
  1480  		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress",
  1481  			sys.peerClients[index].host.String())
  1482  		ctx := logger.SetReqInfo(ctx, reqInfo)
  1483  		logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String())
  1484  	}
  1485  	reports = append(reports, globalBucketMonitor.GetReport(bucketBandwidth.SelectBuckets(buckets...)))
  1486  	consolidatedReport := bandwidth.Report{
  1487  		BucketStats: make(map[string]bandwidth.Details),
  1488  	}
  1489  	for _, report := range reports {
  1490  		if report == nil || report.BucketStats == nil {
  1491  			continue
  1492  		}
  1493  		for bucket := range report.BucketStats {
  1494  			d, ok := consolidatedReport.BucketStats[bucket]
  1495  			if !ok {
  1496  				consolidatedReport.BucketStats[bucket] = bandwidth.Details{}
  1497  				d = consolidatedReport.BucketStats[bucket]
  1498  				d.LimitInBytesPerSecond = report.BucketStats[bucket].LimitInBytesPerSecond
  1499  			}
  1500  			if d.LimitInBytesPerSecond < report.BucketStats[bucket].LimitInBytesPerSecond {
  1501  				d.LimitInBytesPerSecond = report.BucketStats[bucket].LimitInBytesPerSecond
  1502  			}
  1503  			d.CurrentBandwidthInBytesPerSecond += report.BucketStats[bucket].CurrentBandwidthInBytesPerSecond
  1504  			consolidatedReport.BucketStats[bucket] = d
  1505  		}
  1506  	}
  1507  	return consolidatedReport
  1508  }
  1509  
  1510  // GetClusterMetrics - gets the cluster metrics from all nodes excluding self.
  1511  func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) chan Metric {
  1512  	g := errgroup.WithNErrs(len(sys.peerClients))
  1513  	peerChannels := make([]<-chan Metric, len(sys.peerClients))
  1514  	for index := range sys.peerClients {
  1515  		if sys.peerClients[index] == nil {
  1516  			continue
  1517  		}
  1518  		index := index
  1519  		g.Go(func() error {
  1520  			var err error
  1521  			peerChannels[index], err = sys.peerClients[index].GetPeerMetrics(ctx)
  1522  			return err
  1523  		}, index)
  1524  	}
  1525  
  1526  	ch := make(chan Metric)
  1527  	var wg sync.WaitGroup
  1528  	for index, err := range g.Wait() {
  1529  		reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress",
  1530  			sys.peerClients[index].host.String())
  1531  		ctx := logger.SetReqInfo(ctx, reqInfo)
  1532  		if err != nil {
  1533  			logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String())
  1534  			continue
  1535  		}
  1536  		wg.Add(1)
  1537  		go func(ctx context.Context, peerChannel <-chan Metric, wg *sync.WaitGroup) {
  1538  			defer wg.Done()
  1539  			for {
  1540  				select {
  1541  				case m, ok := <-peerChannel:
  1542  					if !ok {
  1543  						return
  1544  					}
  1545  					ch <- m
  1546  				case <-ctx.Done():
  1547  					return
  1548  				}
  1549  			}
  1550  		}(ctx, peerChannels[index], &wg)
  1551  	}
  1552  	go func(wg *sync.WaitGroup, ch chan Metric) {
  1553  		wg.Wait()
  1554  		close(ch)
  1555  	}(&wg, ch)
  1556  	return ch
  1557  }