storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/local-locker.go

/*
 * MinIO Cloud Storage, (C) 2018, 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"fmt"
	"sync"
	"time"

	"storj.io/minio/pkg/dsync"
)

// lockRequesterInfo stores various info from the client for each lock that is requested.
type lockRequesterInfo struct {
	Name            string    // Name of the resource the lock was requested for.
	Writer          bool      // True for a write lock, false for a read lock.
	UID             string    // UID uniquely identifying the client's lock request.
	Timestamp       time.Time // Timestamp set at the time of initialization.
	TimeLastRefresh time.Time // Timestamp of the last lock refresh.
	Source          string    // Line, function and filename requesting the lock.
	Group           bool      // Indicates whether this is a group lock spanning multiple resources.
	// Owner represents the UUID of the owner who originally requested the lock;
	// useful during expiry.
	Owner string
	// Quorum represents the quorum required for this lock to be active.
	Quorum int
}

// isWriteLock returns true if the given entries represent a single write lock.
func isWriteLock(lri []lockRequesterInfo) bool {
	return len(lri) == 1 && lri[0].Writer
}

// localLocker implements dsync.NetLocker.
type localLocker struct {
	mutex   sync.Mutex
	lockMap map[string][]lockRequesterInfo // Resource name -> current lock requesters.
}

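// The lockMap entries take one of two shapes (an informal illustration, not
// part of the original source; the resource names and UIDs below are made up):
//
//	// Exactly one entry with Writer == true for a write-locked resource:
//	lockMap["bucket/object-a"] = []lockRequesterInfo{{Writer: true, UID: "uid-1"}}
//
//	// One or more entries with Writer == false for a read-locked resource:
//	lockMap["bucket/object-b"] = []lockRequesterInfo{{Writer: false, UID: "uid-2"}, {Writer: false, UID: "uid-3"}}
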
// String returns the local endpoint, as reported by globalEndpoints.Localhost().
func (l *localLocker) String() string {
	return globalEndpoints.Localhost()
}

// canTakeUnlock returns true only if a write lock is currently held on every
// given resource.
func (l *localLocker) canTakeUnlock(resources ...string) bool {
	var lkCnt int
	for _, resource := range resources {
		isWriteLockTaken := isWriteLock(l.lockMap[resource])
		if isWriteLockTaken {
			lkCnt++
		}
	}
	return lkCnt == len(resources)
}

// canTakeLock returns true only if none of the given resources are currently
// locked.
func (l *localLocker) canTakeLock(resources ...string) bool {
	var noLkCnt int
	for _, resource := range resources {
		_, lockTaken := l.lockMap[resource]
		if !lockTaken {
			noLkCnt++
		}
	}
	return noLkCnt == len(resources)
}

// Lock acquires a write lock on all requested resources at once; the request
// is rejected if any of the resources is already locked.
func (l *localLocker) Lock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	if !l.canTakeLock(args.Resources...) {
		// Not all locks can be taken on the resources,
		// reject the request completely.
		return false, nil
	}

	// No locks held on any of the resources, so claim a write
	// lock on all resources at once.
	for _, resource := range args.Resources {
		l.lockMap[resource] = []lockRequesterInfo{
			{
				Name:            resource,
				Writer:          true,
				Source:          args.Source,
				Owner:           args.Owner,
				UID:             args.UID,
				Timestamp:       UTCNow(),
				TimeLastRefresh: UTCNow(),
				Group:           len(args.Resources) > 1,
				Quorum:          args.Quorum,
			},
		}
	}
	return true, nil
}

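// A minimal sketch of the write-lock lifecycle (illustration only, not part of
// the original source; the UID, Owner, Source and resource values are made up):
//
//	locker := newLocker()
//	args := dsync.LockArgs{
//		UID:       "uid-1",
//		Owner:     "owner-uuid",
//		Source:    "caller.go:42",
//		Resources: []string{"bucket/object"},
//	}
//	ok, _ := locker.Lock(context.Background(), args) // ok == true, write lock taken
//	ok, _ = locker.Lock(context.Background(), args)  // ok == false, resource already locked
//	ok, _ = locker.Unlock(args)                      // ok == true, lock released
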
// Unlock releases write locks previously acquired with Lock; the request is
// rejected unless every resource is currently write locked.
func (l *localLocker) Unlock(args dsync.LockArgs) (reply bool, err error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	if !l.canTakeUnlock(args.Resources...) {
		// Unless it is a write lock, reject it.
		return reply, fmt.Errorf("Unlock attempted on a read locked entity: %s", args.Resources)
	}
	for _, resource := range args.Resources {
		lri, ok := l.lockMap[resource]
		if ok {
			l.removeEntry(resource, args, &lri)
		}
	}
	return true, nil
}

// removeEntry removes, based on the UID of the lock message, a single entry
// from the lockRequesterInfo array, or the whole array from the map (in case
// of a write lock or the last read lock).
func (l *localLocker) removeEntry(name string, args dsync.LockArgs, lri *[]lockRequesterInfo) bool {
	// Find the correct entry to remove based on UID.
	for index, entry := range *lri {
		if entry.UID == args.UID && entry.Owner == args.Owner {
			if len(*lri) == 1 {
				// Remove the write lock.
				delete(l.lockMap, name)
			} else {
				// Remove the appropriate read lock.
				*lri = append((*lri)[:index], (*lri)[index+1:]...)
				l.lockMap[name] = *lri
			}
			return true
		}
	}

	// None found, return false; perhaps the entry was removed in a previous run.
	return false
}

// RLock acquires a read lock on a single resource, unless the resource is
// already write locked.
func (l *localLocker) RLock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	resource := args.Resources[0]
	lrInfo := lockRequesterInfo{
		Name:            resource,
		Writer:          false,
		Source:          args.Source,
		Owner:           args.Owner,
		UID:             args.UID,
		Timestamp:       UTCNow(),
		TimeLastRefresh: UTCNow(),
		Quorum:          args.Quorum,
	}
	if lri, ok := l.lockMap[resource]; ok {
		if reply = !isWriteLock(lri); reply {
			// Unless there is a write lock.
			l.lockMap[resource] = append(l.lockMap[resource], lrInfo)
		}
	} else {
		// No locks held on the given name, so claim the (first) read lock.
		l.lockMap[resource] = []lockRequesterInfo{lrInfo}
		reply = true
	}
	return reply, nil
}

// RUnlock releases a read lock previously acquired with RLock; the request is
// rejected if the resource is write locked.
func (l *localLocker) RUnlock(args dsync.LockArgs) (reply bool, err error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	var lri []lockRequesterInfo

	resource := args.Resources[0]
	if lri, reply = l.lockMap[resource]; !reply {
		// No lock is held on the given name.
		return true, nil
	}
	if reply = !isWriteLock(lri); !reply {
		// A write lock is held, cannot release a read lock.
		return reply, fmt.Errorf("RUnlock attempted on a write locked entity: %s", resource)
	}
	l.removeEntry(resource, args, &lri)
	return reply, nil
}

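// A minimal sketch of the read-lock lifecycle (illustration only, not part of
// the original source; the UID, Owner and resource values are made up):
//
//	locker := newLocker()
//	rArgs := dsync.LockArgs{UID: "uid-r1", Owner: "owner-uuid", Resources: []string{"bucket/object"}}
//	ok, _ := locker.RLock(context.Background(), rArgs) // ok == true, first read lock
//	ok, _ = locker.RLock(context.Background(), dsync.LockArgs{
//		UID: "uid-r2", Owner: "owner-uuid", Resources: []string{"bucket/object"},
//	})                                                 // ok == true, read locks are shared
//	ok, _ = locker.Lock(context.Background(), rArgs)   // ok == false, readers block the writer
//	ok, _ = locker.RUnlock(rArgs)                      // ok == true, removes only the "uid-r1" entry
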
// DupLockMap returns a copy of the current lock map.
func (l *localLocker) DupLockMap() map[string][]lockRequesterInfo {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	lockCopy := map[string][]lockRequesterInfo{}
	for k, v := range l.lockMap {
		lockCopy[k] = append(lockCopy[k], v...)
	}
	return lockCopy
}

// Close is a no-op for the local locker.
func (l *localLocker) Close() error {
	return nil
}

// IsOnline - local locker is always online.
func (l *localLocker) IsOnline() bool {
	return true
}

// IsLocal - local locker returns true.
func (l *localLocker) IsLocal() bool {
	return true
}

// ForceUnlock removes all lock entries for the given resources, irrespective
// of whether they are read or write locks; it requires an empty UID.
func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) {
	select {
	case <-ctx.Done():
		return false, ctx.Err()
	default:
		l.mutex.Lock()
		defer l.mutex.Unlock()
		if len(args.UID) != 0 {
			return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
		}
		for _, resource := range args.Resources {
			delete(l.lockMap, resource) // Remove the lock, irrespective of write or read lock.
		}
		return true, nil
	}
}
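
// A minimal sketch of a forced unlock (illustration only, not part of the
// original source; the resource name is made up). ForceUnlock requires an
// empty UID and drops every entry for the named resources:
//
//	ok, err := locker.ForceUnlock(context.Background(), dsync.LockArgs{
//		Resources: []string{"bucket/object"},
//	})
//	// ok == true, err == nil; a non-empty UID would return an error instead.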

// Refresh extends the lease of an existing lock: it updates TimeLastRefresh
// for the entry matching the given UID and Owner on the first resource, and
// reports whether such an entry was found.
func (l *localLocker) Refresh(ctx context.Context, args dsync.LockArgs) (refreshed bool, err error) {
	select {
	case <-ctx.Done():
		return false, ctx.Err()
	default:
		l.mutex.Lock()
		defer l.mutex.Unlock()

		resource := args.Resources[0] // Refresh check is always per resource.

		// Lock found, proceed to verify if it belongs to the given UID.
		lri, ok := l.lockMap[resource]
		if !ok {
			// Lock doesn't exist yet, return false.
			return false, nil
		}

		// Check whether the UID is still active.
		for i := range lri {
			if lri[i].UID == args.UID && lri[i].Owner == args.Owner {
				lri[i].TimeLastRefresh = UTCNow()
				return true, nil
			}
		}

		return false, nil
	}
}

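// A minimal sketch of a lock holder keeping its lock alive (illustration only,
// not part of the original source; the interval and args values are made up):
//
//	for {
//		time.Sleep(10 * time.Second)
//		alive, err := locker.Refresh(context.Background(), args)
//		if err != nil || !alive {
//			break // Lock expired or the locker is shutting down.
//		}
//	}
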
// expireOldLocks removes any lock entries that have not been refreshed within
// the given interval.
func (l *localLocker) expireOldLocks(interval time.Duration) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	for _, lris := range l.lockMap {
		for _, lri := range lris {
			if time.Since(lri.TimeLastRefresh) > interval {
				l.removeEntry(lri.Name, dsync.LockArgs{Owner: lri.Owner, UID: lri.UID}, &lris)
			}
		}
	}
}

// newLocker returns a ready-to-use localLocker with an empty lock map.
func newLocker() *localLocker {
	return &localLocker{
		lockMap: make(map[string][]lockRequesterInfo),
	}
}
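
// A minimal sketch of wiring the pieces together (illustration only, not part
// of the original source; the expiry interval and ticker loop are made up and
// do not reflect how the surrounding package actually schedules expiry):
//
//	locker := newLocker()
//	go func() {
//		t := time.NewTicker(time.Minute)
//		defer t.Stop()
//		for range t.C {
//			locker.expireOldLocks(5 * time.Minute)
//		}
//	}()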