github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/internal/dsync/dsync-server_test.go

// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

package dsync

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"sync"
	"sync/atomic"
	"time"

	"github.com/minio/mux"
)

const numberOfNodes = 5

var (
	ds          *Dsync
	nodes       = make([]*httptest.Server, numberOfNodes) // test HTTP servers, one per node; their URLs stand in for the node addresses.
	lockServers = make([]*lockServer, numberOfNodes)
)

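// getLockArgs decodes the msgp-encoded LockArgs carried in the request body.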
func getLockArgs(r *http.Request) (args LockArgs, err error) {
	buf, err := io.ReadAll(r.Body)
	if err != nil {
		return args, err
	}
	_, err = args.UnmarshalMsg(buf)
	return args, err
}

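// lockServerHandler exposes a lockServer's operations as HTTP handlers.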
type lockServerHandler struct {
	lsrv *lockServer
}

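// writeErrorResponse replies with 403 Forbidden, carrying the error message in the body.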
func (lh *lockServerHandler) writeErrorResponse(w http.ResponseWriter, err error) {
	w.WriteHeader(http.StatusForbidden)
	w.Write([]byte(err.Error()))
}

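// ForceUnlockHandler serves /v1/force-unlock, releasing a lock regardless of its type.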
func (lh *lockServerHandler) ForceUnlockHandler(w http.ResponseWriter, r *http.Request) {
	args, err := getLockArgs(r)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}

	if _, err = lh.lsrv.ForceUnlock(&args); err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
}

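// RefreshHandler serves /v1/refresh; it replies with errLockNotFound when the
// server has been told to treat its locks as lost (see setRefreshReply).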
func (lh *lockServerHandler) RefreshHandler(w http.ResponseWriter, r *http.Request) {
	args, err := getLockArgs(r)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}

	reply, err := lh.lsrv.Refresh(&args)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}

	if !reply {
		lh.writeErrorResponse(w, errLockNotFound)
		return
	}
}

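// LockHandler serves /v1/lock, replying with errLockConflict when the write lock cannot be granted.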
func (lh *lockServerHandler) LockHandler(w http.ResponseWriter, r *http.Request) {
	args, err := getLockArgs(r)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
	reply, err := lh.lsrv.Lock(&args)
	if err == nil && !reply {
		err = errLockConflict
	}
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
}

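// UnlockHandler serves /v1/unlock, releasing a previously granted write lock.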
func (lh *lockServerHandler) UnlockHandler(w http.ResponseWriter, r *http.Request) {
	args, err := getLockArgs(r)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
	_, err = lh.lsrv.Unlock(&args)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
}

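// RUnlockHandler serves /v1/runlock, releasing one previously granted read lock.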
func (lh *lockServerHandler) RUnlockHandler(w http.ResponseWriter, r *http.Request) {
	args, err := getLockArgs(r)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
	_, err = lh.lsrv.RUnlock(&args)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
}

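// HealthHandler serves /v1/health; an empty 200 OK reply is sufficient.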
func (lh *lockServerHandler) HealthHandler(w http.ResponseWriter, r *http.Request) {}

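// RLockHandler serves /v1/rlock, replying with errLockConflict when a write lock is already held.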
func (lh *lockServerHandler) RLockHandler(w http.ResponseWriter, r *http.Request) {
	args, err := getLockArgs(r)
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}

	reply, err := lh.lsrv.RLock(&args)
	if err == nil && !reply {
		err = errLockConflict
	}
	if err != nil {
		lh.writeErrorResponse(w, err)
		return
	}
}

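// stopLockServers shuts down all test lock servers.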
func stopLockServers() {
	for i := 0; i < numberOfNodes; i++ {
		nodes[i].Close()
	}
}

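// startLockServers brings up numberOfNodes httptest servers, each routing the
// dsync v1 endpoints to its own lockServer.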
func startLockServers() {
	for i := 0; i < numberOfNodes; i++ {
		lsrv := &lockServer{
			mutex:   sync.Mutex{},
			lockMap: make(map[string]int64),
		}
		handler := lockServerHandler{ // renamed from "lockServer" to avoid shadowing the type name
			lsrv: lsrv,
		}
		lockServers[i] = lsrv

		router := mux.NewRouter().SkipClean(true)
		subrouter := router.PathPrefix("/").Subrouter()
		subrouter.Methods(http.MethodPost).Path("/v1/health").HandlerFunc(handler.HealthHandler)
		subrouter.Methods(http.MethodPost).Path("/v1/refresh").HandlerFunc(handler.RefreshHandler)
		subrouter.Methods(http.MethodPost).Path("/v1/lock").HandlerFunc(handler.LockHandler)
		subrouter.Methods(http.MethodPost).Path("/v1/rlock").HandlerFunc(handler.RLockHandler)
		subrouter.Methods(http.MethodPost).Path("/v1/unlock").HandlerFunc(handler.UnlockHandler)
		subrouter.Methods(http.MethodPost).Path("/v1/runlock").HandlerFunc(handler.RUnlockHandler)
		subrouter.Methods(http.MethodPost).Path("/v1/force-unlock").HandlerFunc(handler.ForceUnlockHandler)

		nodes[i] = httptest.NewServer(router)
	}
}

const WriteLock = -1

type lockServer struct {
	mutex sync.Mutex
	// Map of locks: a negative value indicates an (exclusive) write lock,
	// a positive value is the number of read locks currently held.
	lockMap map[string]int64

	// When true, Refresh reports the lock as not found.
	lockNotFound bool

	// Artificial delay (in nanoseconds, accessed atomically) applied before
	// every response, used to simulate slow or unresponsive peer servers.
	responseDelay int64
}

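// setRefreshReply controls whether subsequent Refresh calls report the lock as still present.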
func (l *lockServer) setRefreshReply(refreshed bool) {
	l.mutex.Lock()
	defer l.mutex.Unlock()
	l.lockNotFound = !refreshed
}

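// setResponseDelay sets the artificial delay applied before every response.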
func (l *lockServer) setResponseDelay(responseDelay time.Duration) {
	atomic.StoreInt64(&l.responseDelay, int64(responseDelay))
}

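// Lock grants an exclusive write lock on the first resource, provided no lock is currently held.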
func (l *lockServer) Lock(args *LockArgs) (reply bool, err error) {
	if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
		time.Sleep(time.Duration(d))
	}

	l.mutex.Lock()
	defer l.mutex.Unlock()
	if _, reply = l.lockMap[args.Resources[0]]; !reply {
		l.lockMap[args.Resources[0]] = WriteLock // No locks held on the given name, so claim write lock
	}
	reply = !reply // Negate reply: true when the lock was granted, false otherwise
	return reply, nil
}

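// Unlock releases a held write lock; it is an error if the resource is unlocked or read locked.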
func (l *lockServer) Unlock(args *LockArgs) (reply bool, err error) {
	if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
		time.Sleep(time.Duration(d))
	}

	l.mutex.Lock()
	defer l.mutex.Unlock()
	var locksHeld int64
	if locksHeld, reply = l.lockMap[args.Resources[0]]; !reply { // No lock is held on the given name
		return false, fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resources[0])
	}
	if reply = locksHeld == WriteLock; !reply { // Unless it is a write lock
		return false, fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resources[0], locksHeld)
	}
	delete(l.lockMap, args.Resources[0]) // Remove the write lock
	return true, nil
}

const ReadLock = 1

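// RLock grants a shared read lock on the first resource, provided no write lock is held.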
func (l *lockServer) RLock(args *LockArgs) (reply bool, err error) {
	if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
		time.Sleep(time.Duration(d))
	}

	l.mutex.Lock()
	defer l.mutex.Unlock()
	var locksHeld int64
	if locksHeld, reply = l.lockMap[args.Resources[0]]; !reply {
		l.lockMap[args.Resources[0]] = ReadLock // No locks held on the given name, so claim (first) read lock
		reply = true
	} else if reply = locksHeld != WriteLock; reply { // Unless there is a write lock
		l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock
	}
	return reply, nil
}

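// RUnlock releases one held read lock; it is an error if the resource is unlocked or write locked.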
func (l *lockServer) RUnlock(args *LockArgs) (reply bool, err error) {
	if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
		time.Sleep(time.Duration(d))
	}

	l.mutex.Lock()
	defer l.mutex.Unlock()
	var locksHeld int64
	if locksHeld, reply = l.lockMap[args.Resources[0]]; !reply { // No lock is held on the given name
		return false, fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resources[0])
	}
	if reply = locksHeld != WriteLock; !reply { // A write lock is held, cannot release a read lock
		return false, fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resources[0])
	}
	if locksHeld > ReadLock {
		l.lockMap[args.Resources[0]] = locksHeld - ReadLock // Remove one of the read locks held
	} else {
		delete(l.lockMap, args.Resources[0]) // Remove the (last) read lock
	}
	return reply, nil
}

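// Refresh reports whether the lock is still known to this server (see setRefreshReply).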
func (l *lockServer) Refresh(args *LockArgs) (reply bool, err error) {
	if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
		time.Sleep(time.Duration(d))
	}

	l.mutex.Lock()
	defer l.mutex.Unlock()
	reply = !l.lockNotFound
	return reply, nil
}

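// ForceUnlock removes the lock entry outright, write or read; callers must pass an empty UID.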
func (l *lockServer) ForceUnlock(args *LockArgs) (reply bool, err error) {
	if d := atomic.LoadInt64(&l.responseDelay); d != 0 {
		time.Sleep(time.Duration(d))
	}

	l.mutex.Lock()
	defer l.mutex.Unlock()
	if len(args.UID) != 0 {
		return false, fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID)
	}
	delete(l.lockMap, args.Resources[0]) // Remove the lock (irrespective of write or read lock)
	reply = true
	return reply, nil
}
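
// The sketch below is illustrative only and not part of the original file: it
// shows how a test could exercise one of these servers directly by POSTing
// msgp-encoded LockArgs to its /v1/lock endpoint. exampleLockRequest and its
// field values are hypothetical; it assumes "bytes" is added to the import
// block above, and that LockArgs provides the msgp-generated MarshalMsg
// counterpart of the UnmarshalMsg call used in getLockArgs.
func exampleLockRequest() error {
	startLockServers()
	defer stopLockServers()

	args := LockArgs{
		UID:       "1234-5678", // hypothetical lock UID
		Resources: []string{"example-resource"},
	}
	buf, err := args.MarshalMsg(nil) // msgp-generated encoder
	if err != nil {
		return err
	}

	// A 200 OK means the write lock was granted; on failure the handler
	// replies 403 with the error message in the response body.
	resp, err := http.Post(nodes[0].URL+"/v1/lock", "application/octet-stream", bytes.NewReader(buf))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		msg, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("lock not granted: %s", string(msg))
	}
	return nil
}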