storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/pkg/dsync/dsync_test.go

/*
 * Minio Cloud Storage, (C) 2016 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// GOMAXPROCS=10 go test

package dsync_test

import (
	"context"
	"fmt"
	"log"
	"math/rand"
	"net"
	"net/http"
	"net/rpc"
	"os"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/google/uuid"

	"storj.io/minio/pkg/dsync"
	. "storj.io/minio/pkg/dsync"
)

const numberOfNodes = 5

var ds *Dsync
var rpcPaths []string // list of rpc paths where lock server is serving.

var nodes = make([]string, numberOfNodes) // list of node IP addrs or hostname with ports.
var lockServers []*lockServer

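// NOTE: helpers referenced below (lockServer, newClient, and the id and
// source values) are assumed to be defined in sibling test files of this
// package, as in the upstream minio dsync tests.

// startRPCServers starts one net/rpc lock server per node, each registered
// under its own RPC path, and gives them a moment to begin serving.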
func startRPCServers() {
	for i := range nodes {
		server := rpc.NewServer()
		ls := &lockServer{
			mutex:   sync.Mutex{},
			lockMap: make(map[string]int64),
		}
		if err := server.RegisterName("Dsync", ls); err != nil {
			log.Fatal("RegisterName error:", err)
		}
		// The registration paths must differ per server object because
		// HandleHTTP registers the handlers on http.DefaultServeMux.
		server.HandleHTTP(rpcPaths[i], fmt.Sprintf("%s-debug", rpcPaths[i]))
		l, e := net.Listen("tcp", ":"+strconv.Itoa(i+12345))
		if e != nil {
			log.Fatal("listen error:", e)
		}
		go http.Serve(l, nil)

		lockServers = append(lockServers, ls)
	}

	// Give the servers a moment to start.
	time.Sleep(10 * time.Millisecond)
}

// TestMain sets up the lock servers and dsync clients, then runs the tests.
func TestMain(m *testing.M) {
	const rpcPath = "/dsync"

	rand.Seed(time.Now().UTC().UnixNano())

	for i := range nodes {
		nodes[i] = fmt.Sprintf("127.0.0.1:%d", i+12345)
	}
	for i := range nodes {
		rpcPaths = append(rpcPaths, rpcPath+"-"+strconv.Itoa(i))
	}

	// Initialize net/rpc clients for dsync.
	var clnts []NetLocker
	for i := 0; i < len(nodes); i++ {
		clnts = append(clnts, newClient(nodes[i], rpcPaths[i]))
	}

	ds = &Dsync{
		GetLockers: func() ([]NetLocker, string) { return clnts, uuid.New().String() },
	}

	startRPCServers()

	os.Exit(m.Run())
}

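// TestSimpleLock acquires a single distributed lock, holds it briefly, and
// releases it.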
func TestSimpleLock(t *testing.T) {
	dm := NewDRWMutex(ds, "test")

	dm.Lock(id, source)

	// fmt.Println("Lock acquired, waiting...")
	time.Sleep(2500 * time.Millisecond)

	dm.Unlock()
}

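// TestSimpleLockUnlockMultipleTimes repeatedly acquires and releases the same
// lock to verify it can be reused.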
func TestSimpleLockUnlockMultipleTimes(t *testing.T) {
	dm := NewDRWMutex(ds, "test")

	for i := 0; i < 5; i++ {
		dm.Lock(id, source)
		time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond)
		dm.Unlock()
	}
}

// Test two locks for the same resource: the second lock can only be acquired
// after the first lock is released.
func TestTwoSimultaneousLocksForSameResource(t *testing.T) {
	dm1st := NewDRWMutex(ds, "aap")
	dm2nd := NewDRWMutex(ds, "aap")

	dm1st.Lock(id, source)

	// Release the first lock after 10 seconds
	go func() {
		time.Sleep(10 * time.Second)
		// fmt.Println("Unlocking dm1")

		dm1st.Unlock()
	}()

	dm2nd.Lock(id, source)

	// fmt.Printf("2nd lock obtained after 1st lock is released\n")
	time.Sleep(2500 * time.Millisecond)

	dm2nd.Unlock()
}

// Test three locks for the same resource: the locks are acquired one after
// another as the earlier locks are released.
func TestThreeSimultaneousLocksForSameResource(t *testing.T) {
	dm1st := NewDRWMutex(ds, "aap")
	dm2nd := NewDRWMutex(ds, "aap")
	dm3rd := NewDRWMutex(ds, "aap")

	dm1st.Lock(id, source)

	// Release the first lock after 10 seconds
	go func() {
		time.Sleep(10 * time.Second)
		// fmt.Println("Unlocking dm1")

		dm1st.Unlock()
	}()

	var wg sync.WaitGroup
	wg.Add(2)

	go func() {
		defer wg.Done()

		dm2nd.Lock(id, source)

		// Release the second lock after 2.5 seconds
		go func() {
			time.Sleep(2500 * time.Millisecond)
			// fmt.Println("Unlocking dm2")

			dm2nd.Unlock()
		}()

		dm3rd.Lock(id, source)

		// fmt.Printf("3rd lock obtained after 1st & 2nd locks are released\n")
		time.Sleep(2500 * time.Millisecond)

		dm3rd.Unlock()
	}()

	go func() {
		defer wg.Done()

		dm3rd.Lock(id, source)

		// Release the third lock after 2.5 seconds
		go func() {
			time.Sleep(2500 * time.Millisecond)
			// fmt.Println("Unlocking dm3")

			dm3rd.Unlock()
		}()

		dm2nd.Lock(id, source)

		// fmt.Printf("2nd lock obtained after 1st & 3rd locks are released\n")
		time.Sleep(2500 * time.Millisecond)

		dm2nd.Unlock()
	}()

	wg.Wait()
}

// Test two locks for different resources, both succeed
func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) {
	dm1 := NewDRWMutex(ds, "aap")
	dm2 := NewDRWMutex(ds, "noot")

	dm1.Lock(id, source)
	dm2.Lock(id, source)

	// fmt.Println("Both locks acquired, waiting...")
	time.Sleep(2500 * time.Millisecond)

	dm1.Unlock()
	dm2.Unlock()

	time.Sleep(10 * time.Millisecond)
}

// Test that a failed refresh cancels the lock's context and that Unlock
// remains a safe operation afterwards.
func TestFailedRefreshLock(t *testing.T) {
	// Simulate Refresh RPC responses that report no lock found
	for i := range lockServers {
		lockServers[i].setRefreshReply(false)
	}

	dm := NewDRWMutex(ds, "aap")
	wg := sync.WaitGroup{}
	wg.Add(1)

	ctx, cl := context.WithCancel(context.Background())
	cancel := func() {
		cl()
		wg.Done()
	}

	if !dm.GetLock(ctx, cancel, id, source, dsync.Options{Timeout: 5 * time.Minute}) {
		t.Fatal("GetLock() should be successful")
	}

	// Wait until the context is canceled by the failed refresh
	wg.Wait()
	if ctx.Err() == nil {
		t.Fatal("Expected the lock context to be canceled")
	}

	// Unlock should be a safe operation in all cases
	dm.Unlock()

	// Revert the Refresh RPC responses to reporting the lock as found
	for i := range lockServers {
		lockServers[i].setRefreshReply(true)
	}
}

// Borrowed from mutex_test.go
func HammerMutex(m *DRWMutex, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		m.Lock(id, source)
		m.Unlock()
	}
	cdone <- true
}

// Borrowed from mutex_test.go
func TestMutex(t *testing.T) {
	loops := 200
	if testing.Short() {
		loops = 5
	}
	c := make(chan bool)
	m := NewDRWMutex(ds, "test")
	for i := 0; i < 10; i++ {
		go HammerMutex(m, loops, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}

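// BenchmarkMutexUncontended measures lock/unlock cost when every parallel
// goroutine uses its own DRWMutex instance.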
func BenchmarkMutexUncontended(b *testing.B) {
	type PaddedMutex struct {
		*DRWMutex
	}
	b.RunParallel(func(pb *testing.PB) {
		var mu = PaddedMutex{NewDRWMutex(ds, "")}
		for pb.Next() {
			mu.Lock(id, source)
			mu.Unlock()
		}
	})
}

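// benchmarkMutex is the shared driver for the contended benchmarks below:
// slack runs extra goroutines beyond GOMAXPROCS, and work adds some local
// CPU work between critical sections.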
func benchmarkMutex(b *testing.B, slack, work bool) {
	mu := NewDRWMutex(ds, "")
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			mu.Lock(id, source)
			mu.Unlock()
			if work {
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo
	})
}

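// The benchmarks below cover every combination of the slack and work options
// of benchmarkMutex.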
func BenchmarkMutex(b *testing.B) {
	benchmarkMutex(b, false, false)
}

func BenchmarkMutexSlack(b *testing.B) {
	benchmarkMutex(b, true, false)
}

func BenchmarkMutexWork(b *testing.B) {
	benchmarkMutex(b, false, true)
}

func BenchmarkMutexWorkSlack(b *testing.B) {
	benchmarkMutex(b, true, true)
}

func BenchmarkMutexNoSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// unprofitable, and lets us confirm that spinning does no harm.
	// To achieve this we create an excess of goroutines, most of which do local work.
	// These goroutines yield during local work, so that switching from
	// a blocked goroutine to other goroutines is profitable.
	// As a matter of fact, this benchmark still triggers some spinning in the mutex.
	m := NewDRWMutex(ds, "")
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			if i%4 == 0 {
				m.Lock(id, source)
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			} else {
				for i := 0; i < len(data); i += 4 {
					data[i]++
				}
				// An elaborate way to invoke runtime.Gosched
				// that does not put the goroutine onto the global run queue.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}

func BenchmarkMutexSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should be
	// profitable. To achieve this we create a goroutine per proc.
	// These goroutines access a considerable amount of local data so that
	// unnecessary rescheduling is penalized by cache misses.
	m := NewDRWMutex(ds, "")
	var acc0, acc1 uint64
	b.RunParallel(func(pb *testing.PB) {
		var data [16 << 10]uint64
		for i := 0; pb.Next(); i++ {
			m.Lock(id, source)
			acc0 -= 100
			acc1 += 100
			m.Unlock()
			for i := 0; i < len(data); i += 4 {
				data[i]++
			}
		}
	})
}