github.com/ethersphere/bee/v2@v2.2.0/pkg/sharky/sharky_test.go

// Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sharky_test

import (
	"bytes"
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io/fs"
	"math/rand"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/ethersphere/bee/v2/pkg/sharky"
	"golang.org/x/sync/errgroup"
)

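// dirFS exposes a directory on the host filesystem as an fs.FS whose Open
// also creates missing files read-write, letting sharky lazily create its
// shard files inside the test directory.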
type dirFS struct {
	basedir string
}

func (d *dirFS) Open(path string) (fs.File, error) {
	return os.OpenFile(filepath.Join(d.basedir, path), os.O_RDWR|os.O_CREATE, 0644)
}

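// TestSingleRetrieval writes payloads below, at, and above the configured
// datasize and verifies that each successful write can be read back from the
// returned location.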
func TestSingleRetrieval(t *testing.T) {
	t.Parallel()

	datasize := 4
	dir := t.TempDir()
	s, err := sharky.New(&dirFS{basedir: dir}, 2, datasize)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { s.Close() })

	ctx := context.Background()

	t.Run("write and read", func(t *testing.T) {
		t.Parallel()

		for _, tc := range []struct {
			name string
			want []byte
			err  error
		}{
			{
				"short data",
				[]byte{0x1},
				nil,
			}, {
				"exact size data",
				[]byte{1, 1, 1, 1},
				nil,
			}, {
				"exact size data 2",
				[]byte{1, 1, 1, 1},
				nil,
			}, {
				"long data",
				[]byte("long data"),
				sharky.ErrTooLong,
			}, {
				"exact size data 3",
				[]byte{1, 1, 1, 1},
				nil,
			},
		} {
			tc := tc
			t.Run(tc.name, func(t *testing.T) {
				cctx, cancel := context.WithTimeout(ctx, 800*time.Millisecond)
				defer cancel()
				loc, err := s.Write(cctx, tc.want)
				if !errors.Is(err, tc.err) {
					t.Fatalf("error mismatch on write. want %v, got %v", tc.err, err)
				}
				if err != nil {
					return
				}
				buf := make([]byte, datasize)
				err = s.Read(ctx, loc, buf)
				if err != nil {
					t.Fatal(err)
				}
				got := buf[:loc.Length]
				if !bytes.Equal(tc.want, got) {
					t.Fatalf("data mismatch at location %v. want %x, got %x", loc, tc.want, got)
				}
			})
		}
	})
}
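
// TestWriteReadReleaseSketch is an added, minimal end-to-end sketch of the
// API exercised throughout this file: write a payload, read it back via the
// returned location, then release the slot. Shard count, datasize and the
// payload are illustrative assumptions; only the Write/Read/Release/Close
// calls already used by the surrounding tests are relied on.
func TestWriteReadReleaseSketch(t *testing.T) {
	t.Parallel()

	s, err := sharky.New(&dirFS{basedir: t.TempDir()}, 1, 8)
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { s.Close() })

	ctx := context.Background()
	want := []byte("swarm")
	loc, err := s.Write(ctx, want)
	if err != nil {
		t.Fatal(err)
	}
	buf := make([]byte, 8)
	if err := s.Read(ctx, loc, buf); err != nil {
		t.Fatal(err)
	}
	if got := buf[:loc.Length]; !bytes.Equal(want, got) {
		t.Fatalf("round trip mismatch. want %q, got %q", want, got)
	}
	// releasing hands the slot back for reuse by later writes
	if err := s.Release(ctx, loc); err != nil {
		t.Fatal(err)
	}
}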

// TestPersistence tests behaviour across several process sessions
// and checks that items and pregenerated free slots are persisted correctly.
func TestPersistence(t *testing.T) {
	t.Parallel()

	datasize := 4
	shards := 2
	shardSize := uint32(16)
	items := shards * int(shardSize)

	dir := t.TempDir()
	buf := make([]byte, 4)
	locs := make([]*sharky.Location, items)
	i := 0
	j := 0
	ctx := context.Background()
	// simulate several subsequent sessions filling up the store
	for ; i < items; j++ {
		cctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		s, err := sharky.New(&dirFS{basedir: dir}, shards, datasize)
		if err != nil {
			t.Fatal(err)
		}
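		// each session writes a random run of items, each subsequent write
		// having a 1-in-4 chance of ending the session; closing the store
		// simulates a process shutdown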
		for ; i < items && rand.Intn(4) > 0; i++ {
			if locs[i] != nil {
				continue
			}
			binary.BigEndian.PutUint32(buf, uint32(i))
			loc, err := s.Write(cctx, buf)
			if err != nil {
				t.Fatal(err)
			}
			locs[i] = &loc
		}
		cancel()
		if err := s.Close(); err != nil {
			t.Fatal(err)
		}
	}
	t.Logf("got full in %d sessions", j)

	// check location and data consistency after a final restart
	cctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	s, err := sharky.New(&dirFS{basedir: dir}, shards, datasize)
	if err != nil {
		t.Fatal(err)
	}
	buf = make([]byte, datasize)
	j = 0
	for want, loc := range locs {
		j++
		err := s.Read(cctx, *loc, buf)
		if err != nil {
			t.Fatal(err)
		}
		got := binary.BigEndian.Uint32(buf)
		if int(got) != want {
			t.Fatalf("data mismatch. want %d, got %d", want, got)
		}
	}
	cancel()
	if err := s.Close(); err != nil {
		t.Fatal(err)
	}
}

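// TestConcurrency runs concurrent writers against a store while other workers
// read and release the written slots, then verifies that every location still
// buffered holds an unreleased value and that the freed slots remain writable.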
func TestConcurrency(t *testing.T) {
	t.Parallel()

	datasize := 4
	test := func(t *testing.T, workers, shards int, shardSize uint32) {
		t.Helper()

		limit := shards * int(shardSize)

		dir := t.TempDir()
		s, err := sharky.New(&dirFS{basedir: dir}, shards, datasize)
		if err != nil {
			t.Fatal(err)
		}
		c := make(chan sharky.Location, limit)
		start := make(chan struct{})
		deleted := make(map[uint32]int)
		entered := make(map[uint32]struct{})
		ctx := context.Background()
		eg, ectx := errgroup.WithContext(ctx)
		// a number of workers write sequential numbers to sharky
		for k := 0; k < workers; k++ {
			k := k
			eg.Go(func() error {
				<-start
				buf := make([]byte, 4)
				for i := 0; i < limit; i++ {
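					// j is unique across all writers and iterations, so a
					// duplicate value observed by readers indicates corruption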
					j := i*workers + k
					binary.BigEndian.PutUint32(buf, uint32(j))
					loc, err := s.Write(ctx, buf)
					if err != nil {
						return err
					}
					select {
					case <-ectx.Done():
						return ectx.Err()
					case c <- loc:
					}
				}
				return nil
			})
		}
		// parallel to these workers, other workers collect the taken slots and
		// release them, modelling some aggressive gc policy
		mtx := sync.Mutex{}
		for k := 0; k < workers-1; k++ {
			eg.Go(func() error {
				<-start
				buf := make([]byte, datasize)
				for i := 0; i < limit; i++ {
					select {
					case <-ectx.Done():
						return ectx.Err()
					case loc := <-c:
						if err := s.Read(ectx, loc, buf); err != nil {
							return err
						}
						j := binary.BigEndian.Uint32(buf)
						mtx.Lock()
						deleted[j]++
						mtx.Unlock()
						if err := s.Release(ectx, loc); err != nil {
							return err
						}
					}
				}
				return nil
			})
		}
		close(start)
		if err := eg.Wait(); err != nil {
			t.Fatal(err)
		}
		close(c)
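		// tally the results: values never released must still be readable from
		// the store, while surplus releases of the same value each freed an
		// extra slot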
		extraSlots := 0
		for i := uint32(0); i < uint32(workers*limit); i++ {
			cnt, found := deleted[i]
			if !found {
				entered[i] = struct{}{}
				continue
			}
			extraSlots += cnt - 1
		}
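		// drain the locations still buffered in the channel; each must hold a
		// value that was never released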
		buf := make([]byte, datasize)
		for loc := range c {
			err := s.Read(ctx, loc, buf)
			if err != nil {
				t.Fatal(err)
			}
			i := binary.BigEndian.Uint32(buf)

			_, found := entered[i]
			if !found {
				t.Fatalf("location %v holds value %d that was already released", loc, i)
			}
		}

		// the store should still have capacity for the extra freed slots
		cctx, cancel := context.WithTimeout(ctx, 800*time.Millisecond)
		for i := 0; i < extraSlots; i++ {
			_, err = s.Write(cctx, []byte{0})
			if err != nil {
				t.Fatal(err)
			}
		}
		cancel()

		if err := s.Close(); err != nil {
			t.Fatal(err)
		}
	}
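	// cover both contention-heavy (many workers, few slots) and
	// capacity-heavy (many shards and slots) configurations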
	for _, c := range []struct {
		workers, shards int
		shardSize       uint32
	}{
		{3, 2, 2},
		{2, 64, 2},
		{32, 8, 32},
		{64, 32, 64},
	} {
		c := c
		t.Run(fmt.Sprintf("workers:%d,shards:%d,size:%d", c.workers, c.shards, c.shardSize), func(t *testing.T) {
			t.Parallel()
			test(t, c.workers, c.shards, c.shardSize)
		})
	}
}