github.com/onflow/atree@v0.6.0/cmd/stress/map.go

/*
 * Atree - Scalable Arrays and Ordered Maps
 *
 * Copyright 2021 Dapper Labs, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"fmt"
	"os"
	"runtime"
	"sync"
	"time"

	"github.com/onflow/atree"
)

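// The operations below are chosen uniformly at random on each iteration;
// three set variants to one remove biases the workload toward growing the map.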
const (
	mapSetOp1 = iota
	mapSetOp2
	mapSetOp3
	mapRemoveOp
	maxMapOp
)

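// mapStatus tracks progress of the map stress test. The mutex guards the
// counters so they can be updated by the test loop and read concurrently
// when the status is written out.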
type mapStatus struct {
	lock sync.RWMutex

	startTime time.Time

	count uint64 // number of elements in map

	setOps    uint64
	removeOps uint64
}

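// Compile-time check that mapStatus satisfies the package's Status interface.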
var _ Status = &mapStatus{}

func newMapStatus() *mapStatus {
	return &mapStatus{startTime: time.Now()}
}

func (status *mapStatus) String() string {
	status.lock.RLock()
	defer status.lock.RUnlock()

	duration := time.Since(status.startTime)

	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d sets, %d removes",
		duration.Truncate(time.Second).String(),
		m.Alloc/1024/1024,
		status.count,
		status.setOps,
		status.removeOps,
	)
}

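// incSet records a set operation; new reports whether the key was not already
// present, in which case the element count grows.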
func (status *mapStatus) incSet(new bool) {
	status.lock.Lock()
	defer status.lock.Unlock()

	status.setOps++

	if new {
		status.count++
	}
}

func (status *mapStatus) incRemove() {
	status.lock.Lock()
	defer status.lock.Unlock()

	status.removeOps++
	status.count--
}

func (status *mapStatus) Write() {
	writeStatus(status.String())
}

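// testMap stress-tests atree.OrderedMap: it applies random set and remove
// operations, mirrors each operation in an in-memory elements map and keys
// slice, and checks for data loss after every operation. When heap usage
// exceeds maxHeapAllocMiB it switches to removals until usage drops below
// minHeapAllocMiB. The function only returns when an error is detected.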
func testMap(
	storage *atree.PersistentSlabStorage,
	address atree.Address,
	typeInfo atree.TypeInfo,
	maxLength uint64,
	status *mapStatus,
	minHeapAllocMiB uint64,
	maxHeapAllocMiB uint64,
) {

	m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), typeInfo)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to create new map: %s", err)
		return
	}

	// elements contains generated keys and values. It is used to check data loss.
	elements := make(map[atree.Value]atree.Value, maxLength)

	// keys contains generated keys. It is used to select random keys for removal.
	keys := make([]atree.Value, 0, maxLength)

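	// reduceHeapAllocs is set when heap usage exceeds maxHeapAllocMiB and
	// cleared once it falls back below minHeapAllocMiB; while set, only
	// removals are performed.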
	reduceHeapAllocs := false

	opCount := uint64(0)

	var ms runtime.MemStats

	for {
		runtime.ReadMemStats(&ms)
		allocMiB := ms.Alloc / 1024 / 1024

		if !reduceHeapAllocs && allocMiB > maxHeapAllocMiB {
			fmt.Printf("\nHeapAlloc is %d MiB, removing elements to reduce allocs...\n", allocMiB)
			reduceHeapAllocs = true
		} else if reduceHeapAllocs && allocMiB < minHeapAllocMiB {
			fmt.Printf("\nHeapAlloc is %d MiB, resuming random operation...\n", allocMiB)
			reduceHeapAllocs = false
		}

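		// Once the map has been drained while shedding memory, flush pending
		// slabs, drop the read/write caches, reload the map from its root
		// slab, and force a GC so heap usage reflects only live data.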
		if reduceHeapAllocs && m.Count() == 0 {
			fmt.Printf("\nHeapAlloc is %d MiB while map is empty, dropping read/write cache to free mem\n", allocMiB)
			reduceHeapAllocs = false

			// Commit slabs to storage and drop read/write caches to reduce memory
			err = storage.FastCommit(runtime.NumCPU())
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err)
				return
			}

			storage.DropDeltas()
			storage.DropCache()

			elements = make(map[atree.Value]atree.Value, maxLength)

			// Load root slab from storage and cache it in read cache
			rootID := m.StorageID()
			m, err = atree.NewMapWithRootID(storage, rootID, atree.NewDefaultDigesterBuilder())
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to create map from root id %s: %s", rootID, err)
				return
			}

			runtime.GC()

			// Check if map is using > MaxHeapAlloc while empty.
			runtime.ReadMemStats(&ms)
			allocMiB = ms.Alloc / 1024 / 1024
			fmt.Printf("\nHeapAlloc is %d MiB after cleanup and forced gc\n", allocMiB)

			// Prevent infinite loop that doesn't do useful work.
			if allocMiB > maxHeapAllocMiB {
				// This shouldn't happen unless there's a memory leak.
				fmt.Fprintf(
					os.Stderr,
					"Exiting because allocMiB %d > maxHeapAllocMiB %d with empty map\n",
					allocMiB,
					maxHeapAllocMiB)
				return
			}
		}

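		// Pick the next operation uniformly at random, but force a removal
		// while the map is at maxLength or heap usage is being reduced.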
		nextOp := r.Intn(maxMapOp)

		if m.Count() == maxLength || reduceHeapAllocs {
			nextOp = mapRemoveOp
		}

		switch nextOp {

		case mapSetOp1, mapSetOp2, mapSetOp3:
			opCount++

			k, err := randomKey()
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to generate random key: %s", err)
				return
			}

			nestedLevels := r.Intn(maxNestedLevels)
			v, err := randomValue(storage, address, nestedLevels)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to generate random value: %s", err)
				return
			}

			copiedKey, err := copyValue(storage, atree.Address{}, k)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to copy random key %s: %s", k, err)
				return
			}

			copiedValue, err := copyValue(storage, atree.Address{}, v)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", v, err)
				return
			}

			oldV := elements[copiedKey]

			// Update keys
			if oldV == nil {
				keys = append(keys, copiedKey)
			}

			// Update elements
			elements[copiedKey] = copiedValue

			// Update map
			existingStorable, err := m.Set(compare, hashInputProvider, k, v)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to set key %s to value %s: %s", k, v, err)
				return
			}

			// Compare old value from map with old value from elements
			if (oldV == nil) && (existingStorable != nil) {
				fmt.Fprintf(os.Stderr, "Set returned storable %s, want nil", existingStorable)
				return
			}

			if (oldV != nil) && (existingStorable == nil) {
				fmt.Fprintf(os.Stderr, "Set returned nil, want %s", oldV)
				return
			}

			if existingStorable != nil {

				existingValue, err := existingStorable.StoredValue(storage)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingStorable, err)
					return
				}

				err = valueEqual(oldV, existingValue)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Set() returned wrong existing value %s, want %s", existingValue, oldV)
					return
				}

				err = removeStorable(storage, existingStorable)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Failed to remove map storable element %s: %s", existingStorable, err)
					return
				}

				err = removeValue(storage, oldV)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Failed to remove copied overwritten value %s: %s", existingValue, err)
					return
				}
			}

			// Update status
			status.incSet(oldV == nil)

		case mapRemoveOp:
			if m.Count() == 0 {
				continue
			}

			opCount++

			index := r.Intn(len(keys))
			k := keys[index]

			oldV := elements[k]

			// Update elements
			delete(elements, k)

			// Update keys
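			// (shift the tail left and nil out the vacated last slot so the
			// removed key can be garbage collected)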
			copy(keys[index:], keys[index+1:])
			keys[len(keys)-1] = nil
			keys = keys[:len(keys)-1]

			// Update map
			existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, k)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to remove element with key %s: %s", k, err)
				return
			}

			// Compare removed key from map with removed key from elements
			existingKeyValue, err := existingKeyStorable.StoredValue(storage)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingKeyStorable, err)
				return
			}

			err = valueEqual(k, existingKeyValue)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Remove() returned wrong existing key %s, want %s", existingKeyStorable, k)
				return
			}

			// Compare removed value from map with removed value from elements
			existingValue, err := existingValueStorable.StoredValue(storage)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingValueStorable, err)
				return
			}

			err = valueEqual(oldV, existingValue)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Remove() returned wrong existing value %s, want %s", existingValueStorable, oldV)
				return
			}

			err = removeStorable(storage, existingKeyStorable)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to remove key %s: %s", existingKeyStorable, err)
				return
			}

			err = removeStorable(storage, existingValueStorable)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to remove value %s: %s", existingValueStorable, err)
				return
			}

			err = removeValue(storage, k)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to remove copied key %s: %s", k, err)
				return
			}

			err = removeValue(storage, oldV)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Failed to remove copied value %s: %s", existingValue, err)
				return
			}

			// Update status
			status.incRemove()
		}

		// Check map elements against elements after every op
		err = checkMapDataLoss(m, elements)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}

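		// Every 100 operations, verify slab storage health and confirm that
		// the map's root is the only root slab stored under a non-empty
		// address.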
		if opCount >= 100 {
			opCount = 0
			rootIDs, err := atree.CheckStorageHealth(storage, -1)
			if err != nil {
				fmt.Fprintln(os.Stderr, err)
				return
			}
			ids := make([]atree.StorageID, 0, len(rootIDs))
			for id := range rootIDs {
				// filter out root ids with empty address
				if id.Address != atree.AddressUndefined {
					ids = append(ids, id)
				}
			}
			if len(ids) != 1 || ids[0] != m.StorageID() {
				fmt.Fprintf(os.Stderr, "root storage ids %v in storage, want %s\n", ids, m.StorageID())
				return
			}
		}
	}
}

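// checkMapDataLoss verifies that m contains exactly the key/value pairs
// recorded in elements: the counts must match and every expected key must
// map to an equal value.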
func checkMapDataLoss(m *atree.OrderedMap, elements map[atree.Value]atree.Value) error {

	// Check map has the same number of elements as elements
	if m.Count() != uint64(len(elements)) {
		return fmt.Errorf("Count() %d != len(elements) %d", m.Count(), len(elements))
	}

	// Check every element
	for k, v := range elements {
		storable, err := m.Get(compare, hashInputProvider, k)
		if err != nil {
			return fmt.Errorf("failed to get element with key %s: %w", k, err)
		}
		convertedValue, err := storable.StoredValue(m.Storage)
		if err != nil {
			return fmt.Errorf("failed to convert storable to value with key %s: %w", k, err)
		}
		err = valueEqual(v, convertedValue)
		if err != nil {
			return fmt.Errorf("failed to compare %s and %s: %w", v, convertedValue, err)
		}
	}

	return nil
}