github.com/better-concurrent/guc@v0.0.0-20190520022744-eb29266403a1/concurrenthashmap.go

     1  package guc
     2  
     3  import (
     4  	"fmt"
     5  	"math/bits"
     6  	"runtime"
     7  	"strconv"
     8  	"sync"
     9  	"sync/atomic"
    10  	"unsafe"
    11  )
    12  
    13  const (
    14  	defaultCapacity         = 16
    15  	defaultContendCellCount = 8
    16  	loadFactor              = 0.75
    17  	maxCapacity             = 1 << 30
    18  	treeifyThreshold        = 8
    19  	resizeStampBits         = 16
    20  	maxResizers             = (1 << (32 - resizeStampBits)) - 1
    21  	resizeStampShift        = 32 - resizeStampBits
    22  	minTransferStride       = 16
    23  )
    24  
    25  var hashSeed = generateHashSeed()
    26  
    27  func generateHashSeed() uint32 {
    28  	return Fastrand()
    29  }
    30  
// CounterCell is a padded counter slot, striped across goroutines to reduce
// contention (in the spirit of java.util.concurrent.atomic.LongAdder).
type CounterCell struct {
	// accessed atomically
	value int64
	// value is an int64 (8 bytes), so pad out the rest of the cache line
	padding [CacheLineSize - 8]byte
}
    36  
// TODO list
// 1. LongAdder-like total count
// 2. bucket treeify/untreeify; note: Go has no built-in comparable interface
// 3. iterator
// 4. multi-goroutine cooperative resize
    42  type ConcurrentHashMap struct {
    43  	// The array of bins. Lazily initialized upon first insertion.
    44  	// Volatile, type is []*node
    45  	table unsafe.Pointer
    46  	// The next table to use; non-nil only while resizing.
    47  	// Volatile, type is []*node
    48  	nextTable unsafe.Pointer
    49  	// Table initialization and resizing control
	// When negative, the table is being initialized or resized: -1 for initialization,
	// else -(1 + the number of active resizing goroutines). Otherwise,
	// when table is nil, holds the initial table size to use upon
	// creation, or 0 for default. After initialization, holds the
	// next element count value upon which to resize the table.
    55  	// Volatile
    56  	sizeCtl int32
    57  	// The next table index (plus one) to split while resizing.
    58  	// Volatile
    59  	transferIndex int32
    60  	// Base counter value, used mainly when there is no contention,
    61  	// but also as a fallback during table initialization
    62  	// races. Updated via CAS.
    63  	// Volatile
    64  	baseCount int64
    65  	// FIXME! j.u.c implementation is too complex, this is a simple version
    66  	// Volatile, type is []CounterCell
    67  	counterCells unsafe.Pointer
    68  	// Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
    69  	// Volatile
    70  	cellsBusy int32
    71  }
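
// Illustrative sizeCtl lifecycle (example values, assuming a default 16-bucket
// table): before initTable it holds the requested initial capacity (or 0); one
// goroutine CASes it to -1 while allocating the table; after initialization it
// holds the resize threshold (12 for 16 buckets, i.e. 16 - 16/4); during a
// resize it goes negative and encodes the resize stamp plus the number of
// active resizers.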
    72  
    73  // node const
    74  const (
    75  	moved    = -1
    76  	treebin  = -2
    77  	reserved = -3
    78  	hashBits = 0x7fffffff
    79  )
    80  
    81  type externNode interface {
    82  	find(n *node, h int32, k interface{}) (node *node, ok bool)
    83  	isTreeNode() bool
    84  	isForwardNode() bool
    85  }
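
// Three externNode implementations appear below: baseNode for ordinary
// linked-list bins, forwardingNode for bins that have already been moved to
// the next table during a resize (hash == moved), and treeNode as a
// placeholder for tree bins (hash == treebin, not yet implemented).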
    86  
    87  // base node
    88  type node struct {
    89  	hash int32
    90  	// FIXME! move to head node, not each node
    91  	m sync.Mutex
    92  	// type is *interface
    93  	key unsafe.Pointer
    94  	// volatile, type is *interface
    95  	val unsafe.Pointer
    96  	// volatile, type is *node
    97  	next unsafe.Pointer
    98  	// FIXME! better design?
    99  	extern externNode
   100  }
   101  
   102  func (n *node) getKey() interface{} {
   103  	k := n.key
   104  	if k == nil {
   105  		return nil
   106  	} else {
   107  		return *(*interface{})(k)
   108  	}
   109  }
   110  
   111  func (n *node) getValue() interface{} {
   112  	v := atomic.LoadPointer(&n.val)
   113  	if v == nil {
   114  		return nil
   115  	} else {
   116  		return *(*interface{})(v)
   117  	}
   118  }
   119  
   120  func (n *node) getKeyPointer() unsafe.Pointer {
   121  	return n.key
   122  }
   123  
   124  func (n *node) getValuePointer() unsafe.Pointer {
   125  	return atomic.LoadPointer(&n.val)
   126  }
   127  
   128  func (n *node) getNext() *node {
   129  	return (*node)(atomic.LoadPointer(&n.next))
   130  }
   131  
   132  func (n *node) getExternNode() externNode {
   133  	return n.extern
   134  }
   135  
   136  type baseNode struct {
   137  }
   138  
   139  func (en *baseNode) find(n *node, h int32, k interface{}) (*node, bool) {
   140  	e := n
   141  	if k != nil {
   142  		for {
   143  			if h == e.hash {
   144  				ek := e.getKey()
   145  				if ek == k {
   146  					return e, true
   147  				}
   148  			}
   149  			// loop
   150  			e = (*node)(atomic.LoadPointer(&e.next))
   151  			if e == nil {
   152  				break
   153  			}
   154  		}
   155  	}
   156  	return nil, false
   157  }
   158  
   159  func (en *baseNode) isTreeNode() bool {
   160  	return false
   161  }
   162  
   163  func (en *baseNode) isForwardNode() bool {
   164  	return false
   165  }
   166  
   167  type forwardingNode struct {
   168  	nextTable *[]unsafe.Pointer
   169  }
   170  
   171  func newForwardingNode(tab *[]unsafe.Pointer) *node {
   172  	return &node{hash: moved, key: nil, val: nil, next: nil,
   173  		extern: &forwardingNode{nextTable: tab}}
   174  }
   175  
func (en *forwardingNode) find(n *node, h int32, k interface{}) (*node, bool) {
	// loop to avoid arbitrarily deep recursion on forwarding nodes
	tab := en.nextTable
	for {
		if k == nil || tab == nil {
			return nil, false
		}
		length := int32(len(*tab))
		if length == 0 {
			return nil, false
		}
		e := tabAt(tab, (length-1)&h)
		if e == nil {
			return nil, false
		}
		for {
			eh := e.hash
			ek := e.getKey()
			if eh == h && ek == k {
				return e, true
			}
			if eh < 0 {
				if e.extern.isForwardNode() {
					// jump to the newer table referenced by the forwarding node
					tab = e.getExternNode().(*forwardingNode).nextTable
					break
				}
				// other special nodes (e.g. tree bins) implement their own find
				return e.extern.find(e, h, k)
			}
			e = e.getNext()
			if e == nil {
				return nil, false
			}
		} // end of inner loop; restart the outer loop against the newer table
	}
}
   217  
   218  func (en *forwardingNode) isTreeNode() bool {
   219  	return false
   220  }
   221  
   222  func (en *forwardingNode) isForwardNode() bool {
   223  	return true
   224  }
   225  
   226  // TODO NYI
   227  type treeNode struct {
   228  }
   229  
   230  func (en *treeNode) find(n *node, h int32, k interface{}) (*node, bool) {
   231  	panic("NYI")
   232  }
   233  
func (en *treeNode) isTreeNode() bool {
	return true
}

func (en *treeNode) isForwardNode() bool {
	return false
}

// spread mixes the high bits of the hash into the low bits and masks off the
// sign bit, mirroring j.u.c.ConcurrentHashMap.spread.
// TODO need test
func spread(hash uintptr) int32 {
	h := int32(hash)
	return (h ^ (h >> 16)) & hashBits
}
   243  
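// tableSizeFor returns the smallest power of two that is >= c
// (for example 13 -> 16, 16 -> 16, 17 -> 32), capped at maxCapacity.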
   244  func tableSizeFor(c int32) int32 {
   245  	n := c - 1
   246  	n |= n >> 1
   247  	n |= n >> 2
   248  	n |= n >> 4
   249  	n |= n >> 8
   250  	n |= n >> 16
   251  	if n < 0 {
   252  		return 1
   253  	} else {
   254  		if n >= maxCapacity {
   255  			return maxCapacity
   256  		} else {
   257  			return n + 1
   258  		}
   259  	}
   260  }
   261  
   262  func hash(v interface{}) uintptr {
   263  	return Nilinterhash(unsafe.Pointer(&v), uintptr(hashSeed))
   264  }
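
// A key's bucket index is computed as (len(table)-1) & spread(hash(key)); see
// Load and storeVal below. Nilinterhash and Fastrand are expected to be
// provided elsewhere in this package (presumably linked to the runtime's
// interface-hash and fastrand helpers).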
   265  
   266  func equals(v1, v2 *interface{}) bool {
   267  	return v1 == v2 || *v1 == *v2
   268  }
   269  
   270  func tabAt(tab *[]unsafe.Pointer, i int32) *node {
   271  	return (*node)(atomic.LoadPointer(&(*tab)[i]))
   272  }
   273  
   274  func setTabAt(tab *[]unsafe.Pointer, i int32, v *node) {
   275  	atomic.StorePointer(&(*tab)[i], unsafe.Pointer(v))
   276  }
   277  
   278  func casTabAt(tab *[]unsafe.Pointer, i int32, c, v *node) bool {
   279  	return atomic.CompareAndSwapPointer(&(*tab)[i], unsafe.Pointer(c), unsafe.Pointer(v))
   280  }
   281  
   282  func NewConcurrentHashMap(initialCapacity, concurrencyLevel int32) *ConcurrentHashMap {
   283  	cmap := ConcurrentHashMap{}
   284  	cmap.init(initialCapacity, concurrencyLevel)
   285  	return &cmap
   286  }
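
// Illustrative usage sketch (hypothetical helper, not part of the original
// file): it only exercises the methods defined in this file
// (NewConcurrentHashMap, Store, Load, Contains, Size).
func exampleConcurrentHashMapUsage() {
	m := NewConcurrentHashMap(16, 4)
	prev := m.Store("answer", 42) // returns the previous value, or nil if the key was absent
	if v, ok := m.Load("answer"); ok {
		fmt.Printf("answer=%v previous=%v\n", v, prev)
	}
	fmt.Printf("contains=%v size=%d\n", m.Contains("answer"), m.Size())
}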
   287  
   288  func (m *ConcurrentHashMap) sumCount() int64 {
   289  	cells := m.getCountCells()
   290  	sum := atomic.LoadInt64(&m.baseCount)
   291  	if cells != nil {
   292  		for i := 0; i < len(*cells); i++ {
			c := &(*cells)[i]
   294  			sum += atomic.LoadInt64(&c.value)
   295  		}
   296  	}
   297  	return sum
   298  }
   299  
   300  func (m *ConcurrentHashMap) getCountCells() *[]CounterCell {
   301  	return (*[]CounterCell)(atomic.LoadPointer(&m.counterCells))
   302  }
   303  
   304  func (m *ConcurrentHashMap) getTable() *[]unsafe.Pointer {
   305  	return (*[]unsafe.Pointer)(atomic.LoadPointer(&m.table))
   306  }
   307  
   308  func (m *ConcurrentHashMap) getNextTable() *[]unsafe.Pointer {
   309  	return (*[]unsafe.Pointer)(atomic.LoadPointer(&m.nextTable))
   310  }
   311  
   312  func (m *ConcurrentHashMap) init(initialCapacity, concurrencyLevel int32) {
   313  	if initialCapacity < 0 {
		panic("initialCapacity should be >= 0")
   315  	}
   316  	var capacity int32 = 0
   317  	if initialCapacity < concurrencyLevel {
   318  		initialCapacity = concurrencyLevel
   319  	}
   320  	if initialCapacity >= (maxCapacity >> 1) {
   321  		capacity = maxCapacity
   322  	} else {
   323  		capacity = tableSizeFor(initialCapacity + (initialCapacity >> 1) + 1)
   324  	}
   325  	m.sizeCtl = capacity
   326  }
   327  
   328  func (m *ConcurrentHashMap) initTable() *[]unsafe.Pointer {
   329  	for {
   330  		tab := m.getTable()
   331  		if tab != nil && len(*tab) > 0 {
   332  			break
   333  		}
   334  		sc := atomic.LoadInt32(&m.sizeCtl)
   335  		if sc < 0 {
   336  			// lost initialization race; just spin
   337  			runtime.Gosched()
   338  		} else if atomic.CompareAndSwapInt32(&m.sizeCtl, sc, -1) {
   339  			tab = m.getTable()
			if tab == nil || len(*tab) == 0 {
				var n int32
				if sc > 0 {
					n = sc
				} else {
					n = defaultCapacity
				}
				arr := make([]unsafe.Pointer, n)
				atomic.StorePointer(&m.table, unsafe.Pointer(&arr))
				// sizeCtl now holds the next resize threshold (0.75 * n)
				sc = n - (n >> 2)
			}
			atomic.StoreInt32(&m.sizeCtl, sc)
   351  		}
   352  	}
   353  	return m.getTable()
   354  }
   355  
   356  func (m *ConcurrentHashMap) Size() int {
   357  	sum := m.sumCount()
   358  	if sum < 0 {
   359  		return 0
   360  	} else {
   361  		return int(sum)
   362  	}
   363  }
   364  
   365  func (m *ConcurrentHashMap) IsEmpty() bool {
   366  	return m.sumCount() <= 0
   367  }
   368  
   369  func (m *ConcurrentHashMap) Load(key interface{}) (interface{}, bool) {
   370  	if key == nil {
   371  		panic("key is nil!")
   372  	}
   373  	h := spread(hash(key))
   374  	tab := m.getTable()
   375  	// not initialized
   376  	if tab == nil {
   377  		return nil, false
   378  	}
   379  	// empty table
   380  	n := int32(len(*tab))
   381  	if n == 0 {
   382  		return nil, false
   383  	}
   384  	// bin is empty
   385  	e := tabAt(tab, (n-1)&h)
   386  	if e == nil {
   387  		return nil, false
   388  	}
   389  	eh := e.hash
   390  	if h == eh {
   391  		ek := e.getKey()
   392  		if key == ek {
   393  			return e.getValue(), true
   394  		}
   395  	} else if eh < 0 {
		p, ok := e.extern.find(e, h, key)
   397  		if ok {
   398  			return p.getValue(), true
   399  		} else {
   400  			return nil, false
   401  		}
   402  	}
   403  	for {
   404  		e = e.getNext()
   405  		if e == nil {
   406  			break
   407  		}
   408  		if h == e.hash && key == e.getKey() {
   409  			return e.getValue(), true
   410  		}
   411  	}
   412  	return nil, false
   413  }
   414  
   415  func (m *ConcurrentHashMap) Contains(key interface{}) bool {
   416  	_, ok := m.Load(key)
   417  	return ok
   418  }
   419  
   420  func (m *ConcurrentHashMap) Store(key, value interface{}) interface{} {
   421  	return m.storeVal(key, value, false)
   422  }
   423  
   424  func (m *ConcurrentHashMap) storeVal(key, value interface{}, onlyIfAbsent bool) interface{} {
   425  	if key == nil || value == nil {
   426  		panic("key or value is null")
   427  	}
   428  	var binCount int32 = 0
   429  	h := spread(hash(key))
   430  	for {
   431  		tab := m.getTable()
   432  		var n int32
   433  		var f *node
   434  		if tab == nil || len(*tab) == 0 {
   435  			tab = m.initTable()
   436  		} else {
   437  			n = int32(len(*tab)) // length
   438  			i := (n - 1) & h
   439  			f = tabAt(tab, i)
   440  			if f == nil {
   441  				// cas node
   442  				newNode := &node{hash: h, key: unsafe.Pointer(&key),
   443  					val: unsafe.Pointer(&value), next: nil, extern: &baseNode{}}
   444  				if casTabAt(tab, i, nil, newNode) {
   445  					// no lock when adding to empty bin
   446  					break
   447  				}
   448  			} else {
   449  				fh := f.hash
   450  				if fh == moved {
   451  					m.helpTransfer(tab, f)
   452  				} else {
   453  					var oldVal interface{} = nil
   454  					// slow path
   455  					f.m.Lock()
   456  					// re-check
   457  					if tabAt(tab, i) != f {
   458  						f.m.Unlock()
   459  						continue
   460  					}
   461  					if fh >= 0 {
   462  						binCount = 1
   463  						for e := f; ; binCount++ {
							if e.hash == h {
								ek := e.getKey()
								if key == ek {
									oldVal = e.getValue()
									if !onlyIfAbsent {
										// publish the new value atomically; val is read without the lock
										atomic.StorePointer(&e.val, unsafe.Pointer(&value))
									}
									// key found: stop scanning whether or not the value was replaced
									break
								}
							}
   474  							pred := e
   475  							e = e.getNext()
   476  							if e == nil {
								atomic.StorePointer(&pred.next, unsafe.Pointer(&node{hash: h, key: unsafe.Pointer(&key),
									val: unsafe.Pointer(&value), next: nil, extern: &baseNode{}}))
   479  								break
   480  							}
   481  						}
   482  					} else if f.extern.isTreeNode() {
   483  						panic("NYI")
   484  					}
   485  					f.m.Unlock()
   486  					// treeify
   487  					if binCount != 0 {
						if binCount >= treeifyThreshold {
   489  							m.treeifyBin(tab, i)
   490  						}
   491  						if oldVal != nil {
   492  							return oldVal
   493  						}
   494  						break
   495  					}
   496  				}
   497  			}
   498  		}
   499  	}
   500  	m.addCount(1, binCount)
   501  	return nil
   502  }
   503  
   504  // Helps transfer if a resize is in progress.
   505  func (m *ConcurrentHashMap) helpTransfer(tab *[]unsafe.Pointer, f *node) *[]unsafe.Pointer {
   506  	var nextTab *[]unsafe.Pointer
   507  	var sc int32
   508  	if tab != nil && f.extern.isForwardNode() {
   509  		nextTab = f.extern.(*forwardingNode).nextTable
   510  		if nextTab != nil {
   511  			rs := resizeStamp(int32(len(*tab)))
   512  			for nextTab == m.getNextTable() && tab == m.getTable() {
   513  				sc = atomic.LoadInt32(&m.sizeCtl)
   514  				if sc < 0 {
					if sc>>resizeStampShift != rs || sc == rs+1 || sc == rs+maxResizers || atomic.LoadInt32(&m.transferIndex) <= 0 {
   516  						break
   517  					}
   518  					if atomic.CompareAndSwapInt32(&m.sizeCtl, sc, sc+1) {
   519  						m.transfer(tab, nextTab)
   520  						break
   521  					}
   522  				}
   523  			} // end of for loop
   524  			return nextTab
   525  		}
   526  	}
   527  	return m.getTable()
   528  }
   529  
   530  // x: the count to add
   531  // check: if <0, don't check resize, if <= 1 only check if uncontended
   532  // FIXME! simple implementation
   533  func (m *ConcurrentHashMap) addCount(x int64, check int32) {
   534  	as := m.getCountCells()
   535  	b := atomic.LoadInt64(&m.baseCount)
   536  	s := b + x
   537  	if as != nil || !atomic.CompareAndSwapInt64(&m.baseCount, b, s) {
   538  		if as == nil {
   539  			m.fullAddCount(x, false)
   540  		} else {
   541  			a := getRandomCountCell(as)
   542  			incrementCountCell(a, x)
   543  		}
   544  		if check <= 1 {
   545  			return
   546  		}
   547  		s = m.sumCount()
   548  	}
   549  	if check >= 0 {
   550  		for {
   551  			sc := atomic.LoadInt32(&m.sizeCtl)
   552  			var tab, nt *[]unsafe.Pointer
   553  			tab = m.getTable()
   554  			if s >= int64(sc) && tab != nil {
   555  				n := len(*tab)
				if n >= maxCapacity {
   557  					break
   558  				}
   559  				rs := resizeStamp(int32(n))
   560  				if sc < 0 {
   561  					nt = m.getNextTable()
					if (sc>>resizeStampShift) != rs || sc == rs+1 ||
						sc == rs+maxResizers || nt == nil {
   564  						break
   565  					} else {
   566  						ti := atomic.LoadInt32(&m.transferIndex)
   567  						if ti <= 0 {
   568  							break
   569  						}
   570  					}
   571  					if atomic.CompareAndSwapInt32(&m.sizeCtl, sc, sc+1) {
   572  						m.transfer(tab, nt)
   573  					}
   574  				} else {
   575  					if atomic.CompareAndSwapInt32(&m.sizeCtl, sc, (rs<<resizeStampShift)+2) {
   576  						m.transfer(tab, nil)
   577  					}
   578  				}
   579  				s = m.sumCount()
   580  			} else {
   581  				break
   582  			}
   583  		}
   584  	}
   585  }
   586  
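// resizeStamp returns a stamp for a resize of a table of size n: the count of
// leading zeros of n with bit 15 set. For n = 16 that is 27 | 0x8000. Shifted
// left by resizeStampShift and stored in sizeCtl it yields a negative value,
// which marks a resize in progress; the low bits then track the number of
// active resizing goroutines.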
   587  func resizeStamp(n int32) int32 {
	return int32(bits.LeadingZeros32(uint32(n)) | (1 << (resizeStampBits - 1)))
   589  }
   590  
   591  func (m *ConcurrentHashMap) fullAddCount(x int64, wasUncontended bool) {
   592  	// TODO hard code
   593  	as := make([]CounterCell, defaultContendCellCount)
   594  	asp := &as
	for {
		if atomic.CompareAndSwapPointer(&m.counterCells, nil, unsafe.Pointer(asp)) {
			break
		}
		// lost the race to install the cells; use the winner's slice instead
		if asp = m.getCountCells(); asp != nil {
			break
		}
	}
   603  	incrementCountCell(&(*asp)[0], x)
   604  }
   605  
   606  func incrementCountCell(a *CounterCell, x int64) {
   607  	for i := 0; ; i++ {
   608  		old := atomic.LoadInt64(&a.value)
   609  		if !atomic.CompareAndSwapInt64(&a.value, old, old+x) {
   610  			if !SyncRuntimeCanSpin(i) {
   611  				runtime.Gosched()
   612  			} else {
   613  				// or sync.runtime_doSpin? FIXME
   614  				continue
   615  			}
   616  		} else {
   617  			break
   618  		}
   619  	}
   620  }
   621  
   622  // FIXME just need a random probe in G.m, no need re-rand
   623  func getRandomCountCell(as *[]CounterCell) *CounterCell {
	// mask to 31 bits so the index stays non-negative even where int is 32-bit
	i := int(Fastrand() & 0x7fffffff)
   625  	n := len(*as)
   626  	return &(*as)[i%n]
   627  }
   628  
   629  // TODO
   630  func (m *ConcurrentHashMap) treeifyBin(tab *[]unsafe.Pointer, i int32) {
	// NYI, golang has no built-in comparable interface
   632  	return
   633  }
   634  
// Moves and/or copies the nodes in each bin to the new table.
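// Each bin at index i of the old n-bucket table is split into a "low" list that
// stays at index i and a "high" list that moves to index i+n in the doubled
// table, based on hash&n; the lastRun scan reuses the longest common suffix of
// the old list instead of copying it node by node.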
   636  func (m *ConcurrentHashMap) transfer(tab, nextTab *[]unsafe.Pointer) {
   637  	var n, stride int
   638  	n = len(*tab)
   639  	ncpu := runtime.GOMAXPROCS(0)
   640  	// subdivide range
   641  	if ncpu > 1 {
		stride = (n >> 3) / ncpu
   643  	} else {
   644  		stride = n
   645  	}
   646  	if stride < minTransferStride {
   647  		stride = minTransferStride
   648  	}
   649  	// initiating
   650  	if nextTab == nil {
   651  		newTable := make([]unsafe.Pointer, n<<1)
   652  		nextTab = &newTable
   653  		atomic.StorePointer(&m.nextTable, unsafe.Pointer(nextTab))
   654  		atomic.StoreInt32(&m.transferIndex, int32(n))
   655  	}
   656  	nextn := len(*nextTab)
   657  	fwd := newForwardingNode(nextTab)
   658  	var advance = true
   659  	var finishing = false
   660  	var i int32 = 0
   661  	var bound int32 = 0
   662  	for {
   663  		var f *node
   664  		var fh int32
   665  		for advance {
   666  			var nextIndex, nextBound int32
   667  			i = i - 1
   668  			if i >= bound || finishing {
   669  				advance = false
   670  			} else {
   671  				nextIndex = atomic.LoadInt32(&m.transferIndex)
   672  				if nextIndex <= 0 {
   673  					i = -1
   674  					advance = false
   675  				} else {
   676  					if nextIndex > int32(stride) {
   677  						nextBound = nextIndex - int32(stride)
   678  					} else {
   679  						nextBound = 0
   680  					}
   681  					if atomic.CompareAndSwapInt32(&m.transferIndex, nextIndex, nextBound) {
   682  						bound = nextBound
   683  						i = nextIndex - 1
   684  						advance = false
   685  					}
   686  				}
   687  			}
   688  		} // end of for advance loop
   689  		if i < 0 || int(i) >= n || int(i)+n >= nextn {
   690  			if finishing {
   691  				atomic.StorePointer(&m.nextTable, nil)
   692  				atomic.StorePointer(&m.table, unsafe.Pointer(nextTab))
   693  				ctl := (n << 1) - (n >> 1)
   694  				atomic.StoreInt32(&m.sizeCtl, int32(ctl))
   695  				return
   696  			}
   697  			sc := atomic.LoadInt32(&m.sizeCtl)
   698  			if atomic.CompareAndSwapInt32(&m.sizeCtl, sc, sc-1) {
   699  				if (sc - 2) != resizeStamp(int32(n))<<resizeStampShift {
   700  					return
   701  				}
   702  				advance = true
   703  				finishing = true
   704  				i = int32(n) // recheck before commit
   705  			}
   706  		} else {
   707  			f = tabAt(tab, i)
   708  			if f == nil {
   709  				advance = casTabAt(tab, i, nil, fwd)
   710  			} else {
   711  				fh = f.hash
   712  				if fh == moved {
   713  					advance = true // already processed
   714  				} else {
   715  					// synchronize f
   716  					f.m.Lock()
   717  					if tabAt(tab, i) == f {
   718  						var ln, hn *node
   719  						if fh >= 0 {
   720  							runBit := fh & int32(n)
   721  							lastRun := f
   722  							for p := (*node)(atomic.LoadPointer(&f.next)); p != nil; p = (*node)(atomic.LoadPointer(&p.next)) {
   723  								b := p.hash & int32(n)
   724  								if b != runBit {
   725  									runBit = b
   726  									lastRun = p
   727  								}
   728  							} // end of for loop
   729  							if runBit == 0 {
   730  								ln = lastRun
   731  								hn = nil
   732  							} else {
   733  								hn = lastRun
   734  								ln = nil
   735  							}
   736  							for p := f; p != lastRun; p = (*node)(atomic.LoadPointer(&p.next)) {
   737  								ph := p.hash
   738  								pk := p.getKeyPointer()
   739  								pv := p.getValuePointer()
   740  								if (ph & int32(n)) == 0 {
   741  									ln = &node{hash: ph, key: pk, val: pv,
   742  										next: unsafe.Pointer(ln), extern: &baseNode{}}
   743  								} else {
   744  									hn = &node{hash: ph, key: pk, val: pv,
   745  										next: unsafe.Pointer(hn), extern: &baseNode{}}
   746  								}
   747  							}
   748  							setTabAt(nextTab, i, ln)
   749  							setTabAt(nextTab, i+int32(n), hn)
   750  							setTabAt(tab, i, fwd)
   751  							advance = true
   752  						} else if f.extern.isTreeNode() {
   753  							panic("treeify not implement yet")
   754  						}
   755  					}
   756  					f.m.Unlock()
   757  				}
   758  			}
   759  		}
   760  	}
   761  }
   762  
   763  // debug func
   764  func (m *ConcurrentHashMap) printTableDetail() {
   765  	tab := m.getTable()
   766  	nextTab := m.getNextTable()
   767  	var tabSize, nextTabSize = 0, 0
   768  	if tab != nil {
   769  		tabSize = len(*tab)
   770  	}
   771  	if nextTab != nil {
   772  		nextTabSize = len(*nextTab)
   773  	}
   774  	fmt.Printf("[DEBUG] tab size is %d, nextTab size is %d\n", tabSize, nextTabSize)
   775  }
   776  
   777  func (m *ConcurrentHashMap) printCountDetail() {
   778  	bc := atomic.LoadInt64(&m.baseCount)
   779  	cells := m.getCountCells()
   780  	if cells == nil {
   781  		fmt.Printf("[DEBUG] baseCount is %d, cells is nil\n", bc)
   782  	} else {
   783  		content := ""
   784  		for i := 0; i < len(*cells); i++ {
			c := &(*cells)[i]
			content += strconv.Itoa(int(atomic.LoadInt64(&c.value))) + " "
   787  		}
   788  		fmt.Printf("[DEBUG] baseCount is %d, cells is %s\n", bc, content)
   789  	}
   790  }