github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/runtime/hashmap.c (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  #include "runtime.h"
     6  #include "arch_GOARCH.h"
     7  #include "malloc.h"
     8  #include "hashmap.h"
     9  #include "type.h"
    10  #include "race.h"
    11  
    12  // This file contains the implementation of Go's map type.
    13  //
    14  // The map is just a hash table.  The data is arranged
    15  // into an array of buckets.  Each bucket contains up to
    16  // 8 key/value pairs.  The low-order bits of the hash are
    17  // used to select a bucket.  Each bucket contains a few
    18  // high-order bits of each hash to distinguish the entries
    19  // within a single bucket.
    20  //
    21  // If more than 8 keys hash to a bucket, we chain on
    22  // extra buckets.
    23  //
    24  // When the hashtable grows, we allocate a new array
    25  // of buckets twice as big.  Buckets are incrementally
    26  // copied from the old bucket array to the new bucket array.
    27  //
    28  // Map iterators walk through the array of buckets and
    29  // return the keys in walk order (bucket #, then overflow
    30  // chain order, then bucket index).  To maintain iteration
    31  // semantics, we never move keys within their bucket (if
    32  // we did, keys might be returned 0 or 2 times).  When
    33  // growing the table, iterators remain iterating through the
    34  // old table and must check the new table if the bucket
    35  // they are iterating through has been moved ("evacuated")
    36  // to the new table.
    37  
    38  // Maximum number of key/value pairs a bucket can hold.
    39  #define BUCKETSIZE 8
    40  
    41  // Maximum average load of a bucket that triggers growth.
    42  #define LOAD 6.5
    43  
    44  // Picking LOAD: too large and we have lots of overflow
    45  // buckets, too small and we waste a lot of space.  I wrote
    46  // a simple program to check some stats for different loads:
    47  // (64-bit, 8 byte keys and values)
    48  //        LOAD    %overflow  bytes/entry     hitprobe    missprobe
    49  //        4.00         2.13        20.77         3.00         4.00
    50  //        4.50         4.05        17.30         3.25         4.50
    51  //        5.00         6.85        14.77         3.50         5.00
    52  //        5.50        10.55        12.94         3.75         5.50
    53  //        6.00        15.27        11.67         4.00         6.00
    54  //        6.50        20.90        10.79         4.25         6.50
    55  //        7.00        27.14        10.15         4.50         7.00
    56  //        7.50        34.03         9.73         4.75         7.50
    57  //        8.00        41.10         9.40         5.00         8.00
    58  //
    59  // %overflow   = percentage of buckets which have an overflow bucket
    60  // bytes/entry = overhead bytes used per key/value pair
    61  // hitprobe    = # of entries to check when looking up a present key
    62  // missprobe   = # of entries to check when looking up an absent key
    63  //
    64  // Keep in mind this data is for maximally loaded tables, i.e. just
    65  // before the table grows.  Typical tables will be somewhat less loaded.
    66  
    67  // Maximum key or value size to keep inline (instead of mallocing per element).
    68  // Must fit in a uint8.
    69  // Fast versions cannot handle big values - the cutoff size for
    70  // fast versions in ../../cmd/gc/walk.c must be at most this value.
    71  #define MAXKEYSIZE 128
    72  #define MAXVALUESIZE 128
    73  
typedef struct Bucket Bucket;
// A single hash bucket holding up to BUCKETSIZE entries.
// Layout: 8 tophash bytes, the overflow link, then the packed key/value data.
struct Bucket
{
	uint8  tophash[BUCKETSIZE]; // top 8 bits of hash of each entry (0 = empty)
	Bucket *overflow;           // overflow bucket, if any (low bit doubles as the "evacuated" mark, see below)
	byte   data[1];             // BUCKETSIZE keys followed by BUCKETSIZE values
};
    81  // NOTE: packing all the keys together and then all the values together makes the
    82  // code a bit more complicated than alternating key/value/key/value/... but it allows
    83  // us to eliminate padding which would be needed for, e.g., map[int64]int8.
    84  
    85  // Low-order bit of overflow field is used to mark a bucket as already evacuated
    86  // without destroying the overflow pointer.
    87  // Only buckets in oldbuckets will be marked as evacuated.
    88  // Evacuated bit will be set identically on the base bucket and any overflow buckets.
// evacuated: true if bucket b (in oldbuckets) has already been copied into the new table.
#define evacuated(b) (((uintptr)(b)->overflow & 1) != 0)
// overflowptr: the real overflow pointer with the evacuated tag bit stripped.
#define overflowptr(b) ((Bucket*)((uintptr)(b)->overflow & ~(uintptr)1))

// Initialize bucket to the empty state.  This only works if BUCKETSIZE==8!
// (All eight tophash bytes are cleared with one 64-bit store.)
#define clearbucket(b) { *(uint64*)((b)->tophash) = 0; (b)->overflow = nil; }
    94  
// Hmap is the runtime representation of a Go map (hash table header).
struct Hmap
{
	uintgo  count;        // # live cells == size of map.  Must be first (used by len() builtin)
	uint32  flags;        // bit set of the flag values declared below
	uint8   B;            // log_2 of # of buckets (can hold up to LOAD * 2^B items)
	uint8   keysize;      // key size in bytes (pointer size if IndirectKey)
	uint8   valuesize;    // value size in bytes (pointer size if IndirectValue)
	uint16  bucketsize;   // bucket size in bytes

	uintptr hash0;        // hash seed
	byte    *buckets;     // array of 2^B Buckets. may be nil if count==0.
	byte    *oldbuckets;  // previous bucket array of half the size, non-nil only when growing
	uintptr nevacuate;    // progress counter for evacuation (buckets less than this have been evacuated)
};
   109  
// possible flags (stored in Hmap.flags; Iterator/OldIterator may be set
// concurrently via CAS in hash_iter_init)
enum
{
	IndirectKey = 1,    // storing pointers to keys
	IndirectValue = 2,  // storing pointers to values
	Iterator = 4,       // there may be an iterator using buckets
	OldIterator = 8,    // there may be an iterator using oldbuckets
	CanFreeBucket = 16, // ok to free buckets
	CanFreeKey = 32,    // keys are indirect and ok to free keys
};
   120  
// Macros for dereferencing indirect keys/values: when the corresponding
// Indirect* flag is set, slot p holds a pointer to the data rather than
// the data itself, so load through it; otherwise use p directly.
#define IK(h, p) (((h)->flags & IndirectKey) != 0 ? *(byte**)(p) : (p))
#define IV(h, p) (((h)->flags & IndirectValue) != 0 ? *(byte**)(p) : (p))
   124  
// Debug knobs — compile-time constants, all off in normal builds.
enum
{
	docheck = 0,  // check invariants before and after every op.  Slow!!!
	debug = 0,    // print every operation
	checkgc = 0 || docheck,  // check interaction of mallocgc() with the garbage collector
};
// check verifies the internal consistency of the map: every live entry's
// cached tophash must match a fresh hash of its key, and the number of
// live entries must equal h->count.  Throws on any violation.  Slow (it
// rehashes every key); only invoked when docheck is set.
static void
check(MapType *t, Hmap *h)
{
	uintptr bucket, oldbucket;
	Bucket *b;
	uintptr i;
	uintptr hash;
	uintgo cnt;
	uint8 top;
	bool eq;
	byte *k, *v;

	cnt = 0;

	// check buckets
	for(bucket = 0; bucket < (uintptr)1 << h->B; bucket++) {
		if(h->oldbuckets != nil) {
			oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
			if(!evacuated(b))
				continue; // b is still uninitialized
		}
		for(b = (Bucket*)(h->buckets + bucket * h->bucketsize); b != nil; b = b->overflow) {
			for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
				if(b->tophash[i] == 0)
					continue;
				cnt++;
				// Keys not equal to themselves (NaNs) cannot be rehashed
				// consistently, so skip the tophash verification for them.
				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
				if(!eq)
					continue; // NaN!
				hash = h->hash0;
				t->key->alg->hash(&hash, t->key->size, IK(h, k));
				top = hash >> (8*sizeof(uintptr) - 8);
				if(top == 0)
					top = 1; // 0 is reserved to mean "empty slot"
				if(top != b->tophash[i])
					runtime·throw("bad hash");
			}
		}
	}

	// check oldbuckets
	if(h->oldbuckets != nil) {
		for(oldbucket = 0; oldbucket < (uintptr)1 << (h->B - 1); oldbucket++) {
			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
			if(evacuated(b))
				continue;
			if(oldbucket < h->nevacuate)
				runtime·throw("bucket became unevacuated");
			for(; b != nil; b = overflowptr(b)) {
				for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
					if(b->tophash[i] == 0)
						continue;
					cnt++;
					t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
					if(!eq)
						continue; // NaN!
					hash = h->hash0;
					t->key->alg->hash(&hash, t->key->size, IK(h, k));
					top = hash >> (8*sizeof(uintptr) - 8);
					if(top == 0)
						top = 1;
					if(top != b->tophash[i])
						runtime·throw("bad hash (old)");
				}
			}
		}
	}

	if(cnt != h->count) {
		runtime·printf("%D %D\n", (uint64)cnt, (uint64)h->count);
		runtime·throw("entries missing");
	}
}
   205  
// hash_init initializes the (already allocated) Hmap h for map type t,
// sized so that hint elements fit without immediate growing.  It chooses
// inline vs. indirect storage for keys and values and computes the bucket
// layout.  NOTE: the stores into *h are deliberately kept at the end in
// the original order; do not reorder (see comment below about gciter).
static void
hash_init(MapType *t, Hmap *h, uint32 hint)
{
	uint8 B;
	byte *buckets;
	uintptr i;
	uintptr keysize, valuesize, bucketsize;
	uint8 flags;
	Bucket *b;

	flags = CanFreeBucket;

	// figure out how big we have to make everything
	keysize = t->key->size;
	if(keysize > MAXKEYSIZE) {
		// Large keys are heap-allocated; buckets store only a pointer.
		flags |= IndirectKey | CanFreeKey;
		keysize = sizeof(byte*);
	}
	valuesize = t->elem->size;
	if(valuesize > MAXVALUESIZE) {
		flags |= IndirectValue;
		valuesize = sizeof(byte*);
	}
	bucketsize = offsetof(Bucket, data[0]) + (keysize + valuesize) * BUCKETSIZE;

	// invariants we depend on.  We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if(t->key->align > BUCKETSIZE)
		runtime·throw("key align too big");
	if(t->elem->align > BUCKETSIZE)
		runtime·throw("value align too big");
	if(t->key->size % t->key->align != 0)
		runtime·throw("key size not a multiple of key align");
	if(t->elem->size % t->elem->align != 0)
		runtime·throw("value size not a multiple of value align");
	if(BUCKETSIZE < 8)
		runtime·throw("bucketsize too small for proper alignment");
	if(BUCKETSIZE != 8)
		runtime·throw("must redo clearbucket");
	if(sizeof(void*) == 4 && t->key->align > 4)
		runtime·throw("need padding in bucket (key)");
	if(sizeof(void*) == 4 && t->elem->align > 4)
		runtime·throw("need padding in bucket (value)");

	// find size parameter which will hold the requested # of elements
	B = 0;
	while(hint > BUCKETSIZE && hint > LOAD * ((uintptr)1 << B))
		B++;

	// allocate initial hash table
	// If hint is large zeroing this memory could take a while.
	if(checkgc) mstats.next_gc = mstats.heap_alloc;
	if(B == 0) {
		// done lazily later.
		buckets = nil;
	} else {
		buckets = runtime·mallocgc(bucketsize << B, 0, 1, 0);
		for(i = 0; i < (uintptr)1 << B; i++) {
			b = (Bucket*)(buckets + i * bucketsize);
			clearbucket(b);
		}
	}

	// initialize Hmap
	// Note: we save all these stores to the end so gciter doesn't see
	// a partially initialized map.
	h->count = 0;
	h->B = B;
	h->flags = flags;
	h->keysize = keysize;
	h->valuesize = valuesize;
	h->bucketsize = bucketsize;
	h->hash0 = runtime·fastrand1();
	h->buckets = buckets;
	h->oldbuckets = nil;
	h->nevacuate = 0;
	if(docheck)
		check(t, h);
}
   285  
   286  // Moves entries in oldbuckets[i] to buckets[i] and buckets[i+2^k].
   287  // We leave the original bucket intact, except for the evacuated marks, so that
   288  // iterators can still iterate through the old buckets.
static void
evacuate(MapType *t, Hmap *h, uintptr oldbucket)
{
	Bucket *b;
	Bucket *nextb;
	Bucket *x, *y;
	Bucket *newx, *newy;
	uintptr xi, yi;
	uintptr newbit;
	uintptr hash;
	uintptr i;
	byte *k, *v;
	byte *xk, *yk, *xv, *yv;
	byte *ob;

	b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
	newbit = (uintptr)1 << (h->B - 1); // # of old buckets; also the hash bit that decides x vs. y below

	if(!evacuated(b)) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If CanFreeBuckets and !OldIterator.)

		// x receives entries that stay at the same index; y receives
		// entries that move to index oldbucket+newbit in the grown table.
		x = (Bucket*)(h->buckets + oldbucket * h->bucketsize);
		y = (Bucket*)(h->buckets + (oldbucket + newbit) * h->bucketsize);
		clearbucket(x);
		clearbucket(y);
		xi = 0;
		yi = 0;
		xk = x->data;
		yk = y->data;
		xv = xk + h->keysize * BUCKETSIZE;
		yv = yk + h->keysize * BUCKETSIZE;
		do {
			for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
				if(b->tophash[i] == 0)
					continue;
				hash = h->hash0;
				t->key->alg->hash(&hash, t->key->size, IK(h, k));
				// NOTE: if key != key, then this hash could be (and probably will be)
				// entirely different from the old hash.  We effectively only update
				// the B'th bit of the hash in this case.
				if((hash & newbit) == 0) {
					if(xi == BUCKETSIZE) {
						// x is full; chain a fresh overflow bucket.
						if(checkgc) mstats.next_gc = mstats.heap_alloc;
						newx = runtime·mallocgc(h->bucketsize, 0, 1, 0);
						clearbucket(newx);
						x->overflow = newx;
						x = newx;
						xi = 0;
						xk = x->data;
						xv = xk + h->keysize * BUCKETSIZE;
					}
					x->tophash[xi] = b->tophash[i];
					if((h->flags & IndirectKey) != 0) {
						*(byte**)xk = *(byte**)k;               // copy pointer
					} else {
						t->key->alg->copy(t->key->size, xk, k); // copy value
					}
					if((h->flags & IndirectValue) != 0) {
						*(byte**)xv = *(byte**)v;
					} else {
						t->elem->alg->copy(t->elem->size, xv, v);
					}
					xi++;
					xk += h->keysize;
					xv += h->valuesize;
				} else {
					if(yi == BUCKETSIZE) {
						// y is full; chain a fresh overflow bucket.
						if(checkgc) mstats.next_gc = mstats.heap_alloc;
						newy = runtime·mallocgc(h->bucketsize, 0, 1, 0);
						clearbucket(newy);
						y->overflow = newy;
						y = newy;
						yi = 0;
						yk = y->data;
						yv = yk + h->keysize * BUCKETSIZE;
					}
					y->tophash[yi] = b->tophash[i];
					if((h->flags & IndirectKey) != 0) {
						*(byte**)yk = *(byte**)k;
					} else {
						t->key->alg->copy(t->key->size, yk, k);
					}
					if((h->flags & IndirectValue) != 0) {
						*(byte**)yv = *(byte**)v;
					} else {
						t->elem->alg->copy(t->elem->size, yv, v);
					}
					yi++;
					yk += h->keysize;
					yv += h->valuesize;
				}
			}

			// mark as evacuated so we don't do it again.
			// this also tells any iterators that this data isn't golden anymore.
			nextb = b->overflow;
			b->overflow = (Bucket*)((uintptr)nextb + 1); // set the low "evacuated" tag bit

			b = nextb;
		} while(b != nil);

		// Free old overflow buckets as much as we can.
		if((h->flags & OldIterator) == 0) {
			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
			if((h->flags & CanFreeBucket) != 0) {
				while((nextb = overflowptr(b)) != nil) {
					b->overflow = nextb->overflow;
					runtime·free(nextb);
				}
			} else {
				// can't explicitly free overflow buckets, but at least
				// we can unlink them.
				b->overflow = (Bucket*)1; // nil pointer + evacuated bit
			}
		}
	}

	// advance evacuation mark
	if(oldbucket == h->nevacuate) {
		h->nevacuate = oldbucket + 1;
		if(oldbucket + 1 == newbit) { // newbit == # of oldbuckets
			// free main bucket array
			if((h->flags & (OldIterator | CanFreeBucket)) == CanFreeBucket) {
				ob = h->oldbuckets;
				h->oldbuckets = nil;
				runtime·free(ob);
			} else {
				h->oldbuckets = nil;
			}
		}
	}
	if(docheck)
		check(t, h);
}
   424  
   425  static void
   426  grow_work(MapType *t, Hmap *h, uintptr bucket)
   427  {
   428  	uintptr noldbuckets;
   429  
   430  	noldbuckets = (uintptr)1 << (h->B - 1);
   431  
   432  	// make sure we evacuate the oldbucket corresponding
   433  	// to the bucket we're about to use
   434  	evacuate(t, h, bucket & (noldbuckets - 1));
   435  
   436  	// evacuate one more oldbucket to make progress on growing
   437  	if(h->oldbuckets != nil)
   438  		evacuate(t, h, h->nevacuate);
   439  }
   440  
// hash_grow doubles the number of buckets.  Only the new bucket array is
// allocated here; entries are migrated incrementally by grow_work() and
// evacuate().  The stores under "commit the grow" must stay together and
// in order so the GC never observes a half-committed grow.
static void
hash_grow(MapType *t, Hmap *h)
{
	byte *old_buckets;
	byte *new_buckets;
	uint8 flags;

	// allocate a bigger hash table
	if(h->oldbuckets != nil)
		runtime·throw("evacuation not done in time");
	old_buckets = h->buckets;
	// NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast.
	if(checkgc) mstats.next_gc = mstats.heap_alloc;
	new_buckets = runtime·mallocgc((uintptr)h->bucketsize << (h->B + 1), 0, 1, 0);
	flags = (h->flags & ~(Iterator | OldIterator));
	if((h->flags & Iterator) != 0) {
		// Existing iterators continue walking the old buckets.
		flags |= OldIterator;
		// We can't free indirect keys any more, as
		// they are potentially aliased across buckets.
		flags &= ~CanFreeKey;
	}

	// commit the grow (atomic wrt gc)
	h->B++;
	h->flags = flags;
	h->oldbuckets = old_buckets;
	h->buckets = new_buckets;
	h->nevacuate = 0;

	// the actual copying of the hash table data is done incrementally
	// by grow_work() and evacuate().
	if(docheck)
		check(t, h);
}
   475  
   476  // returns ptr to value associated with key *keyp, or nil if none.
   477  // if it returns non-nil, updates *keyp to point to the currently stored key.
   478  static byte*
   479  hash_lookup(MapType *t, Hmap *h, byte **keyp)
   480  {
   481  	void *key;
   482  	uintptr hash;
   483  	uintptr bucket, oldbucket;
   484  	Bucket *b;
   485  	uint8 top;
   486  	uintptr i;
   487  	bool eq;
   488  	byte *k, *k2, *v;
   489  
   490  	key = *keyp;
   491  	if(docheck)
   492  		check(t, h);
   493  	if(h->count == 0)
   494  		return nil;
   495  	hash = h->hash0;
   496  	t->key->alg->hash(&hash, t->key->size, key);
   497  	bucket = hash & (((uintptr)1 << h->B) - 1);
   498  	if(h->oldbuckets != nil) {
   499  		oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
   500  		b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   501  		if(evacuated(b)) {
   502  			b = (Bucket*)(h->buckets + bucket * h->bucketsize);
   503  		}
   504  	} else {
   505  		b = (Bucket*)(h->buckets + bucket * h->bucketsize);
   506  	}
   507  	top = hash >> (sizeof(uintptr)*8 - 8);
   508  	if(top == 0)
   509  		top = 1;
   510  	do {
   511  		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   512  			if(b->tophash[i] == top) {
   513  				k2 = IK(h, k);
   514  				t->key->alg->equal(&eq, t->key->size, key, k2);
   515  				if(eq) {
   516  					*keyp = k2;
   517  					return IV(h, v);
   518  				}
   519  			}
   520  		}
   521  		b = b->overflow;
   522  	} while(b != nil);
   523  	return nil;
   524  }
   525  
// When an item is not found, fast versions return a pointer to this zeroed memory.
// Sized for the largest inline value; larger values are stored indirectly.
static uint8 empty_value[MAXVALUESIZE];
   528  
// Specialized versions of mapaccess1 for specific types.
// See ./hashmap_fast.c and ../../cmd/gc/walk.c.
// Each stanza below parameterizes hashmap_fast.c via macros for one key
// type (uint32, uint64, String), then undefines them before the next
// inclusion of the template.
#define HASH_LOOKUP1 runtime·mapaccess1_fast32
#define HASH_LOOKUP2 runtime·mapaccess2_fast32
#define KEYTYPE uint32
#define HASHFUNC runtime·algarray[AMEM32].hash
#define EQFUNC(x,y) ((x) == (y))
#define EQMAYBE(x,y) ((x) == (y))
#define HASMAYBE false
#define QUICKEQ(x) true
#include "hashmap_fast.c"

#undef HASH_LOOKUP1
#undef HASH_LOOKUP2
#undef KEYTYPE
#undef HASHFUNC
#undef EQFUNC
#undef EQMAYBE
#undef HASMAYBE
#undef QUICKEQ

#define HASH_LOOKUP1 runtime·mapaccess1_fast64
#define HASH_LOOKUP2 runtime·mapaccess2_fast64
#define KEYTYPE uint64
#define HASHFUNC runtime·algarray[AMEM64].hash
#define EQFUNC(x,y) ((x) == (y))
#define EQMAYBE(x,y) ((x) == (y))
#define HASMAYBE false
#define QUICKEQ(x) true
#include "hashmap_fast.c"

#undef HASH_LOOKUP1
#undef HASH_LOOKUP2
#undef KEYTYPE
#undef HASHFUNC
#undef EQFUNC
#undef EQMAYBE
#undef HASMAYBE
#undef QUICKEQ

// Strings: EQMAYBE is a cheap length check; the full comparison may be
// deferred (HASMAYBE) for long strings (QUICKEQ false when len >= 32).
#define HASH_LOOKUP1 runtime·mapaccess1_faststr
#define HASH_LOOKUP2 runtime·mapaccess2_faststr
#define KEYTYPE String
#define HASHFUNC runtime·algarray[ASTRING].hash
#define EQFUNC(x,y) ((x).len == (y).len && ((x).str == (y).str || runtime·memeq((x).str, (y).str, (x).len)))
#define EQMAYBE(x,y) ((x).len == (y).len)
#define HASMAYBE true
#define QUICKEQ(x) ((x).len < 32)
#include "hashmap_fast.c"
   578  
   579  static void
   580  hash_insert(MapType *t, Hmap *h, void *key, void *value)
   581  {
   582  	uintptr hash;
   583  	uintptr bucket;
   584  	uintptr i;
   585  	bool eq;
   586  	Bucket *b;
   587  	Bucket *newb;
   588  	uint8 *inserti;
   589  	byte *insertk, *insertv;
   590  	uint8 top;
   591  	byte *k, *v;
   592  	byte *kmem, *vmem;
   593  
   594  	if(docheck)
   595  		check(t, h);
   596  	hash = h->hash0;
   597  	t->key->alg->hash(&hash, t->key->size, key);
   598  	if(h->buckets == nil) {
   599  		h->buckets = runtime·mallocgc(h->bucketsize, 0, 1, 0);
   600  		b = (Bucket*)(h->buckets);
   601  		clearbucket(b);
   602  	}
   603  
   604   again:
   605  	bucket = hash & (((uintptr)1 << h->B) - 1);
   606  	if(h->oldbuckets != nil)
   607  		grow_work(t, h, bucket);
   608  	b = (Bucket*)(h->buckets + bucket * h->bucketsize);
   609  	top = hash >> (sizeof(uintptr)*8 - 8);
   610  	if(top == 0)
   611  		top = 1;
   612  	inserti = 0;
   613  	insertk = nil;
   614  	insertv = nil;
   615  	while(true) {
   616  		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   617  			if(b->tophash[i] != top) {
   618  				if(b->tophash[i] == 0 && inserti == nil) {
   619  					inserti = &b->tophash[i];
   620  					insertk = k;
   621  					insertv = v;
   622  				}
   623  				continue;
   624  			}
   625  			t->key->alg->equal(&eq, t->key->size, key, IK(h, k));
   626  			if(!eq)
   627  				continue;
   628  			// already have a mapping for key.  Update it.
   629  			t->key->alg->copy(t->key->size, IK(h, k), key); // Need to update key for keys which are distinct but equal (e.g. +0.0 and -0.0)
   630  			t->elem->alg->copy(t->elem->size, IV(h, v), value);
   631  			if(docheck)
   632  				check(t, h);
   633  			return;
   634  		}
   635  		if(b->overflow == nil)
   636  			break;
   637  		b = b->overflow;
   638  	}
   639  
   640  	// did not find mapping for key.  Allocate new cell & add entry.
   641  	if(h->count >= LOAD * ((uintptr)1 << h->B) && h->count >= BUCKETSIZE) {
   642  		hash_grow(t, h);
   643  		goto again; // Growing the table invalidates everything, so try again
   644  	}
   645  
   646  	if(inserti == nil) {
   647  		// all current buckets are full, allocate a new one.
   648  		if(checkgc) mstats.next_gc = mstats.heap_alloc;
   649  		newb = runtime·mallocgc(h->bucketsize, 0, 1, 0);
   650  		clearbucket(newb);
   651  		b->overflow = newb;
   652  		inserti = newb->tophash;
   653  		insertk = newb->data;
   654  		insertv = insertk + h->keysize * BUCKETSIZE;
   655  	}
   656  
   657  	// store new key/value at insert position
   658  	if((h->flags & IndirectKey) != 0) {
   659  		if(checkgc) mstats.next_gc = mstats.heap_alloc;
   660  		kmem = runtime·mallocgc(t->key->size, 0, 1, 0);
   661  		*(byte**)insertk = kmem;
   662  		insertk = kmem;
   663  	}
   664  	if((h->flags & IndirectValue) != 0) {
   665  		if(checkgc) mstats.next_gc = mstats.heap_alloc;
   666  		vmem = runtime·mallocgc(t->elem->size, 0, 1, 0);
   667  		*(byte**)insertv = vmem;
   668  		insertv = vmem;
   669  	}
   670  	t->key->alg->copy(t->key->size, insertk, key);
   671  	t->elem->alg->copy(t->elem->size, insertv, value);
   672  	*inserti = top;
   673  	h->count++;
   674  	if(docheck)
   675  		check(t, h);
   676  }
   677  
// hash_remove deletes the entry matching key, if present.  Indirect
// key/value storage is freed only when the flags permit, and only after
// the cell has been marked dead, so the table never holds a reachable
// pointer to freed memory.
static void
hash_remove(MapType *t, Hmap *h, void *key)
{
	uintptr hash;
	uintptr bucket;
	Bucket *b;
	uint8 top;
	uintptr i;
	byte *k, *v;
	bool eq;
	
	if(docheck)
		check(t, h);
	if(h->count == 0)
		return;
	hash = h->hash0;
	t->key->alg->hash(&hash, t->key->size, key);
	bucket = hash & (((uintptr)1 << h->B) - 1);
	if(h->oldbuckets != nil)
		grow_work(t, h, bucket);
	b = (Bucket*)(h->buckets + bucket * h->bucketsize);
	top = hash >> (sizeof(uintptr)*8 - 8);
	if(top == 0)
		top = 1; // 0 is reserved to mean "empty slot"
	do {
		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
			if(b->tophash[i] != top)
				continue;
			t->key->alg->equal(&eq, t->key->size, key, IK(h, k));
			if(!eq)
				continue;

			// Load the indirect pointers before killing the cell...
			if((h->flags & CanFreeKey) != 0) {
				k = *(byte**)k;
			}
			if((h->flags & IndirectValue) != 0) {
				v = *(byte**)v;
			}

			// ...mark the cell dead...
			b->tophash[i] = 0;
			h->count--;
			
			// ...and only then free the storage.
			if((h->flags & CanFreeKey) != 0) {
				runtime·free(k);
			}
			if((h->flags & IndirectValue) != 0) {
				runtime·free(v);
			}
			// TODO: consolidate buckets if they are mostly empty
			// can only consolidate if there are no live iterators at this size.
			if(docheck)
				check(t, h);
			return;
		}
		b = b->overflow;
	} while(b != nil);
}
   735  
   736  // TODO: shrink the map, the same way we grow it.
   737  
   738  // If you modify hash_iter, also change cmd/gc/range.c to indicate
   739  // the size of this structure.
struct hash_iter
{
	uint8* key; // Must be in first position.  Write nil to indicate iteration end (see cmd/gc/range.c).
	uint8* value;

	MapType *t;
	Hmap *h;

	// end point for iteration (iteration starts at a random bucket and wraps around)
	uintptr endbucket;
	bool wrapped;

	// state of table at time iterator is initialized
	uint8 B;
	byte *buckets;

	// iter state
	uintptr bucket;              // current bucket number
	struct Bucket *bptr;         // current bucket in the overflow chain
	uintptr i;                   // next slot to examine in *bptr
	intptr check_bucket;         // old-bucket filter while growing; -1 when unused
};
   762  
   763  // iterator state:
   764  // bucket: the current bucket ID
   765  // b: the current Bucket in the chain
   766  // i: the next offset to check in the current bucket
// hash_iter_init prepares *it to iterate over map h, snapshotting the
// bucket array and marking the map as having live iterators.
static void
hash_iter_init(MapType *t, Hmap *h, struct hash_iter *it)
{
	uint32 old;

	// The compiler stack-allocates iterators assuming exactly this size.
	if(sizeof(struct hash_iter) / sizeof(uintptr) != 11) {
		runtime·throw("hash_iter size incorrect"); // see ../../cmd/gc/range.c
	}
	it->t = t;
	it->h = h;

	// grab snapshot of bucket state
	it->B = h->B;
	it->buckets = h->buckets;

	// iterator state: start at a random bucket so iteration order varies
	it->bucket = it->endbucket = runtime·fastrand1() & (((uintptr)1 << h->B) - 1);
	it->wrapped = false;
	it->bptr = nil;

	// Remember we have an iterator.
	// Can run concurrently with another hash_iter_init() and with reflect·mapiterinit().
	for(;;) {
		old = h->flags;
		if((old&(Iterator|OldIterator)) == (Iterator|OldIterator))
			break;
		if(runtime·cas(&h->flags, old, old|Iterator|OldIterator))
			break;
	}

	if(h->buckets == nil) {
		// Empty map. Force next hash_next to exit without
		// evaluating h->bucket.
		it->wrapped = true;
	}
}
   803  
   804  // initializes it->key and it->value to the next key/value pair
   805  // in the iteration, or nil if we've reached the end.
   806  static void
   807  hash_next(struct hash_iter *it)
   808  {
   809  	Hmap *h;
   810  	MapType *t;
   811  	uintptr bucket, oldbucket;
   812  	uintptr hash;
   813  	Bucket *b;
   814  	uintptr i;
   815  	intptr check_bucket;
   816  	bool eq;
   817  	byte *k, *v;
   818  	byte *rk, *rv;
   819  
   820  	h = it->h;
   821  	t = it->t;
   822  	bucket = it->bucket;
   823  	b = it->bptr;
   824  	i = it->i;
   825  	check_bucket = it->check_bucket;
   826  
   827  next:
   828  	if(b == nil) {
   829  		if(bucket == it->endbucket && it->wrapped) {
   830  			// end of iteration
   831  			it->key = nil;
   832  			it->value = nil;
   833  			return;
   834  		}
   835  		if(h->oldbuckets != nil && it->B == h->B) {
   836  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   837  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   838  			// bucket hasn't been evacuated) then we need to iterate through the old
   839  			// bucket and only return the ones that will be migrated to this bucket.
   840  			oldbucket = bucket & (((uintptr)1 << (it->B - 1)) - 1);
   841  			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   842  			if(!evacuated(b)) {
   843  				check_bucket = bucket;
   844  			} else {
   845  				b = (Bucket*)(it->buckets + bucket * h->bucketsize);
   846  				check_bucket = -1;
   847  			}
   848  		} else {
   849  			b = (Bucket*)(it->buckets + bucket * h->bucketsize);
   850  			check_bucket = -1;
   851  		}
   852  		bucket++;
   853  		if(bucket == ((uintptr)1 << it->B)) {
   854  			bucket = 0;
   855  			it->wrapped = true;
   856  		}
   857  		i = 0;
   858  	}
   859  	k = b->data + h->keysize * i;
   860  	v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
   861  	for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   862  		if(b->tophash[i] != 0) {
   863  			if(check_bucket >= 0) {
   864  				// Special case: iterator was started during a grow and the
   865  				// grow is not done yet.  We're working on a bucket whose
   866  				// oldbucket has not been evacuated yet.  So we iterate
   867  				// through the oldbucket, skipping any keys that will go
   868  				// to the other new bucket (each oldbucket expands to two
   869  				// buckets during a grow).
   870  				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
   871  				if(!eq) {
   872  					// Hash is meaningless if k != k (NaNs).  Return all
   873  					// NaNs during the first of the two new buckets.
   874  					if(bucket >= ((uintptr)1 << (it->B - 1))) {
   875  						continue;
   876  					}
   877  				} else {
   878  					// If the item in the oldbucket is not destined for
   879  					// the current new bucket in the iteration, skip it.
   880  					hash = h->hash0;
   881  					t->key->alg->hash(&hash, t->key->size, IK(h, k));
   882  					if((hash & (((uintptr)1 << it->B) - 1)) != check_bucket) {
   883  						continue;
   884  					}
   885  				}
   886  			}
   887  			if(!evacuated(b)) {
   888  				// this is the golden data, we can return it.
   889  				it->key = IK(h, k);
   890  				it->value = IV(h, v);
   891  			} else {
   892  				// The hash table has grown since the iterator was started.
   893  				// The golden data for this key is now somewhere else.
   894  				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
   895  				if(eq) {
   896  					// Check the current hash table for the data.
   897  					// This code handles the case where the key
   898  					// has been deleted, updated, or deleted and reinserted.
   899  					// NOTE: we need to regrab the key as it has potentially been
   900  					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   901  					rk = IK(h, k);
   902  					rv = hash_lookup(t, it->h, &rk);
   903  					if(rv == nil)
   904  						continue; // key has been deleted
   905  					it->key = rk;
   906  					it->value = rv;
   907  				} else {
   908  					// if key!=key then the entry can't be deleted or
   909  					// updated, so we can just return it.  That's lucky for
   910  					// us because when key!=key we can't look it up
   911  					// successfully in the current table.
   912  					it->key = IK(h, k);
   913  					it->value = IV(h, v);
   914  				}
   915  			}
   916  			it->bucket = bucket;
   917  			it->bptr = b;
   918  			it->i = i + 1;
   919  			it->check_bucket = check_bucket;
   920  			return;
   921  		}
   922  	}
   923  	b = overflowptr(b);
   924  	i = 0;
   925  	goto next;
   926  }
   927  
   928  
// Phases of the garbage collector's walk over a map, in the order
// hash_gciter_next visits them: entries in the current buckets, then
// entries still sitting in not-yet-evacuated old buckets, then the
// bucket arrays themselves (so the GC sees those allocations too).
#define PHASE_BUCKETS      0
#define PHASE_OLD_BUCKETS  1
#define PHASE_TABLE        2
#define PHASE_OLD_TABLE    3
#define PHASE_DONE         4
   934  
   935  // Initialize the iterator.
   936  // Returns false if Hmap contains no pointers (in which case the iterator is not initialized).
   937  bool
   938  hash_gciter_init (Hmap *h, struct hash_gciter *it)
   939  {
   940  	// GC during map initialization or on an empty map.
   941  	if(h->buckets == nil)
   942  		return false;
   943  
   944  	it->h = h;
   945  	it->phase = PHASE_BUCKETS;
   946  	it->bucket = 0;
   947  	it->b = nil;
   948  
   949  	// TODO: finish evacuating oldbuckets so that we can collect
   950  	// oldbuckets?  We don't want to keep a partially evacuated
   951  	// table around forever, so each gc could make at least some
   952  	// evacuation progress.  Need to be careful about concurrent
   953  	// access if we do concurrent gc.  Even if not, we don't want
   954  	// to make the gc pause any longer than it has to be.
   955  
   956  	return true;
   957  }
   958  
// Returns true and fills *data with internal structure/key/value data,
// or returns false if the iterator has terminated.
// Ugh, this interface is really annoying.  I want a callback fn!
//
// The walk is a small state machine (it->phase): each call resumes from
// the saved (phase, bucket, b, i) position, reports exactly one item
// (a key/value pair or an internal allocation in data->st), saves its
// position, and returns.
bool
hash_gciter_next(struct hash_gciter *it, struct hash_gciter_data *data)
{
	Hmap *h;
	uintptr bucket, oldbucket;
	Bucket *b, *oldb;
	uintptr i;
	byte *k, *v;

	// Resume from the position saved by the previous call.
	h = it->h;
	bucket = it->bucket;
	b = it->b;
	i = it->i;

	// Default result: nothing found yet.  indirectkey/indirectval tell
	// the GC whether the key/value slots hold pointers to the data
	// rather than the data itself.
	data->st = nil;
	data->key_data = nil;
	data->val_data = nil;
	data->indirectkey = (h->flags & IndirectKey) != 0;
	data->indirectval = (h->flags & IndirectValue) != 0;

next:
	switch (it->phase) {
	case PHASE_BUCKETS:
		// Walk live entries in the current bucket array.
		if(b != nil) {
			// Keys are stored first in the bucket, then values.
			k = b->data + h->keysize * i;
			v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
			for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
				if(b->tophash[i] != 0) {
					// Occupied slot: report it and save position.
					data->key_data = k;
					data->val_data = v;
					it->bucket = bucket;
					it->b = b;
					it->i = i + 1;
					return true;
				}
			}
			b = b->overflow;
			if(b != nil) {
				// Report the overflow bucket itself as an
				// internal allocation the GC must keep alive.
				data->st = (byte*)b;
				it->bucket = bucket;
				it->b = b;
				it->i = 0;
				return true;
			}
		}
		while(bucket < ((uintptr)1 << h->B)) {
			if(h->oldbuckets != nil) {
				// Mid-grow: a new bucket's contents are only
				// valid once its old bucket has been evacuated;
				// unevacuated data is reported in the next phase.
				oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
				oldb = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
				if(!evacuated(oldb)) {
					// new bucket isn't valid yet
					bucket++;
					continue;
				}
			}
			b = (Bucket*)(h->buckets + bucket * h->bucketsize);
			i = 0;
			bucket++;
			goto next;
		}
		// Done with the new table; restart the scan over oldbuckets.
		it->phase = PHASE_OLD_BUCKETS;
		bucket = 0;
		b = nil;
		goto next;
	case PHASE_OLD_BUCKETS:
		if(h->oldbuckets == nil) {
			it->phase = PHASE_TABLE;
			goto next;
		}
		if(b != nil) {
			k = b->data + h->keysize * i;
			v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
			for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
				if(b->tophash[i] != 0) {
					data->key_data = k;
					data->val_data = v;
					it->bucket = bucket;
					it->b = b;
					it->i = i + 1;
					return true;
				}
			}
			// overflowptr strips any evacuation marker from the
			// overflow pointer (unlike the plain b->overflow above).
			b = overflowptr(b);
			if(b != nil) {
				data->st = (byte*)b;
				it->bucket = bucket;
				it->b = b;
				it->i = 0;
				return true;
			}
		}
		// The old table is half the size of the current one.
		if(bucket < ((uintptr)1 << (h->B - 1))) {
			b = (Bucket*)(h->oldbuckets + bucket * h->bucketsize);
			bucket++;
			i = 0;
			goto next;
		}
		it->phase = PHASE_TABLE;
		goto next;
	case PHASE_TABLE:
		// Report the bucket array allocation itself.
		it->phase = PHASE_OLD_TABLE;
		data->st = h->buckets;
		return true;
	case PHASE_OLD_TABLE:
		// Report the old bucket array, if a grow is in progress.
		it->phase = PHASE_DONE;
		if(h->oldbuckets != nil) {
			data->st = h->oldbuckets;
			return true;
		} else {
			goto next;
		}
	}
	if(it->phase != PHASE_DONE)
		runtime·throw("bad phase at done");
	return false;
}
  1078  
//
// interfaces to go runtime
//
  1082  
  1083  void
  1084  reflect·ismapkey(Type *typ, bool ret)
  1085  {
  1086  	ret = typ != nil && typ->alg->hash != runtime·nohash;
  1087  	FLUSH(&ret);
  1088  }
  1089  
// Allocate and initialize a new map whose key/value types are described
// by typ.  hint is the caller's expected element count (the optional
// second argument to make).  Panics if hint is out of range; throws if
// the key type has no usable hash function.
Hmap*
runtime·makemap_c(MapType *typ, int64 hint)
{
	Hmap *h;
	Type *key;

	key = typ->key;

	// hint must be non-negative and fit in an int32.
	if(hint < 0 || (int32)hint != hint)
		runtime·panicstring("makemap: size out of range");

	if(key->alg->hash == runtime·nohash)
		runtime·throw("runtime.makemap: unsupported map key type");

	h = runtime·mal(sizeof(*h));

	if(UseSpanType) {
		if(false) {
			runtime·printf("makemap %S: %p\n", *typ->string, h);
		}
		// Record the map's type so the GC can scan it precisely.
		runtime·settype(h, (uintptr)typ | TypeInfo_Map);
	}

	hash_init(typ, h, hint);

	// these calculations are compiler dependent.
	// figure out offsets of map call arguments.

	if(debug) {
		runtime·printf("makemap: map=%p; keysize=%p; valsize=%p; keyalg=%p; valalg=%p\n",
			       h, key->size, typ->elem->size, key->alg, typ->elem->alg);
	}

	return h;
}
  1125  
// makemap(key, val *Type, hint int64) (hmap *map[any]any);
// Entry point compiled code calls for make(map[k]v, hint).
// ret is the caller's stack result slot; FLUSH forces the store.
void
runtime·makemap(MapType *typ, int64 hint, Hmap *ret)
{
	ret = runtime·makemap_c(typ, hint);
	FLUSH(&ret);
}
  1133  
// For reflect:
//	func makemap(Type *mapType) (hmap *map)
// reflect.MakeMap supplies no size hint, so pass 0.
void
reflect·makemap(MapType *t, Hmap *ret)
{
	ret = runtime·makemap_c(t, 0);
	FLUSH(&ret);
}
  1142  
  1143  void
  1144  runtime·mapaccess(MapType *t, Hmap *h, byte *ak, byte *av, bool *pres)
  1145  {
  1146  	byte *res;
  1147  	Type *elem;
  1148  
  1149  	elem = t->elem;
  1150  	if(h == nil || h->count == 0) {
  1151  		elem->alg->copy(elem->size, av, nil);
  1152  		*pres = false;
  1153  		return;
  1154  	}
  1155  
  1156  	if(runtime·gcwaiting)
  1157  		runtime·gosched();
  1158  
  1159  	res = hash_lookup(t, h, &ak);
  1160  
  1161  	if(res != nil) {
  1162  		*pres = true;
  1163  		elem->alg->copy(elem->size, av, res);
  1164  	} else {
  1165  		*pres = false;
  1166  		elem->alg->copy(elem->size, av, nil);
  1167  	}
  1168  }
  1169  
// mapaccess1(hmap *map[any]any, key any) (val any);
// The key and the result are laid out on the caller's stack after the
// fixed arguments; textflag 7 (no stack split) keeps &h + 1 pointing
// into that frame.
#pragma textflag 7
void
runtime·mapaccess1(MapType *t, Hmap *h, ...)
{
	byte *ak, *av;
	byte *res;

	if(raceenabled && h != nil)
		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess1);

	// ak = key slot, av = value result slot in the argument frame.
	ak = (byte*)(&h + 1);
	av = ak + ROUND(t->key->size, Structrnd);

	if(h == nil || h->count == 0) {
		// Nil/empty map: result is the zero value.
		t->elem->alg->copy(t->elem->size, av, nil);
	} else {
		// res may be nil (key absent); copy then produces the
		// zero value, matching the copy(…, nil) call above.
		res = hash_lookup(t, h, &ak);
		t->elem->alg->copy(t->elem->size, av, res);
	}

	if(debug) {
		runtime·prints("runtime.mapaccess1: map=");
		runtime·printpointer(h);
		runtime·prints("; key=");
		t->key->alg->print(t->key->size, ak);
		runtime·prints("; val=");
		t->elem->alg->print(t->elem->size, av);
		runtime·prints("\n");
	}
}
  1201  
// mapaccess2(hmap *map[any]any, key any) (val any, pres bool);
// The key and both results are laid out on the caller's stack after the
// fixed arguments (textflag 7: no stack split).
#pragma textflag 7
void
runtime·mapaccess2(MapType *t, Hmap *h, ...)
{
	byte *ak, *av, *ap;

	if(raceenabled && h != nil)
		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess2);

	// ak = key slot, av = value result slot, ap = pres result slot.
	ak = (byte*)(&h + 1);
	av = ak + ROUND(t->key->size, Structrnd);
	ap = av + t->elem->size;

	runtime·mapaccess(t, h, ak, av, ap);

	if(debug) {
		runtime·prints("runtime.mapaccess2: map=");
		runtime·printpointer(h);
		runtime·prints("; key=");
		t->key->alg->print(t->key->size, ak);
		runtime·prints("; val=");
		t->elem->alg->print(t->elem->size, av);
		runtime·prints("; pres=");
		runtime·printbool(*ap);
		runtime·prints("\n");
	}
}
  1230  
// For reflect:
//	func mapaccess(t type, h map, key iword) (val iword, pres bool)
// where an iword is the same word an interface value would use:
// the actual data if it fits, or else a pointer to the data.
void
reflect·mapaccess(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
{
	byte *ak, *av;

	if(raceenabled && h != nil)
		runtime·racereadpc(h, runtime·getcallerpc(&t), reflect·mapaccess);

	// Unpack the key iword: small keys live in the word itself,
	// larger keys behind a pointer.
	if(t->key->size <= sizeof(key))
		ak = (byte*)&key;
	else
		ak = (byte*)key;
	val = 0;
	pres = false;
	// Small values are written directly into the result word; larger
	// ones go into a freshly allocated buffer returned via the word.
	if(t->elem->size <= sizeof(val))
		av = (byte*)&val;
	else {
		av = runtime·mal(t->elem->size);
		val = (uintptr)av;
	}
	runtime·mapaccess(t, h, ak, av, &pres);
	FLUSH(&val);
	FLUSH(&pres);
}
  1259  
  1260  void
  1261  runtime·mapassign(MapType *t, Hmap *h, byte *ak, byte *av)
  1262  {
  1263  	if(h == nil)
  1264  		runtime·panicstring("assignment to entry in nil map");
  1265  
  1266  	if(runtime·gcwaiting)
  1267  		runtime·gosched();
  1268  
  1269  	if(av == nil) {
  1270  		hash_remove(t, h, ak);
  1271  	} else {
  1272  		hash_insert(t, h, ak, av);
  1273  	}
  1274  
  1275  	if(debug) {
  1276  		runtime·prints("mapassign: map=");
  1277  		runtime·printpointer(h);
  1278  		runtime·prints("; key=");
  1279  		t->key->alg->print(t->key->size, ak);
  1280  		runtime·prints("; val=");
  1281  		t->elem->alg->print(t->elem->size, av);
  1282  		runtime·prints("\n");
  1283  	}
  1284  }
  1285  
// mapassign1(mapType *type, hmap *map[any]any, key any, val any);
// Key and value follow the fixed arguments on the caller's stack
// (textflag 7: no stack split).
#pragma textflag 7
void
runtime·mapassign1(MapType *t, Hmap *h, ...)
{
	byte *ak, *av;

	if(h == nil)
		runtime·panicstring("assignment to entry in nil map");

	if(raceenabled)
		runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapassign1);
	// ak = key slot; the value follows, aligned to the element type.
	ak = (byte*)(&h + 1);
	av = ak + ROUND(t->key->size, t->elem->align);

	runtime·mapassign(t, h, ak, av);
}
  1303  
// mapdelete(mapType *type, hmap *map[any]any, key any)
// Deleting from a nil map is a no-op.  The key follows the fixed
// arguments on the caller's stack (textflag 7: no stack split).
#pragma textflag 7
void
runtime·mapdelete(MapType *t, Hmap *h, ...)
{
	byte *ak;

	if(h == nil)
		return;

	if(raceenabled)
		runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapdelete);
	ak = (byte*)(&h + 1);
	// A nil value pointer tells mapassign to remove the key.
	runtime·mapassign(t, h, ak, nil);

	if(debug) {
		runtime·prints("mapdelete: map=");
		runtime·printpointer(h);
		runtime·prints("; key=");
		t->key->alg->print(t->key->size, ak);
		runtime·prints("\n");
	}
}
  1327  
  1328  // For reflect:
  1329  //	func mapassign(t type h map, key, val iword, pres bool)
  1330  // where an iword is the same word an interface value would use:
  1331  // the actual data if it fits, or else a pointer to the data.
  1332  void
  1333  reflect·mapassign(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
  1334  {
  1335  	byte *ak, *av;
  1336  
  1337  	if(h == nil)
  1338  		runtime·panicstring("assignment to entry in nil map");
  1339  	if(raceenabled)
  1340  		runtime·racewritepc(h, runtime·getcallerpc(&t), reflect·mapassign);
  1341  	if(t->key->size <= sizeof(key))
  1342  		ak = (byte*)&key;
  1343  	else
  1344  		ak = (byte*)key;
  1345  	if(t->elem->size <= sizeof(val))
  1346  		av = (byte*)&val;
  1347  	else
  1348  		av = (byte*)val;
  1349  	if(!pres)
  1350  		av = nil;
  1351  	runtime·mapassign(t, h, ak, av);
  1352  }
  1353  
// mapiterinit(mapType *type, hmap *map[any]any, hiter *any);
// Initialize it to iterate over h and advance it to the first entry.
// Iterating a nil map yields nothing (it->key is set to nil).
void
runtime·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
{
	if(h == nil) {
		it->key = nil;
		return;
	}
	if(raceenabled)
		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapiterinit);
	hash_iter_init(t, h, it);
	// Position the iterator on the first entry; an empty map
	// leaves it->key == nil.
	hash_next(it);
	if(debug) {
		runtime·prints("runtime.mapiterinit: map=");
		runtime·printpointer(h);
		runtime·prints("; iter=");
		runtime·printpointer(it);
		runtime·prints("; key=");
		runtime·printpointer(it->key);
		runtime·prints("\n");
	}
}
  1376  
// For reflect:
//	func mapiterinit(h map) (it iter)
// Allocates the iterator on the heap (reflect keeps a handle to it)
// and, for large keys, marks the map so key storage is not freed while
// reflect may still hold pointers returned by reflect·mapiterkey.
void
reflect·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
{
	uint32 old, new;

	if(h != nil && t->key->size > sizeof(void*)) {
		// reflect·mapiterkey returns pointers to key data,
		// and reflect holds them, so we cannot free key data
		// eagerly anymore.
		// Can run concurrently with another reflect·mapiterinit() and with hash_iter_init().
		// CAS loop: clear the relevant CanFree flag; stop as soon
		// as it is already clear or our update lands.
		for(;;) {
			old = h->flags;
			if(old & IndirectKey)
				new = old & ~CanFreeKey;
			else
				new = old & ~CanFreeBucket;
			if(new == old)
				break;
			if(runtime·cas(&h->flags, old, new))
				break;
		}
	}

	it = runtime·mal(sizeof *it);
	FLUSH(&it);
	runtime·mapiterinit(t, h, it);
}
  1406  
// mapiternext(hiter *any);
// Advance it to the next entry; afterwards it->key == nil means the
// iteration has finished.
void
runtime·mapiternext(struct hash_iter *it)
{
	if(raceenabled)
		runtime·racereadpc(it->h, runtime·getcallerpc(&it), runtime·mapiternext);
	// Give the garbage collector a chance to run.
	if(runtime·gcwaiting)
		runtime·gosched();

	hash_next(it);
	if(debug) {
		runtime·prints("runtime.mapiternext: iter=");
		runtime·printpointer(it);
		runtime·prints("; key=");
		runtime·printpointer(it->key);
		runtime·prints("\n");
	}
}
  1425  
// For reflect:
//	func mapiternext(it iter)
// Thin wrapper; the runtime entry point does the work.
void
reflect·mapiternext(struct hash_iter *it)
{
	runtime·mapiternext(it);
}
  1433  
// mapiter1(hiter *any) (key any);
// Copy the iterator's current key into the stack result slot.
// The iterator must be positioned on a valid entry (it->key != nil);
// the result slot follows the fixed argument (textflag 7: no split).
#pragma textflag 7
void
runtime·mapiter1(struct hash_iter *it, ...)
{
	byte *ak, *res;
	Type *key;

	ak = (byte*)(&it + 1);

	res = it->key;
	if(res == nil)
		runtime·throw("runtime.mapiter1: key:val nil pointer");

	key = it->t->key;
	key->alg->copy(key->size, ak, res);

	if(debug) {
		runtime·prints("mapiter1: iter=");
		runtime·printpointer(it);
		runtime·prints("; map=");
		runtime·printpointer(it->h);
		runtime·prints("\n");
	}
}
  1459  
  1460  bool
  1461  runtime·mapiterkey(struct hash_iter *it, void *ak)
  1462  {
  1463  	byte *res;
  1464  	Type *key;
  1465  
  1466  	res = it->key;
  1467  	if(res == nil)
  1468  		return false;
  1469  	key = it->t->key;
  1470  	key->alg->copy(key->size, ak, res);
  1471  	return true;
  1472  }
  1473  
  1474  // For reflect:
  1475  //	func mapiterkey(h map) (key iword, ok bool)
  1476  // where an iword is the same word an interface value would use:
  1477  // the actual data if it fits, or else a pointer to the data.
  1478  void
  1479  reflect·mapiterkey(struct hash_iter *it, uintptr key, bool ok)
  1480  {
  1481  	byte *res;
  1482  	Type *tkey;
  1483  
  1484  	key = 0;
  1485  	ok = false;
  1486  	res = it->key;
  1487  	if(res == nil) {
  1488  		key = 0;
  1489  		ok = false;
  1490  	} else {
  1491  		tkey = it->t->key;
  1492  		key = 0;
  1493  		if(tkey->size <= sizeof(key))
  1494  			tkey->alg->copy(tkey->size, (byte*)&key, res);
  1495  		else
  1496  			key = (uintptr)res;
  1497  		ok = true;
  1498  	}
  1499  	FLUSH(&key);
  1500  	FLUSH(&ok);
  1501  }
  1502  
  1503  // For reflect:
  1504  //	func maplen(h map) (len int)
  1505  // Like len(m) in the actual language, we treat the nil map as length 0.
  1506  void
  1507  reflect·maplen(Hmap *h, intgo len)
  1508  {
  1509  	if(h == nil)
  1510  		len = 0;
  1511  	else {
  1512  		len = h->count;
  1513  		if(raceenabled)
  1514  			runtime·racereadpc(h, runtime·getcallerpc(&h), reflect·maplen);
  1515  	}
  1516  	FLUSH(&len);
  1517  }
  1518  
// mapiter2(hiter *any) (key any, val any);
// Copy the iterator's current key and value into the stack result
// slots.  The iterator must be positioned on a valid entry; the result
// slots follow the fixed argument (textflag 7: no stack split).
#pragma textflag 7
void
runtime·mapiter2(struct hash_iter *it, ...)
{
	byte *ak, *av, *res;
	MapType *t;

	t = it->t;
	// ak = key result slot; the value slot follows, aligned to the
	// element type.
	ak = (byte*)(&it + 1);
	av = ak + ROUND(t->key->size, t->elem->align);

	res = it->key;
	if(res == nil)
		runtime·throw("runtime.mapiter2: key:val nil pointer");

	t->key->alg->copy(t->key->size, ak, res);
	t->elem->alg->copy(t->elem->size, av, it->value);

	if(debug) {
		runtime·prints("mapiter2: iter=");
		runtime·printpointer(it);
		runtime·prints("; map=");
		runtime·printpointer(it->h);
		runtime·prints("\n");
	}
}