github.com/varialus/godfly@v0.0.0-20130904042352-1934f9f095ab/src/pkg/runtime/hashmap.c

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  #include "runtime.h"
     6  #include "arch_GOARCH.h"
     7  #include "malloc.h"
     8  #include "type.h"
     9  #include "race.h"
    10  #include "../../cmd/ld/textflag.h"
    11  
    12  // This file contains the implementation of Go's map type.
    13  //
    14  // The map is just a hash table.  The data is arranged
    15  // into an array of buckets.  Each bucket contains up to
    16  // 8 key/value pairs.  The low-order bits of the hash are
    17  // used to select a bucket.  Each bucket contains a few
    18  // high-order bits of each hash to distinguish the entries
    19  // within a single bucket.
    20  //
    21  // If more than 8 keys hash to a bucket, we chain on
    22  // extra buckets.
    23  //
    24  // When the hashtable grows, we allocate a new array
    25  // of buckets twice as big.  Buckets are incrementally
    26  // copied from the old bucket array to the new bucket array.
    27  //
    28  // Map iterators walk through the array of buckets and
    29  // return the keys in walk order (bucket #, then overflow
    30  // chain order, then bucket index).  To maintain iteration
    31  // semantics, we never move keys within their bucket (if
    32  // we did, keys might be returned 0 or 2 times).  When
    33  // growing the table, iterators continue iterating through the
    34  // old table and must check the new table if the bucket
    35  // they are iterating through has been moved ("evacuated")
    36  // to the new table.
    37  
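// NOTE: a minimal standalone sketch of the hashing scheme described
// above: the low-order B bits of the hash select a bucket, and the
// high-order 8 bits become the per-slot "tophash" byte.  The hash value
// and B below are illustrative assumptions.
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t hash = (uintptr_t)0x9e3779b97f4a7c15ULL; // example hash
	uint8_t B = 4;                                     // 2^4 buckets

	// low-order bits pick the bucket
	uintptr_t bucket = hash & (((uintptr_t)1 << B) - 1);

	// high-order 8 bits distinguish entries within a bucket;
	// 0 means "empty slot", so a real 0 is remapped to 1
	uint8_t top = hash >> (8*sizeof(uintptr_t) - 8);
	if(top == 0)
		top = 1;

	printf("bucket=%lu top=%u\n", (unsigned long)bucket, top);
	return 0;
}
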
    38  // Maximum number of key/value pairs a bucket can hold.
    39  #define BUCKETSIZE 8
    40  
    41  // Maximum average load of a bucket that triggers growth.
    42  #define LOAD 6.5
    43  
    44  // Picking LOAD: too large and we have lots of overflow
    45  // buckets, too small and we waste a lot of space.  I wrote
    46  // a simple program to check some stats for different loads:
    47  // (64-bit, 8 byte keys and values)
    48  //        LOAD    %overflow  bytes/entry     hitprobe    missprobe
    49  //        4.00         2.13        20.77         3.00         4.00
    50  //        4.50         4.05        17.30         3.25         4.50
    51  //        5.00         6.85        14.77         3.50         5.00
    52  //        5.50        10.55        12.94         3.75         5.50
    53  //        6.00        15.27        11.67         4.00         6.00
    54  //        6.50        20.90        10.79         4.25         6.50
    55  //        7.00        27.14        10.15         4.50         7.00
    56  //        7.50        34.03         9.73         4.75         7.50
    57  //        8.00        41.10         9.40         5.00         8.00
    58  //
    59  // %overflow   = percentage of buckets which have an overflow bucket
    60  // bytes/entry = overhead bytes used per key/value pair
    61  // hitprobe    = # of entries to check when looking up a present key
    62  // missprobe   = # of entries to check when looking up an absent key
    63  //
    64  // Keep in mind this data is for maximally loaded tables, i.e. just
    65  // before the table grows.  Typical tables will be somewhat less loaded.
    66  
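// NOTE: a minimal standalone sketch of the growth test implied by LOAD:
// hash_insert below grows the table once count reaches LOAD * 2^B (and
// at least one full bucket).  The counts in main() are illustrative.
#include <stdint.h>
#include <stdio.h>

static int
needs_grow(uint64_t count, uint8_t B)
{
	// average load = count / 2^B buckets; grow past 6.5 per bucket
	return count >= 8 && count >= 6.5 * ((uint64_t)1 << B);
}

int
main(void)
{
	printf("%d\n", needs_grow(104, 4)); // 104 >= 6.5*16: grow
	printf("%d\n", needs_grow(103, 4)); // under threshold: don't
	return 0;
}
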
    67  // Maximum key or value size to keep inline (instead of mallocing per element).
    68  // Must fit in a uint8.
    69  // Fast versions cannot handle big values - the cutoff size for
    70  // fast versions in ../../cmd/gc/walk.c must be at most this value.
    71  #define MAXKEYSIZE 128
    72  #define MAXVALUESIZE 128
    73  
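// NOTE: a minimal standalone sketch of the inline-vs-indirect decision
// hash_init makes below using MAXKEYSIZE/MAXVALUESIZE; the 200-byte key
// is an illustrative assumption.
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	size_t keysize = 200;          // hypothetical large key type
	int indirect = keysize > 128;  // over MAXKEYSIZE?
	if(indirect)
		keysize = sizeof(void*); // bucket stores a pointer instead
	printf("indirect=%d stored=%zu bytes\n", indirect, keysize);
	return 0;
}
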
    74  typedef struct Bucket Bucket;
    75  struct Bucket
    76  {
    77  	// Note: the format of the Bucket is encoded in ../../cmd/gc/reflect.c and
    78  	// ../reflect/type.go.  Don't change this structure without also changing that code!
    79  	uint8  tophash[BUCKETSIZE]; // top 8 bits of hash of each entry (0 = empty)
    80  	Bucket *overflow;           // overflow bucket, if any
    81  	byte   data[1];             // BUCKETSIZE keys followed by BUCKETSIZE values
    82  };
    83  // NOTE: packing all the keys together and then all the values together makes the
    84  // code a bit more complicated than alternating key/value/key/value/... but it allows
    85  // us to eliminate padding which would be needed for, e.g., map[int64]int8.
    86  
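// NOTE: a minimal standalone sketch of the bucket data layout described
// in the NOTE above: 8 keys packed together, then 8 values.  The sizes
// in main() (a hypothetical map[int64]int8) are illustrative.
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	size_t keysize = 8, valuesize = 1; // map[int64]int8
	size_t i = 3;                      // slot index within the bucket

	// offsets from the start of the bucket's data area
	size_t koff = i * keysize;
	size_t voff = 8 * keysize + i * valuesize;

	// interleaved key/value pairs would need 7 padding bytes per
	// pair to keep each int64 key aligned; this layout needs none
	printf("key[%zu] at %zu, value[%zu] at %zu\n", i, koff, i, voff);
	return 0;
}
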
    87  // Low-order bit of overflow field is used to mark a bucket as already evacuated
    88  // without destroying the overflow pointer.
    89  // Only buckets in oldbuckets will be marked as evacuated.
    90  // Evacuated bit will be set identically on the base bucket and any overflow buckets.
    91  #define evacuated(b) (((uintptr)(b)->overflow & 1) != 0)
    92  #define overflowptr(b) ((Bucket*)((uintptr)(b)->overflow & ~(uintptr)1))
    93  
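// NOTE: a minimal standalone sketch of the low-bit tagging used by
// evacuated() and overflowptr() above; the local int stands in for an
// overflow bucket pointer.
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	static int target;                // stand-in for an overflow bucket
	uintptr_t p = (uintptr_t)&target; // aligned, so bit 0 is free

	uintptr_t marked = p | 1;                   // set evacuated bit
	int evac = (marked & 1) != 0;               // evacuated(b)
	int *orig = (int*)(marked & ~(uintptr_t)1); // overflowptr(b)

	printf("evacuated=%d recovered=%d\n", evac, orig == &target);
	return 0;
}
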
    94  struct Hmap
    95  {
    96  	// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
    97  	// ../reflect/type.go.  Don't change this structure without also changing that code!
    98  	uintgo  count;        // # live cells == size of map.  Must be first (used by len() builtin)
    99  	uint32  flags;
   100  	uint32  hash0;        // hash seed
   101  	uint8   B;            // log_2 of # of buckets (can hold up to LOAD * 2^B items)
   102  	uint8   keysize;      // key size in bytes
   103  	uint8   valuesize;    // value size in bytes
   104  	uint16  bucketsize;   // bucket size in bytes
   105  
   106  	byte    *buckets;     // array of 2^B Buckets. may be nil if count==0.
   107  	byte    *oldbuckets;  // previous bucket array of half the size, non-nil only when growing
   108  	uintptr nevacuate;    // progress counter for evacuation (buckets less than this have been evacuated)
   109  };
   110  
   111  // possible flags
   112  enum
   113  {
   114  	IndirectKey = 1,    // storing pointers to keys
   115  	IndirectValue = 2,  // storing pointers to values
   116  	Iterator = 4,       // there may be an iterator using buckets
   117  	OldIterator = 8,    // there may be an iterator using oldbuckets
   118  };
   119  
   120  // Macros for dereferencing indirect keys
   121  #define IK(h, p) (((h)->flags & IndirectKey) != 0 ? *(byte**)(p) : (p))
   122  #define IV(h, p) (((h)->flags & IndirectValue) != 0 ? *(byte**)(p) : (p))
   123  
   124  enum
   125  {
   126  	docheck = 0,  // check invariants before and after every op.  Slow!!!
   127  	debug = 0,    // print every operation
   128  	checkgc = 0 || docheck,  // check interaction of mallocgc() with the garbage collector
   129  };
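
// Walks the whole table, rehashing every live key, and verifies the
// entry count and tophash invariants; runs only when docheck is set.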
   130  static void
   131  check(MapType *t, Hmap *h)
   132  {
   133  	uintptr bucket, oldbucket;
   134  	Bucket *b;
   135  	uintptr i;
   136  	uintptr hash;
   137  	uintgo cnt;
   138  	uint8 top;
   139  	bool eq;
   140  	byte *k, *v;
   141  
   142  	cnt = 0;
   143  
   144  	// check buckets
   145  	for(bucket = 0; bucket < (uintptr)1 << h->B; bucket++) {
   146  		if(h->oldbuckets != nil) {
   147  			oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
   148  			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   149  			if(!evacuated(b))
   150  				continue; // b is still uninitialized
   151  		}
   152  		for(b = (Bucket*)(h->buckets + bucket * h->bucketsize); b != nil; b = b->overflow) {
   153  			for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   154  				if(b->tophash[i] == 0)
   155  					continue;
   156  				cnt++;
   157  				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
   158  				if(!eq)
   159  					continue; // NaN!
   160  				hash = h->hash0;
   161  				t->key->alg->hash(&hash, t->key->size, IK(h, k));
   162  				top = hash >> (8*sizeof(uintptr) - 8);
   163  				if(top == 0)
   164  					top = 1;
   165  				if(top != b->tophash[i])
   166  					runtime·throw("bad hash");
   167  			}
   168  		}
   169  	}
   170  
   171  	// check oldbuckets
   172  	if(h->oldbuckets != nil) {
   173  		for(oldbucket = 0; oldbucket < (uintptr)1 << (h->B - 1); oldbucket++) {
   174  			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   175  			if(evacuated(b))
   176  				continue;
   177  			if(oldbucket < h->nevacuate)
   178  				runtime·throw("bucket became unevacuated");
   179  			for(; b != nil; b = overflowptr(b)) {
   180  				for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   181  					if(b->tophash[i] == 0)
   182  						continue;
   183  					cnt++;
   184  					t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
   185  					if(!eq)
   186  						continue; // NaN!
   187  					hash = h->hash0;
   188  					t->key->alg->hash(&hash, t->key->size, IK(h, k));
   189  					top = hash >> (8*sizeof(uintptr) - 8);
   190  					if(top == 0)
   191  						top = 1;
   192  					if(top != b->tophash[i])
   193  						runtime·throw("bad hash (old)");
   194  				}
   195  			}
   196  		}
   197  	}
   198  
   199  	if(cnt != h->count) {
   200  		runtime·printf("%D %D\n", (uint64)cnt, (uint64)h->count);
   201  		runtime·throw("entries missing");
   202  	}
   203  }
   204  
   205  static void
   206  hash_init(MapType *t, Hmap *h, uint32 hint)
   207  {
   208  	uint8 B;
   209  	byte *buckets;
   210  	uintptr keysize, valuesize, bucketsize;
   211  	uint8 flags;
   212  
   213  	flags = 0;
   214  
   215  	// figure out how big we have to make everything
   216  	keysize = t->key->size;
   217  	if(keysize > MAXKEYSIZE) {
   218  		flags |= IndirectKey;
   219  		keysize = sizeof(byte*);
   220  	}
   221  	valuesize = t->elem->size;
   222  	if(valuesize > MAXVALUESIZE) {
   223  		flags |= IndirectValue;
   224  		valuesize = sizeof(byte*);
   225  	}
   226  	bucketsize = offsetof(Bucket, data[0]) + (keysize + valuesize) * BUCKETSIZE;
   227  
   228  	// invariants we depend on.  We should probably check these at compile time
   229  	// somewhere, but for now we'll do it here.
   230  	if(t->key->align > BUCKETSIZE)
   231  		runtime·throw("key align too big");
   232  	if(t->elem->align > BUCKETSIZE)
   233  		runtime·throw("value align too big");
   234  	if(t->key->size % t->key->align != 0)
   235  		runtime·throw("key size not a multiple of key align");
   236  	if(t->elem->size % t->elem->align != 0)
   237  		runtime·throw("value size not a multiple of value align");
   238  	if(BUCKETSIZE < 8)
   239  		runtime·throw("bucketsize too small for proper alignment");
   240  	if(sizeof(void*) == 4 && t->key->align > 4)
   241  		runtime·throw("need padding in bucket (key)");
   242  	if(sizeof(void*) == 4 && t->elem->align > 4)
   243  		runtime·throw("need padding in bucket (value)");
   244  
   245  	// find size parameter which will hold the requested # of elements
   246  	B = 0;
   247  	while(hint > BUCKETSIZE && hint > LOAD * ((uintptr)1 << B))
   248  		B++;
   249  
   250  	// allocate initial hash table
   251  	// If hint is large, zeroing this memory could take a while.
   252  	if(checkgc) mstats.next_gc = mstats.heap_alloc;
   253  	if(B == 0) {
   254  		// done lazily later.
   255  		buckets = nil;
   256  	} else {
   257  		buckets = runtime·cnewarray(t->bucket, (uintptr)1 << B);
   258  	}
   259  
   260  	// initialize Hmap
   261  	h->count = 0;
   262  	h->B = B;
   263  	h->flags = flags;
   264  	h->keysize = keysize;
   265  	h->valuesize = valuesize;
   266  	h->bucketsize = bucketsize;
   267  	h->hash0 = runtime·fastrand1();
   268  	h->buckets = buckets;
   269  	h->oldbuckets = nil;
   270  	h->nevacuate = 0;
   271  	if(docheck)
   272  		check(t, h);
   273  }
   274  
   275  // Moves entries in oldbuckets[i] to buckets[i] and buckets[i+2^(B-1)].
   276  // We leave the original bucket intact, except for the evacuated marks, so that
   277  // iterators can still iterate through the old buckets.
   278  static void
   279  evacuate(MapType *t, Hmap *h, uintptr oldbucket)
   280  {
   281  	Bucket *b;
   282  	Bucket *nextb;
   283  	Bucket *x, *y;
   284  	Bucket *newx, *newy;
   285  	uintptr xi, yi;
   286  	uintptr newbit;
   287  	uintptr hash;
   288  	uintptr i;
   289  	byte *k, *v;
   290  	byte *xk, *yk, *xv, *yv;
   291  
   292  	b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   293  	newbit = (uintptr)1 << (h->B - 1);
   294  
   295  	if(!evacuated(b)) {
   296  		// TODO: reuse overflow buckets instead of using new ones, if there
   297  		// is no iterator using the old buckets.  (If !OldIterator.)
   298  
   299  		x = (Bucket*)(h->buckets + oldbucket * h->bucketsize);
   300  		y = (Bucket*)(h->buckets + (oldbucket + newbit) * h->bucketsize);
   301  		xi = 0;
   302  		yi = 0;
   303  		xk = x->data;
   304  		yk = y->data;
   305  		xv = xk + h->keysize * BUCKETSIZE;
   306  		yv = yk + h->keysize * BUCKETSIZE;
   307  		do {
   308  			for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   309  				if(b->tophash[i] == 0)
   310  					continue;
   311  				hash = h->hash0;
   312  				t->key->alg->hash(&hash, t->key->size, IK(h, k));
   313  				// NOTE: if key != key, then this hash could be (and probably will be)
   314  				// entirely different from the old hash.  We effectively only update
   315  				// the B'th bit of the hash in this case.
   316  				if((hash & newbit) == 0) {
   317  					if(xi == BUCKETSIZE) {
   318  						if(checkgc) mstats.next_gc = mstats.heap_alloc;
   319  						newx = runtime·cnew(t->bucket);
   320  						x->overflow = newx;
   321  						x = newx;
   322  						xi = 0;
   323  						xk = x->data;
   324  						xv = xk + h->keysize * BUCKETSIZE;
   325  					}
   326  					x->tophash[xi] = b->tophash[i];
   327  					if((h->flags & IndirectKey) != 0) {
   328  						*(byte**)xk = *(byte**)k;               // copy pointer
   329  					} else {
   330  						t->key->alg->copy(t->key->size, xk, k); // copy value
   331  					}
   332  					if((h->flags & IndirectValue) != 0) {
   333  						*(byte**)xv = *(byte**)v;
   334  					} else {
   335  						t->elem->alg->copy(t->elem->size, xv, v);
   336  					}
   337  					xi++;
   338  					xk += h->keysize;
   339  					xv += h->valuesize;
   340  				} else {
   341  					if(yi == BUCKETSIZE) {
   342  						if(checkgc) mstats.next_gc = mstats.heap_alloc;
   343  						newy = runtime·cnew(t->bucket);
   344  						y->overflow = newy;
   345  						y = newy;
   346  						yi = 0;
   347  						yk = y->data;
   348  						yv = yk + h->keysize * BUCKETSIZE;
   349  					}
   350  					y->tophash[yi] = b->tophash[i];
   351  					if((h->flags & IndirectKey) != 0) {
   352  						*(byte**)yk = *(byte**)k;
   353  					} else {
   354  						t->key->alg->copy(t->key->size, yk, k);
   355  					}
   356  					if((h->flags & IndirectValue) != 0) {
   357  						*(byte**)yv = *(byte**)v;
   358  					} else {
   359  						t->elem->alg->copy(t->elem->size, yv, v);
   360  					}
   361  					yi++;
   362  					yk += h->keysize;
   363  					yv += h->valuesize;
   364  				}
   365  			}
   366  
   367  			// mark as evacuated so we don't do it again.
   368  			// this also tells any iterators that this data isn't golden anymore.
   369  			nextb = b->overflow;
   370  			b->overflow = (Bucket*)((uintptr)nextb + 1);
   371  
   372  			b = nextb;
   373  		} while(b != nil);
   374  
   375  		// Unlink the overflow buckets to help GC.
   376  		b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   377  		if((h->flags & OldIterator) == 0)
   378  			b->overflow = (Bucket*)1;
   379  	}
   380  
   381  	// advance evacuation mark
   382  	if(oldbucket == h->nevacuate) {
   383  		h->nevacuate = oldbucket + 1;
   384  		if(oldbucket + 1 == newbit) // newbit == # of oldbuckets
   385  			// free main bucket array
   386  			h->oldbuckets = nil;
   387  	}
   388  	if(docheck)
   389  		check(t, h);
   390  }
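
// NOTE: a minimal standalone sketch of the x/y split performed above:
// each entry of old bucket i moves to new bucket i or i+newbit based on
// the newly significant hash bit.  The values in main() are
// illustrative assumptions.
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t oldB = 3;                        // growing 8 -> 16 buckets
	uintptr_t newbit = (uintptr_t)1 << oldB; // == # of old buckets
	uintptr_t oldbucket = 5;
	uintptr_t hash = 0x2d;                   // ends in ...101 == 5

	uintptr_t newbucket = (hash & newbit) == 0
		? oldbucket            // "x": same index as before
		: oldbucket + newbit;  // "y": index plus 2^(B-1)
	printf("old=%lu new=%lu\n", (unsigned long)oldbucket,
		(unsigned long)newbucket);
	return 0;
}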
   391  
   392  static void
   393  grow_work(MapType *t, Hmap *h, uintptr bucket)
   394  {
   395  	uintptr noldbuckets;
   396  
   397  	noldbuckets = (uintptr)1 << (h->B - 1);
   398  
   399  	// make sure we evacuate the oldbucket corresponding
   400  	// to the bucket we're about to use
   401  	evacuate(t, h, bucket & (noldbuckets - 1));
   402  
   403  	// evacuate one more oldbucket to make progress on growing
   404  	if(h->oldbuckets != nil)
   405  		evacuate(t, h, h->nevacuate);
   406  }
   407  
   408  static void
   409  hash_grow(MapType *t, Hmap *h)
   410  {
   411  	byte *old_buckets;
   412  	byte *new_buckets;
   413  	uint8 flags;
   414  
   415  	// allocate a bigger hash table
   416  	if(h->oldbuckets != nil)
   417  		runtime·throw("evacuation not done in time");
   418  	old_buckets = h->buckets;
   419  	// NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast.
   420  	if(checkgc) mstats.next_gc = mstats.heap_alloc;
   421  	new_buckets = runtime·cnewarray(t->bucket, (uintptr)1 << (h->B + 1));
   422  	flags = (h->flags & ~(Iterator | OldIterator));
   423  	if((h->flags & Iterator) != 0)
   424  		flags |= OldIterator;
   425  
   426  	// commit the grow (atomic wrt gc)
   427  	h->B++;
   428  	h->flags = flags;
   429  	h->oldbuckets = old_buckets;
   430  	h->buckets = new_buckets;
   431  	h->nevacuate = 0;
   432  
   433  	// the actual copying of the hash table data is done incrementally
   434  	// by grow_work() and evacuate().
   435  	if(docheck)
   436  		check(t, h);
   437  }
   438  
   439  // returns ptr to value associated with key *keyp, or nil if none.
   440  // if it returns non-nil, updates *keyp to point to the currently stored key.
   441  static byte*
   442  hash_lookup(MapType *t, Hmap *h, byte **keyp)
   443  {
   444  	void *key;
   445  	uintptr hash;
   446  	uintptr bucket, oldbucket;
   447  	Bucket *b;
   448  	uint8 top;
   449  	uintptr i;
   450  	bool eq;
   451  	byte *k, *k2, *v;
   452  
   453  	key = *keyp;
   454  	if(docheck)
   455  		check(t, h);
   456  	if(h->count == 0)
   457  		return nil;
   458  	hash = h->hash0;
   459  	t->key->alg->hash(&hash, t->key->size, key);
   460  	bucket = hash & (((uintptr)1 << h->B) - 1);
   461  	if(h->oldbuckets != nil) {
   462  		oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
   463  		b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   464  		if(evacuated(b)) {
   465  			b = (Bucket*)(h->buckets + bucket * h->bucketsize);
   466  		}
   467  	} else {
   468  		b = (Bucket*)(h->buckets + bucket * h->bucketsize);
   469  	}
   470  	top = hash >> (sizeof(uintptr)*8 - 8);
   471  	if(top == 0)
   472  		top = 1;
   473  	do {
   474  		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   475  			if(b->tophash[i] == top) {
   476  				k2 = IK(h, k);
   477  				t->key->alg->equal(&eq, t->key->size, key, k2);
   478  				if(eq) {
   479  					*keyp = k2;
   480  					return IV(h, v);
   481  				}
   482  			}
   483  		}
   484  		b = b->overflow;
   485  	} while(b != nil);
   486  	return nil;
   487  }
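
// NOTE: a minimal standalone sketch of the bucket choice hash_lookup
// makes above while a grow is in progress; the evacuated flag and hash
// are illustrative assumptions.
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t B = 4;          // new table has 2^4 buckets
	uintptr_t hash = 0x5b;
	int evacuated = 0;      // old bucket not yet copied

	uintptr_t bucket = hash & (((uintptr_t)1 << B) - 1);
	uintptr_t oldbucket = bucket & (((uintptr_t)1 << (B - 1)) - 1);

	// until the old bucket is evacuated it still holds the
	// authoritative data, so search it instead of the new table
	if(!evacuated)
		printf("search oldbuckets[%lu]\n", (unsigned long)oldbucket);
	else
		printf("search buckets[%lu]\n", (unsigned long)bucket);
	return 0;
}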
   488  
   489  // When an item is not found, fast versions return a pointer to this zeroed memory.
   490  #pragma dataflag RODATA
   491  static uint8 empty_value[MAXVALUESIZE];
   492  
   493  // Specialized versions of mapaccess1 and mapaccess2 for specific key types.
   494  // See ./hashmap_fast.c and ../../cmd/gc/walk.c.
   495  #define HASH_LOOKUP1 runtime·mapaccess1_fast32
   496  #define HASH_LOOKUP2 runtime·mapaccess2_fast32
   497  #define KEYTYPE uint32
   498  #define HASHFUNC runtime·algarray[AMEM32].hash
   499  #define FASTKEY(x) true
   500  #define QUICK_NE(x,y) ((x) != (y))
   501  #define QUICK_EQ(x,y) true
   502  #define SLOW_EQ(x,y) true
   503  #define MAYBE_EQ(x,y) true
   504  #include "hashmap_fast.c"
   505  
   506  #undef HASH_LOOKUP1
   507  #undef HASH_LOOKUP2
   508  #undef KEYTYPE
   509  #undef HASHFUNC
   510  #undef FASTKEY
   511  #undef QUICK_NE
   512  #undef QUICK_EQ
   513  #undef SLOW_EQ
   514  #undef MAYBE_EQ
   515  
   516  #define HASH_LOOKUP1 runtime·mapaccess1_fast64
   517  #define HASH_LOOKUP2 runtime·mapaccess2_fast64
   518  #define KEYTYPE uint64
   519  #define HASHFUNC runtime·algarray[AMEM64].hash
   520  #define FASTKEY(x) true
   521  #define QUICK_NE(x,y) ((x) != (y))
   522  #define QUICK_EQ(x,y) true
   523  #define SLOW_EQ(x,y) true
   524  #define MAYBE_EQ(x,y) true
   525  #include "hashmap_fast.c"
   526  
   527  #undef HASH_LOOKUP1
   528  #undef HASH_LOOKUP2
   529  #undef KEYTYPE
   530  #undef HASHFUNC
   531  #undef FASTKEY
   532  #undef QUICK_NE
   533  #undef QUICK_EQ
   534  #undef SLOW_EQ
   535  #undef MAYBE_EQ
   536  
   537  #ifdef GOARCH_amd64
   538  #define CHECKTYPE uint64
   539  #endif
   540  #ifdef GOARCH_386
   541  #define CHECKTYPE uint32
   542  #endif
   543  #ifdef GOARCH_arm
   544  // can't use uint32 on arm because our loads aren't aligned.
   545  // TODO: use uint32 for arm v6+?
   546  #define CHECKTYPE uint8
   547  #endif
   548  
   549  #define HASH_LOOKUP1 runtime·mapaccess1_faststr
   550  #define HASH_LOOKUP2 runtime·mapaccess2_faststr
   551  #define KEYTYPE String
   552  #define HASHFUNC runtime·algarray[ASTRING].hash
   553  #define FASTKEY(x) ((x).len < 32)
   554  #define QUICK_NE(x,y) ((x).len != (y).len)
   555  #define QUICK_EQ(x,y) ((x).str == (y).str)
   556  #define SLOW_EQ(x,y) runtime·memeq((x).str, (y).str, (x).len)
   557  #define MAYBE_EQ(x,y) (*(CHECKTYPE*)(x).str == *(CHECKTYPE*)(y).str && *(CHECKTYPE*)((x).str + (x).len - sizeof(CHECKTYPE)) == *(CHECKTYPE*)((y).str + (x).len - sizeof(CHECKTYPE)))
   558  #include "hashmap_fast.c"
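
// NOTE: a minimal standalone sketch of the cheap-to-expensive string
// comparison cascade the QUICK_NE/MAYBE_EQ/SLOW_EQ macros above encode,
// written portably with memcpy instead of unaligned loads.
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int
str_eq(const char *x, size_t xlen, const char *y, size_t ylen)
{
	uint64_t xh, xt, yh, yt;

	if(xlen != ylen)
		return 0;  // QUICK_NE: lengths differ
	if(xlen >= 8) {
		// MAYBE_EQ: reject on the first or last 8 bytes before
		// paying for a full comparison
		memcpy(&xh, x, 8); memcpy(&yh, y, 8);
		memcpy(&xt, x+xlen-8, 8); memcpy(&yt, y+ylen-8, 8);
		if(xh != yh || xt != yt)
			return 0;
	}
	return memcmp(x, y, xlen) == 0;  // SLOW_EQ
}

int
main(void)
{
	printf("%d\n", str_eq("hello, world", 12, "hello, world", 12));
	printf("%d\n", str_eq("hello, world", 12, "hello, there", 12));
	return 0;
}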
   559  
   560  static void
   561  hash_insert(MapType *t, Hmap *h, void *key, void *value)
   562  {
   563  	uintptr hash;
   564  	uintptr bucket;
   565  	uintptr i;
   566  	bool eq;
   567  	Bucket *b;
   568  	Bucket *newb;
   569  	uint8 *inserti;
   570  	byte *insertk, *insertv;
   571  	uint8 top;
   572  	byte *k, *v;
   573  	byte *kmem, *vmem;
   574  
   575  	if(docheck)
   576  		check(t, h);
   577  	hash = h->hash0;
   578  	t->key->alg->hash(&hash, t->key->size, key);
   579  	if(h->buckets == nil)
   580  		h->buckets = runtime·cnewarray(t->bucket, 1);
   581  
   582   again:
   583  	bucket = hash & (((uintptr)1 << h->B) - 1);
   584  	if(h->oldbuckets != nil)
   585  		grow_work(t, h, bucket);
   586  	b = (Bucket*)(h->buckets + bucket * h->bucketsize);
   587  	top = hash >> (sizeof(uintptr)*8 - 8);
   588  	if(top == 0)
   589  		top = 1;
   590  	inserti = nil;
   591  	insertk = nil;
   592  	insertv = nil;
   593  	while(true) {
   594  		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   595  			if(b->tophash[i] != top) {
   596  				if(b->tophash[i] == 0 && inserti == nil) {
   597  					inserti = &b->tophash[i];
   598  					insertk = k;
   599  					insertv = v;
   600  				}
   601  				continue;
   602  			}
   603  			t->key->alg->equal(&eq, t->key->size, key, IK(h, k));
   604  			if(!eq)
   605  				continue;
   606  			// already have a mapping for key.  Update it.
   607  			t->key->alg->copy(t->key->size, IK(h, k), key); // Need to update key for keys which are distinct but equal (e.g. +0.0 and -0.0)
   608  			t->elem->alg->copy(t->elem->size, IV(h, v), value);
   609  			if(docheck)
   610  				check(t, h);
   611  			return;
   612  		}
   613  		if(b->overflow == nil)
   614  			break;
   615  		b = b->overflow;
   616  	}
   617  
   618  	// did not find mapping for key.  Allocate new cell & add entry.
   619  	if(h->count >= LOAD * ((uintptr)1 << h->B) && h->count >= BUCKETSIZE) {
   620  		hash_grow(t, h);
   621  		goto again; // Growing the table invalidates everything, so try again
   622  	}
   623  
   624  	if(inserti == nil) {
   625  		// all current buckets are full, allocate a new one.
   626  		if(checkgc) mstats.next_gc = mstats.heap_alloc;
   627  		newb = runtime·cnew(t->bucket);
   628  		b->overflow = newb;
   629  		inserti = newb->tophash;
   630  		insertk = newb->data;
   631  		insertv = insertk + h->keysize * BUCKETSIZE;
   632  	}
   633  
   634  	// store new key/value at insert position
   635  	if((h->flags & IndirectKey) != 0) {
   636  		if(checkgc) mstats.next_gc = mstats.heap_alloc;
   637  		kmem = runtime·cnew(t->key);
   638  		*(byte**)insertk = kmem;
   639  		insertk = kmem;
   640  	}
   641  	if((h->flags & IndirectValue) != 0) {
   642  		if(checkgc) mstats.next_gc = mstats.heap_alloc;
   643  		vmem = runtime·cnew(t->elem);
   644  		*(byte**)insertv = vmem;
   645  		insertv = vmem;
   646  	}
   647  	t->key->alg->copy(t->key->size, insertk, key);
   648  	t->elem->alg->copy(t->elem->size, insertv, value);
   649  	*inserti = top;
   650  	h->count++;
   651  	if(docheck)
   652  		check(t, h);
   653  }
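
// NOTE: a minimal standalone sketch of the insert scan above: remember
// the first empty slot while still checking every slot for an existing
// equal key.  The tophash array in main() is an illustrative assumption.
#include <stdio.h>

int
main(void)
{
	unsigned char tophash[8] = {7, 0, 9, 0, 3, 0, 0, 0};
	unsigned char top = 9;  // tophash of the key being inserted
	int inserti = -1, i;

	for(i = 0; i < 8; i++) {
		if(tophash[i] != top) {
			if(tophash[i] == 0 && inserti < 0)
				inserti = i;  // remember first free slot
			continue;
		}
		// tophash matches: the full key comparison happens here
		printf("candidate match at slot %d\n", i);
	}
	printf("first empty slot: %d\n", inserti);
	return 0;
}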
   654  
   655  static void
   656  hash_remove(MapType *t, Hmap *h, void *key)
   657  {
   658  	uintptr hash;
   659  	uintptr bucket;
   660  	Bucket *b;
   661  	uint8 top;
   662  	uintptr i;
   663  	byte *k, *v;
   664  	bool eq;
   665  	
   666  	if(docheck)
   667  		check(t, h);
   668  	if(h->count == 0)
   669  		return;
   670  	hash = h->hash0;
   671  	t->key->alg->hash(&hash, t->key->size, key);
   672  	bucket = hash & (((uintptr)1 << h->B) - 1);
   673  	if(h->oldbuckets != nil)
   674  		grow_work(t, h, bucket);
   675  	b = (Bucket*)(h->buckets + bucket * h->bucketsize);
   676  	top = hash >> (sizeof(uintptr)*8 - 8);
   677  	if(top == 0)
   678  		top = 1;
   679  	do {
   680  		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   681  			if(b->tophash[i] != top)
   682  				continue;
   683  			t->key->alg->equal(&eq, t->key->size, key, IK(h, k));
   684  			if(!eq)
   685  				continue;
   686  
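			// Clear the key and value slots so the garbage
			// collector can reclaim anything they reference.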
   687  			if((h->flags & IndirectKey) != 0) {
   688  				*(byte**)k = nil;
   689  			} else {
   690  				t->key->alg->copy(t->key->size, k, nil);
   691  			}
   692  			if((h->flags & IndirectValue) != 0) {
   693  				*(byte**)v = nil;
   694  			} else {
   695  				t->elem->alg->copy(t->elem->size, v, nil);
   696  			}
   697  
   698  			b->tophash[i] = 0;
   699  			h->count--;
   700  			
   701  			// TODO: consolidate buckets if they are mostly empty
   702  			// can only consolidate if there are no live iterators at this size.
   703  			if(docheck)
   704  				check(t, h);
   705  			return;
   706  		}
   707  		b = b->overflow;
   708  	} while(b != nil);
   709  }
   710  
   711  // TODO: shrink the map, the same way we grow it.
   712  
   713  // If you modify hash_iter, also change cmd/gc/range.c to indicate
   714  // the size of this structure.
   715  struct hash_iter
   716  {
   717  	uint8* key; // Must be in first position.  Write nil to indicate iteration end (see cmd/gc/range.c).
   718  	uint8* value;
   719  
   720  	MapType *t;
   721  	Hmap *h;
   722  
   723  	// end point for iteration
   724  	uintptr endbucket;
   725  	bool wrapped;
   726  
   727  	// state of table at time iterator is initialized
   728  	uint8 B;
   729  	byte *buckets;
   730  
   731  	// iter state
   732  	uintptr bucket;
   733  	struct Bucket *bptr;
   734  	uintptr i;
   735  	intptr check_bucket;
   736  };
   737  
   738  // iterator state:
   739  // bucket: the current bucket ID
   740  // b: the current Bucket in the chain
   741  // i: the next offset to check in the current bucket
   742  static void
   743  hash_iter_init(MapType *t, Hmap *h, struct hash_iter *it)
   744  {
   745  	uint32 old;
   746  
   747  	if(sizeof(struct hash_iter) / sizeof(uintptr) != 11) {
   748  		runtime·throw("hash_iter size incorrect"); // see ../../cmd/gc/range.c
   749  	}
   750  	it->t = t;
   751  	it->h = h;
   752  
   753  	// grab snapshot of bucket state
   754  	it->B = h->B;
   755  	it->buckets = h->buckets;
   756  
   757  	// iterator state
   758  	it->bucket = it->endbucket = runtime·fastrand1() & (((uintptr)1 << h->B) - 1);
   759  	it->wrapped = false;
   760  	it->bptr = nil;
   761  
   762  	// Remember we have an iterator.
   763  	// Can run concurrently with another hash_iter_init().
   764  	for(;;) {
   765  		old = h->flags;
   766  		if((old&(Iterator|OldIterator)) == (Iterator|OldIterator))
   767  			break;
   768  		if(runtime·cas(&h->flags, old, old|Iterator|OldIterator))
   769  			break;
   770  	}
   771  
   772  	if(h->buckets == nil) {
   773  		// Empty map. Force the next hash_next to exit without
   774  		// evaluating h->buckets.
   775  		it->wrapped = true;
   776  	}
   777  }
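
// NOTE: a minimal standalone sketch of the random-start, wrap-around
// bucket walk set up above; the fixed start value stands in for
// runtime·fastrand1().
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t B = 3;
	uintptr_t nbuckets = (uintptr_t)1 << B;
	uintptr_t start = 5 & (nbuckets - 1); // endbucket == start bucket
	uintptr_t bucket = start;
	int wrapped = 0;

	// visit every bucket exactly once, stopping on the second
	// arrival at the starting bucket
	for(;;) {
		printf("visit bucket %lu\n", (unsigned long)bucket);
		bucket++;
		if(bucket == nbuckets) {
			bucket = 0;
			wrapped = 1;
		}
		if(bucket == start && wrapped)
			break;
	}
	return 0;
}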
   778  
   779  // initializes it->key and it->value to the next key/value pair
   780  // in the iteration, or nil if we've reached the end.
   781  static void
   782  hash_next(struct hash_iter *it)
   783  {
   784  	Hmap *h;
   785  	MapType *t;
   786  	uintptr bucket, oldbucket;
   787  	uintptr hash;
   788  	Bucket *b;
   789  	uintptr i;
   790  	intptr check_bucket;
   791  	bool eq;
   792  	byte *k, *v;
   793  	byte *rk, *rv;
   794  
   795  	h = it->h;
   796  	t = it->t;
   797  	bucket = it->bucket;
   798  	b = it->bptr;
   799  	i = it->i;
   800  	check_bucket = it->check_bucket;
   801  
   802  next:
   803  	if(b == nil) {
   804  		if(bucket == it->endbucket && it->wrapped) {
   805  			// end of iteration
   806  			it->key = nil;
   807  			it->value = nil;
   808  			return;
   809  		}
   810  		if(h->oldbuckets != nil && it->B == h->B) {
   811  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   812  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   813  			// bucket hasn't been evacuated) then we need to iterate through the old
   814  			// bucket and only return the ones that will be migrated to this bucket.
   815  			oldbucket = bucket & (((uintptr)1 << (it->B - 1)) - 1);
   816  			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
   817  			if(!evacuated(b)) {
   818  				check_bucket = bucket;
   819  			} else {
   820  				b = (Bucket*)(it->buckets + bucket * h->bucketsize);
   821  				check_bucket = -1;
   822  			}
   823  		} else {
   824  			b = (Bucket*)(it->buckets + bucket * h->bucketsize);
   825  			check_bucket = -1;
   826  		}
   827  		bucket++;
   828  		if(bucket == ((uintptr)1 << it->B)) {
   829  			bucket = 0;
   830  			it->wrapped = true;
   831  		}
   832  		i = 0;
   833  	}
   834  	k = b->data + h->keysize * i;
   835  	v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
   836  	for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
   837  		if(b->tophash[i] != 0) {
   838  			if(check_bucket >= 0) {
   839  				// Special case: iterator was started during a grow and the
   840  				// grow is not done yet.  We're working on a bucket whose
   841  				// oldbucket has not been evacuated yet.  So we iterate
   842  				// through the oldbucket, skipping any keys that will go
   843  				// to the other new bucket (each oldbucket expands to two
   844  				// buckets during a grow).
   845  				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
   846  				if(!eq) {
   847  					// Hash is meaningless if k != k (NaNs).  Return all
   848  					// NaNs during the first of the two new buckets.
   849  					if(bucket >= ((uintptr)1 << (it->B - 1))) {
   850  						continue;
   851  					}
   852  				} else {
   853  					// If the item in the oldbucket is not destined for
   854  					// the current new bucket in the iteration, skip it.
   855  					hash = h->hash0;
   856  					t->key->alg->hash(&hash, t->key->size, IK(h, k));
   857  					if((hash & (((uintptr)1 << it->B) - 1)) != check_bucket) {
   858  						continue;
   859  					}
   860  				}
   861  			}
   862  			if(!evacuated(b)) {
   863  				// this is the golden data, we can return it.
   864  				it->key = IK(h, k);
   865  				it->value = IV(h, v);
   866  			} else {
   867  				// The hash table has grown since the iterator was started.
   868  				// The golden data for this key is now somewhere else.
   869  				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
   870  				if(eq) {
   871  					// Check the current hash table for the data.
   872  					// This code handles the case where the key
   873  					// has been deleted, updated, or deleted and reinserted.
   874  					// NOTE: we need to regrab the key as it has potentially been
   875  					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   876  					rk = IK(h, k);
   877  					rv = hash_lookup(t, it->h, &rk);
   878  					if(rv == nil)
   879  						continue; // key has been deleted
   880  					it->key = rk;
   881  					it->value = rv;
   882  				} else {
   883  					// if key!=key then the entry can't be deleted or
   884  					// updated, so we can just return it.  That's lucky for
   885  					// us because when key!=key we can't look it up
   886  					// successfully in the current table.
   887  					it->key = IK(h, k);
   888  					it->value = IV(h, v);
   889  				}
   890  			}
   891  			it->bucket = bucket;
   892  			it->bptr = b;
   893  			it->i = i + 1;
   894  			it->check_bucket = check_bucket;
   895  			return;
   896  		}
   897  	}
   898  	b = overflowptr(b);
   899  	i = 0;
   900  	goto next;
   901  }
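
// NOTE: a minimal standalone sketch of the check_bucket filter above:
// while yielding new bucket check_bucket from a not-yet-evacuated old
// bucket, keep only the entries whose hashes will migrate there.  The
// hashes in main() are illustrative assumptions.
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t B = 4;                // iterating the doubled 2^B table
	uintptr_t check_bucket = 11;  // new bucket being yielded
	uintptr_t hashes[] = {0x03, 0x0b, 0x13}; // all land in old bucket 3
	int i;

	for(i = 0; i < 3; i++) {
		// keep the entry only if its low B bits send it to
		// check_bucket; its twin bucket pass yields the rest
		if((hashes[i] & (((uintptr_t)1 << B) - 1)) == check_bucket)
			printf("yield entry %d\n", i);
		else
			printf("skip entry %d\n", i);
	}
	return 0;
}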
   902  
   903  //
   904  // interfaces to the Go runtime
   905  //
   906  
   907  void
   908  reflect·ismapkey(Type *typ, bool ret)
   909  {
   910  	ret = typ != nil && typ->alg->hash != runtime·nohash;
   911  	FLUSH(&ret);
   912  }
   913  
   914  Hmap*
   915  runtime·makemap_c(MapType *typ, int64 hint)
   916  {
   917  	Hmap *h;
   918  	Type *key;
   919  
   920  	key = typ->key;
   921  	
   922  	if(sizeof(Hmap) > 48)
   923  		runtime·panicstring("hmap too large");
   924  
   925  	if(hint < 0 || (int32)hint != hint)
   926  		runtime·panicstring("makemap: size out of range");
   927  
   928  	if(key->alg->hash == runtime·nohash)
   929  		runtime·throw("runtime.makemap: unsupported map key type");
   930  
   931  	h = runtime·cnew(typ->hmap);
   932  	hash_init(typ, h, hint);
   933  
   934  	// these calculations are compiler dependent.
   935  	// figure out offsets of map call arguments.
   936  
   937  	if(debug) {
   938  		runtime·printf("makemap: map=%p; keysize=%p; valsize=%p; keyalg=%p; valalg=%p\n",
   939  			       h, key->size, typ->elem->size, key->alg, typ->elem->alg);
   940  	}
   941  
   942  	return h;
   943  }
   944  
   945  // makemap(key, val *Type, hint int64) (hmap *map[any]any);
   946  void
   947  runtime·makemap(MapType *typ, int64 hint, Hmap *ret)
   948  {
   949  	ret = runtime·makemap_c(typ, hint);
   950  	FLUSH(&ret);
   951  }
   952  
   953  // For reflect:
   954  //	func makemap(Type *mapType) (hmap *map)
   955  void
   956  reflect·makemap(MapType *t, Hmap *ret)
   957  {
   958  	ret = runtime·makemap_c(t, 0);
   959  	FLUSH(&ret);
   960  }
   961  
   962  void
   963  runtime·mapaccess(MapType *t, Hmap *h, byte *ak, byte *av, bool *pres)
   964  {
   965  	byte *res;
   966  	Type *elem;
   967  
   968  	elem = t->elem;
   969  	if(h == nil || h->count == 0) {
   970  		elem->alg->copy(elem->size, av, nil);
   971  		*pres = false;
   972  		return;
   973  	}
   974  
   975  	res = hash_lookup(t, h, &ak);
   976  
   977  	if(res != nil) {
   978  		*pres = true;
   979  		elem->alg->copy(elem->size, av, res);
   980  	} else {
   981  		*pres = false;
   982  		elem->alg->copy(elem->size, av, nil);
   983  	}
   984  }
   985  
   986  // mapaccess1(hmap *map[any]any, key any) (val any);
   987  #pragma textflag NOSPLIT
   988  void
   989  runtime·mapaccess1(MapType *t, Hmap *h, ...)
   990  {
   991  	byte *ak, *av;
   992  	byte *res;
   993  
   994  	if(raceenabled && h != nil)
   995  		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess1);
   996  
   997  	ak = (byte*)(&h + 1);
   998  	av = ak + ROUND(t->key->size, Structrnd);
   999  
  1000  	if(h == nil || h->count == 0) {
  1001  		t->elem->alg->copy(t->elem->size, av, nil);
  1002  	} else {
  1003  		res = hash_lookup(t, h, &ak);
  1004  		t->elem->alg->copy(t->elem->size, av, res);
  1005  	}
  1006  
  1007  	if(debug) {
  1008  		runtime·prints("runtime.mapaccess1: map=");
  1009  		runtime·printpointer(h);
  1010  		runtime·prints("; key=");
  1011  		t->key->alg->print(t->key->size, ak);
  1012  		runtime·prints("; val=");
  1013  		t->elem->alg->print(t->elem->size, av);
  1014  		runtime·prints("\n");
  1015  	}
  1016  }
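
// NOTE: a minimal standalone sketch of the argument-frame layout used
// above.  ROUND here is the assumed definition from runtime.h; the
// 5-byte key and 8-byte Structrnd are illustrative.
#include <stddef.h>
#include <stdio.h>

#define ROUND(n, a) (((n) + (a) - 1) & ~((a) - 1))

int
main(void)
{
	size_t keysize = 5;    // hypothetical key type size
	size_t structrnd = 8;  // assumed max argument alignment

	// the value slot starts after the key, rounded up so it stays
	// properly aligned within the argument frame
	printf("value at offset %zu\n", (size_t)ROUND(keysize, structrnd));
	return 0;
}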
  1017  
  1018  // mapaccess2(hmap *map[any]any, key any) (val any, pres bool);
  1019  #pragma textflag NOSPLIT
  1020  void
  1021  runtime·mapaccess2(MapType *t, Hmap *h, ...)
  1022  {
  1023  	byte *ak, *av, *ap;
  1024  
  1025  	if(raceenabled && h != nil)
  1026  		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess2);
  1027  
  1028  	ak = (byte*)(&h + 1);
  1029  	av = ak + ROUND(t->key->size, Structrnd);
  1030  	ap = av + t->elem->size;
  1031  
  1032  	runtime·mapaccess(t, h, ak, av, ap);
  1033  
  1034  	if(debug) {
  1035  		runtime·prints("runtime.mapaccess2: map=");
  1036  		runtime·printpointer(h);
  1037  		runtime·prints("; key=");
  1038  		t->key->alg->print(t->key->size, ak);
  1039  		runtime·prints("; val=");
  1040  		t->elem->alg->print(t->elem->size, av);
  1041  		runtime·prints("; pres=");
  1042  		runtime·printbool(*ap);
  1043  		runtime·prints("\n");
  1044  	}
  1045  }
  1046  
  1047  // For reflect:
  1048  //	func mapaccess(t type, h map, key iword) (val iword, pres bool)
  1049  // where an iword is the same word an interface value would use:
  1050  // the actual data if it fits, or else a pointer to the data.
  1051  void
  1052  reflect·mapaccess(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
  1053  {
  1054  	byte *ak, *av;
  1055  
  1056  	if(raceenabled && h != nil)
  1057  		runtime·racereadpc(h, runtime·getcallerpc(&t), reflect·mapaccess);
  1058  
  1059  	if(t->key->size <= sizeof(key))
  1060  		ak = (byte*)&key;
  1061  	else
  1062  		ak = (byte*)key;
  1063  	val = 0;
  1064  	pres = false;
  1065  	if(t->elem->size <= sizeof(val))
  1066  		av = (byte*)&val;
  1067  	else {
  1068  		av = runtime·mal(t->elem->size);
  1069  		val = (uintptr)av;
  1070  	}
  1071  	runtime·mapaccess(t, h, ak, av, &pres);
  1072  	FLUSH(&val);
  1073  	FLUSH(&pres);
  1074  }
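
// NOTE: a minimal standalone sketch of the iword convention described
// above: a value small enough rides in the word itself, anything larger
// travels as a pointer.  The values in main() are illustrative.
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t word = 0;
	uint32_t small = 42, back;
	static char big[64] = "too big for one word";

	// fits in a word: the data itself is the word
	memcpy(&word, &small, sizeof(small));
	memcpy(&back, &word, sizeof(back));
	printf("inline: %u\n", back);

	// doesn't fit: the word holds a pointer to the data
	word = (uintptr_t)big;
	printf("indirect: %s\n", (char*)word);
	return 0;
}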
  1075  
  1076  void
  1077  runtime·mapassign(MapType *t, Hmap *h, byte *ak, byte *av)
  1078  {
  1079  	if(h == nil)
  1080  		runtime·panicstring("assignment to entry in nil map");
  1081  
  1082  	if(av == nil) {
  1083  		hash_remove(t, h, ak);
  1084  	} else {
  1085  		hash_insert(t, h, ak, av);
  1086  	}
  1087  
  1088  	if(debug) {
  1089  		runtime·prints("mapassign: map=");
  1090  		runtime·printpointer(h);
  1091  		runtime·prints("; key=");
  1092  		t->key->alg->print(t->key->size, ak);
  1093  		runtime·prints("; val=");
  1094  		t->elem->alg->print(t->elem->size, av);
  1095  		runtime·prints("\n");
  1096  	}
  1097  }
  1098  
  1099  // mapassign1(mapType *type, hmap *map[any]any, key any, val any);
  1100  #pragma textflag NOSPLIT
  1101  void
  1102  runtime·mapassign1(MapType *t, Hmap *h, ...)
  1103  {
  1104  	byte *ak, *av;
  1105  
  1106  	if(h == nil)
  1107  		runtime·panicstring("assignment to entry in nil map");
  1108  
  1109  	if(raceenabled)
  1110  		runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapassign1);
  1111  	ak = (byte*)(&h + 1);
  1112  	av = ak + ROUND(t->key->size, t->elem->align);
  1113  
  1114  	runtime·mapassign(t, h, ak, av);
  1115  }
  1116  
  1117  // mapdelete(mapType *type, hmap *map[any]any, key any)
  1118  #pragma textflag NOSPLIT
  1119  void
  1120  runtime·mapdelete(MapType *t, Hmap *h, ...)
  1121  {
  1122  	byte *ak;
  1123  
  1124  	if(h == nil)
  1125  		return;
  1126  
  1127  	if(raceenabled)
  1128  		runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapdelete);
  1129  	ak = (byte*)(&h + 1);
  1130  	runtime·mapassign(t, h, ak, nil);
  1131  
  1132  	if(debug) {
  1133  		runtime·prints("mapdelete: map=");
  1134  		runtime·printpointer(h);
  1135  		runtime·prints("; key=");
  1136  		t->key->alg->print(t->key->size, ak);
  1137  		runtime·prints("\n");
  1138  	}
  1139  }
  1140  
  1141  // For reflect:
  1142  //	func mapassign(t type, h map, key, val iword, pres bool)
  1143  // where an iword is the same word an interface value would use:
  1144  // the actual data if it fits, or else a pointer to the data.
  1145  void
  1146  reflect·mapassign(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
  1147  {
  1148  	byte *ak, *av;
  1149  
  1150  	if(h == nil)
  1151  		runtime·panicstring("assignment to entry in nil map");
  1152  	if(raceenabled)
  1153  		runtime·racewritepc(h, runtime·getcallerpc(&t), reflect·mapassign);
  1154  	if(t->key->size <= sizeof(key))
  1155  		ak = (byte*)&key;
  1156  	else
  1157  		ak = (byte*)key;
  1158  	if(t->elem->size <= sizeof(val))
  1159  		av = (byte*)&val;
  1160  	else
  1161  		av = (byte*)val;
  1162  	if(!pres)
  1163  		av = nil;
  1164  	runtime·mapassign(t, h, ak, av);
  1165  }
  1166  
  1167  // mapiterinit(mapType *type, hmap *map[any]any, hiter *any);
  1168  void
  1169  runtime·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
  1170  {
  1171  	if(h == nil || h->count == 0) {
  1172  		it->key = nil;
  1173  		return;
  1174  	}
  1175  	if(raceenabled)
  1176  		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapiterinit);
  1177  	hash_iter_init(t, h, it);
  1178  	hash_next(it);
  1179  	if(debug) {
  1180  		runtime·prints("runtime.mapiterinit: map=");
  1181  		runtime·printpointer(h);
  1182  		runtime·prints("; iter=");
  1183  		runtime·printpointer(it);
  1184  		runtime·prints("; key=");
  1185  		runtime·printpointer(it->key);
  1186  		runtime·prints("\n");
  1187  	}
  1188  }
  1189  
  1190  // For reflect:
  1191  //	func mapiterinit(h map) (it iter)
  1192  void
  1193  reflect·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
  1194  {
  1195  	it = runtime·mal(sizeof *it);
  1196  	FLUSH(&it);
  1197  	runtime·mapiterinit(t, h, it);
  1198  }
  1199  
  1200  // mapiternext(hiter *any);
  1201  void
  1202  runtime·mapiternext(struct hash_iter *it)
  1203  {
  1204  	if(raceenabled)
  1205  		runtime·racereadpc(it->h, runtime·getcallerpc(&it), runtime·mapiternext);
  1206  
  1207  	hash_next(it);
  1208  	if(debug) {
  1209  		runtime·prints("runtime.mapiternext: iter=");
  1210  		runtime·printpointer(it);
  1211  		runtime·prints("; key=");
  1212  		runtime·printpointer(it->key);
  1213  		runtime·prints("\n");
  1214  	}
  1215  }
  1216  
  1217  // For reflect:
  1218  //	func mapiternext(it iter)
  1219  void
  1220  reflect·mapiternext(struct hash_iter *it)
  1221  {
  1222  	runtime·mapiternext(it);
  1223  }
  1224  
  1225  // mapiter1(hiter *any) (key any);
  1226  #pragma textflag NOSPLIT
  1227  void
  1228  runtime·mapiter1(struct hash_iter *it, ...)
  1229  {
  1230  	byte *ak, *res;
  1231  	Type *key;
  1232  
  1233  	ak = (byte*)(&it + 1);
  1234  
  1235  	res = it->key;
  1236  	if(res == nil)
  1237  		runtime·throw("runtime.mapiter1: key:val nil pointer");
  1238  
  1239  	key = it->t->key;
  1240  	key->alg->copy(key->size, ak, res);
  1241  
  1242  	if(debug) {
  1243  		runtime·prints("mapiter1: iter=");
  1244  		runtime·printpointer(it);
  1245  		runtime·prints("; map=");
  1246  		runtime·printpointer(it->h);
  1247  		runtime·prints("\n");
  1248  	}
  1249  }
  1250  
  1251  bool
  1252  runtime·mapiterkey(struct hash_iter *it, void *ak)
  1253  {
  1254  	byte *res;
  1255  	Type *key;
  1256  
  1257  	res = it->key;
  1258  	if(res == nil)
  1259  		return false;
  1260  	key = it->t->key;
  1261  	key->alg->copy(key->size, ak, res);
  1262  	return true;
  1263  }
  1264  
  1265  // For reflect:
  1266  //	func mapiterkey(h map) (key iword, ok bool)
  1267  // where an iword is the same word an interface value would use:
  1268  // the actual data if it fits, or else a pointer to the data.
  1269  void
  1270  reflect·mapiterkey(struct hash_iter *it, uintptr key, bool ok)
  1271  {
  1272  	byte *res;
  1273  	Type *tkey;
  1274  
  1275  	key = 0;
  1276  	ok = false;
  1277  	res = it->key;
  1278  	if(res != nil) {
  1279  		tkey = it->t->key;
  1280  		if(tkey->size <= sizeof(key))
  1281  			tkey->alg->copy(tkey->size, (byte*)&key, res);
  1282  		else
  1283  			key = (uintptr)res;
  1284  		ok = true;
  1285  	}
  1286  	FLUSH(&key);
  1287  	FLUSH(&ok);
  1288  }
  1289  
  1290  // For reflect:
  1291  //	func maplen(h map) (len int)
  1292  // Like len(m) in the actual language, we treat the nil map as length 0.
  1293  void
  1294  reflect·maplen(Hmap *h, intgo len)
  1295  {
  1296  	if(h == nil)
  1297  		len = 0;
  1298  	else {
  1299  		len = h->count;
  1300  		if(raceenabled)
  1301  			runtime·racereadpc(h, runtime·getcallerpc(&h), reflect·maplen);
  1302  	}
  1303  	FLUSH(&len);
  1304  }
  1305  
  1306  // mapiter2(hiter *any) (key any, val any);
  1307  #pragma textflag NOSPLIT
  1308  void
  1309  runtime·mapiter2(struct hash_iter *it, ...)
  1310  {
  1311  	byte *ak, *av, *res;
  1312  	MapType *t;
  1313  
  1314  	t = it->t;
  1315  	ak = (byte*)(&it + 1);
  1316  	av = ak + ROUND(t->key->size, t->elem->align);
  1317  
  1318  	res = it->key;
  1319  	if(res == nil)
  1320  		runtime·throw("runtime.mapiter2: key:val nil pointer");
  1321  
  1322  	t->key->alg->copy(t->key->size, ak, res);
  1323  	t->elem->alg->copy(t->elem->size, av, it->value);
  1324  
  1325  	if(debug) {
  1326  		runtime·prints("mapiter2: iter=");
  1327  		runtime·printpointer(it);
  1328  		runtime·prints("; map=");
  1329  		runtime·printpointer(it->h);
  1330  		runtime·prints("\n");
  1331  	}
  1332  }