github.com/moontrade/nogc@v0.1.7/tinygc.go

//go:build tinygo && gc.provided

package nogc

import (
	"time"
	"unsafe"
)

const (
	//gc_WHITE       uintptr = 1 << (bits.UintSize-1)
	//gc_BLACK       uintptr = 1 << (bits.UintSize-2)
	//gc_COLOR_MASK = gc_WHITE | gc_BLACK
	gc_WHITE uint32 = 0
	gc_BLACK uint32 = 1
	gc_DEBUG        = false
	gc_TRACE        = false

	// Overhead of a garbage collector object. Excludes memory manager block overhead.
	gc_ObjectOverhead = unsafe.Sizeof(gcObject{})
	//gc_ObjectOverhead = unsafe.Sizeof(gcObject{}) - _TLSFBlockOverhead + unsafe.Sizeof(uintptr(0))

	// Maximum size of a garbage collector object's payload.
	gc_ObjectMaxSize = (1 << 30) - gc_ObjectOverhead
)

//type GCObject uintptr
//
//func (o GCObject) Ptr() Pointer {
//	return Pointer(o)
//}

// GC is a two-color mark & sweep collector on top of a Two-Level Segregated Fit (Heap)
// allocator built for TinyGo. It offers similar features to TinyGo's internal extalloc
// GC, except GC uses a robinhood hashset instead of a treap structure and needs no
// linked list: a single linear allocation backs the hashset, and both colors reside
// in the same set. It should provide faster scan performance, an advantage that
// becomes more noticeable as the scan size increases. Large objects are better
// allocated manually, directly in the Heap allocator.
//
// Given the constraints of TinyGo, this is a conservative collector. However, GC
// is tuned for more manual use of the underlying Heap memory allocator. Heap is an O(1)
// time allocator and a great fit for real-time embedded systems. GC complements
// it with a simple design and extremely quick operation for small object graphs.
//
// Large object graphs should be manually allocated, using the various tools available
// like AutoFree and Ref containers. GC also supports the manual free provided by
// the TinyGo compiler; TinyGo LLVM coroutines utilize this feature for internal coroutine
// lifecycle objects. It's quite simple to write Go programs with goroutines and channels
// that never require a GC cycle / sweep.
//
// The goal is pause times under 10 microseconds. GC aims to complete as quickly
// as possible, but that largely depends on the application minimizing root scanning
// by manually allocating globals where possible, which effectively removes that
// graph from the marking phase.
//
// Relatively large TinyGo object graphs should still complete in under 50 microseconds.
type gc struct {
	//allocs uintptr
	allocs PointerSet
	//set1   PointerSet
	//set2   PointerSet

	first, last                       uintptr
	maxPointersToScan                 uintptr
	sleepQueue, runqueue, currentTask uintptr
	heapStart, heapEnd                uintptr
	globalsStart, globalsEnd          uintptr
	envArgs                           uintptr
	envArgsPointer                    uintptr
	initGlobals                       bool
	//markGlobals              markFn
	//markStack                markFn
	GCStats
}

var collector *gc
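
// Illustrative lifecycle sketch (not part of this file's API): how the pieces
// above fit together, assuming the TinyGo runtime hooks route allocations
// through the collector.
//
//	var g gc
//	initGC(&g, 64)  // hashset initially sized for 64 tracked pointers
//	p := g.New(24)  // 24-byte managed payload; the header sits just below p
//	g.Collect()     // mark roots, mark graph, sweep unmarked (white) objects
//	g.Free(p)       // manual free reclaims immediately and skips the sweep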

type markFn func()

// GCStats provides all the monitoring metrics needed to see how the GC
// is operating and performing.
type GCStats struct {
	Started           int64   // Epoch in nanos when GC was first started
	Cycles            int64   // Number of times GC collect has run
	Live              int64   // Number of live objects
	TotalAllocs       int64   // Count of all allocations created
	TotalBytes        int64   // Sum of all allocation's size in bytes
	Frees             int64   // Count of times an allocation was freed instead of swept
	FreedBytes        int64   // Sum of all freed allocation's size in bytes
	Sweeps            int64   // Count of times an allocation was swept instead of freed
	SweepBytes        int64   // Sum of all swept allocation's size in bytes
	SweepTime         int64   // Sum of all time in nanos spent during the Sweep phase
	SweepTimeMin      int64   // Minimum time in nanos spent during a single Sweep phase
	SweepTimeMax      int64   // Maximum time in nanos spent during a single Sweep phase
	SweepTimeAvg      int64   // Average time in nanos spent during a single Sweep phase
	Roots             int64   // Number of roots scanned
	RootsMin          int64   // Minimum number of roots scanned in a single Mark Roots phase
	RootsMax          int64   // Maximum number of roots scanned in a single Mark Roots phase
	RootsTimeMin      int64   // Minimum time in nanos spent during a single Mark Roots phase
	RootsTimeMax      int64   // Maximum time in nanos spent during a single Mark Roots phase
	RootsTimeAvg      int64   // Average time in nanos spent during a single Mark Roots phase
	GraphDepth        int64   // Recursion depth reached while marking the object graph
	GraphMinDepth     int64   // Minimum graph depth reached in a single Mark Graph phase
	GraphMaxDepth     int64   // Maximum graph depth reached in a single Mark Graph phase
	GraphAvgDepth     int64   // Average graph depth reached in a single Mark Graph phase
	GraphTimeMin      int64   // Minimum time in nanos spent during a single Mark Graph phase
	GraphTimeMax      int64   // Maximum time in nanos spent during a single Mark Graph phase
	GraphTimeAvg      int64   // Average time in nanos spent during a single Mark Graph phase
	TotalTime         int64   // Sum of all time in nanos spent doing GC collect
	MinTime           int64   // Minimum time in nanos spent during a single GC collect
	MaxTime           int64   // Maximum time in nanos spent during a single GC collect
	AvgTime           int64   // Average time in nanos spent during a single GC collect
	LastMarkRootsTime int64   // Time in nanos spent during the most recent GC collect "Mark Roots" phase
	LastMarkGraphTime int64   // Time in nanos spent during the most recent GC collect "Mark Graph" phase
	LastSweepTime     int64   // Time in nanos spent during the most recent GC collect "Sweep" phase
	LastGCTime        int64   // Time in nanos spent during the most recent GC collect
	LastSweep         int64   // Number of allocations that were swept during the most recent GC collect "Sweep" phase
	LastSweepBytes    int64   // Number of bytes reclaimed during the most recent GC collect "Sweep" phase
	LiveBytes         uintptr // Sum of all live allocation's size in bytes
}
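
// Note that Collect below does not yet maintain the min/max/avg fields; a
// caller can derive averages from the raw counters itself. A sketch:
//
//	stats := collector.GCStats
//	if stats.Cycles > 0 {
//		avgNanos := stats.TotalTime / stats.Cycles // mean time per collect
//		println("avg GC time:", toMicros(avgNanos), microsSuffix)
//	}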

func (s *GCStats) Print() {
	println("GC cycle")
	println("\tlive:				", uint(s.Live))
	println("\tlive bytes:			", uint(s.LiveBytes))
	println("\tfrees:				", uint(s.Frees))
	println("\tallocs:				", uint(s.TotalAllocs))
	println("\tfreed bytes:		", uint(s.FreedBytes))
	println("\tsweep bytes:		", uint(s.SweepBytes))
	println("\ttotal bytes:		", uint(s.TotalBytes))
	println("\tlast sweep:			", uint(s.LastSweep))
	println("\tlast sweep bytes:	", uint(s.LastSweepBytes))
	println("\tlast mark time:		", toMicros(s.LastMarkRootsTime), microsSuffix)
	println("\tlast graph time:	", toMicros(s.LastMarkGraphTime), microsSuffix)
	println("\tlast sweep time:	", toMicros(s.LastSweepTime), microsSuffix)
	println("\tlast GC time:		", toMicros(s.LastGCTime), microsSuffix)
}

func PrintGCStats() {
	collector.GCStats.Print()
}

func PrintDebugInfo() {
	println("gc_ObjectOverhead	", uint(gc_ObjectOverhead))
	println("gc_ObjectMaxSize		", uint(gc_ObjectMaxSize))
}

//goland:noinspection ALL
func initGC(
	g *gc,
	initialCapacity uintptr,
	//markGlobals, markStack markFn,
) {
	//g := (*gc)(unsafe.Pointer(AllocZeroed(unsafe.Sizeof(gc{}))))
	//g.set1 = NewPointerSet(initialCapacity)
	//g.set2 = NewPointerSet(initialCapacity)
	//g.allocs = uintptr(unsafe.Pointer(&g.set1))
	g.allocs = NewPointerSet(initialCapacity)
	g.first = ^uintptr(0)
	g.last = 0
	//g.markGlobals = markGlobals
	//g.markStack = markStack
	g.Started = time.Now().UnixNano()
}

////goland:noinspection ALL
//func newGC(
//	initialCapacity uintptr,
//	markGlobals, markStack markFn,
//) gc {
//	g := (*gc)(unsafe.Pointer(AllocZeroed(unsafe.Sizeof(gc{}))))
//	g.allocs = NewPointerSet(initialCapacity)
//	g.first = ^uintptr(0)
//	g.last = 0
//	g.markGlobals = markGlobals
//	g.markStack = markStack
//	g.Started = time.Now().UnixNano()
//	return g
//}

// gcObject represents a managed object in memory: a header immediately followed by the object's data.
type gcObject struct {
	//cap    uint32
	color  uint32 // Mark color: gc_WHITE (unmarked) or gc_BLACK (reachable).
	rtSize uint32 // Runtime size of the payload in bytes.
	//tag    uint32
	//_      uint32
	//_      uint32
}
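
// Layout, for reference: gc_ObjectOverhead is unsafe.Sizeof(gcObject{}), i.e.
// 8 bytes for the two uint32 fields above, and New returns a pointer just past
// the header, so the header is always recovered as ptr - gc_ObjectOverhead:
//
//	| color (4B) | rtSize (4B) | payload (rtSize bytes) |
//	^ block start              ^ pointer handed to callers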

func (g *gc) isReserved(root uintptr) bool {
	if root == 0 {
		return true
	}
	switch root {
	case g.runqueue:
		println("runqueue")
		return true
	case g.sleepQueue:
		println("sleepQueue")
		return false
	case g.currentTask:
		println("currentTask")
		return true
	case g.heapStart:
		println("heapStart")
		return true
	case g.heapEnd:
		println("heapEnd")
		return true
	case g.globalsStart:
		println("globalsStart")
		return true
	case g.globalsEnd:
		println("globalsEnd")
		return true
	case g.envArgs:
		println("envArgs")
		return true
	case g.envArgsPointer:
		println("envArgsPointer")
		return true
	}
	if root == uintptr(unsafe.Pointer(&collector)) {
		println("isCollector")
		return true
	}
	if root == uintptr(unsafe.Pointer(&g.allocs)) {
		//println("isPointerSet")
		return true
	}
	if root >= uintptr(unsafe.Pointer(&collector.allocs)) && root <= uintptr(unsafe.Pointer(&collector.GCStats.LiveBytes)) {
		//println("is gc{}")
		return true
	}
	if root == g.allocs.items {
		//println("isPointerSet.items", uint(root), uint(g.allocs.items))
		return true
	}
	//if root == g.sleepQueue {
	//	//println("isSleepQueue")
	//	return true
	//}
	// Note: the switch above already returns for g.runqueue, so no second
	// check is needed here.
	return false
}

// markRoot marks a single pointer as a root.
//goland:noinspection ALL
func (g *gc) markRoot(root uintptr) {
	//if root == 0 {
	//	return
	//}
	//if g.isReserved(root) {
	//	//println("markRoot isReserved", uint(root))
	//	//return
	//}
	//root = root.Add(-int(gc_ObjectOverhead))
	//root -= gc_ObjectOverhead
	//if root < g.first || root > g.last {
	//	return
	//}
	if g.allocs.Has(root) {
		if gc_TRACE {
			obj := (*gcObject)(Pointer(root - gc_ObjectOverhead).Unsafe())
			println("markRoot", uint(root), "size", obj.rtSize)
		}
		// Mark as gc_BLACK (markRecursive colors the object and its graph).
		//(*(*gcObject)(unsafe.Pointer(root - gc_ObjectOverhead))).color = gc_BLACK
		//g.markGraph(root)
		g.markRecursive(root, 0)
	}
	//else if g.allocs.Has(root + gc_ObjectOverhead) {
	//	(*(*gcObject)(unsafe.Pointer(root))).color = gc_BLACK
	//	println("!!!!!!!!!")
	//}
}

// markRoots scans a block of contiguous memory for root pointers.
//goland:noinspection ALL
func (g *gc) markRoots(start, end uintptr) {
	if gc_TRACE {
		println("MarkRoots", uint(start), uint(end))
	}

	//if end <= start {
	//	return
	//}
	//if start == 0 || end == 0 {
	//	return
	//}

	//if g.isReserved(start) {
	//	println("!!!!!!!!")
	//	(*gcObject)(unsafe.Pointer(start)).rtSize = gc_BLACK
	//	return
	//}

	// Adjust to keep within the GC range.
	//println("MarkRoots", uint(start), uint(end))

	// Align start and end pointers.
	//start = (uintptr(start) + unsafe.Alignof(unsafe.Pointer(nil)) - 1) &^ (unsafe.Alignof(unsafe.Pointer(nil)) - 1)
	//end &^= unsafe.Alignof(unsafe.Pointer(nil)) - 1

	// Reduce the end bound to avoid reading too far on platforms where pointer alignment is smaller than pointer size.
	// If the size of the range is 0, then end will be slightly below start after this.
	end -= unsafe.Sizeof(end) - unsafe.Alignof(end)

	// Mark all pointers.
	for ptr := start; ptr < end; ptr += unsafe.Alignof(ptr) {
		p := *(*uintptr)(unsafe.Pointer(ptr))

		//if g.allocs.Has(p) {
		//	// Mark as gc_BLACK
		//	(*(*gcObject)(unsafe.Pointer(p - gc_ObjectOverhead))).color = gc_BLACK
		//}
		g.markRoot(p)
	}
}
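
// A worked example for the end-bound adjustment used in markRoots and the
// marking functions below: on a target where unsafe.Sizeof(uintptr(0)) == 8
// but unsafe.Alignof(uintptr(0)) == 4, scanning [0x1000, 0x1010) in 4-byte
// steps would dereference 8 bytes at 0x100C, reading past the end; shrinking
// end by 8-4 = 4 makes 0x1008 the last load. On targets where pointer size
// equals pointer alignment (most 32-bit TinyGo targets) the adjustment is zero.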

//goland:noinspection ALL
func (g *gc) markRecursive(root uintptr, depth int) {
	// Are we too deep?
	if depth > 256 {
		return
	}
	if gc_TRACE {
		println("markRecursive", uint(root), "depth", depth)
	}
	obj := (*gcObject)(unsafe.Pointer(root - gc_ObjectOverhead))
	if obj.color == gc_WHITE {
		obj.color = gc_BLACK
		//if g.isReserved(root) {
		//	//return
		//}

		if gc_TRACE {
			println(uint(root), "color", obj.color, "rtSize", obj.rtSize, "size", uint(obj.rtSize))
		}
		//if uintptr(obj.rtSize)%unsafe.Sizeof(uintptr(0)) != 0 {
		//	return
		//}
		start := root // + gc_ObjectOverhead
		end := start + uintptr(obj.rtSize)

		//start = (start + unsafe.Alignof(unsafe.Pointer(nil)) - 1) &^ (unsafe.Alignof(unsafe.Pointer(nil)) - 1)
		//end &^= unsafe.Alignof(unsafe.Pointer(nil)) - 1

		// Reduce the end bound to avoid reading too far on platforms where pointer alignment is smaller than pointer size.
		// If the size of the range is 0, then end will be slightly below start after this.
		end -= unsafe.Sizeof(end) - unsafe.Alignof(end)

		//pointersToScan := (uint(end) - uint(start)) / uint(unsafe.Sizeof(unsafe.Pointer(nil)))
		//if pointersToScan > 256 {
		//	println("markRecursive -> huge object found", uint(pointersToScan), "pointers to scan")
		//	return
		//}

		for ptr := start; ptr < end; ptr += unsafe.Alignof(ptr) {
			p := *(*uintptr)(unsafe.Pointer(ptr)) // - gc_ObjectOverhead
			//if p < g.first || p > g.last {
			//	continue
			//}
			if !g.allocs.Has(p) {
				continue
			}
			g.markRecursive(p, depth+1)
		}
	}
}
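
// Note on the depth cutoff: chains deeper than 256 objects are not fully
// marked in a single pass, so e.g. the tail of a 300-node linked list that is
// reachable only through its head would remain gc_WHITE and be swept. Keeping
// managed graphs shallow (or allocating deep structures manually) avoids this.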

//goland:noinspection ALL
func (g *gc) markGraph(root uintptr) {
	if !g.allocs.Has(root) {
		if gc_TRACE {
			println("markGraph: not tracked", uint(root))
		}
		return
	}
	var (
		obj   = (*gcObject)(unsafe.Pointer(root - gc_ObjectOverhead))
		start = root // + gc_ObjectOverhead
		end   = start + uintptr(obj.rtSize)
	)

	// Only propagate from objects already marked reachable (gc_BLACK);
	// unmarked objects are left for the sweep.
	if obj.color == gc_WHITE {
		if gc_TRACE {
			println("\tWHITE", uint(obj.rtSize))
		}
		return
	}
	if gc_TRACE {
		println("\tBLACK", uint(obj.rtSize))
	}

	//obj.color = gc_BLACK

	// Reduce the end bound to avoid reading too far on platforms where pointer alignment is smaller than pointer size.
	// If the size of the range is 0, then end will be slightly below start after this.
	end -= unsafe.Sizeof(end) - unsafe.Alignof(end)

	// An unaligned allocation must be some sort of string or data buffer. Skip it.
	//if uintptr(obj.rtSize)%unsafe.Sizeof(uintptr(0)) != 0 {
	//	return
	//}

	//pointersToScan := (uint(end) - uint(start)) / uint(unsafe.Sizeof(unsafe.Pointer(nil)))
	//if pointersToScan > 256 {
	//	println("markGraph -> huge object found", uint(pointersToScan), "pointers to scan")
	//	return
	//}

	// Mark all pointers.
	for ptr := start; ptr < end; ptr += unsafe.Alignof(ptr) {
		p := *(*uintptr)(unsafe.Pointer(ptr)) // - gc_ObjectOverhead
		//if p < g.first || p > g.last {
		//	continue
		//}
		if !g.allocs.Has(p) {
			continue
		}
		g.markRecursive(p, 0)
	}
}

// New allocates a new GC-managed object and returns a pointer to its payload.
//goland:noinspection ALL
func (g *gc) New(size uintptr) uintptr {
	// Is the size too large?
	if size > uintptr(gc_ObjectMaxSize) {
		panic("allocation too large")
	}

	// Allocate zeroed memory for the header plus the payload.
	p := AllocZeroed(gc_ObjectOverhead + size)
	if gc_TRACE {
		println("gc.New AllocZeroed", uint(size), "cap", uint(Sizeof(p)), "ptr", uint(p))
	}

	obj := (*gcObject)(p.Unsafe())
	if obj == nil {
		return 0
	}

	// Record the runtime size and start the object as gc_WHITE.
	//obj.cap = uint32(c)
	obj.rtSize = uint32(size)
	obj.color = gc_WHITE
	g.LiveBytes += size
	g.TotalBytes += int64(size)
	g.Live++
	g.TotalAllocs++

	// Advance past the header to the payload pointer.
	ptr := uintptr(unsafe.Pointer(obj)) + gc_ObjectOverhead

	// Add to allocations map
	g.allocs.Add(ptr, 0)

	// Update first pointer if necessary
	if ptr < g.first {
		g.first = ptr
	}
	// Update last pointer if necessary
	if ptr > g.last {
		g.last = ptr
	}

	// Return pointer to data
	return ptr
}
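
// For example, New(24) reserves gc_ObjectOverhead+24 = 32 bytes from the Heap
// allocator, writes the header {color: gc_WHITE, rtSize: 24}, registers
// block+8 in the allocation set, and hands block+8 back to the caller.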

// Free immediately removes the managed object and returns its memory to the allocator.
//goland:noinspection ALL
func (g *gc) Free(ptr uintptr) bool {
	//p := ptr // - gc_ObjectOverhead
	//if !g.allocs.Has(p) {
	if _, ok := g.allocs.Delete(ptr); !ok {
		return false
	}

	if gc_TRACE {
		println("GC free", uint(ptr))
	}

	obj := (*gcObject)(unsafe.Pointer(ptr - gc_ObjectOverhead))
	size := obj.rtSize
	g.LiveBytes -= uintptr(size)
	g.FreedBytes += int64(size)
	g.Live--
	g.Frees++

	if gc_TRACE {
		println("GC free", uint(ptr), "size", uint(size), "rtSize", obj.rtSize)
	}

	// The block starts at the header, not at the payload pointer.
	Free(Pointer(unsafe.Pointer(obj)))

	return true
}

// Realloc resizes a managed allocation, in place when possible, and returns
// the (possibly new) payload pointer.
func (g *gc) Realloc(ptr uintptr, size uintptr) uintptr {
	if gc_TRACE {
		println("tinygc.Realloc", uint(ptr), "size", uint(size))
	}
	if !g.allocs.Has(ptr) {
		return g.New(size)
	}
	//ptr -= gc_ObjectOverhead
	obj := (*gcObject)(Pointer(ptr - gc_ObjectOverhead).Unsafe())
	if obj.rtSize >= uint32(size) {
		if gc_TRACE {
			println("tinygc.Realloc size fits existing", uint(ptr), "existingSize", obj.rtSize, "size", uint(size))
		}
		obj.rtSize = uint32(size)
		return ptr
	}

	// Capture the old payload size before the block can move.
	oldSize := uintptr(obj.rtSize)
	// Reallocate the whole block: the header travels with the payload, so the
	// new size must include gc_ObjectOverhead just as in New.
	newPtr := Realloc(Pointer(ptr-gc_ObjectOverhead), gc_ObjectOverhead+size)
	if uintptr(newPtr) == ptr-gc_ObjectOverhead {
		if gc_TRACE {
			println("tinygc.Realloc nogc.Realloc returned same pointer", uint(ptr), "existingSize", obj.rtSize, "size", uint(size))
		}
		// Grew in place: update the header and accounting, keep the pointer.
		obj.rtSize = uint32(size)
		g.LiveBytes += size - oldSize
		g.TotalBytes += int64(size - oldSize)
		return ptr
	}

	// The block moved; the underlying Realloc already released the old block,
	// so only the bookkeeping is unwound here (freeing the stale pointer again
	// would be a double free).
	if _, ok := g.allocs.Delete(ptr); ok {
		if gc_TRACE {
			println("tinygc.Realloc freed previous pointer", uint(ptr), "size", uint(size))
		}
		g.LiveBytes -= oldSize
		g.FreedBytes += int64(oldSize)
		g.Live--
		g.Frees++
	}

	obj = (*gcObject)(newPtr.Unsafe())
	if obj == nil {
		return 0
	}

	// Record the runtime size and start the object as gc_WHITE.
	//obj.cap = uint32(c)
	obj.rtSize = uint32(size)
	obj.color = gc_WHITE
	g.LiveBytes += size
	g.TotalBytes += int64(size)
	g.Live++
	g.TotalAllocs++

	// Advance past the header to the payload pointer.
	ptr = uintptr(newPtr) + gc_ObjectOverhead

	// Add to allocations map
	g.allocs.Add(ptr, 0)

	// Update first pointer if necessary
	if ptr < g.first {
		g.first = ptr
	}
	// Update last pointer if necessary
	if ptr > g.last {
		g.last = ptr
	}

	return ptr
}
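
// Usage sketch (illustrative sizes): growing a payload usually moves the
// block, so callers must adopt the returned pointer.
//
//	p := collector.New(8)
//	p = collector.Realloc(p, 64) // may return an address different from p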

//goland:noinspection ALL
func (g *gc) Collect() {
	if gc_TRACE {
		println("GC collect started...")
	}
	var (
		start = time.Now().UnixNano()
		k     uintptr
		obj   *gcObject
		first = ^uintptr(0)
		last  = uintptr(0)
	)
	g.Cycles++

	////////////////////////////////////////////////////////////////////////
	// Mark Roots Phase
	////////////////////////////////////////////////////////////////////////
	markStack()
	//doMarkStack()
	doMarkGlobals()
	//markScheduler()
	// End of mark roots
	end := time.Now().UnixNano()
	markTime := end - start

	////////////////////////////////////////////////////////////////////////
	// Mark Graph Phase
	////////////////////////////////////////////////////////////////////////
	start = end
	g.LastSweep = 0
	g.LastSweepBytes = 0
	var (
		items     = g.allocs.items
		itemsSize = g.allocs.size
		itemsEnd  = items + (itemsSize * unsafe.Sizeof(pointerSetItem{}))
	)
	for ; items < itemsEnd; items += unsafe.Sizeof(pointerSetItem{}) {
		k = *(*uintptr)(unsafe.Pointer(items))
		if k == 0 {
			continue
		}
		if gc_TRACE {
			obj = (*gcObject)(Pointer(k - gc_ObjectOverhead).Unsafe())
			println("item", uint(k), "size", obj.rtSize, "color", obj.color)
		}
		//g.markGraph(k)
	}

	// End of mark graph
	end = time.Now().UnixNano()
	markGraphTime := end - start

	////////////////////////////////////////////////////////////////////////
	// Sweep Phase
	////////////////////////////////////////////////////////////////////////
	start = end

	// Reset items iterator
	items = g.allocs.items
	itemsSize = g.allocs.size
	itemsEnd = items + (itemsSize * unsafe.Sizeof(pointerSetItem{}))
	for ; items < itemsEnd; items += unsafe.Sizeof(pointerSetItem{}) {
		// dereference pointer
		k = *(*uintptr)(unsafe.Pointer(items))
		// Empty item?
		if k == 0 {
			continue
		}
		// cast to object
		obj = (*gcObject)(unsafe.Pointer(k - gc_ObjectOverhead))
		// free all gc_WHITE objects
		if obj.color == gc_WHITE {
			g.LiveBytes -= uintptr(obj.rtSize)
			g.LastSweepBytes += int64(obj.rtSize)
			g.Live--
			g.LastSweep++

			if gc_TRACE {
				println("GC sweep", uint(k), "size", uint(obj.rtSize))
			}

			// Remove from alloc map
			g.allocs.Delete(k)
			//items -= unsafe.Sizeof(pointerSetItem{})

			// Free memory: the block starts at the header, not at the payload.
			Free(Pointer(unsafe.Pointer(obj)))
		} else { // turn all gc_BLACK objects into gc_WHITE objects
			//k += gc_ObjectOverhead
			if k < first {
				first = k
			}
			if k > last {
				last = k
			}
			if gc_TRACE {
				//println("GC retained", uint(k), "size", uint(obj.size()))
			}
			obj.color = gc_WHITE
		}
	}

	g.first = first
	g.last = last
	end = time.Now().UnixNano()
	sweepTime := end - start
	g.LastMarkRootsTime = markTime
	g.LastMarkGraphTime = markGraphTime
	g.LastSweepTime = sweepTime
	g.SweepTime += sweepTime
	g.SweepBytes += g.LastSweepBytes
	g.Sweeps += g.LastSweep
	g.LastGCTime = markTime + markGraphTime + sweepTime
	g.TotalTime += g.LastGCTime

	if gc_TRACE {
		println("GC collect finished")
	}
	//stats.Print()
}