github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/test/fixedbugs/issue18902b.go

// skip

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

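// Compile-only input for the issue18902 test: the driver
// (issue18902.go) builds this file with -S and scans the assembly
// output for line numbers that jump around excessively. The types
// below mirror runtime GC internals in stripped-down form; flush at
// the bottom holds the interesting control flow.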
package foo

import (
	"unsafe"
)

type gcMaxTreeNodeVal uint64

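// work holds global GC work-list state: lock-free lists of full and
// empty workbufs, plus mark-progress counters.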
var work struct {
	full         uint64    // lock-free list of full blocks workbuf
	empty        uint64    // lock-free list of empty blocks workbuf
	pad0         [64]uint8 // prevents false-sharing between full/empty and nproc/nwait
	bytesMarked  uint64
	markrootNext uint32 // next markroot job
	markrootJobs uint32 // number of markroot jobs
	nproc        uint32
	tstart       int64
	nwait        uint32
	ndone        uint32
}

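// gcShardQueue1 is the per-shard queue state proper; gcShardQueue pads
// it so each queue occupies a full 64-byte cache line.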
type gcShardQueue1 struct {
	partial *workbuf
	full    *workbuf
	n       uintptr
	maxTree gcMaxTreeNodeVal
}
type gcShardQueue struct {
	gcShardQueue1
	pad [64 - unsafe.Sizeof(gcShardQueue1{})]byte
}

const gcSortBufPointers = (64 << 10) / 8 // 8192 slots: 64 KiB of 8-byte pointers

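// gcSortBuf accumulates up to gcSortBufPointers pointers in buf; flush
// radix-sorts them, using tmp as scratch space, and drains them into
// the per-shard queues.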
type gcSortBuf struct {
	buf *gcSortArray
	tmp *gcSortArray
	n   uintptr
}

//go:notinheap
type gcSortArray [gcSortBufPointers]uintptr

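// Assorted GC constants. gcShardShift/gcShardBytes define the
// 4 MiB (1<<22 byte) heap shards that flush groups pointers by.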
const (
	_DebugGC             = 0
	_ConcurrentSweep     = true
	_FinBlockSize        = 4 * 1024
	sweepMinHeapDistance = 1024 * 1024
	gcShardShift         = 2 + 20
	gcShardBytes         = 1 << gcShardShift
)

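// mheap is a pared-down heap descriptor; flush reads arena_start and
// shardQueues from the global mheap_ below.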
//go:notinheap
type mheap struct {
	shardQueues       []gcShardQueue
	_                 uint32     // align uint64 fields on 32-bit for atomics
	pagesInUse        uint64     // pages of spans in stats _MSpanInUse; R/W with mheap.lock
	spanBytesAlloc    uint64     // bytes of spans allocated this cycle; updated atomically
	pagesSwept        uint64     // pages swept this cycle; updated atomically
	sweepPagesPerByte float64    // proportional sweep ratio; written with lock, read without
	largefree         uint64     // bytes freed for large objects (>maxsmallsize)
	nlargefree        uint64     // number of frees for large objects (>maxsmallsize)
	nsmallfree        [67]uint64 // number of frees for small objects (<=maxsmallsize)
	bitmap            uintptr    // Points to one byte past the end of the bitmap
	bitmap_mapped     uintptr
	arena_start       uintptr
	arena_used        uintptr // always mHeap_Map{Bits,Spans} before updating
	arena_end         uintptr
	arena_reserved    bool
}

var mheap_ mheap

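// lfnode is a node in a lock-free stack (work.full and work.empty hold
// the heads); it must be the first field of workbufhdr.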
type lfnode struct {
	next    uint64
	pushcnt uintptr
}
type workbufhdr struct {
	node lfnode // must be first
	next *workbuf
	nobj int
}

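// workbuf pads the header out to 2048 bytes with pointer slots.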
//go:notinheap
type workbuf struct {
	workbufhdr
	obj [(2048 - unsafe.Sizeof(workbufhdr{})) / 8]uintptr
}

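// checkempty clears any leftover object count before a buffer is
// recycled onto the empty list.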
//go:noinline
func (b *workbuf) checkempty() {
	if b.nobj != 0 {
		b.nobj = 0
	}
}

// putempty hands an emptied buffer back to the global empty list.
func putempty(b *workbuf) {
	b.checkempty()
	lfstackpush(&work.empty, &b.node)
}

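// lfstackpush is an empty stub standing in for the runtime's lock-free
// stack push; //go:noinline keeps it an out-of-line call.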
//go:noinline
func lfstackpush(head *uint64, node *lfnode) {
}

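// add is a stub for queueing ptrs onto shard qidx; returning spare
// unchanged keeps flush's workbuf-recycling path alive.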
//go:noinline
func (q *gcShardQueue) add(qidx uintptr, ptrs []uintptr, spare *workbuf) *workbuf {
	return spare
}

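// flush radix-sorts the buffered pointers and drains them into the
// per-shard queues. The sort is a least-significant-digit radix sort
// over the bits above the shard offset (sortDigitBits bits per pass),
// so it orders pointers by shard number and stops as soon as all
// remaining high bits agree. A single spare workbuf is threaded
// through the add calls and returned to the empty list at the end.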
func (b *gcSortBuf) flush() {
	if b.n == 0 {
		return
	}
	const sortDigitBits = 11
	buf, tmp := b.buf[:b.n], b.tmp[:b.n]
	moreBits := true
	for shift := uint(gcShardShift); moreBits; shift += sortDigitBits {
		const k = 1 << sortDigitBits
		var pos [k]uint16
		nshift := shift + sortDigitBits
		nbits := buf[0] >> nshift
		moreBits = false
		// Count the occurrences of each digit, and note whether any
		// pointers still differ above this digit (forcing another pass).
		for _, v := range buf {
			pos[(v>>shift)%k]++
			moreBits = moreBits || v>>nshift != nbits
		}
		// Convert counts into starting offsets (prefix sum).
		var sum uint16
		for i, count := range &pos {
			pos[i] = sum
			sum += count
		}
		// Scatter the pointers into tmp in digit order, then swap buffers.
		for _, v := range buf {
			digit := (v >> shift) % k
			tmp[pos[digit]] = v
			pos[digit]++
		}
		buf, tmp = tmp, buf
	}
	// buf is now sorted by shard; hand each run of pointers that falls
	// in the same shard to that shard's queue.
	start := mheap_.arena_start
	i0 := 0
	shard0 := (buf[0] - start) / gcShardBytes
	var spare *workbuf
	for i, p := range buf {
		shard := (p - start) / gcShardBytes
		if shard != shard0 {
			spare = mheap_.shardQueues[shard0].add(shard0, buf[i0:i], spare)
			i0, shard0 = i, shard
		}
	}
	spare = mheap_.shardQueues[shard0].add(shard0, buf[i0:], spare)
	b.n = 0
	if spare != nil {
		putempty(spare)
	}
}