github.com/lxt1045/json@v0.0.0-20231013032136-54d6b1d6e525/pool.go (about)

     1  // MIT License
     2  //
     3  // Copyright (c) 2021 Xiantu Li
     4  //
     5  // Permission is hereby granted, free of charge, to any person obtaining a copy
     6  // of this software and associated documentation files (the "Software"), to deal
     7  // in the Software without restriction, including without limitation the rights
     8  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
     9  // copies of the Software, and to permit persons to whom the Software is
    10  // furnished to do so, subject to the following conditions:
    11  //
    12  // The above copyright notice and this permission notice shall be included in all
    13  // copies or substantial portions of the Software.
    14  //
    15  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    16  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    17  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    18  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    19  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    20  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    21  // SOFTWARE.
    22  
    23  package json
    24  
    25  import (
    26  	"reflect"
    27  	"sync"
    28  	"sync/atomic"
    29  	"unsafe"
    30  )
    31  
var (
	// strEface / isliceEface cache unpacked eface (type+data) headers for
	// an empty string and an empty []interface{} literal.
	strEface    = UnpackEface("")
	isliceEface = UnpackEface([]interface{}{})
	// hmapimp captures the runtime header of a fresh map[string]interface{};
	// a Go map value is a *hmap under the hood, hence the double indirection.
	hmapimp = func() *hmap {
		m := make(map[string]interface{})
		return *(**hmap)(unsafe.Pointer(&m))
	}()
	// mapGoType is the runtime map descriptor (*maptype) for
	// map[string]interface{}; used elsewhere to allocate bucket arrays.
	mapGoType = func() *maptype {
		m := make(map[string]interface{})
		typ := reflect.TypeOf(m)
		return (*maptype)(unsafe.Pointer(UnpackType(typ)))
	}()

	// chbsPool is fed by a background goroutine (newBytes) so that
	// bsPool.New can hand out pre-allocated buffers without paying the
	// make() latency on the caller's goroutine.
	chbsPool = func() (ch chan *[]byte) {
		ch = make(chan *[]byte, 4)
		go newBytes(ch)
		return
	}()
)
    51  
    52  func newBytes(ch chan *[]byte) {
    53  	for {
    54  		s := make([]byte, 0, bsPoolN)
    55  		ch <- &s
    56  	}
    57  }
    58  func newMapArray(ch chan *[]byte) {
    59  	N := 1 << 20
    60  	size := int(mapGoType.bucket.Size)
    61  	N = N / size
    62  	cap := N * size
    63  	for {
    64  		p := unsafe_NewArray(mapGoType.bucket, N)
    65  		s := &SliceHeader{
    66  			Data: p,
    67  			Len:  cap,
    68  			Cap:  cap,
    69  		}
    70  		ch <- (*[]byte)(unsafe.Pointer(s))
    71  	}
    72  }
    73  
var (
	// poolSliceInterface recycles scratch []interface{} buffers.
	poolSliceInterface = sync.Pool{New: func() any {
		return make([]interface{}, 1024)
	}}

	// pairPool recycles key/value scratch slices used while decoding.
	pairPool = sync.Pool{
		New: func() any {
			s := make([]pair, 0, 128)
			return &s
		},
	}
	// bsPool hands out byte buffers; New blocks on chbsPool, which a
	// background goroutine (newBytes) keeps topped up with pre-allocated
	// buffers.
	bsPool = sync.Pool{New: func() any {
		return <-chbsPool
		// s := make([]byte, 0, bsPoolN)
		// return &s
	}}
	// poolMapArrayInterface hands out pre-allocated map bucket arrays
	// produced by the newMapArray background goroutine.
	poolMapArrayInterface = func() sync.Pool {
		ch := make(chan *[]byte, 4) // runtime.GOMAXPROCS(0))
		// go func() {
		// 	for {
		// 		N := 1 << 20
		// 		p := unsafe_NewArray(mapGoType.bucket, N)
		// 		s := &SliceHeader{
		// 			Data: p,
		// 			Len:  N * int(mapGoType.bucket.Size),
		// 			Cap:  N * int(mapGoType.bucket.Size),
		// 		}
		// 		ch <- (*[]byte)(unsafe.Pointer(s))
		// 	}
		// }()
		go newMapArray(ch)
		return sync.Pool{New: func() any {
			return <-ch
		}}
	}()

	// cacheStructTagInfo maps a struct-type hash to its parsed TagInfo.
	cacheStructTagInfo = NewRCU[uint32, *TagInfo]()
	strPool            = NewBatch[string]()
	islicePool         = NewBatch[[]interface{}]()
	imapPool           = NewBatch[hmap]()
)
   115  
// Allocation batch sizes.
const (
	bsPoolN = 1 << 20 // capacity of each pooled byte buffer (1 MiB)
	batchN  = 1 << 12 // elements per Batch/BatchObj refill (4096)
)
   120  
// pair holds one key/value entry — presumably a decoded JSON object
// member buffered before insertion into a map; confirm against callers.
type pair struct {
	k string
	v interface{}
}
   125  
   126  // 获取 string 的起始地址
   127  func strToUintptr(p string) uintptr {
   128  	return *(*uintptr)(unsafe.Pointer(&p))
   129  }
   130  
   131  func LoadTagNode(v reflect.Value, hash uint32) (*TagInfo, error) {
   132  	tag, ok := cacheStructTagInfo.Get(hash)
   133  	if ok {
   134  		return tag, nil
   135  	}
   136  	return LoadTagNodeSlow(v.Type(), hash)
   137  }
   138  
   139  func LoadTagNodeByType(typ reflect.Type, hash uint32) (*TagInfo, error) {
   140  	tag, ok := cacheStructTagInfo.Get(hash)
   141  	if ok {
   142  		return tag, nil
   143  	}
   144  	return LoadTagNodeSlow(typ, hash)
   145  }
   146  
   147  func LoadTagNodeSlow(typ reflect.Type, hash uint32) (*TagInfo, error) {
   148  	ti, err := NewStructTagInfo(typ, nil)
   149  	if err != nil {
   150  		return nil, err
   151  	}
   152  
   153  	cacheStructTagInfo.Set(hash, ti)
   154  	return ti, nil
   155  }
   156  
   157  //RCU 依据 Read Copy Update 原理实现
   158  type RCU[T uintptr | uint32 | string | int, V any] struct {
   159  	m unsafe.Pointer
   160  }
   161  
   162  func NewRCU[T uintptr | uint32 | string | int, V any]() (c RCU[T, V]) {
   163  	m := make(map[T]V, 1)
   164  	c.m = unsafe.Pointer(&m)
   165  	return
   166  }
   167  
   168  func (c *RCU[T, V]) Get(key T) (v V, ok bool) {
   169  	m := *(*map[T]V)(atomic.LoadPointer(&c.m))
   170  	v, ok = m[key]
   171  	return
   172  }
   173  
   174  func (c *RCU[T, V]) Set(key T, v V) {
   175  	m := *(*map[T]V)(atomic.LoadPointer(&c.m))
   176  	if _, ok := m[key]; ok {
   177  		return
   178  	}
   179  	m2 := make(map[T]V, len(m)+10)
   180  	m2[key] = v
   181  	for {
   182  		p := atomic.LoadPointer(&c.m)
   183  		m = *(*map[T]V)(p)
   184  		if _, ok := m[key]; ok {
   185  			return
   186  		}
   187  		for k, v := range m {
   188  			m2[k] = v
   189  		}
   190  		swapped := atomic.CompareAndSwapPointer(&c.m, p, unsafe.Pointer(&m2))
   191  		if swapped {
   192  			break
   193  		}
   194  	}
   195  }
   196  
   197  func (c *RCU[T, V]) GetOrSet(key T, load func() (v V)) (v V) {
   198  	m := *(*map[T]V)(atomic.LoadPointer(&c.m))
   199  	v, ok := m[key]
   200  	if !ok {
   201  		v = load()
   202  		m2 := make(map[T]V, len(m)+10)
   203  		m2[key] = v
   204  		for {
   205  			p := atomic.LoadPointer(&c.m)
   206  			m = *(*map[T]V)(p)
   207  			for k, v := range m {
   208  				m2[k] = v
   209  			}
   210  			swapped := atomic.CompareAndSwapPointer(&c.m, p, unsafe.Pointer(&m2))
   211  			if swapped {
   212  				break
   213  			}
   214  		}
   215  	}
   216  	return
   217  }
   218  
/*
  Batch cooperates with the pool carried in PoolStore: one Unmarshal
  call tops the pool up once and then consumes it during that call
  without contention from other goroutines; whatever is left over is
  handed back and reused by the next call. (translated original note)
*/
// sliceNode is one pre-allocated batch: a slice of T plus a cursor of
// how many elements have been handed out.
type sliceNode[T any] struct {
	s   []T
	idx uint32 // atomic
}

// Batch hands out *T pointers from pre-allocated slices, swapping in a
// backup slice under the mutex when the current one runs dry.
type Batch[T any] struct {
	pool   unsafe.Pointer // *sliceNode[T]
	backup *sliceNode[T]
	sync.Mutex
}
   232  
   233  func NewBatch[T any]() *Batch[T] {
   234  	ret := &Batch[T]{
   235  		pool: unsafe.Pointer(&sliceNode[T]{
   236  			s:   make([]T, batchN),
   237  			idx: 0,
   238  		}),
   239  		backup: &sliceNode[T]{
   240  			s:   make([]T, batchN),
   241  			idx: 0,
   242  		},
   243  	}
   244  	return ret
   245  }
   246  
   247  func BatchGet[T any](b *Batch[T]) *T {
   248  	sn := (*sliceNode[T])(atomic.LoadPointer(&b.pool))
   249  	idx := atomic.AddUint32(&sn.idx, 1)
   250  	if int(idx) <= len(sn.s) {
   251  		return &sn.s[idx-1]
   252  	}
   253  	return b.Make()
   254  }
   255  
   256  func (b *Batch[T]) Get() *T {
   257  	sn := (*sliceNode[T])(atomic.LoadPointer(&b.pool))
   258  	idx := atomic.AddUint32(&sn.idx, 1)
   259  	if int(idx) <= len(sn.s) {
   260  		return &sn.s[idx-1]
   261  	}
   262  	return b.Make()
   263  }
   264  
   265  func (b *Batch[T]) GetN(n int) *T {
   266  	sn := (*sliceNode[T])(atomic.LoadPointer(&b.pool))
   267  	idx := atomic.AddUint32(&sn.idx, uint32(n))
   268  	var p *T
   269  	if int(idx) > len(sn.s) {
   270  		p = b.MakeN(n)
   271  	} else {
   272  		p = &sn.s[int(idx)-n]
   273  	}
   274  	return p
   275  }
   276  
// GetN2 reserves n consecutive elements and returns them as a []T whose
// header is assembled by hand.
// NOTE(review): sh.Len is never set, so the returned slice has len 0
// and cap n — callers appear to be expected to reslice/append; confirm
// against call sites.
func (b *Batch[T]) GetN2(n int) (s []T) {
	sn := (*sliceNode[T])(atomic.LoadPointer(&b.pool))
	idx := atomic.AddUint32(&sn.idx, uint32(n))
	var p *T
	if int(idx) > len(sn.s) {
		// Current batch exhausted: refill and reserve there.
		p = b.MakeN(n)
	} else {
		p = &sn.s[int(idx)-n]
	}

	// Build the slice header over the reserved window.
	sh := (*SliceHeader)(unsafe.Pointer(&s))
	sh.Data = unsafe.Pointer(p)
	sh.Cap = n
	return

}
   293  
// Make refills the pool under the lock and returns a pointer to one
// reserved element. Called by Get/BatchGet when the current batch is
// exhausted.
func (b *Batch[T]) Make() (p *T) {
	b.Lock()
	defer b.Unlock()

	// Retry first: another goroutine may have already swapped in a fresh
	// batch while we were waiting for the lock.
	sn := (*sliceNode[T])(atomic.LoadPointer(&b.pool))
	idx := atomic.AddUint32(&sn.idx, 1)
	if int(idx) <= len(sn.s) {
		p = &sn.s[idx-1]
		return
	}

	// Promote the pre-allocated backup if present, otherwise allocate a
	// new batch; idx starts at 1 because slot 0 goes to this caller.
	if b.backup != nil {
		sn = b.backup
		sn.idx = 1
		b.backup = nil
	} else {
		sn = &sliceNode[T]{
			s:   make([]T, batchN),
			idx: 1,
		}
	}
	atomic.StorePointer(&b.pool, unsafe.Pointer(sn))
	// Pre-allocate the next backup so the next refill is cheap.
	// NOTE(review): idx is set to 1 here but is reset when the backup is
	// promoted above, so the value is irrelevant (MakeN uses 0).
	b.backup = &sliceNode[T]{
		s:   make([]T, batchN),
		idx: 1,
	}
	p = &sn.s[0]
	return
}
   323  
   324  func (b *Batch[T]) MakeN(n int) (p *T) {
   325  	if n > batchN {
   326  		strs := make([]T, n)
   327  		return &strs[0]
   328  	}
   329  	b.Lock()
   330  	defer b.Unlock()
   331  	sn := (*sliceNode[T])(atomic.LoadPointer(&b.pool))
   332  	idx := atomic.AddUint32(&sn.idx, 1)
   333  	if int(idx) <= len(sn.s) {
   334  		p = &sn.s[idx-1]
   335  		return
   336  	}
   337  	if b.backup != nil {
   338  		sn = b.backup
   339  		sn.idx = uint32(n)
   340  		b.backup = nil
   341  	} else {
   342  		sn = &sliceNode[T]{
   343  			s:   make([]T, batchN),
   344  			idx: uint32(n),
   345  		}
   346  	}
   347  	atomic.StorePointer(&b.pool, unsafe.Pointer(sn))
   348  	b.backup = &sliceNode[T]{
   349  		s:   make([]T, batchN),
   350  		idx: 0,
   351  	}
   352  	p = &sn.s[0]
   353  	return
   354  }
   355  
// sliceObj is one pre-allocated batch of objects addressed in bytes:
// p points at the array, idx is the byte offset already handed out and
// end is the batch's total byte size.
type sliceObj struct {
	p   unsafe.Pointer
	idx uint32 // atomic
	end uint32 // atomic
}
   361  
// BatchObj amortizes per-object allocation latency by allocating
// objects of a single runtime type in large batches.
type BatchObj struct {
	pool   unsafe.Pointer // *sliceObj[T]
	backup *sliceObj      // spare batch, managed under the mutex (MakeN2)
	goType *GoType        // runtime type of the pooled objects
	size   uint32         // byte size of one object
	sync.Mutex
}
   370  
   371  func NewBatchObj(typ reflect.Type) *BatchObj {
   372  	if typ == nil {
   373  		return &BatchObj{
   374  			pool: unsafe.Pointer(&sliceObj{}),
   375  		}
   376  	}
   377  	goType := UnpackType(typ)
   378  	ret := &BatchObj{
   379  		pool:   unsafe.Pointer(&sliceObj{}),
   380  		goType: goType,
   381  		size:   uint32(goType.Size),
   382  	}
   383  	return ret
   384  }
   385  
   386  func (b *BatchObj) Get() unsafe.Pointer {
   387  	sn := (*sliceObj)(atomic.LoadPointer(&b.pool))
   388  	idx := atomic.AddUint32(&sn.idx, b.size)
   389  	if idx <= sn.end {
   390  		return pointerOffset(sn.p, uintptr(idx-b.size))
   391  	}
   392  	return b.MakeN(1)
   393  }
   394  
// Make refills the pool with a fresh batch of 1<<10 objects under the
// lock, reserving the first object for the caller.
// NOTE(review): unlike MakeN/MakeN2 this uses a smaller batch (1<<10 vs
// 1<<12) and never consults b.backup; Get calls MakeN, so this path
// may be unused — confirm before unifying.
func (b *BatchObj) Make() unsafe.Pointer {
	b.Lock()
	defer b.Unlock()

	// Retry on the current batch: another goroutine may have refilled it
	// while we waited for the lock.
	sn := (*sliceObj)(atomic.LoadPointer(&b.pool))
	idx := atomic.AddUint32(&sn.idx, b.size)
	if idx <= sn.end {
		return pointerOffset(sn.p, uintptr(idx-b.size))
	}

	const N = 1 << 10
	sn = &sliceObj{
		p:   unsafe_NewArray(b.goType, N),
		idx: uint32(b.goType.Size), // first object reserved for this caller
		end: uint32(N) * uint32(b.goType.Size),
	}
	atomic.StorePointer(&b.pool, unsafe.Pointer(sn))
	return sn.p
}
   414  
   415  func (b *BatchObj) GetN(n int) unsafe.Pointer {
   416  	sn := (*sliceObj)(atomic.LoadPointer(&b.pool))
   417  	offset := uint32(n) * b.size
   418  	idx := atomic.AddUint32(&sn.idx, offset)
   419  	if idx <= sn.end {
   420  		return pointerOffset(sn.p, uintptr(idx-offset))
   421  	}
   422  	return b.MakeN(n)
   423  
   424  }
   425  
// MakeN refills the object pool under the lock and reserves n objects,
// returning a pointer to the first. Requests larger than batchN get a
// dedicated allocation, bypassing the pool.
func (b *BatchObj) MakeN(n int) (p unsafe.Pointer) {
	if n > batchN {
		return unsafe_NewArray(b.goType, n)
	}
	b.Lock()
	defer b.Unlock()
	// Retry on the current batch: it may have been refilled while we
	// waited for the lock.
	sn := (*sliceObj)(atomic.LoadPointer(&b.pool))
	offset := uint32(n) * b.size
	idx := atomic.AddUint32(&sn.idx, offset)
	if idx <= sn.end {
		return pointerOffset(sn.p, uintptr(idx-offset))
	}
	const N = 1 << 12
	sn = &sliceObj{
		p:   unsafe_NewArray(b.goType, N),
		idx: offset, // first n objects reserved for this caller
		end: uint32(N) * uint32(b.goType.Size),
	}
	atomic.StorePointer(&b.pool, unsafe.Pointer(sn))
	return sn.p
}
   447  
// MakeN2 is MakeN plus a backup batch: on refill it promotes the
// pre-allocated backup when available and always pre-allocates the next
// one, moving most allocation cost off the requesting goroutine's
// critical path.
func (b *BatchObj) MakeN2(n int) (p unsafe.Pointer) {
	if n > batchN {
		// Oversized request: dedicated allocation, pool untouched.
		return unsafe_NewArray(b.goType, n)
	}
	b.Lock()
	defer b.Unlock()
	// Retry on the current batch first: it may have been refilled while
	// we waited for the lock.
	sn := (*sliceObj)(atomic.LoadPointer(&b.pool))
	offset := uint32(n) * b.size
	idx := atomic.AddUint32(&sn.idx, offset)
	if idx <= sn.end {
		return pointerOffset(sn.p, uintptr(idx-offset))
	}
	const N = 1 << 12

	// Promote the backup if present, otherwise allocate a new batch;
	// either way the first n objects are reserved for this caller.
	if b.backup != nil {
		sn = b.backup
		sn.idx = offset
		b.backup = nil
	} else {
		sn = &sliceObj{
			p:   unsafe_NewArray(b.goType, N),
			idx: offset,
			end: uint32(N) * uint32(b.goType.Size),
		}
	}
	atomic.StorePointer(&b.pool, unsafe.Pointer(sn))

	// Pre-allocate the next backup so the next refill is cheap.
	b.backup = &sliceObj{
		p:   unsafe_NewArray(b.goType, N),
		idx: 0,
		end: uint32(N) * uint32(b.goType.Size),
	}

	return sn.p
}
   483  
// Store carries the target object pointer plus its tag metadata through
// the decoding call chain.
type Store struct {
	obj unsafe.Pointer
	tag *TagInfo
}

// PoolStore is Store plus the per-call memory pools used while decoding.
type PoolStore struct {
	obj         unsafe.Pointer
	tag         *TagInfo
	pointerPool unsafe.Pointer // original TODO: could this live on tag? how much would that save?
	slicePool   unsafe.Pointer // original TODO: put slicePool on tag and dynPool globally to reduce copying and call-stack overhead?
}
   494  
var (
	gStringPool    = NewBatch[string]()      // strings are extremely common
	gNoscanPool    = NewBatch[byte]()        // any pointer-free type can be carved out of this
	gIntPool       = NewBatch[int]()         //
	gInterfacePool = NewBatch[interface{}]() //
)
   501  
   502  func (ps PoolStore) Idx(idx uintptr) (p unsafe.Pointer) {
   503  	p = pointerOffset(ps.pointerPool, idx)
   504  	*(*unsafe.Pointer)(ps.obj) = p
   505  	return
   506  }
   507  
   508  func GrowBytes(in []byte, need int) []byte {
   509  	l := need + len(in)
   510  	if l <= cap(in) {
   511  		return in[:l]
   512  	}
   513  	if l < 8*1024 {
   514  		l = 8 * 1024
   515  	} else {
   516  		l *= 16
   517  	}
   518  	out := make([]byte, 0, l)
   519  	out = append(out, in...)
   520  	return out[:l]
   521  }
   522  func GrowStrings(in []string, need int) []string {
   523  	l := need + len(in)
   524  	if l <= cap(in) {
   525  		return in[:l]
   526  	}
   527  	if l < 1024 {
   528  		l = 1024
   529  	} else {
   530  		l *= 16
   531  	}
   532  	out := make([]string, 0, l)
   533  	out = append(out, in...)
   534  	return out[:need+len(in)]
   535  }
   536  
   537  func GrowInts(in []int) []int {
   538  	l := 1 + len(in)
   539  	if l <= cap(in) {
   540  		return in[:l]
   541  	}
   542  	if l < 1024 {
   543  		l = 1024
   544  	} else {
   545  		l *= 16
   546  	}
   547  	out := make([]int, 0, l)
   548  	out = append(out, in...)
   549  	return out[:1+len(in)]
   550  }
   551  
// GetObjs takes ownership of the scratch object slice stored behind
// ps.slicePool (clearing the slot), returning it when it still has
// spare capacity; otherwise it allocates a fresh array of 1024 goType
// objects through the runtime and returns a []uint8 view of its raw
// bytes (len 0, cap 1024*Size).
func (ps PoolStore) GetObjs(goType *GoType) []uint8 {
	pObj := ps.slicePool
	p := (*[]uint8)(pObj)
	pool := *p
	*p = nil // clear the slot; the caller owns the slice until SetObjs
	if cap(pool)-len(pool) > 0 {
		return pool
	}
	l := 1024
	// Allocate via the runtime so the GC knows the element type.
	parr := unsafe_NewArray(goType, l)
	l = l * int(goType.Size)
	// s := SliceHeader{
	// 	Data: parr,
	// 	Len:  0,
	// 	Cap:  l,
	// }
	// return *(*[]uint8)(unsafe.Pointer(&s))
	// View the array as bytes: len 0, cap = total byte size.
	out := (*(*[1 << 30]uint8)(parr))[:0:l]
	return out
}
   572  
   573  func (ps PoolStore) SetObjs(in []uint8) PoolStore {
   574  	p := (*[]uint8)(ps.slicePool)
   575  	*p = in
   576  	return ps
   577  }
   578  
// GrowObjs extends the byte-viewed object slice in by need bytes,
// reallocating through the runtime (so the GC tracks goType's pointers)
// when the capacity is insufficient. The returned slice has length
// len(in)+need.
// NOTE(review): on reallocation, l — a byte count — is passed to
// unsafe_NewArray as an element count, so the new array is Size times
// larger than strictly necessary; confirm whether that is intentional.
func GrowObjs(in []uint8, need int, goType *GoType) []uint8 {
	l := need + len(in)
	if l <= cap(in) {
		return in[:l]
	}
	// Over-allocate so repeated growth amortizes the copying.
	if l < 1024 {
		l = 1024
	} else {
		l *= 16
	}
	parr := unsafe_NewArray(goType, l)
	l = l * int(goType.Size)
	// View the new array as bytes: len 0, cap = total byte size.
	out := (*(*[1 << 30]uint8)(parr))[:0:l]

	// s := SliceHeader{
	// 	Data: parr,
	// 	Len:  0,
	// 	Cap:  l,
	// }
	// out := *(*[]uint8)(unsafe.Pointer(&s))

	out = append(out, in...)
	return out[:need+len(in)]
}