github.com/matrixorigin/matrixone@v0.7.0/pkg/lockservice/slice.go (about) 1 // Copyright 2023 Matrix Origin 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package lockservice 16 17 import ( 18 "fmt" 19 "math" 20 "sync" 21 "sync/atomic" 22 ) 23 24 var ( 25 cowSlicePool = sync.Pool{ 26 New: func() any { 27 return &cowSlice{} 28 }, 29 } 30 ) 31 32 // cowSlice is used to store information about all the locks occupied by a 33 // transaction. This structure is specially designed to avoid [][]byte expansion 34 // and is lock-free. The cowSlice supports a single-write-multiple-read concurrency 35 // model for a transaction. 36 type cowSlice struct { 37 fsp *fixedSlicePool 38 fs atomic.Value // *fixedSlice 39 v atomic.Uint64 40 41 // just for testing 42 hack struct { 43 replace func() 44 slice func() 45 } 46 } 47 48 func newCowSlice( 49 fsp *fixedSlicePool, 50 values [][]byte) *cowSlice { 51 cs := cowSlicePool.Get().(*cowSlice) 52 cs.fsp = fsp 53 fs := fsp.acquire(len(values)) 54 fs.append(values) 55 cs.fs.Store(fs) 56 return cs 57 } 58 59 func (cs *cowSlice) append(values [][]byte) { 60 old := cs.mustGet() 61 capacity := old.cap() 62 newLen := len(values) + old.len() 63 if capacity >= newLen { 64 old.append(values) 65 return 66 } 67 68 // COW(copy-on-write), which needs to be copied once for each expansion, but for 69 // [][]byte lock information, only the []byte pointer needs to be copied and the 70 // overhead can be ignored. 
71 new := cs.fsp.acquire(newLen) 72 new.join(old, values) 73 cs.replace(old, new) 74 } 75 76 func (cs *cowSlice) replace(old, new *fixedSlice) { 77 cs.fs.Store(new) 78 for { 79 v := cs.v.Load() 80 if cs.hack.replace != nil { 81 cs.hack.replace() 82 } 83 // we cannot unref old at will as there may be concurrent reads. 84 if cs.v.CompareAndSwap(v, v+1) { 85 old.unref() 86 return 87 } 88 } 89 } 90 91 func (cs *cowSlice) slice() *fixedSlice { 92 for { 93 v := cs.v.Load() 94 // In either case, if we get an incorrect fs, the following atomic operation 95 // will not succeed, the data will not be read. 96 fs := cs.mustGet() 97 fs.ref() 98 if cs.hack.slice != nil { 99 cs.hack.slice() 100 } 101 if cs.v.CompareAndSwap(v, v+1) { 102 return fs 103 } 104 // anyway, adding a ref does not produce an error. When we find that the fs we 105 // get is incorrect, we unref 106 fs.unref() 107 } 108 } 109 110 func (cs *cowSlice) close() { 111 cs.v.Store(0) 112 cs.mustGet().unref() 113 cowSlicePool.Put(cs) 114 } 115 116 func (cs *cowSlice) mustGet() *fixedSlice { 117 v := cs.fs.Load() 118 if v == nil { 119 panic("BUG: must can get slice") 120 } 121 return v.(*fixedSlice) 122 } 123 124 // fixedSlice is fixed capacity [][]byte slice 125 type fixedSlice struct { 126 values [][]byte 127 sp *fixedSlicePool 128 atomic struct { 129 len atomic.Uint32 130 ref atomic.Int32 131 } 132 } 133 134 func (s *fixedSlice) join( 135 other *fixedSlice, 136 values [][]byte) { 137 n := other.len() 138 copy(s.values, other.values[:n]) 139 copy(s.values[n:n+len(values)], values) 140 s.atomic.len.Store(uint32(n + len(values))) 141 } 142 143 func (s *fixedSlice) append(values [][]byte) { 144 n := len(values) 145 offset := s.len() 146 for i := 0; i < n; i++ { 147 s.values[offset+i] = values[i] 148 } 149 s.atomic.len.Add(uint32(len(values))) 150 } 151 152 func (s *fixedSlice) iter(fn func([]byte) bool) { 153 n := s.len() 154 for i := 0; i < n; i++ { 155 if !fn(s.values[i]) { 156 return 157 } 158 } 159 } 160 161 func 
(s *fixedSlice) len() int { 162 return int(s.atomic.len.Load()) 163 } 164 165 func (s *fixedSlice) cap() int { 166 return cap(s.values) 167 } 168 169 func (s *fixedSlice) ref() { 170 s.atomic.ref.Add(1) 171 } 172 173 func (s *fixedSlice) unref() { 174 n := s.atomic.ref.Add(-1) 175 if n == 0 { 176 s.close() 177 return 178 } 179 if n < 0 { 180 panic("BUG: invalid ref count") 181 } 182 } 183 184 func (s *fixedSlice) close() { 185 for i := range s.values { 186 s.values[i] = nil 187 } 188 s.atomic.len.Store(0) 189 s.sp.release(s) 190 } 191 192 // fixedSlicePool maintains a set of sync.pool, each of which is used to store a specified 193 // size of slice. The size of the slice increases in multiples of 2. 194 type fixedSlicePool struct { 195 slices []sync.Pool 196 acquireV atomic.Uint64 197 releaseV atomic.Uint64 198 } 199 200 func newFixedSlicePool(max int) *fixedSlicePool { 201 sp := &fixedSlicePool{} 202 max = roundUp(max) 203 for i := 1; i <= max; { 204 cap := i 205 sp.slices = append(sp.slices, sync.Pool{ 206 New: func() any { 207 s := &fixedSlice{values: make([][]byte, cap), sp: sp} 208 return s 209 }}) 210 i = i << 1 211 } 212 return sp 213 } 214 215 func (sp *fixedSlicePool) acquire(n int) *fixedSlice { 216 sp.acquireV.Add(1) 217 n = roundUp(n) 218 i := int(math.Log2(float64(n))) 219 if i >= len(sp.slices) { 220 panic(fmt.Sprintf("too large fixed slice %d, max is %d", 221 n, 222 1<<(len(sp.slices)-1))) 223 } 224 s := sp.slices[i].Get().(*fixedSlice) 225 s.ref() 226 return s 227 } 228 229 func (sp *fixedSlicePool) release(s *fixedSlice) { 230 sp.releaseV.Add(1) 231 n := s.cap() 232 i := int(math.Log2(float64(n))) 233 if i >= len(sp.slices) { 234 panic(fmt.Sprintf("too large fixed slice %d, max is %d", 235 n, 236 2<<(len(sp.slices)-1))) 237 } 238 sp.slices[i].Put(s) 239 } 240 241 // roundUp takes a int greater than 0 and rounds it up to the next 242 // power of 2. 
func roundUp(v int) int {
	// Smear the highest set bit of v-1 into every lower position, then add
	// one: the classic next-power-of-two trick. Doubling shifts 1..32 cover
	// a full 64-bit int. Exact powers of two are returned unchanged.
	v--
	for shift := 1; shift <= 32; shift <<= 1 {
		v |= v >> shift
	}
	return v + 1
}