trpc.group/trpc-go/trpc-go@v1.0.3/internal/linkbuffer/buffer.go (about) 1 // 2 // 3 // Tencent is pleased to support the open source community by making tRPC available. 4 // 5 // Copyright (C) 2023 THL A29 Limited, a Tencent company. 6 // All rights reserved. 7 // 8 // If you have downloaded a copy of the tRPC source code from Tencent, 9 // please note that tRPC source code is licensed under the Apache 2.0 License, 10 // A copy of the Apache 2.0 License is included in this file. 11 // 12 // 13 14 // Package linkbuffer is a rich buffer to reuse underlying bytes. 15 package linkbuffer 16 17 import ( 18 "io" 19 "sync" 20 ) 21 22 var _ Buffer = (*Buf)(nil) 23 24 // NewBuf creates a new buf. 25 func NewBuf(a Allocator, minMallocSize int) *Buf { 26 bytes := newBytes(nil, nil) 27 return &Buf{ 28 a: a, 29 minMallocSize: minMallocSize, 30 head: bytes, 31 tail: bytes, 32 } 33 } 34 35 // Buf is a rich buffer to reuse underlying bytes. 36 type Buf struct { 37 a Allocator 38 minMallocSize int 39 40 head, tail *bytes 41 dirty *bytes 42 } 43 44 // Write copies p to Buf and implements io.Writer. 45 func (b *Buf) Write(p []byte) (int, error) { 46 if b.tail.release == nil { 47 bts, release := b.a.Malloc(b.minMallocSize) 48 b.tail.next = newBytes(bts[:0], release) 49 b.tail = b.tail.next 50 } 51 available := cap(b.tail.bts) - len(b.tail.bts) 52 if available >= len(p) { 53 b.tail.bts = append(b.tail.bts, p...) 54 return len(p), nil 55 } 56 b.tail.bts = append(b.tail.bts, p[:available]...) 57 bts, release := b.a.Malloc(b.minMallocSize) 58 b.tail.next = newBytes(bts[:0], release) 59 b.tail = b.tail.next 60 n, err := b.Write(p[available:]) 61 return available + n, err 62 } 63 64 // Append appends a slice of bytes to Buf. 65 // Buf owns these bs, but won't release them to underlying allocator. 
66 func (b *Buf) Append(bs ...[]byte) { 67 for _, bts := range bs { 68 b.append(bts) 69 } 70 } 71 72 func (b *Buf) append(bts []byte) { 73 if b.tail.release == nil || cap(b.tail.bts) == len(b.tail.bts) { 74 b.tail.next = newBytes(bts, nil) 75 b.tail = b.tail.next 76 } else { 77 remains := b.tail.bts[len(b.tail.bts):] 78 release := b.tail.release 79 b.tail.release = nil 80 b.tail.next = newBytes(bts, nil) 81 b.tail = b.tail.next 82 b.tail.next = newBytes(remains, release) 83 b.tail = b.tail.next 84 } 85 } 86 87 // Prepend prepends a slice to bytes to Buf. Next Read starts with the first bytes of slice. 88 // Buf owns these bs, but won't release them to underlying allocator. 89 func (b *Buf) Prepend(bs ...[]byte) { 90 for i := len(bs) - 1; i >= 0; i-- { 91 bytes := newBytes(bs[i], nil) 92 bytes.next = b.head 93 b.head = bytes 94 } 95 } 96 97 // Alloc allocates a []byte with size n. 98 func (b *Buf) Alloc(n int) []byte { 99 if b.tail.release != nil && cap(b.tail.bts)-len(b.tail.bts) >= n { 100 l := len(b.tail.bts) 101 b.tail.bts = b.tail.bts[:l+n] 102 return b.tail.bts[l : l+n] 103 } 104 bts, release := b.a.Malloc(n) 105 b.tail.next = newBytes(bts[:n], release) 106 b.tail = b.tail.next 107 return bts[:n] 108 } 109 110 // Prelloc allocates a []byte with size n at the beginning of Buf. 111 func (b *Buf) Prelloc(n int) []byte { 112 bts, release := b.a.Malloc(n) 113 bytes := newBytes(bts[:n], release) 114 bytes.next = b.head 115 b.head = bytes 116 return bts[:n] 117 } 118 119 // Merge merges another Reader. 120 // If r is not *Buf, b does not own the bytes of r. 121 // If r is a *Buf, the ownership of r's bytes is changed to b, and the caller should not Release r. 122 func (b *Buf) Merge(r Reader) { 123 bb, ok := r.(*Buf) 124 if !ok { 125 for _, bts := range r.ReadAll() { 126 b.Append(bts) 127 } 128 return 129 } 130 b.tail.next = bb.head 131 b.tail = bb.tail 132 } 133 134 // Read copies data to p, and returns the number of byte copied and an error. 
// The io.EOF is returned if Buf has no unread bytes and len(p) is not zero.
func (b *Buf) Read(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}

	// Restore the head/tail sentinel if the whole chain gets consumed.
	defer b.ensureNotEmpty()
	var copied int
	for b.head != nil {
		curCopied := copy(p[copied:], b.head.bts)
		copied += curCopied
		b.head.bts = b.head.bts[curCopied:]
		// Move fully-consumed head nodes onto the dirty list (freed on Release).
		b.dirtyEmptyHeads()
		if copied == len(p) {
			return copied, nil
		}
	}
	if copied > 0 {
		return copied, nil
	}
	return copied, io.EOF
}

// ReadN tries best to read all size into one []byte.
// The second return value may be smaller than size if underlying bytes is not continuous.
func (b *Buf) ReadN(size int) ([]byte, int) {
	defer b.ensureNotEmpty()
	b.dirtyEmptyHeads()
	for b.head != nil {
		if size >= len(b.head.bts) {
			// Head is fully consumed by this read: detach it whole.
			bts := b.dirtyHead()
			b.dirtyEmptyHeads()
			return bts, len(bts)
		}
		// Head has more than size bytes: return a prefix and keep the rest.
		bts := b.head.bts[:size]
		b.head.bts = b.head.bts[size:]
		return bts, size
	}
	return nil, 0
}

// ReadAll returns all underlying []byte in [][]byte.
func (b *Buf) ReadAll() [][]byte {
	defer b.ensureNotEmpty()
	var all [][]byte
	for b.head != nil {
		// Consume every node; skip empty ones in the result.
		if bts := b.dirtyHead(); len(bts) != 0 {
			all = append(all, bts)
		}
	}
	return all
}

// ReadNext returns the next continuous []byte.
func (b *Buf) ReadNext() []byte {
	defer b.ensureNotEmpty()
	for b.head != nil {
		if bts := b.dirtyHead(); len(bts) != 0 {
			return bts
		}
	}
	return nil
}

// Release releases the read bytes to allocator.
func (b *Buf) Release() {
	// Every node on the dirty list carries a non-nil release token
	// (see dirtyHead); free it, then recycle the node itself.
	for b.dirty != nil {
		b.a.Free(b.dirty.release)
		dirty := b.dirty
		b.dirty = b.dirty.next
		dirty.release = nil
		dirty.bts = nil
		dirty.next = nil
		bytesPool.Put(dirty)
	}
}

// Len returns the total len of underlying bytes.
213 func (b *Buf) Len() int { 214 var l int 215 for bytes := b.head; bytes != nil; bytes = bytes.next { 216 l += len(bytes.bts) 217 } 218 return l 219 } 220 221 func (b *Buf) dirtyEmptyHeads() { 222 for b.head != nil && len(b.head.bts) == 0 { 223 b.dirtyHead() 224 } 225 } 226 227 func (b *Buf) dirtyHead() []byte { 228 bts := b.head.bts 229 head := b.head 230 b.head = head.next 231 if head.release == nil { 232 head.bts = nil 233 head.next = nil 234 bytesPool.Put(head) 235 } else { 236 head.next = b.dirty 237 b.dirty = head 238 } 239 return bts 240 } 241 242 func (b *Buf) ensureNotEmpty() { 243 if b.head == nil { 244 b.head = newBytes(nil, nil) 245 b.tail = b.head 246 } 247 } 248 249 // Allocator is the interface to Malloc or Free bytes. 250 type Allocator interface { 251 // Malloc mallocs a []byte with specific size. 252 // The second return value is the consequence for go's escape analysis. 253 // See ClassAllocator and https://github.com/golang/go/issues/8618 for details. 254 Malloc(int) ([]byte, interface{}) 255 // Free frees the allocated bytes. It accepts the second return value of Malloc. 256 Free(interface{}) 257 } 258 259 type bytes struct { 260 bts []byte 261 release interface{} 262 next *bytes 263 } 264 265 var bytesPool = sync.Pool{New: func() interface{} { return &bytes{} }} 266 267 func newBytes(bts []byte, release interface{}) *bytes { 268 bytes := bytesPool.Get().(*bytes) 269 bytes.bts = bts 270 bytes.release = release 271 bytes.next = nil 272 return bytes 273 }