github.com/m3db/m3@v1.5.0/src/dbnode/encoding/ostream.go

// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package encoding

import (
	"github.com/m3db/m3/src/x/checked"
	"github.com/m3db/m3/src/x/pool"
)

const (
	initAllocSize = 1024
)

// ostream encapsulates a writable stream.
type ostream struct {
	// We want to use a checked.Bytes when transferring ownership of the buffer
	// of the ostream. Unfortunately, the accounting overhead of going through
	// the checked.Bytes for every write is massive. As a result, we store both
	// the rawBuffer that backs the checked.Bytes AND the checked.Bytes itself
	// in this struct.
	//
	// That way, whenever we're writing to the buffer we can avoid the accounting
	// overhead entirely, but when the data needs to be transferred to another owner
	// we use the checked.Bytes, which is when the accounting really matters anyway.
	//
	// The rawBuffer and checked.Bytes may get out of sync as the rawBuffer is written to,
	// but that's fine because we perform a "repair" by resetting the checked.Bytes'
	// underlying byte slice to be the rawBuffer whenever we expose a checked.Bytes to
	// an external caller.
	rawBuffer []byte
	checked   checked.Bytes

	pos       int // how many bits have been used in the last byte
	bytesPool pool.CheckedBytesPool
}

// NewOStream creates a new OStream.
func NewOStream(
	bytes checked.Bytes,
	initAllocIfEmpty bool,
	bytesPool pool.CheckedBytesPool,
) OStream {
	if bytes == nil && initAllocIfEmpty {
		bytes = checked.NewBytes(make([]byte, 0, initAllocSize), nil)
	}

	stream := &ostream{bytesPool: bytesPool}
	stream.Reset(bytes)
	return stream
}

func (os *ostream) Len() int {
	return len(os.rawBuffer)
}

func (os *ostream) Empty() bool {
	return os.Len() == 0 && os.pos == 0
}

func (os *ostream) lastIndex() int {
	return os.Len() - 1
}

func (os *ostream) hasUnusedBits() bool {
	return os.pos > 0 && os.pos < 8
}

// grow appends v to rawBuffer and sets pos to np.
func (os *ostream) grow(v byte, np int) {
	os.ensureCapacityFor(1)
	os.rawBuffer = append(os.rawBuffer, v)

	os.pos = np
}
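// A minimal sketch of the pos bookkeeping (illustrative only; values assume an
// initially empty, zero-valued ostream with no bytes pool):
//
//	var s ostream
//	s.WriteBit(1) // grow(0x80, 1): rawBuffer == []byte{0x80}, pos == 1
//	s.WriteBit(1) // fillUnused(0x80): rawBuffer == []byte{0xC0}, pos == 2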
// ensureCapacityFor ensures that there is capacity for at least n more bytes.
func (os *ostream) ensureCapacityFor(n int) {
	var (
		currCap      = cap(os.rawBuffer)
		currLen      = len(os.rawBuffer)
		availableCap = currCap - currLen
		missingCap   = n - availableCap
	)
	if missingCap <= 0 {
		// Already have enough capacity.
		return
	}

	newCap := max(cap(os.rawBuffer)*2, currCap+missingCap)
	if p := os.bytesPool; p != nil {
		newChecked := p.Get(newCap)
		newChecked.IncRef()
		newChecked.AppendAll(os.rawBuffer)

		if os.checked != nil {
			os.checked.DecRef()
			os.checked.Finalize()
		}

		os.checked = newChecked
		os.rawBuffer = os.checked.Bytes()
	} else {
		newRawBuffer := make([]byte, 0, newCap)
		newRawBuffer = append(newRawBuffer, os.rawBuffer...)
		os.rawBuffer = newRawBuffer

		os.checked = checked.NewBytes(os.rawBuffer, nil)
		os.checked.IncRef()
	}
}

func (os *ostream) fillUnused(v byte) {
	os.rawBuffer[os.lastIndex()] |= v >> uint(os.pos)
}

func (os *ostream) WriteBit(v Bit) {
	v <<= 7
	if !os.hasUnusedBits() {
		os.grow(byte(v), 1)
		return
	}
	os.fillUnused(byte(v))
	os.pos++
}

func (os *ostream) WriteByte(v byte) {
	if !os.hasUnusedBits() {
		os.grow(v, 8)
		return
	}
	os.fillUnused(v)
	os.grow(v<<uint(8-os.pos), os.pos)
}

func (os *ostream) WriteBytes(bytes []byte) {
	// Call ensureCapacityFor ahead of time so that the bytes pool is used to
	// grow the rawBuffer (as opposed to append possibly triggering an allocation
	// if it weren't) and so that the buffer is grown at most once regardless of
	// the size of the []byte being written.
	os.ensureCapacityFor(len(bytes))

	if !os.hasUnusedBits() {
		// If the stream is aligned on a byte boundary then all of the WriteByte()
		// function calls and bit-twiddling can be skipped in favor of a single
		// copy operation.
		os.rawBuffer = append(os.rawBuffer, bytes...)
		// Position 8 indicates that the last byte of the buffer has been completely
		// filled.
		os.pos = 8
		return
	}

	for i := 0; i < len(bytes); i++ {
		os.WriteByte(bytes[i])
	}
}
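// A sketch contrasting the two WriteBytes paths above (illustrative only;
// assumes no bytes pool):
//
//	s := &ostream{}
//	s.WriteBytes([]byte{0x01, 0x02}) // byte-aligned: a single append, pos == 8
//	s.WriteBit(1)                    // the stream is now unaligned
//	s.WriteBytes([]byte{0x03})       // falls back to per-byte WriteByte calls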
func (os *ostream) Write(bytes []byte) (int, error) {
	os.WriteBytes(bytes)
	return len(bytes), nil
}

func (os *ostream) WriteBits(v uint64, numBits int) {
	if numBits == 0 {
		return
	}

	// we should never write more than 64 bits for a uint64
	if numBits > 64 {
		numBits = 64
	}

	v <<= uint(64 - numBits)

	for numBits >= 32 {
		os.WriteByte(byte(v >> 56))
		os.WriteByte(byte(v >> 48))
		os.WriteByte(byte(v >> 40))
		os.WriteByte(byte(v >> 32))

		v <<= 32
		numBits -= 32
	}

	for numBits >= 8 {
		os.WriteByte(byte(v >> 56))
		v <<= 8
		numBits -= 8
	}

	remainder := byte(v >> 56)
	for numBits > 0 {
		val := remainder & 0x80
		// inlined WriteBit
		if os.hasUnusedBits() {
			os.fillUnused(val)
			os.pos++
		} else {
			os.grow(val, 1)
		}
		remainder <<= 1
		numBits--
	}
}

func (os *ostream) Discard() checked.Bytes {
	os.repairCheckedBytes()

	buffer := os.checked
	buffer.DecRef()

	os.rawBuffer = nil
	os.pos = 0
	os.checked = nil

	return buffer
}

func (os *ostream) Reset(buffer checked.Bytes) {
	if os.checked != nil {
		// Release ref of the current raw buffer
		os.checked.DecRef()
		os.checked.Finalize()

		os.rawBuffer = nil
		os.checked = nil
	}

	if buffer != nil {
		// Track ref to the new raw buffer
		buffer.IncRef()

		os.checked = buffer
		os.rawBuffer = os.checked.Bytes()
	}

	os.pos = 0
	if os.Len() > 0 {
		// If the byte slice passed in is not empty, we set
		// pos to 8 indicating the last byte is fully used.
		os.pos = 8
	}
}

func (os *ostream) RawBytes() ([]byte, int) {
	return os.rawBuffer, os.pos
}

func (os *ostream) CheckedBytes() (checked.Bytes, int) {
	return os.checked, os.pos
}

// repairCheckedBytes makes sure that the checked.Bytes wraps the rawBuffer as
// they may have fallen out of sync during the writing process.
func (os *ostream) repairCheckedBytes() {
	if os.checked != nil {
		os.checked.Reset(os.rawBuffer)
	}
}

func max(x, y int) int {
	if x > y {
		return x
	}
	return y
}
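// exampleOStreamUsage is a minimal, illustrative sketch of the intended
// write-then-transfer flow. It assumes a nil bytes pool and the default initial
// allocation; it is not called by the encoder.
func exampleOStreamUsage() (checked.Bytes, int) {
	stream := NewOStream(nil, true, nil)

	stream.WriteBits(0x3, 2)              // the two low bits of 0x3, written MSB-first
	stream.WriteByte(0xAB)                // a full byte spanning the unaligned boundary
	stream.WriteBytes([]byte{0x01, 0x02}) // bulk write; falls back to per-byte writes here

	_, pos := stream.RawBytes() // zero-copy view of the buffer plus bits used in the last byte

	// Discard repairs the checked.Bytes so it wraps the raw buffer and then
	// transfers ownership of it to the caller.
	return stream.Discard(), pos
}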