github.com/sbinet/go@v0.0.0-20160827155028-54d7de7dd62b/src/compress/flate/deflate.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"fmt"
	"io"
	"math"
)

const (
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly = -2
)

const (
	logWindowSize = 15
	windowSize    = 1 << logWindowSize
	windowMask    = windowSize - 1

	// The LZ77 step produces a sequence of literal tokens and <length, offset>
	// pair tokens. The offset is also known as distance. The underlying wire
	// format limits the range of lengths and offsets. For example, there are
	// 256 legitimate lengths: those in the range [3, 258]. This package's
	// compressor uses a higher minimum match length, enabling optimizations
	// such as finding matches via 32-bit loads and compares.
	baseMatchLength = 3       // The smallest match length per the RFC section 3.2.5
	minMatchLength  = 4       // The smallest match length that the compressor actually emits
	maxMatchLength  = 258     // The largest match length
	baseMatchOffset = 1       // The smallest match offset
	maxMatchOffset  = 1 << 15 // The largest match offset

	// The maximum number of tokens we put into a single flate block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 bits, performance degrades.
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	maxHashOffset       = 1 << 24

	skipNever = math.MaxInt32
)

type compressionLevel struct {
	level, good, lazy, nice, chain, fastSkipHashing int
}

var levels = []compressionLevel{
	{0, 0, 0, 0, 0, 0}, // NoCompression.
	{1, 0, 0, 0, 0, 0}, // BestSpeed uses a custom algorithm; see deflatefast.go.
	// For levels 2-3 we don't bother trying with lazy matches.
	{2, 4, 0, 16, 8, 5},
	{3, 4, 0, 32, 32, 6},
	// Levels 4-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{4, 4, 4, 16, 16, skipNever},
	{5, 8, 16, 32, 32, skipNever},
	{6, 8, 16, 128, 128, skipNever},
	{7, 8, 32, 128, 256, skipNever},
	{8, 32, 128, 258, 1024, skipNever},
	{9, 32, 258, 258, 4096, skipNever},
}

type compressor struct {
	compressionLevel

	w          *huffmanBitWriter
	bulkHasher func([]byte, []uint32)

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window
	sync bool                          // requesting flush

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value.
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
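	//
	// For example (an illustrative sketch, not code used by the package),
	// walking the chain of earlier positions whose 4 bytes share the hash
	// of the 4 bytes at window index i looks roughly like this:
	//
	//	h := hash4(d.window[i : i+4])
	//	for j := int(d.hashHead[h&hashMask]) - d.hashOffset; j >= i-windowSize && j >= 0; {
	//		// d.window[j : j+4] hashes to h; j is a candidate match start.
	//		j = int(d.hashPrev[j&windowMask]) - d.hashOffset
	//	}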
	chainHead  int
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int

	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].

	// queued output tokens
	tokens []token

	// deflate state
	length         int
	offset         int
	hash           uint32
	maxInsertIndex int
	err            error

	// hashMatch must be able to contain hashes for the maximum match length.
	hashMatch [maxMatchLength - 1]uint32
}

func (d *compressor) fillDeflate(b []byte) int {
	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window, d.window[windowSize:2*windowSize])
		d.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			// blockStart has slid out of the window; mark it invalid.
			d.blockStart = math.MaxInt32
		}
		d.hashOffset += windowSize
		if d.hashOffset > maxHashOffset {
			delta := d.hashOffset - 1
			d.hashOffset -= delta
			d.chainHead -= delta
			for i, v := range d.hashPrev {
				if int(v) > delta {
					d.hashPrev[i] = uint32(int(v) - delta)
				} else {
					d.hashPrev[i] = 0
				}
			}
			for i, v := range d.hashHead {
				if int(v) > delta {
					d.hashHead[i] = uint32(int(v) - delta)
				} else {
					d.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

func (d *compressor) writeBlock(tokens []token, index int) error {
	if index > 0 {
		var window []byte
		if d.blockStart <= index {
			window = d.window[d.blockStart:index]
		}
		d.blockStart = index
		d.w.writeBlock(tokens, false, window)
		return d.w.err
	}
	return nil
}

// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only mode.
	if d.compressionLevel.level < 2 {
		return
	}
	if d.index != 0 || d.windowEnd != 0 {
		panic("internal error: fillWindow called with stale data")
	}

	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window, b)

	// Calculate 256 hashes at a time (more L1 cache hits).
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		index := j * 256
		end := index + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		toCheck := d.window[index:end]
		dstSize := len(toCheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := d.hashMatch[:dstSize]
		d.bulkHasher(toCheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + index
			newH = val
			hh := &d.hashHead[newH&hashMask]
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			d.hashPrev[di&windowMask] = *hh
			// Set the head of the hash chain to us.
			*hh = uint32(di + d.hashOffset)
		}
		d.hash = newH
	}
	// Update window information.
	d.windowEnd = n
	d.index = n
}

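// For example (an illustrative sketch of how NewWriterDict, defined later in
// this file, uses fillWindow), priming a fresh compressor with a preset
// dictionary before any data is written:
//
//	zw, err := NewWriterDict(w, 6, []byte("a common preamble"))
//	if err != nil {
//		// invalid compression level
//	}
//	// zw's window now holds the dictionary, so matching data written to
//	// zw can be encoded as back-references into it.
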
// findMatch tries to find a match starting at pos whose length is greater
// than prevLength. It examines at most d.chain candidate positions before
// giving up.
func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long.
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 of the chain.
	tries := d.chain
	length = prevLength
	if length >= d.good {
		tries >>= 2
	}

	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:], wPos, minMatchLook)

			if n > length && (n > minMatchLength || pos-i <= 4096) {
				length = n
				offset = pos - i
				ok = true
				if n >= nice {
					// The match is good enough that we don't try to find a better one.
					break
				}
				wEnd = win[pos+n]
			}
		}
		if i == minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
		if i < minIndex || i < 0 {
			break
		}
	}
	return
}

func (d *compressor) writeStoredBlock(buf []byte) error {
	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}

const hashmul = 0x1e35a7bd

// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
	return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
}

// bulkHash4 will compute hashes using the same algorithm as hash4,
// one per 4-byte window of b.
// The caller must ensure that len(dst) >= len(b)-minMatchLength+1.
func bulkHash4(b []byte, dst []uint32) {
	if len(b) < minMatchLength {
		return
	}
	hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
	dst[0] = (hb * hashmul) >> (32 - hashBits)
	end := len(b) - minMatchLength + 1
	for i := 1; i < end; i++ {
		hb = (hb << 8) | uint32(b[i+3])
		dst[i] = (hb * hashmul) >> (32 - hashBits)
	}
}

// matchLen returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
func matchLen(a, b []byte, max int) int {
	a = a[:max]
	b = b[:len(a)]
	for i, av := range a {
		if b[i] != av {
			return i
		}
	}
	return max
}

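// For example (illustrative): matchLen([]byte("gopher!"), []byte("gopher?"), 7)
// returns 6, the length of the common prefix "gopher".
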
// encSpeed will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err.
func (d *compressor) encSpeed() {
	// We only compress once we have at least maxStoreBlockSize buffered.
	if d.windowEnd < maxStoreBlockSize {
		if !d.sync {
			return
		}

		// Handle small sizes.
		if d.windowEnd < 128 {
			switch {
			case d.windowEnd == 0:
				return
			case d.windowEnd <= 16:
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			default:
				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
				d.err = d.w.err
			}
			d.windowEnd = 0
			return
		}

	}
	// Encode the block.
	d.tokens = encodeBestSpeed(d.tokens[:0], d.window[:d.windowEnd])

	// If LZ matching removed less than 1/16th of the input, Huffman compress the block instead.
	if len(d.tokens) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	} else {
		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
	}
	d.err = d.w.err
	d.windowEnd = 0
}

func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.hashOffset = 1
	d.tokens = make([]token, 0, maxFlateBlockTokens+1)
	d.length = minMatchLength - 1
	d.offset = 0
	d.byteAvailable = false
	d.index = 0
	d.hash = 0
	d.chainHead = -1
	d.bulkHasher = bulkHash4
}

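// deflate (below) is the main compression loop for levels 2-9. A rough,
// illustrative sketch of its lazy-matching strategy (not the exact control
// flow, which also interleaves hash-chain maintenance and block flushing):
//
//	for each window position:
//		search the hash chain for the longest match starting here
//		if the previous position already had a match at least this long:
//			emit the previous match as a token; skip past the matched bytes
//		else:
//			emit the byte before this position as a literal; advance by one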
func (d *compressor) deflate() {
	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}

	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
	if d.index < d.maxInsertIndex {
		d.hash = hash4(d.window[d.index : d.index+minMatchLength])
	}

Loop:
	for {
		if d.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - d.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				break Loop
			}
			if d.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed.
					d.tokens = append(d.tokens, literalToken(uint32(d.window[d.index-1])))
					d.byteAvailable = false
				}
				if len(d.tokens) > 0 {
					if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
				break Loop
			}
		}
		if d.index < d.maxInsertIndex {
			// Update the hash
			d.hash = hash4(d.window[d.index : d.index+minMatchLength])
			hh := &d.hashHead[d.hash&hashMask]
			d.chainHead = int(*hh)
			d.hashPrev[d.index&windowMask] = uint32(d.chainHead)
			*hh = uint32(d.index + d.hashOffset)
		}
		prevLength := d.length
		prevOffset := d.offset
		d.length = minMatchLength - 1
		d.offset = 0
		minIndex := d.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if d.chainHead-d.hashOffset >= minIndex &&
			(d.fastSkipHashing != skipNever && lookahead > minMatchLength-1 ||
				d.fastSkipHashing == skipNever && lookahead > prevLength && prevLength < d.lazy) {
			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
				d.length = newLength
				d.offset = newOffset
			}
		}
		if d.fastSkipHashing != skipNever && d.length >= minMatchLength ||
			d.fastSkipHashing == skipNever && prevLength >= minMatchLength && d.length <= prevLength {
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			if d.fastSkipHashing != skipNever {
				d.tokens = append(d.tokens, matchToken(uint32(d.length-baseMatchLength), uint32(d.offset-baseMatchOffset)))
			} else {
				d.tokens = append(d.tokens, matchToken(uint32(prevLength-baseMatchLength), uint32(prevOffset-baseMatchOffset)))
			}
			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			if d.length <= d.fastSkipHashing {
				var newIndex int
				if d.fastSkipHashing != skipNever {
					newIndex = d.index + d.length
				} else {
					newIndex = d.index + prevLength - 1
				}
				for d.index++; d.index < newIndex; d.index++ {
					if d.index < d.maxInsertIndex {
						d.hash = hash4(d.window[d.index : d.index+minMatchLength])
						// Get previous value with the same hash.
						// Our chain should point to the previous value.
						hh := &d.hashHead[d.hash&hashMask]
						d.hashPrev[d.index&windowMask] = *hh
						// Set the head of the hash chain to us.
						*hh = uint32(d.index + d.hashOffset)
					}
				}
				if d.fastSkipHashing == skipNever {
					d.byteAvailable = false
					d.length = minMatchLength - 1
				}
			} else {
				// For matches this long, we don't bother inserting each individual
				// item into the table.
				d.index += d.length
				if d.index < d.maxInsertIndex {
					d.hash = hash4(d.window[d.index : d.index+minMatchLength])
				}
			}
			if len(d.tokens) == maxFlateBlockTokens {
				// The block includes the current character.
				if d.err = d.writeBlock(d.tokens, d.index); d.err != nil {
					return
				}
				d.tokens = d.tokens[:0]
			}
		} else {
			if d.fastSkipHashing != skipNever || d.byteAvailable {
				i := d.index - 1
				if d.fastSkipHashing != skipNever {
					i = d.index
				}
				d.tokens = append(d.tokens, literalToken(uint32(d.window[i])))
				if len(d.tokens) == maxFlateBlockTokens {
					if d.err = d.writeBlock(d.tokens, i+1); d.err != nil {
						return
					}
					d.tokens = d.tokens[:0]
				}
			}
			d.index++
			if d.fastSkipHashing == skipNever {
				d.byteAvailable = true
			}
		}
	}
}

func (d *compressor) fillStore(b []byte) int {
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

func (d *compressor) store() {
	if d.windowEnd > 0 {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
	}
	d.windowEnd = 0
}

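// For example (illustrative), a Writer that takes the storeHuff path below,
// re-encoding already-LZ-compressed data with Huffman codes only
// (snappyCompressed is a hypothetical input buffer):
//
//	zw, err := NewWriter(w, HuffmanOnly)
//	if err != nil {
//		// invalid compression level
//	}
//	zw.Write(snappyCompressed)
//	zw.Close()
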
// storeHuff compresses and stores the currently added data
// when the d.window is full or we are at the end of the stream.
// Any error that occurred will be in d.err.
func (d *compressor) storeHuff() {
	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	d.err = d.w.err
	d.windowEnd = 0
}

func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	n = len(b)
	for len(b) > 0 {
		d.step(d)
		b = b[d.fill(d, b):]
		if d.err != nil {
			return 0, d.err
		}
	}
	return n, nil
}

func (d *compressor) syncFlush() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err == nil {
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}

func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)

	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).store
	case level == HuffmanOnly:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).storeHuff
	case level == BestSpeed:
		d.compressionLevel = levels[level]
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillStore
		d.step = (*compressor).encSpeed
		d.tokens = make([]token, maxStoreBlockSize)
	case level == DefaultCompression:
		level = 6
		fallthrough
	case 2 <= level && level <= 9:
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflate
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	return nil
}

func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	switch d.compressionLevel.level {
	case NoCompression:
		d.windowEnd = 0
	case BestSpeed:
		d.windowEnd = 0
		d.tokens = d.tokens[:0]
	default:
		d.chainHead = -1
		for i := range d.hashHead {
			d.hashHead[i] = 0
		}
		for i := range d.hashPrev {
			d.hashPrev[i] = 0
		}
		d.hashOffset = 1
		d.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens = d.tokens[:0]
		d.length = minMatchLength - 1
		d.offset = 0
		d.hash = 0
		d.maxInsertIndex = 0
	}
}

func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	if d.w.writeStoredHeader(0, true); d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	return d.w.err
}

// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more. Level 0
// (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (HuffmanOnly) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
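//
// For example (an illustrative sketch; assumes a "bytes" import and elides
// error handling):
//
//	var buf bytes.Buffer
//	zw, _ := NewWriter(&buf, BestCompression)
//	zw.Write([]byte("hello, hello, hello"))
//	zw.Close() // buf now holds a complete DEFLATE stream.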
func NewWriter(w io.Writer, level int) (*Writer, error) {
	var dw Writer
	if err := dw.d.init(w, level); err != nil {
		return nil, err
	}
	return &dw, nil
}

// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	dw := &dictWriter{w}
	zw, err := NewWriter(dw, level)
	if err != nil {
		return nil, err
	}
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, err
}

// dictWriter wraps the target writer so that Reset can tell whether
// this Writer was created by NewWriterDict.
type dictWriter struct {
	w io.Writer
}

func (w *dictWriter) Write(b []byte) (n int, err error) {
	return w.w.Write(b)
}

// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte
}

// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}

// Flush flushes any pending compressed data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}

// Close flushes and closes the writer.
func (w *Writer) Close() error {
	return w.d.close()
}

// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	if dw, ok := w.d.w.writer.(*dictWriter); ok {
		// w was created with NewWriterDict
		dw.w = dst
		w.d.reset(dw)
		w.d.fillWindow(w.dict)
	} else {
		// w was created with NewWriter
		w.d.reset(dst)
	}
}
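
// For example (illustrative), Reset lets a single Writer be reused across
// several outputs instead of allocating a new compressor each time
// (first and second are hypothetical io.Writers):
//
//	zw, _ := NewWriter(first, DefaultCompression)
//	zw.Write(a)
//	zw.Close()
//	zw.Reset(second) // zw now writes a fresh stream to second.
//	zw.Write(b)
//	zw.Close()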