github.com/insionng/yougam@v0.0.0-20170714101924-2bc18d833463/libraries/klauspost/compress/flate/deflate.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Copyright (c) 2015 Klaus Post
     3  // Use of this source code is governed by a BSD-style
     4  // license that can be found in the LICENSE file.
     5  
     6  package flate
     7  
     8  import (
     9  	"fmt"
    10  	"io"
    11  	"math"
    12  )
    13  
    14  const (
    15  	NoCompression       = 0
    16  	BestSpeed           = 1
    17  	BestCompression     = 9
    18  	DefaultCompression  = -1
    19  	ConstantCompression = -2 // Does only Huffman encoding
    20  	logWindowSize       = 15
    21  	windowSize          = 1 << logWindowSize
    22  	windowMask          = windowSize - 1
    23  	logMaxOffsetSize    = 15  // Standard DEFLATE
    24  	minMatchLength      = 4   // The smallest match that the compressor looks for
    25  	maxMatchLength      = 258 // The longest match for the compressor
    26  	minOffsetSize       = 1   // The shortest offset that makes any sense
    27  
     28  	// The maximum number of tokens we will put into a single flate block, just to
     29  	// stop things from getting too large.
    30  	maxFlateBlockTokens = 1 << 14
    31  	maxStoreBlockSize   = 65535
    32  	hashBits            = 17 // After 17 performance degrades
    33  	hashSize            = 1 << hashBits
    34  	hashMask            = (1 << hashBits) - 1
    35  	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
    36  	maxHashOffset       = 1 << 24
    37  
    38  	skipNever = math.MaxInt32
    39  )
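
// Note (added for clarity): with hashBits = 17 and minMatchLength = 4, hashShift
// works out to (17+4-1)/4 = 5, so oldHash below combines four input bytes at bit
// shifts of 15, 10, 5 and 0; callers mask the result with hashMask to keep it
// within hashBits bits.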
    40  
    41  var useSSE42 bool
    42  
    43  type compressionLevel struct {
    44  	good, lazy, nice, chain, fastSkipHashing, level int
    45  }
    46  
    47  // Compression levels have been rebalanced from zlib deflate defaults
    48  // to give a bigger spread in speed and compression.
    49  // See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
    50  var levels = []compressionLevel{
    51  	{}, // 0
     52  	// Levels 1-3 use the Snappy-based algorithm (see init) - values not used
    53  	{0, 0, 0, 0, 0, 1},
    54  	{0, 0, 0, 0, 0, 2},
    55  	// For levels 3-6 we don't bother trying with lazy matches.
     56  	// Lazy matching is at least 30% slower, for only about a 1.5% increase in compression.
    57  	{4, 0, 8, 4, 4, 3},
    58  	{4, 0, 12, 6, 5, 4},
    59  	{6, 0, 24, 16, 6, 5},
    60  	{8, 0, 32, 32, 7, 6},
    61  	// Levels 7-9 use increasingly more lazy matching
    62  	// and increasingly stringent conditions for "good enough".
    63  	{4, 8, 16, 16, skipNever, 7},
    64  	{6, 16, 32, 64, skipNever, 8},
    65  	{32, 258, 258, 4096, skipNever, 9},
    66  }
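
// Rough field guide (added for clarity; the code below is authoritative):
//   good:            once a match of at least this length is found, only 1/4 of the chain is searched,
//   lazy:            lazy matching is attempted only while the previous match is shorter than this,
//   nice:            stop searching as soon as a match of at least this length is found,
//   chain:           maximum number of hash-chain entries examined per position,
//   fastSkipHashing: shift controlling how aggressively literals are skipped after
//                    a run of no matches, or skipNever to select the lazy matchers.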
    67  
    68  type hashid uint32
    69  
    70  type compressor struct {
    71  	compressionLevel
    72  
    73  	w          *huffmanBitWriter
    74  	bulkHasher func([]byte, []hash)
    75  
    76  	// compression algorithm
    77  	fill func(*compressor, []byte) int // copy data to window
    78  	step func(*compressor)             // process window
    79  	sync bool                          // requesting flush
    80  
    81  	// Input hash chains
    82  	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
    83  	// If hashHead[hashValue] is within the current window, then
    84  	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
    85  	// with the same hash value.
    86  	chainHead  int
    87  	hashHead   []hashid
    88  	hashPrev   []hashid
    89  	hashOffset int
    90  
    91  	// input window: unprocessed data is window[index:windowEnd]
    92  	index         int
    93  	window        []byte
    94  	windowEnd     int
    95  	blockStart    int  // window index where current tokens start
    96  	byteAvailable bool // if true, still need to process window[index-1].
    97  
    98  	// queued output tokens
    99  	tokens tokens
   100  
   101  	// deflate state
   102  	length         int
   103  	offset         int
   104  	hash           hash
   105  	maxInsertIndex int
   106  	err            error
    107  	ii             uint16 // count of positions since the last match, intended to overflow to reset.
   108  
   109  	snap      snappyEnc
   110  	hashMatch [maxMatchLength + minMatchLength]hash
   111  }
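
// Illustrative sketch (added; findMatch below is the authoritative version):
// walking the hash chain for earlier positions with the same hash value looks like
//
//	i := d.chainHead - d.hashOffset
//	for i >= minIndex {        // minIndex = pos - windowSize, clamped to 0
//		// d.window[i:] is a candidate match position
//		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
//	}
//
// hashOffset keeps stored ids positive, so a zero entry always means "empty",
// even after the window has been shifted by fillDeflate.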
   112  
   113  type hash int32
   114  
   115  func (d *compressor) fillDeflate(b []byte) int {
   116  	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
   117  		// shift the window by windowSize
   118  		copy(d.window, d.window[windowSize:2*windowSize])
   119  		d.index -= windowSize
   120  		d.windowEnd -= windowSize
   121  		if d.blockStart >= windowSize {
   122  			d.blockStart -= windowSize
   123  		} else {
   124  			d.blockStart = math.MaxInt32
   125  		}
   126  		d.hashOffset += windowSize
   127  		if d.hashOffset > maxHashOffset {
   128  			delta := d.hashOffset - 1
   129  			d.hashOffset -= delta
   130  			d.chainHead -= delta
   131  			for i, v := range d.hashPrev {
   132  				if int(v) > delta {
   133  					d.hashPrev[i] = hashid(int(v) - delta)
   134  				} else {
   135  					d.hashPrev[i] = 0
   136  				}
   137  			}
   138  			for i, v := range d.hashHead {
   139  				if int(v) > delta {
   140  					d.hashHead[i] = hashid(int(v) - delta)
   141  				} else {
   142  					d.hashHead[i] = 0
   143  				}
   144  			}
   145  		}
   146  	}
   147  	n := copy(d.window[d.windowEnd:], b)
   148  	d.windowEnd += n
   149  	return n
   150  }
   151  
   152  func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
   153  	if index > 0 || eof {
   154  		var window []byte
   155  		if d.blockStart <= index {
   156  			window = d.window[d.blockStart:index]
   157  		}
   158  		d.blockStart = index
   159  		d.w.writeBlock(tok, eof, window)
   160  		return d.w.err
   161  	}
   162  	return nil
   163  }
   164  
    165  // writeBlockSkip writes the current block and uses the number of tokens
    166  // to decide whether the block should be Huffman encoded only (when matching
    167  // removed few literals) or written as a dynamic Huffman block.
   168  func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
   169  	if index > 0 || eof {
   170  		if d.blockStart <= index {
   171  			window := d.window[d.blockStart:index]
    172  			// If matching removed less than a 64th of all literals,
    173  			// we Huffman-only compress the block.
   174  			if tok.n > len(window)-(tok.n>>6) {
   175  				d.w.writeBlockHuff(eof, window)
   176  			} else {
   177  				// Write a dynamic huffman block.
   178  				d.w.writeBlockDynamic(tok, eof, window)
   179  			}
   180  		} else {
   181  			d.w.writeBlock(tok, eof, nil)
   182  		}
   183  		d.blockStart = index
   184  		return d.w.err
   185  	}
   186  	return nil
   187  }
   188  
   189  // fillWindow will fill the current window with the supplied
   190  // dictionary and calculate all hashes.
   191  // This is much faster than doing a full encode.
   192  // Should only be used after a start/reset.
   193  func (d *compressor) fillWindow(b []byte) {
    194  	// Do not fill the window if we are in store-only mode,
    195  	// or using constant or Snappy compression.
   196  	switch d.compressionLevel.level {
   197  	case 0, 1, 2:
   198  		return
   199  	}
   200  	// If we are given too much, cut it.
   201  	if len(b) > windowSize {
   202  		b = b[len(b)-windowSize:]
   203  	}
   204  	// Add all to window.
   205  	n := copy(d.window[d.windowEnd:], b)
   206  
    207  	// Calculate 256 hashes at a time (more L1 cache hits)
   208  	loops := (n + 256 - minMatchLength) / 256
   209  	for j := 0; j < loops; j++ {
   210  		startindex := j * 256
   211  		end := startindex + 256 + minMatchLength - 1
   212  		if end > n {
   213  			end = n
   214  		}
   215  		tocheck := d.window[startindex:end]
   216  		dstSize := len(tocheck) - minMatchLength + 1
   217  
   218  		if dstSize <= 0 {
   219  			continue
   220  		}
   221  
   222  		dst := d.hashMatch[:dstSize]
   223  		d.bulkHasher(tocheck, dst)
   224  		var newH hash
   225  		for i, val := range dst {
   226  			di := i + startindex
   227  			newH = val & hashMask
   228  			// Get previous value with the same hash.
   229  			// Our chain should point to the previous value.
   230  			d.hashPrev[di&windowMask] = d.hashHead[newH]
   231  			// Set the head of the hash chain to us.
   232  			d.hashHead[newH] = hashid(di + d.hashOffset)
   233  		}
   234  		d.hash = newH
   235  	}
   236  	// Update window information.
   237  	d.windowEnd += n
   238  	d.index = n
   239  }
   240  
    241  // findMatch tries to find a match starting at pos whose length is greater than prevLength.
    242  // We only look at d.chain possibilities before giving up.
    243  // Typical call: pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength = minMatchLength-1, lookahead = d.windowEnd-d.index.
   244  func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
   245  	minMatchLook := maxMatchLength
   246  	if lookahead < minMatchLook {
   247  		minMatchLook = lookahead
   248  	}
   249  
   250  	win := d.window[0 : pos+minMatchLook]
   251  
   252  	// We quit when we get a match that's at least nice long
   253  	nice := len(win) - pos
   254  	if d.nice < nice {
   255  		nice = d.nice
   256  	}
   257  
   258  	// If we've got a match that's good enough, only look in 1/4 the chain.
   259  	tries := d.chain
   260  	length = prevLength
   261  	if length >= d.good {
   262  		tries >>= 2
   263  	}
   264  
   265  	wEnd := win[pos+length]
   266  	wPos := win[pos:]
   267  	minIndex := pos - windowSize
   268  
   269  	for i := prevHead; tries > 0; tries-- {
   270  		if wEnd == win[i+length] {
   271  			n := matchLen(win[i:], wPos, minMatchLook)
   272  
   273  			if n > length && (n > minMatchLength || pos-i <= 4096) {
   274  				length = n
   275  				offset = pos - i
   276  				ok = true
   277  				if n >= nice {
   278  					// The match is good enough that we don't try to find a better one.
   279  					break
   280  				}
   281  				wEnd = win[pos+n]
   282  			}
   283  		}
   284  		if i == minIndex {
   285  			// hashPrev[i & windowMask] has already been overwritten, so stop now.
   286  			break
   287  		}
   288  		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
   289  		if i < minIndex || i < 0 {
   290  			break
   291  		}
   292  	}
   293  	return
   294  }
   295  
    296  // findMatchSSE is like findMatch, but uses the SSE4.2 assisted matchLenSSE4.
    297  // We only look at d.chain possibilities before giving up.
    298  // Typical call: pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength = minMatchLength-1, lookahead = d.windowEnd-d.index.
   299  func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
   300  	minMatchLook := maxMatchLength
   301  	if lookahead < minMatchLook {
   302  		minMatchLook = lookahead
   303  	}
   304  
   305  	win := d.window[0 : pos+minMatchLook]
   306  
   307  	// We quit when we get a match that's at least nice long
   308  	nice := len(win) - pos
   309  	if d.nice < nice {
   310  		nice = d.nice
   311  	}
   312  
   313  	// If we've got a match that's good enough, only look in 1/4 the chain.
   314  	tries := d.chain
   315  	length = prevLength
   316  	if length >= d.good {
   317  		tries >>= 2
   318  	}
   319  
   320  	wEnd := win[pos+length]
   321  	wPos := win[pos:]
   322  	minIndex := pos - windowSize
   323  
   324  	for i := prevHead; tries > 0; tries-- {
   325  		if wEnd == win[i+length] {
   326  			n := matchLenSSE4(win[i:], wPos, minMatchLook)
   327  
   328  			if n > length && (n > minMatchLength || pos-i <= 4096) {
   329  				length = n
   330  				offset = pos - i
   331  				ok = true
   332  				if n >= nice {
   333  					// The match is good enough that we don't try to find a better one.
   334  					break
   335  				}
   336  				wEnd = win[pos+n]
   337  			}
   338  		}
   339  		if i == minIndex {
   340  			// hashPrev[i & windowMask] has already been overwritten, so stop now.
   341  			break
   342  		}
   343  		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
   344  		if i < minIndex || i < 0 {
   345  			break
   346  		}
   347  	}
   348  	return
   349  }
   350  
   351  func (d *compressor) writeStoredBlock(buf []byte) error {
   352  	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
   353  		return d.w.err
   354  	}
   355  	d.w.writeBytes(buf)
   356  	return d.w.err
   357  }
   358  
    359  // oldHash is the hash function used when no hardware-assisted (SSE4.2 CRC32)
    360  // hashing is available.
   361  func oldHash(b []byte) hash {
   362  	return hash(b[0])<<(hashShift*3) + hash(b[1])<<(hashShift*2) + hash(b[2])<<hashShift + hash(b[3])
   363  }
   364  
   365  // oldBulkHash will compute hashes using the same
   366  // algorithm as oldHash
   367  func oldBulkHash(b []byte, dst []hash) {
   368  	if len(b) < minMatchLength {
   369  		return
   370  	}
   371  	h := oldHash(b)
   372  	dst[0] = h
   373  	i := 1
   374  	end := len(b) - minMatchLength + 1
   375  	for ; i < end; i++ {
   376  		h = (h << hashShift) + hash(b[i+3])
   377  		dst[i] = h
   378  	}
   379  }
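
// Note (added for clarity): oldBulkHash rolls the hash forward with
// h = (h << hashShift) + hash(b[i+3]). After masking with hashMask this equals
// oldHash recomputed at each position, because the contribution of any byte more
// than minMatchLength positions back is shifted above bit hashBits-1 and removed
// by the mask.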
   380  
   381  // matchLen returns the number of matching bytes in a and b
   382  // up to length 'max'. Both slices must be at least 'max'
   383  // bytes in size.
   384  func matchLen(a, b []byte, max int) int {
   385  	a = a[:max]
   386  	for i, av := range a {
   387  		if b[i] != av {
   388  			return i
   389  		}
   390  	}
   391  	return max
   392  }
   393  
   394  func (d *compressor) initDeflate() {
   395  	d.hashHead = make([]hashid, hashSize)
   396  	d.hashPrev = make([]hashid, windowSize)
   397  	d.window = make([]byte, 2*windowSize)
   398  	d.hashOffset = 1
   399  	d.tokens.tokens = make([]token, maxFlateBlockTokens+1)
   400  	d.length = minMatchLength - 1
   401  	d.offset = 0
   402  	d.byteAvailable = false
   403  	d.index = 0
   404  	d.hash = 0
   405  	d.chainHead = -1
   406  	d.bulkHasher = oldBulkHash
   407  	if useSSE42 {
   408  		d.bulkHasher = crc32sseAll
   409  	}
   410  }
   411  
   412  // Assumes that d.fastSkipHashing != skipNever,
   413  // otherwise use deflateLazy
   414  func (d *compressor) deflate() {
   415  
   416  	// Sanity enables additional runtime tests.
   417  	// It's intended to be used during development
   418  	// to supplement the currently ad-hoc unit tests.
   419  	const sanity = false
   420  
   421  	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
   422  		return
   423  	}
   424  
   425  	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
   426  	if d.index < d.maxInsertIndex {
   427  		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
   428  	}
   429  
   430  	for {
   431  		if sanity && d.index > d.windowEnd {
   432  			panic("index > windowEnd")
   433  		}
   434  		lookahead := d.windowEnd - d.index
   435  		if lookahead < minMatchLength+maxMatchLength {
   436  			if !d.sync {
   437  				return
   438  			}
   439  			if sanity && d.index > d.windowEnd {
   440  				panic("index > windowEnd")
   441  			}
   442  			if lookahead == 0 {
   443  				if d.tokens.n > 0 {
   444  					if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
   445  						return
   446  					}
   447  					d.tokens.n = 0
   448  				}
   449  				return
   450  			}
   451  		}
   452  		if d.index < d.maxInsertIndex {
   453  			// Update the hash
   454  			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
   455  			ch := d.hashHead[d.hash]
   456  			d.chainHead = int(ch)
   457  			d.hashPrev[d.index&windowMask] = ch
   458  			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
   459  		}
   460  		d.length = minMatchLength - 1
   461  		d.offset = 0
   462  		minIndex := d.index - windowSize
   463  		if minIndex < 0 {
   464  			minIndex = 0
   465  		}
   466  
   467  		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
   468  			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
   469  				d.length = newLength
   470  				d.offset = newOffset
   471  			}
   472  		}
   473  		if d.length >= minMatchLength {
   474  			d.ii = 0
    475  			// We have found a match of at least minMatchLength at the current
    476  			// position. Emit it as a match token.
    477  			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assumes 3.
   478  			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
   479  			d.tokens.n++
   480  			// Insert in the hash table all strings up to the end of the match.
   481  			// index and index-1 are already inserted. If there is not enough
   482  			// lookahead, the last two strings are not inserted into the hash
   483  			// table.
   484  			if d.length <= d.fastSkipHashing {
   485  				var newIndex int
   486  				newIndex = d.index + d.length
   487  				// Calculate missing hashes
   488  				end := newIndex
   489  				if end > d.maxInsertIndex {
   490  					end = d.maxInsertIndex
   491  				}
   492  				end += minMatchLength - 1
   493  				startindex := d.index + 1
   494  				if startindex > d.maxInsertIndex {
   495  					startindex = d.maxInsertIndex
   496  				}
   497  				tocheck := d.window[startindex:end]
   498  				dstSize := len(tocheck) - minMatchLength + 1
   499  				if dstSize > 0 {
   500  					dst := d.hashMatch[:dstSize]
   501  					oldBulkHash(tocheck, dst)
   502  					var newH hash
   503  					for i, val := range dst {
   504  						di := i + startindex
   505  						newH = val & hashMask
   506  						// Get previous value with the same hash.
   507  						// Our chain should point to the previous value.
   508  						d.hashPrev[di&windowMask] = d.hashHead[newH]
   509  						// Set the head of the hash chain to us.
   510  						d.hashHead[newH] = hashid(di + d.hashOffset)
   511  					}
   512  					d.hash = newH
   513  				}
   514  				d.index = newIndex
   515  			} else {
   516  				// For matches this long, we don't bother inserting each individual
   517  				// item into the table.
   518  				d.index += d.length
   519  				if d.index < d.maxInsertIndex {
   520  					d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
   521  				}
   522  			}
   523  			if d.tokens.n == maxFlateBlockTokens {
   524  				// The block includes the current character
   525  				if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
   526  					return
   527  				}
   528  				d.tokens.n = 0
   529  			}
   530  		} else {
   531  			d.ii++
   532  			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
   533  			if end > d.windowEnd {
   534  				end = d.windowEnd
   535  			}
   536  			for i := d.index; i < end; i++ {
   537  				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
   538  				d.tokens.n++
   539  				if d.tokens.n == maxFlateBlockTokens {
   540  					if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
   541  						return
   542  					}
   543  					d.tokens.n = 0
   544  				}
   545  			}
   546  			d.index = end
   547  		}
   548  	}
   549  }
   550  
   551  // deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
   552  // meaning it always has lazy matching on.
   553  func (d *compressor) deflateLazy() {
   554  	// Sanity enables additional runtime tests.
   555  	// It's intended to be used during development
   556  	// to supplement the currently ad-hoc unit tests.
   557  	const sanity = false
   558  
   559  	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
   560  		return
   561  	}
   562  
   563  	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
   564  	if d.index < d.maxInsertIndex {
   565  		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
   566  	}
   567  
   568  	for {
   569  		if sanity && d.index > d.windowEnd {
   570  			panic("index > windowEnd")
   571  		}
   572  		lookahead := d.windowEnd - d.index
   573  		if lookahead < minMatchLength+maxMatchLength {
   574  			if !d.sync {
   575  				return
   576  			}
   577  			if sanity && d.index > d.windowEnd {
   578  				panic("index > windowEnd")
   579  			}
   580  			if lookahead == 0 {
   581  				// Flush current output block if any.
   582  				if d.byteAvailable {
   583  					// There is still one pending token that needs to be flushed
   584  					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
   585  					d.tokens.n++
   586  					d.byteAvailable = false
   587  				}
   588  				if d.tokens.n > 0 {
   589  					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   590  						return
   591  					}
   592  					d.tokens.n = 0
   593  				}
   594  				return
   595  			}
   596  		}
   597  		if d.index < d.maxInsertIndex {
   598  			// Update the hash
   599  			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
   600  			ch := d.hashHead[d.hash]
   601  			d.chainHead = int(ch)
   602  			d.hashPrev[d.index&windowMask] = ch
   603  			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
   604  		}
   605  		prevLength := d.length
   606  		prevOffset := d.offset
   607  		d.length = minMatchLength - 1
   608  		d.offset = 0
   609  		minIndex := d.index - windowSize
   610  		if minIndex < 0 {
   611  			minIndex = 0
   612  		}
   613  
   614  		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
   615  			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
   616  				d.length = newLength
   617  				d.offset = newOffset
   618  			}
   619  		}
   620  		if prevLength >= minMatchLength && d.length <= prevLength {
   621  			// There was a match at the previous step, and the current match is
   622  			// not better. Output the previous match.
   623  			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
   624  			d.tokens.n++
   625  
   626  			// Insert in the hash table all strings up to the end of the match.
   627  			// index and index-1 are already inserted. If there is not enough
   628  			// lookahead, the last two strings are not inserted into the hash
   629  			// table.
   630  			var newIndex int
   631  			newIndex = d.index + prevLength - 1
   632  			// Calculate missing hashes
   633  			end := newIndex
   634  			if end > d.maxInsertIndex {
   635  				end = d.maxInsertIndex
   636  			}
   637  			end += minMatchLength - 1
   638  			startindex := d.index + 1
   639  			if startindex > d.maxInsertIndex {
   640  				startindex = d.maxInsertIndex
   641  			}
   642  			tocheck := d.window[startindex:end]
   643  			dstSize := len(tocheck) - minMatchLength + 1
   644  			if dstSize > 0 {
   645  				dst := d.hashMatch[:dstSize]
   646  				oldBulkHash(tocheck, dst)
   647  				var newH hash
   648  				for i, val := range dst {
   649  					di := i + startindex
   650  					newH = val & hashMask
   651  					// Get previous value with the same hash.
   652  					// Our chain should point to the previous value.
   653  					d.hashPrev[di&windowMask] = d.hashHead[newH]
   654  					// Set the head of the hash chain to us.
   655  					d.hashHead[newH] = hashid(di + d.hashOffset)
   656  				}
   657  				d.hash = newH
   658  			}
   659  
   660  			d.index = newIndex
   661  			d.byteAvailable = false
   662  			d.length = minMatchLength - 1
   663  			if d.tokens.n == maxFlateBlockTokens {
   664  				// The block includes the current character
   665  				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   666  					return
   667  				}
   668  				d.tokens.n = 0
   669  			}
   670  		} else {
   671  			// Reset, if we got a match this run.
   672  			if d.length >= minMatchLength {
   673  				d.ii = 0
   674  			}
   675  			// We have a byte waiting. Emit it.
   676  			if d.byteAvailable {
   677  				d.ii++
   678  				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
   679  				d.tokens.n++
   680  				if d.tokens.n == maxFlateBlockTokens {
   681  					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   682  						return
   683  					}
   684  					d.tokens.n = 0
   685  				}
   686  				d.index++
   687  
    688  				// If we have a long run of no matches, emit additional bytes as literals
    689  				// and skip ahead faster. Resets when d.ii overflows after 64KB.
   690  				if d.ii > 31 {
   691  					n := int(d.ii >> 6)
   692  					for j := 0; j < n; j++ {
   693  						if d.index >= d.windowEnd-1 {
   694  							break
   695  						}
   696  
   697  						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
   698  						d.tokens.n++
   699  						if d.tokens.n == maxFlateBlockTokens {
   700  							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   701  								return
   702  							}
   703  							d.tokens.n = 0
   704  						}
   705  						d.index++
   706  					}
   707  					// Flush last byte
   708  					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
   709  					d.tokens.n++
   710  					d.byteAvailable = false
   711  					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
   712  					if d.tokens.n == maxFlateBlockTokens {
   713  						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   714  							return
   715  						}
   716  						d.tokens.n = 0
   717  					}
   718  				}
   719  			} else {
   720  				d.index++
   721  				d.byteAvailable = true
   722  			}
   723  		}
   724  	}
   725  }
   726  
   727  // Assumes that d.fastSkipHashing != skipNever,
   728  // otherwise use deflateLazySSE
   729  func (d *compressor) deflateSSE() {
   730  
   731  	// Sanity enables additional runtime tests.
   732  	// It's intended to be used during development
   733  	// to supplement the currently ad-hoc unit tests.
   734  	const sanity = false
   735  
   736  	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
   737  		return
   738  	}
   739  
   740  	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
   741  	if d.index < d.maxInsertIndex {
   742  		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
   743  	}
   744  
   745  	for {
   746  		if sanity && d.index > d.windowEnd {
   747  			panic("index > windowEnd")
   748  		}
   749  		lookahead := d.windowEnd - d.index
   750  		if lookahead < minMatchLength+maxMatchLength {
   751  			if !d.sync {
   752  				return
   753  			}
   754  			if sanity && d.index > d.windowEnd {
   755  				panic("index > windowEnd")
   756  			}
   757  			if lookahead == 0 {
   758  				if d.tokens.n > 0 {
   759  					if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
   760  						return
   761  					}
   762  					d.tokens.n = 0
   763  				}
   764  				return
   765  			}
   766  		}
   767  		if d.index < d.maxInsertIndex {
   768  			// Update the hash
   769  			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
   770  			ch := d.hashHead[d.hash]
   771  			d.chainHead = int(ch)
   772  			d.hashPrev[d.index&windowMask] = ch
   773  			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
   774  		}
   775  		d.length = minMatchLength - 1
   776  		d.offset = 0
   777  		minIndex := d.index - windowSize
   778  		if minIndex < 0 {
   779  			minIndex = 0
   780  		}
   781  
   782  		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
   783  			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
   784  				d.length = newLength
   785  				d.offset = newOffset
   786  			}
   787  		}
   788  		if d.length >= minMatchLength {
   789  			d.ii = 0
    790  			// We have found a match of at least minMatchLength at the current
    791  			// position. Emit it as a match token.
    792  			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assumes 3.
   793  			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
   794  			d.tokens.n++
   795  			// Insert in the hash table all strings up to the end of the match.
   796  			// index and index-1 are already inserted. If there is not enough
   797  			// lookahead, the last two strings are not inserted into the hash
   798  			// table.
   799  			if d.length <= d.fastSkipHashing {
   800  				var newIndex int
   801  				newIndex = d.index + d.length
   802  				// Calculate missing hashes
   803  				end := newIndex
   804  				if end > d.maxInsertIndex {
   805  					end = d.maxInsertIndex
   806  				}
   807  				end += minMatchLength - 1
   808  				startindex := d.index + 1
   809  				if startindex > d.maxInsertIndex {
   810  					startindex = d.maxInsertIndex
   811  				}
   812  				tocheck := d.window[startindex:end]
   813  				dstSize := len(tocheck) - minMatchLength + 1
   814  				if dstSize > 0 {
   815  					dst := d.hashMatch[:dstSize]
   816  
   817  					crc32sseAll(tocheck, dst)
   818  					var newH hash
   819  					for i, val := range dst {
   820  						di := i + startindex
   821  						newH = val & hashMask
   822  						// Get previous value with the same hash.
   823  						// Our chain should point to the previous value.
   824  						d.hashPrev[di&windowMask] = d.hashHead[newH]
   825  						// Set the head of the hash chain to us.
   826  						d.hashHead[newH] = hashid(di + d.hashOffset)
   827  					}
   828  					d.hash = newH
   829  				}
   830  				d.index = newIndex
   831  			} else {
   832  				// For matches this long, we don't bother inserting each individual
   833  				// item into the table.
   834  				d.index += d.length
   835  				if d.index < d.maxInsertIndex {
   836  					d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
   837  				}
   838  			}
   839  			if d.tokens.n == maxFlateBlockTokens {
   840  				// The block includes the current character
   841  				if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
   842  					return
   843  				}
   844  				d.tokens.n = 0
   845  			}
   846  		} else {
   847  			d.ii++
   848  			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
   849  			if end > d.windowEnd {
   850  				end = d.windowEnd
   851  			}
   852  			for i := d.index; i < end; i++ {
   853  				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
   854  				d.tokens.n++
   855  				if d.tokens.n == maxFlateBlockTokens {
   856  					if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
   857  						return
   858  					}
   859  					d.tokens.n = 0
   860  				}
   861  			}
   862  			d.index = end
   863  		}
   864  	}
   865  }
   866  
    867  // deflateLazySSE is the same as deflateSSE, but with d.fastSkipHashing == skipNever,
    868  // meaning it always has lazy matching on.
   869  func (d *compressor) deflateLazySSE() {
   870  	// Sanity enables additional runtime tests.
   871  	// It's intended to be used during development
   872  	// to supplement the currently ad-hoc unit tests.
   873  	const sanity = false
   874  
   875  	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
   876  		return
   877  	}
   878  
   879  	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
   880  	if d.index < d.maxInsertIndex {
   881  		d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
   882  	}
   883  
   884  	for {
   885  		if sanity && d.index > d.windowEnd {
   886  			panic("index > windowEnd")
   887  		}
   888  		lookahead := d.windowEnd - d.index
   889  		if lookahead < minMatchLength+maxMatchLength {
   890  			if !d.sync {
   891  				return
   892  			}
   893  			if sanity && d.index > d.windowEnd {
   894  				panic("index > windowEnd")
   895  			}
   896  			if lookahead == 0 {
   897  				// Flush current output block if any.
   898  				if d.byteAvailable {
   899  					// There is still one pending token that needs to be flushed
   900  					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
   901  					d.tokens.n++
   902  					d.byteAvailable = false
   903  				}
   904  				if d.tokens.n > 0 {
   905  					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   906  						return
   907  					}
   908  					d.tokens.n = 0
   909  				}
   910  				return
   911  			}
   912  		}
   913  		if d.index < d.maxInsertIndex {
   914  			// Update the hash
   915  			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
   916  			ch := d.hashHead[d.hash]
   917  			d.chainHead = int(ch)
   918  			d.hashPrev[d.index&windowMask] = ch
   919  			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
   920  		}
   921  		prevLength := d.length
   922  		prevOffset := d.offset
   923  		d.length = minMatchLength - 1
   924  		d.offset = 0
   925  		minIndex := d.index - windowSize
   926  		if minIndex < 0 {
   927  			minIndex = 0
   928  		}
   929  
   930  		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
   931  			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
   932  				d.length = newLength
   933  				d.offset = newOffset
   934  			}
   935  		}
   936  		if prevLength >= minMatchLength && d.length <= prevLength {
   937  			// There was a match at the previous step, and the current match is
   938  			// not better. Output the previous match.
   939  			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
   940  			d.tokens.n++
   941  
   942  			// Insert in the hash table all strings up to the end of the match.
   943  			// index and index-1 are already inserted. If there is not enough
   944  			// lookahead, the last two strings are not inserted into the hash
   945  			// table.
   946  			var newIndex int
   947  			newIndex = d.index + prevLength - 1
   948  			// Calculate missing hashes
   949  			end := newIndex
   950  			if end > d.maxInsertIndex {
   951  				end = d.maxInsertIndex
   952  			}
   953  			end += minMatchLength - 1
   954  			startindex := d.index + 1
   955  			if startindex > d.maxInsertIndex {
   956  				startindex = d.maxInsertIndex
   957  			}
   958  			tocheck := d.window[startindex:end]
   959  			dstSize := len(tocheck) - minMatchLength + 1
   960  			if dstSize > 0 {
   961  				dst := d.hashMatch[:dstSize]
   962  				crc32sseAll(tocheck, dst)
   963  				var newH hash
   964  				for i, val := range dst {
   965  					di := i + startindex
   966  					newH = val & hashMask
   967  					// Get previous value with the same hash.
   968  					// Our chain should point to the previous value.
   969  					d.hashPrev[di&windowMask] = d.hashHead[newH]
   970  					// Set the head of the hash chain to us.
   971  					d.hashHead[newH] = hashid(di + d.hashOffset)
   972  				}
   973  				d.hash = newH
   974  			}
   975  
   976  			d.index = newIndex
   977  			d.byteAvailable = false
   978  			d.length = minMatchLength - 1
   979  			if d.tokens.n == maxFlateBlockTokens {
   980  				// The block includes the current character
   981  				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   982  					return
   983  				}
   984  				d.tokens.n = 0
   985  			}
   986  		} else {
   987  			// Reset, if we got a match this run.
   988  			if d.length >= minMatchLength {
   989  				d.ii = 0
   990  			}
   991  			// We have a byte waiting. Emit it.
   992  			if d.byteAvailable {
   993  				d.ii++
   994  				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
   995  				d.tokens.n++
   996  				if d.tokens.n == maxFlateBlockTokens {
   997  					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
   998  						return
   999  					}
  1000  					d.tokens.n = 0
  1001  				}
  1002  				d.index++
  1003  
   1004  				// If we have a long run of no matches, emit additional bytes as literals
   1005  				// and skip ahead faster. Resets when d.ii overflows after 64KB.
  1006  				if d.ii > 31 {
  1007  					n := int(d.ii >> 6)
  1008  					for j := 0; j < n; j++ {
  1009  						if d.index >= d.windowEnd-1 {
  1010  							break
  1011  						}
  1012  
  1013  						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
  1014  						d.tokens.n++
  1015  						if d.tokens.n == maxFlateBlockTokens {
  1016  							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
  1017  								return
  1018  							}
  1019  							d.tokens.n = 0
  1020  						}
  1021  						d.index++
  1022  					}
  1023  					// Flush last byte
  1024  					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
  1025  					d.tokens.n++
  1026  					d.byteAvailable = false
  1027  					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
  1028  					if d.tokens.n == maxFlateBlockTokens {
  1029  						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
  1030  							return
  1031  						}
  1032  						d.tokens.n = 0
  1033  					}
  1034  				}
  1035  			} else {
  1036  				d.index++
  1037  				d.byteAvailable = true
  1038  			}
  1039  		}
  1040  	}
  1041  }
  1042  
  1043  func (d *compressor) fillStore(b []byte) int {
  1044  	n := copy(d.window[d.windowEnd:], b)
  1045  	d.windowEnd += n
  1046  	return n
  1047  }
  1048  
  1049  func (d *compressor) store() {
  1050  	if d.windowEnd > 0 {
  1051  		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
  1052  	}
  1053  	d.windowEnd = 0
  1054  }
  1055  
  1056  // fillHuff will fill the buffer with data for huffman-only compression.
  1057  // The number of bytes copied is returned.
  1058  func (d *compressor) fillHuff(b []byte) int {
  1059  	n := copy(d.window[d.windowEnd:], b)
  1060  	d.windowEnd += n
  1061  	return n
  1062  }
  1063  
   1064  // storeHuff will compress and store the currently added data,
   1065  // if enough has been accumulated or we are at the end of the stream.
   1066  // Any error that occurred will be in d.err
  1067  func (d *compressor) storeHuff() {
  1068  	// We only compress if we have maxStoreBlockSize or we are at end-of-stream
  1069  	if d.windowEnd < maxStoreBlockSize && !d.sync {
  1070  		return
  1071  	}
  1072  	if d.windowEnd == 0 {
  1073  		return
  1074  	}
  1075  	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
  1076  	d.err = d.w.err
  1077  	d.windowEnd = 0
  1078  }
  1079  
   1080  // storeSnappy will compress and store the currently added data,
   1081  // if enough has been accumulated or we are at the end of the stream.
   1082  // Any error that occurred will be in d.err
  1083  func (d *compressor) storeSnappy() {
  1084  	// We only compress if we have maxStoreBlockSize.
  1085  	if d.windowEnd < maxStoreBlockSize {
  1086  		if !d.sync {
  1087  			return
  1088  		}
  1089  		// Handle extremely small sizes.
  1090  		if d.windowEnd < 128 {
  1091  			if d.windowEnd == 0 {
  1092  				return
  1093  			}
  1094  			if d.windowEnd <= 32 {
  1095  				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
  1096  				d.tokens.n = 0
  1097  				d.windowEnd = 0
  1098  			} else {
  1099  				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
  1100  				d.err = d.w.err
  1101  			}
  1102  			d.tokens.n = 0
  1103  			d.windowEnd = 0
  1104  			return
  1105  		}
  1106  	}
  1107  
  1108  	d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
  1109  	// If we made zero matches, store the block as is.
  1110  	if d.tokens.n == d.windowEnd {
  1111  		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
  1112  		// If we removed less than 1/16th, huffman compress the block.
  1113  	} else if d.tokens.n > d.windowEnd-(d.windowEnd>>4) {
  1114  		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
  1115  		d.err = d.w.err
  1116  	} else {
  1117  		d.w.writeBlockDynamic(d.tokens, false, d.window[:d.windowEnd])
  1118  		d.err = d.w.err
  1119  	}
  1120  	d.tokens.n = 0
  1121  	d.windowEnd = 0
  1122  }
  1123  
   1124  // write adds input bytes to the stream.
   1125  // Unless an error occurs, all bytes will be consumed.
  1126  func (d *compressor) write(b []byte) (n int, err error) {
  1127  	if d.err != nil {
  1128  		return 0, d.err
  1129  	}
  1130  	n = len(b)
  1131  	for len(b) > 0 {
  1132  		d.step(d)
  1133  		b = b[d.fill(d, b):]
  1134  		if d.err != nil {
  1135  			return 0, d.err
  1136  		}
  1137  	}
  1138  	return n, d.err
  1139  }
  1140  
  1141  func (d *compressor) syncFlush() error {
  1142  	d.sync = true
  1143  	if d.err != nil {
  1144  		return d.err
  1145  	}
  1146  	d.step(d)
  1147  	if d.err == nil {
  1148  		d.w.writeStoredHeader(0, false)
  1149  		d.w.flush()
  1150  		d.err = d.w.err
  1151  	}
  1152  	d.sync = false
  1153  	return d.err
  1154  }
  1155  
  1156  func (d *compressor) init(w io.Writer, level int) (err error) {
  1157  	d.w = newHuffmanBitWriter(w)
  1158  
  1159  	switch {
  1160  	case level == NoCompression:
  1161  		d.window = make([]byte, maxStoreBlockSize)
  1162  		d.fill = (*compressor).fillStore
  1163  		d.step = (*compressor).store
  1164  	case level == ConstantCompression:
  1165  		d.window = make([]byte, maxStoreBlockSize)
  1166  		d.fill = (*compressor).fillHuff
  1167  		d.step = (*compressor).storeHuff
  1168  	case level >= 1 && level <= 3:
  1169  		d.snap = newSnappy(level)
  1170  		d.window = make([]byte, maxStoreBlockSize)
  1171  		d.fill = (*compressor).fillHuff
  1172  		d.step = (*compressor).storeSnappy
  1173  		d.tokens.tokens = make([]token, maxStoreBlockSize+1)
  1174  	case level == DefaultCompression:
  1175  		level = 5
  1176  		fallthrough
  1177  	case 4 <= level && level <= 9:
  1178  		d.compressionLevel = levels[level]
  1179  		d.initDeflate()
  1180  		d.fill = (*compressor).fillDeflate
  1181  		if d.fastSkipHashing == skipNever {
  1182  			if useSSE42 {
  1183  				d.step = (*compressor).deflateLazySSE
  1184  			} else {
  1185  				d.step = (*compressor).deflateLazy
  1186  			}
  1187  		} else {
  1188  			if useSSE42 {
  1189  				d.step = (*compressor).deflateSSE
  1190  			} else {
  1191  				d.step = (*compressor).deflate
  1192  
  1193  			}
  1194  		}
  1195  	default:
  1196  		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
  1197  	}
  1198  	return nil
  1199  }
  1200  
  1201  // Used for zeroing the hash slice
  1202  var hzeroes [256]hashid
  1203  
  1204  // reset the state of the compressor.
  1205  func (d *compressor) reset(w io.Writer) {
  1206  	d.w.reset(w)
  1207  	d.sync = false
  1208  	d.err = nil
  1209  	// We only need to reset a few things for Snappy.
  1210  	if d.snap != nil {
  1211  		d.snap.Reset()
  1212  		d.windowEnd = 0
  1213  		d.tokens.n = 0
  1214  		return
  1215  	}
  1216  	switch d.compressionLevel.chain {
  1217  	case 0:
   1218  		// level was NoCompression or ConstantCompression.
  1219  		d.windowEnd = 0
  1220  	default:
  1221  		d.chainHead = -1
  1222  		for s := d.hashHead; len(s) > 0; {
  1223  			n := copy(s, hzeroes[:])
  1224  			s = s[n:]
  1225  		}
  1226  		for s := d.hashPrev; len(s) > 0; s = s[len(hzeroes):] {
  1227  			copy(s, hzeroes[:])
  1228  		}
  1229  		d.hashOffset = 1
  1230  
  1231  		d.index, d.windowEnd = 0, 0
  1232  		d.blockStart, d.byteAvailable = 0, false
  1233  
  1234  		d.tokens.n = 0
  1235  		d.length = minMatchLength - 1
  1236  		d.offset = 0
  1237  		d.hash = 0
  1238  		d.ii = 0
  1239  		d.maxInsertIndex = 0
  1240  	}
  1241  }
  1242  
  1243  func (d *compressor) close() error {
  1244  	if d.err != nil {
  1245  		return d.err
  1246  	}
  1247  	d.sync = true
  1248  	d.step(d)
  1249  	if d.err != nil {
  1250  		return d.err
  1251  	}
  1252  	if d.w.writeStoredHeader(0, true); d.w.err != nil {
  1253  		return d.w.err
  1254  	}
  1255  	d.w.flush()
  1256  	return d.w.err
  1257  }
  1258  
  1259  // NewWriter returns a new Writer compressing data at the given level.
  1260  // Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
  1261  // higher levels typically run slower but compress more.
  1262  // Level 0 (NoCompression) does not attempt any compression; it only adds the
  1263  // necessary DEFLATE framing.
  1264  // Level -1 (DefaultCompression) uses the default compression level.
  1265  // Level -2 (ConstantCompression) will use Huffman compression only, giving
  1266  // a very fast compression for all types of input, but sacrificing considerable
  1267  // compression efficiency.
  1268  //
  1269  // If level is in the range [-2, 9] then the error returned will be nil.
  1270  // Otherwise the error returned will be non-nil.
  1271  func NewWriter(w io.Writer, level int) (*Writer, error) {
  1272  	var dw Writer
  1273  	if err := dw.d.init(w, level); err != nil {
  1274  		return nil, err
  1275  	}
  1276  	return &dw, nil
  1277  }
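
// Minimal usage sketch (added; not part of the original source, assumes "bytes"
// is imported and errors are handled by the caller):
//
//	var buf bytes.Buffer
//	zw, err := NewWriter(&buf, BestSpeed)
//	if err != nil {
//		// handle the error
//	}
//	zw.Write([]byte("hello, flate"))
//	zw.Close() // emits the final block and flushes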
  1278  
  1279  // NewWriterDict is like NewWriter but initializes the new
  1280  // Writer with a preset dictionary.  The returned Writer behaves
  1281  // as if the dictionary had been written to it without producing
  1282  // any compressed output.  The compressed data written to w
  1283  // can only be decompressed by a Reader initialized with the
  1284  // same dictionary.
  1285  func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
  1286  	dw := &dictWriter{w}
  1287  	zw, err := NewWriter(dw, level)
  1288  	if err != nil {
  1289  		return nil, err
  1290  	}
  1291  	zw.d.fillWindow(dict)
  1292  	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
  1293  	return zw, err
  1294  }
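
// Dictionary usage sketch (added; dst, otherDst, dict and payload are assumed to
// exist in the caller):
//
//	zw, err := NewWriterDict(dst, BestCompression, dict)
//	if err != nil {
//		// handle the error
//	}
//	zw.Write(payload)
//	zw.Close()
//	zw.Reset(otherDst) // Reset re-applies the same dictionary to the new destination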
  1295  
  1296  type dictWriter struct {
  1297  	w io.Writer
  1298  }
  1299  
  1300  func (w *dictWriter) Write(b []byte) (n int, err error) {
  1301  	return w.w.Write(b)
  1302  }
  1303  
  1304  // A Writer takes data written to it and writes the compressed
  1305  // form of that data to an underlying writer (see NewWriter).
  1306  type Writer struct {
  1307  	d    compressor
  1308  	dict []byte
  1309  }
  1310  
  1311  // Write writes data to w, which will eventually write the
  1312  // compressed form of data to its underlying writer.
  1313  func (w *Writer) Write(data []byte) (n int, err error) {
  1314  	return w.d.write(data)
  1315  }
  1316  
  1317  // Flush flushes any pending compressed data to the underlying writer.
  1318  // It is useful mainly in compressed network protocols, to ensure that
  1319  // a remote reader has enough data to reconstruct a packet.
  1320  // Flush does not return until the data has been written.
  1321  // If the underlying writer returns an error, Flush returns that error.
  1322  //
  1323  // In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
  1324  func (w *Writer) Flush() error {
  1325  	// For more about flushing:
  1326  	// http://www.bolet.org/~pornin/deflate-flush.html
  1327  	return w.d.syncFlush()
  1328  }
  1329  
  1330  // Close flushes and closes the writer.
  1331  func (w *Writer) Close() error {
  1332  	return w.d.close()
  1333  }
  1334  
  1335  // Reset discards the writer's state and makes it equivalent to
  1336  // the result of NewWriter or NewWriterDict called with dst
  1337  // and w's level and dictionary.
  1338  func (w *Writer) Reset(dst io.Writer) {
  1339  	if dw, ok := w.d.w.w.(*dictWriter); ok {
  1340  		// w was created with NewWriterDict
  1341  		dw.w = dst
  1342  		w.d.reset(dw)
  1343  		w.d.fillWindow(w.dict)
  1344  	} else {
  1345  		// w was created with NewWriter
  1346  		w.d.reset(dst)
  1347  	}
  1348  }
  1349  
  1350  // ResetDict discards the writer's state and makes it equivalent to
  1351  // the result of NewWriter or NewWriterDict called with dst
  1352  // and w's level, but sets a specific dictionary.
  1353  func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
  1354  	w.dict = dict
  1355  	w.d.reset(dst)
  1356  	w.d.fillWindow(w.dict)
  1357  }