// Copyright 2009 The Go Authors. All rights reserved.
// Copyright (c) 2015 Klaus Post
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math"
)

const (
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly         = -2
	ConstantCompression = HuffmanOnly // compatibility alias.

	logWindowSize    = 15
	windowSize       = 1 << logWindowSize
	windowMask       = windowSize - 1
	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we will encode at a time.
	// Smaller sizes usually create less optimal blocks.
	// Bigger can make context switching slow.
	// We use this for levels 7-9, so we make it big.
	maxFlateBlockTokens = 1 << 15
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 28

	skipNever = math.MaxInt32

	debugDeflate = false
)
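
// Usage sketch for the level constants above: HuffmanOnly (alias
// ConstantCompression) is selected like any other level when constructing a
// Writer. Illustrative only; dst and alreadyLZCompressed are assumed names,
// and nothing in this package references this snippet:
//
//	zw, err := NewWriter(dst, HuffmanOnly)
//	if err == nil {
//		_, _ = zw.Write(alreadyLZCompressed) // e.g. Snappy or LZ4 output
//		_ = zw.Close()
//	}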

type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}

// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
var levels = []compressionLevel{
	{}, // 0
	// Levels 1-6 use a specialized algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	{0, 0, 0, 0, 0, 3},
	{0, 0, 0, 0, 0, 4},
	{0, 0, 0, 0, 0, 5},
	{0, 0, 0, 0, 0, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{8, 12, 16, 24, skipNever, 7},
	{16, 30, 40, 64, skipNever, 8},
	{32, 258, 258, 1024, skipNever, 9},
}

// advancedState contains state for the advanced levels, with bigger hash tables, etc.
type advancedState struct {
	// deflate state
	length         int
	offset         int
	maxInsertIndex int
	chainHead      int
	hashOffset     int

	ii uint16 // position of last match, intended to overflow to reset.

	// input window: unprocessed data is window[index:windowEnd]
	index     int
	hashMatch [maxMatchLength + minMatchLength]uint32

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	hashHead [hashSize]uint32
	hashPrev [windowSize]uint32
}
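
// The two tables above form a zlib-style chained hash table. A hedged walking
// sketch of how a chain is traversed (illustrative only; it mirrors what
// findMatch does below, with hypothetical locals d, s, pos and h):
//
//	h := hash4(d.window[pos:])
//	i := int(s.hashHead[h]) - s.hashOffset // most recent position with hash h
//	for i >= 0 && i >= pos-windowSize {
//		// d.window[i:] is a candidate match for d.window[pos:]
//		i = int(s.hashPrev[i&windowMask]) - s.hashOffset // next older candidate
//	}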

type compressor struct {
	compressionLevel

	h *huffmanEncoder
	w *huffmanBitWriter

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window

	window     []byte
	windowEnd  int
	blockStart int // window index where current tokens start
	err        error

	// queued output tokens
	tokens tokens
	fast   fastEnc
	state  *advancedState

	sync          bool // requesting flush
	byteAvailable bool // if true, still need to process window[index-1].
}

func (d *compressor) fillDeflate(b []byte) int {
	s := d.state
	if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		//copy(d.window[:], d.window[windowSize:2*windowSize])
		*(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
		s.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			d.blockStart = math.MaxInt32
		}
		s.hashOffset += windowSize
		if s.hashOffset > maxHashOffset {
			delta := s.hashOffset - 1
			s.hashOffset -= delta
			s.chainHead -= delta
			// Iterate over slices instead of arrays to avoid copying
			// the entire table onto the stack (Issue #18625).
			for i, v := range s.hashPrev[:] {
				if int(v) > delta {
					s.hashPrev[i] = uint32(int(v) - delta)
				} else {
					s.hashPrev[i] = 0
				}
			}
			for i, v := range s.hashHead[:] {
				if int(v) > delta {
					s.hashHead[i] = uint32(int(v) - delta)
				} else {
					s.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
	if index > 0 || eof {
		var window []byte
		if d.blockStart <= index {
			window = d.window[d.blockStart:index]
		}
		d.blockStart = index
		//d.w.writeBlock(tok, eof, window)
		d.w.writeBlockDynamic(tok, eof, window, d.sync)
		return d.w.err
	}
	return nil
}

// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored on no matches, or
// only huffman encoded.
func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
	if index > 0 || eof {
		if d.blockStart <= index {
			window := d.window[d.blockStart:index]
			// If we removed less than a 64th of all literals
			// we huffman compress the block.
			if int(tok.n) > len(window)-int(tok.n>>6) {
				d.w.writeBlockHuff(eof, window, d.sync)
			} else {
				// Write a dynamic huffman block.
				d.w.writeBlockDynamic(tok, eof, window, d.sync)
			}
		} else {
			d.w.writeBlock(tok, eof, nil)
		}
		d.blockStart = index
		return d.w.err
	}
	return nil
}

// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset.
func (d *compressor) fillWindow(b []byte) {
	// Do not fill window if we are in store-only or huffman mode.
	if d.level <= 0 {
		return
	}
	if d.fast != nil {
		// encode the last data, but discard the result
		if len(b) > maxMatchOffset {
			b = b[len(b)-maxMatchOffset:]
		}
		d.fast.Encode(&d.tokens, b)
		d.tokens.Reset()
		return
	}
	s := d.state
	// If we are given too much, cut it.
	if len(b) > windowSize {
		b = b[len(b)-windowSize:]
	}
	// Add all to window.
	n := copy(d.window[d.windowEnd:], b)

	// Calculate 256 hashes at a time (more L1 cache hits)
	loops := (n + 256 - minMatchLength) / 256
	for j := 0; j < loops; j++ {
		startindex := j * 256
		end := startindex + 256 + minMatchLength - 1
		if end > n {
			end = n
		}
		tocheck := d.window[startindex:end]
		dstSize := len(tocheck) - minMatchLength + 1

		if dstSize <= 0 {
			continue
		}

		dst := s.hashMatch[:dstSize]
		bulkHash4(tocheck, dst)
		var newH uint32
		for i, val := range dst {
			di := i + startindex
			newH = val & hashMask
			// Get previous value with the same hash.
			// Our chain should point to the previous value.
			s.hashPrev[di&windowMask] = s.hashHead[newH]
			// Set the head of the hash chain to us.
			s.hashHead[newH] = uint32(di + s.hashOffset)
		}
	}
	// Update window information.
	d.windowEnd += n
	s.index = n
}

// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
	minMatchLook := maxMatchLength
	if lookahead < minMatchLook {
		minMatchLook = lookahead
	}

	win := d.window[0 : pos+minMatchLook]

	// We quit when we get a match that's at least nice long
	nice := len(win) - pos
	if d.nice < nice {
		nice = d.nice
	}

	// If we've got a match that's good enough, only look in 1/4 the chain.
	tries := d.chain
	length = minMatchLength - 1

	wEnd := win[pos+length]
	wPos := win[pos:]
	minIndex := pos - windowSize
	if minIndex < 0 {
		minIndex = 0
	}
	offset = 0

	if d.chain < 100 {
		for i := prevHead; tries > 0; tries-- {
			if wEnd == win[i+length] {
				n := matchLen(win[i:i+minMatchLook], wPos)
				if n > length {
					length = n
					offset = pos - i
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
			if i <= minIndex {
				// hashPrev[i & windowMask] has already been overwritten, so stop now.
				break
			}
			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
			if i < minIndex {
				break
			}
		}
		return
	}

	// Minimum gain to accept a match.
	cGain := 4

	// Some like it higher (CSV), some like it lower (JSON)
	const baseCost = 3
	// Base is 4 bytes with an additional cost per match.
	// Matches must be better than this.

	for i := prevHead; tries > 0; tries-- {
		if wEnd == win[i+length] {
			n := matchLen(win[i:i+minMatchLook], wPos)
			if n > length {
				// Calculate gain. Estimate
				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])

				//fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
				if newGain > cGain {
					length = n
					offset = pos - i
					cGain = newGain
					ok = true
					if n >= nice {
						// The match is good enough that we don't try to find a better one.
						break
					}
					wEnd = win[pos+n]
				}
			}
		}
		if i <= minIndex {
			// hashPrev[i & windowMask] has already been overwritten, so stop now.
			break
		}
		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
		if i < minIndex {
			break
		}
	}
	return
}

func (d *compressor) writeStoredBlock(buf []byte) error {
	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
		return d.w.err
	}
	d.w.writeBytes(buf)
	return d.w.err
}

// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
	return hash4u(binary.LittleEndian.Uint32(b), hashBits)
}

// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
	return (u * prime4bytes) >> (32 - h)
}

// bulkHash4 will compute hashes using the same
// algorithm as hash4
func bulkHash4(b []byte, dst []uint32) {
	if len(b) < 4 {
		return
	}
	hb := binary.LittleEndian.Uint32(b)

	dst[0] = hash4u(hb, hashBits)
	end := len(b) - 4 + 1
	for i := 1; i < end; i++ {
		// Roll the 4-byte little-endian window forward by one byte.
		hb = (hb >> 8) | uint32(b[i+3])<<24
		dst[i] = hash4u(hb, hashBits)
	}
}
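
// Consistency sketch (illustrative only, not used by the package): for any b
// with len(b) >= 4, bulkHash4 is expected to produce at each position the same
// value as calling hash4 on that position's 4-byte window:
//
//	dst := make([]uint32, len(b)-3)
//	bulkHash4(b, dst)
//	for i := range dst {
//		_ = dst[i] == hash4(b[i:]) // expected to hold for every i
//	}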

func (d *compressor) initDeflate() {
	d.window = make([]byte, 2*windowSize)
	d.byteAvailable = false
	d.err = nil
	if d.state == nil {
		return
	}
	s := d.state
	s.index = 0
	s.hashOffset = 1
	s.length = minMatchLength - 1
	s.offset = 0
	s.chainHead = -1
}

// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
func (d *compressor) deflateLazy() {
	s := d.state
	// Sanity enables additional runtime tests.
	// It's intended to be used during development
	// to supplement the currently ad-hoc unit tests.
	const sanity = debugDeflate

	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
		return
	}
	if d.windowEnd != s.index && d.chain > 100 {
		// Get literal huffman coder.
		if d.h == nil {
			d.h = newHuffmanEncoder(maxFlateBlockTokens)
		}
		var tmp [256]uint16
		for _, v := range d.window[s.index:d.windowEnd] {
			tmp[v]++
		}
		d.h.generate(tmp[:], 15)
	}

	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)

	for {
		if sanity && s.index > d.windowEnd {
			panic("index > windowEnd")
		}
		lookahead := d.windowEnd - s.index
		if lookahead < minMatchLength+maxMatchLength {
			if !d.sync {
				return
			}
			if sanity && s.index > d.windowEnd {
				panic("index > windowEnd")
			}
			if lookahead == 0 {
				// Flush current output block if any.
				if d.byteAvailable {
					// There is still one pending token that needs to be flushed
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
				}
				if d.tokens.n > 0 {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				return
			}
		}
		if s.index < s.maxInsertIndex {
			// Update the hash
			hash := hash4(d.window[s.index:])
			ch := s.hashHead[hash]
			s.chainHead = int(ch)
			s.hashPrev[s.index&windowMask] = ch
			s.hashHead[hash] = uint32(s.index + s.hashOffset)
		}
		prevLength := s.length
		prevOffset := s.offset
		s.length = minMatchLength - 1
		s.offset = 0
		minIndex := s.index - windowSize
		if minIndex < 0 {
			minIndex = 0
		}

		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
				s.length = newLength
				s.offset = newOffset
			}
		}

		if prevLength >= minMatchLength && s.length <= prevLength {
			// No better match, but check for better match at end...
			//
			// Skip forward a number of bytes.
			// Offset of 2 seems to yield best results. 3 is sometimes better.
			const checkOff = 2

			// Check all, except full length
			if prevLength < maxMatchLength-checkOff {
				prevIndex := s.index - 1
				if prevIndex+prevLength < s.maxInsertIndex {
					end := lookahead
					if lookahead > maxMatchLength+checkOff {
						end = maxMatchLength + checkOff
					}
					end += prevIndex

					// Hash at match end.
					h := hash4(d.window[prevIndex+prevLength:])
					ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
					if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
						length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
						// It seems like a pure length metric is best.
						if length > prevLength {
							prevLength = length
							prevOffset = prevIndex - ch2

							// Extend back...
							for i := checkOff - 1; i >= 0; i-- {
								if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
									// Emit tokens we "owe"
									for j := 0; j <= i; j++ {
										d.tokens.AddLiteral(d.window[prevIndex+j])
										if d.tokens.n == maxFlateBlockTokens {
											// The block includes the current character
											if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
												return
											}
											d.tokens.Reset()
										}
										s.index++
										if s.index < s.maxInsertIndex {
											h := hash4(d.window[s.index:])
											ch := s.hashHead[h]
											s.chainHead = int(ch)
											s.hashPrev[s.index&windowMask] = ch
											s.hashHead[h] = uint32(s.index + s.hashOffset)
										}
									}
									break
								} else {
									prevLength++
								}
							}
						} else if false {
							// Check one further ahead.
							// Only rarely better, disabled for now.
							prevIndex++
							h := hash4(d.window[prevIndex+prevLength:])
							ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
							if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
								length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
								// It seems like a pure length metric is best.
								if length > prevLength+checkOff {
									prevLength = length
									prevOffset = prevIndex - ch2
									prevIndex--

									// Extend back...
									for i := checkOff; i >= 0; i-- {
										if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
											// Emit tokens we "owe"
											for j := 0; j <= i; j++ {
												d.tokens.AddLiteral(d.window[prevIndex+j])
												if d.tokens.n == maxFlateBlockTokens {
													// The block includes the current character
													if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
														return
													}
													d.tokens.Reset()
												}
												s.index++
												if s.index < s.maxInsertIndex {
													h := hash4(d.window[s.index:])
													ch := s.hashHead[h]
													s.chainHead = int(ch)
													s.hashPrev[s.index&windowMask] = ch
													s.hashHead[h] = uint32(s.index + s.hashOffset)
												}
											}
											break
										} else {
											prevLength++
										}
									}
								}
							}
						}
					}
				}
			}
			// There was a match at the previous step, and the current match is
			// not better. Output the previous match.
			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))

			// Insert in the hash table all strings up to the end of the match.
			// index and index-1 are already inserted. If there is not enough
			// lookahead, the last two strings are not inserted into the hash
			// table.
			newIndex := s.index + prevLength - 1
			// Calculate missing hashes
			end := newIndex
			if end > s.maxInsertIndex {
				end = s.maxInsertIndex
			}
			end += minMatchLength - 1
			startindex := s.index + 1
			if startindex > s.maxInsertIndex {
				startindex = s.maxInsertIndex
			}
			tocheck := d.window[startindex:end]
			dstSize := len(tocheck) - minMatchLength + 1
			if dstSize > 0 {
				dst := s.hashMatch[:dstSize]
				bulkHash4(tocheck, dst)
				var newH uint32
				for i, val := range dst {
					di := i + startindex
					newH = val & hashMask
					// Get previous value with the same hash.
					// Our chain should point to the previous value.
					s.hashPrev[di&windowMask] = s.hashHead[newH]
					// Set the head of the hash chain to us.
					s.hashHead[newH] = uint32(di + s.hashOffset)
				}
			}

			s.index = newIndex
			d.byteAvailable = false
			s.length = minMatchLength - 1
			if d.tokens.n == maxFlateBlockTokens {
				// The block includes the current character
				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
					return
				}
				d.tokens.Reset()
			}
			s.ii = 0
		} else {
			// Reset, if we got a match this run.
			if s.length >= minMatchLength {
				s.ii = 0
			}
			// We have a byte waiting. Emit it.
			if d.byteAvailable {
				s.ii++
				d.tokens.AddLiteral(d.window[s.index-1])
				if d.tokens.n == maxFlateBlockTokens {
					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
						return
					}
					d.tokens.Reset()
				}
				s.index++

				// If we have a long run of no matches, skip additional bytes
				// Resets when s.ii overflows after 64KB.
				if n := int(s.ii) - d.chain; n > 0 {
					n = 1 + int(n>>6)
					for j := 0; j < n; j++ {
						if s.index >= d.windowEnd-1 {
							break
						}
						d.tokens.AddLiteral(d.window[s.index-1])
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
								return
							}
							d.tokens.Reset()
						}
						// Index...
						if s.index < s.maxInsertIndex {
							h := hash4(d.window[s.index:])
							ch := s.hashHead[h]
							s.chainHead = int(ch)
							s.hashPrev[s.index&windowMask] = ch
							s.hashHead[h] = uint32(s.index + s.hashOffset)
						}
						s.index++
					}
					// Flush last byte
					d.tokens.AddLiteral(d.window[s.index-1])
					d.byteAvailable = false
					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
							return
						}
						d.tokens.Reset()
					}
				}
			} else {
				s.index++
				d.byteAvailable = true
			}
		}
	}
}

func (d *compressor) store() {
	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		d.windowEnd = 0
	}
}

// fillBlock will fill the buffer with data for huffman-only compression.
// The number of bytes copied is returned.
func (d *compressor) fillBlock(b []byte) int {
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

// storeHuff will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
	d.err = d.w.err
	d.windowEnd = 0
}

// storeFast will compress and store the currently added data,
// if enough has been accumulated or we are at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeFast() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < len(d.window) {
		if !d.sync {
			return
		}
		// Handle extremely small sizes.
		if d.windowEnd < 128 {
			if d.windowEnd == 0 {
				return
			}
			if d.windowEnd <= 32 {
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
			} else {
				d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
				d.err = d.w.err
			}
			d.tokens.Reset()
			d.windowEnd = 0
			d.fast.Reset()
			return
		}
	}

	d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
	// If we made zero matches, store the block as is.
	if d.tokens.n == 0 {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		// If we removed less than 1/16th, huffman compress the block.
	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	} else {
		d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
		d.err = d.w.err
	}
	d.tokens.Reset()
	d.windowEnd = 0
}

// write will add input bytes to the stream.
// Unless an error occurs all bytes will be consumed.
func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	n = len(b)
	for len(b) > 0 {
		if d.windowEnd == len(d.window) || d.sync {
			d.step(d)
		}
		b = b[d.fill(d, b):]
		if d.err != nil {
			return 0, d.err
		}
	}
	return n, d.err
}

func (d *compressor) syncFlush() error {
	d.sync = true
	if d.err != nil {
		return d.err
	}
	d.step(d)
	if d.err == nil {
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}

func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)

	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).store
	case level == ConstantCompression:
		d.w.logNewTablePenalty = 10
		d.window = make([]byte, 32<<10)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeHuff
	case level == DefaultCompression:
		level = 5
		fallthrough
	case level >= 1 && level <= 6:
		d.w.logNewTablePenalty = 7
		d.fast = newFastEnc(level)
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeFast
	case 7 <= level && level <= 9:
		d.w.logNewTablePenalty = 8
		d.state = &advancedState{}
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		d.step = (*compressor).deflateLazy
	case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
		d.w.logNewTablePenalty = 7
		d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeFast
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	d.level = level
	return nil
}

// reset the state of the compressor.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	// We only need to reset a few things for Snappy.
	if d.fast != nil {
		d.fast.Reset()
		d.windowEnd = 0
		d.tokens.Reset()
		return
	}
	switch d.compressionLevel.chain {
	case 0:
		// level was NoCompression or ConstantCompression.
		d.windowEnd = 0
	default:
		s := d.state
		s.chainHead = -1
		for i := range s.hashHead {
			s.hashHead[i] = 0
		}
		for i := range s.hashPrev {
			s.hashPrev[i] = 0
		}
		s.hashOffset = 1
		s.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens.Reset()
		s.length = minMatchLength - 1
		s.offset = 0
		s.ii = 0
		s.maxInsertIndex = 0
	}
}

func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	if d.w.writeStoredHeader(0, true); d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	d.w.reset(nil)
	return d.w.err
}

// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more.
// Level 0 (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (ConstantCompression) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	var dw Writer
	if err := dw.d.init(w, level); err != nil {
		return nil, err
	}
	return &dw, nil
}
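
// A minimal usage sketch for NewWriter (illustrative only; assumes a
// bytes.Buffer destination and elides error handling):
//
//	var buf bytes.Buffer
//	zw, err := NewWriter(&buf, BestSpeed)
//	if err != nil {
//		// handle invalid level
//	}
//	_, _ = zw.Write(data)
//	_ = zw.Close() // buf now holds a complete DEFLATE stream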

// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary.  The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output.  The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	zw, err := NewWriter(w, level)
	if err != nil {
		return nil, err
	}
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, err
}
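
// Sketch of round-tripping with a preset dictionary (illustrative only;
// assumes a bytes.Buffer named compressed, and the reader side must use the
// same dictionary, e.g. via NewReaderDict):
//
//	dict := []byte("shared prefix common to many payloads")
//	zw, _ := NewWriterDict(&compressed, BestCompression, dict)
//	_, _ = zw.Write(payload)
//	_ = zw.Close()
//	zr := NewReaderDict(&compressed, dict)
//	plain, _ := io.ReadAll(zr)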

// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
const MinCustomWindowSize = 32

// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
const MaxCustomWindowSize = windowSize

// NewWriterWindow returns a new Writer compressing data with a custom window size.
// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
	if windowSize < MinCustomWindowSize {
		return nil, errors.New("flate: requested window size less than MinCustomWindowSize")
	}
	if windowSize > MaxCustomWindowSize {
		return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
	}
	var dw Writer
	if err := dw.d.init(w, -windowSize); err != nil {
		return nil, err
	}
	return &dw, nil
}
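
// Sketch: limiting back-references to a 4 KiB window, which bounds decoder
// memory at the cost of some compression (illustrative only; dst is an assumed
// io.Writer, and 4<<10 lies within [MinCustomWindowSize, MaxCustomWindowSize]):
//
//	zw, err := NewWriterWindow(dst, 4<<10)
//	if err != nil {
//		// requested window size out of range
//	}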

// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte
}

// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}

// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}
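
// Sketch of the typical network-protocol pattern Flush is intended for
// (illustrative only; assumes zw wraps the connection and msg is one packet):
//
//	_, _ = zw.Write(msg)
//	if err := zw.Flush(); err != nil {
//		return err
//	}
//	// The peer can now decode everything written so far.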

// Close flushes and closes the writer.
func (w *Writer) Close() error {
	return w.d.close()
}

// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	if len(w.dict) > 0 {
		// w was created with NewWriterDict
		w.d.reset(dst)
		if dst != nil {
			w.d.fillWindow(w.dict)
		}
	} else {
		// w was created with NewWriter
		w.d.reset(dst)
	}
}

// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	w.dict = dict
	w.d.reset(dst)
	w.d.fillWindow(w.dict)
}