// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"encoding/binary"
	"fmt"
	"io"
	"math"
)

const (
	// The largest offset code.
	offsetCodeCount = 30

	// The special code used to mark the end of a block.
	endBlockMarker = 256

	// The first length code.
	lengthCodesStart = 257

	// The number of codegen codes.
	codegenCodeCount = 19
	badCode          = 255

	// maxPredefinedTokens is the maximum number of tokens
	// where we check if fixed size is smaller.
	maxPredefinedTokens = 250

	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 246
)

// Minimum length code that emits bits.
const lengthExtraBitsMinCode = 8

// The number of extra bits needed by length code X - lengthCodesStart.
var lengthExtraBits = [32]uint8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// The length indicated by length code X - lengthCodesStart.
var lengthBase = [32]uint8{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// Minimum offset code that emits bits.
const offsetExtraBitsMinCode = 4

// offset code word extra bits.
var offsetExtraBits = [32]int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14,
}

var offsetCombined = [32]uint32{}

func init() {
	var offsetBase = [32]uint32{
		/* normal deflate */
		0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
		0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
		0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
		0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
		0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
		0x001800, 0x002000, 0x003000, 0x004000, 0x006000,

		/* extended window */
		0x008000, 0x00c000,
	}

	for i := range offsetCombined[:] {
		// Don't use extended window values...
		if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
			continue
		}
		offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
	}
}
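
// For illustration: offsetCombined[i] packs the extra-bit count in the low
// 8 bits and the zero-based base offset above it. Offset code 10 has base
// 0x20 (distance 33) and 4 extra bits, so offsetCombined[10] = 4 | 0x20<<8
// = 0x2004; writeTokens later recovers the pair with comb>>8 and uint8(comb).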

// The odd order in which the codegen code sizes are written.
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}

type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits            uint64
	nbits           uint8
	nbytes          uint8
	lastHuffMan     bool
	literalEncoding *huffmanEncoder
	tmpLitEncoding  *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	err             error
	lastHeader      int
	// logNewTablePenalty adds a cost of size>>logNewTablePenalty to the
	// estimate for emitting a new table; 0 means a reused table is kept
	// until it is estimated at 2x the size of a new one.
	logNewTablePenalty uint
	bytes              [256 + 8]byte
	literalFreq        [lengthCodesStart + 32]uint16
	offsetFreq         [32]uint16
	codegenFreq        [codegenCodeCount]uint16

	// codegen must have an extra space for the final symbol.
	codegen [literalCount + offsetCodeCount + 1]uint8
}

// Huffman reuse.
//
// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
//
// This is controlled by several variables:
//
// If lastHeader is non-zero the Huffman table can be reused.
// This also indicates that a Huffman table has been generated that can output all
// possible symbols.
// It also indicates that an EOB has not yet been emitted, so if a new table is generated
// an EOB with the previous table must be written.
//
// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
//
// An incoming block estimates the output size of a new table by calculating the
// optimal size and adding a penalty in 'logNewTablePenalty'.
// A real Huffman table is rarely optimal, which is why the penalty is added;
// generating a new table is also slower for both compression and decompression.

func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
	return &huffmanBitWriter{
		writer:          w,
		literalEncoding: newHuffmanEncoder(literalCount),
		tmpLitEncoding:  newHuffmanEncoder(literalCount),
		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
	}
}

func (w *huffmanBitWriter) reset(writer io.Writer) {
	w.writer = writer
	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
	w.lastHeader = 0
	w.lastHuffMan = false
}

func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
	a := t.offHist[:offsetCodeCount]
	b := w.offsetEncoding.codes
	b = b[:len(a)]
	for i, v := range a {
		if v != 0 && b[i].zero() {
			return false
		}
	}

	a = t.extraHist[:literalCount-256]
	b = w.literalEncoding.codes[256:literalCount]
	b = b[:len(a)]
	for i, v := range a {
		if v != 0 && b[i].zero() {
			return false
		}
	}

	a = t.litHist[:256]
	b = w.literalEncoding.codes[:len(a)]
	for i, v := range a {
		if v != 0 && b[i].zero() {
			return false
		}
	}
	return true
}

func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}

func (w *huffmanBitWriter) write(b []byte) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write(b)
}

func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
	w.bits |= uint64(b) << (w.nbits & 63)
	w.nbits += nb
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}
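
// For illustration, bits are accumulated LSB-first, as DEFLATE requires:
//
//	w.writeBits(0b101, 3) // bits = 0b101,   nbits = 3
//	w.writeBits(0b01, 2)  // bits = 0b01101, nbits = 5
//
// Once 48 or more bits are pending, writeOutBits moves six whole bytes
// into w.bytes.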

func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	n := w.nbytes
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}

// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array).  This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code are written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker.
//
//	numLiterals      The number of literals in literalEncoding
//	numOffsets       The number of offsets in offsetEncoding
//	litEnc, offEnc   The literal and offset encoder to use
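//
// For illustration, the concatenated code lengths 3, 3, 3, 3, 3, 3, 3, 4
// are reduced to
//
//	3, 16, 3, 4
//
// meaning "length 3", then code 16 ("repeat previous") with raw repeat
// value 6-3 = 3 stored in the following byte, then "length 4". Runs of
// zero lengths use codes 17 and 18 instead.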
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	for i := range w.codegenFreq {
		w.codegenFreq[i] = 0
	}
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen[:] // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = litEnc.codes[i].len()
	}

	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = offEnc.codes[i].len()
	}
	codegen[numLiterals+numOffsets] = badCode

	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		count--
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}

func (w *huffmanBitWriter) codegens() int {
	numCodegens := len(w.codegenFreq)
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	return numCodegens
}

func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
	numCodegens = len(w.codegenFreq)
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	return 3 + 5 + 5 + 4 + (3 * numCodegens) +
		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
		int(w.codegenFreq[16])*2 +
		int(w.codegenFreq[17])*3 +
		int(w.codegenFreq[18])*7, numCodegens
}
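
// The constant terms in headerSize follow RFC 1951, section 3.2.7: 3 block
// header bits, 5 bits for HLIT, 5 for HDIST, 4 for HCLEN, plus 3 bits for
// each of the numCodegens code length code lengths. Codegen codes 16, 17
// and 18 carry 2, 3 and 7 extra bits, hence the per-frequency terms above.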

// dynamicReuseSize returns the size, in bits, of dynamically encoded data
// assuming the supplied encodings are reused.
func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
	size = litEnc.bitLength(w.literalFreq[:]) +
		offEnc.bitLength(w.offsetFreq[:])
	return size
}

// dynamicSize returns the size of dynamically encoded data in bits.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
	header, numCodegens := w.headerSize()
	size = header +
		litEnc.bitLength(w.literalFreq[:]) +
		offEnc.bitLength(w.offsetFreq[:]) +
		extraBits
	return size, numCodegens
}

// extraBitSize will return the number of bits that will be written
// as "extra" bits on matches.
func (w *huffmanBitWriter) extraBitSize() int {
	total := 0
	for i, n := range w.literalFreq[257:literalCount] {
		total += int(n) * int(lengthExtraBits[i&31])
	}
	for i, n := range w.offsetFreq[:offsetCodeCount] {
		total += int(n) * int(offsetExtraBits[i&31])
	}
	return total
}

// fixedSize returns the size of data encoded with the fixed Huffman tables, in bits.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	return 3 +
		fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
		fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
		extraBits
}

// storedSize calculates the stored size, including header.
// The function returns the size in bits and whether the data
// fits inside a single stored block.
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
	if in == nil {
		return 0, false
	}
	if len(in) <= maxStoreBlockSize {
		return (len(in) + 5) * 8, true
	}
	return 0, false
}
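
// For reference, storedSize charges (len(in)+5)*8 bits; e.g. a 1000-byte
// block costs (1000+5)*8 = 8040 bits. The 5 extra bytes are a conservative
// allowance for the stored-block header: the 3 type bits padded to a byte
// boundary plus the 4-byte LEN/NLEN pair.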

func (w *huffmanBitWriter) writeCode(c hcode) {
	// The function does not get inlined if we "& 63" the shift.
	w.bits |= c.code64() << (w.nbits & 63)
	w.nbits += c.len()
	if w.nbits >= 48 {
		w.writeOutBits()
	}
}

// writeOutBits will write bits to the buffer.
func (w *huffmanBitWriter) writeOutBits() {
	bits := w.bits
	w.bits >>= 48
	w.nbits -= 48
	n := w.nbytes

	// We over-write, but faster...
	binary.LittleEndian.PutUint64(w.bytes[n:], bits)
	n += 6

	if n >= bufferFlushSize {
		if w.err != nil {
			n = 0
			return
		}
		w.write(w.bytes[:n])
		n = 0
	}

	w.nbytes = n
}

// Write the header of a dynamic Huffman block to the output stream.
//
//	numLiterals  The number of literals specified in codegen
//	numOffsets   The number of offsets specified in codegen
//	numCodegens  The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)

	for i := 0; i < numCodegens; i++ {
		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
		w.writeBits(int32(value), 3)
	}

	i := 0
	for {
		var codeWord = uint32(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		w.writeCode(w.codegenEncoding.codes[codeWord])

		switch codeWord {
		case 16:
			w.writeBits(int32(w.codegen[i]), 2)
			i++
		case 17:
			w.writeBits(int32(w.codegen[i]), 3)
			i++
		case 18:
			w.writeBits(int32(w.codegen[i]), 7)
			i++
		}
	}
}

// writeStoredHeader will write a stored header.
// If the stored block is only used for EOF,
// it is replaced with a fixed huffman block.
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
	if w.err != nil {
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}

	// To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
	if length == 0 && isEof {
		w.writeFixedHeader(isEof)
		// EOB: 7 bits, value: 0
		w.writeBits(0, 7)
		w.flush()
		return
	}

	var flag int32
	if isEof {
		flag = 1
	}
	w.writeBits(flag, 3)
	w.flush()
	w.writeBits(int32(length), 16)
	w.writeBits(int32(^uint16(length)), 16)
}

func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
	if w.err != nil {
		return
	}
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}

	// Indicate that we are a fixed Huffman block
	var value int32 = 2
	if isEof {
		value = 3
	}
	w.writeBits(value, 3)
}

// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens.AddEOB()
	if w.lastHeader > 0 {
		// We owe an EOB
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}
	numLiterals, numOffsets := w.indexTokens(tokens, false)
	w.generate()
	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		extraBits = w.extraBitSize()
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = math.MaxInt32
	if tokens.n < maxPredefinedTokens {
		size = w.fixedSize(extraBits)
	}

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}

	// Stored bytes?
	if storable && storedSize <= size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
}

// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
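//
// Rough order of the decisions below (illustrative summary):
//
//	1. If a header is still open, compare the cost of reusing the current
//	   tables against building new ones (plus logNewTablePenalty).
//	2. For small blocks (< maxPredefinedTokens tokens) also consider the
//	   predefined fixed tables.
//	3. Fall back to a stored block whenever storing is not larger than the
//	   best Huffman estimate.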
func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
	if w.err != nil {
		return
	}

	sync = sync || eof
	if sync {
		tokens.AddEOB()
	}

	// We cannot reuse pure huffman table, and must mark as EOF.
	if (w.lastHuffMan || eof) && w.lastHeader > 0 {
		// We will not try to reuse.
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}

	// fillReuse enables filling of empty values.
	// This will make encodings always reusable without testing.
	// However, this does not appear to benefit on most cases.
	const fillReuse = false

	// Check if we can reuse...
	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
	}

	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
	extraBits := 0
	ssize, storable := w.storedSize(input)

	const usePrefs = true
	if storable || w.lastHeader > 0 {
		extraBits = w.extraBitSize()
	}

	var size int

	// Check if we should reuse.
	if w.lastHeader > 0 {
		// Estimate size for using a new table.
		// Use the previous header size as the best estimate.
		newSize := w.lastHeader + tokens.EstimatedBits()
		newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty

		// The estimated size is calculated as an optimal table.
		// We add a penalty to make it more realistic and re-use a bit more.
		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits

		// Check if a new table is better.
		if newSize < reuseSize {
			// Write the EOB we owe.
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			size = newSize
			w.lastHeader = 0
		} else {
			size = reuseSize
		}

		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
				// Check if we get a reasonable size decrease.
				if storable && ssize <= size {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}
		// Check if we get a reasonable size decrease.
		if storable && ssize <= size {
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}

	// We want a new block/table
	if w.lastHeader == 0 {
		if fillReuse && !sync {
			w.fillTokens()
			numLiterals, numOffsets = maxNumLit, maxNumDist
		} else {
			w.literalFreq[endBlockMarker] = 1
		}

		w.generate()
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)

		var numCodegens int
		if fillReuse && !sync {
			// Reindex for accurate size...
			w.indexTokens(tokens, true)
		}
		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

		// Store predefined, if we don't get a reasonable improvement.
		if tokens.n < maxPredefinedTokens {
			if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
				// Store bytes, if we don't get an improvement.
				if storable && ssize <= preSize {
					w.writeStoredHeader(len(input), eof)
					w.writeBytes(input)
					return
				}
				w.writeFixedHeader(eof)
				if !sync {
					tokens.AddEOB()
				}
				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
				return
			}
		}

		if storable && ssize <= size {
			// Store bytes, if we don't get an improvement.
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}

		// Write Huffman table.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		if !sync {
			w.lastHeader, _ = w.headerSize()
		}
		w.lastHuffMan = false
	}

	if sync {
		w.lastHeader = 0
	}
	// Write the tokens.
	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
}

func (w *huffmanBitWriter) fillTokens() {
	for i, v := range w.literalFreq[:literalCount] {
		if v == 0 {
			w.literalFreq[i] = 1
		}
	}
	for i, v := range w.offsetFreq[:offsetCodeCount] {
		if v == 0 {
			w.offsetFreq[i] = 1
		}
	}
}

// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
	//copy(w.literalFreq[:], t.litHist[:])
	*(*[256]uint16)(w.literalFreq[:]) = t.litHist
	//copy(w.literalFreq[256:], t.extraHist[:])
	*(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
	w.offsetFreq = t.offHist

	if t.n == 0 {
		return
	}
	if filled {
		return maxNumLit, maxNumDist
	}
	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	return
}

func (w *huffmanBitWriter) generate() {
	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
}

// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
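//
// For illustration: a match of length 13 is stored in its token as
// length-3 = 10, which lengthCode maps to index 9 (symbol 266). With
// lengthBase[9] = 10 and lengthExtraBits[9] = 1, the single extra bit
// written is 10 - lengthBase[9] = 0.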
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	if len(tokens) == 0 {
		return
	}

	// Only last token should be endBlockMarker.
	var deferEOB bool
	if tokens[len(tokens)-1] == endBlockMarker {
		tokens = tokens[:len(tokens)-1]
		deferEOB = true
	}

	// Create slices up to the next power of two to avoid bounds checks.
	lits := leCodes[:256]
	offs := oeCodes[:32]
	lengths := leCodes[lengthCodesStart:]
	lengths = lengths[:32]

	// Go 1.16 LOVES having these on stack.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes

	for _, t := range tokens {
		if t < 256 {
			//w.writeCode(lits[t.literal()])
			c := lits[t]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
			continue
		}

		// Write the length
		length := t.length()
		lengthCode := lengthCode(length) & 31
		if false {
			w.writeCode(lengths[lengthCode])
		} else {
			// inlined
			c := lengths[lengthCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}

		if lengthCode >= lengthExtraBitsMinCode {
			extraLengthBits := lengthExtraBits[lengthCode]
			//w.writeBits(extraLength, extraLengthBits)
			extraLength := int32(length - lengthBase[lengthCode])
			bits |= uint64(extraLength) << (nbits & 63)
			nbits += extraLengthBits
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
		// Write the offset
		offset := t.offset()
		offsetCode := (offset >> 16) & 31
		if false {
			w.writeCode(offs[offsetCode])
		} else {
			// inlined
			c := offs[offsetCode]
			bits |= c.code64() << (nbits & 63)
			nbits += c.len()
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}

		if offsetCode >= offsetExtraBitsMinCode {
			offsetComb := offsetCombined[offsetCode]
			//w.writeBits(extraOffset, extraOffsetBits)
			bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
			nbits += uint8(offsetComb)
			if nbits >= 48 {
				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
				bits >>= 48
				nbits -= 48
				nbytes += 6
				if nbytes >= bufferFlushSize {
					if w.err != nil {
						nbytes = 0
						return
					}
					_, w.err = w.writer.Write(w.bytes[:nbytes])
					nbytes = 0
				}
			}
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes

	if deferEOB {
		w.writeCode(leCodes[endBlockMarker])
	}
}

// huffOffset is a static offset encoder used for huffman only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder

func init() {
	w := newHuffmanBitWriter(nil)
	w.offsetFreq[0] = 1
	huffOffset = newHuffmanEncoder(offsetCodeCount)
	huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
}

// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// result gains very little from compression.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
	if .err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq[:] {
		w.literalFreq[i] = 0
	}
	if !w.lastHuffMan {
		for i := range w.offsetFreq[:] {
			w.offsetFreq[i] = 0
		}
	}

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	// Add everything as literals
	// We have to estimate the header size.
	// Assume header is around 70 bytes:
	// https://stackoverflow.com/a/25454430
	const guessHeaderSizeBits = 70 * 8
	histogram(input, w.literalFreq[:])
	ssize, storable := w.storedSize(input)
	if storable && len(input) > 1024 {
		// Quick check for incompressible content.
		abs := float64(0)
		avg := float64(len(input)) / 256
		max := float64(len(input) * 2)
		for _, v := range w.literalFreq[:256] {
			diff := float64(v) - avg
			abs += diff * diff
			if abs > max {
				break
			}
		}
		if abs < max {
			if debugDeflate {
				fmt.Println("stored", abs, "<", max)
			}
			// No chance we can compress this...
			w.writeStoredHeader(len(input), eof)
			w.writeBytes(input)
			return
		}
	}
	w.literalFreq[endBlockMarker] = 1
	w.tmpLitEncoding.generate(w.literalFreq[:], 15)
	estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:])
	if estBits < math.MaxInt32 {
		estBits += w.lastHeader
		if w.lastHeader == 0 {
			estBits += guessHeaderSizeBits
		}
		estBits += estBits >> w.logNewTablePenalty
	}

	// Store bytes, if we don't get a reasonable improvement.
	if storable && ssize <= estBits {
		if debugDeflate {
			fmt.Println("stored,", ssize, "<=", estBits)
		}
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	if w.lastHeader > 0 {
		reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])

		if estBits < reuseSize {
			if debugDeflate {
				fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
			}
			// We owe an EOB
			w.writeCode(w.literalEncoding.codes[endBlockMarker])
			w.lastHeader = 0
		} else if debugDeflate {
			fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
		}
	}

	count := 0
	if w.lastHeader == 0 {
		// Use the temp encoding, so swap.
		w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
		// Generate codegen and codegenFrequencies, which indicates how to encode
		// the literalEncoding and the offsetEncoding.
		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
		w.codegenEncoding.generate(w.codegenFreq[:], 7)
		numCodegens := w.codegens()

		// Huffman.
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
		w.lastHuffMan = true
		w.lastHeader, _ = w.headerSize()
		if debugDeflate {
			count += w.lastHeader
			fmt.Println("header:", count/8)
		}
	}

	encoding := w.literalEncoding.codes[:256]
	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes

	if debugDeflate {
		count -= int(nbytes)*8 + int(nbits)
	}
	// Unroll, write 3 codes/loop.
	// Fastest number of unrolls.
	for len(input) > 3 {
		// We must have at least 48 bits free.
		if nbits >= 8 {
			n := nbits >> 3
			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
			bits >>= (n * 8) & 63
			nbits -= n * 8
			nbytes += n
		}
		if nbytes >= bufferFlushSize {
			if w.err != nil {
				nbytes = 0
				return
			}
			if debugDeflate {
				count += int(nbytes) * 8
			}
			_, w.err = w.writer.Write(w.bytes[:nbytes])
			nbytes = 0
		}
		a, b := encoding[input[0]], encoding[input[1]]
		bits |= a.code64() << (nbits & 63)
		bits |= b.code64() << ((nbits + a.len()) & 63)
		c := encoding[input[2]]
		nbits += b.len() + a.len()
		bits |= c.code64() << (nbits & 63)
		nbits += c.len()
		input = input[3:]
	}

	// Remaining...
	for _, t := range input {
		if nbits >= 48 {
			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
			bits >>= 48
			nbits -= 48
			nbytes += 6
			if nbytes >= bufferFlushSize {
				if w.err != nil {
					nbytes = 0
					return
				}
				if debugDeflate {
					count += int(nbytes) * 8
				}
				_, w.err = w.writer.Write(w.bytes[:nbytes])
				nbytes = 0
			}
		}
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		bits |= c.code64() << (nbits & 63)

		nbits += c.len()
		if debugDeflate {
			count += int(c.len())
		}
	}
	// Restore...
	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes

	if debugDeflate {
		nb := count + int(nbytes)*8 + int(nbits)
		fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
	}
	// Flush if needed to have space.
	if w.nbits >= 48 {
		w.writeOutBits()
	}

	if eof || sync {
		w.writeCode(w.literalEncoding.codes[endBlockMarker])
		w.lastHeader = 0
		w.lastHuffMan = false
	}
}