//go:build !js
// +build !js

package websocket

import (
	"compress/flate"
	"io"
	"sync"
)

// CompressionMode represents the modes available to the permessage-deflate extension.
// See https://tools.ietf.org/html/rfc7692
//
// Works in all modern browsers except Safari which does not implement the permessage-deflate extension.
//
// Compression is only used if the peer supports the mode selected.
type CompressionMode int

const (
	// CompressionDisabled disables the negotiation of the permessage-deflate extension.
	//
	// This is the default. Do not enable compression without benchmarking for your particular use case first.
	CompressionDisabled CompressionMode = iota

	// CompressionContextTakeover compresses each message greater than 128 bytes reusing the 32 KB sliding window from
	// previous messages. i.e compression context across messages is preserved.
	//
	// As most WebSocket protocols are text based and repetitive, this compression mode can be very efficient.
	//
	// The memory overhead is a fixed 32 KB sliding window, a fixed 1.2 MB flate.Writer and a sync.Pool of 40 KB flate.Reader's
	// that are used when reading and then returned.
	//
	// Thus, it uses more memory than CompressionNoContextTakeover but compresses more efficiently.
	//
	// If the peer does not support CompressionContextTakeover then we will fall back to CompressionNoContextTakeover.
	CompressionContextTakeover

	// CompressionNoContextTakeover compresses each message greater than 512 bytes. Each message is compressed with
	// a new 1.2 MB flate.Writer pulled from a sync.Pool. Each message is read with a 40 KB flate.Reader pulled from
	// a sync.Pool.
	//
	// This means less efficient compression as the sliding window from previous messages will not be used but the
	// memory overhead will be lower as there will be no fixed cost for the flate.Writer nor the 32 KB sliding window.
	// Especially if the connections are long lived and seldom written to.
	//
	// Thus, it uses less memory than CompressionContextTakeover but compresses less efficiently.
	//
	// If the peer does not support CompressionNoContextTakeover then we will fall back to CompressionDisabled.
	CompressionNoContextTakeover
)

// opts translates the mode into the permessage-deflate parameters we will
// offer. Context takeover is disabled for both client and server exactly when
// the mode is CompressionNoContextTakeover.
func (m CompressionMode) opts() *compressionOptions {
	return &compressionOptions{
		clientNoContextTakeover: m == CompressionNoContextTakeover,
		serverNoContextTakeover: m == CompressionNoContextTakeover,
	}
}

// compressionOptions holds the negotiated permessage-deflate parameters.
type compressionOptions struct {
	clientNoContextTakeover bool
	serverNoContextTakeover bool
}

// String formats the options as a Sec-WebSocket-Extensions header value
// per RFC 7692 section 7.
func (copts *compressionOptions) String() string {
	s := "permessage-deflate"
	if copts.clientNoContextTakeover {
		s += "; client_no_context_takeover"
	}
	if copts.serverNoContextTakeover {
		s += "; server_no_context_takeover"
	}
	return s
}

// deflateMessageTail is the flate sync-flush marker (an empty stored block).
//
// These bytes are required to get flate.Reader to return.
// They are removed when sending to avoid the overhead as
// WebSocket framing tells when the message has ended but then
// we need to add them back otherwise flate.Reader keeps
// trying to read more bytes.
const deflateMessageTail = "\x00\x00\xff\xff"

// trimLastFourBytesWriter wraps an io.Writer and withholds the last four
// bytes written so far, forwarding them only once later writes prove they
// are not the final four bytes of the stream. It is used to strip the
// deflateMessageTail from outgoing compressed messages.
type trimLastFourBytesWriter struct {
	w    io.Writer
	tail []byte // at most the last 4 bytes seen, not yet forwarded
}

// reset clears the buffered tail so the writer can be reused for a new message.
func (tw *trimLastFourBytesWriter) reset() {
	if tw != nil && tw.tail != nil {
		tw.tail = tw.tail[:0]
	}
}

// Write forwards p to the underlying writer while always keeping the most
// recent four bytes of the stream buffered in tw.tail. The returned count
// includes the withheld bytes so callers observe a complete write.
func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) {
	if tw.tail == nil {
		tw.tail = make([]byte, 0, 4)
	}

	extra := len(tw.tail) + len(p) - 4

	if extra <= 0 {
		tw.tail = append(tw.tail, p...)
		return len(p), nil
	}

	// Now we need to write as many extra bytes as we can from the previous tail.
	if extra > len(tw.tail) {
		extra = len(tw.tail)
	}
	if extra > 0 {
		_, err := tw.w.Write(tw.tail[:extra])
		if err != nil {
			return 0, err
		}

		// Shift remaining bytes in tail over.
		n := copy(tw.tail, tw.tail[extra:])
		tw.tail = tw.tail[:n]
	}

	// If p is less than or equal to 4 bytes,
	// all of it is part of the tail.
	if len(p) <= 4 {
		tw.tail = append(tw.tail, p...)
		return len(p), nil
	}

	// Otherwise, only the last 4 bytes are.
	tw.tail = append(tw.tail, p[len(p)-4:]...)

	p = p[:len(p)-4]
	n, err := tw.w.Write(p)
	return n + 4, err
}

// flateReaderPool caches flate readers (with their 40 KB windows) across messages.
var flateReaderPool sync.Pool

// getFlateReader returns a flate reader consuming r primed with dict,
// reusing a pooled reader when one is available.
func getFlateReader(r io.Reader, dict []byte) io.Reader {
	fr, ok := flateReaderPool.Get().(io.Reader)
	if !ok {
		return flate.NewReaderDict(r, dict)
	}
	// Readers from flate.NewReaderDict always implement flate.Resetter.
	fr.(flate.Resetter).Reset(r, dict)
	return fr
}

// putFlateReader returns a reader obtained from getFlateReader to the pool.
func putFlateReader(fr io.Reader) {
	flateReaderPool.Put(fr)
}

// flateWriterPool caches the large (~1.2 MB) flate writers across messages.
var flateWriterPool sync.Pool

// getFlateWriter returns a flate writer targeting w at flate.BestSpeed,
// reusing a pooled writer when one is available.
func getFlateWriter(w io.Writer) *flate.Writer {
	fw, ok := flateWriterPool.Get().(*flate.Writer)
	if !ok {
		// Error is impossible: flate.BestSpeed is a valid compression level.
		fw, _ = flate.NewWriter(w, flate.BestSpeed)
		return fw
	}
	fw.Reset(w)
	return fw
}

// putFlateWriter returns a writer obtained from getFlateWriter to the pool.
func putFlateWriter(w *flate.Writer) {
	flateWriterPool.Put(w)
}

// slidingWindow maintains the most recently read bytes of a connection so
// they can be used as the deflate dictionary for the next message when
// context takeover is enabled.
type slidingWindow struct {
	buf []byte
}

// swPool holds one sync.Pool per window capacity, guarded by swPoolMu.
var swPoolMu sync.RWMutex
var swPool = map[int]*sync.Pool{}

// slidingWindowPool returns the pool dedicated to windows of capacity n,
// creating it on first use. Two goroutines racing on first use may build
// duplicate pools; the map write simply keeps the last one, which is harmless.
func slidingWindowPool(n int) *sync.Pool {
	swPoolMu.RLock()
	p, ok := swPool[n]
	swPoolMu.RUnlock()
	if ok {
		return p
	}

	p = &sync.Pool{}

	swPoolMu.Lock()
	swPool[n] = p
	swPoolMu.Unlock()

	return p
}

// init sizes the window to n bytes (32768, the deflate maximum, when n is 0),
// reusing a pooled window of the same capacity if one exists. It is a no-op
// if the window is already initialized.
func (sw *slidingWindow) init(n int) {
	if sw.buf != nil {
		return
	}

	if n == 0 {
		n = 32768
	}

	p := slidingWindowPool(n)
	sw2, ok := p.Get().(*slidingWindow)
	if ok {
		*sw = *sw2
	} else {
		sw.buf = make([]byte, 0, n)
	}
}

// close empties the window and returns it to the pool matching its capacity.
func (sw *slidingWindow) close() {
	sw.buf = sw.buf[:0]
	swPoolMu.Lock()
	swPool[cap(sw.buf)].Put(sw)
	swPoolMu.Unlock()
}

// write appends p to the window, discarding the oldest bytes so that the
// window never exceeds its fixed capacity.
func (sw *slidingWindow) write(p []byte) {
	if len(p) >= cap(sw.buf) {
		// p alone fills the window: keep only its last cap(sw.buf) bytes.
		sw.buf = sw.buf[:cap(sw.buf)]
		p = p[len(p)-cap(sw.buf):]
		copy(sw.buf, p)
		return
	}

	left := cap(sw.buf) - len(sw.buf)
	if left < len(p) {
		// We need to shift spaceNeeded bytes from the end to make room for p at the end.
		spaceNeeded := len(p) - left
		copy(sw.buf, sw.buf[spaceNeeded:])
		sw.buf = sw.buf[:len(sw.buf)-spaceNeeded]
	}

	sw.buf = append(sw.buf, p...)
}