// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// A Pinner is a set of pinned Go objects. An object can be pinned with
// the Pin method and all pinned objects of a Pinner can be unpinned with the
// Unpin method.
type Pinner struct {
	*pinner
}
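
// Usage sketch (illustrative only, not part of this file): a caller pins an
// object for the duration of a C call and unpins it afterwards. The cgo
// function C.process and the type T are hypothetical.
//
//	var pinner runtime.Pinner
//	obj := new(T)
//	pinner.Pin(obj)
//	C.process(unsafe.Pointer(obj)) // obj cannot move or be freed here
//	pinner.Unpin()                 // obj is subject to normal GC again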

// Pin pins a Go object, preventing it from being moved or freed by the garbage
// collector until the Unpin method has been called.
//
// A pointer to a pinned object can be directly stored in C memory or can be
// contained in Go memory passed to C functions. If the pinned object itself
// contains pointers to Go objects, these objects must be pinned separately
// if they are going to be accessed from C code.
//
// The argument must be a pointer of any type or an unsafe.Pointer.
// It must be the result of calling new, taking the address of a composite
// literal, or taking the address of a local variable. If one of these
// conditions is not met, Pin will panic.
func (p *Pinner) Pin(pointer any) {
	if p.pinner == nil {
		// Check the pinner cache first.
		mp := acquirem()
		if pp := mp.p.ptr(); pp != nil {
			p.pinner = pp.pinnerCache
			pp.pinnerCache = nil
		}
		releasem(mp)

		if p.pinner == nil {
			// Didn't get anything from the pinner cache.
			p.pinner = new(pinner)
			p.refs = p.refStore[:0]

			// We set this finalizer once and never clear it. Thus, if the
			// pinner gets cached, we'll reuse it, along with its finalizer.
			// This lets us avoid the relatively expensive SetFinalizer call
			// when reusing from the cache. The finalizer however has to be
			// resilient to an empty pinner being finalized, which is done
			// by checking p.refs' length.
			SetFinalizer(p.pinner, func(i *pinner) {
				if len(i.refs) != 0 {
					i.unpin() // only required to make the test idempotent
					pinnerLeakPanic()
				}
			})
		}
	}
	ptr := pinnerGetPtr(&pointer)
	setPinned(ptr, true)
	p.refs = append(p.refs, ptr)
}
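
// Argument-rule sketch (illustrative only; the names are hypothetical): the
// calls below follow the conditions documented on Pin, except the last,
// which violates them.
//
//	var p runtime.Pinner
//	p.Pin(new(int))    // ok: result of new
//	p.Pin(&MyStruct{}) // ok: address of a composite literal
//	v := 42
//	p.Pin(&v)          // ok: address of a local variable
//	p.Pin(v)           // panics: argument is not a pointer
//	p.Unpin()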

// Unpin unpins all pinned objects of the Pinner.
func (p *Pinner) Unpin() {
	p.pinner.unpin()

	mp := acquirem()
	if pp := mp.p.ptr(); pp != nil && pp.pinnerCache == nil {
		// Put the pinner back in the cache, but only if the
		// cache is empty. If application code is reusing Pinners
		// on its own, we want to leave the backing store in place
		// so reuse is more efficient.
		pp.pinnerCache = p.pinner
		p.pinner = nil
	}
	releasem(mp)
}

const (
	pinnerSize         = 64
	pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil))
)

type pinner struct {
	refs     []unsafe.Pointer
	refStore [pinnerRefStoreSize]unsafe.Pointer
}
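
// Size sketch (assuming a 64-bit platform, where a slice header is 24 bytes
// and a pointer is 8 bytes): pinnerRefStoreSize is (64-24)/8 = 5, so up to
// five pinned pointers live inline in refStore without any extra allocation;
// a sixth pin makes append spill refs to a heap-allocated backing array.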

func (p *pinner) unpin() {
	if p == nil || p.refs == nil {
		return
	}
	for i := range p.refs {
		setPinned(p.refs[i], false)
	}
	// The following two lines make all pointers to references
	// in p.refs unreachable, either by deleting them or dropping
	// p.refs' backing store (if it was not backed by refStore).
	p.refStore = [pinnerRefStoreSize]unsafe.Pointer{}
	p.refs = p.refStore[:0]
}

func pinnerGetPtr(i *any) unsafe.Pointer {
	e := efaceOf(i)
	etyp := e._type
	if etyp == nil {
		panic(errorString("runtime.Pinner: argument is nil"))
	}
	if kind := etyp.Kind_ & kindMask; kind != kindPtr && kind != kindUnsafePointer {
		panic(errorString("runtime.Pinner: argument is not a pointer: " + toRType(etyp).string()))
	}
	if inUserArenaChunk(uintptr(e.data)) {
		// Arena-allocated objects are not eligible for pinning.
		panic(errorString("runtime.Pinner: object was allocated into an arena"))
	}
	return e.data
}

// isPinned checks if a Go pointer is pinned.
// nosplit, because it's called from nosplit code in cgocheck.
//
//go:nosplit
func isPinned(ptr unsafe.Pointer) bool {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		// this code is only called for Go pointers, so this must be a
		// linker-allocated global object.
		return true
	}
	pinnerBits := span.getPinnerBits()
	// these pinnerBits might get unlinked by a concurrently running sweep, but
	// that's OK because gcBits don't get cleared until the following GC cycle
	// (nextMarkBitArenaEpoch)
	if pinnerBits == nil {
		return false
	}
	objIndex := span.objIndex(uintptr(ptr))
	pinState := pinnerBits.ofObject(objIndex)
	KeepAlive(ptr) // make sure ptr is alive until we are done so the span can't be freed
	return pinState.isPinned()
}

// setPinned marks or unmarks a Go pointer as pinned.
func setPinned(ptr unsafe.Pointer, pin bool) {
	span := spanOfHeap(uintptr(ptr))
	if span == nil {
		if isGoPointerWithoutSpan(ptr) {
			// this is a linker-allocated or zero size object, nothing to do.
			return
		}
		panic(errorString("runtime.Pinner.Pin: argument is not a Go pointer"))
	}

	// Ensure that the span is swept, because sweeping accesses the specials
	// list without locks.
	mp := acquirem()
	span.ensureSwept()
	KeepAlive(ptr) // make sure ptr is still alive after span is swept

	objIndex := span.objIndex(uintptr(ptr))

	lock(&span.speciallock) // guard against concurrent calls of setPinned on same span

	pinnerBits := span.getPinnerBits()
	if pinnerBits == nil {
		pinnerBits = span.newPinnerBits()
		span.setPinnerBits(pinnerBits)
	}
	pinState := pinnerBits.ofObject(objIndex)
	if pin {
		if pinState.isPinned() {
			// multiple pins on same object, set multipin bit
			pinState.setMultiPinned(true)
			// and increase the pin counter
			// TODO(mknyszek): investigate if systemstack is necessary here
			systemstack(func() {
				offset := objIndex * span.elemsize
				span.incPinCounter(offset)
			})
		} else {
			// set pin bit
			pinState.setPinned(true)
		}
	} else {
		// unpin
		if pinState.isPinned() {
			if pinState.isMultiPinned() {
				var exists bool
				// TODO(mknyszek): investigate if systemstack is necessary here
				systemstack(func() {
					offset := objIndex * span.elemsize
					exists = span.decPinCounter(offset)
				})
				if !exists {
					// counter is 0, clear multipin bit
					pinState.setMultiPinned(false)
				}
			} else {
				// no multipins recorded. unpin object.
				pinState.setPinned(false)
			}
		} else {
			// unpinning unpinned object, bail out
			throw("runtime.Pinner: object already unpinned")
		}
	}
	unlock(&span.speciallock)
	releasem(mp)
}

type pinState struct {
	bytep   *uint8
	byteVal uint8
	mask    uint8
}

// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (v *pinState) isPinned() bool {
	return (v.byteVal & v.mask) != 0
}

func (v *pinState) isMultiPinned() bool {
	return (v.byteVal & (v.mask << 1)) != 0
}

func (v *pinState) setPinned(val bool) {
	v.set(val, false)
}

func (v *pinState) setMultiPinned(val bool) {
	v.set(val, true)
}

// set sets the pin bit of the pinState to val. If multipin is true, it
// sets/unsets the multipin bit instead.
func (v *pinState) set(val bool, multipin bool) {
	mask := v.mask
	if multipin {
		mask <<= 1
	}
	if val {
		atomic.Or8(v.bytep, mask)
	} else {
		atomic.And8(v.bytep, ^mask)
	}
}

// pinnerBits is the same type as gcBits but has different methods.
type pinnerBits gcBits
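
// Bitmap layout sketch (derived from ofObject and pinState.set): every
// object in the span owns two adjacent bits, so the n'th object's pin bit
// is bit 2n and its multipin bit is bit 2n+1:
//
//	bit:     0    1      2    3      4    5    ...
//	        pin  multi  pin  multi  pin  multi
//	object:  0    0      1    1      2    2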

// ofObject returns the pinState of the n'th object.
// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (p *pinnerBits) ofObject(n uintptr) pinState {
	bytep, mask := (*gcBits)(p).bitp(n * 2)
	byteVal := atomic.Load8(bytep)
	return pinState{bytep, byteVal, mask}
}

func (s *mspan) pinnerBitSize() uintptr {
	return divRoundUp(s.nelems*2, 8)
}
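
// For example (illustrative numbers): a span with nelems = 100 needs
// 100*2 = 200 bits of pinner state, which divRoundUp(200, 8) rounds up
// to 25 bytes.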

// newPinnerBits returns a pointer to 8 byte aligned bytes to be used for this
// span's pinner bits. newPinnerBits is used to mark objects that are pinned.
// They are copied when the span is swept.
func (s *mspan) newPinnerBits() *pinnerBits {
	return (*pinnerBits)(newMarkBits(s.nelems * 2))
}

// nosplit, because it's called by isPinned, which is nosplit
//
//go:nosplit
func (s *mspan) getPinnerBits() *pinnerBits {
	return (*pinnerBits)(atomic.Loadp(unsafe.Pointer(&s.pinnerBits)))
}

func (s *mspan) setPinnerBits(p *pinnerBits) {
	atomicstorep(unsafe.Pointer(&s.pinnerBits), unsafe.Pointer(p))
}

// refreshPinnerBits replaces pinnerBits with a fresh copy in the arenas for the
// next GC cycle. If the current pinnerBits do not record any pinned objects,
// the span's pinnerBits are set to nil instead.
func (s *mspan) refreshPinnerBits() {
	p := s.getPinnerBits()
	if p == nil {
		return
	}

	hasPins := false
	bytes := alignUp(s.pinnerBitSize(), 8)

	// Iterate over each 8-byte chunk and check for pins. Note that
	// newPinnerBits guarantees that pinnerBits will be 8-byte aligned, so we
	// don't have to worry about edge cases; irrelevant bits will simply be
	// zero.
	for _, x := range unsafe.Slice((*uint64)(unsafe.Pointer(&p.x)), bytes/8) {
		if x != 0 {
			hasPins = true
			break
		}
	}

	if hasPins {
		newPinnerBits := s.newPinnerBits()
		memmove(unsafe.Pointer(&newPinnerBits.x), unsafe.Pointer(&p.x), bytes)
		s.setPinnerBits(newPinnerBits)
	} else {
		s.setPinnerBits(nil)
	}
}

// incPinCounter is only called for multiple pins of the same object and records
// the _additional_ pins.
func (span *mspan) incPinCounter(offset uintptr) {
	var rec *specialPinCounter
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		lock(&mheap_.speciallock)
		rec = (*specialPinCounter)(mheap_.specialPinCounterAlloc.alloc())
		unlock(&mheap_.speciallock)
		// splice in record, fill in offset.
		rec.special.offset = uint16(offset)
		rec.special.kind = _KindSpecialPinCounter
		rec.special.next = *ref
		*ref = (*special)(unsafe.Pointer(rec))
		spanHasSpecials(span)
	} else {
		rec = (*specialPinCounter)(unsafe.Pointer(*ref))
	}
	rec.counter++
}

// decPinCounter decreases the counter. If the counter reaches 0, the counter
// special is deleted and false is returned. Otherwise true is returned.
func (span *mspan) decPinCounter(offset uintptr) bool {
	ref, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		throw("runtime.Pinner: decreased non-existing pin counter")
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*ref))
	counter.counter--
	if counter.counter == 0 {
		*ref = counter.special.next
		if span.specials == nil {
			spanHasNoSpecials(span)
		}
		lock(&mheap_.speciallock)
		mheap_.specialPinCounterAlloc.free(unsafe.Pointer(counter))
		unlock(&mheap_.speciallock)
		return false
	}
	return true
}
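
// Pin-count bookkeeping sketch (derived from setPinned above): the pin bit
// covers the first pin, and the specialPinCounter records only the
// additional pins. For three pins of one object, unpinned one at a time:
//
//	Pin   #1: pin bit set,       no counter special
//	Pin   #2: multipin bit set,  counter = 1
//	Pin   #3:                    counter = 2
//	Unpin #1:                    counter = 1
//	Unpin #2:                    counter = 0, special freed, multipin cleared
//	Unpin #3: pin bit cleared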

// pinnerGetPinCounter is only used by tests.
func pinnerGetPinCounter(addr unsafe.Pointer) *uintptr {
	_, span, objIndex := findObject(uintptr(addr), 0, 0)
	offset := objIndex * span.elemsize
	t, exists := span.specialFindSplicePoint(offset, _KindSpecialPinCounter)
	if !exists {
		return nil
	}
	counter := (*specialPinCounter)(unsafe.Pointer(*t))
	return &counter.counter
}

// pinnerLeakPanic is a variable rather than a plain function so that a test
// can override it and verify that the GC panics when a pinned pointer leaks.
var pinnerLeakPanic = func() {
	panic(errorString("runtime.Pinner: found leaking pinned pointer; forgot to call Unpin()?"))
}