// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Address range data structure.
//
// This file contains an implementation of a data structure which
// manages ordered address ranges.

package runtime

import (
	"internal/goarch"
	"internal/runtime/atomic"
	"unsafe"
)

// addrRange represents a region of address space.
//
// An addrRange must never span a gap in the address space.
type addrRange struct {
	// base and limit together represent the region of address space
	// [base, limit). That is, base is inclusive, limit is exclusive.
	// These are addresses over an offset view of the address space on
	// platforms with a segmented address space, that is, on platforms
	// where arenaBaseOffset != 0.
	base, limit offAddr
}

// makeAddrRange creates a new address range from two virtual addresses.
//
// Throws if the base and limit are not in the same memory segment.
func makeAddrRange(base, limit uintptr) addrRange {
	r := addrRange{offAddr{base}, offAddr{limit}}
	if (base-arenaBaseOffset >= base) != (limit-arenaBaseOffset >= limit) {
		throw("addr range base and limit are not in the same memory segment")
	}
	return r
}

// size returns the size of the range represented in bytes.
func (a addrRange) size() uintptr {
	if !a.base.lessThan(a.limit) {
		return 0
	}
	// Subtraction is safe because limit and base must be in the same
	// segment of the address space.
	return a.limit.diff(a.base)
}

// contains returns whether or not the range contains a given address.
func (a addrRange) contains(addr uintptr) bool {
	return a.base.lessEqual(offAddr{addr}) && (offAddr{addr}).lessThan(a.limit)
}

// subtract removes any overlap with b from a and returns the new range.
// subtract assumes that a and b either don't overlap at all, only overlap
// on one side, or are equal. If b is strictly contained in a, thus forcing
// a split, it will throw.
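//
// For example (treating addresses as plain offsets, i.e. assuming
// arenaBaseOffset is 0), subtracting [0x2000, 0x3000) from
// [0x1000, 0x3000) yields [0x1000, 0x2000).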
func (a addrRange) subtract(b addrRange) addrRange {
	if b.base.lessEqual(a.base) && a.limit.lessEqual(b.limit) {
		return addrRange{}
	} else if a.base.lessThan(b.base) && b.limit.lessThan(a.limit) {
		throw("bad prune")
	} else if b.limit.lessThan(a.limit) && a.base.lessThan(b.limit) {
		a.base = b.limit
	} else if a.base.lessThan(b.base) && b.base.lessThan(a.limit) {
		a.limit = b.base
	}
	return a
}

// takeFromFront takes len bytes from the front of the address range, aligning
// the base to align first. On success, returns the aligned start of the region
// taken and true.
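//
// For example (assuming arenaBaseOffset is 0), taking 0x100 bytes aligned
// to 0x200 from [0x1000, 0x2000) returns 0x1000 and leaves the range as
// [0x1100, 0x2000).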
func (a *addrRange) takeFromFront(len uintptr, align uint8) (uintptr, bool) {
	base := alignUp(a.base.addr(), uintptr(align)) + len
	if base > a.limit.addr() {
		return 0, false
	}
	a.base = offAddr{base}
	return base - len, true
}

// takeFromBack takes len bytes from the end of the address range, aligning
// the limit to align after subtracting len. On success, returns the aligned
// start of the region taken and true.
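//
// For example (assuming arenaBaseOffset is 0), taking 0x100 bytes aligned
// to 0x200 from [0x1000, 0x2000) returns 0x1e00 and leaves the range as
// [0x1000, 0x1e00).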
func (a *addrRange) takeFromBack(len uintptr, align uint8) (uintptr, bool) {
	limit := alignDown(a.limit.addr()-len, uintptr(align))
	if a.base.addr() > limit {
		return 0, false
	}
	a.limit = offAddr{limit}
	return limit, true
}

// removeGreaterEqual removes all addresses in a greater than or equal
// to addr and returns the new range.
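//
// For example (assuming arenaBaseOffset is 0), removing addresses >= 0x2000
// from [0x1000, 0x3000) yields [0x1000, 0x2000).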
func (a addrRange) removeGreaterEqual(addr uintptr) addrRange {
	if (offAddr{addr}).lessEqual(a.base) {
		return addrRange{}
	}
	if a.limit.lessEqual(offAddr{addr}) {
		return a
	}
	return makeAddrRange(a.base.addr(), addr)
}

var (
	// minOffAddr is the minimum address in the offset space, and
	// it corresponds to the virtual address arenaBaseOffset.
	minOffAddr = offAddr{arenaBaseOffset}

	// maxOffAddr is the maximum address in the offset address
	// space. It corresponds to the highest virtual address representable
	// by the page alloc chunk and heap arena maps.
	maxOffAddr = offAddr{(((1 << heapAddrBits) - 1) + arenaBaseOffset) & uintptrMask}
)

// offAddr represents an address in a contiguous view
// of the address space on systems where the address space is
// segmented. On other systems, it's just a normal address.
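//
// Comparisons between offAddrs are performed after subtracting
// arenaBaseOffset, so the virtual address arenaBaseOffset is the minimum
// of the offset address space (see minOffAddr).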
type offAddr struct {
	// a is just the virtual address, but should never be used
	// directly. Call addr() to get this value instead.
	a uintptr
}

// add adds a uintptr offset to the offAddr.
func (l offAddr) add(bytes uintptr) offAddr {
	return offAddr{a: l.a + bytes}
}

// sub subtracts a uintptr offset from the offAddr.
func (l offAddr) sub(bytes uintptr) offAddr {
	return offAddr{a: l.a - bytes}
}

// diff returns the number of bytes between the
// two offAddrs.
func (l1 offAddr) diff(l2 offAddr) uintptr {
	return l1.a - l2.a
}

// lessThan returns true if l1 is less than l2 in the offset
// address space.
func (l1 offAddr) lessThan(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) < (l2.a - arenaBaseOffset)
}

// lessEqual returns true if l1 is less than or equal to l2 in
// the offset address space.
func (l1 offAddr) lessEqual(l2 offAddr) bool {
	return (l1.a - arenaBaseOffset) <= (l2.a - arenaBaseOffset)
}

// equal returns true if the two offAddr values are equal.
func (l1 offAddr) equal(l2 offAddr) bool {
	// No need to compare in the offset space, it
	// means the same thing.
	return l1 == l2
}

// addr returns the virtual address for this offset address.
func (l offAddr) addr() uintptr {
	return l.a
}

// atomicOffAddr is like offAddr, but operations on it are atomic.
// It also contains operations to be able to store marked addresses
// to ensure that they're not overridden until they've been seen.
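//
// A marked address is stored as the negation of its offset-space value
// (see StoreMarked), so Load can report whether the value was marked,
// Clear will not overwrite a marked value, and StoreUnmark only replaces
// the specific marked value it was given.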
type atomicOffAddr struct {
	// a contains the offset address, unlike offAddr.
	a atomic.Int64
}

// Clear attempts to store minOffAddr in atomicOffAddr. It may fail
// if a marked value is placed in the box in the meanwhile.
func (b *atomicOffAddr) Clear() {
	for {
		old := b.a.Load()
		if old < 0 {
			return
		}
		if b.a.CompareAndSwap(old, int64(minOffAddr.addr()-arenaBaseOffset)) {
			return
		}
	}
}

// StoreMin stores addr if it's less than the current value in the
// offset address space and the current value is not marked.
func (b *atomicOffAddr) StoreMin(addr uintptr) {
	new := int64(addr - arenaBaseOffset)
	for {
		old := b.a.Load()
		if old < new {
			return
		}
		if b.a.CompareAndSwap(old, new) {
			return
		}
	}
}

// StoreUnmark attempts to unmark the value in atomicOffAddr and
// replace it with newAddr. markedAddr must be a marked address
// returned by Load. This function will not store newAddr if the
// box no longer contains markedAddr.
func (b *atomicOffAddr) StoreUnmark(markedAddr, newAddr uintptr) {
	b.a.CompareAndSwap(-int64(markedAddr-arenaBaseOffset), int64(newAddr-arenaBaseOffset))
}

// StoreMarked stores addr after converting it to the offset address
// space and negating it.
func (b *atomicOffAddr) StoreMarked(addr uintptr) {
	b.a.Store(-int64(addr - arenaBaseOffset))
}

// Load returns the address in the box as a virtual address. It also
// reports whether the value was marked.
func (b *atomicOffAddr) Load() (uintptr, bool) {
	v := b.a.Load()
	wasMarked := false
	if v < 0 {
		wasMarked = true
		v = -v
	}
	return uintptr(v) + arenaBaseOffset, wasMarked
}

// addrRanges is a data structure holding a collection of ranges of
// address space.
//
// The ranges are coalesced eagerly to reduce the
// number of ranges it holds.
//
// The slice backing store for the ranges field is persistentalloc'd
// and thus there is no way to free it.
//
// addrRanges is not thread-safe.
type addrRanges struct {
	// ranges is a slice of ranges sorted by base.
	ranges []addrRange

	// totalBytes is the total amount of address space in bytes counted by
	// this addrRanges.
	totalBytes uintptr

	// sysStat is the stat to track allocations by this type
	sysStat *sysMemStat
}

// init prepares a for use, allocating an initial backing store for its
// ranges from persistent memory and accounting that allocation to sysStat.
func (a *addrRanges) init(sysStat *sysMemStat) {
	ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
	ranges.len = 0
	ranges.cap = 16
	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
	a.sysStat = sysStat
	a.totalBytes = 0
}

// findSucc returns the first index in a such that addr is
// less than the base of the addrRange at that index.
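//
// For example (assuming arenaBaseOffset is 0), if a holds [0x1000, 0x2000)
// and [0x3000, 0x4000), findSucc returns 0 for addr 0x500, 1 for addr
// 0x1500 or 0x2500, and 2 for addr 0x3500.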
func (a *addrRanges) findSucc(addr uintptr) int {
	base := offAddr{addr}

	// Narrow down the search space via a binary search
	// for large addrRanges until we have at most iterMax
	// candidates left.
	const iterMax = 8
	bot, top := 0, len(a.ranges)
	for top-bot > iterMax {
		i := ((top - bot) / 2) + bot
		if a.ranges[i].contains(base.addr()) {
			// a.ranges[i] contains base, so
			// its successor is the next index.
			return i + 1
		}
		if base.lessThan(a.ranges[i].base) {
			// In this case i might actually be
			// the successor, but we can't be sure
			// until we check the ones before it.
			top = i
		} else {
			// In this case we know base is
			// greater than or equal to a.ranges[i].limit-1,
			// so i is definitely not the successor.
			// We already checked i, so pick the next
			// one.
			bot = i + 1
		}
	}
	// There are top-bot candidates left, so
	// iterate over them and find the first that
	// base is strictly less than.
	for i := bot; i < top; i++ {
		if base.lessThan(a.ranges[i].base) {
			return i
		}
	}
	return top
}

// findAddrGreaterEqual returns the smallest address represented by a
// that is >= addr. Thus, if the address is represented by a,
// then it returns addr. The second return value indicates whether
// such an address exists for addr in a. That is, if addr is larger than
// any address known to a, the second return value will be false.
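//
// For example (assuming arenaBaseOffset is 0), if a holds [0x1000, 0x2000)
// and [0x3000, 0x4000), findAddrGreaterEqual returns (0x1000, true) for
// 0x500, (0x1500, true) for 0x1500, (0x3000, true) for 0x2500, and
// (0, false) for 0x4500.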
func (a *addrRanges) findAddrGreaterEqual(addr uintptr) (uintptr, bool) {
	i := a.findSucc(addr)
	if i == 0 {
		return a.ranges[0].base.addr(), true
	}
	if a.ranges[i-1].contains(addr) {
		return addr, true
	}
	if i < len(a.ranges) {
		return a.ranges[i].base.addr(), true
	}
	return 0, false
}

// contains returns true if a covers the address addr.
func (a *addrRanges) contains(addr uintptr) bool {
	i := a.findSucc(addr)
	if i == 0 {
		return false
	}
	return a.ranges[i-1].contains(addr)
}

// add inserts a new address range to a.
//
// r must not overlap with any address range in a and r.size() must be > 0.
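//
// For example (assuming arenaBaseOffset is 0), adding [0x2000, 0x3000) to
// an addrRanges holding [0x1000, 0x2000) and [0x3000, 0x4000) coalesces
// all three into the single range [0x1000, 0x4000).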
func (a *addrRanges) add(r addrRange) {
	// The copies in this function are potentially expensive, but this data
	// structure is meant to represent the Go heap. At worst, copying this
	// would take ~160µs assuming a conservative copying rate of 25 GiB/s (the
	// copy will almost never trigger a page fault) for a 1 TiB heap with 4 MiB
	// arenas which is completely discontiguous. ~160µs is still a lot, but in
	// practice most platforms have 64 MiB arenas (which cuts this by a factor
	// of 16) and Go heaps are usually mostly contiguous, so the chance that
	// an addrRanges even grows to that size is extremely low.

	// An empty range has no effect on the set of addresses represented
	// by a, but passing a zero-sized range is almost always a bug.
	if r.size() == 0 {
		print("runtime: range = {", hex(r.base.addr()), ", ", hex(r.limit.addr()), "}\n")
		throw("attempted to add zero-sized address range")
	}
	// Because we assume r is not currently represented in a,
	// findSucc gives us our insertion index.
	i := a.findSucc(r.base.addr())
	coalescesDown := i > 0 && a.ranges[i-1].limit.equal(r.base)
	coalescesUp := i < len(a.ranges) && r.limit.equal(a.ranges[i].base)
	if coalescesUp && coalescesDown {
		// We have neighbors and they both border us.
		// Merge a.ranges[i-1], r, and a.ranges[i] together into a.ranges[i-1].
		a.ranges[i-1].limit = a.ranges[i].limit

		// Delete a.ranges[i].
		copy(a.ranges[i:], a.ranges[i+1:])
		a.ranges = a.ranges[:len(a.ranges)-1]
	} else if coalescesDown {
		// We have a neighbor at a lower address only and it borders us.
		// Merge the new space into a.ranges[i-1].
		a.ranges[i-1].limit = r.limit
	} else if coalescesUp {
		// We have a neighbor at a higher address only and it borders us.
		// Merge the new space into a.ranges[i].
		a.ranges[i].base = r.base
	} else {
		// We may or may not have neighbors which don't border us.
		// Add the new range.
		if len(a.ranges)+1 > cap(a.ranges) {
			// Grow the array. Note that this leaks the old array, but since
			// we're doubling we have at most 2x waste. For a 1 TiB heap and
			// 4 MiB arenas which are all discontiguous (both very conservative
			// assumptions), this would waste at most 4 MiB of memory.
			oldRanges := a.ranges
			ranges := (*notInHeapSlice)(unsafe.Pointer(&a.ranges))
			ranges.len = len(oldRanges) + 1
			ranges.cap = cap(oldRanges) * 2
			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))

			// Copy in the old array, but make space for the new range.
			copy(a.ranges[:i], oldRanges[:i])
			copy(a.ranges[i+1:], oldRanges[i:])
		} else {
			a.ranges = a.ranges[:len(a.ranges)+1]
			copy(a.ranges[i+1:], a.ranges[i:])
		}
		a.ranges[i] = r
	}
	a.totalBytes += r.size()
}

// removeLast removes and returns the highest-addressed contiguous range
// of a, or the last nBytes of that range, whichever is smaller. If a is
// empty, it returns an empty range.
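//
// For example (assuming arenaBaseOffset is 0), if the highest range is
// [0x1000, 0x3000) and nBytes is 0x1000, removeLast returns
// [0x2000, 0x3000) and truncates that range to [0x1000, 0x2000).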
func (a *addrRanges) removeLast(nBytes uintptr) addrRange {
	if len(a.ranges) == 0 {
		return addrRange{}
	}
	r := a.ranges[len(a.ranges)-1]
	size := r.size()
	if size > nBytes {
		newEnd := r.limit.sub(nBytes)
		a.ranges[len(a.ranges)-1].limit = newEnd
		a.totalBytes -= nBytes
		return addrRange{newEnd, r.limit}
	}
	a.ranges = a.ranges[:len(a.ranges)-1]
	a.totalBytes -= size
	return r
}

// removeGreaterEqual removes the ranges of a which are above addr, and additionally
// splits any range containing addr.
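//
// For example (assuming arenaBaseOffset is 0), calling removeGreaterEqual(0x1800)
// on an addrRanges holding [0x1000, 0x2000) and [0x3000, 0x4000) drops the
// second range entirely and truncates the first to [0x1000, 0x1800).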
func (a *addrRanges) removeGreaterEqual(addr uintptr) {
	pivot := a.findSucc(addr)
	if pivot == 0 {
		// addr is before all ranges in a.
		a.totalBytes = 0
		a.ranges = a.ranges[:0]
		return
	}
	removed := uintptr(0)
	for _, r := range a.ranges[pivot:] {
		removed += r.size()
	}
	if r := a.ranges[pivot-1]; r.contains(addr) {
		removed += r.size()
		r = r.removeGreaterEqual(addr)
		if r.size() == 0 {
			pivot--
		} else {
			removed -= r.size()
			a.ranges[pivot-1] = r
		}
	}
	a.ranges = a.ranges[:pivot]
	a.totalBytes -= removed
}

// cloneInto makes a deep clone of a's state into b, re-using
// b's ranges if able.
func (a *addrRanges) cloneInto(b *addrRanges) {
	if len(a.ranges) > cap(b.ranges) {
		// Grow the array.
		ranges := (*notInHeapSlice)(unsafe.Pointer(&b.ranges))
		ranges.len = 0
		ranges.cap = cap(a.ranges)
		ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
	}
	b.ranges = b.ranges[:len(a.ranges)]
	b.totalBytes = a.totalBytes
	copy(b.ranges, a.ranges)
}