const internal/goarch.PtrSize

306 uses

	internal/goarch (current package)
		goarch.go#L33: const PtrSize = 4 << (^uintptr(0) >> 63)
		goarch.go#L49: const Int64Align = PtrSize
		goarch_amd64.go#L12: 	_StackAlign          = PtrSize
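
PtrSize is the size of a pointer in bytes: 4 on 32-bit targets, 8 on 64-bit ones. The definition at goarch.go#L33 computes this entirely at compile time: ^uintptr(0) is an all-ones word, so shifting it right by 63 leaves 1 on 64-bit targets and 0 on 32-bit ones. A minimal runnable sketch of the trick (ptrSize here is a local stand-in for the real constant):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// ptrSize mirrors goarch.go#L33: the shift evaluates to 1 on 64-bit
	// targets (4<<1 == 8) and to 0 on 32-bit ones (4<<0 == 4).
	const ptrSize = 4 << (^uintptr(0) >> 63)

	func main() {
		fmt.Println(ptrSize == unsafe.Sizeof(uintptr(0))) // always true
	}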

	internal/abi
		abi.go#L75: 	if argSize > goarch.PtrSize || argSize == 0 || argSize&(argSize-1) != 0 {
		abi.go#L80: 		offset = goarch.PtrSize - argSize
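
The check at abi.go#L75 rejects argument sizes that are zero, wider than one word, or not a power of two: argSize&(argSize-1) clears the lowest set bit, so it is zero exactly for powers of two. The adjustment at abi.go#L80 then places a sub-word argument at the top of its word-sized register slot, as needed on big-endian targets. A hypothetical helper (not part of internal/abi) restating the validation:

	package main

	import "fmt"

	const ptrSize = 4 << (^uintptr(0) >> 63)

	// validArgSize restates the condition quoted from abi.go#L75:
	// a register-resident argument must be a nonzero power of two
	// no wider than one machine word.
	func validArgSize(argSize uintptr) bool {
		return argSize > 0 && argSize <= ptrSize && argSize&(argSize-1) == 0
	}

	func main() {
		for _, n := range []uintptr{0, 1, 2, 3, 4, 8, 16} {
			fmt.Println(n, validArgSize(n)) // on 64-bit, only 1, 2, 4, 8 pass
		}
	}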

	internal/reflectlite
		swapper.go#L40: 		if size == goarch.PtrSize {
		value.go#L93: 	if v.typ.Size() != goarch.PtrSize || !v.typ.Pointers() {
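
Both sites special-case values that are "pointer-shaped": exactly one word wide and holding a pointer (value.go#L93 asks for Size() == PtrSize and Pointers()). A rough equivalent using the exported reflect API, with a kind switch as an assumed stand-in for the unexported Pointers method:

	package main

	import (
		"fmt"
		"reflect"
		"unsafe"
	)

	// pointerShaped reports whether values of t occupy a single word
	// that the GC must treat as a pointer. The kind switch is an
	// approximation of reflectlite's t.Pointers().
	func pointerShaped(t reflect.Type) bool {
		if t.Size() != unsafe.Sizeof(uintptr(0)) {
			return false
		}
		switch t.Kind() {
		case reflect.Pointer, reflect.UnsafePointer, reflect.Map, reflect.Chan, reflect.Func:
			return true
		}
		return false
	}

	func main() {
		fmt.Println(pointerShaped(reflect.TypeOf(&struct{}{}))) // true
		fmt.Println(pointerShaped(reflect.TypeOf("")))          // false: a string is two words
		fmt.Println(pointerShaped(reflect.TypeOf(uintptr(0))))  // false: no pointer inside
	}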

	reflect
		abi.go#L170: 		ok = a.assignIntN(0, goarch.PtrSize, 1, 0b1)
		abi.go#L179: 		ok = a.assignIntN(0, goarch.PtrSize, 1, 0b0)
		abi.go#L183: 		a.stackAssign(goarch.PtrSize, goarch.PtrSize)
		abi.go#L205: 		switch goarch.PtrSize {
		abi.go#L218: 		return a.assignIntN(offset, goarch.PtrSize, 2, 0b01)
		abi.go#L220: 		return a.assignIntN(offset, goarch.PtrSize, 2, 0b10)
		abi.go#L222: 		return a.assignIntN(offset, goarch.PtrSize, 3, 0b001)
		abi.go#L265: 	if ptrMap != 0 && size != goarch.PtrSize {
		abi.go#L416: 			spill += goarch.PtrSize
		abi.go#L433: 	spill = align(spill, goarch.PtrSize)
		abi.go#L438: 	retOffset := align(in.stackBytes, goarch.PtrSize)
		swapper.go#L41: 		if size == goarch.PtrSize {
		type.go#L1689: 		mt.KeySize = uint8(goarch.PtrSize)
		type.go#L1695: 		mt.ValueSize = uint8(goarch.PtrSize)
		type.go#L1974: 	size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
		type.go#L1980: 		nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
		type.go#L1984: 		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
		type.go#L1986: 		base := bucketSize / goarch.PtrSize
		type.go#L1991: 		base += bucketSize * ktyp.Size_ / goarch.PtrSize
		type.go#L1996: 		base += bucketSize * etyp.Size_ / goarch.PtrSize
		type.go#L2001: 		ptrdata = (word + 1) * goarch.PtrSize
		type.go#L2010: 		Align_:   goarch.PtrSize,
		type.go#L2031: 	ptrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2032: 	words := typ.Size_ / goarch.PtrSize
		type.go#L2055: 	ptrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2515: 				n := (ft.Offset - off) / goarch.PtrSize
		type.go#L2687: 	case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
		type.go#L2691: 		n := (array.PtrBytes/goarch.PtrSize + 7) / 8
		type.go#L2693: 		n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1)
		type.go#L2704: 		elemPtrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2705: 		elemWords := typ.Size_ / goarch.PtrSize
		type.go#L2817: 		Align_: goarch.PtrSize,
		type.go#L2822: 		Size_:    align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
		type.go#L2823: 		PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
		type.go#L2863: 	if bv.n%(8*goarch.PtrSize) == 0 {
		type.go#L2867: 		for i := 0; i < goarch.PtrSize; i++ {
		type.go#L2883: 		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
		type.go#L2890: 		for bv.n < uint32(offset/uintptr(goarch.PtrSize)) {
		value.go#L109: 	if v.typ().Size() != goarch.PtrSize || !v.typ().Pointers() {
		value.go#L580: 	frameSize = align(frameSize, goarch.PtrSize)
		value.go#L1104: 	methodFrameSize = align(methodFrameSize, goarch.PtrSize)
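
Several of these reflect sites round sizes up to a pointer-size boundary, either through an align helper or inline as at type.go#L1984 and #L2693: (n + PtrSize - 1) &^ (PtrSize - 1). Adding PtrSize-1 and clearing the low bits with AND NOT rounds up, which is only valid because PtrSize is a power of two. A runnable sketch:

	package main

	import "fmt"

	const ptrSize = 4 << (^uintptr(0) >> 63)

	// roundUp reproduces the inline idiom from type.go#L1984: round n up
	// to the next multiple of ptrSize by adding ptrSize-1 and masking
	// off the low bits.
	func roundUp(n uintptr) uintptr {
		return (n + ptrSize - 1) &^ (ptrSize - 1)
	}

	func main() {
		fmt.Println(roundUp(1), roundUp(8), roundUp(9)) // 8 8 16 on 64-bit
	}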

	runtime
		alg.go#L15: 	c0 = uintptr((8-goarch.PtrSize)/4*2860486313 + (goarch.PtrSize-4)/4*33054211828000289)
		alg.go#L16: 	c1 = uintptr((8-goarch.PtrSize)/4*3267000013 + (goarch.PtrSize-4)/4*23344194077549503)
		alg.go#L304: const hashRandomBytes = goarch.PtrSize / 4 * 64
		alg.go#L325: 	getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
		arena.go#L545: 	if uintptr(ptr)%(8*goarch.PtrSize*goarch.PtrSize) == 0 && uintptr(ptr) != base {
		arena.go#L549: 		r := heapBitsForAddr(uintptr(ptr)-goarch.PtrSize, goarch.PtrSize)
		arena.go#L552: 		if p == uintptr(ptr)-goarch.PtrSize {
		arena.go#L555: 		h = writeHeapBitsForAddr(uintptr(ptr) - goarch.PtrSize)
		arena.go#L566: 	nb := typ.PtrBytes / goarch.PtrSize
		arena.go#L597: 		for i := uintptr(0); i < size; i += goarch.PtrSize {
		arena.go#L602: 				j := off / goarch.PtrSize
		cgocall.go#L507: 		p = *(*unsafe.Pointer)(add(p, goarch.PtrSize))
		cgocheck.go#L200: 	skipMask := off / goarch.PtrSize / 8
		cgocheck.go#L201: 	skipBytes := skipMask * goarch.PtrSize * 8
		cgocheck.go#L207: 	for i := uintptr(0); i < size; i += goarch.PtrSize {
		cgocheck.go#L208: 		if i&(goarch.PtrSize*8-1) == 0 {
		cgocheck.go#L215: 			off -= goarch.PtrSize
		heapdump.go#L249: 			dumpint(uint64(offset + i*goarch.PtrSize))
		heapdump.go#L299: 		for off := child.argoff; off < child.argoff+child.arglen; off += goarch.PtrSize {
		heapdump.go#L308: 		for off := child.arglen; off < s.varp-s.sp; off += goarch.PtrSize {
		heapdump.go#L315: 		for off := s.varp - size - s.sp; off < s.varp-s.sp; off += goarch.PtrSize {
		heapdump.go#L322: 		dumpbv(&bv, s.varp-uintptr(bv.n)*goarch.PtrSize-s.sp)
		heapdump.go#L515: 	dumpint(goarch.PtrSize)
		heapdump.go#L724: 	nptr := size / goarch.PtrSize
		heapdump.go#L748: 		i := (addr - p) / goarch.PtrSize
		iface.go#L67: 	m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
		iface.go#L104: 		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		iface.go#L137: 		t2 := (*itabTableType)(mallocgc((2+2*t.size)*goarch.PtrSize, nil, true))
		iface.go#L165: 		p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize))
		iface.go#L486: 		m := *(**itab)(add(unsafe.Pointer(&t.entries), i*goarch.PtrSize))
		malloc.go#L149: 	_NumStackOrders = 4 - goarch.PtrSize/4*goos.IsWindows - 1*goos.IsPlan9
		malloc.go#L250: 	heapArenaWords = heapArenaBytes / goarch.PtrSize
		malloc.go#L258: 	heapArenaBitmapWords = heapArenaWords / (8 * goarch.PtrSize)
		malloc.go#L447: 	if goarch.PtrSize == 8 {
		malloc.go#L755: 		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
		malloc.go#L757: 			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
		malloc.go#L766: 				size := 2 * uintptr(cap(h.allArenas)) * goarch.PtrSize
		malloc.go#L770: 				newArray := (*notInHeap)(persistentalloc(size, goarch.PtrSize, &memstats.gcMiscSys))
		malloc.go#L775: 				*(*notInHeapSlice)(unsafe.Pointer(&h.allArenas)) = notInHeapSlice{newArray, len(h.allArenas), int(size / goarch.PtrSize)}
		malloc.go#L1065: 			} else if goarch.PtrSize == 4 && size == 12 {
		malloc.go#L1525: 		persistent.off = alignUp(goarch.PtrSize, align)
		map.go#L108: 	noCheck = 1<<(8*goarch.PtrSize) - 1
		map.go#L187: 	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
		map.go#L197: 	top := uint8(hash >> (goarch.PtrSize*8 - 8))
		map.go#L210: 	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
		map.go#L214: 	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
		map.go#L827: 	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		map.go#L1315: 	if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		map.go#L1319: 	if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		map_fast32.go#L305: 			if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 {
		map_fast32.go#L431: 				if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
		map_fast64.go#L304: 				if goarch.PtrSize == 8 {
		map_fast64.go#L434: 					if goarch.PtrSize == 8 {
		map_faststr.go#L30: 			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L39: 					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L46: 		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L55: 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L72: 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
		map_faststr.go#L74: 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize))
		map_faststr.go#L95: 		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L101: 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L125: 			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L134: 					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
		map_faststr.go#L141: 		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L150: 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
		map_faststr.go#L167: 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize))
		map_faststr.go#L169: 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true
		map_faststr.go#L190: 		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L196: 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true
		map_faststr.go#L249: 			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*goarch.PtrSize))
		map_faststr.go#L287: 	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize)
		map_faststr.go#L293: 	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize))
		map_faststr.go#L328: 		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
		map_faststr.go#L338: 			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize))
		map_faststr.go#L414: 		x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
		map_faststr.go#L422: 			y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
		map_faststr.go#L427: 			e := add(k, bucketCnt*2*goarch.PtrSize)
		map_faststr.go#L428: 			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) {
		map_faststr.go#L454: 					dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
		map_faststr.go#L467: 				dst.k = add(dst.k, 2*goarch.PtrSize)
		mbarrier.go#L225: 	if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
		mbitmap.go#L384: const ptrBits = 8 * goarch.PtrSize
		mbitmap.go#L416: 	word := addr / goarch.PtrSize % heapArenaWords
		mbitmap.go#L427: 	nptr := size / goarch.PtrSize
		mbitmap.go#L442: 			size = valid * goarch.PtrSize
		mbitmap.go#L461: 			if goarch.PtrSize == 8 {
		mbitmap.go#L467: 			return h, h.addr + uintptr(i)*goarch.PtrSize
		mbitmap.go#L471: 		h.addr += h.valid * goarch.PtrSize
		mbitmap.go#L472: 		h.size -= h.valid * goarch.PtrSize
		mbitmap.go#L503: 	if goarch.PtrSize == 8 {
		mbitmap.go#L511: 	return h, h.addr + uintptr(i)*goarch.PtrSize
		mbitmap.go#L542: 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		mbitmap.go#L612: 	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		mbitmap.go#L640: 	word := maskOffset / goarch.PtrSize
		mbitmap.go#L645: 	for i := uintptr(0); i < size; i += goarch.PtrSize {
		mbitmap.go#L650: 				i += 7 * goarch.PtrSize
		mbitmap.go#L706: 	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		mbitmap.go#L707: 		if i&(goarch.PtrSize*8-1) == 0 {
		mbitmap.go#L737: 	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
		mbitmap.go#L743: 	nptrs := size / goarch.PtrSize
		mbitmap.go#L781: 	h.low = addr / goarch.PtrSize % ptrBits
		mbitmap.go#L784: 	h.addr = addr - h.low*goarch.PtrSize
		mbitmap.go#L813: 	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		mbitmap.go#L827: 	h.addr += ptrBits * goarch.PtrSize
		mbitmap.go#L837: 	words := size / goarch.PtrSize
		mbitmap.go#L851: 	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
		mbitmap.go#L866: 	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		mbitmap.go#L883: 	h.addr += ptrBits * goarch.PtrSize
		mbitmap.go#L894: 		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
		mbitmap.go#L906: 		h.addr += ptrBits * goarch.PtrSize
		mbitmap.go#L915: 		if goarch.PtrSize == 8 {
		mbitmap.go#L953: 	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		mbitmap.go#L996: 			h = h.pad(typ.Size_ - n*goarch.PtrSize)
		mbitmap.go#L1026: 	ptrs := typ.PtrBytes / goarch.PtrSize
		mbitmap.go#L1045: 		words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
		mbitmap.go#L1090: 		for i := uintptr(0); i < size; i += goarch.PtrSize {
		mbitmap.go#L1096: 					j := off / goarch.PtrSize
		mbitmap.go#L1123: 	n := (size/goarch.PtrSize + 7) / 8
		mbitmap.go#L1224: 		const maxBits = goarch.PtrSize*8 - 7
		mbitmap.go#L1267: 					for nb <= goarch.PtrSize*8 {
		mbitmap.go#L1349: 	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
		mbitmap.go#L1428: 			mask = make([]byte, n/goarch.PtrSize)
		mbitmap.go#L1429: 			for i := uintptr(0); i < n; i += goarch.PtrSize {
		mbitmap.go#L1430: 				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
		mbitmap.go#L1431: 				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
		mbitmap.go#L1440: 			mask = make([]byte, n/goarch.PtrSize)
		mbitmap.go#L1441: 			for i := uintptr(0); i < n; i += goarch.PtrSize {
		mbitmap.go#L1442: 				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
		mbitmap.go#L1443: 				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
		mbitmap.go#L1456: 		mask = make([]byte, n/goarch.PtrSize)
		mbitmap.go#L1462: 			mask[(addr-base)/goarch.PtrSize] = 1
		mbitmap.go#L1486: 			size := uintptr(locals.n) * goarch.PtrSize
		mbitmap.go#L1488: 			mask = make([]byte, n/goarch.PtrSize)
		mbitmap.go#L1489: 			for i := uintptr(0); i < n; i += goarch.PtrSize {
		mbitmap.go#L1490: 				off := (uintptr(p) + i - u.frame.varp + size) / goarch.PtrSize
		mbitmap.go#L1491: 				mask[i/goarch.PtrSize] = locals.ptrbit(off)
		mcheckmark.go#L28: 	b [heapArenaBytes / goarch.PtrSize / 8]uint8
		mfinal.go#L29: 	fin     [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
		mfinal.go#L47: var finptrmask [_FinBlockSize / goarch.PtrSize / 8]byte
		mfinal.go#L114: 				if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
		mfinal.go#L116: 					unsafe.Offsetof(finalizer{}.arg) != goarch.PtrSize ||
		mfinal.go#L117: 					unsafe.Offsetof(finalizer{}.nret) != 2*goarch.PtrSize ||
		mfinal.go#L118: 					unsafe.Offsetof(finalizer{}.fint) != 3*goarch.PtrSize ||
		mfinal.go#L119: 					unsafe.Offsetof(finalizer{}.ot) != 4*goarch.PtrSize) {
		mfinal.go#L478: 	nret = alignUp(nret, goarch.PtrSize)
		mgcmark.go#L265: 	if rootBlockBytes%(8*goarch.PtrSize) != 0 {
		mgcmark.go#L278: 	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*goarch.PtrSize))))
		mgcmark.go#L395: 				scanblock(uintptr(unsafe.Pointer(&spf.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
		mgcmark.go#L797: 		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L814: 			scanblock(uintptr(unsafe.Pointer(&d.fn)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L819: 			scanblock(uintptr(unsafe.Pointer(&d.link)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L825: 			scanblock(uintptr(unsafe.Pointer(&d)), goarch.PtrSize, &oneptrmask[0], gcw, &state)
		mgcmark.go#L971: 		size := uintptr(locals.n) * goarch.PtrSize
		mgcmark.go#L977: 		scanblock(frame.argp, uintptr(args.n)*goarch.PtrSize, args.bytedata, gcw, state)
		mgcmark.go#L1227: 		bits := uint32(*addb(ptrmask, i/(goarch.PtrSize*8)))
		mgcmark.go#L1229: 			i += goarch.PtrSize * 8
		mgcmark.go#L1245: 			i += goarch.PtrSize
		mgcmark.go#L1317: 		scanSize = addr - b + goarch.PtrSize
		mgcmark.go#L1358: 				word := (p - b) / goarch.PtrSize
		mgcmark.go#L1383: 	for i := uintptr(0); i < n; i += goarch.PtrSize {
		mgcmark.go#L1385: 			word := i / goarch.PtrSize
		mgcmark.go#L1394: 				if i%(goarch.PtrSize*8) != 0 {
		mgcmark.go#L1397: 				i += goarch.PtrSize*8 - goarch.PtrSize
		mgcmark.go#L1460: 	if obj&(goarch.PtrSize-1) != 0 {
		mgcmark.go#L1532: 		size = off + goarch.PtrSize
		mgcmark.go#L1534: 	for i := uintptr(0); i < size; i += goarch.PtrSize {
		mgcmark.go#L1538: 		if !(i < 128*goarch.PtrSize || off-16*goarch.PtrSize < i && i < off+16*goarch.PtrSize) {
		mgcstack.go#L110: 	obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
		mgcwork.go#L328: 	obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
		mheap.go#L539: 		n := 64 * 1024 / goarch.PtrSize
		mheap.go#L545: 		sp.array = sysAlloc(uintptr(n)*goarch.PtrSize, &memstats.other_sys)
		mheap.go#L1967: 			scanblock(uintptr(unsafe.Pointer(&s.fn)), goarch.PtrSize, &oneptrmask[0], gcw, nil)
		mranges.go#L258: 	ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
		mranges.go#L385: 			ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
		mranges.go#L455: 		ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
		mspanset.go#L104: 			newSpine := persistentalloc(newCap*goarch.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
		mspanset.go#L108: 				memmove(newSpine, spine.p, b.spineCap*goarch.PtrSize)
		mspanset.go#L299: 	return (*atomic.Pointer[spanSetBlock])(add(unsafe.Pointer(s.p), goarch.PtrSize*idx))
		mwbbuf.go#L132: 	if b.next+goarch.PtrSize > b.end {
		mwbbuf.go#L136: 	b.next += goarch.PtrSize
		mwbbuf.go#L143: 	if b.next+2*goarch.PtrSize > b.end {
		mwbbuf.go#L147: 	b.next += 2 * goarch.PtrSize
		os_linux.go#L244: 	auxvp := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*goarch.PtrSize))
		panic.go#L1137: 		gp.sched.bp = *(*uintptr)(unsafe.Pointer(sp - 2*goarch.PtrSize))
		panic.go#L1141: 		gp.sched.bp = sp - goarch.PtrSize
		preempt.go#L322: 	asyncPreemptStack = uintptr(total) + 8*goarch.PtrSize
		print.go#L273: 	for i := uintptr(0); p+i < end; i += goarch.PtrSize {
		proc.go#L154: 	if goarch.PtrSize == 8 {
		proc.go#L599: 	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
		proc.go#L2116: 	gp.sched.sp -= 4 * goarch.PtrSize // extra space in case of reads slightly beyond frame
		proc.go#L4516: 	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame
		proc.go#L6738: 			p := add(firstFunc, uintptr(i)*goarch.PtrSize)
		runtime1.go#L63: 	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
		runtime1.go#L198: 	if unsafe.Sizeof(k) != goarch.PtrSize {
		runtime1.go#L201: 	if unsafe.Sizeof(l) != goarch.PtrSize {
		runtime2.go#L523: 	tlsSize  = tlsSlots * goarch.PtrSize
		runtime2.go#L985: 		for i := 0; i < goarch.PtrSize && n < len(r); i++ {
		signal_amd64.go#L83: 	sp -= goarch.PtrSize
		signal_linux_amd64.go#L55: 	*(*uintptr)(add(unsafe.Pointer(c.info), 2*goarch.PtrSize)) = uintptr(x)
		slice.go#L218: 	case et.Size_ == goarch.PtrSize:
		slice.go#L219: 		lenmem = uintptr(oldLen) * goarch.PtrSize
		slice.go#L220: 		newlenmem = uintptr(newLen) * goarch.PtrSize
		slice.go#L221: 		capmem = roundupsize(uintptr(newcap) * goarch.PtrSize)
		slice.go#L222: 		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		slice.go#L223: 		newcap = int(capmem / goarch.PtrSize)
		slice.go#L226: 		if goarch.PtrSize == 8 {
		stack.go#L72: 	stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
		stack.go#L122: 	uintptrMask = 1<<(8*goarch.PtrSize) - 1
		stack.go#L612: 				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
		stack.go#L619: 			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
		stack.go#L658: 	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
		stack.go#L683: 		size := uintptr(locals.n) * goarch.PtrSize
		stack.go#L720: 			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
		stack.go#L721: 				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
		stack.go#L751: 		if oldfp == gp.sched.sp-goarch.PtrSize {
		stack.go#L752: 			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
		stack.go#L1041: 		sp -= goarch.PtrSize
		stkframe.go#L79: 	return uintptr(argMap.n) * goarch.PtrSize
		stkframe.go#L97: 		argMap.n = f.args / goarch.PtrSize
		stkframe.go#L112: 			minSP -= goarch.PtrSize
		stkframe.go#L137: 		retValid := *(*bool)(unsafe.Pointer(arg0 + 4*goarch.PtrSize))
		stkframe.go#L146: 			n := int32((uintptr(mv.argLen) &^ (goarch.PtrSize - 1)) / goarch.PtrSize)
		stkframe.go#L221: 			print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(args.n*goarch.PtrSize), "\n")
		stkframe.go#L248: 			p = add(p, goarch.PtrSize)
		stubs.go#L190: 	if goarch.PtrSize == 4 {
		symtab.go#L529: 		hdr.minLC != sys.PCQuantum || hdr.ptrSize != goarch.PtrSize || hdr.textStart != datap.text {
		symtab.go#L841: 	return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
		symtab.go#L995: 	if debugPcln && x&(goarch.PtrSize-1) != 0 {
		sys_x86.go#L18: 	sp -= goarch.PtrSize
		trace.go#L1018: 		pcBuf[i] = *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
		trace.go#L1269: 	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
		trace.go#L1439: 	data [64<<10 - goarch.PtrSize]byte
		trace.go#L1450: 	n = alignUp(n, goarch.PtrSize)
		traceback.go#L184: 			frame.sp += goarch.PtrSize
		traceback.go#L331: 			frame.fp += goarch.PtrSize
		traceback.go#L382: 				lrPtr = frame.fp - goarch.PtrSize
		traceback.go#L391: 		frame.varp -= goarch.PtrSize
		traceback.go#L412: 		frame.varp -= goarch.PtrSize
		traceback.go#L1263: 	const expand = 32 * goarch.PtrSize
		traceback.go#L1264: 	const maxExpand = 256 * goarch.PtrSize
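
A recurring pattern in this group is selecting between a 32-bit and a 64-bit constant at compile time, as at alg.go#L15-16: exactly one of (8-PtrSize)/4 and (PtrSize-4)/4 is 1 and the other 0, and because untyped constant arithmetic in Go is arbitrary-precision, the 17-digit literal is legal even on 32-bit targets as long as the final result fits in uintptr. A sketch of the same selection:

	package main

	import "fmt"

	const ptrSize = 4 << (^uintptr(0) >> 63)

	// c0 picks 2860486313 on 32-bit targets and 33054211828000289 on
	// 64-bit ones: the factor for the "wrong" word size is the constant
	// zero, so that term vanishes before the uintptr conversion.
	const c0 = uintptr((8-ptrSize)/4*2860486313 + (ptrSize-4)/4*33054211828000289)

	func main() {
		fmt.Println(c0)
	}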

	runtime/internal/math
		math.go#L14: 	if a|b < 1<<(4*goarch.PtrSize) || a == 0 {
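
The condition at math.go#L14 is a fast no-overflow test for uintptr multiplication: a|b is below 2^(4*PtrSize), i.e. half the word width in bits, only when both factors are, and two half-word factors cannot overflow a full word. A sketch of a function built around that check (the division fallback is an assumption about the unquoted remainder of the function):

	package main

	import "fmt"

	const ptrSize = 4 << (^uintptr(0) >> 63)

	// mulUintptr multiplies and reports overflow. If both factors fit
	// in half a word (a|b bounds each of them), the product fits in a
	// full word; otherwise compare b against ^uintptr(0)/a.
	func mulUintptr(a, b uintptr) (uintptr, bool) {
		if a|b < 1<<(4*ptrSize) || a == 0 {
			return a * b, false
		}
		return a * b, b > ^uintptr(0)/a
	}

	func main() {
		fmt.Println(mulUintptr(1<<20, 1<<20)) // 1<<40, false on 64-bit
	}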

	runtime/internal/sys
		consts.go#L25: const Int64Align = goarch.PtrSize