internal/abi.Type.PtrBytes (field)

90 uses

	internal/abi (current package)
		type.go#L22: 	PtrBytes    uintptr // number of (prefix) bytes in the type that can contain pointers
		type.go#L169: func (t *Type) Pointers() bool { return t.PtrBytes != 0 }
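
PtrBytes is the length in bytes of the prefix of the type that can contain pointer words; every byte at or past that offset is pointer-free, so the garbage collector never has to look at it. A zero PtrBytes marks a pointer-free type, which is exactly what Pointers() tests. The sketch below mirrors only the two fields involved; it is a minimal stand-in, not the real internal/abi.Type, and the example layouts assume a 64-bit platform with 8-byte pointers.

package main

import "fmt"

// Type mirrors just the two internal/abi.Type fields used in this sketch.
type Type struct {
	Size_    uintptr // total size of the type in bytes
	PtrBytes uintptr // number of (prefix) bytes in the type that can contain pointers
}

// Pointers reports whether the type contains any pointers, matching the
// internal/abi definition quoted above: zero PtrBytes means pointer-free.
func (t *Type) Pointers() bool { return t.PtrBytes != 0 }

func main() {
	// struct { x int64; p *int64 }: the pointer is the last word, so the
	// pointer-containing prefix spans the whole 16 bytes.
	tail := Type{Size_: 16, PtrBytes: 16}

	// struct { p *int64; x int64 }: the pointer is the first word, so
	// PtrBytes stops after 8 bytes and the GC can skip the rest.
	head := Type{Size_: 16, PtrBytes: 8}

	// struct { x, y int64 }: no pointers at all.
	none := Type{Size_: 16, PtrBytes: 0}

	fmt.Println(tail.Pointers(), head.Pointers(), none.Pointers()) // true true false
}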

	internal/reflectlite
		swapper.go#L36: 	hasPtr := typ.PtrBytes != 0
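
hasPtr here (and in reflect's swapper.go below) is the same PtrBytes != 0 test that Pointers() wraps; reflect.Swapper appears to use it to decide whether element swaps involve pointer words the garbage collector must observe, or can move raw bytes. That reading is inferred from the hasPtr name and the surrounding uses, not from code shown in this listing.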

	reflect
		deepequal.go#L42: 			if v1.typ().PtrBytes == 0 {
		swapper.go#L37: 	hasPtr := typ.PtrBytes != 0
		type.go#L1979: 	if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 {
		type.go#L1988: 		if ktyp.PtrBytes != 0 {
		type.go#L1993: 		if etyp.PtrBytes != 0 {
		type.go#L2013: 		PtrBytes: ptrdata,
		type.go#L2031: 	ptrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2055: 	ptrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2486: 	typ.PtrBytes = typeptrdata(&typ.Type)
		type.go#L2525: 			off += ft.Typ.PtrBytes
		type.go#L2609: 		return f.Offset + f.Typ.PtrBytes
		type.go#L2667: 	if length > 0 && typ.PtrBytes != 0 {
		type.go#L2668: 		array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
		type.go#L2676: 	case typ.PtrBytes == 0 || array.Size_ == 0:
		type.go#L2679: 		array.PtrBytes = 0
		type.go#L2685: 		array.PtrBytes = typ.PtrBytes
		type.go#L2691: 		n := (array.PtrBytes/goarch.PtrSize + 7) / 8
		type.go#L2704: 		elemPtrs := typ.PtrBytes / goarch.PtrSize
		type.go#L2726: 		array.PtrBytes = array.Size_ // overestimate but ok; must match program
		type.go#L2823: 		PtrBytes: uintptr(abid.stackPtrs.n) * goarch.PtrSize,
		type.go#L2876: 	if t.PtrBytes == 0 {
		value.go#L2151: 		if v.typ().PtrBytes == 0 {
		value.go#L2731: 		if v.typ().PtrBytes == 0 {
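
Two computations recur throughout the reflect uses: the pointer-word count ptrs := typ.PtrBytes / goarch.PtrSize (type.go#L2031, #L2055) and the array rule array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes (type.go#L2668). A worked sketch, reusing the hypothetical two-field Type from above and 8 as a stand-in for goarch.PtrSize:

package main

import "fmt"

const ptrSize = 8 // stand-in for goarch.PtrSize on a 64-bit platform

// Type is the same hypothetical mirror of internal/abi.Type as above.
type Type struct {
	Size_    uintptr
	PtrBytes uintptr
}

func main() {
	elem := Type{Size_: 16, PtrBytes: 8} // e.g. struct { p *int64; x int64 }

	// type.go#L2031: the number of pointer words in a type is its
	// pointer-containing prefix divided by the word size.
	ptrs := elem.PtrBytes / ptrSize
	fmt.Println(ptrs) // 1

	// type.go#L2668: an array's pointer prefix covers every element in
	// full except the last, which contributes only its own PtrBytes.
	const length = 4
	array := Type{Size_: elem.Size_ * length}
	array.PtrBytes = elem.Size_*uintptr(length-1) + elem.PtrBytes
	fmt.Println(array.PtrBytes) // 16*3 + 8 = 56 of the 64 bytes
}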

	runtime
		arena.go#L471: 	if typ.PtrBytes == 0 {
		arena.go#L493: 	if typ.PtrBytes != 0 {
		arena.go#L504: 			c.scanAlloc += size - (typ.Size_ - typ.PtrBytes)
		arena.go#L506: 			c.scanAlloc += typ.PtrBytes
		arena.go#L566: 	nb := typ.PtrBytes / goarch.PtrSize
		arena.go#L581: 	h = h.pad(typ.Size_ - typ.PtrBytes)
		arena.go#L601: 			if off < typ.PtrBytes {
		cgocall.go#L462: 	if t.PtrBytes == 0 || p == nil {
		cgocall.go#L525: 		if st.Elem.PtrBytes == 0 {
		cgocall.go#L550: 			if f.Typ.PtrBytes == 0 {
		cgocheck.go#L92: 	if typ.PtrBytes == 0 {
		cgocheck.go#L113: 	if typ.PtrBytes == 0 {
		cgocheck.go#L137: 	if typ.PtrBytes <= off {
		cgocheck.go#L140: 	if ptrdataSize := typ.PtrBytes - off; size > ptrdataSize {
		cgocheck.go#L236: 	if typ.PtrBytes == 0 {
		cgocheck.go#L241: 	if typ.PtrBytes <= off {
		cgocheck.go#L244: 	if ptrdataSize := typ.PtrBytes - off; size > ptrdataSize {
		chan.go#L99: 	case elem.PtrBytes == 0:
		checkptr.go#L19: 	if elem.PtrBytes != 0 && uintptr(p)&(uintptr(elem.Align_)-1) != 0 {
		heapdump.go#L208: 	dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0)
		malloc.go#L1026: 	noscan := typ == nil || typ.PtrBytes == 0
		malloc.go#L1153: 			if typ.PtrBytes != 0 {
		malloc.go#L1154: 				scanSize = dataSize - typ.Size_ + typ.PtrBytes
		malloc.go#L1157: 			scanSize = typ.PtrBytes
		map.go#L267: 	if t.Bucket.PtrBytes == 0 {
		map.go#L371: 		if t.Bucket.PtrBytes != 0 {
		map.go#L752: 			} else if t.Key.PtrBytes != 0 {
		map.go#L758: 			} else if t.Elem.PtrBytes != 0 {
		map.go#L835: 	if t.Bucket.PtrBytes == 0 {
		map.go#L1268: 		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
		map_fast32.go#L305: 			if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 {
		map_fast32.go#L311: 			if t.Elem.PtrBytes != 0 {
		map_fast32.go#L431: 				if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
		map_fast32.go#L449: 		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
		map_fast64.go#L303: 			if t.Key.PtrBytes != 0 {
		map_fast64.go#L313: 			if t.Elem.PtrBytes != 0 {
		map_fast64.go#L433: 				if t.Key.PtrBytes != 0 && writeBarrier.enabled {
		map_fast64.go#L457: 		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
		map_faststr.go#L339: 			if t.Elem.PtrBytes != 0 {
		map_faststr.go#L472: 		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
		mbarrier.go#L156: 	if writeBarrier.needed && typ.PtrBytes != 0 {
		mbarrier.go#L157: 		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
		mbarrier.go#L179: 	bulkBarrierPreWrite(uintptr(dst), 0, typ.PtrBytes)
		mbarrier.go#L189: 	bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.PtrBytes)
		mbarrier.go#L225: 	if writeBarrier.needed && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize {
		mbarrier.go#L281: 		pwsize := size - typ.Size_ + typ.PtrBytes
		mbarrier.go#L292: 	if elemType.PtrBytes == 0 {
		mbarrier.go#L310: 	if writeBarrier.needed && typ.PtrBytes != 0 {
		mbarrier.go#L311: 		bulkBarrierPreWrite(uintptr(ptr), 0, typ.PtrBytes)
		mbarrier.go#L323: 	if writeBarrier.needed && typ.PtrBytes != 0 {
		mbarrier.go#L332: 	if writeBarrier.needed && typ.PtrBytes != 0 {
		mbitmap.go#L706: 	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize {
		mbitmap.go#L1026: 	ptrs := typ.PtrBytes / goarch.PtrSize
		mbitmap.go#L1082: 				h = h.pad(typ.Size_ - typ.PtrBytes)
		mbitmap.go#L1095: 				if off < typ.PtrBytes {
		mfinal.go#L425: 		if ot.Elem == nil || ot.Elem.PtrBytes != 0 || ot.Elem.Size_ >= maxTinySize {
		slice.go#L56: 	if et.PtrBytes == 0 {
		slice.go#L265: 	if et.PtrBytes == 0 {
		slice.go#L278: 			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes)
		slice.go#L296: 	if et.PtrBytes == 0 {
		stkframe.go#L286: 		_ptrdata:  int32(abiRegArgsType.PtrBytes),
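
The runtime uses follow the same economy: allocation (malloc.go), copying and write barriers (mbarrier.go), and slice growth (slice.go) all bound their pointer work by PtrBytes rather than Size_. The sketch below reconstructs the scan-size logic suggested by malloc.go#L1153-1157, again with the hypothetical two-field Type; the dataSize > Size_ branch structure is an assumption drawn from those lines, not code quoted verbatim.

package main

import "fmt"

// Type is the same hypothetical mirror of internal/abi.Type as above.
type Type struct {
	Size_    uintptr
	PtrBytes uintptr
}

// scanSize reconstructs the shape of malloc.go#L1153-1157 (assumed): when
// one allocation holds several elements of typ back to back, only the
// pointer-free tail of the *last* element can be skipped, so the collector
// scans dataSize - Size_ + PtrBytes bytes.
func scanSize(dataSize uintptr, typ Type) uintptr {
	if typ.PtrBytes == 0 {
		return 0 // noscan, as in malloc.go#L1026: nothing can hold a pointer
	}
	if dataSize > typ.Size_ {
		return dataSize - typ.Size_ + typ.PtrBytes
	}
	return typ.PtrBytes
}

func main() {
	elem := Type{Size_: 16, PtrBytes: 8}
	fmt.Println(scanSize(4*elem.Size_, elem)) // 64 - 16 + 8 = 56

	// The write-barrier calls in mbarrier.go make the same trade:
	// bulkBarrierPreWrite is handed typ.PtrBytes, not typ.Size_, so
	// pointer-free suffixes never pay for barriers.
}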