internal/abi.Type.Size_ (field)
184 uses
internal/abi (current package)
type.go#L21: Size_ uintptr
type.go#L408: func (t *Type) Size() uintptr { return t.Size_ }
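Within internal/abi itself, Size_ is the exported struct field holding a type's size in bytes (the trailing underscore keeps the field name from colliding with the Size method), and Type.Size() is the canonical accessor. User code cannot import internal/abi, but the same value is observable through unsafe.Sizeof and reflect.Type.Size; the sketch below is purely illustrative and is not part of the package.

// Illustrative sketch: the value held in abi.Type.Size_ is what
// reflect.Type.Size and unsafe.Sizeof report for the same type.
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type pair struct {
	a int64
	b bool
}

func main() {
	var p pair
	fmt.Println(unsafe.Sizeof(p))         // 16: computed at compile time
	fmt.Println(reflect.TypeOf(p).Size()) // 16: read from the runtime type descriptor
}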
reflect
type.go#L559: return int(t.t.Size_) * 8
type.go#L1596: if typ.Size_ >= 1<<16 {
type.go#L1688: if ktyp.Size_ > maxKeySize {
type.go#L1692: mt.KeySize = uint8(ktyp.Size_)
type.go#L1694: if etyp.Size_ > maxValSize {
type.go#L1698: mt.MapType.ValueSize = uint8(etyp.Size_)
type.go#L1700: mt.MapType.BucketSize = uint16(mt.Bucket.Size_)
type.go#L1959: if ktyp.Size_ > maxKeySize {
type.go#L1962: if etyp.Size_ > maxValSize {
type.go#L1974: size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize
type.go#L1980: nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize
type.go#L1991: base += bucketSize * ktyp.Size_ / goarch.PtrSize
type.go#L1996: base += bucketSize * etyp.Size_ / goarch.PtrSize
type.go#L2011: Size_: size,
type.go#L2032: words := typ.Size_ / goarch.PtrSize
type.go#L2361: size = offset + ft.Size_
type.go#L2367: if ft.Size_ == 0 {
type.go#L2485: typ.Size_ = size
type.go#L2660: if typ.Size_ > 0 {
type.go#L2661: max := ^uintptr(0) / typ.Size_
type.go#L2666: array.Size_ = typ.Size_ * uintptr(length)
type.go#L2668: array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes
type.go#L2676: case typ.PtrBytes == 0 || array.Size_ == 0:
type.go#L2687: case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize:
type.go#L2705: elemWords := typ.Size_ / goarch.PtrSize
type.go#L2726: array.PtrBytes = array.Size_ // overestimate but ok; must match program
type.go#L2822: Size_: align(abid.retOffset+abid.ret.stackBytes, goarch.PtrSize),
type.go#L2900: addTypeBits(bv, offset+uintptr(i)*tt.Elem.Size_, tt.Elem)
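In reflect, Size_ backs the public Type.Size and Type.Bits accessors (type.go#L559 above multiplies the byte size by 8) and gates size limits such as the channel-element cap at type.go#L1596 and the map key/value indirection thresholds. A minimal check of that public surface, assuming nothing beyond the documented reflect API:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	t := reflect.TypeOf(float64(0))
	fmt.Println(t.Size()) // 8: bytes, taken from the type's Size_
	fmt.Println(t.Bits()) // 64: Size() in bits; only valid for sized numeric kinds
}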
runtime
alg.go#L148: switch t.Size_ {
alg.go#L154: return memhash(p, h, t.Size_)
alg.go#L177: h = typehash(a.Elem, add(p, i*a.Elem.Size_), h)
arena.go#L438: size := typ.Size_
arena.go#L504: c.scanAlloc += size - (typ.Size_ - typ.PtrBytes)
arena.go#L581: h = h.pad(typ.Size_ - typ.PtrBytes)
arena.go#L582: h.flush(uintptr(ptr), typ.Size_)
arena.go#L594: size := typ.Size_
arena.go#L600: off := i % typ.Size_
arena.go#L623: mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
arena.go#L628: userArenaHeapBitsSetType(typ, add(ptr, uintptr(i)*typ.Size_), base)
cgocall.go#L481: p = add(p, at.Elem.Size_)
cgocall.go#L530: p = add(p, st.Elem.Size_)
cgocheck.go#L80: cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
cgocheck.go#L124: cgoCheckTypedBlock(typ, p, 0, typ.Size_)
cgocheck.go#L125: p = add(p, typ.Size_)
cgocheck.go#L258: if off < at.Elem.Size_ {
cgocheck.go#L261: src = add(src, at.Elem.Size_)
cgocheck.go#L263: if skipped > at.Elem.Size_ {
cgocheck.go#L264: skipped = at.Elem.Size_
cgocheck.go#L266: checked := at.Elem.Size_ - skipped
cgocheck.go#L276: if off < f.Typ.Size_ {
cgocheck.go#L279: src = add(src, f.Typ.Size_)
cgocheck.go#L281: if skipped > f.Typ.Size_ {
cgocheck.go#L282: skipped = f.Typ.Size_
cgocheck.go#L284: checked := f.Typ.Size_ - skipped
chan.go#L76: if elem.Size_ >= 1<<16 {
chan.go#L83: mem, overflow := math.MulUintptr(elem.Size_, uintptr(size))
chan.go#L110: c.elemsize = uint16(elem.Size_)
chan.go#L116: print("makechan: chan=", c, "; elemsize=", elem.Size_, "; dataqsiz=", size, "\n")
chan.go#L342: typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
chan.go#L345: memmove(dst, src, t.Size_)
chan.go#L353: typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_)
chan.go#L354: memmove(dst, src, t.Size_)
checkptr.go#L25: if checkptrStraddles(p, n*elem.Size_) {
heapdump.go#L196: dumpint(uint64(t.Size_))
iface.go#L327: msanread(v, t.Size_)
iface.go#L330: asanread(v, t.Size_)
iface.go#L332: x := mallocgc(t.Size_, t, true)
iface.go#L342: msanread(v, t.Size_)
iface.go#L345: asanread(v, t.Size_)
iface.go#L348: x := mallocgc(t.Size_, t, false)
iface.go#L349: memmove(x, v, t.Size_)
malloc.go#L1149: if dataSize > typ.Size_ {
malloc.go#L1154: scanSize = dataSize - typ.Size_ + typ.PtrBytes
malloc.go#L1328: return mallocgc(typ.Size_, typ, true)
malloc.go#L1333: return mallocgc(typ.Size_, typ, true)
malloc.go#L1338: return mallocgc(typ.Size_, typ, true)
malloc.go#L1344: return mallocgc(typ.Size_, typ, true)
malloc.go#L1346: mem, overflow := math.MulUintptr(typ.Size_, uintptr(n))
map.go#L306: mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
map.go#L356: sz := t.Bucket.Size_ * nbuckets
map.go#L359: nbuckets = up / t.Bucket.Size_
map.go#L370: size := t.Bucket.Size_ * nbuckets
map.go#L404: msanread(key, t.Key.Size_)
map.go#L407: asanread(key, t.Key.Size_)
map.go#L465: msanread(key, t.Key.Size_)
map.go#L468: asanread(key, t.Key.Size_)
map.go#L590: msanread(key, t.Key.Size_)
map.go#L593: asanread(key, t.Key.Size_)
map.go#L704: msanread(key, t.Key.Size_)
map.go#L707: asanread(key, t.Key.Size_)
map.go#L753: memclrHasPointers(k, t.Key.Size_)
map.go#L759: memclrHasPointers(e, t.Elem.Size_)
map.go#L761: memclrNoHeapPointers(e, t.Elem.Size_)
map.go#L1315: if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
map.go#L1316: t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
map.go#L1319: if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
map.go#L1320: t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
map.go#L1329: if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
map.go#L1332: if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
map_fast32.go#L312: memclrHasPointers(e, t.Elem.Size_)
map_fast32.go#L314: memclrNoHeapPointers(e, t.Elem.Size_)
map_fast64.go#L314: memclrHasPointers(e, t.Elem.Size_)
map_fast64.go#L316: memclrNoHeapPointers(e, t.Elem.Size_)
map_faststr.go#L340: memclrHasPointers(e, t.Elem.Size_)
map_faststr.go#L342: memclrNoHeapPointers(e, t.Elem.Size_)
mbarrier.go#L166: memmove(dst, src, typ.Size_)
mbarrier.go#L168: cgoCheckMemmove2(typ, dst, src, 0, typ.Size_)
mbarrier.go#L199: msanwrite(dst, typ.Size_)
mbarrier.go#L200: msanread(src, typ.Size_)
mbarrier.go#L203: asanwrite(dst, typ.Size_)
mbarrier.go#L204: asanread(src, typ.Size_)
mbarrier.go#L255: racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
mbarrier.go#L256: racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
mbarrier.go#L259: msanwrite(dstPtr, uintptr(n)*typ.Size_)
mbarrier.go#L260: msanread(srcPtr, uintptr(n)*typ.Size_)
mbarrier.go#L263: asanwrite(dstPtr, uintptr(n)*typ.Size_)
mbarrier.go#L264: asanread(srcPtr, uintptr(n)*typ.Size_)
mbarrier.go#L279: size := uintptr(n) * typ.Size_
mbarrier.go#L281: pwsize := size - typ.Size_ + typ.PtrBytes
mbarrier.go#L293: return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_)
mbarrier.go#L313: memclrNoHeapPointers(ptr, typ.Size_)
mbarrier.go#L331: size := typ.Size_ * uintptr(len)
mbitmap.go#L692: if typ.Size_ != size {
mbitmap.go#L693: println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size)
mbitmap.go#L949: if doubleCheck && dataSize%typ.Size_ != 0 {
mbitmap.go#L980: for i := uintptr(0); true; i += typ.Size_ {
mbitmap.go#L991: if i+typ.Size_ == dataSize {
mbitmap.go#L996: h = h.pad(typ.Size_ - n*goarch.PtrSize)
mbitmap.go#L1027: if typ.Size_ == dataSize { // Single element
mbitmap.go#L1045: words := typ.Size_ / goarch.PtrSize // total words, including scalar tail
mbitmap.go#L1047: n := dataSize / typ.Size_
mbitmap.go#L1068: for i := uintptr(0); true; i += typ.Size_ {
mbitmap.go#L1078: if i+typ.Size_ == dataSize {
mbitmap.go#L1082: h = h.pad(typ.Size_ - typ.PtrBytes)
mbitmap.go#L1094: off := i % typ.Size_
mbitmap.go#L1427: n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
mbitmap.go#L1439: n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
mbitmap.go#L1487: n := (*ptrtype)(unsafe.Pointer(t)).Elem.Size_
mfinal.go#L425: if ot.Elem == nil || ot.Elem.PtrBytes != 0 || ot.Elem.Size_ >= maxTinySize {
mfinal.go#L476: nret = alignUp(nret, uintptr(t.Align_)) + uintptr(t.Size_)
select.go#L403: msanread(cas.elem, c.elemtype.Size_)
select.go#L405: msanwrite(cas.elem, c.elemtype.Size_)
select.go#L410: asanread(cas.elem, c.elemtype.Size_)
select.go#L412: asanwrite(cas.elem, c.elemtype.Size_)
select.go#L428: msanwrite(cas.elem, c.elemtype.Size_)
select.go#L431: asanwrite(cas.elem, c.elemtype.Size_)
select.go#L454: msanread(cas.elem, c.elemtype.Size_)
select.go#L457: asanread(cas.elem, c.elemtype.Size_)
select.go#L495: msanread(cas.elem, c.elemtype.Size_)
select.go#L498: asanread(cas.elem, c.elemtype.Size_)
slice.go#L42: tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
slice.go#L46: copymem = et.Size_ * uintptr(fromlen)
slice.go#L51: tomem = et.Size_ * uintptr(tolen)
slice.go#L89: mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
slice.go#L96: mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
slice.go#L161: racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
slice.go#L164: msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
slice.go#L167: asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
slice.go#L174: if et.Size_ == 0 {
slice.go#L212: case et.Size_ == 1:
slice.go#L218: case et.Size_ == goarch.PtrSize:
slice.go#L224: case isPowerOfTwo(et.Size_):
slice.go#L228: shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
slice.go#L230: shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
slice.go#L239: lenmem = uintptr(oldLen) * et.Size_
slice.go#L240: newlenmem = uintptr(newLen) * et.Size_
slice.go#L241: capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
slice.go#L243: newcap = int(capmem / et.Size_)
slice.go#L244: capmem = uintptr(newcap) * et.Size_
slice.go#L278: bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes)
slice.go#L297: oldcapmem := uintptr(old.cap) * et.Size_
slice.go#L298: newlenmem := uintptr(new.len) * et.Size_
stkframe.go#L284: off: -int32(alignUp(abiRegArgsType.Size_, 8)), // It's always the highest address local.
stkframe.go#L285: size: int32(abiRegArgsType.Size_),
unsafe.go#L58: if et.Size_ == 0 {
unsafe.go#L64: mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
unsafe.go#L87: if checkptrStraddles(ptr, uintptr(len64)*et.Size_) {
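A pattern that recurs throughout the runtime entries above (makechan, mallocgc, makeslice, growslice, unsafe.Slice) is overflow-checked sizing: an element count is multiplied by Size_ via the runtime-internal math.MulUintptr before any allocation is attempted. The sketch below reproduces that check with the standard math/bits package; the function name and shape are illustrative, not the runtime's actual helper.

package main

import (
	"fmt"
	"math/bits"
)

// mulNoOverflow multiplies an element size by a count and reports overflow,
// in the spirit of the runtime's internal math.MulUintptr (illustrative name).
func mulNoOverflow(elemSize, n uintptr) (mem uintptr, overflow bool) {
	hi, lo := bits.Mul(uint(elemSize), uint(n))
	return uintptr(lo), hi != 0
}

func main() {
	mem, overflow := mulNoOverflow(8, 1<<20)
	fmt.Println(mem, overflow) // 8388608 false

	_, overflow = mulNoOverflow(^uintptr(0), 2)
	fmt.Println(overflow) // true
}

growslice (the isPowerOfTwo case at slice.go#L224 and the shift computation at #L228/#L230 above) additionally special-cases power-of-two element sizes so the multiply becomes a shift. A hedged reconstruction of that idea, with invented names:

package main

import (
	"fmt"
	"math/bits"
)

// scaleByElemSize mimics the fast path growslice takes when the element size
// is a power of two: multiply by shifting instead of a general multiply.
func scaleByElemSize(n, elemSize uintptr) uintptr {
	if elemSize != 0 && elemSize&(elemSize-1) == 0 { // power of two
		shift := uintptr(bits.TrailingZeros64(uint64(elemSize))) & 63
		return n << shift
	}
	return n * elemSize
}

func main() {
	fmt.Println(scaleByElemSize(10, 8))  // 80
	fmt.Println(scaleByElemSize(10, 12)) // 120
}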
The pages are generated with Golds v0.6.7 (GOOS=linux GOARCH=amd64). Golds is a Go 101 project developed by Tapir Liu.