func unsafe.Sizeof
164 uses
unsafe (current package)
unsafe.go#L196: func Sizeof(x ArbitraryType) uintptr
internal/abi
type.go#L517: uadd := unsafe.Sizeof(*t)
type.go#L519: uadd += unsafe.Sizeof(UncommonType{})
type.go#L531: uadd := unsafe.Sizeof(*t)
type.go#L533: uadd += unsafe.Sizeof(UncommonType{})
internal/poll
splice_linux.go#L187: _ [24 - unsafe.Sizeof(splicePipeFields{})%24]byte
net
cgo_unix.go#L320: state := (*_C_struct___res_state)(_C_malloc(unsafe.Sizeof(_C_struct___res_state{})))
os
dirent_linux.go#L13: return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Ino), unsafe.Sizeof(syscall.Dirent{}.Ino))
dirent_linux.go#L17: return readInt(buf, unsafe.Offsetof(syscall.Dirent{}.Reclen), unsafe.Sizeof(syscall.Dirent{}.Reclen))
reflect
type.go#L2416: ut.Moff = uint32(unsafe.Sizeof(uncommonType{}))
runtime
alg.go#L38: size := *(*uintptr)(unsafe.Pointer(ptr + unsafe.Sizeof(h)))
cgo_sigaction.go#L29: msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
cgo_sigaction.go#L32: asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
cgo_sigaction.go#L83: msanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
cgo_sigaction.go#L86: asanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
chan.go#L29: hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
covercounter.go#L15: u32sz := unsafe.Sizeof(uint32(0))
debuglog.go#L78: l = (*dlogger)(sysAllocOS(unsafe.Sizeof(dlogger{})))
debuglog.go#L727: state1 := sysAllocOS(unsafe.Sizeof(readState{}) * uintptr(n))
defs_linux_amd64.go#L136: _ [_si_max_size - unsafe.Sizeof(siginfoFields{})]byte
defs_linux_amd64.go#L161: _ [_sigev_max_size - unsafe.Sizeof(sigeventFields{})]byte
heapdump.go#L680: memclrNoHeapPointers(unsafe.Pointer(&typecache), unsafe.Sizeof(typecache))
iface.go#L67: m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys))
iface.go#L393: x = mallocgc(unsafe.Sizeof(val), stringType, true)
iface.go#L404: x = mallocgc(unsafe.Sizeof(val), sliceType, true)
malloc.go#L543: const arenaMetaSize = (1 << arenaBits) * unsafe.Sizeof(heapArena{})
malloc.go#L739: l2 = (*[1 << arenaL2Bits]*heapArena)(sysAllocOS(unsafe.Sizeof(*l2)))
malloc.go#L744: sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
malloc.go#L746: sysNoHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
malloc.go#L755: r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L757: r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), goarch.PtrSize, &memstats.gcMiscSys))
malloc.go#L883: sysHugePage(unsafe.Pointer(l2), unsafe.Sizeof(*l2))
map.go#L827: if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
mcheckmark.go#L48: bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
mfinal.go#L29: fin [(_FinBlockSize - 2*goarch.PtrSize - 2*4) / unsafe.Sizeof(finalizer{})]finalizer
mfinal.go#L114: if (unsafe.Sizeof(finalizer{}) != 5*goarch.PtrSize ||
mfinal.go#L214: framesz := unsafe.Sizeof((any)(nil)) + f.nret
mfixalloc.go#L60: if min := unsafe.Sizeof(mlink{}); size < min {
mgc.go#L175: if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
mgcmark.go#L183: scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw, nil)
mgcstack.go#L110: obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
mgcstack.go#L128: obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
mgcstack.go#L138: if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
mgcstack.go#L141: if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
mgcwork.go#L328: obj [(_WorkbufSize - unsafe.Sizeof(workbufhdr{})) / goarch.PtrSize]uintptr
mgcwork.go#L443: memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), uintptr(n)*unsafe.Sizeof(b1.obj[0]))
mheap.go#L199: pad [(cpu.CacheLinePadSize - unsafe.Sizeof(mcentral{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
mheap.go#L557: sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
mheap.go#L756: h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
mheap.go#L757: h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
mheap.go#L758: h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
mheap.go#L759: h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)
mheap.go#L760: h.specialReachableAlloc.init(unsafe.Sizeof(specialReachable{}), nil, nil, &memstats.other_sys)
mheap.go#L761: h.specialPinCounterAlloc.init(unsafe.Sizeof(specialPinCounter{}), nil, nil, &memstats.other_sys)
mheap.go#L762: h.arenaHintAlloc.init(unsafe.Sizeof(arenaHint{}), nil, nil, &memstats.other_sys)
mheap.go#L2100: const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})
minmax.go#L55: switch unsafe.Sizeof(x) {
minmax.go#L65: switch unsafe.Sizeof(x) {
mpagealloc.go#L403: const l2Size = unsafe.Sizeof(*p.chunks[0])
mpagealloc.go#L472: sysHugePage(unsafe.Pointer(p.chunks[i]), unsafe.Sizeof(*p.chunks[0]))
mpagealloc.go#L976: pallocSumBytes = unsafe.Sizeof(pallocSum(0))
mpagealloc_64bit.go#L196: scSize := unsafe.Sizeof(atomicScavChunkData{})
mpagealloc_64bit.go#L253: nbytes := n * unsafe.Sizeof(atomicScavChunkData{})
mpagecache.go#L12: const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)
mprof.go#L215: size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
mprof.go#L220: size += unsafe.Sizeof(memRecord{})
mprof.go#L222: size += unsafe.Sizeof(blockRecord{})
mprof.go#L233: stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
mprof.go#L242: data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
mprof.go#L251: data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
mprof.go#L263: bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
mprof.go#L713: racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
mprof.go#L716: msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
mprof.go#L719: asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
mprof.go#L771: racewriterangepc(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
mprof.go#L774: msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
mprof.go#L777: asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
mranges.go#L258: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, sysStat))
mranges.go#L385: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, a.sysStat))
mranges.go#L455: ranges.array = (*notInHeap)(persistentalloc(unsafe.Sizeof(addrRange{})*uintptr(ranges.cap), goarch.PtrSize, b.sysStat))
mspanset.go#L316: return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
mstats.go#L347: if size := unsafe.Sizeof(heapStatsDelta{}); size%8 != 0 {
mwbbuf.go#L87: b.end = start + uintptr(len(b.buf))*unsafe.Sizeof(b.buf[0])
mwbbuf.go#L90: if (b.end-b.next)%unsafe.Sizeof(b.buf[0]) != 0 {
mwbbuf.go#L198: n := (pp.wbBuf.next - start) / unsafe.Sizeof(pp.wbBuf.buf[0])
netpoll.go#L658: const pdSize = unsafe.Sizeof(pollDesc{})
os_linux.go#L107: r := sched_getaffinity(0, unsafe.Sizeof(buf), &buf[0])
os_linux.go#L278: n = read(fd, noescape(unsafe.Pointer(&auxvreadbuf[0])), int32(unsafe.Sizeof(auxvreadbuf)))
os_linux.go#L459: rtsigprocmask(how, new, old, int32(unsafe.Sizeof(*new)))
os_linux.go#L545: if rt_sigaction(uintptr(sig), new, old, unsafe.Sizeof(sigactiont{}.sa_mask)) != 0 {
panic.go#L733: fd = add(fd, unsafe.Sizeof(b))
pinner.go#L84: pinnerRefStoreSize = (pinnerSize - unsafe.Sizeof([]unsafe.Pointer{})) / unsafe.Sizeof(unsafe.Pointer(nil))
print.go#L272: minhexdigits = int(unsafe.Sizeof(uintptr(0)) * 2)
proc.go#L885: s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:]
proc.go#L2444: msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
proc.go#L2447: asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
proc.go#L4527: memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
runtime1.go#L168: if unsafe.Sizeof(a) != 1 {
runtime1.go#L171: if unsafe.Sizeof(b) != 1 {
runtime1.go#L174: if unsafe.Sizeof(c) != 2 {
runtime1.go#L177: if unsafe.Sizeof(d) != 2 {
runtime1.go#L180: if unsafe.Sizeof(e) != 4 {
runtime1.go#L183: if unsafe.Sizeof(f) != 4 {
runtime1.go#L186: if unsafe.Sizeof(g) != 8 {
runtime1.go#L189: if unsafe.Sizeof(h) != 8 {
runtime1.go#L192: if unsafe.Sizeof(i) != 4 {
runtime1.go#L195: if unsafe.Sizeof(j) != 8 {
runtime1.go#L198: if unsafe.Sizeof(k) != goarch.PtrSize {
runtime1.go#L201: if unsafe.Sizeof(l) != goarch.PtrSize {
runtime1.go#L204: if unsafe.Sizeof(x1) != 1 {
runtime1.go#L210: if unsafe.Sizeof(y1) != 2 {
sema.go#L53: pad [cpu.CacheLinePadSize - unsafe.Sizeof(semaRoot{})]byte
sema.go#L624: if sz != unsafe.Sizeof(notifyList{}) {
sema.go#L625: print("runtime: bad notifyList size - sync=", sz, " runtime=", unsafe.Sizeof(notifyList{}), "\n")
stack.go#L152: _ [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
stkframe.go#L238: unsafe.Sizeof(abi.RegArgs{}) > 0 && isReflect {
string.go#L205: uintptr(len(a))*unsafe.Sizeof(a[0]),
string.go#L210: msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
string.go#L213: asanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
symtab.go#L788: ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{})))
symtab.go#L1022: return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4))
symtab.go#L1057: p := uintptr(unsafe.Pointer(&f.nfuncdata)) + unsafe.Sizeof(f.nfuncdata) + uintptr(f.npcdata)*4 + uintptr(i)*4
trace.go#L241: arr [64<<10 - unsafe.Sizeof(traceBufHeader{})]byte // underlying buffer for traceBufHeader.buf
trace.go#L511: sysFree(unsafe.Pointer(buf), unsafe.Sizeof(*buf.ptr()), &memstats.other_sys)
trace.go#L1068: buf = traceBufPtr(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys))
trace.go#L1219: hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
trace.go#L1269: return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*goarch.PtrSize))
trace.go#L1455: block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
trace.go#L1473: sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys)
traceback.go#L1612: msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
traceback.go#L1615: asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
traceback.go#L1637: msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
traceback.go#L1640: asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
vdso_linux.go#L53: vdsoSymTabSize = vdsoArrayMax / unsafe.Sizeof(elfSym{})
vdso_linux.go#L54: vdsoDynSize = vdsoArrayMax / unsafe.Sizeof(elfDyn{})
vdso_linux.go#L61: vdsoBloomSizeScale = unsafe.Sizeof(uintptr(0)) / 4 // uint32
vdso_linux.go#L113: pt := (*elfPhdr)(add(pt, uintptr(i)*unsafe.Sizeof(elfPhdr{})))
sync
pool.go#L75: pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
pool.go#L280: lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{}))
runtime.go#L53: runtime_notifyListCheck(unsafe.Sizeof(n))
syscall
exec_linux.go#L151: RawSyscall(SYS_WRITE, uintptr(mapPipe[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
exec_linux.go#L311: pid, err1 = rawVforkSyscall(_SYS_clone3, uintptr(unsafe.Pointer(clone3)), unsafe.Sizeof(*clone3))
exec_linux.go#L346: pid, _, err1 = RawSyscall(SYS_READ, uintptr(mapPipe[0]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
exec_linux.go#L350: if pid != unsafe.Sizeof(err2) {
exec_linux.go#L638: RawSyscall(SYS_WRITE, uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
exec_unix.go#L217: n, err = readlen(p[0], (*byte)(unsafe.Pointer(&err1)), int(unsafe.Sizeof(err1)))
exec_unix.go#L224: if n == int(unsafe.Sizeof(err1)) {
lsf_linux.go#L79: return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, unsafe.Pointer(&p), unsafe.Sizeof(p))
lsf_linux.go#L85: return setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, unsafe.Pointer(&dummy), unsafe.Sizeof(dummy))
sockcmsg_linux.go#L34: if uintptr(len(m.Data)) < unsafe.Sizeof(Ucred{}) {
syscall_linux.go#L748: return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
syscall_linux.go#L946: iov.SetLen(int(unsafe.Sizeof(*regsout)))
syscall_linux.go#L953: iov.SetLen(int(unsafe.Sizeof(*regs)))
syscall_linux.go#L993: return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino))
syscall_linux.go#L997: return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen))
syscall_unix.go#L494: return setsockopt(fd, level, opt, unsafe.Pointer(tv), unsafe.Sizeof(*tv))
The pages are generated with Golds v0.6.7. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @Go100and1 (reachable from the left QR code) to get the latest news of Golds.