package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)
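
// The functions below are the map implementation specialized for 8-byte keys
// (for example map[uint64]V and map[int64]V). The compiler typically lowers
// map operations on such key types to these _fast64 variants instead of the
// generic mapaccess/mapassign/mapdelete routines; the sketch below is
// illustrative only, and the exact lowering depends on the compiler version:
//
//	m := map[uint64]string{42: "answer"}
//	v := m[42]      // plain lookup    -> mapaccess1_fast64
//	v, ok := m[42]  // comma-ok lookup -> mapaccess2_fast64
//	m[7] = "seven"  // assignment      -> mapassign_fast64
//	delete(m, 7)    // deletion        -> mapdelete_fast64

// mapaccess1_fast64 returns a pointer to the element stored for key, or a
// pointer to the shared zero value if the key is not present. It never
// returns nil.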
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
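
// mapaccess2_fast64 is the comma-ok variant of mapaccess1_fast64: it returns
// a pointer to the element for key and true if the key is present, or a
// pointer to the shared zero value and false otherwise.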
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
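
// mapassign_fast64 returns a pointer to the element slot for key, creating
// the entry (and growing the map if needed) when the key is not already
// present. The caller stores the element value through the returned pointer.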
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// Store new key at the insert position.
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
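
// mapassign_fast64ptr is the variant of mapassign_fast64 used when the map
// key is a single pointer (8 bytes on 64-bit platforms). It follows the same
// structure but writes the key through an unsafe.Pointer, so the key store is
// a pointer write rather than a plain uint64 write.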
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again
	}

	if insertb == nil {
		insertb = h.newoverflow(t, b)
		inserti = 0
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash)

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
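
// mapdelete_fast64 removes the entry for key, if present. It clears the key
// and element slots, maintains the emptyOne/emptyRest tophash markers, and
// resets the hash seed once the map becomes empty.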
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.Key.PtrBytes != 0 {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// On 32-bit platforms the 8-byte key may hold pointers in
					// several layouts; just call memclrHasPointers.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
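
// growWork_fast64 performs a little incremental evacuation work during an
// ongoing grow: it evacuates the old bucket that maps to the bucket about to
// be used, plus at most one more bucket to keep the grow progressing.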
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// Make sure we evacuate the old bucket corresponding
	// to the bucket we're about to use.
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// Evacuate one more old bucket to make progress on growing.
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
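
// evacuate_fast64 moves the entries of old bucket oldbucket into the new
// bucket array. During a size-doubling grow each entry goes to one of two
// destinations (x or y) depending on the newly significant bit of its hash;
// during a same-size grow everything stays in x.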
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.Key.PtrBytes != 0 && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// On 32-bit platforms the 8-byte key may hold pointers in
						// several layouts; give up and call typedmemmove.
						typedmemmove(t.Key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}