package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/math"
	"unsafe"
)
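
// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged into an array of
// buckets. Each bucket contains up to bucketCnt (8) key/elem pairs.
// The low-order bits of the hash are used to select a bucket. Each
// bucket contains a few high-order bits of each hash (tophash) to
// distinguish the entries within a single bucket.
//
// If more than bucketCnt keys hash to a bucket, extra buckets are
// chained on in an overflow list.
//
// When the hash table grows, a new bucket array twice the size of the
// old one is allocated and buckets are copied ("evacuated") from the
// old array to the new array incrementally, a little at a time, by
// inserts and deletes.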

const (
	// Maximum number of key/elem pairs a bucket can hold.
	bucketCntBits = abi.MapBucketCountBits
	bucketCnt     = abi.MapBucketCount

	// Maximum average load of a bucket that triggers growth,
	// represented as loadFactorNum/loadFactorDen to allow integer math.
	loadFactorDen = 2
	loadFactorNum = (bucketCnt * 13 / 16) * loadFactorDen

	// Maximum key or elem size to keep inline (instead of mallocing per element).
	maxKeySize  = abi.MapMaxKeyBytes
	maxElemSize = abi.MapMaxElemBytes

	// dataOffset should be the size of the bmap struct, but needs to be
	// aligned correctly.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. A few values are reserved as special marks;
	// entries with tophash < minTopHash are bucket-state markers rather
	// than real hashes.
	emptyRest      = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
	emptyOne       = 1 // this cell is empty
	evacuatedX     = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
	minTopHash     = 5 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*goarch.PtrSize) - 1
)
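
// isEmpty reports whether the given tophash array entry represents an empty bucket entry.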
func isEmpty(x uint8) bool {
	return x <= emptyOne
}

// A header for a Go map.
type hmap struct {
	count     int // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// overflow and oldoverflow are only used if key and elem do not contain
	// pointers: in that case the bucket type is marked as pointer-free, so
	// pointers to all overflow buckets are kept here to keep them alive.
	// overflow contains overflow buckets for hmap.buckets;
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt elems,
	// followed by an overflow pointer.
}

// A hash iteration structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end.
	elem        unsafe.Pointer // Must be in second position.
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}
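
// bucketShift returns 1<<b, optimized for code generation:
// masking the shift amount lets the compiler elide overflow checks.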
func bucketShift(b uint8) uintptr {
	return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (goarch.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

// evacuated reports whether bucket b has already been evacuated to the new bucket array.
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > emptyOne && h < minTopHash
}

// overflow returns the overflow-bucket pointer stored in the last word of bucket b.
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets;
// this is used to trigger same-size map growth.
// To keep hmap small, noverflow is a uint16:
// when there are few buckets, noverflow is an exact count,
// when there are many buckets, it is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}

// newoverflow allocates (or takes from the preallocated list) an overflow
// bucket and links it to bucket b.
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.Bucket))
	}
	h.incrnoverflow()
	if t.Bucket.PtrBytes == 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}
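
// The compiler lowers make(map[K]V[, hint]) onto the entry points below:
// makemap_small when the hint is known at compile time to be at most
// bucketCnt and the map must be heap-allocated, makemap64 when the hint
// expression has a 64-bit type that may not fit in an int, and makemap
// otherwise. A stack-allocated map header may be passed in via h.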
func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

func makemap(t *maptype, hint int, h *hmap) *hmap {
	mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
	if overflow || mem > maxAlloc {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = new(hmap)
	}
	h.hash0 = fastrand()

	// Find the size parameter B which will hold the requested # of elements.
	// For hint < 0 overLoadFactor returns false since hint < bucketCnt.
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// Allocate the initial hash table.
	// If B == 0, the buckets field is allocated lazily later (in mapassign).
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
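
// makeBucketArray initializes a backing array for map buckets.
// 1<<b is the minimum number of buckets to allocate.
// dirtyalloc should either be nil or a bucket array previously
// allocated by makeBucketArray with the same t and b parameters.
// If dirtyalloc is nil a new backing array will be allocated,
// otherwise dirtyalloc will be cleared and reused as the backing array.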
func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.Bucket.Size_ * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.Bucket.Size_
		}
	}

	if dirtyalloc == nil {
		buckets = newarray(t.Bucket, int(nbuckets))
	} else {
		// dirtyalloc was previously generated by the newarray call above,
		// but may not be empty.
		buckets = dirtyalloc
		size := t.Bucket.Size_ * nbuckets
		if t.Bucket.PtrBytes != 0 {
			memclrHasPointers(buckets, size)
		} else {
			memclrNoHeapPointers(buckets, size)
		}
	}

	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking them to a minimum, we use the
		// convention that if a preallocated overflow bucket's overflow pointer
		// is nil, there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}
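
// mapaccess1 returns a pointer to h[key]. Never returns nil; instead
// it returns a reference to the zero object for the elem type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.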
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0)
		}
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
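
// mapaccess2 is like mapaccess1, but also reports whether the key is
// present in the map (the two-result form v, ok := m[k]).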
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0)
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return e, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
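
// mapaccessK returns both key and elem. Used by the map iterator.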
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hash := t.Hasher(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
bucketloop:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if t.Key.Equal(key, k) {
				e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					e = *((*unsafe.Pointer)(e))
				}
				return k, e
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return e
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	e := mapaccess1(t, h, key)
	if e == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return e, true
}
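
// mapassign is like mapaccess, but allocates a slot for the key if it
// is not present in the map. The compiler stores the assigned value
// through the returned element pointer, so m[k] = v becomes, roughly,
// *mapassign(maptype, m, &k) = v.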
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.Key.Size_)
	}
	if asanenabled {
		asanread(key, t.Key.Size_)
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var elem unsafe.Pointer
bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
					elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if !t.Key.Equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.NeedKeyUpdate() {
				typedmemmove(t.Key, k, key)
			}
			elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		elem = add(insertk, bucketCnt*uintptr(t.KeySize))
	}

	// store new key/elem at insert position
	if t.IndirectKey() {
		kmem := newobject(t.Key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.IndirectElem() {
		vmem := newobject(t.Elem)
		*(*unsafe.Pointer)(elem) = vmem
	}
	typedmemmove(t.Key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.IndirectElem() {
		elem = *((*unsafe.Pointer)(elem))
	}
	return elem
}
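
// mapdelete removes the entry for key from the map; it implements delete(m, key).
// Deleting a missing key is a no-op.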
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.Key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.Key.Size_)
	}
	if asanenabled && h != nil {
		asanread(key, t.Key.Size_)
	}
	if h == nil || h.count == 0 {
		if t.HashMightPanic() {
			t.Hasher(key, 0)
		}
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(key, uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher, since t.Hasher may panic,
	// in which case we have not actually done a write (delete).
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == emptyRest {
					break search
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
			k2 := k
			if t.IndirectKey() {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !t.Key.Equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.IndirectKey() {
				*(*unsafe.Pointer)(k) = nil
			} else if t.Key.PtrBytes != 0 {
				memclrHasPointers(k, t.Key.Size_)
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
			if t.IndirectElem() {
				*(*unsafe.Pointer)(e) = nil
			} else if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
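
// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' must be zeroed, since it contains
// pointers. Iteration starts at a random bucket and a random offset
// within each bucket, which is why Go map iteration order varies from
// run to run.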
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
	}

	it.t = t
	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
		throw("hash_iter size incorrect")
	}
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.Bucket.PtrBytes == 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	var r uintptr
	if h.B > 31-bucketCntBits {
		r = uintptr(fastrand64())
	} else {
		r = uintptr(fastrand())
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.elem = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) we need to iterate through the old bucket
			// and only return the entries that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
		if t.IndirectKey() {
			k = *((*unsafe.Pointer)(k))
		}
		e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// We're iterating an oldbucket that hasn't been evacuated yet.
			// Skip keys that will end up in the other new bucket
			// (each oldbucket expands to two buckets during a grow).
			if t.ReflexiveKey() || t.Key.Equal(k, k) {
				hash := t.Hasher(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// The hash isn't repeatable if k != k (NaNs); use the low bit of
				// tophash as the repeatable choice of which way NaNs go.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.ReflexiveKey() || t.Key.Equal(k, k)) {
			// This is the golden data, we can return it.
			it.key = k
			if t.IndirectElem() {
				e = *((*unsafe.Pointer)(e))
			}
			it.elem = e
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This handles the case where the key has been deleted,
			// updated, or deleted and reinserted.
			rk, re := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.elem = re
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}
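
// mapclear deletes all keys from a map.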
func mapclear(t *maptype, h *hmap) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(mapclear)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
	}

	if h == nil || h.count == 0 {
		return
	}

	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	h.flags ^= hashWriting

	// Mark buckets empty, so existing iterators can be terminated.
	markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
		for i := uintptr(0); i <= mask; i++ {
			b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
			for ; b != nil; b = b.overflow(t) {
				for i := uintptr(0); i < bucketCnt; i++ {
					b.tophash[i] = emptyRest
				}
			}
		}
	}
	markBucketsEmpty(h.buckets, bucketMask(h.B))
	if oldBuckets := h.oldbuckets; oldBuckets != nil {
		markBucketsEmpty(oldBuckets, h.oldbucketmask())
	}

	h.flags &^= sameSizeGrow
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0
	h.count = 0

	// Reset the hash seed to make it more difficult for attackers to
	// repeatedly trigger hash collisions. See issue 25237.
	h.hash0 = fastrand()

	// Keep the mapextra allocation but clear any extra information.
	if h.extra != nil {
		*h.extra = mapextra{}
	}

	// makeBucketArray clears the memory pointed to by h.buckets
	// and recovers any overflow buckets by generating them
	// as if h.buckets was newly allocated.
	_, nextOverflow := makeBucketArray(t, h.B, h.buckets)
	if nextOverflow != nil {
		// If overflow buckets are created then h.extra
		// will have been allocated during initial bucket creation.
		h.extra.nextOverflow = nextOverflow
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}
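
// hashGrow starts a grow: if we've hit the load factor, grow to twice
// the number of buckets; otherwise there are too many overflow buckets,
// so keep the same number of buckets and "grow" laterally (sameSizeGrow).
// The actual copying of the data is done incrementally by growWork() and evacuate().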
func hashGrow(t *maptype, h *hmap) {
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}
}
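
// overLoadFactor reports whether count items placed in 1<<B buckets is over the load factor.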
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// "Too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}
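
// growWork performs a little evacuation work during an insert or delete:
// it evacuates the old bucket corresponding to the bucket about to be used,
// and then one more old bucket, to make progress on the grow.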
func growWork(t *maptype, h *hmap, bucket uintptr) {
	evacuate(t, h, bucket&h.oldbucketmask())
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/elem index into b
	k unsafe.Pointer // pointer to current key storage
	e unsafe.Pointer // pointer to current elem storage
}
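
// evacuate moves the entries of old bucket number oldbucket into the new
// bucket array. During a same-size grow everything goes to the same bucket
// index (x); when the table doubles, each entry goes either to x (same index)
// or to y (index + newbit), depending on the next-higher hash bit.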
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*uintptr(t.KeySize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*uintptr(t.KeySize))
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.IndirectKey() {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
						// If key != key (NaNs), the hash is not reproducible.
						// Use the low bit of tophash to decide which way NaNs go,
						// so iterators can still find them.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.IndirectKey() {
					*(*unsafe.Pointer)(dst.k) = k2
				} else {
					typedmemmove(t.Key, dst.k, k)
				}
				if t.IndirectElem() {
					*(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
				} else {
					typedmemmove(t.Elem, dst.e, e)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.KeySize))
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}
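
// reflect_makemap is the runtime entry point behind reflect's map construction.
// It double-checks that the compiler-generated map type descriptor is laid out
// the way this file expects before calling makemap.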
func reflect_makemap(t *maptype, cap int) *hmap {
	if t.Key.Equal == nil {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
		t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
		throw("key size wrong")
	}
	if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
		t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
		throw("elem size wrong")
	}
	if t.Key.Align_ > bucketCnt {
		throw("key align too big")
	}
	if t.Elem.Align_ > bucketCnt {
		throw("elem align too big")
	}
	if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
		throw("elem size not a multiple of elem align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.Key.Align_) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.Elem.Align_) != 0 {
		throw("need padding in bucket (elem)")
	}

	return makemap(t, cap, nil)
}

func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	elem, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
	elem, ok := mapaccess2_faststr(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		elem = nil
	}
	return elem
}

func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
	p := mapassign_faststr(t, h, key)
	typedmemmove(t.Elem, p, elem)
}

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
	mapdelete_faststr(t, h, key)
}

func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
	mapiterinit(t, h, it)
}

func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

func reflect_mapiterelem(it *hiter) unsafe.Pointer {
	return it.elem
}

func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}

func reflect_mapclear(t *maptype, h *hmap) {
	mapclear(t, h)
}

func reflectlite_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
	}
	return h.count
}
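
// zeroVal is a shared, all-zero buffer. mapaccess1 and mapaccess2 return
// &zeroVal[0] when the key is missing, which is only safe for element types
// of at most maxZero bytes; larger element types go through the
// mapaccess1_fat/mapaccess2_fat wrappers, which receive an explicit zero
// value from the caller.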
const maxZero = 1024 // must match the corresponding value used by reflect and the compiler

var zeroVal [maxZero]byte

// mapinitnoop is declared without a body; it is a no-op known to the linker,
// implemented outside this file.
func mapinitnoop()

// mapclone for implementing maps.Clone
func mapclone(m any) any {
	e := efaceOf(&m)
	e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
	return m
}

// moveToBmap moves the live entries of bucket src into dst. It returns the
// destination bucket (possibly a new overflow bucket) and the position in it
// where the next entry should be stored.
func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
	for i := 0; i < bucketCnt; i++ {
		if isEmpty(src.tophash[i]) {
			continue
		}

		for ; pos < bucketCnt; pos++ {
			if isEmpty(dst.tophash[pos]) {
				break
			}
		}

		if pos == bucketCnt {
			dst = h.newoverflow(t, dst)
			pos = 0
		}

		srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
		srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
		dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
		dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))

		dst.tophash[pos] = src.tophash[i]
		if t.IndirectKey() {
			*(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK)
		} else {
			typedmemmove(t.Key, dstK, srcK)
		}
		if t.IndirectElem() {
			*(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle)
		} else {
			typedmemmove(t.Elem, dstEle, srcEle)
		}
		pos++
		h.count++
	}
	return dst, pos
}

func mapclone2(t *maptype, src *hmap) *hmap {
	dst := makemap(t, src.count, nil)
	dst.hash0 = src.hash0
	dst.nevacuate = 0
	// flags do not need to be copied; a fresh map has no flags set.

	if src.count == 0 {
		return dst
	}

	if src.flags&hashWriting != 0 {
		fatal("concurrent map clone and map write")
	}

	if src.B == 0 {
		dst.buckets = newobject(t.Bucket)
		dst.count = src.count
		typedmemmove(t.Bucket, dst.buckets, src.buckets)
		return dst
	}

	if dst.B == 0 {
		dst.buckets = newobject(t.Bucket)
	}
	dstArraySize := int(bucketShift(dst.B))
	srcArraySize := int(bucketShift(src.B))
	for i := 0; i < dstArraySize; i++ {
		dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
		pos := 0
		for j := 0; j < srcArraySize; j += dstArraySize {
			srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
		}
	}

	if src.oldbuckets == nil {
		return dst
	}

	oldB := src.B
	srcOldbuckets := src.oldbuckets
	if !src.sameSizeGrow() {
		oldB--
	}
	oldSrcArraySize := int(bucketShift(oldB))

	for i := 0; i < oldSrcArraySize; i++ {
		srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
		if evacuated(srcBmap) {
			continue
		}

		if oldB >= dst.B {
			// The old source bucket maps to a single destination bucket; copy wholesale.
			dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
			for dstBmap.overflow(t) != nil {
				dstBmap = dstBmap.overflow(t)
			}
			pos := 0
			for srcBmap != nil {
				dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
				srcBmap = srcBmap.overflow(t)
			}
			continue
		}

		// oldB < dst.B, so entries from one old bucket may go to different
		// destination buckets; insert them one at a time.
		for srcBmap != nil {
			for i := uintptr(0); i < bucketCnt; i++ {
				if isEmpty(srcBmap.tophash[i]) {
					continue
				}

				if src.flags&hashWriting != 0 {
					fatal("concurrent map clone and map write")
				}

				srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
				if t.IndirectKey() {
					srcK = *((*unsafe.Pointer)(srcK))
				}

				srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
				if t.IndirectElem() {
					srcEle = *((*unsafe.Pointer)(srcEle))
				}
				dstEle := mapassign(t, dst, srcK)
				typedmemmove(t.Elem, dstEle, srcEle)
			}
			srcBmap = srcBmap.overflow(t)
		}
	}
	return dst
}
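
// keys copies all keys of m into the slice pointed to by p, starting at a
// randomized bucket and intra-bucket offset so the order varies between calls.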
func keys(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)

	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(fastrand())
	offset := uint8(r >> h.B & (bucketCnt - 1))
	if h.B == 0 {
		copyKeys(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyKeys(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyKeys(t, h, b, s, offset)
		}
	}
	return
}

func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < bucketCnt; i++ {
			offi := (i + uintptr(offset)) & (bucketCnt - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
			if t.IndirectKey() {
				k = *((*unsafe.Pointer)(k))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.KeySize)), k)
			s.len++
		}
		b = b.overflow(t)
	}
}

// values copies all elements of m into the slice pointed to by p, using the
// same randomized traversal order as keys above.
func values(m any, p unsafe.Pointer) {
	e := efaceOf(&m)
	t := (*maptype)(unsafe.Pointer(e._type))
	h := (*hmap)(e.data)
	if h == nil || h.count == 0 {
		return
	}
	s := (*slice)(p)
	r := int(fastrand())
	offset := uint8(r >> h.B & (bucketCnt - 1))
	if h.B == 0 {
		copyValues(t, h, (*bmap)(h.buckets), s, offset)
		return
	}
	arraySize := int(bucketShift(h.B))
	buckets := h.buckets
	for i := 0; i < arraySize; i++ {
		bucket := (i + r) & (arraySize - 1)
		b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
		copyValues(t, h, b, s, offset)
	}

	if h.growing() {
		oldArraySize := int(h.noldbuckets())
		for i := 0; i < oldArraySize; i++ {
			bucket := (i + r) & (oldArraySize - 1)
			b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
			if evacuated(b) {
				continue
			}
			copyValues(t, h, b, s, offset)
		}
	}
	return
}

func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
	for b != nil {
		for i := uintptr(0); i < bucketCnt; i++ {
			offi := (i + uintptr(offset)) & (bucketCnt - 1)
			if isEmpty(b.tophash[offi]) {
				continue
			}
			if h.flags&hashWriting != 0 {
				fatal("concurrent map read and map write")
			}
			ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
			if t.IndirectElem() {
				ele = *((*unsafe.Pointer)(ele))
			}
			if s.len >= s.cap {
				fatal("concurrent map read and map write")
			}
			typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.ValueSize)), ele)
			s.len++
		}
		b = b.overflow(t)
	}
}