// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
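// The fast64 variants below mirror the fast32 ones exactly, except that key
// slots are 8 bytes wide: keys are read with a *uint64 load and the value
// array begins at dataOffset+bucketCnt*8 instead of dataOffset+bucketCnt*4.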
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
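// The faststr variants specialize lookup for string keys. Each key slot holds
// a stringStruct (data pointer + length, 2*ptrSize bytes). For a one-bucket
// map (h.B == 0) the hash is skipped entirely: short keys (< 32 bytes) are
// simply compared against every occupied slot, while long keys are first
// filtered by length and by their first and last 4 bytes; only if more than
// one slot survives that filter does the code fall back to hashing.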
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	key := (*stringStruct)(unsafe.Pointer(&ky))
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(t.elem.zero)
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt) // bucketCnt is a sentinel: no candidate slot yet
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(t.elem.zero)
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
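// mapaccess2_faststr is the two-result form of mapaccess1_faststr, backing
// the ",ok" form of a string-keyed map access. The lookup logic is identical;
// it additionally reports whether the key was present.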
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	key := (*stringStruct)(unsafe.Pointer(&ky))
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(t.elem.zero), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt) // bucketCnt is a sentinel: no candidate slot yet
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(t.elem.zero), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
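// These routines are not called directly from Go source; the compiler rewrites
// map accesses with matching key types into calls to them. A sketch of the
// correspondence (the exact rewrite is a compiler implementation detail):
//
//	m := map[string]int{"a": 1}
//	v := m["a"]     // lowered to a mapaccess1_faststr call
//	v, ok := m["a"] // lowered to a mapaccess2_faststr call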