Home | History | Annotate | Download | only in atomic
      1 // Copyright 2009 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 // +build arm
      6 
      7 package atomic
      8 
      9 import (
     10 	"runtime/internal/sys"
     11 	"unsafe"
     12 )
     13 
// spinlock is a minimal test-and-set spin lock. It serializes the
// emulated 64-bit atomic operations below, which 32-bit ARM cannot
// perform natively.
type spinlock struct {
	v uint32 // 0 = unlocked, 1 = locked; touched only via Cas/Store
}
     17 
     18 //go:nosplit
     19 func (l *spinlock) lock() {
     20 	for {
     21 		if Cas(&l.v, 0, 1) {
     22 			return
     23 		}
     24 	}
     25 }
     26 
// unlock releases the lock with an atomic store of 0.
//go:nosplit
func (l *spinlock) unlock() {
	Store(&l.v, 0)
}
     31 
// locktab is a fixed table of spin locks guarding the emulated 64-bit
// atomics. Each entry is padded out to a cache line so two locks never
// share a line (avoids false sharing). 57 entries — a size with no
// small power-of-two factor — helps spread addresses across slots.
var locktab [57]struct {
	l   spinlock
	pad [sys.CacheLineSize - unsafe.Sizeof(spinlock{})]byte
}
     36 
     37 func addrLock(addr *uint64) *spinlock {
     38 	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
     39 }
     40 
     41 // Atomic add and return new value.
     42 //go:nosplit
     43 func Xadd(val *uint32, delta int32) uint32 {
     44 	for {
     45 		oval := *val
     46 		nval := oval + uint32(delta)
     47 		if Cas(val, oval, nval) {
     48 			return nval
     49 		}
     50 	}
     51 }
     52 
// Xadduintptr atomically adds delta to *ptr and returns the new value.
// Implemented in assembly.
//go:noescape
func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
     55 
     56 //go:nosplit
     57 func Xchg(addr *uint32, v uint32) uint32 {
     58 	for {
     59 		old := *addr
     60 		if Cas(addr, old, v) {
     61 			return old
     62 		}
     63 	}
     64 }
     65 
     66 //go:nosplit
     67 func Xchguintptr(addr *uintptr, v uintptr) uintptr {
     68 	return uintptr(Xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
     69 }
     70 
// Load atomically loads *addr, implemented as an add of zero so the
// read goes through the same Cas loop as the other operations.
//go:nosplit
func Load(addr *uint32) uint32 {
	return Xadd(addr, 0)
}
     75 
// add returns the pointer p advanced by x bytes.
// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
     81 
// Loadp atomically loads the pointer stored at addr. Pointers are
// 32 bits on ARM, so the value round-trips through the uint32 load.
//go:nosplit
func Loadp(addr unsafe.Pointer) unsafe.Pointer {
	return unsafe.Pointer(uintptr(Xadd((*uint32)(addr), 0)))
}
     86 
     87 //go:nosplit
     88 func StorepNoWB(addr unsafe.Pointer, v unsafe.Pointer) {
     89 	for {
     90 		old := *(*unsafe.Pointer)(addr)
     91 		if Casp1((*unsafe.Pointer)(addr), old, v) {
     92 			return
     93 		}
     94 	}
     95 }
     96 
     97 //go:nosplit
     98 func Store(addr *uint32, v uint32) {
     99 	for {
    100 		old := *addr
    101 		if Cas(addr, old, v) {
    102 			return
    103 		}
    104 	}
    105 }
    106 
    107 //go:nosplit
    108 func Cas64(addr *uint64, old, new uint64) bool {
    109 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
    110 		*(*int)(nil) = 0 // crash on unaligned uint64
    111 	}
    112 	var ok bool
    113 	addrLock(addr).lock()
    114 	if *addr == old {
    115 		*addr = new
    116 		ok = true
    117 	}
    118 	addrLock(addr).unlock()
    119 	return ok
    120 }
    121 
    122 //go:nosplit
    123 func Xadd64(addr *uint64, delta int64) uint64 {
    124 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
    125 		*(*int)(nil) = 0 // crash on unaligned uint64
    126 	}
    127 	var r uint64
    128 	addrLock(addr).lock()
    129 	r = *addr + uint64(delta)
    130 	*addr = r
    131 	addrLock(addr).unlock()
    132 	return r
    133 }
    134 
    135 //go:nosplit
    136 func Xchg64(addr *uint64, v uint64) uint64 {
    137 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
    138 		*(*int)(nil) = 0 // crash on unaligned uint64
    139 	}
    140 	var r uint64
    141 	addrLock(addr).lock()
    142 	r = *addr
    143 	*addr = v
    144 	addrLock(addr).unlock()
    145 	return r
    146 }
    147 
    148 //go:nosplit
    149 func Load64(addr *uint64) uint64 {
    150 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
    151 		*(*int)(nil) = 0 // crash on unaligned uint64
    152 	}
    153 	var r uint64
    154 	addrLock(addr).lock()
    155 	r = *addr
    156 	addrLock(addr).unlock()
    157 	return r
    158 }
    159 
    160 //go:nosplit
    161 func Store64(addr *uint64, v uint64) {
    162 	if uintptr(unsafe.Pointer(addr))&7 != 0 {
    163 		*(*int)(nil) = 0 // crash on unaligned uint64
    164 	}
    165 	addrLock(addr).lock()
    166 	*addr = v
    167 	addrLock(addr).unlock()
    168 }
    169 
    170 //go:nosplit
    171 func Or8(addr *uint8, v uint8) {
    172 	// Align down to 4 bytes and use 32-bit CAS.
    173 	uaddr := uintptr(unsafe.Pointer(addr))
    174 	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
    175 	word := uint32(v) << ((uaddr & 3) * 8) // little endian
    176 	for {
    177 		old := *addr32
    178 		if Cas(addr32, old, old|word) {
    179 			return
    180 		}
    181 	}
    182 }
    183 
    184 //go:nosplit
    185 func And8(addr *uint8, v uint8) {
    186 	// Align down to 4 bytes and use 32-bit CAS.
    187 	uaddr := uintptr(unsafe.Pointer(addr))
    188 	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
    189 	word := uint32(v) << ((uaddr & 3) * 8)    // little endian
    190 	mask := uint32(0xFF) << ((uaddr & 3) * 8) // little endian
    191 	word |= ^mask
    192 	for {
    193 		old := *addr32
    194 		if Cas(addr32, old, old&word) {
    195 			return
    196 		}
    197 	}
    198 }
    199 
// armcas is the compare-and-swap primitive for this package: judging
// by the Cas call sites above, it compares *ptr with old and, if they
// match, stores new, reporting whether the swap happened. Implemented
// in assembly.
//go:nosplit
func armcas(ptr *uint32, old, new uint32) bool
    202