Home | History | Annotate | Download | only in atomic
      1 // Copyright 2014 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 #include "textflag.h"
      6 
// uint32 runtime∕internal∕atomic·Load(uint32 volatile* addr)
// Atomic 32-bit load with acquire ordering.
// $-8 frame size: leaf function, no stack frame, LR not saved.
TEXT Load(SB),NOSPLIT,$-8-12
	MOVD	ptr+0(FP), R0	// R0 = addr
	LDARW	(R0), R0	// load-acquire word: no later memory access can be reordered before it
	MOVW	R0, ret+8(FP)	// return the 32-bit value
	RET
     13 
// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
// Atomic 64-bit load with acquire ordering.
TEXT Load64(SB),NOSPLIT,$-8-16
	MOVD	ptr+0(FP), R0	// R0 = addr
	LDAR	(R0), R0	// load-acquire doubleword
	MOVD	R0, ret+8(FP)	// return the 64-bit value
	RET
     20 
// void *runtime∕internal∕atomic·Loadp(void *volatile *addr)
// Atomic pointer load with acquire ordering.
// Identical to Load64 since pointers are 64-bit on arm64; kept as a
// separate symbol so callers get the pointer-typed signature.
TEXT Loadp(SB),NOSPLIT,$-8-16
	MOVD	ptr+0(FP), R0	// R0 = addr
	LDAR	(R0), R0	// load-acquire pointer
	MOVD	R0, ret+8(FP)
	RET
     27 
// void runtime∕internal∕atomic·StorepNoWB(void *volatile *addr, void *val)
// Atomic pointer store WITHOUT a write barrier; the caller is responsible
// for any GC write-barrier obligations. Tail-branches to Store64 — same
// frame layout (ptr+0, val+8), so the arguments are forwarded unchanged.
TEXT runtimeinternalatomicStorepNoWB(SB), NOSPLIT, $0-16
	B	runtimeinternalatomicStore64(SB)
     30 
// void runtime∕internal∕atomic·Store(uint32 volatile* addr, uint32 v)
// Atomic 32-bit store with release ordering.
TEXT runtimeinternalatomicStore(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R0	// R0 = addr
	MOVW	val+8(FP), R1	// R1 = v
	STLRW	R1, (R0)	// store-release word: no earlier memory access can be reordered after it
	RET
     36 
// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 v)
// Atomic 64-bit store with release ordering.
TEXT runtimeinternalatomicStore64(SB), NOSPLIT, $0-16
	MOVD	ptr+0(FP), R0	// R0 = addr
	MOVD	val+8(FP), R1	// R1 = v
	STLR	R1, (R0)	// store-release doubleword
	RET
     42 
// uint32 runtime∕internal∕atomic·Xchg(uint32 volatile* addr, uint32 v)
// Atomically store v into *addr and return the previous value.
// Implemented as an LDAXR/STLXR (load-/store-exclusive) retry loop.
TEXT runtimeinternalatomicXchg(SB), NOSPLIT, $0-20
again:
	// Args are reloaded from FP each retry; they are constants on the
	// frame, so this is harmless and keeps the loop self-contained.
	MOVD	ptr+0(FP), R0	// R0 = addr
	MOVW	new+8(FP), R1	// R1 = v
	LDAXRW	(R0), R2	// R2 = old value; acquires exclusive monitor
	STLXRW	R1, (R0), R3	// try to store v; R3 = 0 on success, 1 if monitor lost
	CBNZ	R3, again	// exclusive store failed -> retry
	MOVW	R2, ret+16(FP)	// return old value
	RET
     52 
// uint64 runtime∕internal∕atomic·Xchg64(uint64 volatile* addr, uint64 v)
// Atomically store v into *addr and return the previous value.
// 64-bit variant of Xchg: same LDAXR/STLXR retry loop, doubleword ops.
TEXT runtimeinternalatomicXchg64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0	// R0 = addr
	MOVD	new+8(FP), R1	// R1 = v
	LDAXR	(R0), R2	// R2 = old value; acquires exclusive monitor
	STLXR	R1, (R0), R3	// try to store v; R3 = 0 on success
	CBNZ	R3, again	// exclusive store failed -> retry
	MOVD	R2, ret+16(FP)	// return old value
	RET
     62 
// bool runtime∕internal∕atomic·Cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//      if(*ptr == old){
//              *ptr = new;
//              return 1;
//      } else {
//              return 0;
//      }
// Frame is $0-25: 3 x 8-byte args plus a 1-byte bool result.
TEXT runtimeinternalatomicCas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R0	// R0 = ptr
	MOVD	old+8(FP), R1	// R1 = old
	MOVD	new+16(FP), R2	// R2 = new
again:
	LDAXR	(R0), R3	// R3 = *ptr; acquires exclusive monitor
	CMP	R1, R3		// compare current value with old; sets flags
	BNE	ok		// mismatch: skip the store, flags say NE
	STLXR	R2, (R0), R3	// try to store new; R3 = 0 on success
	CBNZ	R3, again	// monitor lost -> retry the whole LL/SC sequence
ok:
	// Subtle: STLXR and CBNZ do not modify the condition flags, so the
	// flags here are still those set by CMP — EQ iff the compare matched
	// (and, via CBNZ, the store succeeded). CSET materializes that as 0/1.
	CSET	EQ, R0
	MOVB	R0, ret+24(FP)	// return bool
	RET
     85 
// uint32 runtime∕internal∕atomic·Xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//      *ptr += delta;
//      return *ptr;
// Note: returns the NEW value (after the add), not the old one.
TEXT runtimeinternalatomicXadd(SB), NOSPLIT, $0-20
again:
	MOVD	ptr+0(FP), R0	// R0 = ptr
	MOVW	delta+8(FP), R1	// R1 = delta
	LDAXRW	(R0), R2	// R2 = *ptr; acquires exclusive monitor
	ADDW	R2, R1, R2	// R2 = old + delta
	STLXRW	R2, (R0), R3	// try to store the sum; R3 = 0 on success
	CBNZ	R3, again	// exclusive store failed -> retry
	MOVW	R2, ret+16(FP)	// return new value
	RET
    100 
// uint64 runtime∕internal∕atomic·Xadd64(uint64 volatile *ptr, int64 delta)
// Atomically add delta to *ptr and return the NEW value.
// 64-bit variant of Xadd: same LL/SC retry loop, doubleword ops.
TEXT runtimeinternalatomicXadd64(SB), NOSPLIT, $0-24
again:
	MOVD	ptr+0(FP), R0	// R0 = ptr
	MOVD	delta+8(FP), R1	// R1 = delta
	LDAXR	(R0), R2	// R2 = *ptr; acquires exclusive monitor
	ADD	R2, R1, R2	// R2 = old + delta
	STLXR	R2, (R0), R3	// try to store the sum; R3 = 0 on success
	CBNZ	R3, again	// exclusive store failed -> retry
	MOVD	R2, ret+16(FP)	// return new value
	RET
    111 
// uintptr runtime∕internal∕atomic·Xchguintptr(uintptr volatile *ptr, uintptr v)
// uintptr is 64-bit on arm64, so tail-branch to Xchg64; the frame
// layout (ptr+0, new+8, ret+16) is identical and forwards unchanged.
TEXT runtimeinternalatomicXchguintptr(SB), NOSPLIT, $0-24
	B	runtimeinternalatomicXchg64(SB)
    114 
// void runtime∕internal∕atomic·And8(uint8 volatile *ptr, uint8 val)
// Atomically: *ptr &= val. No return value (frame $0-9: 8-byte ptr + 1-byte val).
TEXT And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0	// R0 = ptr
	MOVB	val+8(FP), R1	// R1 = val
	LDAXRB	(R0), R2	// R2 = *ptr (byte); acquires exclusive monitor
	AND	R1, R2		// R2 &= val
	STLXRB	R2, (R0), R3	// try to store result; R3 = 0 on success
	CBNZ	R3, -3(PC)	// failed: branch back 3 instructions to LDAXRB and retry
	RET
    123 
// void runtime∕internal∕atomic·Or8(uint8 volatile *ptr, uint8 val)
// Atomically: *ptr |= val. Mirrors And8 with ORR instead of AND.
TEXT Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R0	// R0 = ptr
	MOVB	val+8(FP), R1	// R1 = val
	LDAXRB	(R0), R2	// R2 = *ptr (byte); acquires exclusive monitor
	ORR	R1, R2		// R2 |= val
	STLXRB	R2, (R0), R3	// try to store result; R3 = 0 on success
	CBNZ	R3, -3(PC)	// failed: branch back 3 instructions to LDAXRB and retry
	RET
    132 
    133