Home | History | Annotate | Download | only in atomic
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build mips64 mips64le

#include "textflag.h"

// The assembler used here does not know the LL/SC mnemonics, so the
// instructions are hand-encoded as raw instruction words:
//	opcode<<26 | base<<21 | rt<<16   (the 16-bit offset field is zero)
// LL/SC are the 32-bit load-linked/store-conditional pair; LLV/SCV are
// the 64-bit variants. `base` and `rt` are register numbers (Rn).
// Note: SC overwrites rt with 1 on success, 0 on failure.
#define LL(base, rt)	WORD	$((060<<26)|((base)<<21)|((rt)<<16))
#define LLV(base, rt)	WORD	$((064<<26)|((base)<<21)|((rt)<<16))
#define SC(base, rt)	WORD	$((070<<26)|((base)<<21)|((rt)<<16))
#define SCV(base, rt)	WORD	$((074<<26)|((base)<<21)|((rt)<<16))
// Raw word 0xf encodes the MIPS SYNC instruction: a full memory barrier.
#define SYNC	WORD $0xf
// bool cas(uint32 *ptr, uint32 old, uint32 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else
//		return 0;
// Frame: 8(ptr)+4(old)+4(new)+1(ret) = 17 bytes of args.
TEXT Cas(SB), NOSPLIT, $0-17
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVW	old+8(FP), R2		// R2 = expected value
	MOVW	new+12(FP), R5		// R5 = replacement value
	SYNC				// barrier before entering the LL/SC loop
cas_again:
	MOVV	R5, R3			// refresh R3 each try: SC destroys its rt register
	LL(1, 4)	// R4 = *R1
	BNE	R2, R4, cas_fail	// current value differs from old -> CAS fails
	SC(1, 3)	// *R1 = R3
	BEQ	R3, cas_again		// R3 == 0: SC lost the reservation, retry
	MOVV	$1, R1
	MOVB	R1, ret+16(FP)		// success: return true
	SYNC				// barrier after the update
	RET
cas_fail:
	MOVV	$0, R1
	JMP	-4(PC)			// back to the MOVB above: store 0, SYNC, RET
     40 
// bool	cas64(uint64 *ptr, uint64 old, uint64 new)
// Atomically:
//	if(*ptr == old){
//		*ptr = new;
//		return 1;
//	} else {
//		return 0;
//	}
// 64-bit twin of Cas: same loop shape using LLV/SCV.
TEXT Cas64(SB), NOSPLIT, $0-25
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVV	old+8(FP), R2		// R2 = expected value
	MOVV	new+16(FP), R5		// R5 = replacement value
	SYNC				// barrier before entering the LL/SC loop
cas64_again:
	MOVV	R5, R3			// refresh R3 each try: SCV destroys its rt register
	LLV(1, 4)	// R4 = *R1
	BNE	R2, R4, cas64_fail	// current value differs from old -> CAS fails
	SCV(1, 3)	// *R1 = R3
	BEQ	R3, cas64_again		// R3 == 0: SCV lost the reservation, retry
	MOVV	$1, R1
	MOVB	R1, ret+24(FP)		// success: return true
	SYNC				// barrier after the update
	RET
cas64_fail:
	MOVV	$0, R1
	JMP	-4(PC)			// back to the MOVB above: store 0, SYNC, RET
     67 
// uintptr is 64 bits on mips64: tail-jump to the 64-bit CAS.
TEXT Casuintptr(SB), NOSPLIT, $0-25
	JMP	Cas64(SB)
     70 
// uintptr is 64 bits on mips64: tail-jump to the 64-bit load.
// (Load64 is defined elsewhere in this package.)
TEXT Loaduintptr(SB),  NOSPLIT|NOFRAME, $0-16
	JMP	Load64(SB)
     73 
// uint is 64 bits on mips64: tail-jump to the 64-bit load.
TEXT Loaduint(SB), NOSPLIT|NOFRAME, $0-16
	JMP	Load64(SB)
     76 
// uintptr is 64 bits on mips64: tail-jump to the 64-bit store.
TEXT Storeuintptr(SB), NOSPLIT, $0-16
	JMP	Store64(SB)
     79 
// uintptr is 64 bits on mips64: tail-jump to the 64-bit add-and-fetch.
TEXT Xadduintptr(SB), NOSPLIT, $0-24
	JMP	Xadd64(SB)
     82 
// int64 load is the same operation as uint64 load at this level.
TEXT Loadint64(SB), NOSPLIT, $0-16
	JMP	Load64(SB)
     85 
// int64 add-and-fetch is the same operation as uint64 at this level.
TEXT Xaddint64(SB), NOSPLIT, $0-24
	JMP	Xadd64(SB)
     88 
     89 // bool casp(void **val, void *old, void *new)
     90 // Atomically:
     91 //	if(*val == old){
     92 //		*val = new;
     93 //		return 1;
     94 //	} else
     95 //		return 0;
     96 TEXT Casp1(SB), NOSPLIT, $0-25
     97 	JMP runtimeinternalatomicCas64(SB)
     98 
// uint32 xadd(uint32 volatile *ptr, int32 delta)
// Atomically:
//	*ptr += delta;
//	return *ptr;
TEXT Xadd(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVW	delta+8(FP), R3		// R3 = delta
	SYNC				// barrier before the LL/SC loop
	LL(2, 1)	// R1 = *R2
	ADDU	R1, R3, R4		// R4 = old + delta (32-bit add)
	MOVV	R4, R1			// save new value: SC overwrites R4 with its success flag
	SC(2, 4)	// *R2 = R4
	BEQ	R4, -4(PC)		// R4 == 0: SC failed, retry from the LL
	MOVW	R1, ret+16(FP)		// return the new value
	SYNC				// barrier after the update
	RET
    115 
// uint64 Xadd64(uint64 volatile *ptr, int64 delta)
// 64-bit twin of Xadd: *ptr += delta, returns the new value.
TEXT Xadd64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVV	delta+8(FP), R3		// R3 = delta
	SYNC				// barrier before the LL/SC loop
	LLV(2, 1)	// R1 = *R2
	ADDVU	R1, R3, R4		// R4 = old + delta (64-bit add)
	MOVV	R4, R1			// save new value: SCV overwrites R4 with its success flag
	SCV(2, 4)	// *R2 = R4
	BEQ	R4, -4(PC)		// R4 == 0: SCV failed, retry from the LLV
	MOVV	R1, ret+16(FP)		// return the new value
	SYNC				// barrier after the update
	RET
    128 
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically stores new into *ptr and returns the previous value.
TEXT Xchg(SB), NOSPLIT, $0-20
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVW	new+8(FP), R5		// R5 = new value

	SYNC				// barrier before the LL/SC loop
	MOVV	R5, R3			// refresh R3 each try: SC destroys its rt register
	LL(2, 1)	// R1 = *R2
	SC(2, 3)	// *R2 = R3
	BEQ	R3, -3(PC)		// R3 == 0: SC failed, retry from the MOVV
	MOVW	R1, ret+16(FP)		// return the old value
	SYNC				// barrier after the update
	RET
    141 
// uint64 Xchg64(uint64 volatile *ptr, uint64 new)
// 64-bit twin of Xchg: stores new into *ptr, returns the previous value.
TEXT Xchg64(SB), NOSPLIT, $0-24
	MOVV	ptr+0(FP), R2		// R2 = ptr
	MOVV	new+8(FP), R5		// R5 = new value

	SYNC				// barrier before the LL/SC loop
	MOVV	R5, R3			// refresh R3 each try: SCV destroys its rt register
	LLV(2, 1)	// R1 = *R2
	SCV(2, 3)	// *R2 = R3
	BEQ	R3, -3(PC)		// R3 == 0: SCV failed, retry from the MOVV
	MOVV	R1, ret+16(FP)		// return the old value
	SYNC				// barrier after the update
	RET
    154 
// uintptr is 64 bits on mips64: tail-jump to the 64-bit exchange.
TEXT Xchguintptr(SB), NOSPLIT, $0-24
	JMP	Xchg64(SB)
    157 
// Pointer store without a write barrier: same as a 64-bit store here.
TEXT StorepNoWB(SB), NOSPLIT, $0-16
	JMP	Store64(SB)
    160 
// void Store(uint32 volatile *ptr, uint32 val)
// Release-style atomic 32-bit store: a plain store bracketed by SYNC
// barriers. Naturally aligned word stores are single-copy atomic on
// MIPS, so no LL/SC loop is needed.
TEXT Store(SB), NOSPLIT, $0-12
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVW	val+8(FP), R2		// R2 = val
	SYNC				// order prior accesses before the store
	MOVW	R2, 0(R1)		// *ptr = val
	SYNC				// order the store before later accesses
	RET
    168 
// void Store64(uint64 volatile *ptr, uint64 val)
// 64-bit twin of Store: plain doubleword store bracketed by SYNC barriers.
TEXT Store64(SB), NOSPLIT, $0-16
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVV	val+8(FP), R2		// R2 = val
	SYNC				// order prior accesses before the store
	MOVV	R2, 0(R1)		// *ptr = val
	SYNC				// order the store before later accesses
	RET
    176 
// void	Or8(byte volatile*, byte);
// Atomically: *ptr |= val.
// MIPS has no sub-word LL/SC, so the byte is OR-ed into its containing
// aligned 32-bit word: shift val to the byte's position (the other
// three bytes of the mask are zero, so they are unchanged by OR).
TEXT Or8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVBU	val+8(FP), R2		// R2 = val, zero-extended
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3			// R3 = ptr &^ 3 (word address)
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1			// mirror the byte index within the word
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4			// byte offset -> bit offset
	// Shift val for aligned ptr. R2 = val << R4
	SLLV	R4, R2

	SYNC				// barrier before the LL/SC loop
	LL(3, 4)	// R4 = *R3
	OR	R2, R4			// set the target byte's bits
	SC(3, 4)	// *R3 = R4
	BEQ	R4, -4(PC)		// R4 == 0: SC failed, retry from the LL
	SYNC				// barrier after the update
	RET
    202 
// void	And8(byte volatile*, byte);
// Atomically: *ptr &= val.
// Like Or8, works on the containing aligned 32-bit word, but the mask
// must have all-ones in the other three byte lanes so AND leaves them
// unchanged: mask = (val << shift) | ~(0xFF << shift).
TEXT And8(SB), NOSPLIT, $0-9
	MOVV	ptr+0(FP), R1		// R1 = ptr
	MOVBU	val+8(FP), R2		// R2 = val, zero-extended
	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	MOVV	$~3, R3
	AND	R1, R3			// R3 = ptr &^ 3 (word address)
	// Compute val shift.
#ifdef GOARCH_mips64
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1			// mirror the byte index within the word
#endif
	// R4 = ((ptr & 3) * 8)
	AND	$3, R1, R4
	SLLV	$3, R4			// byte offset -> bit offset
	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	MOVV	$0xFF, R5
	SLLV	R4, R2			// R2 = val << shift
	SLLV	R4, R5			// R5 = 0xFF << shift
	NOR	R0, R5			// R5 = ^R5 (ones everywhere except the target byte)
	OR	R5, R2			// R2 = final AND mask

	SYNC				// barrier before the LL/SC loop
	LL(3, 4)	// R4 = *R3
	AND	R2, R4			// clear bits only in the target byte
	SC(3, 4)	// *R3 = R4
	BEQ	R4, -4(PC)		// R4 == 0: SC failed, retry from the LL
	SYNC				// barrier after the update
	RET
    232