// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build mips mipsle

#include "textflag.h"

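// Compare-and-swap. The $0-13 frame suggests the Go-level signature
// Cas(ptr *uint32, old, new uint32) bool (inferred from the argument
// offsets below; not stated in this file). Atomically:
//	if *ptr == old { *ptr = new; return true } else { return false }
// When the comparison matches, the LL/SC pair retries until the
// store-conditional succeeds; the surrounding SYNCs act as memory barriers.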
TEXT ·Cas(SB),NOSPLIT,$0-13
	MOVW	ptr+0(FP), R1
	MOVW	old+4(FP), R2
	MOVW	new+8(FP), R5
	SYNC
try_cas:
	MOVW	R5, R3
	LL	(R1), R4	// R4 = *R1
	BNE	R2, R4, cas_fail
	SC	R3, (R1)	// *R1 = R3
	BEQ	R3, try_cas
	SYNC
	MOVB	R3, ret+12(FP)
	RET
cas_fail:
	MOVB	R0, ret+12(FP)
	RET

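// Store(ptr *uint32, val uint32) (layout inferred from the frame below):
// a plain word store bracketed by SYNC barriers. No LL/SC loop is needed
// because an aligned 32-bit store is a single memory operation on MIPS32.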
TEXT ·Store(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	MOVW	val+4(FP), R2
	SYNC
	MOVW	R2, 0(R1)
	SYNC
	RET

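// Load(ptr *uint32) uint32 (layout inferred from the frame below):
// a word load bracketed by SYNC barriers.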
TEXT ·Load(SB),NOSPLIT,$0-8
	MOVW	ptr+0(FP), R1
	SYNC
	MOVW	0(R1), R1
	SYNC
	MOVW	R1, ret+4(FP)
	RET

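// Xadd(ptr *uint32, delta int32) uint32 (layout inferred from the frame
// below): atomically adds delta to *ptr and returns the new value. The
// LL/SC loop restarts if the word was written between LL and SC.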
TEXT ·Xadd(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	delta+4(FP), R3
	SYNC
try_xadd:
	LL	(R2), R1	// R1 = *R2
	ADDU	R1, R3, R4
	MOVW	R4, R1
	SC	R4, (R2)	// *R2 = R4
	BEQ	R4, try_xadd
	SYNC
	MOVW	R1, ret+8(FP)
	RET

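// Xchg(ptr *uint32, new uint32) uint32 (layout inferred from the frame
// below): atomically stores new into *ptr and returns the previous value.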
TEXT ·Xchg(SB),NOSPLIT,$0-12
	MOVW	ptr+0(FP), R2
	MOVW	new+4(FP), R5
	SYNC
try_xchg:
	MOVW	R5, R3
	LL	(R2), R1	// R1 = *R2
	SC	R3, (R2)	// *R2 = R3
	BEQ	R3, try_xchg
	SYNC
	MOVW	R1, ret+8(FP)
	RET

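// The remaining variants are thin wrappers: uintptr, uint, and pointers
// are all 32 bits wide on mips/mipsle, so they tail-jump to the 32-bit
// routines above. The int64 variants jump to Load64 and Xadd64, which are
// presumably provided elsewhere in this package.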
TEXT ·Casuintptr(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB),NOSPLIT,$0-8
	JMP	·Load(SB)

TEXT ·Loadp(SB),NOSPLIT,$-0-8
	JMP	·Load(SB)

TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
	JMP	·Xadd(SB)

TEXT ·Loadint64(SB),NOSPLIT,$0-12
	JMP	·Load64(SB)

TEXT ·Xaddint64(SB),NOSPLIT,$0-20
	JMP	·Xadd64(SB)

TEXT ·Casp1(SB),NOSPLIT,$0-13
	JMP	·Cas(SB)

TEXT ·Xchguintptr(SB),NOSPLIT,$0-12
	JMP	·Xchg(SB)

TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
	JMP	·Store(SB)

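// Or8 and And8 operate on a single byte, but LL/SC only work on aligned
// 32-bit words. Each routine therefore aligns the pointer down to a word
// boundary, shifts the byte operand into the correct lane of that word
// (with the lane flipped for big-endian GOARCH_mips), and performs an
// LL/SC read-modify-write on the whole word.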
// void	Or8(byte volatile*, byte);
TEXT ·Or8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3	// Align ptr down to 4 bytes so we can use 32-bit load/store.
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	SLL	R4, R2, R2	// Shift val for aligned ptr. R2 = val << R4
	SYNC
try_or8:
	LL	(R3), R4	// R4 = *R3
	OR	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_or8
	SYNC
	RET

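// And8 additionally widens the byte into a full-word mask so the AND
// leaves the other three bytes of the word unchanged:
//	R2 = (val << R4) | ^(0xFF << R4)
// The complement is formed with NOR against R0, the hard-wired zero
// register.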
// void	And8(byte volatile*, byte);
TEXT ·And8(SB),NOSPLIT,$0-5
	MOVW	ptr+0(FP), R1
	MOVBU	val+4(FP), R2
	MOVW	$~3, R3
	AND	R1, R3
#ifdef GOARCH_mips
	// Big endian.  ptr = ptr ^ 3
	XOR	$3, R1
#endif
	AND	$3, R1, R4	// R4 = ((ptr & 3) * 8)
	SLL	$3, R4
	MOVW	$0xFF, R5
	SLL	R4, R2
	SLL	R4, R5
	NOR	R0, R5
	OR	R5, R2	// Shift val for aligned ptr. R2 = val << R4 | ^(0xFF << R4)
	SYNC
try_and8:
	LL	(R3), R4	// R4 = *R3
	AND	R2, R4
	SC	R4, (R3)	// *R3 = R4
	BEQ	R4, try_and8
	SYNC
	RET
    150