Home | History | Annotate | Download | only in atomic
      1 // Copyright 2015 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 #include "textflag.h"
      6 
// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	}else
//		return 0;
// NOTE(review): the symbol name appears garbled by extraction; upstream Go
// sources spell it runtime∕internal∕atomic·Cas — confirm against the original.
TEXT runtimeinternalatomicCas(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX	// CMPXCHGL compares memory against AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)	// if 0(BX)==AX { 0(BX)=CX; ZF=1 } else { AX=0(BX); ZF=0 }
	SETEQ	ret+12(FP)	// result = ZF (1 on successful swap)
	RET
     22 
// uintptr is 32 bits on 386, so Casuintptr shares the 32-bit Cas implementation.
TEXT runtimeinternalatomicCasuintptr(SB), NOSPLIT, $0-13
	JMP	runtimeinternalatomicCas(SB)
     25 
// uintptr is 32 bits on 386, so Loaduintptr shares the 32-bit Load implementation.
TEXT runtimeinternalatomicLoaduintptr(SB), NOSPLIT, $0-8
	JMP	runtimeinternalatomicLoad(SB)
     28 
// uint is 32 bits on 386, so Loaduint shares the 32-bit Load implementation.
TEXT runtimeinternalatomicLoaduint(SB), NOSPLIT, $0-8
	JMP	runtimeinternalatomicLoad(SB)
     31 
// uintptr is 32 bits on 386, so Storeuintptr shares the 32-bit Store implementation.
TEXT runtimeinternalatomicStoreuintptr(SB), NOSPLIT, $0-8
	JMP	runtimeinternalatomicStore(SB)
     34 
// uintptr is 32 bits on 386, so Xadduintptr shares the 32-bit Xadd implementation.
TEXT runtimeinternalatomicXadduintptr(SB), NOSPLIT, $0-12
	JMP runtimeinternalatomicXadd(SB)
     37 
// int64 and uint64 have the same size and alignment, so Loadint64
// reuses the unsigned 64-bit atomic load.
TEXT runtimeinternalatomicLoadint64(SB), NOSPLIT, $0-12
	JMP runtimeinternalatomicLoad64(SB)
     40 
// int64 and uint64 have the same representation, so Xaddint64
// reuses the unsigned 64-bit atomic add.
TEXT runtimeinternalatomicXaddint64(SB), NOSPLIT, $0-20
	JMP runtimeinternalatomicXadd64(SB)
     43 
     44 
// bool runtimeinternalatomicCas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
// ptr must be 8-byte aligned: CMPXCHG8B does not guarantee atomicity on an
// unaligned operand, so a misaligned pointer is turned into a deliberate crash.
TEXT runtimeinternalatomicCas64(SB), NOSPLIT, $0-21
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP		// low 3 bits must be zero (8-byte aligned)
	JZ	2(PC)
	MOVL	0, BP // crash with nil ptr deref
	MOVL	old_lo+4(FP), AX	// CMPXCHG8B compares memory against DX:AX...
	MOVL	old_hi+8(FP), DX
	MOVL	new_lo+12(FP), BX	// ...and stores CX:BX on a match
	MOVL	new_hi+16(FP), CX
	LOCK
	CMPXCHG8B	0(BP)
	SETEQ	ret+20(FP)	// result = ZF (1 on successful swap)
	RET
     66 
// bool Casp1(void **p, void *old, void *new)
// Atomically:
//	if(*p == old){
//		*p = new;
//		return 1;
//	}else
//		return 0;
// Pointer compare-and-swap; identical to Cas on 386 since pointers are 32 bits.
// No write barrier is performed here — callers are responsible for GC safety.
TEXT runtimeinternalatomicCasp1(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX	// CMPXCHGL compares memory against AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)	// if 0(BX)==AX { 0(BX)=CX; ZF=1 } else { AX=0(BX); ZF=0 }
	SETEQ	ret+12(FP)	// result = ZF (1 on successful swap)
	RET
     82 
// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
TEXT runtimeinternalatomicXadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX		// keep a copy of delta; XADDL overwrites AX
	LOCK
	XADDL	AX, 0(BX)	// AX = old *val; *val += delta, atomically
	ADDL	CX, AX		// AX = old value + delta = new *val
	MOVL	AX, ret+8(FP)
	RET
     96 
// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically swaps *ptr and new, returning the old value.
// XCHGL with a memory operand is implicitly LOCKed on x86.
TEXT runtimeinternalatomicXchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)	// AX = old *ptr; *ptr = new
	MOVL	AX, ret+8(FP)
	RET
    103 
// uintptr is 32 bits on 386, so Xchguintptr shares the 32-bit Xchg implementation.
TEXT runtimeinternalatomicXchguintptr(SB), NOSPLIT, $0-12
	JMP	runtimeinternalatomicXchg(SB)
    106 
    107 
// void StorepNoWB(void **ptr, void *val)
// Atomically stores a pointer WITHOUT a write barrier (hence "NoWB");
// the implicit LOCK of XCHGL provides the required memory fence.
TEXT runtimeinternalatomicStorepNoWB(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)	// locked exchange doubles as store + full fence
	RET
    113 
// void Store(uint32 *ptr, uint32 val)
// Atomic 32-bit store with sequential consistency: XCHGL's implicit
// LOCK acts as a full memory fence (a plain MOVL would not).
TEXT runtimeinternalatomicStore(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)	// locked exchange doubles as store + full fence
	RET
    119 
// uint64 atomicload64(uint64 volatile* addr);
// 64-bit atomic load on 386 via an MMX MOVQ, which is a single 8-byte access
// and therefore atomic on aligned addresses. A misaligned addr is turned into
// a deliberate crash. The MMX instructions are hand-encoded as raw bytes;
// presumably the Go assembler did not support them — confirm before changing.
TEXT runtimeinternalatomicLoad64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX		// low 3 bits must be zero (8-byte aligned)
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref
	LEAL	ret_lo+4(FP), BX	// BX = address of 8-byte return slot
	// MOVQ (%EAX), %MM0
	BYTE $0x0f; BYTE $0x6f; BYTE $0x00
	// MOVQ %MM0, 0(%EBX)
	BYTE $0x0f; BYTE $0x7f; BYTE $0x03
	// EMMS — leave MMX state so the FPU is usable again
	BYTE $0x0F; BYTE $0x77
	RET
    134 
// void runtimeinternalatomicStore64(uint64 volatile* addr, uint64 v);
// 64-bit atomic store on 386 via an MMX MOVQ (single 8-byte access, atomic
// when aligned). A misaligned addr is turned into a deliberate crash.
TEXT runtimeinternalatomicStore64(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX		// low 3 bits must be zero (8-byte aligned)
	JZ	2(PC)
	MOVL	0, AX // crash with nil ptr deref
	// MOVQ and EMMS were introduced on the Pentium MMX.
	// MOVQ 0x8(%ESP), %MM0 — load v (the 8-byte argument) into MM0
	BYTE $0x0f; BYTE $0x6f; BYTE $0x44; BYTE $0x24; BYTE $0x08
	// MOVQ %MM0, (%EAX) — single 8-byte store to *addr
	BYTE $0x0f; BYTE $0x7f; BYTE $0x00
	// EMMS — leave MMX state so the FPU is usable again
	BYTE $0x0F; BYTE $0x77
	// This is essentially a no-op, but it provides required memory fencing.
	// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
	MOVL	$0, AX
	LOCK
	XADDL	AX, (SP)	// LOCKed RMW on the stack = full fence, value unchanged
	RET
    154 
// void	runtimeinternalatomicOr8(byte volatile*, byte);
// Atomically: *ptr |= val.
TEXT runtimeinternalatomicOr8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ORB	BX, (AX)	// locked read-modify-write of the single byte
	RET
    162 
// void	runtimeinternalatomicAnd8(byte volatile*, byte);
// Atomically: *ptr &= val.
TEXT runtimeinternalatomicAnd8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ANDB	BX, (AX)	// locked read-modify-write of the single byte
	RET
    170