/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
27 */ 28 #include <sys/linux-syscalls.h> 29 30 .global __atomic_cmpxchg 31 .type __atomic_cmpxchg, %function 32 .global __atomic_swap 33 .type __atomic_swap, %function 34 .global __atomic_dec 35 .type __atomic_dec, %function 36 .global __atomic_inc 37 .type __atomic_inc, %function 38 39 #define FUTEX_WAIT 0 40 #define FUTEX_WAKE 1 41 42 #if 1 43 .equ kernel_cmpxchg, 0xFFFF0FC0 44 .equ kernel_atomic_base, 0xFFFF0FFF 45 __atomic_dec: 46 .fnstart 47 .save {r4, lr} 48 stmdb sp!, {r4, lr} 49 mov r2, r0 50 1: @ atomic_dec 51 ldr r0, [r2] 52 mov r3, #kernel_atomic_base 53 add lr, pc, #4 54 sub r1, r0, #1 55 add pc, r3, #(kernel_cmpxchg - kernel_atomic_base) 56 bcc 1b 57 add r0, r1, #1 58 ldmia sp!, {r4, lr} 59 bx lr 60 .fnend 61 62 __atomic_inc: 63 .fnstart 64 .save {r4, lr} 65 stmdb sp!, {r4, lr} 66 mov r2, r0 67 1: @ atomic_inc 68 ldr r0, [r2] 69 mov r3, #kernel_atomic_base 70 add lr, pc, #4 71 add r1, r0, #1 72 add pc, r3, #(kernel_cmpxchg - kernel_atomic_base) 73 bcc 1b 74 sub r0, r1, #1 75 ldmia sp!, {r4, lr} 76 bx lr 77 .fnend 78 79 /* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */ 80 __atomic_cmpxchg: 81 .fnstart 82 .save {r4, lr} 83 stmdb sp!, {r4, lr} 84 mov r4, r0 /* r4 = save oldvalue */ 85 1: @ atomic_cmpxchg 86 mov r3, #kernel_atomic_base 87 add lr, pc, #4 88 mov r0, r4 /* r0 = oldvalue */ 89 add pc, r3, #(kernel_cmpxchg - kernel_atomic_base) 90 bcs 2f /* swap was made. we're good, return. */ 91 ldr r3, [r2] /* swap not made, see if it's because *ptr!=oldvalue */ 92 cmp r3, r4 93 beq 1b 94 2: @ atomic_cmpxchg 95 ldmia sp!, {r4, lr} 96 bx lr 97 .fnend 98 #else 99 #define KUSER_CMPXCHG 0xffffffc0 100 101 /* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */ 102 __atomic_cmpxchg: 103 stmdb sp!, {r4, lr} 104 mov r4, r0 /* r4 = save oldvalue */ 105 1: add lr, pc, #4 106 mov r0, r4 /* r0 = oldvalue */ 107 mov pc, #KUSER_CMPXCHG 108 bcs 2f /* swap was made. we're good, return. 
*/ 109 ldr r3, [r2] /* swap not made, see if it's because *ptr!=oldvalue */ 110 cmp r3, r4 111 beq 1b 112 2: ldmia sp!, {r4, lr} 113 bx lr 114 115 /* r0(addr) -> r0(old) */ 116 __atomic_dec: 117 stmdb sp!, {r4, lr} 118 mov r2, r0 /* address */ 119 1: ldr r0, [r2] /* oldvalue */ 120 add lr, pc, #4 121 sub r1, r0, #1 /* newvalue = oldvalue - 1 */ 122 mov pc, #KUSER_CMPXCHG 123 bcc 1b /* no swap, try again until we get it right */ 124 mov r0, ip /* swapped, return the old value */ 125 ldmia sp!, {r4, lr} 126 bx lr 127 128 /* r0(addr) -> r0(old) */ 129 __atomic_inc: 130 stmdb sp!, {r4, lr} 131 mov r2, r0 /* address */ 132 1: ldr r0, [r2] /* oldvalue */ 133 add lr, pc, #4 134 add r1, r0, #1 /* newvalue = oldvalue + 1 */ 135 mov pc, #KUSER_CMPXCHG 136 bcc 1b /* no swap, try again until we get it right */ 137 mov r0, ip /* swapped, return the old value */ 138 ldmia sp!, {r4, lr} 139 bx lr 140 #endif 141 142 /* r0(new) r1(addr) -> r0(old) */ 143 /* replaced swp instruction with ldrex/strex for ARMv6 & ARMv7 */ 144 __atomic_swap: 145 #if defined (_ARM_HAVE_LDREX_STREX) 146 1: ldrex r2, [r1] 147 strex r3, r0, [r1] 148 teq r3, #0 149 bne 1b 150 mov r0, r2 151 mcr p15, 0, r0, c7, c10, 5 /* or, use dmb */ 152 #else 153 swp r0, r0, [r1] 154 #endif 155 bx lr 156 157 /* __futex_wait(*ftx, val, *timespec) */ 158 /* __futex_wake(*ftx, counter) */ 159 /* __futex_syscall3(*ftx, op, val) */ 160 /* __futex_syscall4(*ftx, op, val, *timespec) */ 161 162 .global __futex_wait 163 .type __futex_wait, %function 164 165 .global __futex_wake 166 .type __futex_wake, %function 167 168 .global __futex_syscall3 169 .type __futex_syscall3, %function 170 171 .global __futex_syscall4 172 .type __futex_syscall4, %function 173 174 #if __ARM_EABI__ 175 176 __futex_syscall3: 177 .fnstart 178 stmdb sp!, {r4, r7} 179 .save {r4, r7} 180 ldr r7, =__NR_futex 181 swi #0 182 ldmia sp!, {r4, r7} 183 bx lr 184 .fnend 185 186 __futex_wait: 187 .fnstart 188 stmdb sp!, {r4, r7} 189 .save {r4, r7} 190 mov r3, r2 191 
mov r2, r1 192 mov r1, #FUTEX_WAIT 193 ldr r7, =__NR_futex 194 swi #0 195 ldmia sp!, {r4, r7} 196 bx lr 197 .fnend 198 199 __futex_wake: 200 stmdb sp!, {r4, r7} 201 mov r2, r1 202 mov r1, #FUTEX_WAKE 203 ldr r7, =__NR_futex 204 swi #0 205 ldmia sp!, {r4, r7} 206 bx lr 207 208 #else 209 210 __futex_syscall3: 211 swi #__NR_futex 212 bx lr 213 214 __futex_wait: 215 mov r3, r2 216 mov r2, r1 217 mov r1, #FUTEX_WAIT 218 swi #__NR_futex 219 bx lr 220 221 __futex_wake: 222 mov r2, r1 223 mov r1, #FUTEX_WAKE 224 swi #__NR_futex 225 bx lr 226 227 #endif 228 229 __futex_syscall4: 230 b __futex_syscall3 231