/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"

#include <cutils/atomic.h>

#if defined(__arm__)
#include <machine/cpu-features.h>
#endif

/*****************************************************************************/
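
/*
 * Pick a 64-bit "quasi-atomic" implementation for this platform:
 * inline LDREXD/STREXD assembly on ARM cores that support it, the
 * libkern OSAtomic primitives on Mac OS X, and a mutex-striped
 * pthreads fallback everywhere else.
 */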

#if defined(HAVE_MACOSX_IPC)
#define NEED_MAC_QUASI_ATOMICS 1

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__mips__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__arm__)

// TODO: Clang cannot process our inline assembly at the moment.
#if defined(__ARM_HAVE_LDREXD) && !defined(__clang__)
#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
#else
#define NEED_PTHREADS_QUASI_ATOMICS 1
#endif

#elif defined(__sh__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif

/*****************************************************************************/

#if NEED_ARM_LDREXD_QUASI_ATOMICS

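/*
 * LDREXD marks the doubleword at [addr] for exclusive access and STREXD
 * writes the new value only if that reservation is still held, setting
 * status to 0 on success and nonzero if another observer touched the
 * location in between. Retrying while status != 0 makes the swap atomic;
 * the __builtin_expect hint tells the compiler the retry path is rare.
 */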
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
                                               volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}

int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(newvalue, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    ANDROID_MEMBAR_STORE();
    prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
    ANDROID_MEMBAR_FULL();
    return prev;
}

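/*
 * Compare-and-swap: TEQ/TEQEQ compare both 32-bit halves of the loaded
 * doubleword against oldvalue, and STREXDEQ performs the store only when
 * they match. Since status is pre-set to 0 by the MOV, a failed compare
 * falls out of the loop immediately. Note the return convention used
 * throughout this file: 0 means the swap happened, nonzero means the
 * values differed (the inverse of the usual boolean sense).
 */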
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;
}

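/*
 * A single LDREXD is a single-copy atomic 64-bit load on ARMv7; the
 * exclusive reservation it opens is simply never consumed.
 */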
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}
#endif

/*****************************************************************************/

#if NEED_MAC_QUASI_ATOMICS

#include <libkern/OSAtomic.h>

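/*
 * OSAtomicCompareAndSwap64Barrier() returns true (nonzero) when the swap
 * succeeds, so comparing against 0 below inverts it into this file's
 * convention of 0-on-success. The retry loop in dvmQuasiAtomicSwap64Body
 * relies on that: it keeps spinning while the CAS reports failure.
 */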
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}

static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
                                               volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(value, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    ANDROID_MEMBAR_STORE();
    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
    /* TUNING: barriers can be avoided on some architectures */
    ANDROID_MEMBAR_FULL();
    return oldValue;
}

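/*
 * The OSAtomic family offers no dedicated 64-bit atomic load, so
 * atomically adding zero (with a barrier) serves as an atomic read.
 */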
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    return OSAtomicAdd64Barrier(0, const_cast<volatile int64_t*>(addr));
}
#endif

/*****************************************************************************/

#if NEED_PTHREADS_QUASI_ATOMICS

// In the absence of a better implementation, we implement the 64-bit atomic
// operations through mutex locking.

// Another twist: we use a small array of mutexes, selected by address, to
// spread lock contention across different memory locations.

#include <pthread.h>

static const size_t kSwapLockCount = 32;
static pthread_mutex_t* gSwapLocks[kSwapLockCount];

void dvmQuasiAtomicsStartup() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = new pthread_mutex_t;
        dvmInitMutex(m);
        gSwapLocks[i] = m;
    }
}

void dvmQuasiAtomicsShutdown() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = gSwapLocks[i];
        gSwapLocks[i] = NULL;
        if (m != NULL) {
            dvmDestroyMutex(m);
        }
        delete m;
    }
}

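/*
 * Hash the address onto one of the kSwapLockCount mutexes, striping the
 * locks across addresses. The low three bits are shifted away first:
 * they are constant for naturally aligned int64_t values and would bias
 * the distribution.
 */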
static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
    return gSwapLocks[((unsigned)(void*)(addr) >> 3U) % kSwapLockCount];
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);

    oldValue = *addr;
    *addr    = value;

    pthread_mutex_unlock(lock);
    return oldValue;
}

/* Same as dvmQuasiAtomicSwap64 - mutex handles barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);

    if (*addr == oldvalue) {
        *addr  = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}

#else

// The other implementations don't need any special setup.
void dvmQuasiAtomicsStartup() {}
void dvmQuasiAtomicsShutdown() {}

#endif /*NEED_PTHREADS_QUASI_ATOMICS*/
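
/*
 * Illustrative sketch (not part of the Dalvik API): how a caller might
 * build a 64-bit atomic add on top of the primitives above. The helper
 * name exampleQuasiAtomicAdd64 is hypothetical. It leans on the CAS
 * convention used throughout this file: dvmQuasiAtomicCas64() returns 0
 * when the swap happened and nonzero when the value had changed, so the
 * loop retries until the CAS succeeds.
 */
#if 0   /* example only; not compiled */
static int64_t exampleQuasiAtomicAdd64(int64_t delta, volatile int64_t* addr)
{
    int64_t old;
    do {
        old = dvmQuasiAtomicRead64(addr);   /* atomic snapshot of *addr */
    } while (dvmQuasiAtomicCas64(old, old + delta, addr) != 0);
    return old + delta;                     /* the value we installed */
}
#endif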