/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"

#include <cutils/atomic.h>

/*
 * Quasi-atomic 64-bit operations, for platforms that lack the real thing.
 *
 * TODO: unify the ARMv6/x86/sh implementations using the to-be-written
 * spin lock implementation.  We don't want to rely on mutex innards,
 * and it would be great if all platforms were running the same code.
 */
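
/*
 * Usage note (illustrative only): callers can compose richer 64-bit atomics
 * from this interface with a standard compare-and-swap retry loop, relying
 * on the convention that dvmQuasiAtomicCas64() returns 0 on success.  A
 * hypothetical fetch-and-add, for example (quasiAtomicAdd64 is not part of
 * this file):
 *
 *     static int64_t quasiAtomicAdd64(int64_t delta, volatile int64_t* addr)
 *     {
 *         int64_t old;
 *         do {
 *             old = dvmQuasiAtomicRead64(addr);
 *         } while (dvmQuasiAtomicCas64(old, old + delta, addr) != 0);
 *         return old + delta;
 *     }
 */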

#if defined(HAVE_MACOSX_IPC)

#include <libkern/OSAtomic.h>

#if defined(__ppc__)        \
    || defined(__PPC__)     \
    || defined(__powerpc__) \
    || defined(__powerpc)   \
    || defined(__POWERPC__) \
    || defined(_M_PPC)      \
    || defined(__PPC)
#define NEED_QUASIATOMICS 1
#else
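
/*
 * Note on conventions: every dvmQuasiAtomicCas64() implementation in this
 * file returns 0 on success and nonzero on failure.  OSAtomic uses the
 * opposite convention (true on success), hence the "== 0" below.
 */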

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
        /* retry until the CAS succeeds, i.e. returns 0 */
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    /* atomically adding 0 yields the current value, with a barrier;
     * cast away const to match the OSAtomic prototype */
    return OSAtomicAdd64Barrier(0, (volatile int64_t*)addr);
}
#endif

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_QUASIATOMICS 1

#elif defined(__arm__)
#include <machine/cpu-features.h>

#ifdef __ARM_HAVE_LDREXD
int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        /*
         * ldrexd/strexd: exclusive 64-bit load and store.  The store
         * sets status nonzero if the exclusive monitor was lost, in
         * which case we simply retry.
         */
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        /*
         * Compare both halves against oldvalue; only if they match
         * (teq/teqeq leave Z set) does the conditional strexdeq
         * attempt the store.  status stays 0 when no store was tried.
         */
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;    /* 0 on success, per the file-wide convention */
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    /*
     * A plain ldrd is not guaranteed to be atomic; an exclusive 64-bit
     * load is, so the value read here is a consistent snapshot.
     */
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}

#else

// On the device, we implement the 64-bit atomic operations through
// mutex locking.  Normally this is a poor choice, because a
// pthread_mutex_t must be initialized before use, which would force an
// initialization check on every call, and that is where really ugly
// things begin...
//
// BUT, as a special twist, we take advantage of the fact that in our
// pthread library a mutex is simply a volatile word whose value is
// always initialized to 0.  In other words, merely declaring a static
// mutex object initializes it!
//
// A second twist is that we use a small array of mutexes, dispatching
// on the memory address, so that operations on different addresses
// rarely contend for the same lock (see the dispatch example below the
// SWAP_LOCK definition).
//

#include <pthread.h>

#define  SWAP_LOCK_COUNT  32U
static pthread_mutex_t  _swap_locks[SWAP_LOCK_COUNT];

#define  SWAP_LOCK(addr)   \
   &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
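
/*
 * Illustrative example of the dispatch (assumes 8-byte-aligned int64_t
 * addresses, so the low three bits carry no information and are shifted
 * away before indexing):
 *
 *     SWAP_LOCK(0x1008)  ->  &_swap_locks[(0x1008 >> 3) % 32]  ==  &_swap_locks[1]
 *     SWAP_LOCK(0x1010)  ->  &_swap_locks[(0x1010 >> 3) % 32]  ==  &_swap_locks[2]
 *
 * Two operations contend for the same mutex only when their addresses
 * collide modulo the 32 slots.
 */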


int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    oldValue = *addr;
    *addr    = value;

    pthread_mutex_unlock(lock);
    return oldValue;
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    if (*addr == oldvalue) {
        *addr  = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}

#endif /*__ARM_HAVE_LDREXD*/

/*****************************************************************************/
#elif defined(__sh__)
#define NEED_QUASIATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif


#if NEED_QUASIATOMICS

/* Note that a spinlock is *not* a good idea in general
 * since spinlocks can introduce subtle issues.  For example,
 * a real-time thread trying to acquire a spinlock already
 * held by another thread will never yield, so the CPU
 * loops endlessly!
 *
 * However, this code is only used on the Linux simulator,
 * so it's probably OK for us.
 *
 * The alternative is to use a pthread mutex, but a mutex
 * must be initialized before being used, and then you have
 * the problem of lazily initializing a mutex without any
 * other synchronization primitive to protect the initialization.
 *
 * TODO: these currently use sched_yield(), which is not guaranteed to
 * do anything at all.  We need to use dvmIterativeSleep or a wait /
 * notify mechanism if the initial attempt fails.
 */
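
/* Where pthread_once() is available, the lazy initialization mentioned
 * above could look like the following sketch (illustrative only; these
 * names are not used anywhere in this file):
 *
 *     static pthread_mutex_t gQuasiMutex;
 *     static pthread_once_t  gQuasiOnce = PTHREAD_ONCE_INIT;
 *
 *     static void quasiMutexInit(void)
 *     {
 *         pthread_mutex_init(&gQuasiMutex, NULL);
 *     }
 *
 *     // before each use:
 *     pthread_once(&gQuasiOnce, quasiMutexInit);
 *     pthread_mutex_lock(&gQuasiMutex);
 */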

/* global spinlock for all 64-bit quasiatomic operations */
static int32_t quasiatomic_spinlock = 0;

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }

    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    *addr = value;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

#endif /*NEED_QUASIATOMICS*/