/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"

#include <cutils/atomic.h>

/*
 * Quasi-atomic 64-bit operations, for platforms that lack the real thing.
 *
 * TODO: unify ARMv6/x86/sh implementations using the to-be-written
 * spin lock implementation.  We don't want to rely on mutex innards,
 * and it would be great if all platforms were running the same code.
 */
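
/*
 * Convention shared by all implementations below: dvmQuasiAtomicCas64()
 * returns 0 when the value at "addr" matched "oldvalue" and was replaced,
 * and nonzero otherwise; the swap functions return the previous value.
 * A typical caller-side retry loop would therefore look roughly like:
 *
 *     int64_t old;
 *     do {
 *         old = dvmQuasiAtomicRead64(addr);
 *     } while (dvmQuasiAtomicCas64(old, old + 1, addr) != 0);
 */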

#if defined(HAVE_MACOSX_IPC)

#include <libkern/OSAtomic.h>

#if defined(__ppc__)        \
    || defined(__PPC__)     \
    || defined(__powerpc__) \
    || defined(__powerpc)   \
    || defined(__POWERPC__) \
    || defined(_M_PPC)      \
    || defined(__PPC)
#define NEED_QUASIATOMICS 1
#else

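/*
 * Note: OSAtomicCompareAndSwap64Barrier() returns true when the swap
 * happens, so the result is inverted below to match the 0-on-success
 * convention used by the other dvmQuasiAtomicCas64 implementations in
 * this file.
 */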
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}


static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
                                               volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(value, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    ANDROID_MEMBAR_STORE();
    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
    /* TUNING: barriers can be avoided on some architectures */
    ANDROID_MEMBAR_FULL();
    return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    return OSAtomicAdd64Barrier(0, addr);
}
#endif

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_QUASIATOMICS 1

#elif __arm__
#include <machine/cpu-features.h>

#ifdef __ARM_HAVE_LDREXD
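/*
 * The implementations below rely on the ARM exclusive-access pair:
 * LDREXD loads a 64-bit value and marks the location for exclusive
 * access, and STREXD stores a 64-bit value only if that reservation is
 * still held, writing 0 into its status register on success and 1 if
 * another agent touched the location in between.  Each retry loop below
 * simply repeats the sequence until the store succeeds.
 */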
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
                                               volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}

int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(newvalue, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    ANDROID_MEMBAR_STORE();
    prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
    ANDROID_MEMBAR_FULL();
    return prev;
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;
}

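/*
 * A paired store is not needed for a plain read: on processors that
 * provide LDREXD, an exclusive doubleword load of an aligned location
 * is performed as a single-copy atomic 64-bit access, so one LDREXD is
 * enough to observe a consistent value.
 */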
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}

#else

// On the device, we implement the 64-bit atomic operations through
// mutex locking.  Normally this is bad, because a pthread_mutex_t must
// be initialized before it can be used, which would force an
// initialization check on every call, and that's where really ugly
// things begin...
//
// BUT, as a special twist, we take advantage of the fact that in our
// pthread library a mutex is simply a volatile word whose value is
// always initialized to 0.  In other words, simply declaring a static
// mutex object initializes it!
//
// Another twist is that we use a small array of mutexes, so that locks
// for different memory addresses are spread across several mutexes
// instead of contending on a single one.
//

#include <pthread.h>

#define  SWAP_LOCK_COUNT  32U
static pthread_mutex_t  _swap_locks[SWAP_LOCK_COUNT];

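/*
 * Map an address to one of the locks above: the low 3 bits are dropped
 * (int64_t values are naturally 8-byte aligned, so those bits carry no
 * information) and the result is reduced modulo SWAP_LOCK_COUNT.
 * Distinct addresses may hash to the same mutex; that only costs some
 * extra contention, never correctness.
 */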
#define  SWAP_LOCK(addr)   \
   &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]


int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t*  lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    oldValue = *addr;
    *addr    = value;

    pthread_mutex_unlock(lock);
    return oldValue;
}

/* Same as dvmQuasiAtomicSwap64 - mutex handles barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t*  lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    if (*addr == oldvalue) {
        *addr  = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t*  lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}

#endif /*__ARM_HAVE_LDREXD*/

/*****************************************************************************/
#elif __sh__
#define NEED_QUASIATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif


#if NEED_QUASIATOMICS

/* Note that a spinlock is *not* a good idea in general
 * since it can introduce subtle issues.  For example,
 * a real-time thread trying to acquire a spinlock already
 * held by another thread will never yield, making the
 * CPU loop endlessly!
 *
 * However, this code is only used on the Linux simulator
 * so it's probably OK for us.
 *
 * The alternative is to use a pthread mutex, but
 * these must be initialized before being used, and
 * then you have the problem of lazily initializing
 * a mutex without any other synchronization primitive.
 *
 * TODO: these currently use sched_yield(), which is not guaranteed to
 * do anything at all.  We need to use dvmIterativeSleep or a wait /
 * notify mechanism if the initial attempt fails.
 */

/* global spinlock for all 64-bit quasiatomic operations */
static int32_t quasiatomic_spinlock = 0;
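
/*
 * Lock protocol used below: android_atomic_acquire_cas(0, 1, &lock)
 * takes the spinlock when it succeeds (returns 0); otherwise the caller
 * yields and retries.  android_atomic_release_store(0, &lock) releases
 * it.  The acquire/release semantics of those two operations provide
 * the memory ordering for the protected 64-bit access.
 */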

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }

    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    *addr = value;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

/* Same as dvmQuasiAtomicSwap64 - the spinlock acquire/release handles the barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

#endif /*NEED_QUASIATOMICS*/