/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Memory"

#include "JNIHelp.h"
#include "JniConstants.h"
#include "Portability.h"
#include "ScopedBytes.h"
#include "ScopedPrimitiveArray.h"
#include "UniquePtr.h"

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#if defined(__arm__)
// 32-bit ARM has load/store alignment restrictions for longs.
#define LONG_ALIGNMENT_MASK 0x3
#define INT_ALIGNMENT_MASK 0x0
#define SHORT_ALIGNMENT_MASK 0x0
#elif defined(__mips__)
// MIPS has load/store alignment restrictions for longs, ints and shorts.
#define LONG_ALIGNMENT_MASK 0x7
#define INT_ALIGNMENT_MASK 0x3
#define SHORT_ALIGNMENT_MASK 0x1
#elif defined(__aarch64__) || defined(__i386__) || defined(__x86_64__)
// These architectures can load anything at any alignment.
#define LONG_ALIGNMENT_MASK 0x0
#define INT_ALIGNMENT_MASK 0x0
#define SHORT_ALIGNMENT_MASK 0x0
#else
#error unknown load/store alignment restrictions for this architecture
#endif

// Use packed structures for access to unaligned data on targets with alignment restrictions.
// The compiler will generate appropriate code to access these structures without
// generating alignment exceptions.
template <typename T> static inline T get_unaligned(const T* address) {
    struct unaligned { T v; } __attribute__ ((packed));
    const unaligned* p = reinterpret_cast<const unaligned*>(address);
    return p->v;
}

template <typename T> static inline void put_unaligned(T* address, T v) {
    struct unaligned { T v; } __attribute__ ((packed));
    unaligned* p = reinterpret_cast<unaligned*>(address);
    p->v = v;
}

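// Convert a Java-side address (a jlong) to a native pointer of type T. Going via
// uintptr_t keeps the conversion well-formed on both 32-bit and 64-bit targets.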
template <typename T> static T cast(jlong address) {
    return reinterpret_cast<T>(static_cast<uintptr_t>(address));
}

// Byte-swap 2 jshort values packed in a jint.
static inline jint bswap_2x16(jint v) {
    // v is initially ABCD
#if defined(__mips__) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
    __asm__ volatile ("wsbh %0, %0" : "+r" (v));  // v=BADC
#else
    v = bswap_32(v);                              // v=DCBA
    v = (v << 16) | ((v >> 16) & 0xffff);         // v=BADC
#endif
    return v;
}

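// Copy 'count' 16-bit values from srcShorts to dstShorts, byte-swapping each one.
// Falls back to the packed-struct accessors when either pointer is misaligned for 32-bit loads.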
static inline void swapShorts(jshort* dstShorts, const jshort* srcShorts, size_t count) {
    // Do 32-bit swaps as long as possible...
    jint* dst = reinterpret_cast<jint*>(dstShorts);
    const jint* src = reinterpret_cast<const jint*>(srcShorts);

    if ((reinterpret_cast<uintptr_t>(dst) & INT_ALIGNMENT_MASK) == 0 &&
        (reinterpret_cast<uintptr_t>(src) & INT_ALIGNMENT_MASK) == 0) {
        for (size_t i = 0; i < count / 2; ++i) {
            jint v = *src++;
            *dst++ = bswap_2x16(v);
        }
        // ...with one last 16-bit swap if necessary.
        if ((count % 2) != 0) {
            jshort v = *reinterpret_cast<const jshort*>(src);
            *reinterpret_cast<jshort*>(dst) = bswap_16(v);
        }
    } else {
        for (size_t i = 0; i < count / 2; ++i) {
            jint v = get_unaligned<jint>(src++);
            put_unaligned<jint>(dst++, bswap_2x16(v));
        }
        if ((count % 2) != 0) {
            jshort v = get_unaligned<jshort>(reinterpret_cast<const jshort*>(src));
            put_unaligned<jshort>(reinterpret_cast<jshort*>(dst), bswap_16(v));
        }
    }
}

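// Copy 'count' 32-bit values from srcInts to dstInts, byte-swapping each one.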
static inline void swapInts(jint* dstInts, const jint* srcInts, size_t count) {
    if ((reinterpret_cast<uintptr_t>(dstInts) & INT_ALIGNMENT_MASK) == 0 &&
        (reinterpret_cast<uintptr_t>(srcInts) & INT_ALIGNMENT_MASK) == 0) {
        for (size_t i = 0; i < count; ++i) {
            jint v = *srcInts++;
            *dstInts++ = bswap_32(v);
        }
    } else {
        for (size_t i = 0; i < count; ++i) {
            jint v = get_unaligned<jint>(srcInts++);
            put_unaligned<jint>(dstInts++, bswap_32(v));
        }
    }
}

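// Copy 'count' 64-bit values from srcLongs to dstLongs, byte-swapping each one. Each jlong is
// handled as two 32-bit halves that are swapped and exchanged, so only 32-bit alignment matters.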
static inline void swapLongs(jlong* dstLongs, const jlong* srcLongs, size_t count) {
    jint* dst = reinterpret_cast<jint*>(dstLongs);
    const jint* src = reinterpret_cast<const jint*>(srcLongs);
    if ((reinterpret_cast<uintptr_t>(dstLongs) & INT_ALIGNMENT_MASK) == 0 &&
        (reinterpret_cast<uintptr_t>(srcLongs) & INT_ALIGNMENT_MASK) == 0) {
        for (size_t i = 0; i < count; ++i) {
            jint v1 = *src++;
            jint v2 = *src++;
            *dst++ = bswap_32(v2);
            *dst++ = bswap_32(v1);
        }
    } else {
        for (size_t i = 0; i < count; ++i) {
            jint v1 = get_unaligned<jint>(src++);
            jint v2 = get_unaligned<jint>(src++);
            put_unaligned<jint>(dst++, bswap_32(v2));
            put_unaligned<jint>(dst++, bswap_32(v1));
        }
    }
}

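// Copy 'length' bytes between two Java-side buffers (byte[]s or direct buffers);
// ScopedBytesRW/ScopedBytesRO resolve either kind of object to a raw pointer.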
static void Memory_memmove(JNIEnv* env, jclass, jobject dstObject, jint dstOffset, jobject srcObject, jint srcOffset, jlong length) {
    ScopedBytesRW dstBytes(env, dstObject);
    if (dstBytes.get() == NULL) {
        return;
    }
    ScopedBytesRO srcBytes(env, srcObject);
    if (srcBytes.get() == NULL) {
        return;
    }
    memmove(dstBytes.get() + dstOffset, srcBytes.get() + srcOffset, length);
}

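// Bytes have no byte order, so the byte peek/poke operations never need a swapped variant.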
static jbyte Memory_peekByte(JNIEnv*, jclass, jlong srcAddress) {
    return *cast<const jbyte*>(srcAddress);
}

static void Memory_peekByteArray(JNIEnv* env, jclass, jlong srcAddress, jbyteArray dst, jint dstOffset, jint byteCount) {
    env->SetByteArrayRegion(dst, dstOffset, byteCount, cast<const jbyte*>(srcAddress));
}

// Implements the peekXArray methods:
// - For unswapped access, we just use the JNI SetXArrayRegion functions.
// - For swapped access, we use GetXArrayElements and our own copy-and-swap routines.
//   GetXArrayElements is disproportionately cheap on Dalvik because it doesn't copy (as opposed
//   to Hotspot, which always copies). The SWAP_FN copies and swaps in one pass, which is cheaper
//   than copying and then swapping in a second pass. Depending on future VM/GC changes, the
//   swapped case might need to be revisited.
#define PEEKER(SCALAR_TYPE, JNI_NAME, SWAP_TYPE, SWAP_FN) { \
    if (swap) { \
        Scoped ## JNI_NAME ## ArrayRW elements(env, dst); \
        if (elements.get() == NULL) { \
            return; \
        } \
        const SWAP_TYPE* src = cast<const SWAP_TYPE*>(srcAddress); \
        SWAP_FN(reinterpret_cast<SWAP_TYPE*>(elements.get()) + dstOffset, src, count); \
    } else { \
        const SCALAR_TYPE* src = cast<const SCALAR_TYPE*>(srcAddress); \
        env->Set ## JNI_NAME ## ArrayRegion(dst, dstOffset, count, src); \
    } \
}

static void Memory_peekCharArray(JNIEnv* env, jclass, jlong srcAddress, jcharArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jchar, Char, jshort, swapShorts);
}

static void Memory_peekDoubleArray(JNIEnv* env, jclass, jlong srcAddress, jdoubleArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jdouble, Double, jlong, swapLongs);
}

static void Memory_peekFloatArray(JNIEnv* env, jclass, jlong srcAddress, jfloatArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jfloat, Float, jint, swapInts);
}

static void Memory_peekIntArray(JNIEnv* env, jclass, jlong srcAddress, jintArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jint, Int, jint, swapInts);
}

static void Memory_peekLongArray(JNIEnv* env, jclass, jlong srcAddress, jlongArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jlong, Long, jlong, swapLongs);
}

static void Memory_peekShortArray(JNIEnv* env, jclass, jlong srcAddress, jshortArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jshort, Short, jshort, swapShorts);
}

static void Memory_pokeByte(JNIEnv*, jclass, jlong dstAddress, jbyte value) {
    *cast<jbyte*>(dstAddress) = value;
}

static void Memory_pokeByteArray(JNIEnv* env, jclass, jlong dstAddress, jbyteArray src, jint offset, jint length) {
    env->GetByteArrayRegion(src, offset, length, cast<jbyte*>(dstAddress));
}

// Implements the pokeXArray methods:
// - For unswapped access, we just use the JNI GetXArrayRegion functions.
// - For swapped access, we use GetXArrayElements and our own copy-and-swap routines.
//   GetXArrayElements is disproportionately cheap on Dalvik because it doesn't copy (as opposed
//   to Hotspot, which always copies). The SWAP_FN copies and swaps in one pass, which is cheaper
//   than copying and then swapping in a second pass. Depending on future VM/GC changes, the
//   swapped case might need to be revisited.
#define POKER(SCALAR_TYPE, JNI_NAME, SWAP_TYPE, SWAP_FN) { \
    if (swap) { \
        Scoped ## JNI_NAME ## ArrayRO elements(env, src); \
        if (elements.get() == NULL) { \
            return; \
        } \
        const SWAP_TYPE* src = reinterpret_cast<const SWAP_TYPE*>(elements.get()) + srcOffset; \
        SWAP_FN(cast<SWAP_TYPE*>(dstAddress), src, count); \
    } else { \
        env->Get ## JNI_NAME ## ArrayRegion(src, srcOffset, count, cast<SCALAR_TYPE*>(dstAddress)); \
    } \
}

static void Memory_pokeCharArray(JNIEnv* env, jclass, jlong dstAddress, jcharArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jchar, Char, jshort, swapShorts);
}

static void Memory_pokeDoubleArray(JNIEnv* env, jclass, jlong dstAddress, jdoubleArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jdouble, Double, jlong, swapLongs);
}

static void Memory_pokeFloatArray(JNIEnv* env, jclass, jlong dstAddress, jfloatArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jfloat, Float, jint, swapInts);
}

static void Memory_pokeIntArray(JNIEnv* env, jclass, jlong dstAddress, jintArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jint, Int, jint, swapInts);
}

static void Memory_pokeLongArray(JNIEnv* env, jclass, jlong dstAddress, jlongArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jlong, Long, jlong, swapLongs);
}

static void Memory_pokeShortArray(JNIEnv* env, jclass, jlong dstAddress, jshortArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jshort, Short, jshort, swapShorts);
}

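// The *Native peeks and pokes access memory in host byte order; any byte swapping is left to
// the caller.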
static jshort Memory_peekShortNative(JNIEnv*, jclass, jlong srcAddress) {
    return *cast<const jshort*>(srcAddress);
}

static void Memory_pokeShortNative(JNIEnv*, jclass, jlong dstAddress, jshort value) {
    *cast<jshort*>(dstAddress) = value;
}

static jint Memory_peekIntNative(JNIEnv*, jclass, jlong srcAddress) {
    return *cast<const jint*>(srcAddress);
}

static void Memory_pokeIntNative(JNIEnv*, jclass, jlong dstAddress, jint value) {
    *cast<jint*>(dstAddress) = value;
}

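// 64-bit loads and stores may fault on misaligned addresses on ARM and MIPS, so check the
// address against LONG_ALIGNMENT_MASK and fall back to the packed-struct helpers if needed.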
static jlong Memory_peekLongNative(JNIEnv*, jclass, jlong srcAddress) {
    jlong result;
    const jlong* src = cast<const jlong*>(srcAddress);
    if ((srcAddress & LONG_ALIGNMENT_MASK) == 0) {
        result = *src;
    } else {
        result = get_unaligned<jlong>(src);
    }
    return result;
}

static void Memory_pokeLongNative(JNIEnv*, jclass, jlong dstAddress, jlong value) {
    jlong* dst = cast<jlong*>(dstAddress);
    if ((dstAddress & LONG_ALIGNMENT_MASK) == 0) {
        *dst = value;
    } else {
        put_unaligned<jlong>(dst, value);
    }
}

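// Copy byteCount bytes from src to dst, byte-swapping each sizeofElement-sized chunk when
// swap is set; byteCount is expected to be a multiple of sizeofElement.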
static void unsafeBulkCopy(jbyte* dst, const jbyte* src, jint byteCount,
        jint sizeofElement, jboolean swap) {
    if (!swap) {
        memcpy(dst, src, byteCount);
        return;
    }

    if (sizeofElement == 2) {
        jshort* dstShorts = reinterpret_cast<jshort*>(dst);
        const jshort* srcShorts = reinterpret_cast<const jshort*>(src);
        swapShorts(dstShorts, srcShorts, byteCount / 2);
    } else if (sizeofElement == 4) {
        jint* dstInts = reinterpret_cast<jint*>(dst);
        const jint* srcInts = reinterpret_cast<const jint*>(src);
        swapInts(dstInts, srcInts, byteCount / 4);
    } else if (sizeofElement == 8) {
        jlong* dstLongs = reinterpret_cast<jlong*>(dst);
        const jlong* srcLongs = reinterpret_cast<const jlong*>(src);
        swapLongs(dstLongs, srcLongs, byteCount / 8);
    }
}

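// Copy byteCount bytes from the Java byte[] srcArray into the primitive array dstObject,
// optionally swapping element-sized chunks. The destination is accessed via
// GetPrimitiveArrayCritical (which may pin or copy it) for the duration of the copy.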
static void Memory_unsafeBulkGet(JNIEnv* env, jclass, jobject dstObject, jint dstOffset,
        jint byteCount, jbyteArray srcArray, jint srcOffset, jint sizeofElement, jboolean swap) {
    ScopedByteArrayRO srcBytes(env, srcArray);
    if (srcBytes.get() == NULL) {
        return;
    }
    jarray dstArray = reinterpret_cast<jarray>(dstObject);
    jbyte* dstBytes = reinterpret_cast<jbyte*>(env->GetPrimitiveArrayCritical(dstArray, NULL));
    if (dstBytes == NULL) {
        return;
    }
    jbyte* dst = dstBytes + dstOffset*sizeofElement;
    const jbyte* src = srcBytes.get() + srcOffset;
    unsafeBulkCopy(dst, src, byteCount, sizeofElement, swap);
    env->ReleasePrimitiveArrayCritical(dstArray, dstBytes, 0);
}

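// The inverse of unsafeBulkGet: copy byteCount bytes from the primitive array srcObject into
// the Java byte[] dstArray, optionally swapping element-sized chunks.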
static void Memory_unsafeBulkPut(JNIEnv* env, jclass, jbyteArray dstArray, jint dstOffset,
        jint byteCount, jobject srcObject, jint srcOffset, jint sizeofElement, jboolean swap) {
    ScopedByteArrayRW dstBytes(env, dstArray);
    if (dstBytes.get() == NULL) {
        return;
    }
    jarray srcArray = reinterpret_cast<jarray>(srcObject);
    jbyte* srcBytes = reinterpret_cast<jbyte*>(env->GetPrimitiveArrayCritical(srcArray, NULL));
    if (srcBytes == NULL) {
        return;
    }
    jbyte* dst = dstBytes.get() + dstOffset;
    const jbyte* src = srcBytes + srcOffset*sizeofElement;
    unsafeBulkCopy(dst, src, byteCount, sizeofElement, swap);
    env->ReleasePrimitiveArrayCritical(srcArray, srcBytes, 0);
}

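// JNI registration table for libcore.io.Memory. A leading '!' in a signature requests the
// VM's fast native call path for that method.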
static JNINativeMethod gMethods[] = {
    NATIVE_METHOD(Memory, memmove, "(Ljava/lang/Object;ILjava/lang/Object;IJ)V"),
    NATIVE_METHOD(Memory, peekByte, "!(J)B"),
    NATIVE_METHOD(Memory, peekByteArray, "(J[BII)V"),
    NATIVE_METHOD(Memory, peekCharArray, "(J[CIIZ)V"),
    NATIVE_METHOD(Memory, peekDoubleArray, "(J[DIIZ)V"),
    NATIVE_METHOD(Memory, peekFloatArray, "(J[FIIZ)V"),
    NATIVE_METHOD(Memory, peekIntNative, "!(J)I"),
    NATIVE_METHOD(Memory, peekIntArray, "(J[IIIZ)V"),
    NATIVE_METHOD(Memory, peekLongNative, "!(J)J"),
    NATIVE_METHOD(Memory, peekLongArray, "(J[JIIZ)V"),
    NATIVE_METHOD(Memory, peekShortNative, "!(J)S"),
    NATIVE_METHOD(Memory, peekShortArray, "(J[SIIZ)V"),
    NATIVE_METHOD(Memory, pokeByte, "!(JB)V"),
    NATIVE_METHOD(Memory, pokeByteArray, "(J[BII)V"),
    NATIVE_METHOD(Memory, pokeCharArray, "(J[CIIZ)V"),
    NATIVE_METHOD(Memory, pokeDoubleArray, "(J[DIIZ)V"),
    NATIVE_METHOD(Memory, pokeFloatArray, "(J[FIIZ)V"),
    NATIVE_METHOD(Memory, pokeIntNative, "!(JI)V"),
    NATIVE_METHOD(Memory, pokeIntArray, "(J[IIIZ)V"),
    NATIVE_METHOD(Memory, pokeLongNative, "!(JJ)V"),
    NATIVE_METHOD(Memory, pokeLongArray, "(J[JIIZ)V"),
    NATIVE_METHOD(Memory, pokeShortNative, "!(JS)V"),
    NATIVE_METHOD(Memory, pokeShortArray, "(J[SIIZ)V"),
    NATIVE_METHOD(Memory, unsafeBulkGet, "(Ljava/lang/Object;II[BIIZ)V"),
    NATIVE_METHOD(Memory, unsafeBulkPut, "([BIILjava/lang/Object;IIZ)V"),
};

void register_libcore_io_Memory(JNIEnv* env) {
    jniRegisterNativeMethods(env, "libcore/io/Memory", gMethods, NELEM(gMethods));
}