/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Memory"

#include "JNIHelp.h"
#include "JniConstants.h"
#include "ScopedBytes.h"
#include "ScopedPrimitiveArray.h"
#include "UniquePtr.h"

#include <byteswap.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#if defined(__arm__)
// 32-bit ARM has load/store alignment restrictions for longs.
#define LONG_ALIGNMENT_MASK 0x3
#define INT_ALIGNMENT_MASK 0x0
#define SHORT_ALIGNMENT_MASK 0x0
#elif defined(__mips__)
// MIPS has load/store alignment restrictions for longs, ints and shorts.
#define LONG_ALIGNMENT_MASK 0x7
#define INT_ALIGNMENT_MASK 0x3
#define SHORT_ALIGNMENT_MASK 0x1
#elif defined(__i386__)
// x86 can load anything at any alignment.
#define LONG_ALIGNMENT_MASK 0x0
#define INT_ALIGNMENT_MASK 0x0
#define SHORT_ALIGNMENT_MASK 0x0
#else
#error unknown load/store alignment restrictions for this architecture
#endif

// Use packed structures for access to unaligned data on targets with alignment restrictions.
// The compiler will generate appropriate code to access these structures without
// generating alignment exceptions.
template <typename T> static inline T get_unaligned(const T* address) {
    struct unaligned { T v; } __attribute__ ((packed));
    const unaligned* p = reinterpret_cast<const unaligned*>(address);
    return p->v;
}

template <typename T> static inline void put_unaligned(T* address, T v) {
    struct unaligned { T v; } __attribute__ ((packed));
    unaligned* p = reinterpret_cast<unaligned*>(address);
    p->v = v;
}

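// Converts an address passed in from Java as a jint into a native pointer.
// Addresses are assumed to fit in 32 bits here; only 32-bit targets are handled above.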
template <typename T> static T cast(jint address) {
    return reinterpret_cast<T>(static_cast<uintptr_t>(address));
}

// Byte-swap 2 jshort values packed in a jint.
static inline jint bswap_2x16(jint v) {
    // v is initially ABCD
#if defined(__mips__) && defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
    __asm__ volatile ("wsbh %0, %0" : "+r" (v));  // v=BADC
#else
    v = bswap_32(v);                              // v=DCBA
    v = (v << 16) | ((v >> 16) & 0xffff);         // v=BADC
#endif
    return v;
}

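// Copies 'count' 16-bit values from srcShorts to dstShorts, byte-swapping each one.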
static inline void swapShorts(jshort* dstShorts, const jshort* srcShorts, size_t count) {
    // Do 32-bit swaps as long as possible...
    jint* dst = reinterpret_cast<jint*>(dstShorts);
    const jint* src = reinterpret_cast<const jint*>(srcShorts);

    if ((reinterpret_cast<uintptr_t>(dst) & INT_ALIGNMENT_MASK) == 0 &&
        (reinterpret_cast<uintptr_t>(src) & INT_ALIGNMENT_MASK) == 0) {
        for (size_t i = 0; i < count / 2; ++i) {
            jint v = *src++;
            *dst++ = bswap_2x16(v);
        }
        // ...with one last 16-bit swap if necessary.
        if ((count % 2) != 0) {
            jshort v = *reinterpret_cast<const jshort*>(src);
            *reinterpret_cast<jshort*>(dst) = bswap_16(v);
        }
    } else {
        for (size_t i = 0; i < count / 2; ++i) {
            jint v = get_unaligned<jint>(src++);
            put_unaligned<jint>(dst++, bswap_2x16(v));
        }
        if ((count % 2) != 0) {
            jshort v = get_unaligned<jshort>(reinterpret_cast<const jshort*>(src));
            put_unaligned<jshort>(reinterpret_cast<jshort*>(dst), bswap_16(v));
        }
    }
}

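// Copies 'count' 32-bit values from srcInts to dstInts, byte-swapping each one.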
static inline void swapInts(jint* dstInts, const jint* srcInts, size_t count) {
    if ((reinterpret_cast<uintptr_t>(dstInts) & INT_ALIGNMENT_MASK) == 0 &&
        (reinterpret_cast<uintptr_t>(srcInts) & INT_ALIGNMENT_MASK) == 0) {
        for (size_t i = 0; i < count; ++i) {
            jint v = *srcInts++;
            *dstInts++ = bswap_32(v);
        }
    } else {
        for (size_t i = 0; i < count; ++i) {
            jint v = get_unaligned<jint>(srcInts++);
            put_unaligned<jint>(dstInts++, bswap_32(v));
        }
    }
}

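// Copies 'count' 64-bit values from srcLongs to dstLongs, byte-swapping each one.
// Each 64-bit swap is done as two 32-bit swaps with the halves exchanged, so only
// 32-bit alignment is required.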
static inline void swapLongs(jlong* dstLongs, const jlong* srcLongs, size_t count) {
    jint* dst = reinterpret_cast<jint*>(dstLongs);
    const jint* src = reinterpret_cast<const jint*>(srcLongs);
    if ((reinterpret_cast<uintptr_t>(dstLongs) & INT_ALIGNMENT_MASK) == 0 &&
        (reinterpret_cast<uintptr_t>(srcLongs) & INT_ALIGNMENT_MASK) == 0) {
        for (size_t i = 0; i < count; ++i) {
            jint v1 = *src++;
            jint v2 = *src++;
            *dst++ = bswap_32(v2);
            *dst++ = bswap_32(v1);
        }
    } else {
        for (size_t i = 0; i < count; ++i) {
            jint v1 = get_unaligned<jint>(src++);
            jint v2 = get_unaligned<jint>(src++);
            put_unaligned<jint>(dst++, bswap_32(v2));
            put_unaligned<jint>(dst++, bswap_32(v1));
        }
    }
}

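// Copies 'length' bytes from srcObject+srcOffset to dstObject+dstOffset. Each object is
// resolved to a raw pointer via ScopedBytes; memmove handles overlapping ranges.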
static void Memory_memmove(JNIEnv* env, jclass, jobject dstObject, jint dstOffset, jobject srcObject, jint srcOffset, jlong length) {
    ScopedBytesRW dstBytes(env, dstObject);
    if (dstBytes.get() == NULL) {
        return;
    }
    ScopedBytesRO srcBytes(env, srcObject);
    if (srcBytes.get() == NULL) {
        return;
    }
    memmove(dstBytes.get() + dstOffset, srcBytes.get() + srcOffset, length);
}

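// The peek/poke functions below read and write native memory directly; addresses arrive
// from Java as jint values and are converted with cast() above.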
static jbyte Memory_peekByte(JNIEnv*, jclass, jint srcAddress) {
    return *cast<const jbyte*>(srcAddress);
}

static void Memory_peekByteArray(JNIEnv* env, jclass, jint srcAddress, jbyteArray dst, jint dstOffset, jint byteCount) {
    env->SetByteArrayRegion(dst, dstOffset, byteCount, cast<const jbyte*>(srcAddress));
}

// Implements the peekXArray methods:
// - For unswapped access, we just use the JNI SetXArrayRegion functions.
// - For swapped access, we use GetXArrayElements and our own copy-and-swap routines.
//   GetXArrayElements is disproportionately cheap on Dalvik because it doesn't copy (as opposed
//   to HotSpot, which always copies). The SWAP_FN copies and swaps in one pass, which is cheaper
//   than copying and then swapping in a second pass. Depending on future VM/GC changes, the
//   swapped case might need to be revisited.
#define PEEKER(SCALAR_TYPE, JNI_NAME, SWAP_TYPE, SWAP_FN) { \
    if (swap) { \
        Scoped ## JNI_NAME ## ArrayRW elements(env, dst); \
        if (elements.get() == NULL) { \
            return; \
        } \
        const SWAP_TYPE* src = cast<const SWAP_TYPE*>(srcAddress); \
        SWAP_FN(reinterpret_cast<SWAP_TYPE*>(elements.get()) + dstOffset, src, count); \
    } else { \
        const SCALAR_TYPE* src = cast<const SCALAR_TYPE*>(srcAddress); \
        env->Set ## JNI_NAME ## ArrayRegion(dst, dstOffset, count, src); \
    } \
}

static void Memory_peekCharArray(JNIEnv* env, jclass, jint srcAddress, jcharArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jchar, Char, jshort, swapShorts);
}

static void Memory_peekDoubleArray(JNIEnv* env, jclass, jint srcAddress, jdoubleArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jdouble, Double, jlong, swapLongs);
}

static void Memory_peekFloatArray(JNIEnv* env, jclass, jint srcAddress, jfloatArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jfloat, Float, jint, swapInts);
}

static void Memory_peekIntArray(JNIEnv* env, jclass, jint srcAddress, jintArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jint, Int, jint, swapInts);
}

static void Memory_peekLongArray(JNIEnv* env, jclass, jint srcAddress, jlongArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jlong, Long, jlong, swapLongs);
}

static void Memory_peekShortArray(JNIEnv* env, jclass, jint srcAddress, jshortArray dst, jint dstOffset, jint count, jboolean swap) {
    PEEKER(jshort, Short, jshort, swapShorts);
}

static void Memory_pokeByte(JNIEnv*, jclass, jint dstAddress, jbyte value) {
    *cast<jbyte*>(dstAddress) = value;
}

static void Memory_pokeByteArray(JNIEnv* env, jclass, jint dstAddress, jbyteArray src, jint offset, jint length) {
    env->GetByteArrayRegion(src, offset, length, cast<jbyte*>(dstAddress));
}

// Implements the pokeXArray methods:
// - For unswapped access, we just use the JNI GetXArrayRegion functions.
// - For swapped access, we use GetXArrayElements and our own copy-and-swap routines.
//   GetXArrayElements is disproportionately cheap on Dalvik because it doesn't copy (as opposed
//   to HotSpot, which always copies). The SWAP_FN copies and swaps in one pass, which is cheaper
//   than copying and then swapping in a second pass. Depending on future VM/GC changes, the
//   swapped case might need to be revisited.
#define POKER(SCALAR_TYPE, JNI_NAME, SWAP_TYPE, SWAP_FN) { \
    if (swap) { \
        Scoped ## JNI_NAME ## ArrayRO elements(env, src); \
        if (elements.get() == NULL) { \
            return; \
        } \
        const SWAP_TYPE* src = reinterpret_cast<const SWAP_TYPE*>(elements.get()) + srcOffset; \
        SWAP_FN(cast<SWAP_TYPE*>(dstAddress), src, count); \
    } else { \
        env->Get ## JNI_NAME ## ArrayRegion(src, srcOffset, count, cast<SCALAR_TYPE*>(dstAddress)); \
    } \
}

static void Memory_pokeCharArray(JNIEnv* env, jclass, jint dstAddress, jcharArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jchar, Char, jshort, swapShorts);
}

static void Memory_pokeDoubleArray(JNIEnv* env, jclass, jint dstAddress, jdoubleArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jdouble, Double, jlong, swapLongs);
}

static void Memory_pokeFloatArray(JNIEnv* env, jclass, jint dstAddress, jfloatArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jfloat, Float, jint, swapInts);
}

static void Memory_pokeIntArray(JNIEnv* env, jclass, jint dstAddress, jintArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jint, Int, jint, swapInts);
}

static void Memory_pokeLongArray(JNIEnv* env, jclass, jint dstAddress, jlongArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jlong, Long, jlong, swapLongs);
}

static void Memory_pokeShortArray(JNIEnv* env, jclass, jint dstAddress, jshortArray src, jint srcOffset, jint count, jboolean swap) {
    POKER(jshort, Short, jshort, swapShorts);
}

static jshort Memory_peekShort(JNIEnv*, jclass, jint srcAddress, jboolean swap) {
    jshort result = *cast<const jshort*>(srcAddress);
    if (swap) {
        result = bswap_16(result);
    }
    return result;
}

static void Memory_pokeShort(JNIEnv*, jclass, jint dstAddress, jshort value, jboolean swap) {
    if (swap) {
        value = bswap_16(value);
    }
    *cast<jshort*>(dstAddress) = value;
}

static jint Memory_peekInt(JNIEnv*, jclass, jint srcAddress, jboolean swap) {
    jint result = *cast<const jint*>(srcAddress);
    if (swap) {
        result = bswap_32(result);
    }
    return result;
}

static void Memory_pokeInt(JNIEnv*, jclass, jint dstAddress, jint value, jboolean swap) {
    if (swap) {
        value = bswap_32(value);
    }
    *cast<jint*>(dstAddress) = value;
}

static jlong Memory_peekLong(JNIEnv*, jclass, jint srcAddress, jboolean swap) {
    jlong result;
    const jlong* src = cast<const jlong*>(srcAddress);
    if ((srcAddress & LONG_ALIGNMENT_MASK) == 0) {
        result = *src;
    } else {
        result = get_unaligned<jlong>(src);
    }
    if (swap) {
        result = bswap_64(result);
    }
    return result;
}

static void Memory_pokeLong(JNIEnv*, jclass, jint dstAddress, jlong value, jboolean swap) {
    jlong* dst = cast<jlong*>(dstAddress);
    if (swap) {
        value = bswap_64(value);
    }
    if ((dstAddress & LONG_ALIGNMENT_MASK) == 0) {
        *dst = value;
    } else {
        put_unaligned<jlong>(dst, value);
    }
}

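// Copies 'byteCount' bytes from src to dst; when 'swap' is set, byte-swaps each element of
// 'sizeofElement' bytes (2, 4, or 8) as it copies.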
static void unsafeBulkCopy(jbyte* dst, const jbyte* src, jint byteCount,
        jint sizeofElement, jboolean swap) {
    if (!swap) {
        memcpy(dst, src, byteCount);
        return;
    }

    if (sizeofElement == 2) {
        jshort* dstShorts = reinterpret_cast<jshort*>(dst);
        const jshort* srcShorts = reinterpret_cast<const jshort*>(src);
        swapShorts(dstShorts, srcShorts, byteCount / 2);
    } else if (sizeofElement == 4) {
        jint* dstInts = reinterpret_cast<jint*>(dst);
        const jint* srcInts = reinterpret_cast<const jint*>(src);
        swapInts(dstInts, srcInts, byteCount / 4);
    } else if (sizeofElement == 8) {
        jlong* dstLongs = reinterpret_cast<jlong*>(dst);
        const jlong* srcLongs = reinterpret_cast<const jlong*>(src);
        swapLongs(dstLongs, srcLongs, byteCount / 8);
    }
}

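// Copies 'byteCount' bytes out of a byte[] (srcArray, starting at srcOffset) into a primitive
// array (dstObject, starting at element index dstOffset), optionally byte-swapping elements.
// The destination is pinned with GetPrimitiveArrayCritical for the duration of the copy.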
static void Memory_unsafeBulkGet(JNIEnv* env, jclass, jobject dstObject, jint dstOffset,
        jint byteCount, jbyteArray srcArray, jint srcOffset, jint sizeofElement, jboolean swap) {
    ScopedByteArrayRO srcBytes(env, srcArray);
    if (srcBytes.get() == NULL) {
        return;
    }
    jarray dstArray = reinterpret_cast<jarray>(dstObject);
    jbyte* dstBytes = reinterpret_cast<jbyte*>(env->GetPrimitiveArrayCritical(dstArray, NULL));
    if (dstBytes == NULL) {
        return;
    }
    jbyte* dst = dstBytes + dstOffset*sizeofElement;
    const jbyte* src = srcBytes.get() + srcOffset;
    unsafeBulkCopy(dst, src, byteCount, sizeofElement, swap);
    env->ReleasePrimitiveArrayCritical(dstArray, dstBytes, 0);
}

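// The reverse of unsafeBulkGet: copies 'byteCount' bytes from a primitive array (srcObject,
// starting at element index srcOffset) into a byte[] (dstArray, starting at dstOffset),
// optionally byte-swapping elements. The source is pinned with GetPrimitiveArrayCritical.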
static void Memory_unsafeBulkPut(JNIEnv* env, jclass, jbyteArray dstArray, jint dstOffset,
        jint byteCount, jobject srcObject, jint srcOffset, jint sizeofElement, jboolean swap) {
    ScopedByteArrayRW dstBytes(env, dstArray);
    if (dstBytes.get() == NULL) {
        return;
    }
    jarray srcArray = reinterpret_cast<jarray>(srcObject);
    jbyte* srcBytes = reinterpret_cast<jbyte*>(env->GetPrimitiveArrayCritical(srcArray, NULL));
    if (srcBytes == NULL) {
        return;
    }
    jbyte* dst = dstBytes.get() + dstOffset;
    const jbyte* src = srcBytes + srcOffset*sizeofElement;
    unsafeBulkCopy(dst, src, byteCount, sizeofElement, swap);
    env->ReleasePrimitiveArrayCritical(srcArray, srcBytes, 0);
}

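// JNI registration table for libcore.io.Memory. The "!" prefix on some signatures is a
// Dalvik-specific marker (not standard JNI) requesting the runtime's "fast JNI" call path
// for these short, non-blocking methods.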
static JNINativeMethod gMethods[] = {
    NATIVE_METHOD(Memory, memmove, "(Ljava/lang/Object;ILjava/lang/Object;IJ)V"),
    NATIVE_METHOD(Memory, peekByte, "!(I)B"),
    NATIVE_METHOD(Memory, peekByteArray, "(I[BII)V"),
    NATIVE_METHOD(Memory, peekCharArray, "(I[CIIZ)V"),
    NATIVE_METHOD(Memory, peekDoubleArray, "(I[DIIZ)V"),
    NATIVE_METHOD(Memory, peekFloatArray, "(I[FIIZ)V"),
    NATIVE_METHOD(Memory, peekInt, "!(IZ)I"),
    NATIVE_METHOD(Memory, peekIntArray, "(I[IIIZ)V"),
    NATIVE_METHOD(Memory, peekLong, "!(IZ)J"),
    NATIVE_METHOD(Memory, peekLongArray, "(I[JIIZ)V"),
    NATIVE_METHOD(Memory, peekShort, "!(IZ)S"),
    NATIVE_METHOD(Memory, peekShortArray, "(I[SIIZ)V"),
    NATIVE_METHOD(Memory, pokeByte, "!(IB)V"),
    NATIVE_METHOD(Memory, pokeByteArray, "(I[BII)V"),
    NATIVE_METHOD(Memory, pokeCharArray, "(I[CIIZ)V"),
    NATIVE_METHOD(Memory, pokeDoubleArray, "(I[DIIZ)V"),
    NATIVE_METHOD(Memory, pokeFloatArray, "(I[FIIZ)V"),
    NATIVE_METHOD(Memory, pokeInt, "!(IIZ)V"),
    NATIVE_METHOD(Memory, pokeIntArray, "(I[IIIZ)V"),
    NATIVE_METHOD(Memory, pokeLong, "!(IJZ)V"),
    NATIVE_METHOD(Memory, pokeLongArray, "(I[JIIZ)V"),
    NATIVE_METHOD(Memory, pokeShort, "!(ISZ)V"),
    NATIVE_METHOD(Memory, pokeShortArray, "(I[SIIZ)V"),
    NATIVE_METHOD(Memory, unsafeBulkGet, "(Ljava/lang/Object;II[BIIZ)V"),
    NATIVE_METHOD(Memory, unsafeBulkPut, "([BIILjava/lang/Object;IIZ)V"),
};

void register_libcore_io_Memory(JNIEnv* env) {
    jniRegisterNativeMethods(env, "libcore/io/Memory", gMethods, NELEM(gMethods));
}