    Searched refs: unaligned_load (Results 1 - 9 of 9)

  /external/skia/src/opts/
SkChecksum_opts.h 23 static inline T unaligned_load(const uint8_t* src) { function in namespace:SK_OPTS_NS
45 a = _mm_crc32_u64(a, unaligned_load<uint64_t>(data+ 0));
46 b = _mm_crc32_u64(b, unaligned_load<uint64_t>(data+ 8));
47 c = _mm_crc32_u64(c, unaligned_load<uint64_t>(data+16));
56 hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));
63 hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));
72 hash32 = _mm_crc32_u32(hash32, unaligned_load<uint32_t>(data));
76 hash32 = _mm_crc32_u16(hash32, unaligned_load<uint16_t>(data));
80 hash32 = _mm_crc32_u8(hash32, unaligned_load<uint8_t>(data));
99 a = _mm_crc32_u32(a, unaligned_load<uint32_t>(data+0))
    [all...]
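
Note: the hit at line 23 only shows unaligned_load's signature. In code of this style it is almost always the memcpy idiom for reading from possibly unaligned memory without undefined behavior, feeding the SSE4.2 hardware-CRC intrinsics used at lines 45-80. A minimal sketch of that pattern follows; the intrinsics are real, but the helper body, the three-stream loop shape, and the final combine are assumptions for illustration, not Skia's exact code:

    #include <cstdint>
    #include <cstring>
    #include <nmmintrin.h>   // SSE4.2: _mm_crc32_u64 and friends

    // Read a T from possibly unaligned memory. memcpy is well-defined
    // regardless of src's alignment and compiles to a single plain load
    // on x86/ARM64 (a reinterpret_cast + dereference would be UB).
    template <typename T>
    static inline T unaligned_load(const uint8_t* src) {
        T val;
        memcpy(&val, src, sizeof(val));
        return val;
    }

    // Illustrative loop shape only: consume 24 bytes per iteration
    // through three independent CRC streams (a, b, c as at lines 45-47)
    // so the CPU can overlap the multi-cycle latency of _mm_crc32_u64.
    static uint64_t crc32c_sketch(const uint8_t* data, size_t len, uint64_t seed) {
        uint64_t a = seed, b = seed, c = seed;
        while (len >= 24) {
            a = _mm_crc32_u64(a, unaligned_load<uint64_t>(data +  0));
            b = _mm_crc32_u64(b, unaligned_load<uint64_t>(data +  8));
            c = _mm_crc32_u64(c, unaligned_load<uint64_t>(data + 16));
            data += 24;
            len  -= 24;
        }
        // 8/4/2/1-byte tails (lines 56-80 above) omitted here.
        return a ^ b ^ c;   // combine step is an assumption, not Skia's
    }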
  /external/skqp/src/opts/
SkChecksum_opts.h 23 static inline T unaligned_load(const uint8_t* src) { function in namespace:SK_OPTS_NS
45 a = _mm_crc32_u64(a, unaligned_load<uint64_t>(data+ 0));
46 b = _mm_crc32_u64(b, unaligned_load<uint64_t>(data+ 8));
47 c = _mm_crc32_u64(c, unaligned_load<uint64_t>(data+16));
56 hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));
63 hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));
72 hash32 = _mm_crc32_u32(hash32, unaligned_load<uint32_t>(data));
76 hash32 = _mm_crc32_u16(hash32, unaligned_load<uint16_t>(data));
80 hash32 = _mm_crc32_u8(hash32, unaligned_load<uint8_t>(data));
99 a = _mm_crc32_u32(a, unaligned_load<uint32_t>(data+0))
    [all...]
  /external/skia/src/jumper/
SkJumper_misc.h 24 SI T unaligned_load(const P* p) { // const void* would work too, but const P* helps ARMv7 codegen. function
38 return unaligned_load<Dst>(&src);
SkJumper_stages.cpp 264 return unaligned_load<U8>(&r);
472 return unaligned_load<U16>(&p); // We have two copies. Return (the lower) one.
477 return unaligned_load<U8>(&r);
529 *r = unaligned_load<U16>(&R);
530 *g = unaligned_load<U16>(&G);
531 *b = unaligned_load<U16>(&B);
553 *r = unaligned_load<U16>((uint16_t*)&rg + 0);
554 *g = unaligned_load<U16>((uint16_t*)&rg + 4);
555 *b = unaligned_load<U16>((uint16_t*)&ba + 0);
556 *a = unaligned_load<U16>((uint16_t*)&ba + 4)
    [all...]
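
Note: lines 553-556 show the second job this helper does in SkJumper: type-punning. A wide vector is written to a local, then re-read through unaligned_load at a lane offset to peel off its halves. A hedged sketch of the idiom using GCC/Clang vector extensions; the U16x4/U16x8 names and lane counts are assumptions, not SkJumper's real per-target U16:

    #include <cstdint>
    #include <cstring>

    // SkJumper's version is templated on the pointer type too (line 24).
    template <typename T, typename P>
    static inline T unaligned_load(const P* p) {
        T v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    // Assumed lane counts for illustration only.
    using U16x4 = uint16_t __attribute__((vector_size(8)));    // 4 lanes
    using U16x8 = uint16_t __attribute__((vector_size(16)));   // 8 lanes

    // Mirror of the rg/ba split at lines 553-556: re-read a wide vector
    // at a lane offset to extract its low and high halves.
    static void split_halves(U16x8 v, U16x4* lo, U16x4* hi) {
        *lo = unaligned_load<U16x4>((const uint16_t*)&v + 0);   // lanes 0..3
        *hi = unaligned_load<U16x4>((const uint16_t*)&v + 4);   // lanes 4..7
    }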
SkJumper_stages_lowp.cpp 250 x = cast<F>(I32(dx)) + unaligned_load<F>(iota);
828 auto mask = unaligned_load<U16>(ctx->mask);
    [all...]
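
Note: line 250 is the per-lane iota trick: broadcast the start-of-run pixel x (dx) and add {0,1,2,...} so each SIMD lane knows its own x coordinate. A small sketch under assumed vector types (F, the 4-lane width, and start_x are illustrative names, not SkJumper's definitions):

    #include <cstring>

    using F = float __attribute__((vector_size(16)));   // 4 lanes, assumed

    template <typename T, typename P>
    static inline T unaligned_load(const P* p) {
        T v;
        memcpy(&v, p, sizeof(v));
        return v;
    }

    static const float iota[] = {0, 1, 2, 3};

    // Mirror of line 250: GCC/Clang vector extensions broadcast the
    // scalar operand, so every lane gets dx plus its own lane index.
    static F start_x(int dx) {
        return (float)dx + unaligned_load<F>(iota);
    }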
  /external/skqp/src/jumper/
SkJumper_misc.h 24 SI T unaligned_load(const P* p) { // const void* would work too, but const P* helps ARMv7 codegen. function
38 return unaligned_load<Dst>(&src);
SkJumper_stages.cpp 264 return unaligned_load<U8>(&r);
472 return unaligned_load<U16>(&p); // We have two copies. Return (the lower) one.
477 return unaligned_load<U8>(&r);
529 *r = unaligned_load<U16>(&R);
530 *g = unaligned_load<U16>(&G);
531 *b = unaligned_load<U16>(&B);
553 *r = unaligned_load<U16>((uint16_t*)&rg + 0);
554 *g = unaligned_load<U16>((uint16_t*)&rg + 4);
555 *b = unaligned_load<U16>((uint16_t*)&ba + 0);
556 *a = unaligned_load<U16>((uint16_t*)&ba + 4)
    [all...]
SkJumper_stages_lowp.cpp 250 x = cast<F>(I32(dx)) + unaligned_load<F>(iota);
    [all...]
  /prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/lib/gcc/x86_64-w64-mingw32/4.8.3/plugin/include/
target.h 146 unaligned_load, enumerator in enum:vect_cost_for_stmt
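
Note: this last hit is unrelated to Skia. In GCC's target.h, unaligned_load is an enumerator of vect_cost_for_stmt, the statement-cost kinds the auto-vectorizer asks a target backend to price. A hedged sketch of how a cost callback might branch on it; only the unaligned_load enumerator is confirmed by the hit above, while the other enumerators and the numeric costs are illustrative:

    // Trimmed, illustrative version of the cost-kind enum.
    enum vect_cost_for_stmt {
        vector_load,
        unaligned_load,      // the enumerator found at target.h line 146
        vector_store,
        unaligned_store
        // ... GCC's real enum has more statement kinds ...
    };

    // A target cost hook in this spirit returns a relative cost per kind.
    static int stmt_cost(enum vect_cost_for_stmt kind) {
        switch (kind) {
            case unaligned_load:
            case unaligned_store: return 2;  // illustrative: unaligned pricier
            default:              return 1;  // illustrative baseline
        }
    }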
