
Lines Matching defs:uint64_t

97     typedef unsigned long long uint64_t;
107 typedef unsigned long long uint64_t;
114 uint64_t m64_u64[1];
395 #define _Ui64(a) (*(uint64_t*)&(a))
398 #define _SIGNBIT64 ((uint64_t)1 << 63)
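For orientation, a hedged sketch of what macros of this shape do: _Ui64 views a 64-bit object's bit pattern as uint64_t, and _SIGNBIT64 isolates bit 63. The helper below is my own illustration (not a line from this header) applying the same idea to a double's sign bit; memcpy is used instead of the macro's pointer cast only to keep the example free of strict-aliasing concerns.

    #include <stdint.h>
    #include <string.h>

    static int sign_bit_set(double x)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);             /* same reinterpretation as _Ui64 */
        return (bits & ((uint64_t)1 << 63)) != 0;   /* same constant as _SIGNBIT64 */
    }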
1190 uint64x2_t vld1q_u64(__transfersize(2) uint64_t const * ptr); // VLD1.64 {d0, d1}, [r0]
1202 uint64x1_t vld1_u64(__transfersize(1) uint64_t const * ptr); // VLD1.64 {d0}, [r0]
1215 uint64x2_t vld1q_lane_u64(__transfersize(1) uint64_t const * ptr, uint64x2_t vec, __constrange(0,1) int lane); // VLD1.64 {d0}, [r0]
1227 uint64x1_t vld1_lane_u64(__transfersize(1) uint64_t const * ptr, uint64x1_t vec, __constrange(0,0) int lane); //VLD1.64 {d0}, [r0]
1240 uint64x2_t vld1q_dup_u64(__transfersize(1) uint64_t const * ptr); // VLD1.64 {d0}, [r0]
1252 uint64x1_t vld1_dup_u64(__transfersize(1) uint64_t const * ptr); // VLD1.64 {d0}, [r0]
1266 void vst1q_u64(__transfersize(2) uint64_t * ptr, uint64x2_t val); // VST1.64 {d0, d1}, [r0]
1278 void vst1_u64(__transfersize(1) uint64_t * ptr, uint64x1_t val); // VST1.64 {d0}, [r0]
1303 uint64x1x2_t vld2_u64(__transfersize(2) uint64_t const * ptr); // VLD1.64 {d0, d1}, [r0]
1325 uint64x1x3_t vld3_u64(__transfersize(3) uint64_t const * ptr); // VLD1.64 {d0, d1, d2}, [r0]
1347 uint64x1x4_t vld4_u64(__transfersize(4) uint64_t const * ptr); // VLD1.64 {d0, d1, d2, d3}, [r0]
1360 uint64x1x2_t vld2_dup_u64(__transfersize(2) uint64_t const * ptr); // VLD1.64 {d0, d1}, [r0]
1372 uint64x1x3_t vld3_dup_u64(__transfersize(3) uint64_t const * ptr); // VLD1.64 {d0, d1, d2}, [r0]
1384 uint64x1x4_t vld4_dup_u64(__transfersize(4) uint64_t const * ptr); // VLD1.64 {d0, d1, d2, d3}, [r0]
1460 void vst2_u64_ptr(__transfersize(2) uint64_t * ptr, uint64x1x2_t * val); // VST1.64 {d0, d1}, [r0]
1482 void vst3_u64_ptr(__transfersize(3) uint64_t * ptr, uint64x1x3_t * val); // VST1.64 {d0, d1, d2}, [r0]
1504 void vst4_u64_ptr(__transfersize(4) uint64_t * ptr, uint64x1x4_t * val); // VST1.64 {d0, d1, d2, d3}, [r0]
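As a usage illustration only (assuming the NEON intrinsics declared above are available, e.g. via arm_neon.h or this header), a minimal round trip through the 64-bit load/store forms; the buffers and values are made up.

    #include <arm_neon.h>
    #include <stdint.h>

    static void copy_two_u64(const uint64_t src[2], uint64_t dst[2])
    {
        uint64x2_t v = vld1q_u64(src);   /* VLD1.64 {d0, d1}, [r0] */
        vst1q_u64(dst, v);               /* VST1.64 {d0, d1}, [r0] */
    }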
1585 uint64_t vget_lane_u64(uint64x1_t vec, __constrange(0,0) int lane); // VMOV r0,r0,d0
1587 uint64_t vgetq_lane_u64(uint64x2_t vec, __constrange(0,1) int lane); // VMOV r0,r0,d0
1608 uint64x1_t vset_lane_u64(uint64_t value, uint64x1_t vec, __constrange(0,0) int lane); // VMOV d0,r0,r0
1610 uint64x2_t vsetq_lane_u64(uint64_t value, uint64x2_t vec, __constrange(0,1) int lane); // VMOV d0,r0,r0
1612 int8x8_t vcreate_s8(uint64_t a); // VMOV d0,r0,r0
1613 int16x4_t vcreate_s16(uint64_t a); // VMOV d0,r0,r0
1614 int32x2_t vcreate_s32(uint64_t a); // VMOV d0,r0,r0
1615 float16x4_t vcreate_f16(uint64_t a); // VMOV d0,r0,r0
1616 float32x2_t vcreate_f32(uint64_t a); // VMOV d0,r0,r0
1617 uint8x8_t vcreate_u8(uint64_t a); // VMOV d0,r0,r0
1618 uint16x4_t vcreate_u16(uint64_t a); // VMOV d0,r0,r0
1619 uint32x2_t vcreate_u32(uint64_t a); // VMOV d0,r0,r0
1620 uint64x1_t vcreate_u64(uint64_t a); // VMOV d0,r0,r0
1621 poly8x8_t vcreate_p8(uint64_t a); // VMOV d0,r0,r0
1622 poly16x4_t vcreate_p16(uint64_t a); // VMOV d0,r0,r0
1623 int64x1_t vcreate_s64(uint64_t a); // VMOV d0,r0,r0
1645 uint64x1_t vdup_n_u64(uint64_t value); // VMOV d0,r0,r0
1647 uint64x2_t vdupq_n_u64(uint64_t value); // VMOV d0,r0,r0
1667 uint64x1_t vmov_n_u64(uint64_t value); // VMOV d0,r0,r0
1669 uint64x2_t vmovq_n_u64(uint64_t value); // VMOV d0,r0,r0
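A hedged usage sketch of the lane/create/dup intrinsics listed above (illustrative values; not part of the header). vcreate_u32 packs the 64-bit argument with the low half in lane 0, so the constant below yields lanes {1, 2}.

    #include <arm_neon.h>
    #include <stdint.h>

    static uint64_t lane_demo(void)
    {
        uint32x2_t pair = vcreate_u32(((uint64_t)2 << 32) | 1); /* lanes {1, 2} */
        uint64x2_t both = vdupq_n_u64(42);                      /* {42, 42} */
        both = vsetq_lane_u64(7, both, 1);                      /* {42, 7} */
        return vgetq_lane_u64(both, 1) + vget_lane_u32(pair, 0);
    }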
2676 _NEON2SSE_ALIGN_16 uint64_t pvec[2] = {0,0};
2677 _NEON2SSE_ALIGN_16 uint64_t mask[2] = {0xffffffffffffffff, 0xffffffffffffffff};
3197 uint64_t a64, b64;
3235 _NEON2SSE_ALIGN_16 uint64_t a64, b64;
3241 res.m64_u64[0] = ~(uint64_t)0;
3273 _NEON2SSE_ALIGN_16 uint64_t atmp[2], btmp[2], res[2];
3324 _NEON2SSE_ALIGN_16 uint64_t atmp[2], btmp[2], res[2];
3329 if (res[0] < atmp[0]) res[0] = ~(uint64_t)0;
3330 if (res[1] < atmp[1]) res[1] = ~(uint64_t)0;
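The overflow test above is the standard unsigned saturating-add idiom: the modular sum wrapped around exactly when it came out smaller than one of the operands, in which case the result clamps to all ones. A scalar sketch of the same check (illustrative, not a header line):

    #include <stdint.h>

    static uint64_t sat_add_u64(uint64_t a, uint64_t b)
    {
        uint64_t sum = a + b;                   /* wraps modulo 2^64 on overflow */
        return (sum < a) ? ~(uint64_t)0 : sum;  /* wrapped => saturate to all ones */
    }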
4554 uint64_t a64,b64;
4594 uint64_t a64, b64;
4633 _NEON2SSE_ALIGN_16 uint64_t res[2];
4678 _NEON2SSE_ALIGN_16 uint64_t atmp[2], btmp[2], res[2];
4964 _NEON2SSE_ALIGN_16 uint64_t cmask[] = {0x8000000000000000, 0x8000000000000000};
5878 _NEON2SSE_ALIGN_16 uint64_t res[2];
5879 if(a.m64_u32[0] > b.m64_u32[0]) res[0] = ( uint64_t) a.m64_u32[0] - ( uint64_t) b.m64_u32[0];
5880 else res[0] = ( uint64_t) b.m64_u32[0] - ( uint64_t) a.m64_u32[0];
5881 if(a.m64_u32[1] > b.m64_u32[1]) res[1] = ( uint64_t) a.m64_u32[1] - ( uint64_t) b.m64_u32[1];
5882 else res[1] = ( uint64_t) b.m64_u32[1] - ( uint64_t) a.m64_u32[1];
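The branchy form above computes the unsigned absolute difference without ever producing a negative intermediate; a scalar equivalent for one lane pair (my own illustration, not header code):

    #include <stdint.h>

    static uint64_t abd_u32(uint32_t a, uint32_t b)
    {
        /* subtract smaller from larger; result widened to 64 bits to match
           the "long" variant shown above */
        return (a > b) ? (uint64_t)a - (uint64_t)b
                       : (uint64_t)b - (uint64_t)a;
    }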
6358 res.m64_u64[0] = (uint64_t)a.m64_u32[0] + (uint64_t)a.m64_u32[1];
6427 _NEON2SSE_ALIGN_16 uint64_t res[2];
6429 res[0] = (uint64_t)atmp[0] + (uint64_t)atmp[1];
6430 res[1] = (uint64_t)atmp[2] + (uint64_t)atmp[3];
6480 res.m64_u64[0] = (uint64_t)b.m64_u32[0] + (uint64_t)b.m64_u32[1] + a.m64_u64[0];
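The same logic in scalar form, for orientation (illustrative helpers, not header code): pairwise add-long sums adjacent 32-bit lanes into a 64-bit result, and the accumulating form adds that pair sum onto an existing 64-bit accumulator.

    #include <stdint.h>

    static uint64_t paddl_pair_u32(uint32_t lo, uint32_t hi)
    {
        return (uint64_t)lo + (uint64_t)hi;          /* vpaddl-style pair sum */
    }

    static uint64_t padal_pair_u32(uint64_t acc, uint32_t lo, uint32_t hi)
    {
        return acc + (uint64_t)lo + (uint64_t)hi;    /* vpadal-style accumulate */
    }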
7023 SERIAL_SHIFT(uint64_t, int64_t, 2, 2)
7315 SERIAL_ROUNDING_SHIFT(uint64_t, int64_t, 2, 2)
7587 _NEON2SSE_ALIGN_16 uint64_t mask[] = {0x8000000000000000, 0x8000000000000000};
8245 uint64_t bmask;
8246 uint64_t a_i64 = *(uint64_t*)&a;
8247 bmask = ( uint64_t)1 << (64 - b);
8367 _NEON2SSE_ALIGN_16 uint64_t atmp[2], res[2];
8368 uint64_t bmask;
8370 bmask = ( uint64_t)1 << (64 - b);
8414 uint64_t limit;
8418 limit = (uint64_t) 1 << (64 - b);
8419 res.m64_u64[0] = ( ((uint64_t)a.m64_i64[0]) >= limit) ? ~((uint64_t)0) : a.m64_i64[0] << b;
8474 _NEON2SSE_ALIGN_16 uint64_t res[2];
8475 uint64_t limit;
8482 limit = (uint64_t) 1 << (64 - b);
8483 res[i] = ( ((uint64_t)atmp[i]) >= limit) ? ~((uint64_t)0) : atmp[i] << b;
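The saturation test above relies on limit = 1 << (64 - b) being the smallest input whose left shift by b no longer fits in 64 bits. A scalar sketch of the same check (illustrative; assumes 0 < b < 64 so the limit computation itself is well defined):

    #include <stdint.h>

    static uint64_t qshl_n_u64(uint64_t a, unsigned b)
    {
        uint64_t limit = (uint64_t)1 << (64 - b);     /* smallest overflowing input */
        return (a >= limit) ? ~(uint64_t)0 : a << b;  /* clamp to all ones on overflow */
    }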
9197 uint64x2_t vld1q_u64(__transfersize(2) uint64_t const * ptr); // VLD1.64 {d0, d1}, [r0]
9245 uint64x1_t vld1_u64(__transfersize(1) uint64_t const * ptr); // VLD1.64 {d0}, [r0]
9290 uint64x2_t vld1q_lane_u64(__transfersize(1) uint64_t const * ptr, uint64x2_t vec, __constrange(0,1) int lane); // VLD1.64 {d0}, [r0]
9351 uint64x1_t vld1_lane_u64(__transfersize(1) uint64_t const * ptr, uint64x1_t vec, __constrange(0,0) int lane); // VLD1.64 {d0}, [r0]
9352 _NEON2SSE_INLINE uint64x1_t vld1_lane_u64(__transfersize(1) uint64_t const * ptr, uint64x1_t vec, __constrange(0,0) int lane)
9382 #define vld1_lane_s64(ptr, vec, lane) vld1_lane_u64((uint64_t*)ptr, vec, lane)
9401 uint64x2_t vld1q_dup_u64(__transfersize(1) uint64_t const * ptr); // VLD1.64 {d0}, [r0]
9402 _NEON2SSE_INLINE uint64x2_t vld1q_dup_u64(__transfersize(1) uint64_t const * ptr)
9404 _NEON2SSE_ALIGN_16 uint64_t val[2] = {*(ptr), *(ptr)};
9418 #define vld1q_dup_s64(ptr) vld1q_dup_u64((uint64_t*)ptr)
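The vld1q_dup_u64 body above shows the duplicate-then-load pattern: the scalar is written twice into an aligned temporary and picked up as a single 128-bit vector. A rough SSE2 sketch of the same idea (my own helper name; the header's internal load macro is not reproduced here, and _mm_set1_epi64x would serve equally well):

    #include <emmintrin.h>
    #include <stdint.h>

    static __m128i dup_load_u64(const uint64_t *ptr)
    {
        uint64_t tmp[2] = {*ptr, *ptr};               /* scalar duplicated into both lanes */
        return _mm_loadu_si128((const __m128i *)tmp); /* one 128-bit load */
    }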
9463 uint64x1_t vld1_dup_u64(__transfersize(1) uint64_t const * ptr); // VLD1.64 {d0}, [r0]
9464 _NEON2SSE_INLINE uint64x1_t vld1_dup_u64(__transfersize(1) uint64_t const * ptr)
9484 #define vld1_dup_s64(ptr) vld1_dup_u64((uint64_t*)ptr)
9523 void vst1q_u64(__transfersize(2) uint64_t * ptr, uint64x2_t val); // VST1.64 {d0, d1}, [r0]
9589 void vst1_u64(__transfersize(1) uint64_t * ptr, uint64x1_t val); // VST1.64 {d0}, [r0]
9590 _NEON2SSE_INLINE void vst1_u64(__transfersize(1) uint64_t * ptr, uint64x1_t val)
9592 *(ptr) = *((uint64_t*)&val);
9607 #define vst1_s64(ptr,val) vst1_u64((uint64_t*)ptr,val)
9637 void vst1q_lane_u64(__transfersize(1) uint64_t * ptr, uint64x2_t val, __constrange(0,1) int lane); // VST1.64 {d0}, [r0]
9687 void vst1_lane_u64(__transfersize(1) uint64_t * ptr, uint64x1_t val, __constrange(0,0) int lane); // VST1.64 {d0}, [r0]
9688 _NEON2SSE_INLINE void vst1_lane_u64(__transfersize(1) uint64_t * ptr, uint64x1_t val, __constrange(0,0) int lane)
9704 #define vst1_lane_s64(ptr, val, lane) vst1_lane_u64((uint64_t*)ptr, val, lane)
9822 uint64x1x2_t vld2_u64(__transfersize(2) uint64_t const * ptr); // VLD1.64 {d0, d1}, [r0]
9823 _NEON2SSE_INLINE uint64x1x2_t vld2_u64(__transfersize(2) uint64_t const * ptr)
9841 #define vld2_s64(ptr) vld2_u64((uint64_t*)ptr)
10109 uint64x1x3_t vld3_u64(__transfersize(3) uint64_t const * ptr); // VLD1.64 {d0, d1, d2}, [r0]
10110 _NEON2SSE_INLINE uint64x1x3_t vld3_u64(__transfersize(3) uint64_t const * ptr) // VLD1.64 {d0, d1, d2}, [r0]
10129 #define vld3_s64(ptr) vld3_u64((uint64_t*)ptr)
10327 uint64x1x4_t vld4_u64(__transfersize(4) uint64_t const * ptr); // VLD1.64 {d0, d1, d2, d3}, [r0]
10328 _NEON2SSE_INLINE uint64x1x4_t vld4_u64(__transfersize(4) uint64_t const * ptr) // VLD1.64 {d0, d1, d2, d3}, [r0]
10348 #define vld4_s64(ptr) vld4_u64((uint64_t*)ptr)
10414 uint64x1x2_t vld2_dup_u64(__transfersize(2) uint64_t const * ptr); // VLD1.64 {d0, d1}, [r0]
10427 #define vld2_dup_s64(ptr) vld2_dup_u64((uint64_t*)ptr)
10497 uint64x1x3_t vld3_dup_u64(__transfersize(3) uint64_t const * ptr); // VLD1.64 {d0, d1, d2}, [r0]
10498 _NEON2SSE_INLINE uint64x1x3_t vld3_dup_u64(__transfersize(3) uint64_t const * ptr) // VLD1.64 {d0, d1, d2}, [r0]
10517 #define vld3_dup_s64(ptr) vld3_dup_u64((uint64_t*)ptr)
10594 uint64x1x4_t vld4_dup_u64(__transfersize(4) uint64_t const * ptr); // VLD1.64 {d0, d1, d2, d3}, [r0]
10595 _NEON2SSE_INLINE uint64x1x4_t vld4_dup_u64(__transfersize(4) uint64_t const * ptr) // VLD1.64 {d0, d1, d2, d3}, [r0]
10615 #define vld4_dup_s64(ptr) vld4_dup_u64((uint64_t*)ptr)
11129 //void vst2_u64(__transfersize(2) uint64_t * ptr, uint64x1x2_t val);// VST1.64 {d0, d1}, [r0]
11130 void vst2_u64_ptr(__transfersize(2) uint64_t * ptr, uint64x1x2_t * val);
11131 _NEON2SSE_INLINE void vst2_u64_ptr(__transfersize(2) uint64_t * ptr, uint64x1x2_t* val)
11148 #define vst2_s64(ptr,val) vst2_u64((uint64_t*) ptr,val)
11361 //void vst3_u64(__transfersize(3) uint64_t * ptr, uint64x1x3_t val)// VST1.64 {d0, d1, d2}, [r0]
11362 _NEON2SSE_INLINE void vst3_u64_ptr(__transfersize(3) uint64_t * ptr, uint64x1x3_t* val)
11380 #define vst3_s64(ptr, val) vst3_u64_ptr((uint64_t*)ptr, &val)
11552 //void vst4_u64(__transfersize(4) uint64_t * ptr, uint64x1x4_t val)// VST1.64 {d0, d1, d2, d3}, [r0]
11553 _NEON2SSE_INLINE void vst4_u64_ptr(__transfersize(4) uint64_t * ptr, uint64x1x4_t* val)
11573 #define vst4_s64(ptr, val) vst4_u64((uint64_t*)ptr, val)
11976 uint64_t vget_lane_u64(uint64x1_t vec, __constrange(0,0) int lane); // VMOV r0,r0,d0
11983 uint64_t vgetq_lane_u64(uint64x2_t vec, __constrange(0,1) int lane); // VMOV r0,r0,d0
12123 uint64x1_t vset_lane_u64(uint64_t value, uint64x1_t vec, __constrange(0,0) int lane); // VMOV d0,r0,r0
12124 _NEON2SSE_INLINE uint64x1_t vset_lane_u64(uint64_t value, uint64x1_t vec, __constrange(0,0) int lane)
12126 uint64_t val;
12134 uint64_t val;
12139 uint64x2_t vsetq_lane_u64(uint64_t value, uint64x2_t vec, __constrange(0,1) int lane); // VMOV d0,r0,r0
12146 int8x8_t vcreate_s8(uint64_t a); // VMOV d0,r0,r0
12150 int16x4_t vcreate_s16(uint64_t a); // VMOV d0,r0,r0
12153 int32x2_t vcreate_s32(uint64_t a); // VMOV d0,r0,r0
12156 float16x4_t vcreate_f16(uint64_t a); // VMOV d0,r0,r0
12159 float32x2_t vcreate_f32(uint64_t a); // VMOV d0,r0,r0
12162 uint8x8_t vcreate_u8(uint64_t a); // VMOV d0,r0,r0
12165 uint16x4_t vcreate_u16(uint64_t a); // VMOV d0,r0,r0
12168 uint32x2_t vcreate_u32(uint64_t a); // VMOV d0,r0,r0
12171 uint64x1_t vcreate_u64(uint64_t a); // VMOV d0,r0,r0
12175 poly8x8_t vcreate_p8(uint64_t a); // VMOV d0,r0,r0
12178 poly16x4_t vcreate_p16(uint64_t a); // VMOV d0,r0,r0
12181 int64x1_t vcreate_s64(uint64_t a); // VMOV d0,r0,r0
12299 uint64x1_t vdup_n_u64(uint64_t value); // VMOV d0,r0,r0
12300 _NEON2SSE_INLINE uint64x1_t vdup_n_u64(uint64_t value)
12314 uint64x2_t vdupq_n_u64(uint64_t value); // VMOV d0,r0,r0
12315 _NEON2SSE_INLINE uint64x2_t vdupq_n_u64(uint64_t value)
12317 _NEON2SSE_ALIGN_16 uint64_t val[2] = {value, value}; //value may be an immediate
12381 uint64x1_t vmov_n_u64(uint64_t value); // VMOV d0,r0,r0
12387 uint64x2_t vmovq_n_u64(uint64_t value); // VMOV d0,r0,r0