Home | History | Annotate | Download | only in CodeGen
      1 // RUN: %clang_cc1 -O1 -triple arm64-apple-ios7 -target-feature +neon -ffreestanding -S -o - -emit-llvm %s | FileCheck %s
      2 
// Test ARM64 SIMD comparison and absolute-value intrinsics
      4 
      5 #include <arm_neon.h>
      6 
// vabsq_s64: 2x64-bit vector absolute value. Expected to lower to the
// target-specific llvm.aarch64.neon.abs intrinsic (no generic IR form),
// with nothing between the call and the return.
int64x2_t test_vabsq_s64(int64x2_t a1) {
  // CHECK: test_vabsq_s64
  return vabsq_s64(a1);
  // CHECK: llvm.aarch64.neon.abs.v2i64
  // CHECK-NEXT: ret
}
     13 
// vceqd_s64: scalar signed 64-bit equality compare. Expected to lower to a
// plain icmp plus sign-extend (all-ones/all-zeros mask), not an intrinsic call.
int64_t test_vceqd_s64(int64_t a1, int64_t a2) {
  // CHECK: test_vceqd_s64
  return vceqd_s64(a1, a2);
  // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp eq i64 %a1, %a2
  // CHECK: sext i1 [[BIT]] to i64
}
     20 
// vceqd_f64: scalar double equality compare. Must use an ordered fcmp (oeq)
// and sign-extend the i1 result into the 64-bit mask return value.
int64_t test_vceqd_f64(float64_t a1, float64_t a2) {
  // CHECK: test_vceqd_f64
  return vceqd_f64(a1, a2);
  // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = fcmp oeq double %a1, %a2
  // CHECK: sext i1 [[BIT]] to i64
}
     27 
// vcgtd_u64: scalar unsigned greater-than. The unsigned variant must emit an
// unsigned predicate (ugt), distinguishing it from the signed s64 form.
uint64_t test_vcgtd_u64(uint64_t a1, uint64_t a2) {
  // CHECK: test_vcgtd_u64
  return vcgtd_u64(a1, a2);
  // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp ugt i64 %a1, %a2
  // CHECK: sext i1 [[BIT]] to i64
}
     34 
// vcled_u64: scalar unsigned less-than-or-equal, lowered to icmp ule plus
// a sign-extended i1 mask result.
uint64_t test_vcled_u64(uint64_t a1, uint64_t a2) {
  // CHECK: test_vcled_u64
  return vcled_u64(a1, a2);
  // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp ule i64 %a1, %a2
  // CHECK: sext i1 [[BIT]] to i64
}
     41 
// vceqzd_s64: scalar compare against zero. The zero operand must be folded
// into the icmp as an immediate rather than materialized separately.
int64_t test_vceqzd_s64(int64_t a1) {
  // CHECK: test_vceqzd_s64
  return vceqzd_s64(a1);
  // CHECK: [[BIT:%[0-9a-zA-Z.]+]] = icmp eq i64 %a1, 0
  // CHECK: sext i1 [[BIT]] to i64
}
     48 
// vceqq_u64: 2x64-bit vector equality. Lowered to a generic vector icmp
// (the following sext to the mask vector is implied and not checked here).
uint64x2_t test_vceqq_u64(uint64x2_t a1, uint64x2_t a2) {
  // CHECK: test_vceqq_u64
  return vceqq_u64(a1, a2);
  // CHECK:  icmp eq <2 x i64> %a1, %a2
}
     54 
// vcgeq_s64: 2x64-bit signed greater-than-or-equal — must use the signed
// predicate (sge); contrast with the unsigned variant below.
uint64x2_t test_vcgeq_s64(int64x2_t a1, int64x2_t a2) {
  // CHECK: test_vcgeq_s64
  return vcgeq_s64(a1, a2);
  // CHECK:  icmp sge <2 x i64> %a1, %a2
}
     60 
// vcgeq_u64: 2x64-bit unsigned greater-than-or-equal — must use the
// unsigned predicate (uge).
uint64x2_t test_vcgeq_u64(uint64x2_t a1, uint64x2_t a2) {
  // CHECK: test_vcgeq_u64
  return vcgeq_u64(a1, a2);
  // CHECK:  icmp uge <2 x i64> %a1, %a2
}
     66 
// vcgtq_s64: 2x64-bit signed greater-than (icmp sgt).
uint64x2_t test_vcgtq_s64(int64x2_t a1, int64x2_t a2) {
  // CHECK: test_vcgtq_s64
  return vcgtq_s64(a1, a2);
  // CHECK: icmp sgt <2 x i64> %a1, %a2
}
     72 
// vcgtq_u64: 2x64-bit unsigned greater-than (icmp ugt).
uint64x2_t test_vcgtq_u64(uint64x2_t a1, uint64x2_t a2) {
  // CHECK: test_vcgtq_u64
  return vcgtq_u64(a1, a2);
  // CHECK: icmp ugt <2 x i64> %a1, %a2
}
     78 
// vcleq_s64: 2x64-bit signed less-than-or-equal (icmp sle).
uint64x2_t test_vcleq_s64(int64x2_t a1, int64x2_t a2) {
  // CHECK: test_vcleq_s64
  return vcleq_s64(a1, a2);
  // CHECK: icmp sle <2 x i64> %a1, %a2
}
     84 
// vcleq_u64: 2x64-bit unsigned less-than-or-equal (icmp ule).
uint64x2_t test_vcleq_u64(uint64x2_t a1, uint64x2_t a2) {
  // CHECK: test_vcleq_u64
  return vcleq_u64(a1, a2);
  // CHECK: icmp ule <2 x i64> %a1, %a2
}
     90 
// vcltq_s64: 2x64-bit signed less-than (icmp slt).
uint64x2_t test_vcltq_s64(int64x2_t a1, int64x2_t a2) {
  // CHECK: test_vcltq_s64
  return vcltq_s64(a1, a2);
  // CHECK: icmp slt <2 x i64> %a1, %a2
}
     96 
// vcltq_u64: 2x64-bit unsigned less-than (icmp ult).
uint64x2_t test_vcltq_u64(uint64x2_t a1, uint64x2_t a2) {
  // CHECK: test_vcltq_u64
  return vcltq_u64(a1, a2);
  // CHECK: icmp ult <2 x i64> %a1, %a2
}
    102 
// vqabsq_s64: 2x64-bit saturating absolute value. Saturation has no generic
// IR form, so this must lower to the llvm.aarch64.neon.sqabs intrinsic,
// immediately followed by the return.
int64x2_t test_vqabsq_s64(int64x2_t a1) {
  // CHECK: test_vqabsq_s64
  return vqabsq_s64(a1);
  // CHECK: llvm.aarch64.neon.sqabs.v2i64(<2 x i64> %a1)
  // CHECK-NEXT: ret
}
    109