; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 | FileCheck %s
; RUN: llc < %s -mtriple=arm-apple-ios -mcpu=cortex-a8 -regalloc=basic | FileCheck %s
; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
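;
; REG_SEQUENCE is the target-independent machine pseudo-instruction that builds
; one wide register (e.g. a Q or QQ tuple) out of adjacent D subregisters.
; Forming the operands of the multi-register NEON loads/stores this way lets the
; register allocator place the values directly in consecutive D registers, so
; the CHECK-NOT lines below can require that no leftover vmov copies remain.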

%struct.int16x8_t = type { <8 x i16> }
%struct.int32x4_t = type { <4 x i32> }
%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
%struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
%struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }

define void @t1(i16* %i_ptr, i16* %o_ptr, %struct.int32x4_t* nocapture %vT0ptr, %struct.int32x4_t* nocapture %vT1ptr) nounwind {
entry:
; CHECK-LABEL: t1:
; CHECK: vld1.16
; CHECK-NOT: vmov d
; CHECK: vmovl.s16
; CHECK: vshrn.i32
; CHECK: vshrn.i32
; CHECK-NOT: vmov d
; CHECK-NEXT: vst1.16
  %0 = getelementptr inbounds %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
  %1 = load <4 x i32>* %0, align 16 ; <<4 x i32>> [#uses=1]
  %2 = getelementptr inbounds %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
  %3 = load <4 x i32>* %2, align 16 ; <<4 x i32>> [#uses=1]
  %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
  %6 = bitcast <8 x i16> %5 to <2 x double> ; <<2 x double>> [#uses=2]
  %7 = extractelement <2 x double> %6, i32 0 ; <double> [#uses=1]
  %8 = bitcast double %7 to <4 x i16> ; <<4 x i16>> [#uses=1]
  %9 = sext <4 x i16> %8 to <4 x i32> ; <<4 x i32>> [#uses=1]
  %10 = extractelement <2 x double> %6, i32 1 ; <double> [#uses=1]
  %11 = bitcast double %10 to <4 x i16> ; <<4 x i16>> [#uses=1]
  %12 = sext <4 x i16> %11 to <4 x i32> ; <<4 x i32>> [#uses=1]
  %13 = mul <4 x i32> %1, %9 ; <<4 x i32>> [#uses=1]
  %14 = mul <4 x i32> %3, %12 ; <<4 x i32>> [#uses=1]
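  ; llvm.arm.neon.vshiftn encodes the shift direction in the sign of its constant
  ; operand: the <i32 -12, ...> vectors below request a narrowing right shift by 12,
  ; which is why the CHECK lines above expect vshrn.i32 rather than a left-shift form.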
  %15 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %13, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
  %16 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %14, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
  %17 = shufflevector <4 x i16> %15, <4 x i16> %16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
  %18 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17, i32 1)
  ret void
}

define void @t2(i16* %i_ptr, i16* %o_ptr, %struct.int16x8_t* nocapture %vT0ptr, %struct.int16x8_t* nocapture %vT1ptr) nounwind {
entry:
; CHECK-LABEL: t2:
; CHECK: vld1.16
; CHECK-NOT: vmov
; CHECK: vmul.i16
; CHECK: vld1.16
; CHECK: vmul.i16
; CHECK-NOT: vmov
; CHECK: vst1.16
; CHECK: vst1.16
  %0 = getelementptr inbounds %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
  %1 = load <8 x i16>* %0, align 16 ; <<8 x i16>> [#uses=1]
  %2 = getelementptr inbounds %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
  %3 = load <8 x i16>* %2, align 16 ; <<8 x i16>> [#uses=1]
  %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4, i32 1) ; <<8 x i16>> [#uses=1]
  %6 = getelementptr inbounds i16* %i_ptr, i32 8 ; <i16*> [#uses=1]
  %7 = bitcast i16* %6 to i8* ; <i8*> [#uses=1]
  %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7, i32 1) ; <<8 x i16>> [#uses=1]
  %9 = mul <8 x i16> %1, %5 ; <<8 x i16>> [#uses=1]
  %10 = mul <8 x i16> %3, %8 ; <<8 x i16>> [#uses=1]
  %11 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9, i32 1)
  %12 = getelementptr inbounds i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
  %13 = bitcast i16* %12 to i8* ; <i8*> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10, i32 1)
  ret void
}

define <8 x i8> @t3(i8* %A, i8* %B) nounwind {
; CHECK-LABEL: t3:
; CHECK: vld3.8
; CHECK: vmul.i8
; CHECK: vmov r
; CHECK-NOT: vmov d
; CHECK: vst3.8
  %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=2]
  %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0 ; <<8 x i8>> [#uses=1]
  %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2 ; <<8 x i8>> [#uses=1]
  %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 1 ; <<8 x i8>> [#uses=1]
  %tmp5 = sub <8 x i8> %tmp3, %tmp4
  %tmp6 = add <8 x i8> %tmp2, %tmp3 ; <<8 x i8>> [#uses=1]
  %tmp7 = mul <8 x i8> %tmp4, %tmp2
  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7, i32 1)
  ret <8 x i8> %tmp4
}

define void @t4(i32* %in, i32* %out) nounwind {
entry:
; CHECK-LABEL: t4:
; CHECK: vld2.32
; CHECK-NOT: vmov
; CHECK: vld2.32
; CHECK-NOT: vmov
; CHECK: bne
  %tmp1 = bitcast i32* %in to i8* ; <i8*> [#uses=1]
  %tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
  %tmp3 = getelementptr inbounds i32* %in, i32 8 ; <i32*> [#uses=1]
  %tmp4 = bitcast i32* %tmp3 to i8* ; <i8*> [#uses=1]
  %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
  %tmp8 = bitcast i32* %out to i8* ; <i8*> [#uses=1]
  br i1 undef, label %return1, label %return2

return1:
; CHECK: %return1
; CHECK-NOT: vmov
; CHECK-NEXT: vadd.i32
; CHECK-NEXT: vadd.i32
; CHECK-NEXT: vst2.32
  %tmp52 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
  %tmp57 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 1 ; <<4 x i32>> [#uses=1]
  %tmp = extractvalue %struct.__neon_int32x4x2_t %tmp5, 0 ; <<4 x i32>> [#uses=1]
  %tmp39 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
  %tmp6 = add <4 x i32> %tmp52, %tmp ; <<4 x i32>> [#uses=1]
  %tmp7 = add <4 x i32> %tmp57, %tmp39 ; <<4 x i32>> [#uses=1]
  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp6, <4 x i32> %tmp7, i32 1)
  ret void

return2:
; CHECK: %return2
; CHECK: vadd.i32
; CHECK-NOT: vmov
; CHECK: vst2.32 {d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}}
  %tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
  %tmp101 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
  %tmp102 = add <4 x i32> %tmp100, %tmp101 ; <<4 x i32>> [#uses=1]
  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp102, <4 x i32> %tmp101, i32 1)
  call void @llvm.trap()
  unreachable
}

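; t5 and t6 pass the same vector as both pass-through operands of vld2lane, so
; one copy is needed to populate the second D register of the pair. That copy is
; the single register move the checks match as a vorr; the CHECK-NOT lines verify
; that no additional vmov copies are generated.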
define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: t5:
; CHECK: vld1.32
; How can FileCheck match Q and D registers? We need a lisp interpreter.
; CHECK: vorr {{q[0-9]+}}, {{q[0-9]+}}, {{q[0-9]+}}
; CHECK-NOT: vmov
; CHECK: vld2.16 {d{{[0-9]+}}[1], d{{[0-9]+}}[1]}, [r0]
; CHECK-NOT: vmov
; CHECK: vadd.i16
  %tmp0 = bitcast i16* %A to i8* ; <i8*> [#uses=1]
  %tmp1 = load <8 x i16>* %B ; <<8 x i16>> [#uses=2]
  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
  %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0 ; <<8 x i16>> [#uses=1]
  %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1 ; <<8 x i16>> [#uses=1]
  %tmp5 = add <8 x i16> %tmp3, %tmp4 ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %tmp5
}

define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: t6:
; CHECK: vldr
; CHECK: vorr d[[D0:[0-9]+]], d[[D1:[0-9]+]]
; CHECK-NEXT: vld2.8 {d[[D1]][1], d[[D0]][1]}
  %tmp1 = load <8 x i8>* %B ; <<8 x i8>> [#uses=2]
  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
  %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0 ; <<8 x i8>> [#uses=1]
  %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1 ; <<8 x i8>> [#uses=1]
  %tmp5 = add <8 x i8> %tmp3, %tmp4 ; <<8 x i8>> [#uses=1]
  ret <8 x i8> %tmp5
}

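; In t7 the <0, 2, 0, 2> shuffle is selected as vuzp.32, which rewrites both of
; its register operands, so the loaded vector is first duplicated with a vorr
; (the Q-register copy matched below); the CHECK-NOT line verifies that this is
; the only copy emitted.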
define void @t7(i32* %iptr, i32* %optr) nounwind {
entry:
; CHECK-LABEL: t7:
; CHECK: vld2.32
; CHECK: vst2.32
; CHECK: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}},
; CHECK: vorr q[[Q0:[0-9]+]], q[[Q1:[0-9]+]], q[[Q1:[0-9]+]]
; CHECK-NOT: vmov
; CHECK: vuzp.32 q[[Q1]], q[[Q0]]
; CHECK: vst1.32
  %0 = bitcast i32* %iptr to i8* ; <i8*> [#uses=2]
  %1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0, i32 1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
  %tmp57 = extractvalue %struct.__neon_int32x4x2_t %1, 0 ; <<4 x i32>> [#uses=1]
  %tmp60 = extractvalue %struct.__neon_int32x4x2_t %1, 1 ; <<4 x i32>> [#uses=1]
  %2 = bitcast i32* %optr to i8* ; <i8*> [#uses=2]
  tail call void @llvm.arm.neon.vst2.v4i32(i8* %2, <4 x i32> %tmp57, <4 x i32> %tmp60, i32 1)
  %3 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %0, i32 1) ; <<4 x i32>> [#uses=1]
  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2> ; <<4 x i32>> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v4i32(i8* %2, <4 x i32> %4, i32 1)
  ret void
}

; PR7156
define arm_aapcs_vfpcc i32 @t8() nounwind {
; CHECK-LABEL: t8:
; CHECK: vrsqrte.f32 q8, q8
bb.nph55.bb.nph55.split_crit_edge:
  br label %bb3

bb3: ; preds = %bb3, %bb.nph55.bb.nph55.split_crit_edge
  br i1 undef, label %bb5, label %bb3

bb5: ; preds = %bb3
  br label %bb.i25

bb.i25: ; preds = %bb.i25, %bb5
  %0 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %1 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %0) nounwind ; <<4 x float>> [#uses=1]
  %2 = fmul <4 x float> %1, undef ; <<4 x float>> [#uses=1]
  %3 = fmul <4 x float> undef, %2 ; <<4 x float>> [#uses=1]
  %tmp26.i = bitcast <4 x float> %3 to <2 x double> ; <<2 x double>> [#uses=1]
  %4 = extractelement <2 x double> %tmp26.i, i32 0 ; <double> [#uses=1]
  %5 = bitcast double %4 to <2 x float> ; <<2 x float>> [#uses=1]
  %6 = extractelement <2 x float> %5, i32 1 ; <float> [#uses=1]
  store float %6, float* undef, align 4
  br i1 undef, label %bb6, label %bb.i25

bb6: ; preds = %bb.i25
  br i1 undef, label %bb7, label %bb14

bb7: ; preds = %bb6
  br label %bb.i49

bb.i49: ; preds = %bb.i49, %bb7
  br i1 undef, label %bb.i19, label %bb.i49

bb.i19: ; preds = %bb.i19, %bb.i49
  br i1 undef, label %exit, label %bb.i19

exit: ; preds = %bb.i19
  unreachable

bb14: ; preds = %bb6
  ret i32 0
}

%0 = type { %1, %1, %1, %1 }
%1 = type { %2 }
%2 = type { <4 x float> }
%3 = type { %0, %1 }

; PR7157
define arm_aapcs_vfpcc float @t9(%0* nocapture, %3* nocapture) nounwind {
; CHECK-LABEL: t9:
; CHECK: vldr
; CHECK-NOT: vmov d{{.*}}, d16
; CHECK: vmov.i32 d17
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
; CHECK-NEXT: vst1.64 {d16, d17}, [r0:128]
  %3 = bitcast double 0.000000e+00 to <2 x float> ; <<2 x float>> [#uses=2]
  %4 = shufflevector <2 x float> %3, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  store <4 x float> %4, <4 x float>* undef, align 16
  %5 = shufflevector <2 x float> %3, <2 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  store <4 x float> %5, <4 x float>* undef, align 16
  br label %8

; <label>:6 ; preds = %8
  br label %7

; <label>:7 ; preds = %6
  br label %8

; <label>:8 ; preds = %7, %2
  br label %6

; <label>:9 ; preds = %8
  ret float undef

; <label>:10 ; preds = %6
  ret float 9.990000e+02
}

; PR7162
define arm_aapcs_vfpcc i32 @t10() nounwind {
entry:
; CHECK-LABEL: t10:
; CHECK: vmov.i32 q[[Q0:[0-9]+]], #0x3f000000
; CHECK: vmul.f32 q8, q8, d[[DREG:[0-1]+]]
; CHECK: vadd.f32 q8, q8, q8
  %0 = shufflevector <4 x float> zeroinitializer, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
  %1 = insertelement <4 x float> %0, float undef, i32 1 ; <<4 x float>> [#uses=1]
  %2 = insertelement <4 x float> %1, float undef, i32 2 ; <<4 x float>> [#uses=1]
  %3 = insertelement <4 x float> %2, float undef, i32 3 ; <<4 x float>> [#uses=1]
  %tmp54.i = bitcast <4 x float> %3 to <2 x double> ; <<2 x double>> [#uses=1]
  %4 = extractelement <2 x double> %tmp54.i, i32 1 ; <double> [#uses=1]
  %5 = bitcast double %4 to <2 x float> ; <<2 x float>> [#uses=1]
  %6 = shufflevector <2 x float> %5, <2 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
  %7 = fmul <4 x float> undef, %6 ; <<4 x float>> [#uses=1]
  %8 = fadd <4 x float> %7, undef ; <<4 x float>> [#uses=1]
  %9 = fadd <4 x float> %8, undef ; <<4 x float>> [#uses=1]
  %10 = shufflevector <4 x float> undef, <4 x float> %9, <4 x i32> <i32 0, i32 1, i32 2, i32 7> ; <<4 x float>> [#uses=1]
  %11 = fmul <4 x float> %10, <float 5.000000e-01, float 5.000000e-01, float 5.000000e-01, float 5.000000e-01> ; <<4 x float>> [#uses=1]
  %12 = shufflevector <4 x float> %11, <4 x float> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef> ; <<4 x float>> [#uses=1]
  %13 = shufflevector <4 x float> %12, <4 x float> undef, <4 x i32> zeroinitializer ; <<4 x float>> [#uses=1]
  %14 = fmul <4 x float> %13, undef ; <<4 x float>> [#uses=1]
  %15 = fadd <4 x float> undef, %14 ; <<4 x float>> [#uses=1]
  %16 = shufflevector <4 x float> undef, <4 x float> %15, <4 x i32> <i32 0, i32 1, i32 6, i32 3> ; <<4 x float>> [#uses=1]
  %17 = fmul <4 x float> %16, undef ; <<4 x float>> [#uses=1]
  %18 = extractelement <4 x float> %17, i32 2 ; <float> [#uses=1]
  store float %18, float* undef, align 4
  br i1 undef, label %exit, label %bb14

exit: ; preds = %bb.i19
  unreachable

bb14: ; preds = %bb6
  ret i32 0
}

; This test crashes the coalescer because live variables were not updated properly.
define <8 x i8> @t11(i8* %A1, i8* %A2, i8* %A3, i8* %A4, i8* %A5, i8* %A6, i8* %A7, i8* %A8, i8* %B) nounwind {
  %tmp1d = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A4, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
  %tmp2d = extractvalue %struct.__neon_int8x8x3_t %tmp1d, 0 ; <<8 x i8>> [#uses=1]
  %tmp1f = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A6, i32 1) ; <%struct.__neon_int8x8x3_t> [#uses=1]
  %tmp2f = extractvalue %struct.__neon_int8x8x3_t %tmp1f, 0 ; <<8 x i8>> [#uses=1]
  %tmp2bd = add <8 x i8> zeroinitializer, %tmp2d ; <<8 x i8>> [#uses=1]
  %tmp2abcd = mul <8 x i8> zeroinitializer, %tmp2bd ; <<8 x i8>> [#uses=1]
  %tmp2ef = sub <8 x i8> zeroinitializer, %tmp2f ; <<8 x i8>> [#uses=1]
  %tmp2efgh = mul <8 x i8> %tmp2ef, undef ; <<8 x i8>> [#uses=2]
  call void @llvm.arm.neon.vst3.v8i8(i8* %A2, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp2efgh, i32 1)
  %tmp2 = sub <8 x i8> %tmp2efgh, %tmp2abcd ; <<8 x i8>> [#uses=1]
  %tmp7 = mul <8 x i8> undef, %tmp2 ; <<8 x i8>> [#uses=1]
  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> undef, <8 x i8> undef, <8 x i8> %tmp7, i32 1)
  ret <8 x i8> undef
}

declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*, i32) nounwind readonly

declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly

declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone

declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>, i32) nounwind

declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind

declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>, i32) nounwind

declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*, i32) nounwind readonly

declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*, i32) nounwind readonly

declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32, i32) nounwind readonly

declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32, i32) nounwind readonly

declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>, i32) nounwind

declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone

declare void @llvm.trap() nounwind