; ARM NEON vmov codegen test, lines matching "i16".

define <4 x i16> @v_movi16a() nounwind {
;CHECK: vmov.i16 d{{.*}}, #0x10
  ret <4 x i16> < i16 16, i16 16, i16 16, i16 16 >
}
define <4 x i16> @v_movi16b() nounwind {
;CHECK: vmov.i16 d{{.*}}, #0x1000
  ret <4 x i16> < i16 4096, i16 4096, i16 4096, i16 4096 >
}
define <4 x i16> @v_mvni16a() nounwind {
;CHECK: vmvn.i16 d{{.*}}, #0x10
  ret <4 x i16> < i16 65519, i16 65519, i16 65519, i16 65519 >
}
define <4 x i16> @v_mvni16b() nounwind {
;CHECK: vmvn.i16 d{{.*}}, #0x1000
  ret <4 x i16> < i16 61439, i16 61439, i16 61439, i16 61439 >
}
define <8 x i16> @v_movQi16a() nounwind {
;CHECK: vmov.i16 q{{.*}}, #0x10
  ret <8 x i16> < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
}
define <8 x i16> @v_movQi16b() nounwind {
;CHECK: vmov.i16 q{{.*}}, #0x1000
  ret <8 x i16> < i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096, i16 4096 >
}
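
; Editorial note (not part of the original test): vmov.i16/vmvn.i16 can only
; encode an 8-bit value placed in either byte of each halfword, so the splats
; above use 0x0010 and 0x1000. The vmvn cases return the bitwise complements
; 0xffef (65519) and 0xefff (61439) and expect the inverted-immediate form.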
define <8 x i16> @vmovls8(<8 x i8>* %A) nounwind {
;CHECK: vmovl.s8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
  ret <8 x i16> %tmp2
}
define <4 x i32> @vmovls16(<4 x i16>* %A) nounwind {
;CHECK: vmovl.s16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
  ret <4 x i32> %tmp2
}
define <8 x i16> @vmovlu8(<8 x i8>* %A) nounwind {
;CHECK: vmovl.u8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
  ret <8 x i16> %tmp2
}
define <4 x i32> @vmovlu16(<4 x i16>* %A) nounwind {
;CHECK: vmovl.u16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
  ret <4 x i32> %tmp2
}
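
; Editorial sketch (not part of the original test): the sext/zext functions
; above are expected to select vmovl.s8/u8 and vmovl.s16/u16. A typical
; consumer of the widened lanes looks like this; the function and value names
; are illustrative and no FileCheck lines are asserted.
define <8 x i16> @vmovl_sketch(<8 x i8>* %A, <8 x i16>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = zext <8 x i8> %tmp1 to <8 x i16>   ; widen the u8 lanes to u16
  %tmp3 = load <8 x i16>* %B
  %tmp4 = add <8 x i16> %tmp2, %tmp3         ; use the widened value
  ret <8 x i16> %tmp4
}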
define <8 x i8> @vmovni16(<8 x i16>* %A) nounwind {
;CHECK: vmovn.i16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = trunc <8 x i16> %tmp1 to <8 x i8>
  ret <8 x i8> %tmp2
}
define <4 x i16> @vmovni32(<4 x i32>* %A) nounwind {
;CHECK: vmovn.i32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = trunc <4 x i32> %tmp1 to <4 x i16>
  ret <4 x i16> %tmp2
}
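
; Editorial note (not part of the original test): a plain vector trunc, as in
; the two functions above, simply discards the high half of each lane, which is
; exactly what vmovn.i16/vmovn.i32 do, so those are the expected selections.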
define <8 x i8> @vqmovns16(<8 x i16>* %A) nounwind {
;CHECK: vqmovn.s16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %tmp1)
  ret <8 x i8> %tmp2
}
define <4 x i16> @vqmovns32(<4 x i32>* %A) nounwind {
;CHECK: vqmovn.s32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = call <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32> %tmp1)
  ret <4 x i16> %tmp2
}
define <8 x i8> @vqmovnu16(<8 x i16>* %A) nounwind {
;CHECK: vqmovn.u16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16> %tmp1)
  ret <8 x i8> %tmp2
}
define <4 x i16> @vqmovnu32(<4 x i32>* %A) nounwind {
;CHECK: vqmovn.u32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32> %tmp1)
  ret <4 x i16> %tmp2
}
define <8 x i8> @vqmovuns16(<8 x i16>* %A) nounwind {
;CHECK: vqmovun.s16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = call <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16> %tmp1)
  ret <8 x i8> %tmp2
}
define <4 x i16> @vqmovuns32(<4 x i32>* %A) nounwind {
;CHECK: vqmovun.s32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = call <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32> %tmp1)
  ret <4 x i16> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqmovns.v4i16(<4 x i32>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqmovnu.v8i8(<8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqmovnu.v4i16(<4 x i32>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqmovnsu.v8i8(<8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqmovnsu.v4i16(<4 x i32>) nounwind readnone
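
; Editorial sketch (not part of the original test): the vqmovn/vqmovun
; intrinsics declared above saturate while narrowing, unlike a plain trunc.
; A minimal use, with illustrative names and no FileCheck assertions:
define <8 x i8> @vqmovn_sketch(<8 x i16>* %A, <8 x i16>* %B) nounwind {
  %a = load <8 x i16>* %A
  %b = load <8 x i16>* %B
  %sum = add <8 x i16> %a, %b                                          ; may exceed the i8 range
  %narrow = call <8 x i8> @llvm.arm.neon.vqmovns.v8i8(<8 x i16> %sum)  ; clamp, then narrow
  ret <8 x i8> %narrow
}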

define void @noTruncStore(<4 x i32>* %a, <4 x i16>* %b) nounwind {
  %tmp1 = load <4 x i32>* %a
  %tmp2 = trunc <4 x i32> %tmp1 to <4 x i16>
  store <4 x i16> %tmp2, <4 x i16>* %b, align 8
  ret void
}
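
; Editorial note (not part of the original test): the store above is a
; truncating vector store at the IR level; the function name suggests it checks
; that codegen narrows the value first and then stores the <4 x i16> result,
; rather than emitting an unsupported truncating store.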

; The trunc/vst1 pair below sits inside a larger function whose remaining lines
; do not match the "i16" filter; %add.i is defined there. The trailing i32
; operand of the vst1 intrinsic is the store alignment in bytes.
  %vmovn.i = trunc <4 x i32> %add.i to <4 x i16>
  tail call void @llvm.arm.neon.vst1.v4i16(i8* undef, <4 x i16> %vmovn.i, i32 2)

declare void @llvm.arm.neon.vst1.v4i16(i8*, <4 x i16>, i32) nounwind