/external/llvm/test/CodeGen/X86/

mmx-arith.ll
    6: define void @foo(x86_mmx* %A, x86_mmx* %B) {
    8: %tmp1 = load x86_mmx* %A ; <x86_mmx> [#uses=1]
    9: %tmp3 = load x86_mmx* %B ; <x86_mmx> [#uses=1]
   10: %tmp1a = bitcast x86_mmx %tmp1 to <8 x i8>
   11: %tmp3a = bitcast x86_mmx %tmp3 to <8 x i8>
   13: %tmp4a = bitcast <8 x i8> %tmp4 to x86_mmx
   14: store x86_mmx %tmp4a, x86_mmx* % [all...]

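This test exercises the defining property of x86_mmx: the type is opaque, so values load and store directly, but all arithmetic happens after a bitcast to a concrete vector type. A minimal self-contained sketch of that round trip, in the same legacy (pre-LLVM 3.7) load/store syntax as these tests; the vector add stands in for the elided %tmp4 and is an assumption:

    define void @add_bytes(x86_mmx* %A, x86_mmx* %B) {
      %a = load x86_mmx* %A                    ; x86_mmx loads directly
      %b = load x86_mmx* %B
      %va = bitcast x86_mmx %a to <8 x i8>     ; cast to a real vector type
      %vb = bitcast x86_mmx %b to <8 x i8>
      %sum = add <8 x i8> %va, %vb             ; arithmetic on <8 x i8>, never on x86_mmx
      %r = bitcast <8 x i8> %sum to x86_mmx    ; cast back before storing
      store x86_mmx %r, x86_mmx* %A
      ret void
    }
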
mmx-bitcast-to-i64.ll
    3: define i64 @foo(x86_mmx* %p) {
    4: %t = load x86_mmx* %p
    5: %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
    6: %s = bitcast x86_mmx %u to i64
    9: define i64 @goo(x86_mmx* %p) {
   10: %t = load x86_mmx* %p
   11: %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %t [all...]

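A completed version of @foo as a sketch; the declare and the final ret are elided from the snippet and are assumptions, but the intrinsic name and signature are as shown:

    declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx) nounwind readnone

    define i64 @foo(x86_mmx* %p) {
      %t = load x86_mmx* %p
      %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %t)
      %s = bitcast x86_mmx %u to i64           ; bitcast is the only way out of x86_mmx
      ret i64 %s
    }
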
3dnow-intrinsics.ll
    3: define <8 x i8> @test_pavgusb(x86_mmx %a.coerce, x86_mmx %b.coerce) nounwind readnone {
    6: %0 = bitcast x86_mmx %a.coerce to <8 x i8>
    7: %1 = bitcast x86_mmx %b.coerce to <8 x i8>
    8: %2 = bitcast <8 x i8> %0 to x86_mmx
    9: %3 = bitcast <8 x i8> %1 to x86_mmx
   10: %4 = call x86_mmx @llvm.x86.3dnow.pavgusb(x86_mmx %2, x86_mmx %3)
   11: %5 = bitcast x86_mmx %4 to <8 x i8 [all...]

2007-06-15-IntToMMX.ll
    2: @R = external global x86_mmx ; <x86_mmx*> [#uses=1]
    6: %tmp2 = bitcast <1 x i64> %A to x86_mmx
    7: %tmp3 = bitcast <1 x i64> %B to x86_mmx
    8: %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp2, x86_mmx %tmp3 ) ; <x86_mmx> [#uses=1]
    9: store x86_mmx %tmp7, x86_mmx* @ [all...]

mmx-shift.ll
    8: %tmp = bitcast <1 x i64> %mm1 to x86_mmx
    9: %tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q( x86_mmx %tmp, i32 32 ) ; <x86_mmx> [#uses=1]
   10: %retval1112 = bitcast x86_mmx %tmp6 to i64
   14: declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone
   16: define i64 @t2(x86_mmx %mm1, x86_mmx %mm2) nounwind {
   18: %tmp7 = tail call x86_mmx @llvm.x86.mmx.psra.d( x86_mmx %mm1, x86_mmx %mm2 ) nounwind readnone ; <x86_mmx> [#uses=1 [all...]

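The shift intrinsics come in two flavors here: pslli.* takes the count as an i32 immediate, while psra.d takes it in a second x86_mmx operand. A sketch completing @t1; the function signature and the final ret are not in the snippet and are assumed:

    define i64 @t1(<1 x i64> %mm1) nounwind {
      %tmp = bitcast <1 x i64> %mm1 to x86_mmx
      %tmp6 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %tmp, i32 32)
      %retval1112 = bitcast x86_mmx %tmp6 to i64
      ret i64 %retval1112
    }

    declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32) nounwind readnone
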
2010-04-23-mmx-movdq2q.ll
   52: %tmp1 = bitcast double %a to x86_mmx
   54: %tmp2 = bitcast double %b to x86_mmx
   56: %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
   57: store x86_mmx %tmp3, x86_mmx* null
   64: %tmp1 = bitcast double %a to x86_mmx
   66: %tmp2 = bitcast double %b to x86_mmx
   68: %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2 [all...]

2009-08-02-mmx-scalar-to-vector.ll
    3: declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)
    8: %t0 = bitcast <1 x i64> %t1 to x86_mmx
    9: %t2 = tail call x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %t0, i32 48)
   10: %t3 = bitcast x86_mmx %t2 to <1 x i64>

2007-07-03-GR64ToVR64.ll
    7: @R = external global x86_mmx ; <x86_mmx*> [#uses=1]
   11: %tmp4 = bitcast <1 x i64> %B to x86_mmx ; <<4 x i16>> [#uses=1]
   12: %tmp6 = bitcast <1 x i64> %A to x86_mmx ; <<4 x i16>> [#uses=1]
   13: %tmp7 = tail call x86_mmx @llvm.x86.mmx.paddus.w( x86_mmx %tmp6, x86_mmx %tmp4 ) ; <x86_mmx> [#uses=1]
   14: store x86_mmx %tmp7, x86_mmx* @ [all...]

mmx-builtins.ll
    3: declare x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx, x86_mmx) nounwind readnone
   10: %2 = bitcast <4 x i16> %1 to x86_mmx
   11: %3 = bitcast <4 x i16> %0 to x86_mmx
   12: %4 = tail call x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx %2, x86_mmx %3) nounwind readnone
   13: %5 = bitcast x86_mmx %4 to <4 x i16>
   19: declare x86_mmx @llvm.x86.mmx.pcmpgt.d(x86_mmx, x86_mmx) nounwind readnon [all...]

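SSSE3 horizontal adds are routed through x86_mmx as well, with <4 x i16> as the concrete vector type on either side. A sketch of a phadd.w wrapper; the <4 x i16> parameter types and the ret are assumptions, while the intrinsic declaration is as shown:

    declare x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx, x86_mmx) nounwind readnone

    define <4 x i16> @phaddw(<4 x i16> %a, <4 x i16> %b) nounwind {
      %0 = bitcast <4 x i16> %a to x86_mmx
      %1 = bitcast <4 x i16> %b to x86_mmx
      %2 = tail call x86_mmx @llvm.x86.ssse3.phadd.w(x86_mmx %0, x86_mmx %1) nounwind readnone
      %3 = bitcast x86_mmx %2 to <4 x i16>
      ret <4 x i16> %3
    }
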
mmx-punpckhdq.ll
   19: define void @pork(x86_mmx* %x) {
   23: %tmp2 = load x86_mmx* %x ; <x86_mmx> [#uses=1]
   24: %tmp9 = tail call x86_mmx @llvm.x86.mmx.punpckhdq (x86_mmx %tmp2, x86_mmx %tmp2)
   25: store x86_mmx %tmp9, x86_mmx* %x
   30: declare x86_mmx @llvm.x86.mmx.punpckhdq(x86_mmx, x86_mmx [all...]

2007-04-25-MMX-PADDQ.ll
   34: define <1 x i64> @unsigned_add3a(x86_mmx* %a, x86_mmx* %b, i32 %count) nounwind {
   36: %tmp2943 = bitcast <1 x i64><i64 0> to x86_mmx
   47: %sum.035.0 = phi x86_mmx [ %tmp2943, %entry ], [ %tmp22, %bb26 ] ; <x86_mmx> [#uses=1]
   48: %tmp13 = getelementptr x86_mmx* %b, i32 %i.037.0 ; <x86_mmx*> [#uses=1]
   49: %tmp14 = load x86_mmx* %tmp13 ; <x86_mmx> [#uses=1]
   50: %tmp18 = getelementptr x86_mmx* %a, i32 %i.037.0 ; <x86_mmx*> [#uses=1 [all...]

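x86_mmx is a first-class value, so it can flow through phi nodes in a reduction loop; only the arithmetic itself must go through intrinsics or bitcasts. A condensed sketch of the unsigned_add3a loop above; the control flow and the padd.q accumulation fill in parts the snippet elides, so treat them as assumptions:

    declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx) nounwind readnone

    define x86_mmx @sum_loop(x86_mmx* %b, i32 %count) nounwind {
    entry:                                     ; assumes %count >= 1
      %zero = bitcast <1 x i64> <i64 0> to x86_mmx
      br label %loop

    loop:
      %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
      %sum = phi x86_mmx [ %zero, %entry ], [ %sum.next, %loop ]
      %ptr = getelementptr x86_mmx* %b, i32 %i
      %val = load x86_mmx* %ptr
      %sum.next = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %sum, x86_mmx %val)
      %i.next = add i32 %i, 1
      %done = icmp eq i32 %i.next, %count
      br i1 %done, label %exit, label %loop

    exit:
      ret x86_mmx %sum.next
    }
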
2007-05-15-maskmovq.ll
    8: %tmp4 = bitcast <1 x i64> %mask1 to x86_mmx ; <x86_mmx> [#uses=1]
    9: %tmp6 = bitcast <1 x i64> %c64 to x86_mmx ; <x86_mmx> [#uses=1]
   10: tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp4, x86_mmx %tmp6, i8* %P )
   14: declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)

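llvm.x86.mmx.maskmovq is one of the few void MMX intrinsics; it takes the store address as a plain i8*. Completing a function around the snippet; the parameter list and the ret void are assumptions:

    declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*)

    define void @masked_store(<1 x i64> %mask1, <1 x i64> %c64, i8* %P) nounwind {
      %tmp4 = bitcast <1 x i64> %mask1 to x86_mmx
      %tmp6 = bitcast <1 x i64> %c64 to x86_mmx
      tail call void @llvm.x86.mmx.maskmovq(x86_mmx %tmp4, x86_mmx %tmp6, i8* %P)
      ret void
    }
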
mmx-arg-passing.ll
   13: @u1 = external global x86_mmx
   15: define void @t1(x86_mmx %v1) nounwind {
   16: store x86_mmx %v1, x86_mmx* @u1, align 8
   20: @u2 = external global x86_mmx
   23: %tmp = bitcast <1 x i64> %v1 to x86_mmx
   24: store x86_mmx %tmp, x86_mmx* @u2, align 8

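Passing x86_mmx by value is legal at the IR level: @t1 stores the incoming argument straight to a global, while @t2 in the same file builds the value from a <1 x i64> argument instead. A completed @t1, with only the ret void assumed:

    @u1 = external global x86_mmx

    define void @t1(x86_mmx %v1) nounwind {
      store x86_mmx %v1, x86_mmx* @u1, align 8
      ret void
    }
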
mmx-arg-passing2.ll
    8: %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
    9: %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind
   13: define void @t2(x86_mmx %v1, x86_mmx %v2) nounwind {
   14: %v1a = bitcast x86_mmx %v1 to <8 x i8>
   15: %v2b = bitcast x86_mmx %v2 to <8 x i8>
   17: %tmp3a = bitcast <8 x i8> %tmp3 to x86_mmx
   18: %tmp4 = tail call i32 (...)* @pass_v8qi( x86_mmx %tmp3a ) nounwind

vec_insert-7.ll
    5: define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
   10: %tmp = bitcast x86_mmx %x to <2 x i32>
   13: %tmp9 = bitcast <2 x i32> %tmp8 to x86_mmx
   14: ret x86_mmx %tmp9

mmx-insert-element.ll
    5: define x86_mmx @qux(i32 %A) nounwind {
    7: %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
    8: ret x86_mmx %tmp4

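Building an x86_mmx from a scalar goes through insertelement on an ordinary vector first. A sketch of @qux; the snippet elides the insertelement on line 6, so the zero-vector form used here is an assumption:

    define x86_mmx @qux(i32 %A) nounwind {
      %tmp3 = insertelement <2 x i32> <i32 0, i32 0>, i32 %A, i32 0   ; assumed elided step
      %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
      ret x86_mmx %tmp4
    }
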
2008-09-05-sinttofp-2xi32.ll
   22: define <2 x double> @a2(x86_mmx %x) nounwind {
   24: %y = tail call <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx %x)
   28: define x86_mmx @b2(<2 x double> %x) nounwind {
   30: %y = tail call x86_mmx @llvm.x86.sse.cvttpd2pi (<2 x double> %x)
   31: ret x86_mmx %y
   34: declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx)
   35: declare x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double>)

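The SSE conversion intrinsics cross between the MMX and XMM domains: cvtpi2pd consumes an x86_mmx and yields <2 x double>, and cvttpd2pi goes the other way. Completed versions of the two functions above, with only the ret instructions assumed:

    declare <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx)
    declare x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double>)

    define <2 x double> @a2(x86_mmx %x) nounwind {
      %y = tail call <2 x double> @llvm.x86.sse.cvtpi2pd(x86_mmx %x)
      ret <2 x double> %y
    }

    define x86_mmx @b2(<2 x double> %x) nounwind {
      %y = tail call x86_mmx @llvm.x86.sse.cvttpd2pi(<2 x double> %x)
      ret x86_mmx %y
    }
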
2011-06-14-mmx-inlineasm.ll
    6: %0 = type { x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx }
   13: %0 = bitcast i64 %or to x86_mmx
   21: %1 = tail call %0 asm "movq\09\09$7,\09$0\0Amovq\09\09$7,\09$1\0Amovq\09\09$7,\09$2\0Amovq\09\09$7,\09$3\0Amovq\09\09$7,\09$4\0Amovq\09\09$7,\09$5\0Amovq\09\09$7,\09$6\0A", "=&y,=&y,=&y,=&y,=&y,=&y,=y,y,~{dirflag},~{fpsr},~{flags}"(x86_mmx %0) nounwind, !srcloc !0
   37: tail call void asm sideeffect "movq\09$1,\09 ($0)\0Amovq\09$2,\09 8($0)\0Amovq\09$3,\0916($0)\0Amovq\09$4,\0924($0)\0Amovq\09$5,\0932($0)\0Amovq\09$6,\0940($0)\0Amovq\09$7,\0948($0)\0Amovq\09$8,\0956($0)\0A", "r,y,y,y,y,y,y,y,y,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* undef, x86_mmx %0, x86_mmx %asmresult, x86_mmx %asmresult6, x86_mmx %asmresult7, x86_mmx %asmresult8, x86_mmx %as (…) [all...]

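In inline asm, x86_mmx binds to the "y" constraint, i.e. an MMX register (the "=&y" forms above additionally mark early-clobber outputs). A far smaller sketch than the test, only to show the shape of the constraint string; the asm body and names are illustrative:

    define i64 @through_mmx(i64 %v) nounwind {
      %m = bitcast i64 %v to x86_mmx
      %r = tail call x86_mmx asm "movq $1, $0", "=y,y"(x86_mmx %m) nounwind
      %out = bitcast x86_mmx %r to i64
      ret i64 %out
    }
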
fast-isel-bc.ll
    8: declare void @func2(x86_mmx)
   14: ; For now, handling of x86_mmx parameters in fast Isel is unimplemented,
   20: %tmp0 = bitcast <2 x i32><i32 0, i32 2> to x86_mmx
   21: call void @func2(x86_mmx %tmp0)

2008-04-08-CoalescerCrash.ll
    8: %tmp1 = tail call x86_mmx asm sideeffect "movd $1, $0", "=={mm4},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( i32 undef ) nounwind ; <x86_mmx> [#uses=1]
   11: %tmp3 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm3},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx undef ) nounwind ; <i32> [#uses=1]
   14: tail call void asm sideeffect "movntq $0, 0($1,$2)", "{mm0},{di},{bp},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx undef, i32 undef, i32 %tmp3 ) nounwind
   17: %tmp8 = tail call i32 asm sideeffect "movd $1, $0", "=={bp},{mm4},~{dirflag},~{fpsr},~{flags},~{memory}"( x86_mmx %tmp1 ) nounwind ; <i32> [#uses=0]

2008-08-23-64Bit-maskmovq.ll
   20: %tmp = bitcast <8 x i8> zeroinitializer to x86_mmx
   21: %tmp2 = bitcast <8 x i8> zeroinitializer to x86_mmx
   22: tail call void @llvm.x86.mmx.maskmovq( x86_mmx %tmp, x86_mmx %tmp2, i8* null ) nounwind
   29: declare void @llvm.x86.mmx.maskmovq(x86_mmx, x86_mmx, i8*) nounwind

vec_insert-5.ll
    8: define void @t1(i32 %a, x86_mmx* %P) nounwind {
   12: %tmp23 = bitcast <2 x i32> %tmp22 to x86_mmx
   13: store x86_mmx %tmp23, x86_mmx* %P

/external/llvm/test/Assembler/ |
x86mmx.ll
    2: ; Basic smoke test for x86_mmx type.
    4: ; CHECK: define x86_mmx @sh16
    5: define x86_mmx @sh16(x86_mmx %A) {
    6: ; CHECK: ret x86_mmx %A
    7: ret x86_mmx %A

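The Assembler test is a pure parse/print round trip for the type. With an assumed RUN line in the usual llvm-as | llvm-dis | FileCheck form, the whole file would read:

    ; RUN: llvm-as < %s | llvm-dis | FileCheck %s
    ; Basic smoke test for x86_mmx type.

    ; CHECK: define x86_mmx @sh16
    define x86_mmx @sh16(x86_mmx %A) {
    ; CHECK: ret x86_mmx %A
      ret x86_mmx %A
    }
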
/external/clang/test/CodeGen/ |
mmx-shift-with-immediate.c
    5: // CHECK: x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %{{.*}}, i32 {{.*}})
    7: // CHECK: x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %{{.*}}, i32 {{.*}})
    9: // CHECK: x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx %{{.*}}, i32 {{.*}})
   12: // CHECK: x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %{{.*}}, i32 {{.*}})
   14: // CHECK: x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %{{.*}}, i32 {{.*}} [all...]

mmx-inline-asm.c
    5: // CHECK: { x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx, x86_mmx }