    Searched full:mul3 (Results 1 - 25 of 38)

  /external/llvm/test/Transforms/Reassociate/
mixed-fast-nonfast-fp.ll 4 ; CHECK: %mul3 = fmul float %a, %b
8 ; CHECK-NEXT: fadd fast float %tmp2, %mul3
12 %mul3 = fmul float %a, %b
14 %add1 = fadd fast float %mul1, %mul3
canonicalize-neg-const.ll 16 %mul3 = fmul double %add, %add2
17 ret double %mul3
32 %mul3 = fmul double %add, %add2
33 ret double %mul3
48 %mul3 = fmul double %add, %add2
49 ret double %mul3
  /external/llvm/test/Transforms/LICM/
extra-copies.ll 14 %mul3 = add nsw i32 %add2, %mul
20 %a9.0.lcssa = phi i32 [ %mul3, %for.body ]
  /external/llvm/test/Transforms/SimplifyCFG/AArch64/
prefer-fma.ll 55 ; CHECK: %mul3 = fmul fast double %5, 3.000000e+00
56 ; CHECK-NEXT: %neg = fsub fast double 0.000000e+00, %mul3
58 %mul3 = fmul fast double %6, 3.0000000e+00
59 %neg = fsub fast double 0.0000000e+00, %mul3
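Note: these hits show a multiply followed by a subtraction from zero, the pattern the AArch64 backend can fuse into a single negated multiply-add. A rough C illustration of the two forms (not the test itself; assumes C99 fma(3) from <math.h>):

    #include <math.h>

    /* Two-step form, mirroring the %mul3 / %neg pair above. */
    double neg_mul_two_step(double x) {
        double mul3 = x * 3.0;   /* %mul3 = fmul fast double %x, 3.0   */
        return 0.0 - mul3;       /* %neg  = fsub fast double 0.0, %mul3 */
    }

    /* Fused form a fast-math backend may emit instead (one rounding). */
    double neg_mul_fused(double x) {
        return fma(-x, 3.0, 0.0);
    }
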
  /external/llvm/test/Analysis/ScalarEvolution/
2012-05-29-MulAddRec.ll 5 ; outer loop. While reducing the recurrence at %mul3, unsigned integer overflow
34 %mul3 = phi i8 [ undef, %entry ], [ %mul.lcssa, %for.cond.loopexit ]
40 %mul45 = phi i8 [ %mul3, %for.cond ], [ %mul, %for.body ]
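Note: the test's comment refers to an i8 multiply recurrence whose value wraps. A hypothetical C analogue of such a recurrence (the test's actual nested loops are more involved):

    #include <stdint.h>

    /* An 8-bit multiply recurrence: the running product wraps modulo
     * 256 each iteration, the unsigned overflow ScalarEvolution must
     * model correctly when reducing the recurrence. */
    uint8_t mul_recurrence(uint8_t seed, uint8_t k, int n) {
        uint8_t acc = seed;
        for (int i = 0; i < n; ++i)
            acc = (uint8_t)(acc * k);   /* arithmetic is mod 2^8 */
        return acc;
    }
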
  /external/llvm/test/Transforms/LoopVectorize/PowerPC/
agg-interleave-a2.ll 22 %mul3 = fmul double %0, %mul
27 %add = fadd double %mul3, %mul9
  /external/llvm/test/Transforms/BBVectorize/X86/
loop1.ll 20 %mul3 = fmul double %0, %1
21 %add = fadd double %mul, %mul3
47 ; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
48 ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
  /external/llvm/test/Transforms/BBVectorize/
loop1.ll 20 %mul3 = fmul double %0, %1
21 %add = fadd double %mul, %mul3
43 ; CHECK: %mul3 = fmul double %0, %1
44 ; CHECK: %add = fadd double %mul, %mul3
73 ; CHECK-UNRL: %mul3 = fmul <2 x double> %2, %3
74 ; CHECK-UNRL: %add = fadd <2 x double> %mul, %mul3
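Note: the CHECK-UNRL lines verify that two scalar fmul/fadd pairs were packed into single <2 x double> operations. A hand-written C sketch of that packing, using SSE2 intrinsics as a stand-in for the vectorized IR (operand choice is illustrative):

    #include <emmintrin.h>

    /* Two independent double multiplies and their sum, expressed as one
     * <2 x double> multiply each plus one packed add. */
    static __m128d mul_add_packed(__m128d a, __m128d b) {
        __m128d mul  = _mm_mul_pd(a, a);  /* lanewise a*a               */
        __m128d mul3 = _mm_mul_pd(a, b);  /* lanewise a*b (%mul3)       */
        return _mm_add_pd(mul, mul3);     /* %add = fadd <2 x double>   */
    }
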
  /external/llvm/test/CodeGen/X86/
fmul-combines.ll 110 %mul3 = fmul fast <4 x float> %a, %mul2
111 ret <4 x float> %mul3
126 %mul3 = fmul fast <4 x float> %a, %mul2
127 ret <4 x float> %mul3
  /external/llvm/test/Transforms/SLPVectorizer/X86/
external_user.ll 48 %mul3 = fmul double %add2, 4.000000e+00
50 %add5 = fadd double %mul3, 4.000000e+00
59 ret double %mul3
horizontal.ll 104 %mul3 = fmul float %0, %5
109 %add8 = fadd fast float %mul3, %mul7
186 %mul3 = fmul fast float %0, %10
191 %add8 = fadd fast float %mul3, %mul7
278 %mul3 = fmul fast float %0, %5
279 %add = fadd fast float %sum.042, %mul3
342 %mul3 = fmul fast float %1, %2
348 %add8 = fadd fast float %mul3, %mul7
403 %mul3 = fmul fast double %0, %3
408 %add8 = fadd fast double %mul3, %mul
    [all...]
crash_smallpt.ll 75 %mul3.i.i792 = fmul double undef, undef
76 %mul.i764 = fmul double undef, %mul3.i.i792
80 %mul9.i772 = fmul double undef, %mul3.i.i792
  /external/llvm/test/CodeGen/AMDGPU/
wrong-transalu-pos-fix.ll 13 %mul3 = mul i32 %mul, %z.i17
34 store i32 %mul3, i32 addrspace(1)* %arrayidx, align 4
  /external/llvm/test/CodeGen/ARM/
2011-11-14-EarlyClobber.ll 39 %mul3 = fmul double %mul, %sub
42 %add = fadd double %mul3, %mul5
  /external/llvm/test/CodeGen/SystemZ/
fp-mul-01.ll 109 %mul3 = fmul float %mul2, %val3
110 %mul4 = fmul float %mul3, %val4
fp-mul-03.ll 111 %mul3 = fmul double %mul2, %val3
112 %mul4 = fmul double %mul3, %val4
int-mul-04.ll 130 %mul3 = mul i64 %mul2, %val3
131 %mul4 = mul i64 %mul3, %val4
  /external/llvm/test/Transforms/LoopVectorize/AArch64/
gather-cost.ll 37 %mul3 = fmul fast float %0, %1
40 %mul5 = fmul fast float %mul3, %2
  /external/llvm/test/Transforms/LoopVectorize/ARM/
gather-cost.ll 40 %mul3 = fmul fast float %0, %1
43 %mul5 = fmul fast float %mul3, %2
  /external/llvm/test/Transforms/LoopVectorize/
version-mem-access.ll 43 %mul3 = mul nsw i64 %indvars.iv, %AStride
44 %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %mul3
  /external/llvm/test/Transforms/StraightLineStrengthReduce/
slsr-mul.ll 83 %mul3 = mul i32 %a1, %b1
88 call void @foo(i32 %mul3)
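Note: this test exercises straight-line strength reduction over related multiplies. A simplified C sketch of the rewrite the pass performs (identifiers are illustrative, not the test's):

    extern void foo(int);

    /* If a*b is already computed, (a + 1)*b need not be a second
     * multiply: strength reduction rewrites it as the cheaper a*b + b. */
    void slsr_demo(int a, int b) {
        int mul  = a * b;
        int mul3 = mul + b;   /* == (a + 1) * b, multiply becomes an add */
        foo(mul);
        foo(mul3);
    }
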
  /external/vulkan-validation-layers/libs/glm/detail/
intrinsic_geometric.inl 136 __m128 mul3 = _mm_mul_ps(eta, dot0);
137 __m128 add0 = _mm_add_ps(mul3, sqt0);
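Note: these two lines are from glm's SSE refract path; mul3 holds eta * dot(N, I) and add0 adds the square-root term. A self-contained sketch of just that step (names mirror the fragment; the surrounding refract code is not reproduced):

    #include <xmmintrin.h>

    /* One step of SSE refract: eta * dot(N, I) + sqrt(k), with eta,
     * dot0 and sqt0 assumed broadcast across all four lanes. */
    static __m128 refract_step(__m128 eta, __m128 dot0, __m128 sqt0) {
        __m128 mul3 = _mm_mul_ps(eta, dot0);
        return _mm_add_ps(mul3, sqt0);
    }
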
  /external/vulkan-validation-layers/libs/glm/gtx/
simd_quat.inl 126 __m128 mul3 = _mm_mul_ps(q1.Data, q2.Data);
132 __m128 add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff);
146 mul3 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f));
147 __m128 add3 = _mm_add_ps(mul3, _mm_movehl_ps(mul3, mul3));
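Note: the last hit shows a classic SSE horizontal-add idiom: adding a register to movehl of itself folds the upper two lanes onto the lower two. A generic sketch of the full four-lane reduction built from it (this helper is illustrative, not glm's):

    #include <xmmintrin.h>

    /* Horizontal sum of four floats via the movehl idiom above. */
    static float hsum_ps(__m128 v) {
        __m128 shuf = _mm_movehl_ps(v, v);        /* {v2, v3, v2, v3}   */
        __m128 sums = _mm_add_ps(v, shuf);        /* {v0+v2, v1+v3, ..} */
        shuf = _mm_shuffle_ps(sums, sums, 0x55);  /* broadcast lane 1   */
        sums = _mm_add_ss(sums, shuf);            /* lane 0 holds total */
        return _mm_cvtss_f32(sums);
    }
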
  /external/libvpx/libvpx/vp8/common/mips/msa/
idct_msa.c 226 v8i16 mul0, mul1, mul2, mul3, dequant_in0, dequant_in1; local
236 mul0, mul1, mul2, mul3);
237 PCKEV_D2_SH(mul2, mul0, mul3, mul1, in0, in2);
238 PCKOD_D2_SH(mul2, mul0, mul3, mul1, in1, in3);
postproc_msa.c 591 v4i32 mul0, mul1, mul2, mul3; local
625 mul0, mul1, mul2, mul3);
641 sum_sq3[0] = sum_sq2[3] + mul3[0];
644 sum_sq3[cnt + 1] = sum_sq3[cnt] + mul3[cnt + 1];
710 v4i32 mul3 = { 0 }; local
    [all...]
