Home | Sort by relevance | Sort by last modified time
    Searched full:add4 (Results 76 - 100 of 124) sorted by null

1 2 3 4 5

  /external/llvm/test/CodeGen/X86/
combine-multiplies.ll 49 %add4 = add nsw i32 %lll, 25
50 %arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %a, i32 %add4, i32 20
sse3-avx-addsub-2.ll 235 %add4 = fadd float %15, %16
241 %vecinsert6 = insertelement <8 x float> %vecinsert5, float %add4, i32 7
  /external/libvpx/libvpx/vpx_dsp/mips/
idct16x16_msa.c 36 ADD4(reg2, loc1, reg14, loc0, reg6, loc3, reg10, loc2, reg2, reg14, reg6,
282 ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
283 ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);
fwd_txfm_msa.h 24 ADD4(in4_m, in5_m, in6_m, in7_m, in0_m, in2_m, in4_m, in6_m, \
  /external/llvm/test/Transforms/SLPVectorizer/X86/
phi.ll 80 %R.017 = phi double [ %0, %entry ], [ %add4, %for.body ]
85 %add4 = fadd double %mul, 4.000000e+00
94 store double %add4, double* %arrayidx7, align 8
cse.ll 29 %add4 = fadd double %mul3, 6.000000e+00
31 store double %add4, double* %arrayidx5, align 8
addsub.ll 37 %add4 = add nsw i32 %6, %7
38 %sub = sub nsw i32 %add3, %add4
82 %add4 = add nsw i32 %add2, %add3
83 store i32 %add4, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i32 0, i64 1), align 4
  /external/llvm/test/CodeGen/Mips/
tailcall.ll 108 %add4 = add nsw i32 %add3, %a5
109 %add5 = add nsw i32 %add4, %a6
return-vector.ll 27 %add4 = add i32 %v6, %v7
29 %add6 = add i32 %add3, %add4
  /external/llvm/test/Transforms/LoopVectorize/
if-conversion.ll 94 %add4 = add i32 %add, %0
98 %sum.1 = phi i32 [ %add4, %if.then ], [ %sum.011, %for.body ]
interleaved-accesses.ll 401 %add4 = add i32 %tmp2, %i.013
402 store i32 %add4, i32* %incdec.ptr1, align 4
  /external/libvpx/libvpx/vp8/encoder/mips/msa/
dct_msa.c 195 ADD4(in0_w, 3, in1_w, 3, in2_w, 3, in3_w, 3, in0_w, in1_w, in2_w, in3_w);
  /external/llvm/test/CodeGen/AMDGPU/
salu-to-valu.ll 313 %add4 = add i32 %add3, %elt5
314 %add5 = add i32 %add4, %elt6
385 %add4 = add i32 %add3, %elt5
386 %add5 = add i32 %add4, %elt6
  /external/llvm/test/CodeGen/AArch64/
arm64-abi.ll 31 %add4 = add i64 %add2, %a8
32 %add5 = add i64 %add4, %conv8
arm64-narrow-ldst-merge.ll 58 %add4 = sub nuw nsw i16 %l1, %l0
59 %add9 = udiv i16 %add4, %l2
  /external/llvm/test/Analysis/DependenceAnalysis/
SymbolicSIV.ll 83 %add4 = add i64 %mul2, %mul3
84 %arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %add4
  /external/llvm/test/CodeGen/ARM/
coalesce-subregs.ll 169 %add4 = fadd float %vecext, %1
178 %a.0 = phi float [ %add4, %if.then ], [ %vecext, %entry ]
ehabi.ll 379 %add4 = add nsw i32 %add3, %g
380 %add5 = add nsw i32 %add4, %h
debug-frame.ll 426 %add4 = add nsw i32 %add3, %g
427 %add5 = add nsw i32 %add4, %h
  /external/llvm/test/Transforms/LoopStrengthReduce/X86/
ivchain-X86.ll 143 %add4 = add i32 %add, %5
144 %add5 = add i32 %add4, %7
  /toolchain/binutils/binutils-2.25/gas/testsuite/gas/tic6x/
insns-bad-1.s 189 add4 .S1 a1,a2,a3
190 add4 .L1 a1,a2,a3,a4
191 add4 .L1 b1,a1,a2
192 add4 .L2X b1,b2,b3
  /external/llvm/test/Transforms/LoopIdiom/
basic.ll 374 %add4 = add nsw i32 %tmp5, 5
375 %idxprom5 = sext i32 %add4 to i64
  /external/llvm/test/Transforms/LoopStrengthReduce/ARM/
ivchain-ARM.ll 120 %add4 = add i32 %add, %5
121 %add5 = add i32 %add4, %7
  /external/llvm/test/CodeGen/PowerPC/
BreakableToken-reduced.ll 246 %add4 = add i64 %add, %1
247 %conv5 = trunc i64 %add4 to i32
  /external/llvm/test/CodeGen/SystemZ/
asm-18.ll 626 %add4 = add i32 %res3, 128
628 "=r,h,0"(i32 %res3, i32 %add4)

Completed in 858 milliseconds

1 2 3 4 5