    Searched full:val6 (Results 1 - 25 of 70)


  /external/llvm/test/CodeGen/AArch64/
floatdp_2source.ll 25 %val6 = fmul float %val1, %val2
26 %val7 = fsub float -0.0, %val6
53 %val6 = fmul double %val1, %val2
54 %val7 = fsub double -0.0, %val6
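    These floatdp_2source.ll hits exercise the multiply-negate fold: an fmul whose result is negated by an fsub from -0.0 can lower to a single AArch64 fnmul. A minimal sketch of the pattern, with an illustrative function name not taken from the test:

      define float @negated_mul(float %a, float %b) {
        ; fmul feeding an fsub from -0.0 is a negated multiply;
        ; AArch64 can select one fnmul for the pair
        %prod = fmul float %a, %b
        %neg = fsub float -0.0, %prod
        ret float %neg
      }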
regress-w29-reserved-with-fp.ll 15 %val6 = load volatile i32, i32* @var
30 store volatile i32 %val6, i32* @var
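    regress-w29-reserved-with-fp.ll keeps many volatile loads live at once so the allocator runs out of ordinary GPRs; with a frame pointer in use, w29 must still never be handed out. A trimmed sketch of the idea (the real test keeps far more values live; names are illustrative):

      @var = global i32 0

      define void @reserved_fp_pressure() {
        ; every value is live across every other, so each needs its own
        ; register; the check is that w29 is never among them
        %val1 = load volatile i32, i32* @var
        %val2 = load volatile i32, i32* @var
        %val3 = load volatile i32, i32* @var
        store volatile i32 %val1, i32* @var
        store volatile i32 %val2, i32* @var
        store volatile i32 %val3, i32* @var
        ret void
      }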
addsub-shifted.ll 48 %val6 = add i64 %shift6, %lhs64
49 store volatile i64 %val6, i64* @var64
110 %val6 = add i64 %shift6, %lhs64
111 store volatile i64 %val6, i64* @var64
169 %val6 = add i64 %shift6, %lhs64
170 store volatile i64 %val6, i64* @var64
289 %val6 = sub i64 0, %shift6
290 %tst6 = icmp ne i64 %lhs64, %val6
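    The addsub-shifted.ll hits share one shape: a shift feeding an add, sub, or compare, which AArch64 can fold into the shifted-register form of the arithmetic instruction. A minimal sketch of both variants seen above (shift amounts and names are illustrative):

      define i64 @fold_into_add(i64 %lhs64, i64 %rhs64) {
        ; the shl should fold into ADD's shifted-register operand
        %shift6 = shl i64 %rhs64, 18
        %val6 = add i64 %shift6, %lhs64
        ret i64 %val6
      }

      define i1 @fold_into_cmn(i64 %lhs64, i64 %rhs64) {
        ; comparing against a negated shift can become CMN with a shifted register
        %shift6 = lshr i64 %rhs64, 48
        %val6 = sub i64 0, %shift6
        %tst6 = icmp ne i64 %lhs64, %val6
        ret i1 %tst6
      }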
callee-save.ll 20 %val6 = load volatile float, float* @var
53 store volatile float %val6, float* @var
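    callee-save.ll applies the same volatile-pressure trick to floats: with enough FP values simultaneously live, the allocator must reach into the callee-saved FP registers, whose prologue saves and epilogue restores can then be checked. A trimmed sketch, assuming the same @var scaffolding as above:

      @var = global float 0.0

      define void @float_pressure() {
        ; enough live floats to push allocation into callee-saved FP registers
        %val1 = load volatile float, float* @var
        %val2 = load volatile float, float* @var
        store volatile float %val1, float* @var
        store volatile float %val2, float* @var
        ret void
      }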
  /external/llvm/test/CodeGen/SystemZ/
spill-01.ll 53 %val6 = load i32 , i32 *%ptr6
63 store i32 %val6, i32 *%ptr6
91 %val6 = load i32 , i32 *%ptr6
103 store i32 %val6, i32 *%ptr6
133 %val6 = load i64 , i64 *%ptr6
145 store i64 %val6, i64 *%ptr6
179 %val6 = load float , float *%ptr6
192 store float %val6, float *%ptr6
223 %val6 = load double , double *%ptr6
236 store double %val6, double *%ptr
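    spill-01.ll forces spills by loading a batch of values, making a call, and storing them all back: anything live across the call must sit in a call-saved register or be spilled to the stack. A two-value sketch of the shape (the real tests use enough pointers to exceed the call-saved registers; @foo is a stand-in):

      declare void @foo()

      define void @spill_across_call(i32 *%ptr0) {
        %ptr1 = getelementptr i32, i32 *%ptr0, i64 1
        %val0 = load i32, i32 *%ptr0
        %val1 = load i32, i32 *%ptr1
        ; the call clobbers the call-clobbered registers, so %val0 and %val1
        ; must survive somewhere else
        call void @foo()
        store i32 %val0, i32 *%ptr0
        store i32 %val1, i32 *%ptr1
        ret void
      }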
int-add-11.ll 143 %val6 = load volatile i32 , i32 *%ptr
164 %add6 = add i32 %val6, 127
183 %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
226 %val6 = load volatile i32 , i32 *%ptr
247 %add6 = add i32 %val6, -128
266 %new6 = phi i32 [ %val6, %entry ], [ %add6, %add ]
int-add-12.ll 142 %val6 = load volatile i64 , i64 *%ptr
163 %add6 = add i64 %val6, 127
182 %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
225 %val6 = load volatile i64 , i64 *%ptr
246 %add6 = add i64 %val6, -128
265 %new6 = phi i64 [ %val6, %entry ], [ %add6, %add ]
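    int-add-11.ll and int-add-12.ll probe the signed 8-bit immediate boundary (127 and -128) for adds whose operand comes straight from memory and goes straight back, a pattern SystemZ can turn into a single add-immediate-to-storage instruction. A minimal sketch of one such round trip (the real tests wrap this in control flow via the phis shown above):

      define void @add_imm_to_mem(i64 *%ptr) {
        ; load-add-store with an immediate in [-128, 127]; on SystemZ this
        ; can select a single memory add (ASI for i32, AGSI for i64)
        %val = load i64, i64 *%ptr
        %add = add i64 %val, 127
        store i64 %add, i64 *%ptr
        ret void
      }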
fp-conv-02.ll 87 %val6 = load volatile float , float *%ptr2
105 %ext6 = fpext float %val6 to double
123 store volatile float %val6, float *%ptr2
fp-conv-03.ll 103 %val6 = load volatile float , float *%ptr2
121 %ext6 = fpext float %val6 to fp128
139 store volatile float %val6, float *%ptr2
fp-conv-04.ll 103 %val6 = load volatile double , double *%ptr2
121 %ext6 = fpext double %val6 to fp128
139 store volatile double %val6, double *%ptr2
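    The three fp-conv tests run the same recipe for each fpext widening (float to double, float to fp128, double to fp128): volatile-load a batch of values, extend them all, then store the originals back so the narrow and wide values are live together. One iteration of the pattern, with an illustrative @dst standing in for however the real tests keep the extended value alive:

      @dst = global double 0.0

      define void @extend_one(float *%ptr2) {
        %val6 = load volatile float, float *%ptr2
        %ext6 = fpext float %val6 to double
        store volatile double %ext6, double* @dst
        ; the original stays live past the extension
        store volatile float %val6, float *%ptr2
        ret void
      }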
fp-sqrt-01.ll 88 %val6 = load volatile float , float *%ptr
106 %sqrt6 = call float @llvm.sqrt.f32(float %val6)
124 store volatile float %val6, float *%ptr
fp-sqrt-02.ll 90 %val6 = load volatile double , double *%ptr
108 %sqrt6 = call double @llvm.sqrt.f64(double %val6)
126 store volatile double %val6, double *%ptr
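    fp-sqrt-01.ll and fp-sqrt-02.ll wrap the llvm.sqrt intrinsics in the same volatile load/store scaffolding; the point is that each call lowers to a native square-root instruction rather than a libcall. The register-operand case in isolation:

      declare float @llvm.sqrt.f32(float)

      define float @sqrt_in_reg(float %val6) {
        ; expected to select SystemZ's register-form square root (SQEBR)
        %sqrt6 = call float @llvm.sqrt.f32(float %val6)
        ret float %sqrt6
      }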
and-03.ll 121 %val6 = load i64 , i64 *%ptr6
134 %and6 = and i64 %and5, %val6
fp-add-01.ll 98 %val6 = load float , float *%ptr6
112 %add6 = fadd float %add5, %val6
fp-add-02.ll 99 %val6 = load double , double *%ptr6
113 %add6 = fadd double %add5, %val6
fp-div-01.ll 98 %val6 = load float , float *%ptr6
112 %div6 = fdiv float %div5, %val6
fp-div-02.ll 100 %val6 = load double , double *%ptr6
114 %div6 = fdiv double %div5, %val6
fp-mul-01.ll 98 %val6 = load float , float *%ptr6
112 %mul6 = fmul float %mul5, %val6
fp-mul-03.ll 100 %val6 = load double , double *%ptr6
114 %mul6 = fmul double %mul5, %val6
fp-sub-01.ll 98 %val6 = load float , float *%ptr6
112 %sub6 = fsub float %sub5, %val6
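    and-03.ll and the fp-add/fp-div/fp-mul/fp-sub tests all build one long dependence chain: every loaded value feeds the running result in order, so all of them stay live until consumed, and the later operands can be checked in their memory-operand instruction forms. A three-element version of the chain (the real tests extend it well past the register count):

      define float @fadd_chain(float *%ptr0) {
        %ptr1 = getelementptr float, float *%ptr0, i64 1
        %ptr2 = getelementptr float, float *%ptr0, i64 2
        %val0 = load float, float *%ptr0
        %val1 = load float, float *%ptr1
        %val2 = load float, float *%ptr2
        ; each fadd depends on the previous one, pinning every %valN live
        %add1 = fadd float %val0, %val1
        %add2 = fadd float %add1, %val2
        ret float %add2
      }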
  /external/llvm/test/CodeGen/ARM/
gpr-paired-spill-thumbinst.ll 14 %val6 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
27 store volatile i64 %val6, i64* %addr
gpr-paired-spill.ll 11 %val6 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
41 store volatile i64 %val6, i64* %addr
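    Both gpr-paired-spill tests hinge on the ${0:H} operand modifier: the ldrexd asm's 64-bit output occupies an even/odd GPR pair (low half in $0, high half in ${0:H}), and keeping several such pairs live forces paired spills and reloads. One instance in isolation, taken directly from the hits above:

      define void @one_paired_output(i64* %addr) {
        ; =&r on an i64 claims a register pair; :H names its high half
        %val6 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
        store volatile i64 %val6, i64* %addr
        ret void
      }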
inlineasm-64bit.ll 13 define void @multi_writes(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind {
37 tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %p, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
39 tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
40 tail call void asm sideeffect " strexd $1, ${1:H}, [$0]\0A strexd $2, ${2:H}, [$0]\0A strexd $3, ${3:H}, [$0]\0A strexd $4, ${4:H}, [$0]\0A strexd $5, ${5:H}, [$0]\0A strexd $6, ${6:H}, [$0]\0A", "r,r,r,r,r,r,r"(i64* %incdec.ptr, i64 %val1, i64 %val2, i64 %val3, i64 %val4, i64 %val5, i64 %val6) nounwind
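    multi_writes in inlineasm-64bit.ll is the store-side counterpart: each strexd line consumes one 64-bit input as a register pair via $N/${N:H}, so the six i64 arguments tie up twelve GPRs at once. A single-input reduction of the same asm:

      define void @one_write(i64* %p, i64 %val1) {
        ; $1 and ${1:H} are the low and high registers of the i64 pair
        tail call void asm sideeffect "strexd $1, ${1:H}, [$0]", "r,r"(i64* %p, i64 %val1)
        ret void
      }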
  /external/llvm/test/CodeGen/Mips/
nacl-reserved-regs.ll 13 %val6 = load volatile i32, i32* @var
29 store volatile i32 %val6, i32* @var
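    nacl-reserved-regs.ll is the Mips analogue of the w29 regression above: volatile loads keep enough i32 values live to exhaust the ordinary registers, and the check is that the allocator still avoids whichever registers the NaCl sandbox reserves. The shape, trimmed (the real test keeps many more values live):

      @var = global i32 0

      define void @avoid_reserved() {
        %val1 = load volatile i32, i32* @var
        %val2 = load volatile i32, i32* @var
        ; even under pressure, NaCl-reserved registers must stay unused
        store volatile i32 %val1, i32* @var
        store volatile i32 %val2, i32* @var
        ret void
      }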
  /external/llvm/test/CodeGen/AMDGPU/
ds_read2_offset_order.ll 42 %val6 = load float, float addrspace(3)* %ptr6
43 %add6 = fadd float %add5, %val6
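    ds_read2_offset_order.ll checks that neighbouring LDS loads get paired into a single two-offset read (ds_read2_b32 on AMDGPU) even when the offsets appear out of order in the IR. A two-load sketch under that assumption; the @lds array and function shape are illustrative:

      @lds = addrspace(3) global [8 x float] undef, align 4

      define void @paired_lds_read(float addrspace(1)* %out) {
        %ptr0 = getelementptr [8 x float], [8 x float] addrspace(3)* @lds, i32 0, i32 0
        %ptr6 = getelementptr [8 x float], [8 x float] addrspace(3)* @lds, i32 0, i32 6
        ; two loads with ds_read2-encodable offsets should merge into one instruction
        %val0 = load float, float addrspace(3)* %ptr0
        %val6 = load float, float addrspace(3)* %ptr6
        %add6 = fadd float %val0, %val6
        store float %add6, float addrspace(1)* %out
        ret void
      }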

