Home | Sort by relevance | Sort by last modified time
    Searched full:val4 (Results 1 - 25 of 97) sorted by null

1 2 3 4

  /external/llvm/test/CodeGen/AArch64/
compare-branch.ll 27 %val4 = load volatile i64, i64* @var64
28 %tst4 = icmp ne i64 %val4, 0
33 store volatile i64 %val4, i64* @var64
floatdp_2source.ll 16 %val4 = fdiv float %val3, %val1
19 %val5 = fsub float %val4, %val2
44 %val4 = fdiv double %val3, %val1
47 %val5 = fsub double %val4, %val2
regress-w29-reserved-with-fp.ll 13 %val4 = load volatile i32, i32* @var
28 store volatile i32 %val4, i32* @var
addsub-shifted.ll 30 %val4 = sub i32 %shift4, %lhs32
31 store volatile i32 %val4, i32* @var32
95 %val4 = sub i32 %shift4, %lhs32
96 store volatile i32 %val4, i32* @var32
154 %val4 = sub i32 %shift4, %lhs32
155 store volatile i32 %val4, i32* @var32
273 %val4 = sub i64 0, %shift4
274 %tst4 = icmp slt i64 %lhs64, %val4
callee-save.ll 18 %val4 = load volatile float, float* @var
51 store volatile float %val4, float* @var
cond-sel.ll 89 %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
90 store volatile i64 %val4, i64* @var64
129 %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
130 store volatile i64 %val4, i64* @var64
169 %val4 = select i1 %tst4, i64 %lhs64, i64 %inc4
170 store volatile i64 %val4, i64* @var64
  /external/llvm/test/CodeGen/SystemZ/
vec-cmp-01.ll 112 <16 x i8> %val3, <16 x i8> %val4) {
118 %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
124 <16 x i8> %val3, <16 x i8> %val4) {
130 %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
136 <16 x i8> %val3, <16 x i8> %val4) {
142 %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
148 <16 x i8> %val3, <16 x i8> %val4) {
154 %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
160 <16 x i8> %val3, <16 x i8> %val4) {
166 %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
    [all...]
vec-cmp-02.ll 112 <8 x i16> %val3, <8 x i16> %val4) {
118 %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
124 <8 x i16> %val3, <8 x i16> %val4) {
130 %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
136 <8 x i16> %val3, <8 x i16> %val4) {
142 %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
148 <8 x i16> %val3, <8 x i16> %val4) {
154 %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
160 <8 x i16> %val3, <8 x i16> %val4) {
166 %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
    [all...]
vec-cmp-03.ll 112 <4 x i32> %val3, <4 x i32> %val4) {
118 %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
124 <4 x i32> %val3, <4 x i32> %val4) {
130 %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
136 <4 x i32> %val3, <4 x i32> %val4) {
142 %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
148 <4 x i32> %val3, <4 x i32> %val4) {
154 %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
160 <4 x i32> %val3, <4 x i32> %val4) {
166 %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
    [all...]
vec-cmp-04.ll 112 <2 x i64> %val3, <2 x i64> %val4) {
118 %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
124 <2 x i64> %val3, <2 x i64> %val4) {
130 %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
136 <2 x i64> %val3, <2 x i64> %val4) {
142 %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
148 <2 x i64> %val3, <2 x i64> %val4) {
154 %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
160 <2 x i64> %val3, <2 x i64> %val4) {
166 %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
    [all...]
vec-cmp-06.ll 165 <2 x double> %val3, <2 x double> %val4) {
171 %ret = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
177 <2 x double> %val3, <2 x double> %val4) {
185 %ret = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
191 <2 x double> %val3, <2 x double> %val4) {
197 %ret = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
203 <2 x double> %val3, <2 x double> %val4) {
209 %ret = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
215 <2 x double> %val3, <2 x double> %val4) {
221 %ret = select <2 x i1> %cmp, <2 x double> %val3, <2 x double> %val4
    [all...]
spill-01.ll 51 %val4 = load i32 , i32 *%ptr4
61 store i32 %val4, i32 *%ptr4
89 %val4 = load i32 , i32 *%ptr4
101 store i32 %val4, i32 *%ptr4
131 %val4 = load i64 , i64 *%ptr4
143 store i64 %val4, i64 *%ptr4
177 %val4 = load float , float *%ptr4
190 store float %val4, float *%ptr4
221 %val4 = load double , double *%ptr4
234 store double %val4, double *%ptr
    [all...]
vec-cmp-05.ll 308 <4 x float> %val3, <4 x float> %val4) {
314 %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
320 <4 x float> %val3, <4 x float> %val4) {
326 %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
332 <4 x float> %val3, <4 x float> %val4) {
338 %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
344 <4 x float> %val3, <4 x float> %val4) {
350 %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
356 <4 x float> %val3, <4 x float> %val4) {
362 %ret = select <4 x i1> %cmp, <4 x float> %val3, <4 x float> %val4
    [all...]
int-add-11.ll 141 %val4 = load volatile i32 , i32 *%ptr
162 %add4 = add i32 %val4, 127
181 %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
224 %val4 = load volatile i32 , i32 *%ptr
245 %add4 = add i32 %val4, -128
264 %new4 = phi i32 [ %val4, %entry ], [ %add4, %add ]
  /packages/apps/Email/tests/src/com/android/emailcommon/mail/
PackedStringTests.java 41 DELIMITER_ELEMENT + "val4" + DELIMITER_TAG + "tag4";
49 assertEquals("val4", ps.get("tag4"));
79 assertEquals("val4", b.get("tag4"));
90 assertEquals("val4", b.get("tag4"));
100 assertEquals("val4", b2.get("tag4"));
  /external/llvm/test/CodeGen/ARM/
gpr-paired-spill-thumbinst.ll 12 %val4 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
25 store volatile i64 %val4, i64* %addr
vldm-liveness.ll 30 %val4 = load float, float* %off4
36 %vec3 = insertelement <4 x float> %vec2, float %val4, i32 2
gpr-paired-spill.ll 9 %val4 = tail call i64 asm sideeffect "ldrexd $0, ${0:H}, [r0]", "=&r,r"(i64* %addr)
39 store volatile i64 %val4, i64* %addr
  /external/llvm/test/CodeGen/Mips/
nacl-reserved-regs.ll 11 %val4 = load volatile i32, i32* @var
27 store volatile i32 %val4, i32* @var
  /cts/suite/audio_quality/test/
TaskCaseTest.cpp 74 TaskCase::Value val4((int64_t)2);
89 ASSERT_TRUE(mTaskCase->registerValue(V4, val4));
98 ASSERT_TRUE(!mTaskCase->updateValue(V5, val4));
105 ((list->front().second == val3) && (list->back().second == val4)));
  /external/llvm/test/CodeGen/AMDGPU/
ds_read2_offset_order.ll 34 %val4 = load float, float addrspace(3)* %ptr4
35 %add4 = fadd float %add3, %val4
  /external/llvm/test/TableGen/
Dag.td 67 def VAL4 : bar<foo2, somedef2>;
68 // CHECK: def VAL4 {
  /external/llvm/test/CodeGen/X86/
tailcallbyval64.ll 33 declare fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5)
  /external/deqp/framework/delibs/debase/
deSha1.c 217 deUint8 val4; local
220 val4 = (deUint8)(buffer[charNdx] - '0');
222 val4 = (deUint8)(10 + (buffer[charNdx] - 'a'));
224 val4 = (deUint8)(10 + (buffer[charNdx] - 'A'));
228 hash->hash[charNdx / 8] |= ((deUint32)val4) << (4 * (8u - 1u - (charNdx % 8u)));
  /external/elfutils/tests/
cleanup-13.c 20 #define VAL4(x) ((x)>>24)&0xff,((x)>>16)&0xff,((x)>>8)&0xff,(x)&0xff
24 #define VAL4(x) (x)&0xff,((x)>>8)&0xff,((x)>>16)&0xff,((x)>>24)&0xff
31 #define OP_const4u(x) 0x0c,VAL4(x),
32 #define OP_const4s(x) 0x0d,VAL4(x),
173 #define OP_call4(x) 0x99,VAL4(x),

Completed in 241 milliseconds

1 2 3 4