/external/llvm/test/CodeGen/X86/ |
2012-08-07-CmpISelBug.ll | 17 %tmp57 = xor i32 %tmp56, 1601159181
18 %tmp58 = xor i32 %arg5, 1601159181
22 %tmp103 = xor i32 %tmp56, 13
30 %tmp143 = xor i8 %tmp142, 81
|
fastisel-gep-promote-before-add.ll | 28 %xor = xor i8 %0, -128 ; %0 ^ 0x80
29 %add = add i8 %xor, -127 ; %xor + 0x81
|
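Judging by the file name and the hex comments, the point of the two lines above is that the i8 add has to wrap before the value is widened into a GEP index; widening first changes the result. A small C illustration of that arithmetic (variable names are mine, not the test's):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint8_t x = 0;                              /* any input that makes the add wrap will do */
        uint8_t xor8 = x ^ 0x80u;                   /* %xor = xor i8 %0, -128   -> 0x80 */
        uint8_t add8 = (uint8_t)(xor8 + 0x81u);     /* %add = add i8 %xor, -127 -> wraps to 0x01 */
        uint32_t widened = (uint32_t)xor8 + 0x81u;  /* same add done after widening: 0x101 */

        assert(add8 == 0x01u);
        assert(widened == 0x101u);                  /* differs once the i8 truncation is lost */
        return 0;
    }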
negative-sin.ll | 2 ; CHECK-NOT: {{addsd|subsd|xor}}
|
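The CHECK-NOT above (the swiftshader copy further down repeats it) rules out any leftover negation code; on x86, negating a double is typically a xor of its IEEE-754 sign bit, which is presumably why xor is listed next to addsd/subsd. A standalone C check of that sign-bit trick:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
        double x = 3.5, nx;
        uint64_t bits;

        memcpy(&bits, &x, sizeof bits);
        bits ^= 0x8000000000000000ull;   /* flip the IEEE-754 sign bit */
        memcpy(&nx, &bits, sizeof nx);

        assert(nx == -x);                /* holds for any non-NaN x */
        return 0;
    }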
/external/llvm/test/MC/Hexagon/ |
inst_xor64.ll | 6 %1 = xor i64 %a, %b
|
/external/llvm/test/Transforms/InstCombine/ |
2007-01-18-VectorInfLoop.ll | 4 %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
|
2012-04-30-SRem.ll | 5 %y = xor i32 %x, 3
|
sub-xor.ll | 10 ; CHECK-NEXT: xor i32 %and, 63
23 ; CHECK-NEXT: xor i32 %count, 31
29 %sub = xor i32 31, %and
40 %sub = xor i32 %x, 2147483648
|
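The folds these lines check rest on two small identities: subtracting X from a constant mask that covers all of X's possible set bits cannot borrow, so C - X == C ^ X, and adding or subtracting 0x80000000 modulo 2^32 only flips the sign bit. A standalone C check (constants chosen to mirror the snippets, not copied from the test bodies):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint32_t x = 0xDEADBEEFu;        /* arbitrary */
        uint32_t masked = x & 31u;       /* every set bit of masked is also set in 31 */

        assert(31u - masked == (31u ^ masked));     /* no borrows, so sub == xor */

        /* 0x80000000 only touches the sign bit, so add/sub of it is an xor */
        assert(x + 0x80000000u == (x ^ 0x80000000u));
        assert(x - 0x80000000u == (x ^ 0x80000000u));
        return 0;
    }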
bit-tracking.ll | 6 ; Reduce down to a single XOR
9 %tmp.5 = xor i32 %ELIMinc, 1 ; <i32> [#uses=1]
16 ; incrementing a single-bit bitfield, which should become just an xor.
|
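The last comment above states the intended fold: incrementing a single-bit bitfield is just toggling it. As a quick C check of that equivalence:

    #include <assert.h>

    int main(void) {
        /* for a 1-bit field, increment-then-truncate equals xor with 1 */
        for (unsigned b = 0; b <= 1; ++b)
            assert(((b + 1u) & 1u) == (b ^ 1u));
        return 0;
    }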
/external/swiftshader/third_party/LLVM/test/CodeGen/Alpha/ |
bic.ll | 6 %tmp.1 = xor i64 %x, -1 ; <i64> [#uses=1]
|
ornot.ll | 6 %tmp.1 = xor i64 %x, -1 ; <i64> [#uses=1]
|
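Both Alpha tests above spell bitwise NOT as xor with -1, since LLVM IR has no dedicated not instruction; the file names suggest the complemented value then feeds an and (bic) or an or (ornot). A small C check of the identities involved (the and/or pairing is an assumption, the snippets above only show the xor):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint64_t x = 0x0123456789ABCDEFull;
        uint64_t y = 0xFF00FF00FF00FF00ull;

        assert((x ^ ~0ull) == ~x);                 /* xor i64 %x, -1 is bitwise NOT */
        assert((y & (x ^ ~0ull)) == (y & ~x));     /* and-with-complement ("bic") form */
        assert((y | (x ^ ~0ull)) == (y | ~x));     /* or-with-complement ("ornot") form */
        return 0;
    }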
/external/swiftshader/third_party/LLVM/test/CodeGen/MSP430/ |
Inst8mm.ll | 46 define void @xor() nounwind {
47 ; CHECK: xor:
48 ; CHECK: xor.b &bar, &foo
51 %3 = xor i8 %2, %1
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
2009-07-09-ExtractBoolFromVector.ll | 5 %1 = xor <4 x i1> zeroinitializer, < i1 true, i1 true, i1 true, i1 true >
|
negative-sin.ll | 2 ; CHECK-NOT: {{addsd|subsd|xor}}
|
x86-shifts.ll | 13 %K = xor <4 x i32> %B, %C
24 %K = xor <4 x i32> %B, %C
35 %K = xor <4 x i32> %B, %C
46 %K = xor <2 x i64> %B, %C
57 %K = xor <2 x i64> %B, %C
69 %K = xor <8 x i16> %B, %C
80 %K = xor <8 x i16> %B, %C
91 %K = xor <8 x i16> %B, %C
105 %K = xor <8 x i16> %B, %C
117 %K = xor <2 x i64> %B, % [all...]
|
/external/swiftshader/third_party/LLVM/test/Transforms/InstCombine/ |
2007-01-18-VectorInfLoop.ll | 4 %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
|
zext-fold.ll | 8 %tmp = xor i8 %tmp34, 1 ; <i8> [#uses=1]
|
/device/linaro/bootloader/edk2/CryptoPkg/Library/IntrinsicLib/Ia32/ |
MathLShiftS64.c | 44 xor eax, eax
50 xor eax,eax
51 xor edx,edx
|
MathRShiftU64.c | 44 xor edx, edx
53 xor eax, eax
54 xor edx, edx
|
/device/linaro/bootloader/edk2/StdLib/LibC/CRT/Ia32/ |
llshl.c | 44 xor eax, eax
50 xor eax,eax
51 xor edx,edx
|
ullshr.c | 44 xor edx, edx
53 xor eax, eax
54 xor edx, edx
|
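The four Ia32 helpers above build 64-bit shifts out of 32-bit halves, and the matched xor lines are the register-zeroing on the over-shift paths (count >= 32 clears one half, count >= 64 clears both). A C sketch of that structure for the left-shift case, with the low/high halves standing in for EAX/EDX; this is an illustration of the logic, not the EDK2 source (the unsigned right shift mirrors it with the halves swapped):

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative 64-bit logical left shift built from 32-bit halves. */
    static uint64_t lshift64(uint32_t lo, uint32_t hi, unsigned count)
    {
        if (count >= 64) {              /* both halves zeroed: xor eax,eax / xor edx,edx */
            lo = 0;
            hi = 0;
        } else if (count >= 32) {       /* low half zeroed: xor eax, eax */
            hi = lo << (count - 32);
            lo = 0;
        } else if (count > 0) {
            hi = (hi << count) | (lo >> (32 - count));
            lo <<= count;
        }
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void) {
        uint64_t v = 0x0000000180000001ull;
        for (unsigned c = 0; c < 70; ++c) {
            uint64_t expect = (c >= 64) ? 0 : (v << c);
            assert(lshift64((uint32_t)v, (uint32_t)(v >> 32), c) == expect);
        }
        return 0;
    }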
/external/llvm/test/CodeGen/SystemZ/ |
atomicrmw-xor-01.ll | 7 ; Check XOR of a variable.
44 %res = atomicrmw xor i8 *%src, i8 %b seq_cst
48 ; Check the minimum signed value. We XOR the rotated word with 0x80000000.
73 %res = atomicrmw xor i8 *%src, i8 -128 seq_cst
77 ; Check XORs of -1. We XOR the rotated word with 0xff000000.
87 %res = atomicrmw xor i8 *%src, i8 -1 seq_cst
91 ; Check XORs of 1. We XOR the rotated word with 0x01000000.
101 %res = atomicrmw xor i8 *%src, i8 1 seq_cst
105 ; Check the maximum signed value. We XOR the rotated word with 0x7f000000.
115 %res = atomicrmw xor i8 *%src, i8 127 seq_cs [all...]
atomicrmw-xor-02.ll | 7 ; Check XOR of a variable.
44 %res = atomicrmw xor i16 *%src, i16 %b seq_cst
48 ; Check the minimum signed value. We XOR the rotated word with 0x80000000.
73 %res = atomicrmw xor i16 *%src, i16 -32768 seq_cst
77 ; Check XORs of -1. We XOR the rotated word with 0xffff0000.
87 %res = atomicrmw xor i16 *%src, i16 -1 seq_cst
91 ; Check XORs of 1. We XOR the rotated word with 0x00010000.
101 %res = atomicrmw xor i16 *%src, i16 1 seq_cst
105 ; Check the maximum signed value. We XOR the rotated word with 0x7fff0000.
115 %res = atomicrmw xor i16 *%src, i16 32767 seq_cs [all...]
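The hex constants quoted in the comments above follow from keeping the narrow operand in the high bits of a 32-bit word: xor with an i8 constant becomes xor with that byte shifted up 24 bits, and with an i16 constant, 16 bits. A quick arithmetic check (not taken from the tests):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        /* i8 operand held in the top byte of the word */
        assert((((uint32_t)(uint8_t)-128) << 24) == 0x80000000u);
        assert((((uint32_t)(uint8_t)-1)   << 24) == 0xff000000u);
        assert((((uint32_t)(uint8_t)1)    << 24) == 0x01000000u);
        assert((((uint32_t)(uint8_t)127)  << 24) == 0x7f000000u);

        /* i16 operand held in the top halfword */
        assert((((uint32_t)(uint16_t)-32768) << 16) == 0x80000000u);
        assert((((uint32_t)(uint16_t)-1)     << 16) == 0xffff0000u);
        assert((((uint32_t)(uint16_t)1)      << 16) == 0x00010000u);
        assert((((uint32_t)(uint16_t)32767)  << 16) == 0x7fff0000u);
        return 0;
    }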
vec-or-02.ll | 10 %not = xor <16 x i8> %val3, <i8 -1, i8 -1, i8 -1, i8 -1,
20 ; ...and again with the XOR applied to the other operand of the AND.
25 %not = xor <16 x i8> %val3, <i8 -1, i8 -1, i8 -1, i8 -1,
40 %not = xor <8 x i16> %val3, <i16 -1, i16 -1, i16 -1, i16 -1,
48 ; ...and again with the XOR applied to the other operand of the AND.
53 %not = xor <8 x i16> %val3, <i16 -1, i16 -1, i16 -1, i16 -1,
66 %not = xor <4 x i32> %val3, <i32 -1, i32 -1, i32 -1, i32 -1>
73 ; ...and again with the XOR applied to the other operand of the AND.
78 %not = xor <4 x i32> %val3, <i32 -1, i32 -1, i32 -1, i32 -1>
90 %not = xor <2 x i64> %val3, <i64 -1, i64 -1 [all...]
/external/llvm/test/MC/ELF/ |
got-relaxed-rex.s | 14 xor xor@GOTPCREL(%rip), %rax
27 // CHECK-NEXT: R_X86_64_REX_GOTPCRELX xor
|
/external/llvm/test/Transforms/Reassociate/ |
otherops.ll | 1 ; Reassociation should apply to Add, Mul, And, Or, & Xor
39 %tmp1 = xor i32 12, %arg
40 %tmp2 = xor i32 %tmp1, 12
|
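The last two lines above are the Xor case of the file's opening comment: xor is associative, commutative, and self-inverse, so the two constant 12 operands can be grouped together and cancel, presumably leaving just %arg. In plain C:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint32_t arg = 0xCAFEBABEu;
        uint32_t tmp1 = 12u ^ arg;     /* %tmp1 = xor i32 12, %arg  */
        uint32_t tmp2 = tmp1 ^ 12u;    /* %tmp2 = xor i32 %tmp1, 12 */

        assert(tmp2 == (arg ^ (12u ^ 12u)));   /* reassociated form */
        assert(tmp2 == arg);                   /* constants cancel  */
        return 0;
    }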