/art/compiler/utils/
  assembler_thumb_test.cc
    341: __ mov(R3, ShifterOperand(R4, LSR, 5));
    348: __ mov(R8, ShifterOperand(R4, LSR, 5));
    [all...]
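A note on the pattern above: ShifterOperand(R4, LSR, 5) makes the second operand of the MOV a logically right-shifted copy of R4, so the line assembles as `mov r3, r4, lsr #5`. A minimal C++ sketch of the semantics only (not the ART assembler API); unsigned `>>` in C++ is a logical shift, which is exactly what LSR denotes:

    #include <cassert>
    #include <cstdint>

    // Value produced by ShifterOperand(rm, LSR, shift_imm) for 0 < shift_imm < 32:
    // zeros, not copies of the sign bit, fill the vacated high bits.
    uint32_t LsrOperand(uint32_t rm, unsigned shift_imm) {
      assert(shift_imm > 0 && shift_imm < 32);
      return rm >> shift_imm;
    }

    int main() {
      // mov r3, r4, lsr #5 with r4 = 0x80000000 gives 0x04000000 (no sign fill).
      assert(LsrOperand(0x80000000u, 5) == 0x04000000u);
      return 0;
    }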
/external/v8/src/arm64/
  debug-arm64.cc
    166: __ Lsr(scratch, reg, 32);
  regexp-macro-assembler-arm64.cc
    298: __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
    427: __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
    827: __ Add(input_length, start_offset(), Operand(w10, LSR, 1));
    837: __ Lsr(capture_end.X(), capture_start.X(), kWRegSizeInBits);
    [all...]
  macro-assembler-arm64-inl.h
    906: void MacroAssembler::Lsr(const Register& rd,
    911:   lsr(rd, rn, shift);
    915: void MacroAssembler::Lsr(const Register& rd,
    [all...]
  macro-assembler-arm64.h
    442: inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
    443: inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
    [all...]
  lithium-codegen-arm64.cc
    [all...]
  macro-assembler-arm64.cc
    465: return Operand(dst, LSR, shift_high);
    [all...]
  code-stubs-arm64.cc
    [all...]
  full-codegen-arm64.cc
    [all...]
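The regexp-macro-assembler hits shift by kWRegSizeInBits (32), which suggests two 32-bit capture values packed into a single 64-bit register, with LSR #32 recovering the high half. A plain C++ sketch of that packing trick, under that assumption (Pack is this sketch's helper, not a V8 function):

    #include <cassert>
    #include <cstdint>

    // kWRegSizeInBits = 32 as in V8's arm64 port; two 32-bit values share one
    // 64-bit register, and LSR #32 recovers the high half.
    constexpr unsigned kWRegSizeInBits = 32;

    uint64_t Pack(uint32_t lo, uint32_t hi) {
      return (static_cast<uint64_t>(hi) << kWRegSizeInBits) | lo;
    }

    int main() {
      uint64_t x = Pack(/*lo=*/7, /*hi=*/42);
      uint64_t hi = x >> kWRegSizeInBits;       // __ Lsr(x11, x, kWRegSizeInBits)
      uint32_t lo = static_cast<uint32_t>(x);   // the W view is the low half
      assert(hi == 42 && lo == 7);
      return 0;
    }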
/art/compiler/utils/arm/
  assembler_arm32.h
    207: void Lsr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
    218: void Lsr(Register rd, Register rm, Register rn, bool setcc = false,
  assembler_thumb2.h
    246: void Lsr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
    257: void Lsr(Register rd, Register rm, Register rn, bool setcc = false,
  assembler_arm.h
    624: virtual void Lsr(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
    635: virtual void Lsr(Register rd, Register rm, Register rn, bool setcc = false,
  assembler_arm32.cc
    [all...]
  assembler_thumb2.cc
    727: // However, there is no actual shift available, neither for ADD nor for MOV (ASR/LSR/LSL/ROR).
    918: case LSR: thumb_opcode = 1U /* 0b01 */; break;
    [all...]
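The `case LSR: thumb_opcode = 1U /* 0b01 */` hit reflects the standard two-bit shift-type field shared by the ARM and Thumb-2 encodings: LSL=0b00, LSR=0b01, ASR=0b10, ROR=0b11. A small illustrative mapping (the names here are this sketch's, not ART's):

    #include <cassert>
    #include <cstdint>

    enum ShiftType { kLSL, kLSR, kASR, kROR };

    // Two-bit shift-type value placed into the instruction's shift field.
    uint32_t ShiftTypeBits(ShiftType type) {
      switch (type) {
        case kLSL: return 0u;  // 0b00
        case kLSR: return 1u;  // 0b01
        case kASR: return 2u;  // 0b10
        case kROR: return 3u;  // 0b11
      }
      return 0u;  // unreachable
    }

    int main() {
      assert(ShiftTypeBits(kLSR) == 1u);  // matches thumb_opcode = 1U above
      return 0;
    }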
/external/v8/src/compiler/arm64/
  code-generator-arm64.cc
    283: ASSEMBLE_SHIFT(Lsr, 64);
    286: ASSEMBLE_SHIFT(Lsr, 32);
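ASSEMBLE_SHIFT is instantiated separately for 32- and 64-bit widths because an arm64 register-specified shift uses the count modulo the register width: mod 32 for W operations, mod 64 for X operations. A C++ model of that rule (the macro itself is V8's; this sketch only mirrors the semantics):

    #include <cassert>
    #include <cstdint>

    // Register-specified LSR on arm64: only the low bits of the count matter.
    uint32_t Lsr32(uint32_t value, uint64_t count) { return value >> (count % 32); }
    uint64_t Lsr64(uint64_t value, uint64_t count) { return value >> (count % 64); }

    int main() {
      assert(Lsr32(0x80000000u, 33) == 0x40000000u);                      // 33 % 32 == 1
      assert(Lsr64(0x8000000000000000ull, 65) == 0x4000000000000000ull);  // 65 % 64 == 1
      return 0;
    }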
/external/llvm/lib/Transforms/InstCombine/
  InstCombineInternal.h
    522: Value *SimplifyShrShlDemandedBits(Instruction *Lsr, Instruction *Sftl,
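SimplifyShrShlDemandedBits looks at shift pairs of the form (X << C1) >> C2. For the equal-shift logical case the pair is just a mask, since the right shift only clears the bits the left shift pushed out; a sketch verifying that identity (not LLVM's implementation):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    // (x << c) >> c with logical shifts equals x masked to its low 32-c bits.
    uint32_t ShlThenLshr(uint32_t x, unsigned c) { return (x << c) >> c; }
    uint32_t MaskForm(uint32_t x, unsigned c) { return x & (~0u >> c); }

    int main() {
      for (uint32_t x : {0u, 1u, 0xdeadbeefu, 0xffffffffu}) {
        for (unsigned c : {0u, 1u, 5u, 31u}) {
          assert(ShlThenLshr(x, c) == MaskForm(x, c));
        }
      }
      return 0;
    }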
/art/compiler/optimizing/
  code_generator_arm.cc
    [all...]
  code_generator_arm64.cc
    613: __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
    [all...]
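The Lsr at line 613 belongs to ART's card-marking write barrier: the object address shifted right by CardTable::kCardShift selects one byte of the card table. A simplified C++ sketch of the idea; the 1 KiB card size and the dirty value below are illustrative stand-ins, not ART's actual constants:

    #include <cstddef>
    #include <cstdint>

    // One card-table byte covers 1 << kCardShift bytes of heap; dirtying a
    // card records that a reference field somewhere in that range was written.
    constexpr unsigned kCardShift = 10;   // illustrative, not ART's value
    constexpr size_t kHeapSize = 1u << 20;
    constexpr uint8_t kCardDirty = 1;     // illustrative dirty marker

    uint8_t card_table[kHeapSize >> kCardShift];

    void MarkCard(uintptr_t heap_begin, uintptr_t object_addr) {
      // Mirrors __ Lsr(temp, object, kCardShift) followed by a byte store.
      card_table[(object_addr - heap_begin) >> kCardShift] = kCardDirty;
    }

    int main() {
      MarkCard(0x10000, 0x10000 + 4096);  // dirties card 4 under 1 KiB cards
      return card_table[4] == kCardDirty ? 0 : 1;
    }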
/external/v8/test/cctest/
  test-assembler-arm64.cc
    299: __ Mvn(w4, Operand(w0, LSR, 3));
    300: __ Mvn(x5, Operand(x1, LSR, 4));
    372: __ Mov(w15, Operand(w11, LSR, 3));
    373: __ Mov(x18, Operand(x12, LSR, 4));
    529: __ Orr(x5, x0, Operand(x1, LSR, 4));
    626: __ Orn(x5, x0, Operand(x1, LSR, 1));
    695: __ And(x5, x0, Operand(x1, LSR, 1));
    770: __ Ands(w0, w0, Operand(w1, LSR, 4));
    824: __ Bic(x5, x0, Operand(x1, LSR, 1));
    910: __ Bics(w0, w0, Operand(w0, LSR, 1))
    [all...]
/external/vixl/src/vixl/a64/
  macro-assembler-a64.h
    [all...]
/external/vixl/test/
  test-assembler-a64.cc
    292: __ Mvn(w4, Operand(w0, LSR, 3));
    293: __ Mvn(x5, Operand(x1, LSR, 4));
    465: __ Mov(w15, Operand(w11, LSR, 3));
    466: __ Mov(x18, Operand(x12, LSR, 4));
    524: __ Orr(x5, x0, Operand(x1, LSR, 4));
    618: __ Orn(x5, x0, Operand(x1, LSR, 1));
    685: __ And(x5, x0, Operand(x1, LSR, 1));
    758: __ Ands(w0, w0, Operand(w1, LSR, 4));
    811: __ Bic(x5, x0, Operand(x1, LSR, 1));
    895: __ Bics(w0, w0, Operand(w0, LSR, 1))
    [all...]
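Both test files above (the V8 copy and the vixl original, which V8's arm64 assembler derives from) exercise logical ops whose second operand carries an LSR shift; the shift applies to that operand before the op itself. A C++ model of three of the patterns (semantics only, not the vixl/V8 API):

    #include <cassert>
    #include <cstdint>

    // Second operand is shifted first, then combined with the first operand.
    uint64_t Orr(uint64_t rn, uint64_t rm, unsigned lsr) { return rn | (rm >> lsr); }
    uint64_t Bic(uint64_t rn, uint64_t rm, unsigned lsr) { return rn & ~(rm >> lsr); }
    uint32_t Mvn(uint32_t rm, unsigned lsr) { return ~(rm >> lsr); }

    int main() {
      assert(Orr(0xf0, 0x0f0, 4) == 0xff);          // __ Orr(x5, x0, Operand(x1, LSR, 4))
      assert(Bic(0xff, 0x1fe, 1) == 0x00);          // __ Bic(x5, x0, Operand(x1, LSR, 1))
      assert(Mvn(0xffffffffu, 3) == ~0x1fffffffu);  // __ Mvn(w4, Operand(w0, LSR, 3))
      return 0;
    }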