Home | Sort by relevance | Sort by last modified time
    Searched refs:Lsl (Results 1 - 18 of 18) sorted by null

  /art/compiler/utils/
assembler_thumb_test.cc 411 __ mov(R3, ShifterOperand(R4, LSL, 4));
418 __ mov(R8, ShifterOperand(R4, LSL, 4));
    [all...]
  /art/compiler/utils/arm/
assembler_arm.h 199 am_(am), is_immed_offset_(true), shift_(LSL) {
203 am_(am), is_immed_offset_(false), shift_(LSL) {
216 am_(Offset), is_immed_offset_(false), shift_(LSL) {
583 virtual void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
594 virtual void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
assembler_arm32.h 200 void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
211 void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
assembler_thumb2.h 231 void Lsl(Register rd, Register rm, uint32_t shift_imm, bool setcc = false,
242 void Lsl(Register rd, Register rm, Register rn, bool setcc = false,
assembler_arm32.cc     [all...]
assembler_thumb2.cc 842 case LSL: thumb_opcode = 0b00; break;
1097 case LSL: opcode = 0b00; break;
1117 case LSL: opcode = 0b00; break;
    [all...]
  /external/chromium_org/v8/src/arm64/
builtins-arm64.cc 448 Operand(prealloc_fields, LSL, kPointerSizeLog2));
452 __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
466 __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
599 __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2));
727 __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
    [all...]
macro-assembler-arm64-inl.h 892 void MacroAssembler::Lsl(const Register& rd,
897 lsl(rd, rn, shift);
901 void MacroAssembler::Lsl(const Register& rd,
    [all...]
macro-assembler-arm64.h 401 inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
402 inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
    [all...]
codegen-arm64.cc 165 __ Lsl(array_size, length, kDoubleSizeLog2);
195 __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
276 __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
294 __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
605 __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
607 __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
code-stubs-arm64.cc 600 __ Lsl(result, mantissa, exponent);
    [all...]
ic-arm64.cc 269 __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
366 __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
372 __ Lsl(scratch1, scratch1, kPointerSizeLog2);
405 return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
687 __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
720 __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
735 __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
744 __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
    [all...]
lithium-codegen-arm64.cc     [all...]
macro-assembler-arm64.cc     [all...]
full-codegen-arm64.cc     [all...]
  /external/vixl/src/a64/
macro-assembler-a64.h 772 void Lsl(const Register& rd, const Register& rn, unsigned shift) {
776 lsl(rd, rn, shift);
778 void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    [all...]
  /external/chromium_org/v8/test/cctest/
test-assembler-arm64.cc 297 __ Mvn(w2, Operand(w0, LSL, 1));
298 __ Mvn(x3, Operand(x1, LSL, 2));
370 __ Mov(w13, Operand(w11, LSL, 1));
371 __ Mov(x14, Operand(x12, LSL, 2));
521 __ Orr(w3, w0, Operand(w1, LSL, 28));
522 __ Orr(x4, x0, Operand(x1, LSL, 32));
613 __ Orn(w3, w0, Operand(w1, LSL, 4));
614 __ Orn(x4, x0, Operand(x1, LSL, 4));
682 __ And(w3, w0, Operand(w1, LSL, 4));
683 __ And(x4, x0, Operand(x1, LSL, 4))
    [all...]
  /external/vixl/test/
test-assembler-a64.cc 262 __ Mvn(w2, Operand(w0, LSL, 1));
263 __ Mvn(x3, Operand(x1, LSL, 2));
428 __ Mov(w13, Operand(w11, LSL, 1));
429 __ Mov(x14, Operand(x12, LSL, 2));
487 __ Orr(w3, w0, Operand(w1, LSL, 28));
488 __ Orr(x4, x0, Operand(x1, LSL, 32));
576 __ Orn(w3, w0, Operand(w1, LSL, 4));
577 __ Orn(x4, x0, Operand(x1, LSL, 4));
643 __ And(w3, w0, Operand(w1, LSL, 4));
644 __ And(x4, x0, Operand(x1, LSL, 4))
    [all...]

Completed in 166 milliseconds