    Searched refs:LIR (Results 1 - 25 of 41)

  /art/compiler/dex/quick/arm64/
codegen_arm64.h 65 LIR* CheckSuspendUsingLoad() OVERRIDE;
67 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
69 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
71 LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
72 LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
73 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
75 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
85 LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
86 int offset, int check_value, LIR* target, LIR** compare) OVERRIDE
    [all...]
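
The signatures above are per-target overrides of the shared Mir2Lir interface; each backend directory in these results (arm64 here, then arm, mips and x86 below) implements the same virtual entry points with its own instruction encodings. A reduced sketch of that shape, using trimmed stand-in types rather than the real ART declarations:

    #include <cstdint>

    struct LIR;                        // opaque here; defined in mir_to_lir.h below
    struct RegStorage { int reg; };    // stand-in for the real register descriptor

    class Mir2Lir {
     public:
      virtual ~Mir2Lir() {}
      // Target-independent entry points; each backend supplies the encoding.
      virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
      virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
    };

    class Arm64Mir2Lir : public Mir2Lir {
     public:
      LIR* LoadConstantNoClobber(RegStorage r_dest, int value) override;
      LIR* LoadConstantWide(RegStorage r_dest, int64_t value) override;
    };
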
call_arm64.cc 79 LIR* loop_entry = NewLIR0(kPseudoTargetLabel);
80 LIR* branch_out = NewLIR2(kA64Cbz2rt, r_idx.GetReg(), 0);
91 LIR* switch_label = NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, -1);
99 LIR* loop_exit = NewLIR0(kPseudoTargetLabel);
131 LIR* branch_over = OpCondBranch(kCondHi, nullptr);
139 LIR* switch_label = NewLIR3(kA64Adr2xd, branch_reg.GetReg(), 0, -1);
147 LIR* target = NewLIR0(kPseudoTargetLabel);
165 LIR* null_check_branch = nullptr;
180 LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w2, 0, nullptr);
185 LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, nullptr)
    [all...]
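
The nullptr targets fed to OpCmpImmBranch and OpCondBranch above are the quick backend's forward-branch idiom: the branch LIR is emitted before its label exists, and its target field is patched once the kPseudoTargetLabel is created. A minimal sketch of the back-patch, under a deliberately simplified LIR:

    struct LIR { LIR* target; };

    void ForwardBranchExample() {
      LIR branch{nullptr};     // branch emitted first, destination still unknown
      // ... fast-path instructions would be emitted here ...
      LIR label{nullptr};      // stands in for NewLIR0(kPseudoTargetLabel)
      branch.target = &label;  // back-patch; assembly later resolves the offset
    }
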
utility_arm64.cc 92 size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
93 bool opcode_is_wide = IS_WIDE(lir->opcode);
94 A64Opcode opcode = UNWIDE(lir->opcode);
101 size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
102 size_t offset = lir->operands[2];
103 uint64_t check_flags = GetTargetInstFlags(lir->opcode);
107 offset = offset * (1 << GetLoadStoreSize(lir));
112 LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value)
    [all...]
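
GetInstructionOffset above turns the encoded immediate back into a byte offset by rescaling it with the access size (offset * (1 << GetLoadStoreSize(lir))), matching A64's scaled unsigned load/store offsets. The arithmetic, assuming the usual size code of 0 = byte, 1 = half, 2 = word, 3 = doubleword:

    #include <cstddef>

    // Byte offset of a scaled load/store immediate: imm * access size.
    size_t ByteOffset(size_t imm, unsigned size_log2) {
      return imm * (static_cast<size_t>(1) << size_log2);  // imm 4 at size 3 -> 32 bytes
    }
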
int_arm64.cc 35 LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
40 LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
46 void Arm64Mir2Lir::OpEndIT(LIR* it) {
226 LIR* taken = &block_label_list_[bb->taken];
227 LIR* not_taken = &block_label_list_[bb->fall_through];
268 LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
269 LIR* target) {
270 LIR* branch = nullptr;
300 LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg
942 LIR* lir = NewLIR2(kA64Ldr2rp, As32BitReg(reg).GetReg(), 0); local
    [all...]
  /art/compiler/dex/quick/
lazy_debug_frame_opcode_writer.h 25 struct LIR;
29 // this class stores the LIR references and patches the instruction stream later.
43 explicit LazyDebugFrameOpCodeWriter(LIR** last_lir_insn, bool enable_writes,
54 LIR* last_lir_insn;
59 LIR** last_lir_insn_;
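
As the comment at line 29 notes, CFI advance opcodes cannot be written eagerly because LIR offsets are only final after assembly, so the writer records which LIR was current at each opcode and patches the stream later. A sketch of that deferred bookkeeping with simplified types, not the real API:

    #include <vector>

    struct LIR { int offset; };           // offset is assigned during assembly

    struct DelayedAdvance {
      LIR* last_lir_insn;                 // instruction current when the op was queued
    };

    std::vector<DelayedAdvance> delayed;  // replayed once offsets are final; each
                                          // entry then yields the pc delta for a
                                          // DW_CFA_advance_loc-style opcode
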
mir_to_lir.h 143 struct LIR;
164 LIR* pcrel_next; // Chain of LIR nodes needing pc relative fixups.
167 struct LIR {
171 LIR* next;
172 LIR* prev;
173 LIR* target;
176 bool is_nop:1; // LIR is optimized away.
189 // Utility macros to traverse the LIR list.
190 #define NEXT_LIR(lir) (lir->next
    [all...]
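
mir_to_lir.h is where the node itself lives: a doubly-linked list of instructions carrying a branch target pointer and packed flags such as is_nop, walked via the NEXT_LIR/PREV_LIR macros. A compilable miniature of that layout (the real struct also carries offsets, operand arrays, and def/use masks):

    struct LIR {
      int opcode;        // instruction or pseudo-op (labels, safepoints, ...)
      LIR* next;         // emission order, forward
      LIR* prev;         // emission order, backward
      LIR* target;       // branch destination, if any
      bool is_nop : 1;   // LIR is optimized away
    };

    #define NEXT_LIR(lir) ((lir)->next)

    // Walk the list, skipping instructions that optimization nop'd out.
    int CountLive(LIR* head) {
      int n = 0;
      for (LIR* p = head; p != nullptr; p = NEXT_LIR(p)) {
        if (!p->is_nop) ++n;
      }
      return n;
    }
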
mir_to_lir-inl.h 46 inline LIR* Mir2Lir::RawLIR(DexOffset dalvik_offset, int opcode, int op0,
47 int op1, int op2, int op3, int op4, LIR* target) {
48 LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
71 inline LIR* Mir2Lir::NewLIR0(int opcode) {
76 LIR* insn = RawLIR(current_dalvik_offset_, opcode);
81 inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) {
86 LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
91 inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1)
    [all...]
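
RawLIR above draws every node from the compilation arena (arena_->Alloc(sizeof(LIR), kArenaAllocLIR)) rather than the heap, so the entire instruction list is released in one shot when the compiler finishes the method. A minimal sketch of that allocation pattern against a toy arena:

    #include <cstddef>
    #include <new>
    #include <vector>

    struct LIR { int opcode; LIR* next; LIR* prev; LIR* target; };

    class Arena {                        // toy stand-in for ArenaAllocator
     public:
      void* Alloc(size_t size) {
        blocks_.push_back(new char[size]);
        return blocks_.back();
      }
      ~Arena() { for (char* b : blocks_) delete[] b; }
     private:
      std::vector<char*> blocks_;
    };

    LIR* RawLIR(Arena& arena, int opcode, LIR* target) {
      LIR* insn = new (arena.Alloc(sizeof(LIR))) LIR{};  // zero-initialized, arena-owned
      insn->opcode = opcode;
      insn->target = target;
      return insn;
    }
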
local_optimizations.cc 61 static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
71 void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src) {
73 LIR* move_lir;
85 void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
87 LOG(INFO) << "Check LIR:";
89 LOG(INFO) << "This LIR:";
93 inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id)
    [all...]
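
The local pass above pairs each candidate instruction (this_lir) with earlier ones (check_lir): a reload of a value that is provably still live in a register is rewritten into a register move by ConvertMemOpIntoMove, unless an intervening access clobbers the location. IsDalvikRegisterClobbered comes down to an interval-overlap test on Dalvik-register ranges; a sketch:

    // Two references clobber each other iff their Dalvik-register ranges
    // overlap (a wide value occupies two adjacent slots).
    struct DalvikRange { int lo; int hi; };  // half-open [lo, hi)

    bool Clobbers(DalvikRange a, DalvikRange b) {
      return a.lo < b.hi && b.lo < a.hi;
    }
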
codegen_util.cc 88 void Mir2Lir::MarkSafepointPC(LIR* inst) {
91 LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
97 void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
100 // As NewLIR0 uses Append, we need to create the LIR by hand.
101 LIR* safepoint_pc = RawLIR(current_dalvik_offset_, kPseudoSafepointPC);
113 /* Remove a LIR from the list. */
114 void Mir2Lir::UnlinkLIR(LIR* lir) {
115 if (UNLIKELY(lir == first_lir_insn_)) {
116 first_lir_insn_ = lir->next
    [all...]
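
UnlinkLIR at line 114 splices a node out of the doubly-linked stream, updating the list head (and, symmetrically, the tail) when an end node is removed. The splice in full, under the same simplified node as above:

    struct LIR { LIR* next; LIR* prev; };
    LIR* first_lir_insn = nullptr;  // stand-ins for the Mir2Lir members
    LIR* last_lir_insn = nullptr;

    void UnlinkLIR(LIR* lir) {
      if (lir == first_lir_insn) {
        first_lir_insn = lir->next;
        if (first_lir_insn != nullptr) first_lir_insn->prev = nullptr;
      } else {
        lir->prev->next = lir->next;
      }
      if (lir == last_lir_insn) {
        last_lir_insn = lir->prev;
        if (last_lir_insn != nullptr) last_lir_insn->next = nullptr;
      } else {
        lir->next->prev = lir->prev;
      }
    }
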
lazy_debug_frame_opcode_writer.cc 44 LIR* next_lir = NEXT_LIR(advance.last_lir_insn);
gen_common.cc 64 CallHelperImmMethodSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont,
84 LIR* branch = OpCmpImmBranch(kCondEq, r_result, 0, nullptr);
85 LIR* cont = NewLIR0(kPseudoTargetLabel);
110 LIR* unresolved_branch = nullptr;
115 LIR* uninit_branch = nullptr;
134 StaticFieldSlowPath(Mir2Lir* m2l, LIR* unresolved, LIR* uninit, LIR* cont, int storage_index,
142 LIR* target = GenerateTargetLabel()
    [all...]
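
The slow-path classes above capture the quick backend's fast-path/slow-path shape: the fast path ends in a conditional branch taken on failure (fromfast, or the unresolved/uninit pair for static fields), and the out-of-line sequence jumps back to a continuation label (cont). In LIR terms the slow-path object simply holds those branch and label nodes:

    struct LIR { LIR* target; };

    struct SlowPathRefs {
      LIR* fromfast;  // branch taken when the fast path bails out
      LIR* cont;      // label where the slow path rejoins the main stream
    };
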
gen_loadstore.cc 31 LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
146 LIR* def_start;
147 LIR* def_end;
231 LIR* def_start;
232 LIR* def_end;
297 LIR *def_start = last_lir_insn_;
301 LIR *def_end = last_lir_insn_;
331 LIR *def_start = last_lir_insn_;
337 LIR *def_end = last_lir_insn_;
  /art/compiler/dex/quick/arm/
codegen_arm.h 69 LIR* CheckSuspendUsingLoad() OVERRIDE;
71 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
73 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
75 LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
76 LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
77 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
79 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
124 uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
127 static uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir)
    [all...]
utility_arm.cc 77 LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
92 LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
97 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
223 LIR* ArmMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
224 LIR* res;
257 LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
258 LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
263 LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
892 LIR* lir = nullptr; local
    [all...]
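
LoadFPConstantValue above goes through the literal pool: ScanLiteralPool reuses an existing entry for the constant when one exists, otherwise a new entry is appended, and a pc-relative load (kThumb2Vldrs here) is emitted whose displacement is fixed up during assembly. A simplified find-or-add over such a pool:

    #include <cstdint>
    #include <vector>

    struct Literal { int32_t value; int index; };

    int FindOrAddLiteral(std::vector<Literal>& pool, int32_t value) {
      for (const Literal& l : pool) {
        if (l.value == value) return l.index;  // reuse an existing entry
      }
      pool.push_back({value, static_cast<int>(pool.size())});
      return pool.back().index;                // the pc-relative load references this slot
    }
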
call_arm.cc 85 LIR* target = NewLIR0(kPseudoTargetLabel);
90 LIR* it = OpIT(kCondEq, "");
91 LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
128 LIR* branch_over = OpCondBranch(kCondHi, nullptr);
135 LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
139 LIR* target = NewLIR0(kPseudoTargetLabel);
154 LIR* null_check_branch = nullptr;
169 LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
174 LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);
177 LIR* slow_path_target = NewLIR0(kPseudoTargetLabel)
    [all...]
int_arm.cc 35 LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
50 LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
80 void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
110 void ArmMir2Lir::OpEndIT(LIR* it) {
111 // TODO: use the 'it' pointer to do some checks with the LIR, for example
134 LIR* target1;
135 LIR* target2;
141 LIR* branch1 = OpCondBranch(kCondLt, nullptr);
142 LIR* branch2 = OpCondBranch(kCondGt, nullptr)
1091 LIR* lir = NewLIR2(kThumb2LdrPcRel12, reg.GetReg(), 0); local
    [all...]
  /art/compiler/dex/quick/mips/
codegen_mips.h 80 LIR* CheckSuspendUsingLoad() OVERRIDE;
83 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
85 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
87 LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
88 LIR* LoadConstantWideNoClobber(RegStorage r_dest, int64_t value);
89 LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
90 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
92 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
94 LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
95 LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src)
    [all...]
utility_mips.cc 32 LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
89 LIR* res;
130 LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
131 LIR *res;
163 LIR* MipsMir2Lir::LoadConstantWideNoClobber(RegStorage r_dest, int64_t value) {
164 LIR* res = nullptr;
274 LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
275 LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
280 LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src)
    [all...]
assemble_mips.cc 592 void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
595 int opcode = lir->opcode;
596 int dalvik_offset = lir->dalvik_offset;
616 LIR* hop_target = nullptr;
619 LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
620 lir->operands[1], 0, 0, 0, hop_target);
621 InsertLIRBefore(lir, hop_branch);
623 LIR* curr_pc = RawLIR(dalvik_offset, kMipsCurrPC)
650 LIR *lir; local
863 LIR* lir; local
    [all...]
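
ConvertShortToLongBranch above handles a conditional branch whose target turned out to be beyond short-branch range during assembly: the condition is inverted so that a short branch can hop over a multi-instruction long jump to the real target (hop_target/hop_branch in the listing). The inversion at the heart of the rewrite, over an assumed condition-code enum:

    enum Cond { kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe };

    Cond Invert(Cond c) {
      switch (c) {
        case kCondEq: return kCondNe;
        case kCondNe: return kCondEq;
        case kCondLt: return kCondGe;
        case kCondGe: return kCondLt;
        case kCondGt: return kCondLe;
        case kCondLe: return kCondGt;
      }
      return c;  // not reached
    }

    //   beq  $a, $b, target     becomes     bne  $a, $b, hop
    //                                       <long jump to target>
    //                                  hop:
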
int_mips.cc 71 LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, nullptr);
77 LIR* target = NewLIR0(kPseudoTargetLabel);
83 LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
84 LIR* branch;
149 LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
150 LIR* branch;
181 LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
182 LIR* res
    [all...]
  /art/compiler/dex/quick/x86/
codegen_x86.h 90 LIR* CheckSuspendUsingLoad() OVERRIDE;
92 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
94 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
96 LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
97 LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
99 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
101 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
160 void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
161 void SetupTargetResourceMasks(LIR* lir, uint64_t flags
    [all...]
utility_x86.cc 33 LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
53 LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
87 LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
96 LIR *res;
101 // 64-bit immediate is not supported by LIR structure
113 LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
114 LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
119 LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
    [all...]
fp_x86.cc 170 LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP_32.GetReg(),
178 LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP_32.GetReg(), displacement);
242 LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
243 LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
245 LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
263 LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
264 LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
266 LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
302 LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
303 LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP)
653 LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement, 0x7fffffff); local
717 LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP_32.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff); local
    [all...]
target_x86.cc 277 void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
280 DCHECK(!lir->flags.use_def_invalid);
315 if (lir->opcode == kX86RepneScasw) {
357 std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
374 DCHECK_LT(operand_number, 6); // Expect up to 6 LIR operands.
376 int operand = lir->operands[operand_number];
387 static_cast<uint32_t>(lir->operands[operand_number+1]));
408 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand
    [all...]
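
BuildInsnString above renders a LIR for disassembly dumps by expanding a per-opcode template against lir->operands (the DCHECK allows up to six of them). A hypothetical expander for "!<n><type>"-style tokens, simplified to decimal operands only and not the real format grammar:

    #include <cstdio>
    #include <string>

    std::string Expand(const char* fmt, const int ops[6]) {
      std::string out;
      for (const char* p = fmt; *p != '\0'; ++p) {
        if (p[0] == '!' && p[1] >= '0' && p[1] <= '5' && p[2] != '\0') {
          char buf[16];
          std::snprintf(buf, sizeof(buf), "%d", ops[p[1] - '0']);
          out += buf;  // substitute the operand's value
          p += 2;      // skip the operand index and the type character
        } else {
          out += *p;
        }
      }
      return out;
    }
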
int_x86.cc 99 LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
102 LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
108 LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg,
109 int check_value, LIR* target) {
121 LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
126 LIR* X86Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
136 LIR* res = RawLIR(current_dalvik_offset_, r_dest.Is64Bit() ? kX86Mov64RR : kX86Mov32RR
1980 LIR *lir = NewLIR3(x86op, cu_->target64 ? rl_dest.reg.GetReg() : rl_dest.reg.GetLowReg(), local
2023 LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, local
2874 LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val); local
2906 LIR *lir = NewLIR3(x86op, r_base, displacement + LOWORD_OFFSET, val_lo); local
2914 LIR *lir = NewLIR3(x86op, r_base, displacement + HIWORD_OFFSET, val_hi); local
    [all...]
