    Searched refs: LIR (Results 1 - 25 of 38)


  /art/compiler/dex/quick/arm/
codegen_arm.h 33 LIR* CheckSuspendUsingLoad() OVERRIDE;
35 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
37 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
39 LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
40 LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
41 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
43 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
68 uint32_t LinkFixupInsns(LIR* head_lir, LIR* tail_lir, CodeOffset offset);
71 static uint8_t* EncodeLIRs(uint8_t* write_pos, LIR* lir)
    [all...]
call_arm.cc 57 tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
78 LIR* target = NewLIR0(kPseudoTargetLabel);
83 LIR* it = OpIT(kCondEq, "");
84 LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
106 static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
125 LIR* branch_over = OpCondBranch(kCondHi, NULL);
132 LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
136 LIR* target = NewLIR0(kPseudoTargetLabel)
    [all...]
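
A note on the call_arm.cc matches above: this is switch lowering. An arena-allocated LIR** array records one label per case, and kThumb2AddPCR later adds a computed displacement to the PC to dispatch, once assembly has fixed every label's offset. Below is a minimal sketch of such a table record and its offset encoding; SwitchTable, EncodeOffsets, and anchor_offset are illustrative names, not ART's.

    #include <cstddef>
    #include <vector>

    struct LIR { int offset = 0; };  // code offset, fixed during assembly

    // One label per switch case, filled in as cases are emitted
    // (stands in for the arena-allocated 'LIR**' targets array).
    struct SwitchTable {
      std::vector<LIR*> targets;
      explicit SwitchTable(std::size_t cases) : targets(cases, nullptr) {}
    };

    // After assembly, flush the table as PC-relative displacements.
    std::vector<int> EncodeOffsets(const SwitchTable& tab, int anchor_offset) {
      std::vector<int> out;
      for (const LIR* label : tab.targets) {
        out.push_back(label->offset - anchor_offset);
      }
      return out;
    }
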
utility_arm.cc 72 LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
87 LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
92 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
173 LIR* ArmMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
174 LIR* res;
207 LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
208 LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
213 LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
836 LIR* lir = nullptr; local
981 LIR* lir = NewLIR3(kThumb2Ldrexd, r_dest.GetLowReg(), r_dest.GetHighReg(), r_ptr.GetReg()); local
    [all...]
int_arm.cc 28 LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
43 LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
71 void ArmMir2Lir::UpdateIT(LIR* it, const char* new_guide) {
99 void ArmMir2Lir::OpEndIT(LIR* it) {
100 // TODO: use the 'it' pointer to do some checks with the LIR, for example
123 LIR* target1;
124 LIR* target2;
130 LIR* branch1 = OpCondBranch(kCondLt, NULL);
131 LIR* branch2 = OpCondBranch(kCondGt, NULL)
    [all...]
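
The int_arm.cc hits are the Thumb-2 IT-block machinery: OpIT(cond, guide) opens a block whose guide string ('T' repeats the condition, 'E' inverts it) covers up to three instructions beyond the first, and OpEndIT closes it. Here is a self-contained sketch of the 4-bit mask an IT encoder of this shape computes; ItMask and cond_low_bit are assumed names.

    #include <cstring>

    // Build the IT instruction's 4-bit mask from a guide of 0-3 chars:
    // each 'T' replays the condition's low bit, each 'E' its inverse,
    // and a trailing 1 bit marks where the block ends.
    int ItMask(int cond_low_bit, const char* guide) {
      std::size_t len = std::strlen(guide);   // insns after the first (0-3)
      int alt_bit = cond_low_bit ^ 1;
      int mask = 1 << (3 - len);              // length terminator bit
      for (std::size_t i = 0; i < len; ++i) {
        int bit = (guide[i] == 'T') ? cond_low_bit : alt_bit;
        mask |= bit << (3 - i);
      }
      return mask;
    }

    // ItMask(0 /* EQ */, "E") == 0b1100: one EQ insn, then one NE insn.
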
  /art/compiler/dex/quick/arm64/
codegen_arm64.h 73 LIR* CheckSuspendUsingLoad() OVERRIDE;
75 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
77 LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
79 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
81 LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
83 LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
84 LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
85 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
87 LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, VolatileKind is_volatile)
89 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale
    [all...]
call_arm64.cc 57 tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
73 LIR* loop_entry = NewLIR0(kPseudoTargetLabel);
74 LIR* branch_out = NewLIR2(kA64Cbz2rt, r_idx.GetReg(), 0);
85 LIR* switch_label = NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, -1);
93 LIR* loop_exit = NewLIR0(kPseudoTargetLabel);
110 static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
129 LIR* branch_over = OpCondBranch(kCondHi, NULL);
137 LIR* switch_label = NewLIR3(kA64Adr2xd, branch_reg.GetReg(), 0, -1)
    [all...]
utility_arm64.cc 90 size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
91 bool opcode_is_wide = IS_WIDE(lir->opcode);
92 ArmOpcode opcode = UNWIDE(lir->opcode);
99 size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
100 size_t offset = lir->operands[2];
101 uint64_t check_flags = GetTargetInstFlags(lir->opcode);
105 offset = offset * (1 << GetLoadStoreSize(lir));
110 LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value)
    [all...]
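
The GetInstructionOffset match explains itself once you know that A64 load/store unsigned-immediate offsets are encoded pre-scaled: the instruction stores the byte offset divided by the access size, so decoding multiplies back by 1 << log2(size), exactly as the snippet does. A one-function illustration (names assumed):

    // A64 LDR/STR (unsigned immediate) encodes byte_offset >> scale,
    // where scale = log2 of the access size in bytes.
    int ByteOffset(int encoded_offset, int log2_access_size) {
      return encoded_offset << log2_access_size;
    }
    // ByteOffset(3, 3) == 24: an 8-byte load addressing [base, #24].
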
int_arm64.cc 29 LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
34 LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
39 void Arm64Mir2Lir::OpEndIT(LIR* it) {
217 LIR* taken = &block_label_list_[bb->taken];
218 LIR* not_taken = &block_label_list_[bb->fall_through];
259 LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
260 LIR* target) {
261 LIR* branch = nullptr;
287 LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg
    [all...]
assemble_arm64.cc 815 LIR* lir; local
    [all...]
  /art/compiler/dex/quick/mips/
codegen_mips.h 33 LIR* CheckSuspendUsingLoad() OVERRIDE;
35 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
37 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
39 LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
40 LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
41 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
43 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
45 LIR* GenAtomic64Load(RegStorage r_base, int displacement, RegStorage r_dest);
46 LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
72 void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE
    [all...]
utility_mips.cc 25 LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
47 LIR* res = RawLIR(current_dalvik_offset_, opcode, r_src.GetReg(), r_dest.GetReg());
79 LIR* MipsMir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
80 LIR *res;
110 LIR* MipsMir2Lir::OpUnconditionalBranch(LIR* target) {
111 LIR* res = NewLIR1(kMipsB, 0 /* offset to be patched during assembly*/);
116 LIR* MipsMir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
131 LIR* MipsMir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
132 LIR *res
    [all...]
call_mips.cc 76 static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), kArenaAllocLIR));
101 LIR* base_label = NewLIR0(kPseudoTargetLabel);
113 LIR* loop_label = NewLIR0(kPseudoTargetLabel);
114 LIR* exit_branch = OpCmpBranch(kCondEq, r_base, r_end, NULL);
124 LIR* exit_label = NewLIR0(kPseudoTargetLabel);
152 tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
189 LIR* base_label = NewLIR0(kPseudoTargetLabel);
194 LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL)
    [all...]
assemble_mips.cc 77 /* NOTE: must be kept in sync with enum MipsOpcode from LIR.h */
460 void MipsMir2Lir::ConvertShortToLongBranch(LIR* lir) {
463 int opcode = lir->opcode;
464 int dalvik_offset = lir->dalvik_offset;
482 LIR* hop_target = NULL;
485 LIR* hop_branch = RawLIR(dalvik_offset, opcode, lir->operands[0],
486 lir->operands[1], 0, 0, 0, hop_target);
487 InsertLIRBefore(lir, hop_branch)
516 LIR *lir; local
720 LIR* lir; local
    [all...]
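
ConvertShortToLongBranch in assemble_mips.cc applies the classic out-of-range fix: invert the branch condition so it hops over an unconditional jump, and let the jump (which has unlimited range) carry the original target. Below is a hedged sketch of the list surgery, with opcodes passed in rather than MIPS-specific; only InsertLIRBefore mirrors a name from the snippet, and the exact ART body differs.

    struct LIR {
      int opcode;
      LIR* next = nullptr;
      LIR* prev = nullptr;
      LIR* target = nullptr;
    };

    LIR* NewInsn(int opcode, LIR* target = nullptr) {  // stand-in for RawLIR
      LIR* l = new LIR();
      l->opcode = opcode;
      l->target = target;
      return l;
    }

    void InsertLIRBefore(LIR* anchor, LIR* insn) {     // doubly linked splice
      insn->prev = anchor->prev;
      insn->next = anchor;
      if (anchor->prev != nullptr) anchor->prev->next = insn;
      anchor->prev = insn;
    }

    // beq a, b, far  ==>  bne a, b, hop; j far; hop:
    void ConvertShortToLongBranch(LIR* branch, int inverted_opcode,
                                  int jump_opcode, int label_opcode) {
      LIR* hop_target = NewInsn(label_opcode);                 // fall-through label
      InsertLIRBefore(branch, NewInsn(inverted_opcode, hop_target));
      InsertLIRBefore(branch, NewInsn(jump_opcode, branch->target));
      InsertLIRBefore(branch, hop_target);
      // The original short branch is now dead and gets NOP'd or rewritten.
    }
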
int_mips.cc 54 LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg, 0, NULL);
60 LIR* target = NewLIR0(kPseudoTargetLabel);
65 LIR* MipsMir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
66 LIR* branch;
131 LIR* MipsMir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value, LIR* target) {
132 LIR* branch;
163 LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
173 LIR* res = RawLIR(current_dalvik_offset_, kMipsMove
    [all...]
target_mips.cc 146 void MipsMir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
149 DCHECK(!lir->flags.use_def_invalid);
194 std::string MipsMir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
211 operand = lir->operands[nc-'0'];
245 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 + (operand << 1),
246 lir->target);
252 int offset_1 = lir->operands[0];
253 int offset_2 = NEXT_LIR(lir)->operands[0]
    [all...]
  /art/compiler/dex/quick/
mir_to_lir.h 146 struct LIR;
165 LIR* pcrel_next; // Chain of LIR nodes needing pc relative fixups.
168 struct LIR {
172 LIR* next;
173 LIR* prev;
174 LIR* target;
177 bool is_nop:1; // LIR is optimized away.
200 // Utility macros to traverse the LIR list.
201 #define NEXT_LIR(lir) (lir->next
    [all...]
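
The mir_to_lir.h matches are the heart of this result set: every emitted machine instruction is a LIR node on a doubly linked list (next/prev/target), with an is_nop bit so optimization passes can retire instructions without relinking, and NEXT_LIR for traversal. A self-contained sketch of that traversal pattern follows; field names track the snippet, the counting function is illustrative.

    struct LIR {
      int opcode = 0;
      LIR* next = nullptr;
      LIR* prev = nullptr;
      LIR* target = nullptr;
      bool is_nop = false;   // ART packs this into a flags bitfield
    };

    #define NEXT_LIR(lir) (lir->next)

    // Walk the instruction chain, skipping nodes that passes have
    // retired in place by setting is_nop instead of relinking.
    int CountLiveInsns(LIR* head) {
      int live = 0;
      for (LIR* p = head; p != nullptr; p = NEXT_LIR(p)) {
        if (!p->is_nop) ++live;
      }
      return live;
    }
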
mir_to_lir-inl.h 44 inline LIR* Mir2Lir::RawLIR(DexOffset dalvik_offset, int opcode, int op0,
45 int op1, int op2, int op3, int op4, LIR* target) {
46 LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
69 inline LIR* Mir2Lir::NewLIR0(int opcode) {
74 LIR* insn = RawLIR(current_dalvik_offset_, opcode);
79 inline LIR* Mir2Lir::NewLIR1(int opcode, int dest) {
84 LIR* insn = RawLIR(current_dalvik_offset_, opcode, dest);
89 inline LIR* Mir2Lir::NewLIR2(int opcode, int dest, int src1)
    [all...]
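
mir_to_lir-inl.h shows the emission funnel: every NewLIRn helper delegates to RawLIR, which arena-allocates a node, stamps it with the current Dalvik offset, and fills the operand slots. A hedged sketch of that funnel, with plain new standing in for the arena and the offset passed explicitly instead of read from current_dalvik_offset_:

    struct LIR {
      int dalvik_offset = 0;
      int opcode = 0;
      int operands[5] = {0, 0, 0, 0, 0};
      LIR* target = nullptr;
      LIR* next = nullptr;
      LIR* prev = nullptr;
    };

    // All NewLIRn() variants bottom out in one RawLIR().
    LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr) {
      LIR* insn = new LIR();   // arena_->Alloc(..., kArenaAllocLIR) in ART
      insn->dalvik_offset = dalvik_offset;
      insn->opcode = opcode;
      const int ops[5] = {op0, op1, op2, op3, op4};
      for (int i = 0; i < 5; ++i) insn->operands[i] = ops[i];
      insn->target = target;
      return insn;
    }

    LIR* NewLIR0(int off, int opcode)           { return RawLIR(off, opcode); }
    LIR* NewLIR1(int off, int opcode, int dest) { return RawLIR(off, opcode, dest); }
    LIR* NewLIR2(int off, int opcode, int dest, int src1) {
      return RawLIR(off, opcode, dest, src1);
    }
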
local_optimizations.cc 60 static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
70 void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src) {
72 LIR* move_lir;
84 void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
86 LOG(INFO) << "Check LIR:";
88 LOG(INFO) << "This LIR:";
92 inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id)
    [all...]
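
local_optimizations.cc is the peephole layer the names suggest: EliminateLoad and ConvertMemOpIntoMove rewrite a load from a location whose value is already live in a register into a plain register move, provided nothing in between clobbers the location (IsDalvikRegisterClobbered). A toy model of that load-forwarding idea, ignoring ART's resource masks; all names here are illustrative.

    #include <unordered_map>
    #include <vector>

    struct Insn {
      enum Kind { kStore, kLoad, kMove } kind;
      int reg;   // register operand (dest for loads/moves, src for stores)
      int loc;   // memory location (a Dalvik register slot in ART)
    };

    void ForwardLoads(std::vector<Insn>& code) {
      std::unordered_map<int, int> loc_to_reg;  // location -> register holding it
      for (Insn& insn : code) {
        if (insn.kind == Insn::kStore) {
          loc_to_reg[insn.loc] = insn.reg;      // value now live in insn.reg
        } else if (insn.kind == Insn::kLoad) {
          auto it = loc_to_reg.find(insn.loc);
          if (it != loc_to_reg.end()) {
            insn.kind = Insn::kMove;            // memory op becomes a move
            insn.loc = it->second;              // now the source register
          }
        }
        if (insn.kind != Insn::kStore) {
          // insn.reg was (re)written: drop any mapping routed through it.
          for (auto it = loc_to_reg.begin(); it != loc_to_reg.end();) {
            if (it->second == insn.reg) it = loc_to_reg.erase(it);
            else ++it;
          }
        }
      }
    }
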
codegen_util.cc 82 void Mir2Lir::MarkSafepointPC(LIR* inst) {
85 LIR* safepoint_pc = NewLIR0(kPseudoSafepointPC);
89 void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
92 // As NewLIR0 uses Append, we need to create the LIR by hand.
93 LIR* safepoint_pc = RawLIR(current_dalvik_offset_, kPseudoSafepointPC);
103 /* Remove a LIR from the list. */
104 void Mir2Lir::UnlinkLIR(LIR* lir) {
105 if (UNLIKELY(lir == first_lir_insn_)) {
106 first_lir_insn_ = lir->next
    [all...]
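
The codegen_util.cc hits around UnlinkLIR are plain doubly-linked-list surgery; the snippet is cut off mid-branch. A hedged reconstruction of the pattern (the head/tail fixups are what the visible lines imply; the exact ART body may differ):

    struct LIR { LIR* next = nullptr; LIR* prev = nullptr; };

    LIR* first_lir_insn_ = nullptr;
    LIR* last_lir_insn_ = nullptr;

    // Splice one node out of the instruction list, fixing up the head
    // or tail pointer when the node sits at either end.
    void UnlinkLIR(LIR* lir) {
      if (lir == first_lir_insn_) {
        first_lir_insn_ = lir->next;
        if (lir->next != nullptr) {
          lir->next->prev = nullptr;
        } else {
          last_lir_insn_ = nullptr;   // removed the only node
        }
      } else if (lir == last_lir_insn_) {
        last_lir_insn_ = lir->prev;
        lir->prev->next = nullptr;
      } else {
        lir->prev->next = lir->next;
        lir->next->prev = lir->prev;
      }
    }
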
gen_common.cc 45 LIR* barrier = NewLIR0(kPseudoBarrier);
52 LIR* branch = OpUnconditionalBranch(nullptr);
57 LIR* branch = OpCondBranch(c_code, nullptr);
62 LIR* branch = OpCmpImmBranch(kCondEq, reg, 0, nullptr);
66 void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) {
69 DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch)
87 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length)
104 LIR* branch = OpCmpBranch(kCondUge, index, length, nullptr);
111 ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length)
134 LIR* branch = OpCmpImmBranch(kCondLs, length, index, nullptr)
    [all...]
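
The gen_common.cc matches show the recurring slow-path idiom: the fast path emits a compare-and-branch with a null target (OpCmpImmBranch(kCondEq, reg, 0, nullptr) for a zero check), registers a slow-path object such as DivZeroCheckSlowPath, and the branch is patched to the out-of-line code once it is emitted after the method body. A schematic of that registration, using a std::function where ART uses LIRSlowPath subclasses:

    #include <functional>
    #include <utility>
    #include <vector>

    struct LIR { LIR* target = nullptr; };

    struct SlowPath {
      LIR* fork_branch;                  // inline branch guarding the fast path
      std::function<LIR*()> emit_body;   // emits out-of-line code, returns its label
    };

    std::vector<SlowPath> slow_paths_;

    void AddSlowPath(LIR* branch, std::function<LIR*()> body) {
      slow_paths_.push_back({branch, std::move(body)});
    }

    // Called once the main method body is done: materialize each slow
    // path and point its guarding branch at the freshly emitted label.
    void EmitSlowPaths() {
      for (SlowPath& sp : slow_paths_) {
        sp.fork_branch->target = sp.emit_body();
      }
    }
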
gen_loadstore.cc 30 LIR* Mir2Lir::LoadConstant(RegStorage r_dest, int value) {
184 LIR* def_start;
185 LIR* def_end;
268 LIR* def_start;
269 LIR* def_end;
334 LIR *def_start = last_lir_insn_;
338 LIR *def_end = last_lir_insn_;
368 LIR *def_start = last_lir_insn_;
374 LIR *def_end = last_lir_insn_;
  /art/compiler/dex/quick/x86/
codegen_x86.h 70 LIR* CheckSuspendUsingLoad() OVERRIDE;
72 LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
74 LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
76 LIR* LoadConstantNoClobber(RegStorage r_dest, int value);
77 LIR* LoadConstantWide(RegStorage r_dest, int64_t value);
78 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
80 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
131 void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) OVERRIDE;
132 void SetupTargetResourceMasks(LIR* lir, uint64_t flags
    [all...]
utility_x86.cc 29 LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
49 LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
81 LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
90 LIR *res;
95 // 64-bit immediate is not supported by LIR structure
107 LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
108 LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly*/);
113 LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
    [all...]
fp_x86.cc 153 LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP.GetReg(),
161 LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
225 LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
226 LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
228 LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
246 LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
247 LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
249 LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
285 LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondAe);
286 LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP)
630 LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement, 0x7fffffff); local
694 LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff); local
    [all...]
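
The two 'local' hits at the bottom of the fp_x86.cc results implement a floating-point abs by masking: kX86And32MI ANDs the value's stack slot with 0x7fffffff, clearing the IEEE-754 sign bit in place (the displacement + HIWORD_OFFSET variant does the same to the high word of a double). A tiny demonstration of why the mask works:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // fabs via bit masking: IEEE-754 keeps the sign in the top bit, so
    // clearing it with 0x7fffffff makes any float non-negative.
    float AbsViaMask(float f) {
      std::uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      bits &= 0x7fffffffu;
      std::memcpy(&f, &bits, sizeof(bits));
      return f;
    }

    int main() {
      std::printf("%f %f\n", AbsViaMask(-2.5f), AbsViaMask(3.0f));  // 2.500000 3.000000
    }
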
call_x86.cc 74 tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
110 LIR* branch_over = OpCondBranch(kCondHi, NULL);
119 LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
123 LIR* target = NewLIR0(kPseudoTargetLabel);
193 LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
200 LIR* target = NewLIR0(kPseudoTargetLabel);
246 StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
278 LIR* branch = OpCondBranch(kCondUlt, nullptr);
