    Searched refs:MIR (Results 1 - 25 of 40)


  /art/compiler/dex/
mir_graph.h 239 * Normalized use/def for a MIR operation using SSA names rather than vregs. Note that
268 struct MIR {
271 * additional fields on as-needed basis. Question: how to support MIR Pseudo-ops; probably
350 int16_t m_unit_index; // From which method was this MIR included
352 MIR* next;
358 MIR* throw_insn;
371 explicit MIR():offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId),
380 MIR* Copy(CompilationUnit *c_unit);
381 MIR* Copy(MIRGraph* mir_Graph);
384 return arena->Alloc(sizeof(MIR), kArenaAllocMIR)
532 MIR* mir; member in struct:art::CallInfo
    [all...]
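
The mir_graph.h hits above outline the central data structure of the quick compiler's middle end: a MIR node wraps one decoded Dalvik instruction together with its SSA representation, optimization flags, and a next pointer, so each basic block owns an intrusive singly-linked list of MIRs. Below is a minimal stand-in sketch of that layout and the traversal idiom that recurs in the .cc hits further down; the simplified field types are assumptions, not the real art::MIR declaration.

    // Minimal stand-in for the per-instruction node described by the hits
    // above. Field types are simplified; the real art::MIR has many more
    // members (meta info, throw_insn, Copy(), arena placement new, ...).
    #include <cstdint>
    #include <cstdio>

    struct SSARepresentation;            // SSA use/def lists, opaque here

    struct MIR {
      struct DecodedInstruction {
        uint16_t opcode = 0;             // Dalvik or extended-MIR pseudo opcode
      } dalvikInsn;
      uint32_t offset = 0;               // code-unit offset within the method
      int optimization_flags = 0;        // e.g. "null check already done" bits
      int16_t m_unit_index = 0;          // which method unit this MIR came from
      MIR* next = nullptr;               // intrusive per-block list linkage
      SSARepresentation* ssa_rep = nullptr;
    };

    struct BasicBlock {
      MIR* first_mir_insn = nullptr;
      MIR* last_mir_insn = nullptr;
    };

    // The traversal idiom seen throughout the .cc hits below.
    static void DumpBlock(const BasicBlock* bb) {
      for (const MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
        std::printf("0x%04x opcode=%u\n", static_cast<unsigned>(mir->offset),
                    static_cast<unsigned>(mir->dalvikInsn.opcode));
      }
    }
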
local_value_numbering.h 77 uint16_t GetValueNumber(MIR* mir);
295 uint16_t MarkNonAliasingNonNull(MIR* mir);
299 void HandleNullCheck(MIR* mir, uint16_t reg);
300 void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
301 void HandlePutObject(MIR* mir);
    [all...]
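
The LocalValueNumbering hits (GetValueNumber, MarkNonAliasingNonNull, HandleNullCheck, HandleRangeCheck) are the per-block bookkeeping that lets redundant null and range checks be elided. The sketch below illustrates only the underlying idea with a set of value names proven non-null; the class and method names are illustrative, not ART's actual implementation.

    #include <cstdint>
    #include <set>

    // Idea sketch: once a value name is proven non-null (freshly allocated
    // object, or it already survived a null check), later checks on the same
    // value name inside the block are redundant.
    class LocalValueNumberingSketch {
     public:
      // Record a value name known to be non-null (illustrative analogue of
      // MarkNonAliasingNonNull).
      void MarkNonNull(uint16_t value_name) { non_null_.insert(value_name); }

      // Returns true if an explicit null check must still be emitted
      // (illustrative analogue of HandleNullCheck).
      bool NeedsNullCheck(uint16_t value_name) {
        if (non_null_.count(value_name) != 0) {
          return false;                    // already proven non-null: elide
        }
        non_null_.insert(value_name);      // after this check it is proven
        return true;
      }

     private:
      std::set<uint16_t> non_null_;        // value names proven non-null
    };
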
mir_graph.cc 140 int MIRGraph::ParseInsn(const uint16_t* code_ptr, MIR::DecodedInstruction* decoded_instruction) {
158 MIR* insn = orig_block->first_mir_insn;
159 MIR* prev = NULL;
234 !MIR::DecodedInstruction::IsPseudoMirOp(insn->dalvikInsn.opcode));
236 MIR* p = insn;
249 if ((opcode == kMirOpCheck) || !MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
399 BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
466 BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
542 BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
627 * pseudo exception edge MIR. Note also that this new block i
911 const MIR* mir; local
1093 MIR* mir = *it; local
1480 MIR* mir = new (arena_) MIR(); local
    [all...]
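
ParseInsn and the ProcessCanBranch/ProcessCanSwitch/ProcessCanThrow hits build the control-flow graph while decoding bytecode, which includes splitting an existing block when a branch targets its middle. Here is a hedged sketch of that splitting step, reusing the stand-in MIR/BasicBlock types from the first sketch above; the successor, predecessor and try/catch fix-ups done by the real MIRGraph are omitted.

    // Sketch of splitting a block at a branch-target offset: every MIR from
    // 'code_offset' onward moves to a new bottom block.
    static BasicBlock* SplitBlockAt(BasicBlock* orig_block, uint32_t code_offset) {
      MIR* insn = orig_block->first_mir_insn;
      MIR* prev = nullptr;
      while (insn != nullptr && insn->offset != code_offset) {
        prev = insn;
        insn = insn->next;
      }
      if (insn == nullptr) {
        return nullptr;                        // offset is not in this block
      }
      BasicBlock* bottom = new BasicBlock();   // the real code arena-allocates
      bottom->first_mir_insn = insn;
      bottom->last_mir_insn = orig_block->last_mir_insn;
      orig_block->last_mir_insn = prev;
      if (prev != nullptr) {
        prev->next = nullptr;                  // terminate the top half's list
      } else {
        orig_block->first_mir_insn = nullptr;  // split happened at the first MIR
      }
      return bottom;
    }
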
mir_optimization.cc 46 MIR* mir; local
48 for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
49 // Skip pass if BB has MIR without SSA representation.
50 if (mir->ssa_rep == nullptr) {
54 uint64_t df_attributes = GetDataFlowAttributes(mir);
56 MIR::DecodedInstruction* d_insn = &mir->dalvikInsn
678 MIR* mir = bb->last_mir_insn; local
    [all...]
post_opt_passes.cc 59 MIR* mir = bb->first_mir_insn; local
61 while (mir != nullptr) {
62 MIR* next = mir->next;
64 Instruction::Code opcode = mir->dalvikInsn.opcode;
67 bb->RemoveMIR(mir);
70 mir = next;
73 // We do not care in reporting a change or not in the MIR.
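
The post_opt_passes.cc hit shows the standard idiom for deleting instructions while walking the block: capture mir->next before RemoveMIR unlinks the node. A sketch of that pattern on the stand-in types above; RemoveMirSketch and ShouldRemove are simplified placeholders rather than the real BasicBlock API.

    // Removal-while-iterating sketch: grab the successor before unlinking so
    // the walk survives the removal.
    static void RemoveMirSketch(BasicBlock* bb, MIR* target) {
      MIR* prev = nullptr;
      for (MIR* cur = bb->first_mir_insn; cur != nullptr; prev = cur, cur = cur->next) {
        if (cur != target) {
          continue;
        }
        if (prev != nullptr) {
          prev->next = cur->next;
        } else {
          bb->first_mir_insn = cur->next;
        }
        if (bb->last_mir_insn == target) {
          bb->last_mir_insn = prev;            // removed the tail
        }
        cur->next = nullptr;                   // arena memory is not freed
        return;
      }
    }

    static void PruneBlock(BasicBlock* bb, bool (*ShouldRemove)(const MIR*)) {
      MIR* mir = bb->first_mir_insn;
      while (mir != nullptr) {
        MIR* next = mir->next;                 // save before a possible unlink
        if (ShouldRemove(mir)) {
          RemoveMirSketch(bb, mir);
        }
        mir = next;
      }
    }
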
mir_dataflow.cc 26 * instructions, where extended opcode at the MIR level are appended
801 // Beginning of extended MIR opcodes
914 const MIR::DecodedInstruction& d_insn) {
927 MIR* mir; local
939 for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
940 uint64_t df_attributes = GetDataFlowAttributes(mir);
1074 MIR* mir; local
    [all...]
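
GetDataFlowAttributes returns a uint64_t bitmask describing each opcode's use/def behavior, which the dataflow and optimization passes test bit by bit. The sketch below shows the general shape of such a per-opcode attribute table; the flag names and values are invented for illustration and do not match ART's real DF_* constants.

    #include <cstddef>
    #include <cstdint>

    // Illustrative per-opcode attribute bitmask in the spirit of the
    // uint64_t df_attributes above.
    enum : uint64_t {
      kDfHasUses   = 1ULL << 0,   // reads at least one vreg
      kDfHasDefs   = 1ULL << 1,   // writes a vreg
      kDfNullCheck = 1ULL << 2,   // implies a null check on an operand
      kDfWideDef   = 1ULL << 3,   // definition occupies a vreg pair
    };

    // One entry per opcode, as the per-MIR lookup in the hits suggests.
    constexpr uint64_t kAttributeTableSketch[] = {
      /* opcode 0 */ 0,
      /* opcode 1 */ kDfHasUses | kDfHasDefs,
      /* opcode 2 */ kDfHasUses | kDfNullCheck,
    };

    inline uint64_t GetDataFlowAttributesSketch(uint16_t opcode) {
      constexpr size_t n = sizeof(kAttributeTableSketch) / sizeof(kAttributeTableSketch[0]);
      return opcode < n ? kAttributeTableSketch[opcode] : 0;
    }
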
mir_analysis.cc 802 // Beginning of extended MIR opcodes
904 for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
905 if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
906 // Skip any MIR pseudo-op.
909 uint32_t flags = analysis_attributes_[mir->dalvikInsn.opcode]
    [all...]
mir_optimization_test.cc 158 mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
162 MIR* mir = &mirs_[i]; local
163 mir->dalvikInsn.opcode = def->opcode;
166 bb->AppendMIR(mir);
169 mir->meta.sfield_lowering_info = def->field_or_method_info;
171 mir->ssa_rep = nullptr;
172 mir->offset = 2 * i; // All insns need to be at least 2 code units long.
173 mir->optimization_flags = 0u
    [all...]
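
The test fixture allocates every MIR for a block in one arena call and then appends them in order with fixed 2-code-unit offsets. A self-contained sketch of that setup, using the stand-in types from the first sketch and a std::vector in place of the compiler's arena allocator and kArenaAllocMIR tag.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Sketch of the fixture setup pattern: allocate all MIRs up front, then
    // wire them into a block in program order.
    static void BuildTestBlock(BasicBlock* bb, std::vector<MIR>* storage,
                               const uint16_t* opcodes, size_t count) {
      storage->assign(count, MIR());
      for (size_t i = 0; i < count; ++i) {
        MIR* mir = &(*storage)[i];
        mir->dalvikInsn.opcode = opcodes[i];
        mir->ssa_rep = nullptr;                        // tests run without SSA info
        mir->offset = static_cast<uint32_t>(2 * i);    // insns are >= 2 code units
        mir->optimization_flags = 0;
        // AppendMIR equivalent: push onto the block's intrusive list.
        if (bb->first_mir_insn == nullptr) {
          bb->first_mir_insn = mir;
        } else {
          bb->last_mir_insn->next = mir;
        }
        bb->last_mir_insn = mir;
      }
    }
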
local_value_numbering.cc 464 const MIR* mir = fall_through_bb->first_mir_insn; local
465 DCHECK(mir != nullptr);
467 if ((Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke) != 0) {
468 for (uint16_t i = 0u; i != mir->ssa_rep->num_uses; ++i) {
469 uint16_t value_name = lvn->GetOperandValue(mir->ssa_rep->uses[i]);
    [all...]
vreg_analysis.cc 124 bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
125 SSARepresentation *ssa_rep = mir->ssa_rep;
137 uint64_t attrs = GetDataFlowAttributes(mir);
218 if ((mir->dalvikInsn.opcode == Instruction::RETURN) ||
219 (mir->dalvikInsn.opcode == Instruction::RETURN_WIDE) ||
220 (mir->dalvikInsn.opcode == Instruction::RETURN_OBJECT)) {
253 Instruction::Code opcode = mir->dalvikInsn.opcode;
254 int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
255 0 : Instruction::FlagsOf(mir->dalvikInsn.opcode)
    [all...]
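
InferTypeAndSize returns whether it refined any type or size information, and the analysis reruns until no MIR reports a change. A sketch of that fixed-point driver over the stand-in types above; InferOne is a placeholder for the real per-instruction propagation.

    #include <cstddef>

    // Fixed-point driver implied by InferTypeAndSize's bool result: rerun the
    // per-instruction inference until nothing changes.
    static void InferTypesSketch(BasicBlock* const* blocks, size_t num_blocks,
                                 bool (*InferOne)(BasicBlock*, MIR*)) {
      bool changed = true;
      while (changed) {
        changed = false;
        for (size_t i = 0; i < num_blocks; ++i) {
          for (MIR* mir = blocks[i]->first_mir_insn; mir != nullptr; mir = mir->next) {
            changed |= InferOne(blocks[i], mir);   // true if anything got refined
          }
        }
      }
    }
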
  /art/compiler/dex/quick/
dex_file_method_inliner.h 36 struct MIR;
90 bool GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke, uint32_t method_idx)
313 static bool GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
314 MIR* move_result, const InlineMethod& method);
315 static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
316 MIR* move_result, const InlineMethod& method);
317 static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
318 MIR* move_result, const InlineMethod& method, uint32_t method_idx);
319 static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
320 MIR* move_result, const InlineMethod& method, uint32_t method_idx)
    [all...]
dex_file_method_inliner.cc 102 MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
103 MIR* insn = mir_graph->NewMIR();
109 uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
111 DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
120 bool WideArgIsInConsecutiveDalvikRegs(MIR* invoke, uint32_t arg) {
122 DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
529 bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
541 MIR* move_result = nullptr
    [all...]
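
The GenInline* hits replace an invoke of a trivial callee (constant return, return-of-argument, simple getter or setter) with a single cheaper MIR, allocated via AllocReplacementMIR. The sketch below only captures the classification step; the enum and function names are placeholders, not the InlineMethod API.

    // Illustrative classification of the trivial callees handled by the
    // GenInlineConst / GenInlineReturnArg / GenInlineIGet / GenInlineIPut hits.
    enum class TrivialCallee {
      kReturnsConst,    // body is "return <constant>;"
      kReturnsArg,      // body returns one of its arguments unchanged
      kInstanceGetter,  // body is "return this.field;"
      kInstanceSetter,  // body is "this.field = arg;"
      kNotTrivial,
    };

    // Sketch: the invoke MIR is rewritten into one cheaper instruction kind
    // (or left alone when the callee does not match a trivial pattern).
    static const char* ReplacementOpSketch(TrivialCallee kind) {
      switch (kind) {
        case TrivialCallee::kReturnsConst:   return "CONST";
        case TrivialCallee::kReturnsArg:     return "MOVE";
        case TrivialCallee::kInstanceGetter: return "IGET";
        case TrivialCallee::kInstanceSetter: return "IPUT";
        default:                             return nullptr;  // keep the invoke
      }
    }
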
mir_to_lir.h 145 struct MIR;
    [all...]
mir_to_lir.cc 228 bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
241 GenPrintLabel(mir);
268 bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
285 GenPrintLabel(mir);
303 bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
308 GenPrintLabel(mir);
319 bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special)
1118 MIR* mir; local
1226 MIR* mir = bb->first_mir_insn; local
    [all...]
  /art/compiler/dex/quick/x86/
codegen_x86.h 237 void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
238 void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
239 void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
249 void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
250 void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE
    [all...]
call_x86.cc 30 void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
63 void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
target_x86.cc     [all...]
utility_x86.cc     [all...]
  /art/compiler/dex/portable/
mir_to_gbc.h 40 struct MIR;
117 void ConvertCompareAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
119 void ConvertCompareZeroAndBranch(BasicBlock* bb, MIR* mir, ConditionCode cc,
135 void ConvertInvoke(BasicBlock* bb, MIR* mir, InvokeType invoke_type,
168 bool ConvertMIRNode(MIR* mir, BasicBlock* bb, ::llvm::BasicBlock* llvm_bb);
172 void ConvertExtendedMIR(BasicBlock* bb, MIR* mir, ::llvm::BasicBlock* llvm_bb)
    [all...]
  /art/compiler/dex/quick/mips/
codegen_mips.h 119 void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
120 void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
121 void GenSelect(BasicBlock* bb, MIR* mir);
131 void GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
132 void GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src)
    [all...]
call_mips.cc 27 bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
64 void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
141 void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
fp_mips.cc 210 void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
  /art/compiler/dex/quick/arm/
codegen_arm.h 120 void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
121 void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
122 void GenSelect(BasicBlock* bb, MIR* mir);
134 void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
135 void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src)
    [all...]
  /art/compiler/dex/quick/arm64/
codegen_arm64.h 185 void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
186 void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
187 void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
200 void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
201 void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE
    [all...]
call_arm64.cc 46 void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
98 void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
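
Every back end above (x86, MIPS, ARM, ARM64) implements the same pair of switch lowerings: a packed switch indexes a dense jump table by key - first_key after a bounds check, while a sparse switch searches a table of key/target pairs. A target-neutral sketch of the dispatch logic those routines emit, written as plain C++ rather than LIR.

    #include <cstddef>
    #include <cstdint>

    // Packed switch: dense table of targets for keys [first_key, first_key + size).
    static int32_t PackedSwitchSketch(int32_t key, int32_t first_key,
                                      const int32_t* targets, size_t size,
                                      int32_t fall_through) {
      uint32_t index = static_cast<uint32_t>(key) - static_cast<uint32_t>(first_key);
      return index < size ? targets[index] : fall_through;  // bounds check + table jump
    }

    // Sparse switch: parallel tables of keys and targets. A linear scan is
    // shown; the generated code may instead loop over the table in registers.
    static int32_t SparseSwitchSketch(int32_t key, const int32_t* keys,
                                      const int32_t* targets, size_t size,
                                      int32_t fall_through) {
      for (size_t i = 0; i < size; ++i) {
        if (keys[i] == key) {
          return targets[i];
        }
      }
      return fall_through;
    }
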
