/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "invoke_type.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
#include "dex/backend.h"
#include "dex/growable_array.h"
#include "dex/arena_allocator.h"
#include "driver/compiler_driver.h"
#include "leb128_encoder.h"
#include "safe_map.h"

namespace art {

// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP (1ULL << kIsBinaryOp)
#define IS_BRANCH (1ULL << kIsBranch)
#define IS_IT (1ULL << kIsIT)
#define IS_LOAD (1ULL << kMemLoad)
#define IS_QUAD_OP (1ULL << kIsQuadOp)
#define IS_QUIN_OP (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP (1ULL << kIsSextupleOp)
#define IS_STORE (1ULL << kMemStore)
#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP (1ULL << kIsUnaryOp)
#define NEEDS_FIXUP (1ULL << kPCRelFixup)
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
#define REG_DEF1 (1ULL << kRegDef1)
#define REG_DEFA (1ULL << kRegDefA)
#define REG_DEFD (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2 (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0 (1ULL << kRegDefList0)
#define REG_DEF_LIST1 (1ULL << kRegDefList1)
#define REG_DEF_LR (1ULL << kRegDefLR)
#define REG_DEF_SP (1ULL << kRegDefSP)
#define REG_USE0 (1ULL << kRegUse0)
#define REG_USE1 (1ULL << kRegUse1)
#define REG_USE2 (1ULL << kRegUse2)
#define REG_USE3 (1ULL << kRegUse3)
#define REG_USE4 (1ULL << kRegUse4)
#define REG_USEA (1ULL << kRegUseA)
#define REG_USEC (1ULL << kRegUseC)
#define REG_USED (1ULL << kRegUseD)
#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0 (1ULL << kRegUseList0)
#define REG_USE_LIST1 (1ULL << kRegUseList1)
#define REG_USE_LR (1ULL << kRegUseLR)
#define REG_USE_PC (1ULL << kRegUsePC)
#define REG_USE_SP (1ULL << kRegUseSP)
#define SETS_CCODES (1ULL << kSetsCCodes)
#define USES_CCODES (1ULL << kUsesCCodes)
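
// Illustrative note (not part of the original header): these attribute bits are
// OR'd together per opcode in each target's encoding map and surfaced through
// GetTargetInstFlags().  For example, a three-operand ALU instruction that
// defines operand 0, uses operands 1 and 2, and updates the condition flags
// might carry a mask such as:
//
//   IS_TERTIARY_OP | REG_DEF0 | REG_USE1 | REG_USE2 | SETS_CCODES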

// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
#define REG_DEF01_USE2 (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01 (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0 (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12 (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE1 (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2 (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA (REG_DEFA | REG_USEA)
#define REG_USE012 (REG_USE01 | REG_USE2)
#define REG_USE014 (REG_USE01 | REG_USE4)
#define REG_USE01 (REG_USE0 | REG_USE1)
#define REG_USE02 (REG_USE0 | REG_USE2)
#define REG_USE12 (REG_USE1 | REG_USE2)
#define REG_USE23 (REG_USE2 | REG_USE3)

struct BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct MIR;
struct RegLocation;
struct RegisterInfo;
class MIRGraph;
class Mir2Lir;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

typedef std::vector<uint8_t> CodeBuffer;

struct LIR {
  int offset;               // Offset of this instruction.
  int dalvik_offset;        // Offset of Dalvik opcode.
  LIR* next;
  LIR* prev;
  LIR* target;
  int opcode;
  int operands[5];          // [0..4] = [dest, src1, src2, extra, extra2].
  struct {
    bool is_nop:1;          // LIR is optimized away.
    bool pcRelFixup:1;      // May need pc-relative fixup.
    unsigned int size:5;    // Note: size is in bytes.
    unsigned int unused:25;
  } flags;
  int alias_info;           // For Dalvik register & litpool disambiguation.
  uint64_t use_mask;        // Resource mask for use.
  uint64_t def_mask;        // Resource mask for def.
};

// Target-specific initialization.
Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);
Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                           ArenaAllocator* const arena);
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena);

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X) (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG (0x80000000)
#define DECODE_ALIAS_INFO_WIDE(X) ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE) (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))

// Common resource macros.
#define ENCODE_CCODE (1ULL << kCCode)
#define ENCODE_FP_STATUS (1ULL << kFPStatus)

// Abstract memory locations.
#define ENCODE_DALVIK_REG (1ULL << kDalvikReg)
#define ENCODE_LITERAL (1ULL << kLiteral)
#define ENCODE_HEAP_REF (1ULL << kHeapRef)
#define ENCODE_MUST_NOT_ALIAS (1ULL << kMustNotAlias)

#define ENCODE_ALL (~0ULL)
#define ENCODE_MEM (ENCODE_DALVIK_REG | ENCODE_LITERAL | \
                    ENCODE_HEAP_REF | ENCODE_MUST_NOT_ALIAS)
// TODO: replace these macros
#define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))
#define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)

class Mir2Lir : public Backend {
  public:
    struct SwitchTable {
      int offset;
      const uint16_t* table;  // Original dex table.
      int vaddr;              // Dalvik offset of switch opcode.
      LIR* anchor;            // Reference instruction for relative offsets.
      LIR** targets;          // Array of case targets.
    };

    struct FillArrayData {
      int offset;
      const uint16_t* table;  // Original dex table.
      int size;
      int vaddr;              // Dalvik offset of FILL_ARRAY_DATA opcode.
    };

    /* Static register use counts */
    struct RefCounts {
      int count;
      int s_reg;
      bool double_start;      // Starting v_reg for a double.
    };

    /*
     * Data structure tracking the mapping between a Dalvik register (pair) and a
     * native register (pair). The idea is to reuse the previously loaded value
     * if possible, otherwise to keep the value in a native register as long as
     * possible.
     */
    struct RegisterInfo {
      int reg;                // Reg number.
      bool in_use;            // Has it been allocated?
      bool is_temp;           // Can allocate as temp?
      bool pair;              // Part of a register pair?
      int partner;            // If pair, other reg of pair.
      bool live;              // Is there an associated SSA name?
      bool dirty;             // If live, is it dirty?
      int s_reg;              // Name of live value.
      LIR *def_start;         // Starting inst in last def sequence.
      LIR *def_end;           // Ending inst in last def sequence.
    };

    struct RegisterPool {
      int num_core_regs;
      RegisterInfo *core_regs;
      int next_core_reg;
      int num_fp_regs;
      RegisterInfo *FPRegs;
      int next_fp_reg;
    };

    struct PromotionMap {
      RegLocationType core_location:3;
      uint8_t core_reg;
      RegLocationType fp_location:3;
      uint8_t FpReg;
      bool first_in_pair;
    };

    virtual ~Mir2Lir() {}

    int32_t s4FromSwitchData(const void* switch_data) {
      return *reinterpret_cast<const int32_t*>(switch_data);
    }

    RegisterClass oat_reg_class_by_size(OpSize size) {
      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
              size == kSignedByte) ? kCoreReg : kAnyReg;
    }
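
    // Illustrative note (not part of the original header): sub-word values are
    // forced into core registers, while word-sized values may live in either
    // register file, e.g.
    //
    //   oat_reg_class_by_size(kSignedByte)  // -> kCoreReg
    //   oat_reg_class_by_size(kWord)        // -> kAnyReg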

    size_t CodeBufferSizeInBytes() {
      return code_buffer_.size() / sizeof(code_buffer_[0]);
    }

    // Shared by all targets - implemented in codegen_util.cc
    void AppendLIR(LIR* lir);
    void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
    void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

    int ComputeFrameSize();
    virtual void Materialize();
    virtual CompiledMethod* GetCompiledMethod();
    void MarkSafepointPC(LIR* inst);
    bool FastInstance(uint32_t field_idx, int& field_offset, bool& is_volatile, bool is_put);
    void SetupResourceMasks(LIR* lir);
    void AssembleLIR();
    void SetMemRefType(LIR* lir, bool is_load, int mem_type);
    void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
    void SetupRegMask(uint64_t* mask, int reg);
    void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
    void DumpPromotionMap();
    void CodegenDump();
    LIR* RawLIR(int dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
                int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
    LIR* NewLIR0(int opcode);
    LIR* NewLIR1(int opcode, int dest);
    LIR* NewLIR2(int opcode, int dest, int src1);
    LIR* NewLIR3(int opcode, int dest, int src1, int src2);
    LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
    LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
    LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
    LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
    LIR* AddWordData(LIR* *constant_list_p, int value);
    LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
    void ProcessSwitchTables();
    void DumpSparseSwitchTable(const uint16_t* table);
    void DumpPackedSwitchTable(const uint16_t* table);
    LIR* MarkBoundary(int offset, const char* inst_str);
    void NopLIR(LIR* lir);
    bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
    bool IsInexpensiveConstant(RegLocation rl_src);
    ConditionCode FlipComparisonOrder(ConditionCode before);
    void DumpMappingTable(const char* table_name, const std::string& descriptor,
                          const std::string& name, const std::string& signature,
                          const std::vector<uint32_t>& v);
    void InstallLiteralPools();
    void InstallSwitchTables();
    void InstallFillArrayData();
    bool VerifyCatchEntries();
    void CreateMappingTables();
    void CreateNativeGcMap();
    int AssignLiteralOffset(int offset);
    int AssignSwitchTablesOffset(int offset);
    int AssignFillArrayDataOffset(int offset);
    int AssignInsnOffsets();
    void AssignOffsets();
    LIR* InsertCaseLabel(int vaddr, int keyVal);
    void MarkPackedCaseLabels(Mir2Lir::SwitchTable *tab_rec);
    void MarkSparseCaseLabels(Mir2Lir::SwitchTable *tab_rec);

    // Shared by all targets - implemented in local_optimizations.cc
    void ConvertMemOpIntoMove(LIR* orig_lir, int dest, int src);
    void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
    void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
    void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
    void RemoveRedundantBranches();

    // Shared by all targets - implemented in ralloc_util.cc
    int GetSRegHi(int lowSreg);
    bool oat_live_out(int s_reg);
    int oatSSASrc(MIR* mir, int num);
    void SimpleRegAlloc();
    void ResetRegPool();
    void CompilerInitPool(RegisterInfo* regs, int* reg_nums, int num);
    void DumpRegPool(RegisterInfo* p, int num_regs);
    void DumpCoreRegPool();
    void DumpFpRegPool();
    /* Mark a temp register as dead. Does not affect allocation state. */
    void Clobber(int reg) {
      ClobberBody(GetRegInfo(reg));
    }
    void ClobberSRegBody(RegisterInfo* p, int num_regs, int s_reg);
    void ClobberSReg(int s_reg);
    int SRegToPMap(int s_reg);
    void RecordCorePromotion(int reg, int s_reg);
    int AllocPreservedCoreReg(int s_reg);
    void RecordFpPromotion(int reg, int s_reg);
    int AllocPreservedSingle(int s_reg, bool even);
    int AllocPreservedDouble(int s_reg);
    int AllocPreservedFPReg(int s_reg, bool double_start);
    int AllocTempBody(RegisterInfo* p, int num_regs, int* next_temp,
                      bool required);
    int AllocTempDouble();
    int AllocFreeTemp();
    int AllocTemp();
    int AllocTempFloat();
    RegisterInfo* AllocLiveBody(RegisterInfo* p, int num_regs, int s_reg);
    RegisterInfo* AllocLive(int s_reg, int reg_class);
    void FreeTemp(int reg);
    RegisterInfo* IsLive(int reg);
    RegisterInfo* IsTemp(int reg);
    RegisterInfo* IsPromoted(int reg);
    bool IsDirty(int reg);
    void LockTemp(int reg);
    void ResetDef(int reg);
    void NullifyRange(LIR *start, LIR *finish, int s_reg1, int s_reg2);
    void MarkDef(RegLocation rl, LIR *start, LIR *finish);
    void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
    RegLocation WideToNarrow(RegLocation rl);
    void ResetDefLoc(RegLocation rl);
    void ResetDefLocWide(RegLocation rl);
    void ResetDefTracking();
    void ClobberAllRegs();
    void FlushAllRegsBody(RegisterInfo* info, int num_regs);
    void FlushAllRegs();
    bool RegClassMatches(int reg_class, int reg);
    void MarkLive(int reg, int s_reg);
    void MarkTemp(int reg);
    void UnmarkTemp(int reg);
    void MarkPair(int low_reg, int high_reg);
    void MarkClean(RegLocation loc);
    void MarkDirty(RegLocation loc);
    void MarkInUse(int reg);
    void CopyRegInfo(int new_reg, int old_reg);
    bool CheckCorePoolSanity();
    RegLocation UpdateLoc(RegLocation loc);
    RegLocation UpdateLocWide(RegLocation loc);
    RegLocation UpdateRawLoc(RegLocation loc);
    RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
    RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
    void CountRefs(RefCounts* core_counts, RefCounts* fp_counts);
    void DumpCounts(const RefCounts* arr, int size, const char* msg);
    void DoPromotion();
    int VRegOffset(int v_reg);
    int SRegOffset(int s_reg);
    RegLocation GetReturnWide(bool is_double);
    RegLocation GetReturn(bool is_float);
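
    // Illustrative note (not part of the original header): a typical generator
    // combines these allocation utilities with the load/store helpers declared
    // further below, roughly as
    //
    //   rl_src    = LoadValue(rl_src, kCoreReg);       // source into a native reg
    //   rl_result = EvalLoc(rl_dest, kCoreReg, true);  // pick/allocate a result reg
    //   // ... emit target instructions ...
    //   StoreValue(rl_dest, rl_result);                // mark live/dirty, write back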

    // Shared by all targets - implemented in gen_common.cc.
    bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                          RegLocation rl_src, RegLocation rl_dest, int lit);
    bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
    void HandleSuspendLaunchPads();
    void HandleIntrinsicLaunchPads();
    void HandleThrowLaunchPads();
    void GenBarrier();
    LIR* GenCheck(ConditionCode c_code, ThrowKind kind);
    LIR* GenImmedCheck(ConditionCode c_code, int reg, int imm_val,
                       ThrowKind kind);
    LIR* GenNullCheck(int s_reg, int m_reg, int opt_flags);
    LIR* GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                        ThrowKind kind);
    void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                             RegLocation rl_src2, LIR* taken, LIR* fall_through);
    void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
                                 LIR* taken, LIR* fall_through);
    void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
    void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src);
    void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                     RegLocation rl_src);
    void GenFilledNewArray(CallInfo* info);
    void GenSput(uint32_t field_idx, RegLocation rl_src,
                 bool is_long_or_double, bool is_object);
    void GenSget(uint32_t field_idx, RegLocation rl_dest,
                 bool is_long_or_double, bool is_object);
    void GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
    void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
    void GenConstString(uint32_t string_idx, RegLocation rl_dest);
    void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
    void GenThrow(RegLocation rl_src);
    void GenInstanceof(uint32_t type_idx, RegLocation rl_dest,
                       RegLocation rl_src);
    void GenCheckCast(uint32_t insn_idx, uint32_t type_idx,
                      RegLocation rl_src);
    void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                      RegLocation rl_src1, RegLocation rl_src2);
    void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_shift);
    void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                       RegLocation rl_src1, RegLocation rl_src2);
    void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src, int lit);
    void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_src2);
    void GenConversionCall(ThreadOffset func_offset, RegLocation rl_dest,
                           RegLocation rl_src);
    void GenSuspendTest(int opt_flags);
    void GenSuspendTestAndBranch(int opt_flags, LIR* target);
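
    // Illustrative note (not part of the original header): HandleEasyDivRem and
    // HandleEasyMultiply are strength-reduction hooks for literal operands, e.g.
    // a divide by a power-of-two literal can be rewritten as a shift sequence
    // (with sign correction) and certain small multiplies become shift/add or
    // shift/sub sequences, avoiding a runtime helper call.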

    // Shared by all targets - implemented in gen_invoke.cc.
    int CallHelperSetup(ThreadOffset helper_offset);
    LIR* CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc);
    void CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
    void CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
    void CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                      bool safepoint_pc);
    void CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                         RegLocation arg1, bool safepoint_pc);
    void CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0,
                                         int arg1, bool safepoint_pc);
    void CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0,
                                    bool safepoint_pc);
    void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset,
                                                 RegLocation arg0, RegLocation arg1,
                                                 bool safepoint_pc);
    void CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                 bool safepoint_pc);
    void CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                    int arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                               RegLocation arg2, bool safepoint_pc);
    void CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0, int arg2,
                                       bool safepoint_pc);
    void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                    int arg0, RegLocation arg1, RegLocation arg2,
                                                    bool safepoint_pc);
    void GenInvoke(CallInfo* info);
    void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
    int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
                             NextCallInsn next_call_insn,
                             const MethodReference& target_method,
                             uint32_t vtable_idx,
                             uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                             bool skip_this);
    int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
                           NextCallInsn next_call_insn,
                           const MethodReference& target_method,
                           uint32_t vtable_idx,
                           uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                           bool skip_this);
    RegLocation InlineTarget(CallInfo* info);
    RegLocation InlineTargetWide(CallInfo* info);

    bool GenInlinedCharAt(CallInfo* info);
    bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
    bool GenInlinedAbsInt(CallInfo* info);
    bool GenInlinedAbsLong(CallInfo* info);
    bool GenInlinedFloatCvt(CallInfo* info);
    bool GenInlinedDoubleCvt(CallInfo* info);
    bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
    bool GenInlinedStringCompareTo(CallInfo* info);
    bool GenInlinedCurrentThread(CallInfo* info);
    bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
    bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                             bool is_volatile, bool is_ordered);
    bool GenIntrinsic(CallInfo* info);
    int LoadArgRegs(CallInfo* info, int call_state,
                    NextCallInsn next_call_insn,
                    const MethodReference& target_method,
                    uint32_t vtable_idx,
                    uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                    bool skip_this);
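
    // Illustrative note (not part of the original header): the CallRuntimeHelper*
    // family is named after the shape of its arguments; e.g.
    // CallRuntimeHelperImmRegLocation(helper, arg0, arg1, true) loads the
    // immediate into the first argument register, loads the RegLocation into the
    // second, calls the Thread-offset helper, and records a safepoint.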

    // Shared by all targets - implemented in gen_loadstore.cc.
    RegLocation LoadCurrMethod();
    void LoadCurrMethodDirect(int r_tgt);
    LIR* LoadConstant(int r_dest, int value);
    LIR* LoadWordDisp(int rBase, int displacement, int r_dest);
    RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
    RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
    void LoadValueDirect(RegLocation rl_src, int r_dest);
    void LoadValueDirectFixed(RegLocation rl_src, int r_dest);
    void LoadValueDirectWide(RegLocation rl_src, int reg_lo, int reg_hi);
    void LoadValueDirectWideFixed(RegLocation rl_src, int reg_lo, int reg_hi);
    LIR* StoreWordDisp(int rBase, int displacement, int r_src);
    void StoreValue(RegLocation rl_dest, RegLocation rl_src);
    void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

    // Shared by all targets - implemented in mir_to_lir.cc.
    void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
    void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
    bool MethodBlockCodeGen(BasicBlock* bb);
    void SpecialMIR2LIR(SpecialCaseHandler special_case);
    void MethodMIR2LIR();

    // Required for target - codegen helpers.
    virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
    virtual int LoadHelper(ThreadOffset offset) = 0;
    virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
    virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                  int s_reg) = 0;
    virtual LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size) = 0;
    virtual LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
                                     int r_dest, int r_dest_hi, OpSize size, int s_reg) = 0;
    virtual LIR* LoadConstantNoClobber(int r_dest, int value) = 0;
    virtual LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) = 0;
    virtual LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size) = 0;
    virtual LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi) = 0;
    virtual LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size) = 0;
    virtual LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
                                      int r_src, int r_src_hi, OpSize size, int s_reg) = 0;
    virtual void MarkGCCard(int val_reg, int tgt_addr_reg) = 0;
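
    // Illustrative note (not part of the original header): the LoadBase*/StoreBase*
    // declarations above are the target-specific memory primitives that the shared
    // generators build on; e.g. a non-volatile 32-bit instance field read in GenIGet
    // is expected to reduce, roughly, to a single
    // LoadBaseDisp(object_reg, field_offset, dest_reg, kWord, s_reg) on each backend.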

    // Required for target - register utilities.
    virtual bool IsFpReg(int reg) = 0;
    virtual bool SameRegType(int reg1, int reg2) = 0;
    virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
    virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
    virtual int S2d(int low_reg, int high_reg) = 0;
    virtual int TargetReg(SpecialTargetRegister reg) = 0;
    virtual RegisterInfo* GetRegInfo(int reg) = 0;
    virtual RegLocation GetReturnAlt() = 0;
    virtual RegLocation GetReturnWideAlt() = 0;
    virtual RegLocation LocCReturn() = 0;
    virtual RegLocation LocCReturnDouble() = 0;
    virtual RegLocation LocCReturnFloat() = 0;
    virtual RegLocation LocCReturnWide() = 0;
    virtual uint32_t FpRegMask() = 0;
    virtual uint64_t GetRegMaskCommon(int reg) = 0;
    virtual void AdjustSpillMask() = 0;
    virtual void ClobberCalleeSave() = 0;
    virtual void FlushReg(int reg) = 0;
    virtual void FlushRegWide(int reg1, int reg2) = 0;
    virtual void FreeCallTemps() = 0;
    virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) = 0;
    virtual void LockCallTemps() = 0;
    virtual void MarkPreservedSingle(int v_reg, int reg) = 0;
    virtual void CompilerInitializeRegAlloc() = 0;

    // Required for target - miscellaneous.
    virtual AssemblerStatus AssembleInstructions(uintptr_t start_addr) = 0;
    virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix) = 0;
    virtual void SetupTargetResourceMasks(LIR* lir) = 0;
    virtual const char* GetTargetInstFmt(int opcode) = 0;
    virtual const char* GetTargetInstName(int opcode) = 0;
    virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
    virtual uint64_t GetPCUseDefEncoding() = 0;
    virtual uint64_t GetTargetInstFlags(int opcode) = 0;
    virtual int GetInsnSize(LIR* lir) = 0;
    virtual bool IsUnconditionalBranch(LIR* lir) = 0;
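
    // Illustrative note (not part of the original header): GetTargetInstFlags()
    // returns the IS_*/REG_*/SETS_CCODES attribute mask (defined at the top of
    // this file) for a target opcode; SetupResourceMasks() expands it into the
    // per-LIR use_mask/def_mask that the local optimizations consume.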

    // Required for target - Dalvik-level generators.
    virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenArithOpDouble(Instruction::Code opcode,
                                  RegLocation rl_dest, RegLocation rl_src1,
                                  RegLocation rl_src2) = 0;
    virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                          RegLocation rl_src1, RegLocation rl_src2) = 0;
    virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src) = 0;
    virtual bool GenInlinedCas32(CallInfo* info, bool need_write_barrier) = 0;
    virtual bool GenInlinedMinMaxInt(CallInfo* info, bool is_min) = 0;
    virtual bool GenInlinedSqrt(CallInfo* info) = 0;
    virtual void GenNegLong(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) = 0;
    virtual void GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base,
                                int offset, ThrowKind kind) = 0;
    virtual RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi,
                                  bool is_div) = 0;
    virtual RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit,
                                     bool is_div) = 0;
    virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) = 0;
    virtual void GenDivZeroCheck(int reg_lo, int reg_hi) = 0;
    virtual void GenEntrySequence(RegLocation* ArgLocs,
                                  RegLocation rl_method) = 0;
    virtual void GenExitSequence() = 0;
    virtual void GenFillArrayData(uint32_t table_offset,
                                  RegLocation rl_src) = 0;
    virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                     bool is_double) = 0;
    virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
    virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
    virtual void GenMemBarrier(MemBarrierKind barrier_kind) = 0;
    virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src) = 0;
    virtual void GenMonitorExit(int opt_flags, RegLocation rl_src) = 0;
    virtual void GenMoveException(RegLocation rl_dest) = 0;
    virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit, int first_bit,
                                               int second_bit) = 0;
    virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
    virtual void GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) = 0;
    virtual void GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) = 0;
    virtual void GenSpecialCase(BasicBlock* bb, MIR* mir,
                                SpecialCaseHandler special_case) = 0;
    virtual void GenArrayObjPut(int opt_flags, RegLocation rl_array,
                                RegLocation rl_index, RegLocation rl_src, int scale) = 0;
    virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
    virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale) = 0;
    virtual void GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1,
                                   RegLocation rl_shift) = 0;

    // Required for target - single operation generators.
    virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
    virtual LIR* OpCmpBranch(ConditionCode cond, int src1, int src2,
                             LIR* target) = 0;
    virtual LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
                                LIR* target) = 0;
    virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
    virtual LIR* OpDecAndBranch(ConditionCode c_code, int reg,
                                LIR* target) = 0;
    virtual LIR* OpFpRegCopy(int r_dest, int r_src) = 0;
    virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
    virtual LIR* OpMem(OpKind op, int rBase, int disp) = 0;
    virtual LIR* OpPcRelLoad(int reg, LIR* target) = 0;
    virtual LIR* OpReg(OpKind op, int r_dest_src) = 0;
    virtual LIR* OpRegCopy(int r_dest, int r_src) = 0;
    virtual LIR* OpRegCopyNoInsert(int r_dest, int r_src) = 0;
    virtual LIR* OpRegImm(OpKind op, int r_dest_src1, int value) = 0;
    virtual LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset) = 0;
    virtual LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2) = 0;
    virtual LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) = 0;
    virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
                             int r_src2) = 0;
    virtual LIR* OpTestSuspend(LIR* target) = 0;
    virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
    virtual LIR* OpVldm(int rBase, int count) = 0;
    virtual LIR* OpVstm(int rBase, int count) = 0;
    virtual void OpLea(int rBase, int reg1, int reg2, int scale,
                       int offset) = 0;
    virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                               int src_hi) = 0;
    virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
    virtual bool InexpensiveConstantInt(int32_t value) = 0;
    virtual bool InexpensiveConstantFloat(int32_t value) = 0;
    virtual bool InexpensiveConstantLong(int64_t value) = 0;
    virtual bool InexpensiveConstantDouble(int64_t value) = 0;

    // Temp workaround
    void Workaround7250540(RegLocation rl_dest, int value);

  protected:
    Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

    CompilationUnit* GetCompilationUnit() {
      return cu_;
    }

  private:
    void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                            RegLocation rl_src);
    void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                    bool type_known_abstract, bool use_declaring_class,
                                    bool can_assume_type_is_in_dex_cache,
                                    uint32_t type_idx, RegLocation rl_dest,
                                    RegLocation rl_src);

    void ClobberBody(RegisterInfo* p);
    void ResetDefBody(RegisterInfo* p) {
      p->def_start = NULL;
      p->def_end = NULL;
    }

  public:
    // TODO: add accessors for these.
    LIR* literal_list_;          // Constants.
    LIR* method_literal_list_;   // Method literals requiring patching.
    LIR* code_literal_list_;     // Code literals requiring patching.
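
    // Illustrative note (not part of the original header): these lists collect
    // out-of-line constants discovered during codegen (see AddWordData /
    // ScanLiteralPool above); AssignLiteralOffset() places them after the
    // instruction stream and InstallLiteralPools() copies their values into the
    // final code buffer.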

  protected:
    CompilationUnit* const cu_;
    MIRGraph* const mir_graph_;
    GrowableArray<SwitchTable*> switch_tables_;
    GrowableArray<FillArrayData*> fill_array_data_;
    GrowableArray<LIR*> throw_launchpads_;
    GrowableArray<LIR*> suspend_launchpads_;
    GrowableArray<LIR*> intrinsic_launchpads_;
    SafeMap<unsigned int, LIR*> boundary_map_;  // boundary lookup cache.
    /*
     * Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
     * Native PC is on the return address of the safepointed operation. Dex PC is for
     * the instruction being executed at the safepoint.
     */
    std::vector<uint32_t> pc2dex_mapping_table_;
    /*
     * Holds mapping from Dex PC to native PC for catch entry points. Native PC and Dex PC
     * immediately precede the instruction.
     */
    std::vector<uint32_t> dex2pc_mapping_table_;
    int data_offset_;            // Starting offset of literal pool.
    int total_size_;             // Header + code size.
    LIR* block_label_list_;
    PromotionMap* promotion_map_;
    /*
     * TODO: The code generation utilities don't have a built-in
     * mechanism to propagate the original Dalvik opcode address to the
     * associated generated instructions. For the trace compiler, this wasn't
     * necessary because the interpreter handled all throws and debugging
     * requests. For now we'll handle this by placing the Dalvik offset
     * in the CompilationUnit struct before codegen for each instruction.
     * The low-level LIR creation utilities will pull it from here. Rework this.
     */
    int current_dalvik_offset_;
    RegisterPool* reg_pool_;
    /*
     * Sanity checking for the register temp tracking. The same ssa
     * name should never be associated with one temp register per
     * instruction compilation.
     */
    int live_sreg_;
    CodeBuffer code_buffer_;
    // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
    UnsignedLeb128EncodingVector encoded_mapping_table_;
    std::vector<uint32_t> core_vmap_table_;
    std::vector<uint32_t> fp_vmap_table_;
    std::vector<uint8_t> native_gc_map_;
    int num_core_spills_;
    int num_fp_spills_;
    int frame_size_;
    unsigned int core_spill_mask_;
    unsigned int fp_spill_mask_;
    LIR* first_lir_insn_;
    LIR* last_lir_insn_;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_