/art/compiler/optimizing/ |
register_allocation_resolver.cc |
  282  size_t core_spills =    [local]
  287  core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
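(The expression at line 287 sizes the spill area: bytes per core register times the number of spilled core registers, plus the floating-point equivalent. A minimal standalone sketch of that arithmetic follows; the function and parameter packaging is hypothetical, ART computes this inline.)

#include <cstddef>

// Sketch only: total stack bytes reserved for register spills,
// matching the arithmetic in register_allocation_resolver.cc line 287.
size_t SpillAreaSize(size_t core_register_spill_size, size_t core_spills,
                     size_t fp_register_spill_size, size_t fp_spills) {
  return core_register_spill_size * core_spills +
         fp_register_spill_size * fp_spills;
}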
|
code_generator.cc |
  1522  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);    [local]
  1546  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);    [local]
  [all...]
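(Both hits call GetSlowPathSpills() to find which core registers must be preserved around a slow-path runtime call: roughly, registers that are live at that point and would otherwise be clobbered by the call. A hedged sketch of that kind of mask intersection; the mask names are assumptions for illustration, not ART's actual fields.)

#include <cstdint>

// Sketch: a slow path must save every live register the call may clobber.
// ART's real GetSlowPathSpills() derives these masks from LocationSummary.
uint32_t SlowPathSpillsSketch(uint32_t live_core_registers,
                              uint32_t clobberable_core_registers) {
  return live_core_registers & clobberable_core_registers;
}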
code_generator_arm64.cc |
  177  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);    [local]
  179  DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spills,
  184  CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize, core_spills);
  221  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);    [local]
  222  for (uint32_t i : LowToHighBits(core_spills)) {
  [all...]
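(Line 222 walks the returned mask one register at a time; LowToHighBits yields the set bit positions in ascending order. A sketch of the same iteration using a GCC/Clang builtin; ART's real helper lives in its bit utilities.)

#include <cstdint>

// Sketch: visit each set bit of `mask` from lowest to highest,
// mirroring the LowToHighBits(core_spills) loops in the ARM back ends.
template <typename Visitor>
void ForEachLowToHigh(uint32_t mask, Visitor visit) {
  while (mask != 0u) {
    visit(static_cast<uint32_t>(__builtin_ctz(mask)));  // lowest set bit index
    mask &= mask - 1u;                                   // clear that bit
  }
}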
code_generator_arm_vixl.cc |
  341  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);    [local]
  342  for (uint32_t i : LowToHighBits(core_spills)) {
  354  arm_codegen->GetAssembler()->StoreRegisterList(core_spills, orig_offset);
  379  const uint32_t core_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ true);    [local]
  380  for (uint32_t i : LowToHighBits(core_spills)) {
  388  arm_codegen->GetAssembler()->LoadRegisterList(core_spills, orig_offset);
  [all...]
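(The VIXL path stores and reloads the whole mask with StoreRegisterList/LoadRegisterList starting at orig_offset, with registers laid out in the same low-to-high order as the loops at lines 342 and 380. A sketch of the offset bookkeeping under that assumption; the 4-byte slot size and function name are illustrative only.)

#include <cstdint>

// Sketch: registers named in `mask` occupy consecutive 4-byte slots
// starting at `orig_offset`, lowest register number first. Returns the
// offset of register `reg`, assuming its bit is set in `mask`.
uint32_t SlotOffsetFor(uint32_t mask, uint32_t reg, uint32_t orig_offset) {
  uint32_t offset = orig_offset;
  for (uint32_t i = 0; i < reg; ++i) {
    if ((mask >> i) & 1u) {
      offset += 4u;  // assumed word size for this sketch
    }
  }
  return offset;
}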
/art/oatdump/ |
oatdump.cc | [all...] |