    Searched defs:fp_spills (Results 1 - 4 of 4)

  /art/compiler/optimizing/
register_allocation_resolver.cc 284 size_t fp_spills = local
287 core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
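The register_allocation_resolver.cc hit folds the spill counts into the frame size. Below is a minimal stand-alone sketch of that accounting, assuming the counts come from popcounts of the core and FPU spill masks; ComputeSpillArea and its parameters are illustrative names, not ART's API:

    #include <cstddef>
    #include <cstdint>

    size_t ComputeSpillArea(uint32_t core_spill_mask, uint32_t fpu_spill_mask,
                            size_t core_register_spill_size,  // bytes per saved core register
                            size_t fp_register_spill_size) {  // bytes per saved FP register
      size_t core_spills = static_cast<size_t>(__builtin_popcount(core_spill_mask));
      size_t fp_spills = static_cast<size_t>(__builtin_popcount(fpu_spill_mask));
      // Each saved register occupies one slot of its kind; the two areas are
      // summed just as in the line-287 snippet above.
      return core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
    }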
code_generator.cc 1534 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); local
1553 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); local
    [all...]
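Both code_generator.cc hits sit in the slow-path save/restore helpers: GetSlowPathSpills() hands back a bitmask of live floating-point registers that must be preserved across the runtime call, and each one gets its own stack slot. A minimal sketch of that walk, with SaveFpRegister passed in as an assumed callback rather than ART's actual signature:

    #include <cstdint>
    #include <functional>

    void SaveSlowPathFpSpills(uint32_t fp_spills, uint32_t slot_size, uint32_t first_offset,
                              const std::function<void(uint32_t reg, uint32_t offset)>& save_fp) {
      uint32_t stack_offset = first_offset;
      while (fp_spills != 0u) {
        uint32_t reg = static_cast<uint32_t>(__builtin_ctz(fp_spills));  // lowest live FP register
        save_fp(reg, stack_offset);   // e.g. emit a store of that register to [sp, #stack_offset]
        stack_offset += slot_size;    // next spill slot
        fp_spills &= fp_spills - 1u;  // clear the bit just handled, continue upward
      }
    }

The second hit (the restore path) mirrors this pattern with loads instead of stores.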
code_generator_arm64.cc 178 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); local
181 fp_spills,
186 CPURegList fp_list = CPURegList(CPURegister::kVRegister, v_reg_size, fp_spills);
233 const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); local
234 for (uint32_t i : LowToHighBits(fp_spills)) {
    [all...]
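The arm64 hits consume fp_spills in two ways: the first wraps the mask in a VIXL CPURegList so groups of V registers can be saved together, the second iterates it with LowToHighBits(). A stand-alone sketch of what such a low-to-high walk amounts to for a 32-bit mask (ART's real iterator lives elsewhere; this version is purely illustrative):

    #include <cstdint>
    #include <vector>

    std::vector<uint32_t> LowToHighBitIndices(uint32_t mask) {
      std::vector<uint32_t> indices;
      while (mask != 0u) {
        indices.push_back(static_cast<uint32_t>(__builtin_ctz(mask)));  // index of the lowest set bit
        mask &= mask - 1u;                                              // clear that bit
      }
      return indices;
    }

    // Usage: for (uint32_t reg : LowToHighBitIndices(fp_spills)) { /* touch v<reg> */ }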
code_generator_arm_vixl.cc 356 uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); local
358 for (uint32_t i : LowToHighBits(fp_spills)) {
365 while (fp_spills != 0u) {
366 uint32_t begin = CTZ(fp_spills);
367 uint32_t tmp = fp_spills + (1u << begin);
368 fp_spills &= tmp; // Clear the contiguous range of 1s.
390 uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers */ false); local
391 while (fp_spills != 0u) {
392 uint32_t begin = CTZ(fp_spills);
393 uint32_t tmp = fp_spills + (1u << begin);
    [all...]
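The ARM (VIXL32) snippets carve fp_spills into contiguous runs of set bits so each run of consecutive FP registers can be saved or restored with one store-/load-multiple instruction. The trick on lines 366-368: adding 1 << begin ripples a carry through the lowest run, clearing it, and the lowest set bit of the result marks one past the run's end. A stand-alone sketch, assuming a 32-bit mask:

    #include <cstdint>
    #include <cstdio>

    void ForEachContiguousRange(uint32_t mask) {
      while (mask != 0u) {
        uint32_t begin = static_cast<uint32_t>(__builtin_ctz(mask));  // start of the lowest run of 1s
        uint32_t tmp = mask + (1u << begin);  // carry clears the run and sets the bit just above it
        mask &= tmp;                          // drop the run from the mask, keep the higher bits
        uint32_t end = (tmp == 0u) ? 32u      // the run reached bit 31, so the add wrapped to zero
                                   : static_cast<uint32_t>(__builtin_ctz(tmp));  // one past the run
        std::printf("registers [%u, %u)\n", begin, end);  // e.g. one vstm/vldm for this range
      }
    }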
