// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
#include "src/register-configuration.h"

namespace v8 {
namespace internal {

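// The gap resolver lowers an LParallelMove into an equivalent sequence of
// sequential moves and swaps.  source_uses_ and destination_uses_ track, per
// allocatable general register, how many pending moves read or write that
// register, and spilled_register_ records a register that has been pushed on
// the stack to serve as a scratch.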
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip moves with constant sources so they are performed last.  Constants
    // don't block other moves, and deferring those with register destinations
    // keeps the registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  DCHECK(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on at most one pending move, in which case we
  // have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


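// Add a move to the worklist, bumping the use counts of any general-purpose
// registers it reads or writes.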
void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


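// Eliminate the move at |index| and release its register use counts.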
void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    DCHECK(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    DCHECK(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


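// Count the pending (non-eliminated) moves whose source equals |operand|.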
int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


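// Return an allocatable general register, other than |reg|, that is the
// destination of some pending move but the source of none; its current value
// is dead and it can be clobbered safely.  Returns no_reg if there is none.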
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : reg.code();
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
        code != skip_index) {
      return Register::from_code(code);
    }
  }
  return no_reg;
}


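// Check that the resolver is back in its initial state: no moves queued, no
// register spilled, and all use counts zero.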
bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] != 0) return false;
    if (destination_uses_[code] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

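// Pop any register spilled for a temp and clear the worklist.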
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


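// If |operand| is the register currently spilled on the stack, pop its value
// back before the register is read or written.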
void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::from_code(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::from_code(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
    int code = config->GetAllocatableGeneralCode(i);
    if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
      Register scratch = Register::from_code(code);
      __ push(scratch);
      spilled_register_ = code;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  spilled_register_ = config->GetAllocatableGeneralCode(0);
  Register scratch = Register::from_code(spilled_register_);
  __ push(scratch);
  return scratch;
}


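// Emit code for the move at |index| and remove it from the worklist.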
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
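      // Materialize the double by pushing its two 32-bit halves and loading
      // them through the stack; zero is synthesized more cheaply with xorps.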
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = bit_cast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
        __ xorps(dst, dst);
      } else {
        __ push(Immediate(upper));
        __ push(Immediate(lower));
        __ movsd(dst, Operand(esp, 0));
        __ add(esp, Immediate(kDoubleSize));
      }
    } else {
      DCHECK(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movaps(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(dst, src);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movsd(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = cgen_->ToOperand(destination);
      __ movsd(xmm0, src);
      __ movsd(dst, xmm0);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


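// Emit a swap of the source and destination of the move at |index|, then
// rewrite the remaining moves (and register use counts) to reflect the
// exchange.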
void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ push(src);
    __ mov(src, dst);
    __ pop(dst);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible.  Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
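      // No free register: swap the register and memory word in place with
      // three XORs.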
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Spill on demand to use a temporary.  If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // XMM register-register swap. We rely on having xmm0
    // available as a fixed scratch register.
    XMMRegister src = cgen_->ToDoubleRegister(source);
    XMMRegister dst = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, src);
    __ movaps(src, dst);
    __ movaps(dst, xmm0);
  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-memory swap.  We rely on having xmm0
    // available as a fixed scratch register.
    DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                              ? source
                                              : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movsd(xmm0, other);
    __ movsd(other, reg);
    __ movaps(reg, xmm0);
  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory.  Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movsd(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movsd(src0, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source equal to either
  // this move's source or destination needs its source changed to reflect
  // the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We only track use counts for register operands, so recompute the
    // source-use count for the register operand after the swap.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32