// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "ia32/lithium-gap-resolver-ia32.h"
#include "ia32/lithium-codegen-ia32.h"

namespace v8 {
namespace internal {

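// The resolver tracks, for each allocatable general-purpose register, how
// many remaining moves read it (source_uses_) and how many write it
// (destination_uses_), and remembers which register, if any, is currently
// spilled to the stack to serve as a temporary (spilled_register_, or -1
// when no register is spilled).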
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.
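  //
  // For example, the two moves {A -> B, B -> A} form a cycle: performing
  // the first move marks it pending, the recursive call on the second move
  // finds the pending blocker and resolves the cycle with a swap, after
  // which the first move has become redundant and is simply removed.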

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on at most one pending move, in which case
  // we have a cycle.  Search for such a blocking move and perform a swap
  // to resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move);
}


void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    ASSERT(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    ASSERT(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


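// Returns an allocatable register that is the destination of some remaining
// move but not the source of any (its current value is about to be
// overwritten and is not read by any move, so it can safely be used as a
// temp), excluding |reg|.  Returns no_reg if there is no such register.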
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}


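// Checks the resolver's rest-state invariant: no moves outstanding, no
// register spilled, and all source/destination use counts at zero.  This is
// asserted at the start and end of Resolve.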
bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

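// Called from Resolve once all moves have been emitted: restores any
// general-purpose register that was spilled to create a temporary and
// clears the move list.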
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


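// If |operand| is the register currently spilled to the stack as a
// temporary, pop its original value back before the operand is used.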
void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Immediate src = cgen_->ToImmediate(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsDoubleRegister()) {
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    XMMRegister src = cgen_->ToDoubleRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movdbl(dst, src);

  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      __ movdbl(dst, src);
    } else {
      // We rely on having xmm0 available as a fixed scratch register.
      Operand dst = cgen_->ToOperand(destination);
      __ movdbl(xmm0, src);
      __ movdbl(dst, xmm0);
    }

  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible.  Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
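      // No free temp register: swap the register and the memory operand in
      // place with three XORs (a ^= b; b ^= a; a ^= b).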
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Spill on demand to use a temporary.  If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
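      // Load dst into tmp0, XOR-swap tmp0 with the src slot (leaving src
      // holding the old dst value and tmp0 holding the old src value), then
      // store tmp0 to dst.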
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // XMM register-register or register-memory.  We rely on having xmm0
    // available as a fixed scratch register.
    ASSERT(source->IsDoubleRegister() || source->IsDoubleStackSlot());
    ASSERT(destination->IsDoubleRegister() ||
           destination->IsDoubleStackSlot());
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    Operand other =
        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
    __ movdbl(xmm0, other);
    __ movdbl(other, reg);
    __ movdbl(reg, Operand(xmm0));

  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
    // Double-width memory-to-memory.  Spill on demand to use a general
    // purpose temporary register and also rely on having xmm0 available as
    // a fixed scratch register.
    Register tmp = EnsureTempRegister();
    Operand src0 = cgen_->ToOperand(source);
    Operand src1 = cgen_->HighOperand(source);
    Operand dst0 = cgen_->ToOperand(destination);
    Operand dst1 = cgen_->HighOperand(destination);
    __ movdbl(xmm0, dst0);  // Save destination in xmm0.
    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
    __ mov(dst0, tmp);
    __ mov(tmp, src1);
    __ mov(dst1, tmp);
    __ movdbl(src0, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32