// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/mips/lithium-codegen-mips.h"
#include "src/mips/lithium-gap-resolver-mips.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}

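// Typical usage (a sketch, not part of this file): the MIPS LCodeGen owns
// a resolver and hands it each LParallelMove as code is emitted, e.g.
//   resolver_.Resolve(move);  // from LCodeGen::DoParallelMove.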

void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constant moves so they are performed last.  They don't block
    // other moves, and skipping such moves with register destinations
    // keeps those registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}

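// Worked example (illustrative): the parallel swap { v0 -> v1, v1 -> v0 }.
// PerformMove(0) marks move 0 pending and recurses into move 1, whose
// source v1 is move 0's destination.  Move 1 is blocked by the pending
// root move, so BreakCycle spills v1 into kLithiumScratchReg and
// eliminates move 1.  Move 0 then emits v0 -> v1, and RestoreValue
// writes the spilled value into v0, completing the swap.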

void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // When doing a depth-first traversal of moves, a cycle can only be found
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication: moves to MemOperands with offsets that do
  // not fit in a 16-bit immediate need the 'at' register to compute the
  // address, so the cycle-breaking value cannot be kept in 'at' for such
  // moves (see EmitMove, which falls back to an FPU scratch register).
  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack-allocated local.  Multiple moves can
  // be pending because this function is recursive.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

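// Shorthand for emitting instructions through the owning code generator's
// MacroAssembler.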
#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_].  After performing all moves in the tree rooted
  // at that move, we save the value to that source.
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


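// The counterpart of BreakCycle: once every move in the cycle has been
// emitted, write the spilled value to the destination recorded in
// saved_destination_.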
void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
  } else if (saved_destination_->IsStackSlot()) {
    __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kLithiumScratchDouble,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
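  // Supported pairs, mirroring the branches below:
  //   Register        -> Register | StackSlot
  //   StackSlot       -> Register | StackSlot
  //   ConstantOperand -> Register | DoubleRegister | StackSlot
  //   DoubleRegister  -> DoubleRegister | DoubleStackSlot
  //   DoubleStackSlot -> DoubleRegister | DoubleStackSlot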

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ sw(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ lw(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // 'at' is overwritten while saving the value to the destination,
          // so the value cannot be held in 'at'.  It is OK if the load from
          // the source clobbers 'at', since 'at' is only needed again when
          // the destination address is formed.
          // This uses only a single register of the double register pair.
          __ lwc1(kLithiumScratchDouble, source_operand);
          __ swc1(kLithiumScratchDouble, destination_operand);
        } else {
          __ lw(at, source_operand);
          __ sw(at, destination_operand);
        }
      } else {
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Move(result, v);
    } else {
      DCHECK(destination->IsStackSlot());
      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ li(kLithiumScratchReg,
              Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
      }
      __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchDouble was used to break the cycle,
        // but kLithiumScratchReg is free.
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
        __ ldc1(kLithiumScratchDouble, source_operand);
        __ sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


#undef __

} }  // namespace v8::internal