// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"

#include "src/crankshaft/mips64/lithium-codegen-mips64.h"

namespace v8 {
namespace internal {

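// Constructed with its owning code generator; moves_ is the zone-allocated
// worklist of pending moves, emptied at the end of each Resolve() so the
// resolver can be reused for every gap.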
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      root_index_(0),
      in_cycle_(false),
      saved_destination_(NULL) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  DCHECK(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip moves with constant sources so they are performed last.  They
    // don't block other moves, and skipping those with register
    // destinations keeps the registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      DCHECK(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

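  // All moves have been performed; empty the worklist for the next gap.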
  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // We can only find a cycle, when doing a depth-first traversal of moves,
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require the spilled value to be moved
  // again, this time to the stack, in order to free up the register.
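  // Example (register names illustrative): the parallel move
  // {a0 -> a1, a1 -> a0} is a cycle.  PerformMove on a0 -> a1 recurses
  // into a1 -> a0, finds it blocked by the pending root move, and calls
  // BreakCycle() to spill a1; RestoreValue() later writes the saved
  // value to a0.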
  DCHECK(!moves_[index].IsPending());
  DCHECK(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack allocated local.  Multiple moves can
  // be pending because this function is recursive.
  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move it must be moves_[root_index_]
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    DCHECK(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_DCHECKS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}

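// Shorthand: emit assembly through the code generator's MacroAssembler.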
#define __ ACCESS_MASM(cgen_->masm())

void LGapResolver::BreakCycle(int index) {
  // We save in a register the value that should end up in the source of
  // moves_[root_index_].  After performing all moves in the tree rooted
  // in that move, we save the value to that source.
  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
  DCHECK(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
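  // Spill the source into the Lithium scratch register that matches its
  // kind: general-purpose for word values, FPU for doubles.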
  if (source->IsRegister()) {
    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    __ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  DCHECK(in_cycle_);
  DCHECK(saved_destination_ != NULL);

  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
  } else if (saved_destination_->IsStackSlot()) {
    __ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
             kLithiumScratchDouble);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ sdc1(kLithiumScratchDouble,
            cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      DCHECK(destination->IsStackSlot());
      __ sd(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(cgen_->ToRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        if (!destination_operand.OffsetIsInt16Encodable()) {
          // 'at' is overwritten while saving the value to the destination.
          // Therefore we can't use 'at'.  It is OK if the read from the source
          // destroys 'at', since that happens before the value is stored.
          // The 64-bit FPU scratch register can hold the whole stack slot.
          __ ldc1(kLithiumScratchDouble, source_operand);
          __ sdc1(kLithiumScratchDouble, destination_operand);
        } else {
          __ ld(at, source_operand);
          __ sd(at, destination_operand);
        }
      } else {
        __ ld(kLithiumScratchReg, source_operand);
        __ sd(kLithiumScratchReg, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmi(constant_source)) {
        __ li(dst, Operand(cgen_->ToSmi(constant_source)));
      } else if (cgen_->IsInteger32(constant_source)) {
        __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
      } else {
        __ li(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DoubleRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Move(result, v);
    } else {
      DCHECK(destination->IsStackSlot());
      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
      if (cgen_->IsSmi(constant_source)) {
        __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
      } else if (cgen_->IsInteger32(constant_source)) {
        __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
      } else {
        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
      }
    }

  } else if (source->IsDoubleRegister()) {
    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      __ sdc1(source_register, destination_operand);
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kLithiumScratchDouble was used to break the cycle,
        // but kLithiumScratchReg is free.
        MemOperand source_high_operand =
            cgen_->ToHighMemOperand(source);
        MemOperand destination_high_operand =
            cgen_->ToHighMemOperand(destination);
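        // Copy the slot as two 32-bit halves through the free GP scratch
        // register: low word first, then the high word.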
        __ lw(kLithiumScratchReg, source_operand);
        __ sw(kLithiumScratchReg, destination_operand);
        __ lw(kLithiumScratchReg, source_high_operand);
        __ sw(kLithiumScratchReg, destination_high_operand);
      } else {
        __ ldc1(kLithiumScratchDouble, source_operand);
        __ sdc1(kLithiumScratchDouble, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


#undef __

}  // namespace internal
}  // namespace v8