// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/arm/lithium-codegen-arm.h"

namespace v8 {
namespace internal {

// We use the root register to spill a value while breaking a cycle in parallel
// moves. We don't need access to roots while resolving the move list, and
// using the root register has two advantages:
//  - It is not in the Crankshaft allocatable register list, so it can't
//    interfere with any of the moves we are resolving.
//  - We don't need to push it on the stack, as we can reload it with its value
//    once we have resolved a cycle.
#define kSavedValueRegister kRootRegister
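
// For example, after a cycle through general-purpose operands is resolved,
// Resolve() restores the root register with a single InitializeRootRegister()
// call instead of bracketing the whole sequence with a push/pop pair.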


LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
      saved_destination_(NULL), need_to_restore_root_(false) { }


#define __ ACCESS_MASM(cgen_->masm())


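// Usage sketch (illustrative; the real call sites are in the code generator):
// the owning LCodeGen constructs one resolver and hands it each parallel
// move, roughly:
//
//   LGapResolver resolver(codegen);  // hypothetical local names
//   resolver.Resolve(gap->GetParallelMove(LGap::START));
//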
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip moves with constant sources so they are performed last.  They
    // don't block other moves, and deferring those with register
    // destinations keeps the registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      root_index_ = i;  // Any cycle is found by reaching this move again.
      PerformMove(i);
      if (in_cycle_) {
        RestoreValue();
      }
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  if (need_to_restore_root_) {
    ASSERT(kSavedValueRegister.is(kRootRegister));
    __ InitializeRootRegister();
    need_to_restore_root_ = false;
  }

  moves_.Rewind(0);
}


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.

  // When doing a depth-first traversal of moves, we can only find a cycle
  // by encountering the starting move again.  So by spilling the source of
  // the starting move, we break the cycle.  All moves are then unblocked,
  // and the starting move is completed by writing the spilled value to
  // its destination.  All other moves from the spilled source have been
  // completed prior to breaking the cycle.
  // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) may require the spilled value itself to be
  // pushed to the stack, to free up the scratch register.
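  //
  // For example, in the cyclic parallel move {r0 -> r1, r1 -> r0} (an
  // illustrative swap), PerformMove(0) marks r0 -> r1 as pending and recurses
  // into r1 -> r0, which finds the root move pending and calls BreakCycle:
  // r1 is saved in kSavedValueRegister and r1 -> r0 is eliminated.  The now
  // unblocked r0 -> r1 is emitted, and RestoreValue() finally writes the
  // saved value to r0.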
  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack-allocated local.  Multiple moves can
  // be pending because this function is recursive.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
      // If there is a blocking, pending move, it must be moves_[root_index_],
      // and all other moves with the same source as moves_[root_index_] are
      // successfully executed (because they are cycle-free) by this loop.
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // The move may be blocked on a pending move, which must be the starting move.
  // In this case, we have a cycle, and we save the source of this move to
  // a scratch register to break it.
  LMoveOperands other_move = moves_[root_index_];
  if (other_move.Blocks(destination)) {
    ASSERT(other_move.IsPending());
    BreakCycle(index);
    return;
  }

  // This move is no longer blocked.
  EmitMove(index);
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


void LGapResolver::BreakCycle(int index) {
  // We save the source of this move in a scratch register and remember its
  // destination.  Then we mark the move as resolved so the cycle is
  // broken and we can perform the other moves.
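  // (General-purpose values go to kSavedValueRegister, i.e. the root
  // register, which Resolve() restores afterwards; double values go to
  // kScratchDoubleReg, which needs no restore step.)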
  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
  ASSERT(!in_cycle_);
  in_cycle_ = true;
  LOperand* source = moves_[index].source();
  saved_destination_ = moves_[index].destination();
  if (source->IsRegister()) {
    need_to_restore_root_ = true;
    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
  } else if (source->IsStackSlot()) {
    need_to_restore_root_ = true;
    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
  } else if (source->IsDoubleRegister()) {
    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
  } else if (source->IsDoubleStackSlot()) {
    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
  } else {
    UNREACHABLE();
  }
  // This move will be done by restoring the saved value to the destination.
  moves_[index].Eliminate();
}


void LGapResolver::RestoreValue() {
  ASSERT(in_cycle_);
  ASSERT(saved_destination_ != NULL);

  if (saved_destination_->IsRegister()) {
    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
  } else if (saved_destination_->IsStackSlot()) {
    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
  } else if (saved_destination_->IsDoubleRegister()) {
    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
  } else if (saved_destination_->IsDoubleStackSlot()) {
    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
  } else {
    UNREACHABLE();
  }

  in_cycle_ = false;
  saved_destination_ = NULL;
}


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
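  // For instance, a constant source only ever targets a register, a double
  // register, or a stack slot; a constant -> double stack slot move does not
  // occur here.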

  if (source->IsRegister()) {
    Register source_register = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(cgen_->ToRegister(destination), source_register);
    } else {
      ASSERT(destination->IsStackSlot());
      __ str(source_register, cgen_->ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(cgen_->ToRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (!destination_operand.OffsetIsUint12Encodable()) {
        // ip is overwritten while saving the value to the destination.
        // Therefore we can't use ip.  It is OK if the read from the source
        // destroys ip, since that happens before the value is read.
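        // (The assembler materializes an out-of-range offset by building the
        // address in ip; since the value travels in a VFP register below,
        // that transient clobber is harmless.)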
        __ vldr(kScratchDoubleReg.low(), source_operand);
        __ vstr(kScratchDoubleReg.low(), destination_operand);
      } else {
        __ ldr(ip, source_operand);
        __ str(ip, destination_operand);
      }
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      DwVfpRegister result = cgen_->ToDoubleRegister(destination);
      double v = cgen_->ToDouble(constant_source);
      __ Vmov(result, v, ip);
    } else {
      ASSERT(destination->IsStackSlot());
      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
      need_to_restore_root_ = true;
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ mov(kSavedValueRegister,
               Operand(cgen_->ToRepresentation(constant_source, r)));
      } else {
        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
      }
      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleRegister()) {
    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ vstr(source_register, cgen_->ToMemOperand(destination));
    }

  } else if (source->IsDoubleStackSlot()) {
    MemOperand source_operand = cgen_->ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand destination_operand = cgen_->ToMemOperand(destination);
      if (in_cycle_) {
        // kScratchDoubleReg was used to break the cycle and still holds the
        // saved value, so preserve it on the stack while it is reused for
        // this memory-to-memory copy.
        __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg);
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
        __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg);
      } else {
        __ vldr(kScratchDoubleReg, source_operand);
        __ vstr(kScratchDoubleReg, destination_operand);
      }
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}


#undef __

} }  // namespace v8::internal