// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X87

#include "src/x87/lithium-gap-resolver-x87.h"
#include "src/x87/lithium-codegen-x87.h"

namespace v8 {
namespace internal {

// The resolver tracks, per allocatable register, how many pending moves use
// that register as a source (source_uses_) or destination (destination_uses_),
// so that a free register can be found for temporaries without spilling.
// spilled_register_ is the allocation index of a register pushed on the stack
// to serve as a temp, or -1 if none is currently spilled.
LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner),
      moves_(32, owner->zone()),
      source_uses_(),
      destination_uses_(),
      spilled_register_(-1) {}


// Emit code that performs all moves of |parallel_move| as if they happened
// simultaneously, resolving interferences and cycles between the moves.
void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(HasBeenReset());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  Finish();
  ASSERT(HasBeenReset());
}


// Copy the non-redundant moves of |parallel_move| into moves_, updating the
// register use counts via AddMove, then verify the destination-uniqueness
// invariant.
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) AddMove(move);
  }
  Verify();
}


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved on the side.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    RemoveMove(index);
    return;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}


// Append |move| to the worklist, bumping the use counts of any general
// purpose registers it reads or writes.
void LGapResolver::AddMove(LMoveOperands move) {
  LOperand* source = move.source();
  if (source->IsRegister()) ++source_uses_[source->index()];

  LOperand* destination = move.destination();
  if (destination->IsRegister()) ++destination_uses_[destination->index()];

  moves_.Add(move, cgen_->zone());
}


// Mark the move at |index| as eliminated and release its register use
// counts.  The inverse of AddMove.
void LGapResolver::RemoveMove(int index) {
  LOperand* source = moves_[index].source();
  if (source->IsRegister()) {
    --source_uses_[source->index()];
    ASSERT(source_uses_[source->index()] >= 0);
  }

  LOperand* destination = moves_[index].destination();
  if (destination->IsRegister()) {
    --destination_uses_[destination->index()];
    ASSERT(destination_uses_[destination->index()] >= 0);
  }

  moves_[index].Eliminate();
}


// Recount, by linear scan, how many unperformed moves read |operand|.  Used
// to rebuild a register's source use count after a swap changed sources.
int LGapResolver::CountSourceUses(LOperand* operand) {
  int count = 0;
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
      ++count;
    }
  }
  return count;
}


// Return a register that no remaining move reads (safe to clobber) but that
// some move writes (so it is live here anyway), excluding |reg|.  Returns
// no_reg if there is none.
Register LGapResolver::GetFreeRegisterNot(Register reg) {
  int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
      return Register::FromAllocationIndex(i);
    }
  }
  return no_reg;
}


// True iff the resolver is back in its initial state: no queued moves, no
// spilled temp register, and all use counts zero.  Used in asserts only.
bool LGapResolver::HasBeenReset() {
  if (!moves_.is_empty()) return false;
  if (spilled_register_ >= 0) return false;

  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] != 0) return false;
    if (destination_uses_[i] != 0) return false;
  }
  return true;
}


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())

// Restore any register that was spilled for use as a temporary and clear the
// worklist, returning the resolver to its reset state.
void LGapResolver::Finish() {
  if (spilled_register_ >= 0) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
  moves_.Rewind(0);
}


// If |operand| is the register currently spilled for temp use, pop its
// original value back before the operand is read or written.
void LGapResolver::EnsureRestored(LOperand* operand) {
  if (operand->IsRegister() && operand->index() == spilled_register_) {
    __ pop(Register::FromAllocationIndex(spilled_register_));
    spilled_register_ = -1;
  }
}


// Produce a general purpose register usable as scratch, spilling one to the
// stack (recorded in spilled_register_) if no register is free.
Register LGapResolver::EnsureTempRegister() {
  // 1. We may have already spilled to create a temp register.
  if (spilled_register_ >= 0) {
    return Register::FromAllocationIndex(spilled_register_);
  }

  // 2. We may have a free register that we can use without spilling.
  Register free = GetFreeRegisterNot(no_reg);
  if (!free.is(no_reg)) return free;

  // 3. Prefer to spill a register that is not used in any remaining move
  // because it will not need to be restored until the end.
  for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
    if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
      Register scratch = Register::FromAllocationIndex(i);
      __ push(scratch);
      spilled_register_ = i;
      return scratch;
    }
  }

  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
  Register scratch = Register::FromAllocationIndex(0);
  __ push(scratch);
  spilled_register_ = 0;
  return scratch;
}


// Emit the code for the (unblocked) move at |index| and eliminate it from
// the worklist.
void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = cgen_->ToRegister(source);
    Operand dst = cgen_->ToOperand(destination);
    __ mov(dst, src);

  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ mov(dst, src);
    } else {
      // Spill on demand to use a temporary register for memory-to-memory
      // moves.
      Register tmp = EnsureTempRegister();
      Operand dst = cgen_->ToOperand(destination);
      __ mov(tmp, src);
      __ mov(dst, tmp);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        __ LoadObject(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      // Materialize the double constant by pushing its two 32-bit halves
      // (high word first, so the low word ends up at the lower address) and
      // loading through the x87 FPU, then pop the temporary stack space.
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
      __ push(Immediate(upper));
      __ push(Immediate(lower));
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, MemOperand(esp, 0));
      __ add(esp, Immediate(kDoubleSize));
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      Representation r = cgen_->IsSmi(constant_source)
          ? Representation::Smi() : Representation::Integer32();
      if (cgen_->IsInteger32(constant_source)) {
        __ Move(dst, cgen_->ToImmediate(constant_source, r));
      } else {
        Register tmp = EnsureTempRegister();
        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
        __ mov(dst, tmp);
      }
    }

  } else if (source->IsDoubleRegister()) {
    // load from the register onto the stack, store in destination, which must
    // be a double stack slot in the non-SSE2 case.
    ASSERT(destination->IsDoubleStackSlot());
    Operand dst = cgen_->ToOperand(destination);
    X87Register src = cgen_->ToX87Register(source);
    cgen_->X87Mov(dst, src);
  } else if (source->IsDoubleStackSlot()) {
    // load from the stack slot on top of the floating point stack, and then
    // store in destination.  If destination is a double register, then it
    // represents the top of the stack and nothing needs to be done.
    if (destination->IsDoubleStackSlot()) {
      // Copy the two 32-bit words of the double through a GP temp register.
      Register tmp = EnsureTempRegister();
      Operand src0 = cgen_->ToOperand(source);
      Operand src1 = cgen_->HighOperand(source);
      Operand dst0 = cgen_->ToOperand(destination);
      Operand dst1 = cgen_->HighOperand(destination);
      __ mov(tmp, src0);  // Then use tmp to copy source to destination.
      __ mov(dst0, tmp);
      __ mov(tmp, src1);
      __ mov(dst1, tmp);
    } else {
      Operand src = cgen_->ToOperand(source);
      X87Register dst = cgen_->ToX87Register(destination);
      cgen_->X87Mov(dst, src);
    }
  } else {
    UNREACHABLE();
  }

  RemoveMove(index);
}


// Emit a swap of the source and destination of the move at |index| to break
// a cycle, then patch up the remaining moves and use counts.
void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();
  EnsureRestored(source);
  EnsureRestored(destination);

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Register-register.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchg(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Register-memory.  Use a free register as a temp if possible.  Do not
    // spill on demand because the simple spill implementation cannot avoid
    // spilling src at this point.
    Register tmp = GetFreeRegisterNot(no_reg);
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    if (tmp.is(no_reg)) {
      // No free register: swap via the three-xor trick, which needs no
      // temporary.
      __ xor_(reg, mem);
      __ xor_(mem, reg);
      __ xor_(reg, mem);
    } else {
      __ mov(tmp, mem);
      __ mov(mem, reg);
      __ mov(reg, tmp);
    }

  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
    // Memory-memory.  Spill on demand to use a temporary.  If there is a
    // free register after that, use it as a second temporary.
    Register tmp0 = EnsureTempRegister();
    Register tmp1 = GetFreeRegisterNot(tmp0);
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    if (tmp1.is(no_reg)) {
      // Only one temp register available to us.
      __ mov(tmp0, dst);
      __ xor_(tmp0, src);
      __ xor_(src, tmp0);
      __ xor_(tmp0, src);
      __ mov(dst, tmp0);
    } else {
      __ mov(tmp0, dst);
      __ mov(tmp1, src);
      __ mov(dst, tmp1);
      __ mov(src, tmp0);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  RemoveMove(index);

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have their source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }

  // In addition to swapping the actual uses as sources, we need to update
  // the use counts.
  if (source->IsRegister() && destination->IsRegister()) {
    int temp = source_uses_[source->index()];
    source_uses_[source->index()] = source_uses_[destination->index()];
    source_uses_[destination->index()] = temp;
  } else if (source->IsRegister()) {
    // We don't have use counts for non-register operands like destination.
    // Compute those counts now.
    source_uses_[source->index()] = CountSourceUses(source);
  } else if (destination->IsRegister()) {
    source_uses_[destination->index()] = CountSourceUses(destination);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87