/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    // Payloads of 0 and 0x80000000 take the slow path: negating 0 produces -0
    // (not representable as an int32), and negating INT_MIN overflows.
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

    // The value is a double; flip the sign bit in its high word.
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
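
// A note on the value representation relied on below: in the JSVALUE32_64
// encoding a value is a tag/payload pair. The emitters in this file load op1
// into regT1 (tag) : regT0 (payload) and op2 into regT3 (tag) : regT2 (payload).
// Tags such as Int32Tag and CellTag identify immediate ints and cells, while a
// tag below JSValue::LowestTag means the two words hold the bit pattern of a
// double, which is why "is this a double?" is tested with branch32 against
// LowestTag. The comparison emitters that follow share one shape: a
// constant-character fast path, an int32 fast path, and a double fall-through
// handled by emitBinaryDoubleOp, with everything else deferred to a stub call.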

void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        // Int32 less.
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target);
}

void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }
    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}

void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(invert ? op_jnlesseq : op_jlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_jlesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}

void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    emit_op_jlesseq(currentInstruction, true);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitSlow_op_jlesseq(currentInstruction, iter, true);
}

// LeftShift (<<)

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
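
// A note on the unsigned right shift emitted below: the result of >>> is a
// uint32, but an int32 payload can only represent values up to 0x7fffffff. When
// the effective shift amount is 0 (mod 32) a negative operand keeps its sign
// bit, so any result that still looks negative is sent to the slow path, where
// it can be boxed as a double instead of an int32.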

// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32();
        if (isUnsigned) {
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
            // a toUint conversion, which can result in a value we cannot
            // represent as an immediate int.
            if (shift < 0 || !(shift & 31))
                addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else if (shift) { // signed right shift by zero is simply toInt conversion
            rshift32(Imm32(shift & 0x1f), regT0);
        }
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    if (isUnsigned) {
        urshift32(regT2, regT0);
        addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    } else
        rshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}

void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (isUnsigned) {
                if (shift)
                    urshift32(Imm32(shift & 0x1f), regT0);
                if (shift < 0 || !(shift & 31))
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            } else if (shift)
                rshift32(Imm32(shift & 0x1f), regT0);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (isUnsigned && (shift < 0 || !(shift & 31)))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                Jump notDouble = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                Jump notInt = branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)); // op2 is not an int
                Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
                if (isUnsigned)
                    urshift32(regT2, regT0);
                else
                    rshift32(regT2, regT0);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                notDouble.link(this);
                notInt.link(this);
                cantTruncate.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>)

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

// BitAnd (&)

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitOr (|)

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitXor (^)

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitNot (~)

void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    not32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));
}

void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
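
// The post-increment/decrement emitters below store the original value to dst
// before adjusting srcDst. When dst == srcDst (the "x = x++" pattern), the
// store and the add are skipped entirely, so only the int32 tag check registers
// a slow case; the matching emitSlow_ functions mirror this by linking the
// overflow check only when dst != srcDst.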

// PostInc (i++)

void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    emitStoreInt32(dst, regT0);

    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}

// PostDec (i--)

void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    emitStoreInt32(dst, regT0);

    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}

// PreInc (++i)

void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// PreDec (--i)

void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
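
// The arithmetic emitters below consult OperandTypes, the static type
// information recorded for the two operands in the bytecode. If an operand is
// known not to be a number, op_add goes straight to the stub call; when an
// operand definitelyIsNumber(), the double path can skip the corresponding tag
// check, and the emitSlow_ linkers mirror exactly which checks were emitted.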

// Addition (+)

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Subtraction (-)

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
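
// emitBinaryDoubleOp handles the two "one operand was not an int32"
// fall-throughs shared by the arithmetic and comparison emitters above. Its
// contract, as used in this file: op1 is expected in regT1 (tag) : regT0
// (payload) and op2 in regT3 (tag) : regT2 (payload) whenever the corresponding
// *IsInRegisters flag is true; notInt32Op1/notInt32Op2 are the jumps taken when
// the respective tag check failed. For the jump opcodes (op_jless and friends),
// the "dst" parameter is the branch target rather than a destination register.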

void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op1, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op1, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op1, fpRegT1);
                subDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_div:
                emitLoadDouble(op1, fpRegT1);
                divDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_jnless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op2, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op2, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op2, fpRegT2);
                subDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_div:
                emitLoadDouble(op2, fpRegT2);
                divDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_jnless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    // Save op1's payload in regT3: the multiply overwrites regT0, and the slow
    // path needs both operands' signs to distinguish +0 from -0.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    // A zero result is only +0 if neither operand was negative; otherwise it is
    // -0, which cannot be represented as an int32.
    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Division (/)

void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);

    JumpList doubleResult;
    branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);

    // Int32 result.
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
    end.append(jump());

    // Double result.
    doubleResult.link(this);
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
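
// A note on the x86 fast path below: idiv takes its dividend in edx:eax and
// raises a hardware exception both for division by zero and for INT_MIN / -1,
// so those cases are peeled off as slow cases before the instruction is
// emitted. JavaScript's % must also produce -0 when the remainder is zero and
// the dividend is negative, which is why the dividend's payload is saved in
// regT3 and its sign checked before the integer remainder is stored.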

// Mod (%)

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64) || CPU(MIPS)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);
#endif

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        emitLoad(op1, regT1, regT0);
        move(Imm32(getConstantOperand(op2).asInt32()), regT2);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        if (getConstantOperand(op2).asInt32() == -1)
            addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
        addSlowCase(branch32(Equal, regT2, TrustedImm32(0))); // divide by 0
    }

    move(regT0, regT3); // Save dividend payload, in case of 0.
#if CPU(X86) || CPU(X86_64)
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
#elif CPU(MIPS)
    m_assembler.div(regT0, regT2);
    m_assembler.mfhi(regT1);
#endif

    // If the remainder is zero and the dividend is negative, the result is -0.
    Jump storeResult1 = branchTest32(NonZero, regT1);
    Jump storeResult2 = branchTest32(Zero, regT3, TrustedImm32(0x80000000)); // not negative
    emitStore(dst, jsNumber(-0.0));
    Jump end = jump();

    storeResult1.link(this);
    storeResult2.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
    end.link(this);
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        linkSlowCase(iter); // int32 check
        if (getConstantOperand(op2).asInt32() == -1)
            linkSlowCase(iter); // 0x80000000 check
    } else {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // 0x80000000 check
        linkSlowCase(iter); // 0 check
    }

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if ENABLE(JIT_USE_SOFT_MODULO)
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    addSlowCase(branch32(Equal, regT2, TrustedImm32(0)));

    emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());

    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
#if ENABLE(JIT_USE_SOFT_MODULO)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    ASSERT_NOT_REACHED();
#endif
}

#endif // CPU(X86) || CPU(X86_64) || CPU(MIPS)

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)