/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

// This probably does not belong here; adding it here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif

#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "dfg/DFGNode.h" // for DFG_SUCCESS_STATS

using namespace std;

namespace JSC {

void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
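
// The JIT constructor gathers everything compilation needs from the CodeBlock
// up front, sizing the per-bytecode label table and the property-access and
// call-link compilation info to match, and resets the per-compilation state
// (bytecode offset, mapped registers, last-result tracking) to sentinel
// values so that stale use is caught by the ASSERTs guarding them.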
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
    , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
    , m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex((unsigned)-1)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
    , m_linkerOffset(linkerOffset)
{
}

#if USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif

#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }
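
// Each DEFINE_OP(name) case expands to a call to the matching emit_<name>
// code generator followed by NEXT_OPCODE(name); for example, DEFINE_OP(op_add)
// expands to:
//
//     case op_add: {
//         emit_op_add(currentInstruction);
//         m_bytecodeOffset += OPCODE_LENGTH(op_add);
//         break;
//     }
//
// privateCompileMainPass walks the bytecode stream and emits the fast-path
// ("hot") code for every opcode, recording a label per bytecode offset so
// that jumps and the slow-case pass can locate their targets.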
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        if (m_labels[m_bytecodeOffset].isUsed())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_UNARY_OP(op_is_boolean)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_number)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_is_string)
        DEFINE_UNARY_OP(op_is_undefined)
#if USE(JSVALUE64)
        DEFINE_UNARY_OP(op_negate)
#endif
        DEFINE_UNARY_OP(op_typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitnot)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_convert_this_strict)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_global_var)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jsr)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_load_varargs)
        DEFINE_OP(op_loop)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_loop_if_false)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_method_check)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_OP(op_negate)
#endif
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_new_scope)
        DEFINE_OP(op_push_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter)
        DEFINE_OP(op_put_global_var)
        DEFINE_OP(op_put_scoped_var)
        DEFINE_OP(op_put_setter)
        DEFINE_OP(op_resolve)
        DEFINE_OP(op_resolve_base)
        DEFINE_OP(op_ensure_property_exists)
        DEFINE_OP(op_resolve_global)
        DEFINE_OP(op_resolve_global_dynamic)
        DEFINE_OP(op_resolve_skip)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_ret)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_sret)
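
// privateCompileLinkPass binds the intra-function jumps recorded during the
// main pass to the labels of their target bytecode offsets.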
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_reference_error)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)

        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_proto_list:
        case op_get_by_id_self:
        case op_get_by_id_self_list:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_proto_list:
        case op_get_by_id_getter_self:
        case op_get_by_id_getter_self_list:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_proto_list:
        case op_get_by_id_custom_self:
        case op_get_by_id_custom_self_list:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}

void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}
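
// privateCompileSlowCases emits the out-of-line ("cold") paths for the slow
// case jumps recorded during the main pass. Each handler ends with
// emitJumpSlowToHot, which returns control to the corresponding fast path.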
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;
#ifndef NDEBUG
        unsigned firstTo = m_bytecodeOffset;
#endif
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitnot)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_convert_this_strict)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_load_varargs)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_loop_if_false)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_method_check)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_SLOWCASE_OP(op_negate)
#endif
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_resolve_global)
        DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        default:
            ASSERT_NOT_REACHED();
        }

        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        emitJumpSlowToHot(jump(), 0);
    }

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
#endif
    ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}
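
// privateCompile is the top-level driver: it emits the prologue (including
// the register file and arity checks for function code), runs the main,
// link, and slow-case passes, and then finalizes the code through a
// LinkBuffer, translating switch-table and exception-handler bytecode
// offsets into machine-code addresses and recording the property-access,
// call, and method-call link information used for later patching.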
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
{
    // Could use a pop_m, but would need to offset the following instruction if so.
    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump registerFileCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if DFG_SUCCESS_STATS
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif

        // In the case of a fast linked call, we do not set this up in the caller.
        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

        addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
    }

    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        registerFileCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
        branch32(Equal, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
        restoreArgumentReference();

        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());

    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), m_linkerOffset);

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
    }

    // Link absolute addresses for jsr.
    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
    }
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
    }
#endif
    unsigned methodCallCount = m_methodCallCompilationInfo.size();
    m_codeBlock->addMethodCallLinkInfos(methodCallCount);
    for (unsigned i = 0; i < methodCallCount; ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
        info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
    }

    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    return patchBuffer.finalizeCode();
}

#if ENABLE(JIT_OPTIMIZE_CALL)
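
// linkCall and linkConstruct patch a call site once the callee is known.
// If the argument count matches the callee (or the callee is a native
// function, in which case calleeCodeBlock is null), the hot path is
// repatched to call the compiled code directly; the slow-path call at
// callReturnLocation is always relinked to the generic virtual call or
// construct thunk so that we do not keep trying to link.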

void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    // Currently we only link calls with the exact number of arguments.
    // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
        ASSERT(!callLinkInfo->isLinked());
        callLinkInfo->callee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
        repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
        repatchBuffer.relink(callLinkInfo->hotPathOther, code);
    }

    // Patch the call so that we do not continue to try to link.
    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
}

void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    // Currently we only link calls with the exact number of arguments.
    // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
        ASSERT(!callLinkInfo->isLinked());
        callLinkInfo->callee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
        repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
        repatchBuffer.relink(callLinkInfo->hotPathOther, code);
    }

    // Patch the call so that we do not continue to try to link.
    repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
}
#endif // ENABLE(JIT_OPTIMIZE_CALL)

} // namespace JSC

#endif // ENABLE(JIT)