/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE64)
#include "JIT.h"

#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));

    // Load the string length into regT2, and start the process of loading the data pointer into regT0.
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large.
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character.
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(&jit, pool, 0);
    return patchBuffer.finalizeCode().m_code;
}
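
// The stub's contract, as exercised by emitSlow_op_get_by_val below: the caller enters with
// the candidate JSString cell in regT0 and a zero-extended int32 index in regT1. On success
// the single-character JSString is returned in regT0; on any failure (wrong vptr, rope
// string, out-of-bounds index, or a character code of 0x100 or above) regT0 holds 0, which
// the caller detects with branchTestPtr(Zero, regT0) and falls through to the generic stub.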

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
    // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation size is
    // always less than 4GB). As such, zero-extending will have been correct (and extending the value to
    // 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
    // sign-extend since it makes it easier to re-tag the value in the slow case.
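    // For example, an index of -1 zero-extends to 0xFFFFFFFF; the unsigned AboveOrEqual
    // compare against m_vectorLength below then treats it as 4294967295, so every negative
    // index takes the slow case along with genuinely out-of-bounds positive indices.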
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitPutVirtualRegister(dst);
}

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
    Jump failed = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}
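
// Note that the linkSlowCase calls above must mirror, in order, the slow cases registered on
// the hot path in emit_op_get_by_val: the property int32 check, the base cell check, the
// array vptr check, the vector-length bounds check, and finally the empty-slot (hole) check.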

void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
{
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
}
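
// This register-offset variant is used by op_get_by_pname, where the property offset is only
// known at runtime: it always indirects through m_propertyStorage, which (in this version of
// the code) points at the object's inline storage until the property count outgrows it. The
// Structure-based overloads further down instead bake the offset into the instruction, and
// can address inline storage directly when the Structure reports that it is still in use.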

void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    move(regT1, regT0);
    add32(TrustedImm32(1), regT0);
    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}
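
// The out-of-line path above services a store into a hole within the allocated vector: it
// bumps m_numValuesInVector and, if the index lies at or beyond the array's current m_length,
// grows m_length to index + 1 before looping back to storeResult. Stores beyond
// m_vectorLength never reach this code - the bounds check on entry already routed them to
// the slow case.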

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}


#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.call(resultVReg);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.operand;

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
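
// Once patchMethodCallProto (below) has been run for this call site, the two structure
// checks compare against the cached Structures and putFunction materializes the cached
// JSFunction with a single move-immediate, letting the hot path skip the following
// get_by_id entirely. Until then, the patchable constants hold patchGetByIdDefaultStructure
// and 0, so every execution falls through the failure cases into the regular(ish)
// get_by_id path compiled inline above.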

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}
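
// Concretely, patchGetByIdSelf (below) later rewrites two constants inside this sequence -
// the Structure* planted at structureToCompare and the 32-bit displacement planted at
// displacementLabel - turning the generic template into a monomorphic self access, while
// putResult marks where the array-length and prototype-access stubs jump back in with their
// result already in regT0.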

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}

void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
    // so that we need only track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the point at which the arguments have been loaded into registers,
    // and we generate code such that the Structure and offset are always at the same distance from it.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a patchable immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}

// Compile a store into an object's property storage. May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += JSObject::offsetOfInlineStorage();
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
    storePtr(value, Address(base, offset));
}

// Compile a load from an object's property storage. May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage()) {
        offset += JSObject::offsetOfInlineStorage();
        loadPtr(Address(base, offset), result);
    } else {
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), result);
        loadPtr(Address(result, offset), result);
    }
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
{
    loadPtr(static_cast<void*>(&base->m_propertyStorage[cachedOffset]), result);
}

void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
    if (prototype.isNull())
        return;

    // We have a special case for X86_64 here because x86-64 instructions that take immediate values
    // only accept 32-bit immediates, whereas the pointer constants we are using here are 64-bit
    // values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
    // two fewer instructions and doesn't require any scratch registers.
#if CPU(X86_64)
    move(TrustedImmPtr(prototype.asCell()->structure()), regT3);
    failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), regT3));
#else
    failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), TrustedImmPtr(prototype.asCell()->structure())));
#endif
}

void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    JumpList failureCases;
    // Check that regT0 is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
    testPrototype(oldStructure->storedPrototype(), failureCases);

    // For non-direct puts, walk the prototype chain, verifying the Structure of each object on it.
    if (!direct) {
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
            testPrototype((*it)->storedPrototype(), failureCases);
    }

    Call callTarget;

    // Emit a call only if a storage reallocation is needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This code is called like a JIT stub; before we can make another stub call we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    storePtrWithWriteBarrier(TrustedImmPtr(newStructure), regT0, Address(regT0, JSCell::structureOffset()));

    // Write the value.
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}
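
// The stub built above runs in place of the relinked cti_op_put_by_id call: it re-validates
// the old Structure and (for non-direct puts) the whole prototype chain, reallocates the
// property storage out-of-line if the capacity changed, installs the new Structure with a
// write barrier, writes the value, and returns directly to the patched call site. Any failed
// check instead tail-calls back into the generic C++ put_by_id path.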

void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}

void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure.set(globalData, codeBlock->ownerExecutable(), structure);

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure.set(globalData, codeBlock->ownerExecutable(), prototypeStructure);

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}

void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check that regT0 is an array.
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage.
    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
    Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success, return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to the slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
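
// The LessThan-zero check above guards the re-tagging step: ArrayStorage::m_length is a
// uint32, but emitFastArithIntToImmNoCheck produces an int32 immediate, so a length of 2^31
// or more (which reads back as negative here) must bail to the slow case rather than be
// mis-tagged as a negative number.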

void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check that regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check that the prototype object's Structure has not changed.
    const void* prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
    move(TrustedImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif

    bool needsStubLink = false;

    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success, return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to the slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success, return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);

    // Finally patch the jump to the slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
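
// Self-list stubs chain together: a failed structure check in the stub built above jumps to
// lastProtoBegin - the stub compiled for the previous list entry, or the original slow case
// for entry 0 - so the polymorphic list is effectively searched newest-first until a
// Structure matches or the generic slow path is reached.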

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
    // referencing the prototype object - so let's speculatively load its table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check that regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check that the prototype object's Structure has not changed.
    const void* prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
    move(TrustedImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif

    // Checks out okay!
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success, return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);

    // Finally patch the jump to the slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    JumpList bucketsOfFail;

    // Check that regT0 is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success, return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);

    // Finally patch the jump to the slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}
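
// Note the staggered shape of the chain walk above: on iteration i we test the prototype of
// currStructure (fetched via prototypeForLookup) and only then advance currStructure to the
// i'th Structure recorded in the chain, so after 'count' iterations protoObject is the object
// that actually holds the cached property. privateCompileGetByIdChain below repeats the same
// walk for the non-list (first-hit) case.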

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check that regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success, return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to the slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

} // namespace JSC

#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)