/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

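// On JSVALUE32_64, a JSValue occupies two 32-bit words: a tag and a payload.
// Values are therefore moved around in register pairs - conventionally
// (regT1, regT0) for (tag, payload) - and the array and property accesses in
// this file use 8-byte strides with separate 32-bit loads and stores for the
// two halves.
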
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(base);
    stubCall.addArgument(Imm32(property));
    stubCall.addArgument(value);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.call(dst);
}


#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as no-ops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}

void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.addArgument(value);
    stubCall.call();
}

void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.call(dst);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_put_by_id_generic);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(value);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    m_propertyAccessInstructionIndex++;
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    // This will be repatched to materialize the cached function as an immediate, without doing a load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    move(TrustedImm32(JSValue::CellTag), regT1);
    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath();

    match.link(this);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

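// Once patchMethodCallProto (below) has filled in the cached structures, the
// prototype and the callee, the fast path above reduces to three compares
// against immediates plus one immediate move - no property lookup at all.
// Roughly, as pseudocode for the patched sequence:
//
//   if (base->structure() != cachedStructure) goto getByIdFallback;
//   if (cachedProto->structure() != cachedProtoStructure) goto getByIdFallback;
//   regT0 = cachedFunction;   // the patched 'putFunction' move
//   regT1 = JSValue::CellTag;
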
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);

    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as no-ops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif

JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));

    // Load string length to regT1, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT2, regT1));

    // Load the character
    jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(&jit, pool, 0);
    return patchBuffer.finalizeCode().m_code;
}

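// Contract of the stub above: on entry regT0 holds the base JSCell* and regT2
// holds an int32 index; on success it returns a single-character JSString*
// from the shared small-strings cache in regT0, with CellTag in regT1. On any
// failure - not a string, a rope (non-zero m_fiberCount), an out-of-range
// index, or a character >= 0x100 - it returns 0 in regT0 so the caller can
// fall through to the generic path.
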
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));

    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
    addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));

    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}

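// Address arithmetic for the vector access above: with regT3 = storage and
// regT2 = index, each slot is one 8-byte JSValue, so
//
//   slotBase = regT3 + offsetof(ArrayStorage, m_vector) + index * 8
//
// and the tag and payload are read as the two 32-bit halves of that slot. A
// hole in the array is represented by EmptyValueTag and bails to the slow case.
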
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check

    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
    Jump failed = branchTestPtr(Zero, regT0);
    emitStore(dst, regT1, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, property, regT3, regT2);

    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);

    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));

    Label storeResult(this);
    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
    Jump end = jump();

    empty.link(this);
    add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT2, regT0);
    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}

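// The 'empty' path above handles a write to a hole inside the allocated
// vector: it bumps m_numValuesInVector, and if the index is at or beyond the
// current m_length it also grows m_length to index + 1 before looping back to
// storeResult. For example, writing a[5] into an array of length 3 (with
// vector length >= 6) stores the value and sets m_length to 6 without ever
// leaving JIT code.
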
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(base);
    stubPutByValCall.addArgument(property);
    stubPutByValCall.addArgument(value);
    stubPutByValCall.call();
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;

    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    compileGetByIdHotPath();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}

void JIT::compileGetByIdHotPath()
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT2);
    DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1);
    DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2);

    Label putResult(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
}

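// After patchGetByIdSelf fills in a Structure and a property offset, the hot
// path above behaves, in effect, as:
//
//   if (base->structure() != cachedStructure) goto slowCase;
//   storage = base->m_propertyStorage;
//   payload = storage[cachedByteOffset + payloadHalf];
//   tag     = storage[cachedByteOffset + tagHalf];
//   putResult:
//
// The ASSERT_JIT_OFFSETs pin each patchable word at a fixed distance from
// hotPathBegin, so RepatchBuffer can find them later without extra metadata.
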
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;

    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
}

void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(dst);

    END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the arguments have been loaded into registers, and we generate code
    // such that the Structure & offset are always at the same distance from it.

    int base = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad2(base, regT1, regT0, value, regT3, regT2);

    emitJumpSlowCaseIfNotJSCell(base, regT1);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
    m_propertyAccessInstructionIndex++;

    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(regT3, regT2);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

// Compile a store into an object's property storage.  May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage())
        offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
    emitStore(offset, valueTag, valuePayload, base);
}

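// Note on units: cachedOffset and the emitStore/emitLoad index below are in
// JSValue-sized (Register-sized, 8-byte) slots rather than bytes - hence the
// division by sizeof(Register) when rebasing onto the inline storage - so the
// same indexed access works whether the property lives inline in the object
// or in the out-of-line m_propertyStorage array.
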
// Compile a load from an object's property storage.  May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage()) {
        offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
        emitLoad(offset, resultTag, resultPayload, base);
    } else {
        RegisterID temp = resultPayload;
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), temp);
        emitLoad(offset, resultTag, resultPayload, temp);
    }
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
{
    load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), resultPayload);
    load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), resultTag);
}

void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
    if (prototype.isNull())
        return;

    // We have a special case for X86_64 here because X86 instructions that take immediate values
    // only take 32-bit immediate values, whereas the pointer constants we are using here are 64-bit
    // values.  In the non-X86_64 case, the generated code is slightly more efficient because it uses
    // two fewer instructions and doesn't require any scratch registers.
#if CPU(X86_64)
    move(TrustedImmPtr(prototype.asCell()->structure()), regT3);
    failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), regT3));
#else
    failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), TrustedImmPtr(prototype.asCell()->structure())));
#endif
}

void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag.  The value can be found on the stack.

    JumpList failureCases;
    failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
    testPrototype(oldStructure->storedPrototype(), failureCases);

    if (!direct) {
        // Verify that nothing in the prototype chain has a setter for this property.
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
            testPrototype((*it)->storedPrototype(), failureCases);
    }

    // Reallocate property storage if needed.
    Call callTarget;
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);

        restoreReturnAddressBeforeReturn(regT3);
    }

    storePtrWithWriteBarrier(TrustedImmPtr(newStructure), regT0, Address(regT0, JSCell::structureOffset()));

#if CPU(MIPS) || CPU(SH4)
    // For MIPS and SH4, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#else
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#endif

    // Write the value
    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}

void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}

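// Worked example: for cachedOffset == 3 the byte offset is 3 * sizeof(JSValue)
// == 24, so the payload load is patched to storage + 24 + payloadHalf and the
// tag load to storage + 24 + tagHalf; which half sits at +0 and which at +4
// depends on endianness, hence OBJECT_OFFSETOF rather than hard-coded literals.
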
void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure.set(globalData, codeBlock->ownerExecutable(), structure);
    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure.set(globalData, codeBlock->ownerExecutable(), prototypeStructure);

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}

void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // regT0 holds a JSCell*

    // Check for array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
    move(regT2, regT0);
    move(TrustedImm32(JSValue::Int32Tag), regT1);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at which point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    Jump failureCases1 = checkStructure(regT0, structure);

    // Check that the prototype object's Structure has not changed.
    const void* prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
    move(TrustedImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif
    bool needsStubLink = false;
    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at which point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}


void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    // regT0 holds a JSCell*
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at which point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*

    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check that regT0 holds an object with the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check that the prototype object's Structure has not changed.
    const void* prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
    move(TrustedImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at which point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, prototypeStructure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check that regT0 holds an object with the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at which point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check that regT0 holds an object with the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at which point it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
{
    ASSERT(sizeof(JSValue) == 8);

    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), base);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
}

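// Here the offset register holds a property index, so TimesEight scales it by
// sizeof(JSValue) (asserted above to be 8) and the two loads pick up the
// 32-bit payload and tag halves of the slot.
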
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitLoad2(property, regT1, regT0, base, regT3, regT2);
    emitJumpSlowCaseIfNotJSCell(property, regT1);
    addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
    // Property registers are now available as the property is known
    emitJumpSlowCaseIfNotJSCell(base, regT3);
    emitLoadPayload(iter, regT1);

    // Test base's structure
    loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
    addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT2, regT1, regT0, regT3);

    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
}

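// The fast path above only applies while iterating with the same
// JSPropertyNameIterator that produced the name: the property must be the
// expected cell, the base must still have the iterator's cached Structure,
// and the loop counter i (stored 1-based, hence the sub32) must fall within
// the iterator's m_numCacheableSlots. Anything else drops to the
// cti_op_get_by_val slow case below.
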
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCaseIfNotJSCell(iter, property);
    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)