/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGJITCodeGenerator.h"
#include "DFGNonSpeculativeJIT.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSpeculativeJIT.h"
#include "JSGlobalData.h"
#include "LinkBuffer.h"

namespace JSC { namespace DFG {

// This method is used to fill a numeric value into an FPR when linking speculative -> non-speculative.
void JITCompiler::fillNumericToDouble(NodeIndex nodeIndex, FPRReg fpr, GPRReg temporary)
{
    Node& node = graph()[nodeIndex];
    MacroAssembler::RegisterID tempReg = gprToRegisterID(temporary);

    if (node.isConstant()) {
        ASSERT(node.op == DoubleConstant);
        move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfDoubleConstant(nodeIndex)))), tempReg);
        movePtrToDouble(tempReg, fprToRegisterID(fpr));
    } else {
        loadPtr(addressFor(node.virtualRegister), tempReg);
        Jump isInteger = branchPtr(MacroAssembler::AboveOrEqual, tempReg, tagTypeNumberRegister);
        jitAssertIsJSDouble(temporary);
        addPtr(tagTypeNumberRegister, tempReg);
        movePtrToDouble(tempReg, fprToRegisterID(fpr));
        Jump hasUnboxedDouble = jump();
        isInteger.link(this);
        convertInt32ToDouble(tempReg, fprToRegisterID(fpr));
        hasUnboxedDouble.link(this);
    }
}
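
// A minimal sketch of the JSVALUE64 number encoding that fillNumericToDouble
// relies on (illustrative only; the authoritative definitions live in JSValue).
// Boxed doubles are stored offset by 2^48 so that their high 16 bits are never
// all-zero; adding tagTypeNumberRegister (which holds TagTypeNumber, i.e.
// 0xFFFF000000000000) is the modular equivalent of subtracting that offset,
// recovering the raw IEEE754 bit pattern. The helper below is hypothetical
// and not part of the JIT; it assumes <stdint.h> and <string.h>.
#if 0
static inline double unboxDoubleSketch(uint64_t boxed)
{
    uint64_t bits = boxed + 0xFFFF000000000000ull; // == boxed - 2^48 (mod 2^64)
    double d;
    memcpy(&d, &bits, sizeof(d));
    return d;
}
#endif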

// This method is used to fill an integer value into a GPR when linking speculative -> non-speculative.
void JITCompiler::fillInt32ToInteger(NodeIndex nodeIndex, GPRReg gpr)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        ASSERT(node.op == Int32Constant);
        move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gprToRegisterID(gpr));
    } else {
#if DFG_JIT_ASSERT
        // Redundant load, just so we can check the tag!
        loadPtr(addressFor(node.virtualRegister), gprToRegisterID(gpr));
        jitAssertIsJSInt32(gpr);
#endif
        load32(addressFor(node.virtualRegister), gprToRegisterID(gpr));
    }
}

// This method is used to fill a JSValue into a GPR when linking speculative -> non-speculative.
void JITCompiler::fillToJS(NodeIndex nodeIndex, GPRReg gpr)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        if (isInt32Constant(nodeIndex)) {
            JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gprToRegisterID(gpr));
        } else if (isDoubleConstant(nodeIndex)) {
            JSValue jsValue(JSValue::EncodeAsDouble, valueOfDoubleConstant(nodeIndex));
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gprToRegisterID(gpr));
        } else {
            ASSERT(isJSConstant(nodeIndex));
            JSValue jsValue = valueOfJSConstant(nodeIndex);
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gprToRegisterID(gpr));
        }
        return;
    }

    loadPtr(addressFor(node.virtualRegister), gprToRegisterID(gpr));
}
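
// Sketch of the constant encodings JSValue::encode produces above (JSVALUE64
// scheme, illustrative only): an int32 is boxed as TagTypeNumber with the
// zero-extended value in the low 32 bits, while EncodeAsDouble stores the
// double's bit pattern offset by 2^48. Both helpers below are hypothetical,
// assuming <stdint.h> and <string.h>; they mirror unboxDoubleSketch above.
#if 0
static inline uint64_t boxInt32Sketch(int32_t value)
{
    return 0xFFFF000000000000ull | static_cast<uint32_t>(value);
}

static inline uint64_t boxDoubleSketch(double d)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof(bits));
    return bits + 0x0001000000000000ull; // offset by 2^48 so the tag bits are never all-zero
}
#endif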

void JITCompiler::jumpFromSpeculativeToNonSpeculative(const SpeculationCheck& check, const EntryLocation& entry, SpeculationRecovery* recovery)
{
    ASSERT(check.m_nodeIndex == entry.m_nodeIndex);

    // Link the jump from the Speculative path to here.
    check.m_check.link(this);

    // Does this speculation check require any additional recovery to be
    // performed to restore state that was overwritten before we re-enter
    // the non-speculative path?
    if (recovery) {
        // The only additional recovery we currently support is for the integer add operation.
        ASSERT(recovery->type() == SpeculativeAdd);
        // Revert the add.
        sub32(gprToRegisterID(recovery->src()), gprToRegisterID(recovery->dest()));
    }

    // FIXME: This is hideously inefficient!
    // Where a value is live in a register on the speculative path and is required in a register
    // on the non-speculative path, we should not need to spill and reload it (though we may
    // still need to spill, if the value is marked as spilled on the non-speculative path).
    // This may also spill values that do not need spilling, e.g. values that are already
    // spilled, constants, or arguments.

    // Spill all GPRs in use by the speculative path.
    for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
        NodeIndex nodeIndex = check.m_gprInfo[gpr].nodeIndex;
        if (nodeIndex == NoNode)
            continue;

        DataFormat dataFormat = check.m_gprInfo[gpr].format;
        VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister;

        ASSERT(dataFormat == DataFormatInteger || dataFormat == DataFormatCell || (dataFormat & DataFormatJS));
        if (dataFormat == DataFormatInteger)
            orPtr(tagTypeNumberRegister, gprToRegisterID(gpr));
        storePtr(gprToRegisterID(gpr), addressFor(virtualRegister));
    }

    // Spill all FPRs in use by the speculative path.
    for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
        NodeIndex nodeIndex = check.m_fprInfo[fpr];
        if (nodeIndex == NoNode)
            continue;

        VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister;

        moveDoubleToPtr(fprToRegisterID(fpr), regT0);
        subPtr(tagTypeNumberRegister, regT0);
        storePtr(regT0, addressFor(virtualRegister));
    }

    // Fill all FPRs in use by the non-speculative path.
    for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
        NodeIndex nodeIndex = entry.m_fprInfo[fpr];
        if (nodeIndex == NoNode)
            continue;

        fillNumericToDouble(nodeIndex, fpr, gpr0);
    }

    // Fill all GPRs in use by the non-speculative path.
    for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
        NodeIndex nodeIndex = entry.m_gprInfo[gpr].nodeIndex;
        if (nodeIndex == NoNode)
            continue;

        DataFormat dataFormat = entry.m_gprInfo[gpr].format;
        if (dataFormat == DataFormatInteger)
            fillInt32ToInteger(nodeIndex, gpr);
        else {
            ASSERT((dataFormat & DataFormatJS) || dataFormat == DataFormatCell); // Treat cell as JSValue for now!
            fillToJS(nodeIndex, gpr);
            // FIXME: For subtypes of DataFormatJS, should jitAssert the subtype?
        }
    }

    // Jump into the non-speculative path.
    jump(entry.m_entry);
}
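
// A sketch (in C terms, illustrative only) of the SpeculativeAdd recovery
// performed above: by the time a later speculation check fails, the
// speculative path has already executed "dest += src", so re-entering the
// non-speculative path at the same node first undoes the add. The helper is
// hypothetical and not part of the JIT.
#if 0
static inline int32_t recoverSpeculativeAddSketch(int32_t dest, int32_t src)
{
    // Even if the add wrapped, the subtraction restores the original value:
    // two's-complement addition is exactly invertible mod 2^32.
    return static_cast<int32_t>(static_cast<uint32_t>(dest) - static_cast<uint32_t>(src));
}
#endif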

void JITCompiler::linkSpeculationChecks(SpeculativeJIT& speculative, NonSpeculativeJIT& nonSpeculative)
{
    // Iterators to walk over the set of bail outs & corresponding entry points.
    SpeculationCheckVector::Iterator checksIter = speculative.speculationChecks().begin();
    SpeculationCheckVector::Iterator checksEnd = speculative.speculationChecks().end();
    NonSpeculativeJIT::EntryLocationVector::Iterator entriesIter = nonSpeculative.entryLocations().begin();
    NonSpeculativeJIT::EntryLocationVector::Iterator entriesEnd = nonSpeculative.entryLocations().end();

    // Iterate over the speculation checks.
    while (checksIter != checksEnd) {
        // For every bail out from the speculative path, we must have provided an entry point
        // into the non-speculative one.
        ASSERT(checksIter->m_nodeIndex == entriesIter->m_nodeIndex);

        // There may be multiple bail outs that map to the same entry point!
        do {
            ASSERT(checksIter != checksEnd);
            ASSERT(entriesIter != entriesEnd);

            // Plant code to link this speculation failure.
            const SpeculationCheck& check = *checksIter;
            const EntryLocation& entry = *entriesIter;
            jumpFromSpeculativeToNonSpeculative(check, entry, speculative.speculationRecovery(check.m_recoveryIndex));
            ++checksIter;
        } while (checksIter != checksEnd && checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
        ++entriesIter;
    }

    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56289
    ASSERT(!(checksIter != checksEnd));
    ASSERT(!(entriesIter != entriesEnd));
}

void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    // === Stage 1 - Function header code generation ===
    //
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.

    // This is the main entry point, without performing an arity check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);

    // Set up a pointer to the CodeBlock in the CallFrameHeader.
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();
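
    // In C terms, the fast check above is roughly (sketch, illustrative only):
    //     if ((char*)callFrame + m_numCalleeRegisters * sizeof(Register) > registerFile.end())
    //         goto slowRegisterFileCheck;
    // regT1 holds the prospective frame's high-water mark; we take the slow
    // path when it does not fit below the register file's current end.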


    // === Stage 2 - Function body code generation ===
    //
    // We generate the speculative code path, followed by the non-speculative
    // code for the function. Next we need to link the two together, making
    // bail-outs from the speculative path jump to the corresponding point on
    // the non-speculative one (and generating any code necessary to juggle
    // register values around, rebox values, and spill values as needed, to match
    // the non-speculative path's requirements).

#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
    // Handy debug tool!
    breakpoint();
#endif

    // First generate the speculative path.
    Label speculativePathBegin = label();
    SpeculativeJIT speculative(*this);
    bool compiledSpeculative = speculative.compile();

    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
    // to allow it to check which nodes in the graph may bail out, and so may need to re-enter
    // the non-speculative path.
    if (compiledSpeculative) {
        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);

        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
        linkSpeculationChecks(speculative, nonSpeculative);
    } else {
        // If compilation through the SpeculativeJIT failed, throw away the code we generated.
        m_calls.clear();
        rewindToLabel(speculativePathBegin);

        SpeculationCheckVector noChecks;
        SpeculationCheckIndexIterator checkIterator(noChecks);
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);
    }

    // === Stage 3 - Function footer code generation ===
    //
    // Generate code to look up and jump to exception handlers, to perform the slow
    // register file check (if the fast one in the function header fails), and
    // generate the entry point with arity check.

    // Iterate over the m_calls vector, checking for exception checks,
    // and linking them to here.
    unsigned exceptionCheckCount = 0;
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            ++exceptionCheckCount;
        }
    }
    // If any exception checks were linked, generate code to look up a handler.
    if (exceptionCheckCount) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // an identifier for the operation that threw the exception, which we can use
        // to look up handler information. The identifier we use is the return address
        // of the call out from JIT code that threw the exception; this is still
        // available on the stack, just below the stack pointer!
        move(callFrameRegister, argumentRegister0);
        peek(argumentRegister1, -1);
        m_calls.append(CallRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueRegister,
        // and the address of the handler in returnValueRegister2.
        jump(returnValueRegister2);
    }

    // Generate the register file check; if the fast check in the function header fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, argumentRegister0);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check that the correct number of
    // arguments has been passed to the call (we only use the fast entry point where we
    // can statically determine that the correct number of arguments has been passed,
    // or where it has already been checked). In cases where an arity check is
    // necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
    branch32(Equal, regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, argumentRegister0);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(regT0, callFrameRegister);
    jump(fromArityCheck);


    // === Stage 4 - Link ===
    //
    // Link the code, populate data in CodeBlock data structures.

    LinkBuffer linkBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);

#if DFG_DEBUG_VERBOSE
    fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
        for (unsigned i = 0; i < m_calls.size(); ++i) {
            if (m_calls[i].m_exceptionCheck.isSet()) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
                unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
            }
        }
    }

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = linkBuffer.finalizeCode();
}

#if DFG_JIT_ASSERT
void JITCompiler::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64)
    Jump checkInt32 = branchPtr(BelowOrEqual, gprToRegisterID(gpr), TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
    breakpoint();
    checkInt32.link(this);
#else
    UNUSED_PARAM(gpr);
#endif
}

void JITCompiler::jitAssertIsJSInt32(GPRReg gpr)
{
    Jump checkJSInt32 = branchPtr(AboveOrEqual, gprToRegisterID(gpr), tagTypeNumberRegister);
    breakpoint();
    checkJSInt32.link(this);
}

void JITCompiler::jitAssertIsJSNumber(GPRReg gpr)
{
    Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gprToRegisterID(gpr), tagTypeNumberRegister);
    breakpoint();
    checkJSNumber.link(this);
}

void JITCompiler::jitAssertIsJSDouble(GPRReg gpr)
{
    Jump checkJSInt32 = branchPtr(AboveOrEqual, gprToRegisterID(gpr), tagTypeNumberRegister);
    Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gprToRegisterID(gpr), tagTypeNumberRegister);
    checkJSInt32.link(this);
    breakpoint();
    checkJSNumber.link(this);
}
#endif
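
// In C terms, the JIT assertions above check the following JSVALUE64
// predicates (sketch, with TagTypeNumber = 0xFFFF000000000000; these helpers
// are hypothetical, derived from the branch conditions emitted above):
#if 0
static inline bool isJSInt32Sketch(uint64_t bits)  { return bits >= 0xFFFF000000000000ull; }
static inline bool isJSNumberSketch(uint64_t bits) { return !!(bits & 0xFFFF000000000000ull); }
static inline bool isJSDoubleSketch(uint64_t bits) { return isJSNumberSketch(bits) && !isJSInt32Sketch(bits); }
#endif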

#if ENABLE(SAMPLING_COUNTERS) && CPU(X86_64) // Or any other 64-bit platform!
void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
{
    addPtr(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS) && CPU(X86) // Or any other little-endian 32-bit platform!
void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
{
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    add32(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
}
#endif
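
// Equivalent C for the 32-bit counter bump above (sketch, assuming a
// little-endian 64-bit counter split into two 32-bit words): a low-word add
// followed by propagating the carry into the high word, which is what the
// add32/addWithCarry32 pair emits. The helper is hypothetical.
#if 0
static inline void emitCountEquivalentSketch(uint32_t* counter, uint32_t increment)
{
    uint32_t oldLow = counter[0];
    counter[0] = oldLow + increment;
    if (counter[0] < oldLow) // the low-word add wrapped, so carry into the high word
        counter[1]++;
}
#endif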

} } // namespace JSC::DFG

#endif