// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.

#include "src/assembler.h"

#include "src/assembler-inl.h"
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/disassembler.h"
#include "src/instruction-stream.h"
#include "src/isolate.h"
#include "src/ostreams.h"
#include "src/simulator.h"  // For flushing instruction cache.
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {

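// Computes the default assembler options for the given isolate. When the
// snapshot serializer is enabled (or serialization support is requested
// explicitly), features that would bake run-time addresses or host-specific
// code into the generated code are disabled.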
AssemblerOptions AssemblerOptions::Default(
    Isolate* isolate, bool explicitly_support_serialization) {
  AssemblerOptions options;
  bool serializer =
      isolate->serializer_enabled() || explicitly_support_serialization;
  options.record_reloc_info_for_serialization = serializer;
  options.enable_root_array_delta_access = !serializer;
#ifdef USE_SIMULATOR
  // Don't generate simulator-specific code if we are building a snapshot,
  // which might be run on real hardware.
  options.enable_simulator_code = !serializer;
#endif
  options.inline_offheap_trampolines = !serializer;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
  options.code_range_start =
      isolate->heap()->memory_allocator()->code_range()->start();
#endif
  return options;
}

// -----------------------------------------------------------------------------
// Implementation of AssemblerBase

AssemblerBase::AssemblerBase(const AssemblerOptions& options, void* buffer,
                             int buffer_size)
    : options_(options),
      enabled_cpu_features_(0),
      emit_debug_code_(FLAG_debug_code),
      predictable_code_size_(false),
      constant_pool_available_(false),
      jump_optimization_info_(nullptr) {
  own_buffer_ = buffer == nullptr;
  if (buffer_size == 0) buffer_size = kMinimalBufferSize;
  DCHECK_GT(buffer_size, 0);
  if (own_buffer_) buffer = NewArray<byte>(buffer_size);
  buffer_ = static_cast<byte*>(buffer);
  buffer_size_ = buffer_size;
  pc_ = buffer_;
}

AssemblerBase::~AssemblerBase() {
  if (own_buffer_) DeleteArray(buffer_);
}

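// Flushes the instruction cache for the given code region. When running on
// the simulator this flushes the simulator's instruction cache rather than
// the host CPU's.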
void AssemblerBase::FlushICache(void* start, size_t size) {
  if (size == 0) return;

#if defined(USE_SIMULATOR)
  base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
  Simulator::FlushICache(Simulator::i_cache(), start, size);
#else
  CpuFeatures::FlushICache(start, size);
#endif  // USE_SIMULATOR
}

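// Disassembles the code generated so far (from the start of the buffer up to
// the current pc) to stdout. Intended for debugging.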
void AssemblerBase::Print(Isolate* isolate) {
  StdoutStream os;
  v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_);
}

// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope

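// While a PredictableCodeSizeScope is active, the assembler must emit exactly
// |expected_size| bytes of code; the destructor CHECKs this.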
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
                                                   int expected_size)
    : assembler_(assembler),
      expected_size_(expected_size),
      start_offset_(assembler->pc_offset()),
      old_value_(assembler->predictable_code_size()) {
  assembler_->set_predictable_code_size(true);
}

PredictableCodeSizeScope::~PredictableCodeSizeScope() {
  CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
  assembler_->set_predictable_code_size(old_value_);
}

// -----------------------------------------------------------------------------
// Implementation of CpuFeatureScope

#ifdef DEBUG
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
                                 CheckPolicy check)
    : assembler_(assembler) {
  DCHECK_IMPLIES(check == kCheckSupported, CpuFeatures::IsSupported(f));
  old_enabled_ = assembler_->enabled_cpu_features();
  assembler_->EnableCpuFeature(f);
}

CpuFeatureScope::~CpuFeatureScope() {
  assembler_->set_enabled_cpu_features(old_enabled_);
}
#endif

bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;

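// -----------------------------------------------------------------------------
// Implementation of ConstantPoolBuilder
//
// The reach bits give the number of unsigned offset bits the load
// instructions for each entry type can encode; entries whose offset would
// not fit within that reach go into the overflow section of the pool.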
ConstantPoolBuilder::ConstantPoolBuilder(int ptr_reach_bits,
                                         int double_reach_bits) {
  info_[ConstantPoolEntry::INTPTR].entries.reserve(64);
  info_[ConstantPoolEntry::INTPTR].regular_reach_bits = ptr_reach_bits;
  info_[ConstantPoolEntry::DOUBLE].regular_reach_bits = double_reach_bits;
}

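// Determines whether the next entry of the given type would still fall
// within the regular (reachable) section of the pool or would spill into the
// overflow section.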
ConstantPoolEntry::Access ConstantPoolBuilder::NextAccess(
    ConstantPoolEntry::Type type) const {
  const PerTypeEntryInfo& info = info_[type];

  if (info.overflow()) return ConstantPoolEntry::OVERFLOWED;

  int dbl_count = info_[ConstantPoolEntry::DOUBLE].regular_count;
  int dbl_offset = dbl_count * kDoubleSize;
  int ptr_count = info_[ConstantPoolEntry::INTPTR].regular_count;
  int ptr_offset = ptr_count * kPointerSize + dbl_offset;

  if (type == ConstantPoolEntry::DOUBLE) {
    // Double overflow detection must take into account the reach for both types
    int ptr_reach_bits = info_[ConstantPoolEntry::INTPTR].regular_reach_bits;
    if (!is_uintn(dbl_offset, info.regular_reach_bits) ||
        (ptr_count > 0 &&
         !is_uintn(ptr_offset + kDoubleSize - kPointerSize, ptr_reach_bits))) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  } else {
    DCHECK(type == ConstantPoolEntry::INTPTR);
    if (!is_uintn(ptr_offset, info.regular_reach_bits)) {
      return ConstantPoolEntry::OVERFLOWED;
    }
  }

  return ConstantPoolEntry::REGULAR;
}

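// Adds an entry to the pool. Sharable entries are first checked against the
// entries already in the shared section so that identical constants can be
// merged and served from a single pool slot.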
ConstantPoolEntry::Access ConstantPoolBuilder::AddEntry(
    ConstantPoolEntry& entry, ConstantPoolEntry::Type type) {
  DCHECK(!emitted_label_.is_bound());
  PerTypeEntryInfo& info = info_[type];
  const int entry_size = ConstantPoolEntry::size(type);
  bool merged = false;

  if (entry.sharing_ok()) {
    // Try to merge entries
    std::vector<ConstantPoolEntry>::iterator it = info.shared_entries.begin();
    int end = static_cast<int>(info.shared_entries.size());
    for (int i = 0; i < end; i++, it++) {
      if ((entry_size == kPointerSize) ? entry.value() == it->value()
                                       : entry.value64() == it->value64()) {
        // Merge with found entry.
        entry.set_merged_index(i);
        merged = true;
        break;
      }
    }
  }

  // By definition, merged entries have regular access.
  DCHECK(!merged || entry.merged_index() < info.regular_count);
  ConstantPoolEntry::Access access =
      (merged ? ConstantPoolEntry::REGULAR : NextAccess(type));

  // Enforce an upper bound on search time by limiting the search to
  // unique sharable entries which fit in the regular section.
  if (entry.sharing_ok() && !merged && access == ConstantPoolEntry::REGULAR) {
    info.shared_entries.push_back(entry);
  } else {
    info.entries.push_back(entry);
  }

  // We're done if we found a match or have already triggered the
  // overflow state.
  if (merged || info.overflow()) return access;

  if (access == ConstantPoolEntry::REGULAR) {
    info.regular_count++;
  } else {
    info.overflow_start = static_cast<int>(info.entries.size()) - 1;
  }

  return access;
}

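// Emits all shared entries of the given type and patches the corresponding
// load instructions with the final pool offsets. Merged entries later reuse
// the offsets recorded here.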
void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
                                            ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int shared_end = static_cast<int>(shared_entries.size());
  std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
  for (int i = 0; i < shared_end; i++, shared_it++) {
    int offset = assm->pc_offset() - base;
    shared_it->set_offset(offset);  // Save offset for merged entries.
    if (entry_size == kPointerSize) {
      assm->dp(shared_it->value());
    } else {
      assm->dq(shared_it->value64());
    }
    DCHECK(is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(shared_it->position(), offset,
                                             ConstantPoolEntry::REGULAR, type);
  }
}

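// Emits either the regular or the overflowed group of entries for the given
// type. For the regular group, shared entries are emitted first; merged
// entries only patch their load sequence to point at the shared slot.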
void ConstantPoolBuilder::EmitGroup(Assembler* assm,
                                    ConstantPoolEntry::Access access,
                                    ConstantPoolEntry::Type type) {
  PerTypeEntryInfo& info = info_[type];
  const bool overflow = info.overflow();
  std::vector<ConstantPoolEntry>& entries = info.entries;
  std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
  const int entry_size = ConstantPoolEntry::size(type);
  int base = emitted_label_.pos();
  DCHECK_GT(base, 0);
  int begin;
  int end;

  if (access == ConstantPoolEntry::REGULAR) {
    // Emit any shared entries first
    EmitSharedEntries(assm, type);
  }

  if (access == ConstantPoolEntry::REGULAR) {
    begin = 0;
    end = overflow ? info.overflow_start : static_cast<int>(entries.size());
  } else {
    DCHECK(access == ConstantPoolEntry::OVERFLOWED);
    if (!overflow) return;
    begin = info.overflow_start;
    end = static_cast<int>(entries.size());
  }

  std::vector<ConstantPoolEntry>::iterator it = entries.begin();
  if (begin > 0) std::advance(it, begin);
  for (int i = begin; i < end; i++, it++) {
    // Update constant pool if necessary and get the entry's offset.
    int offset;
    ConstantPoolEntry::Access entry_access;
    if (!it->is_merged()) {
      // Emit new entry
      offset = assm->pc_offset() - base;
      entry_access = access;
      if (entry_size == kPointerSize) {
        assm->dp(it->value());
      } else {
        assm->dq(it->value64());
      }
    } else {
      // Retrieve offset from shared entry.
      offset = shared_entries[it->merged_index()].offset();
      entry_access = ConstantPoolEntry::REGULAR;
    }

    DCHECK(entry_access == ConstantPoolEntry::OVERFLOWED ||
           is_uintn(offset, info.regular_reach_bits));

    // Patch load sequence with correct offset.
    assm->PatchConstantPoolAccessInstruction(it->position(), offset,
                                             entry_access, type);
  }
}

// Emit and return position of pool.  Zero implies no constant pool.
int ConstantPoolBuilder::Emit(Assembler* assm) {
  bool emitted = emitted_label_.is_bound();
  bool empty = IsEmpty();

  if (!emitted) {
    // Mark start of constant pool.  Align if necessary.
    if (!empty) assm->DataAlign(kDoubleSize);
    assm->bind(&emitted_label_);
    if (!empty) {
      // Emit in groups based on access and type.
      // Emit doubles first for alignment purposes.
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::DOUBLE);
      EmitGroup(assm, ConstantPoolEntry::REGULAR, ConstantPoolEntry::INTPTR);
      if (info_[ConstantPoolEntry::DOUBLE].overflow()) {
        assm->DataAlign(kDoubleSize);
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::DOUBLE);
      }
      if (info_[ConstantPoolEntry::INTPTR].overflow()) {
        EmitGroup(assm, ConstantPoolEntry::OVERFLOWED,
                  ConstantPoolEntry::INTPTR);
      }
    }
  }

  return !empty ? emitted_label_.pos() : 0;
}

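// -----------------------------------------------------------------------------
// Implementation of HeapObjectRequest
//
// A HeapObjectRequest records a heap object (a heap number or the code
// produced by a code stub) that cannot be embedded at assembly time; the
// offset is the pc offset at which the object must later be patched in.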
HeapObjectRequest::HeapObjectRequest(double heap_number, int offset)
    : kind_(kHeapNumber), offset_(offset) {
  value_.heap_number = heap_number;
  DCHECK(!IsSmiDouble(value_.heap_number));
}

HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
    : kind_(kCodeStub), offset_(offset) {
  value_.code_stub = code_stub;
  DCHECK_NOT_NULL(value_.code_stub);
}

// Platform-specific but identical code for all platforms.

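// Records the script offset, inlining id, reason and id of a deoptimization
// point as a sequence of relocation entries, so the deoptimizer can report
// why and where code was deoptimized.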
void Assembler::RecordDeoptReason(DeoptimizeReason reason,
                                  SourcePosition position, int id) {
  EnsureSpace ensure_space(this);
  RecordRelocInfo(RelocInfo::DEOPT_SCRIPT_OFFSET, position.ScriptOffset());
  RecordRelocInfo(RelocInfo::DEOPT_INLINING_ID, position.InliningId());
  RecordRelocInfo(RelocInfo::DEOPT_REASON, static_cast<int>(reason));
  RecordRelocInfo(RelocInfo::DEOPT_ID, id);
}

void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    EnsureSpace ensure_space(this);
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}

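// Pads the instruction stream with zero bytes until pc_offset() is aligned
// to a multiple of m, which must be a power of two of at least 2.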
void Assembler::DataAlign(int m) {
  DCHECK(m >= 2 && base::bits::IsPowerOfTwo(m));
  while ((pc_offset() & (m - 1)) != 0) {
    db(0);
  }
}

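// Registers a heap object request at the current pc offset; the requested
// object is created and patched in once the code itself has been allocated.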
void AssemblerBase::RequestHeapObject(HeapObjectRequest request) {
  request.set_offset(pc_offset());
  heap_object_requests_.push_front(request);
}

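// Adds a code target to the target table and returns its index. Consecutive
// requests for the same target reuse the previous slot to keep the table
// small.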
int AssemblerBase::AddCodeTarget(Handle<Code> target) {
  int current = static_cast<int>(code_targets_.size());
  if (current > 0 && !target.is_null() &&
      code_targets_.back().address() == target.address()) {
    // Optimization if we keep jumping to the same code target.
    return current - 1;
  } else {
    code_targets_.push_back(target);
    return current;
  }
}

Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
  DCHECK_LE(0, code_target_index);
  DCHECK_LT(code_target_index, code_targets_.size());
  return code_targets_[code_target_index];
}

void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index,
                                     Handle<Code> code) {
  DCHECK_LE(0, code_target_index);
  DCHECK_LT(code_target_index, code_targets_.size());
  code_targets_[code_target_index] = code;
}

}  // namespace internal
}  // namespace v8