// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-code-manager.h"

#include <iomanip>

#include "src/assembler-inl.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/codegen.h"
#include "src/disassembler.h"
#include "src/globals.h"
#include "src/macro-assembler-inl.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"

#define TRACE_HEAP(...)                                   \
  do {                                                    \
    if (FLAG_wasm_trace_native_heap) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {
namespace wasm {

namespace {

// Binary predicate to perform lookups in {NativeModule::owned_code_} with a
// given address into a code object. Use with {std::upper_bound} for example.
struct WasmCodeUniquePtrComparator {
  bool operator()(Address pc, const std::unique_ptr<WasmCode>& code) const {
    DCHECK_NE(kNullAddress, pc);
    DCHECK_NOT_NULL(code);
    return pc < code->instruction_start();
  }
};

}  // namespace

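// Inserts {range} into the pool, keeping {ranges_} sorted by address and free
// of overlaps: the new range is either coalesced with an adjacent neighbor
// (and, if it closes a gap completely, with the range after that as well) or
// inserted as a separate element. For example, merging [16, 32) into
// {[0, 16), [48, 64)} yields {[0, 32), [48, 64)}.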
void DisjointAllocationPool::Merge(AddressRange range) {
  auto dest_it = ranges_.begin();
  auto dest_end = ranges_.end();

  // Skip over dest ranges strictly before {range}.
  while (dest_it != dest_end && dest_it->end < range.start) ++dest_it;

  // After last dest range: insert and done.
  if (dest_it == dest_end) {
    ranges_.push_back(range);
    return;
  }

  // Adjacent (from below) to dest: merge and done.
  if (dest_it->start == range.end) {
    dest_it->start = range.start;
    return;
  }

  // Before dest: insert and done.
  if (dest_it->start > range.end) {
    ranges_.insert(dest_it, range);
    return;
  }

  // {range} is adjacent from above (it starts exactly where the dest range
  // ends). Merge, and check whether the merged range is now adjacent to the
  // next range.
  DCHECK_EQ(dest_it->end, range.start);
  dest_it->end = range.end;
  auto next_dest = dest_it;
  ++next_dest;
  if (next_dest != dest_end && dest_it->end == next_dest->start) {
    dest_it->end = next_dest->end;
    ranges_.erase(next_dest);
  }
}

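// First-fit allocation: carve {size} bytes out of the lowest-address range
// that is large enough, shrinking or removing that range. Returns an empty
// range on failure. For example, allocating 8 bytes from {[0, 16), [32, 64)}
// returns [0, 8) and leaves {[8, 16), [32, 64)}.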
AddressRange DisjointAllocationPool::Allocate(size_t size) {
  for (auto it = ranges_.begin(), end = ranges_.end(); it != end; ++it) {
    size_t range_size = it->size();
    if (size > range_size) continue;
    AddressRange ret{it->start, it->start + size};
    if (size == range_size) {
      ranges_.erase(it);
    } else {
      it->start += size;
      DCHECK_LT(it->start, it->end);
    }
    return ret;
  }
  return {};
}

Address WasmCode::constant_pool() const {
  if (FLAG_enable_embedded_constant_pool) {
    if (constant_pool_offset_ < instructions().size()) {
      return instruction_start() + constant_pool_offset_;
    }
  }
  return kNullAddress;
}

size_t WasmCode::trap_handler_index() const {
  CHECK(HasTrapHandlerIndex());
  return static_cast<size_t>(trap_handler_index_);
}

void WasmCode::set_trap_handler_index(size_t value) {
  trap_handler_index_ = value;
}

void WasmCode::RegisterTrapHandlerData() {
  DCHECK(!HasTrapHandlerIndex());
  if (kind() != WasmCode::kFunction) return;

  Address base = instruction_start();

  size_t size = instructions().size();
  const int index =
      RegisterHandlerData(base, size, protected_instructions().size(),
                          protected_instructions().start());

  // TODO(eholk): if index is negative, fail.
  CHECK_LE(0, index);
  set_trap_handler_index(static_cast<size_t>(index));
}

bool WasmCode::HasTrapHandlerIndex() const { return trap_handler_index_ >= 0; }

bool WasmCode::ShouldBeLogged(Isolate* isolate) {
  return isolate->logger()->is_listening_to_code_events() ||
         isolate->is_profiling();
}

void WasmCode::LogCode(Isolate* isolate) const {
  DCHECK(ShouldBeLogged(isolate));
  if (IsAnonymous()) return;
  ModuleWireBytes wire_bytes(native_module()->wire_bytes());
  // TODO(herhut): Allow logging code without on-heap round-trip of the name.
  ModuleEnv* module_env = GetModuleEnv(native_module()->compilation_state());
  WireBytesRef name_ref =
      module_env->module->LookupFunctionName(wire_bytes, index());
  WasmName name_vec = wire_bytes.GetName(name_ref);
  MaybeHandle<String> maybe_name =
      isolate->factory()->NewStringFromUtf8(Vector<const char>::cast(name_vec));
  Handle<String> name;
  if (!maybe_name.ToHandle(&name)) {
    name = isolate->factory()->NewStringFromAsciiChecked("<name too long>");
  }
  int name_length;
  auto cname =
      name->ToCString(AllowNullsFlag::DISALLOW_NULLS,
                      RobustnessFlag::ROBUST_STRING_TRAVERSAL, &name_length);
  PROFILE(isolate,
          CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this,
                          {cname.get(), static_cast<size_t>(name_length)}));
  if (!source_positions().is_empty()) {
    LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
                                                       source_positions()));
  }
}

void WasmCode::Validate() const {
#ifdef DEBUG
  // We expect certain relocation info modes to never appear in {WasmCode}
  // objects or to be restricted to a small set of valid values. Hence the
  // iteration below does not use a mask, but visits all relocation data.
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    switch (mode) {
      case RelocInfo::WASM_CALL: {
        Address target = it.rinfo()->wasm_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kJumpTable, code->kind());
        CHECK(code->contains(target));
        break;
      }
      case RelocInfo::WASM_STUB_CALL: {
        Address target = it.rinfo()->wasm_stub_call_address();
        WasmCode* code = native_module_->Lookup(target);
        CHECK_NOT_NULL(code);
        CHECK_EQ(WasmCode::kRuntimeStub, code->kind());
        CHECK_EQ(target, code->instruction_start());
        break;
      }
      case RelocInfo::INTERNAL_REFERENCE:
      case RelocInfo::INTERNAL_REFERENCE_ENCODED: {
        Address target = it.rinfo()->target_internal_reference();
        CHECK(contains(target));
        break;
      }
      case RelocInfo::JS_TO_WASM_CALL:
      case RelocInfo::EXTERNAL_REFERENCE:
      case RelocInfo::COMMENT:
      case RelocInfo::CONST_POOL:
      case RelocInfo::VENEER_POOL:
        // These are OK to appear.
        break;
      default:
        FATAL("Unexpected mode: %d", mode);
    }
  }
#endif
}

void WasmCode::Print(const char* name) const {
  StdoutStream os;
  os << "--- WebAssembly code ---\n";
  Disassemble(name, os);
  os << "--- End code ---\n";
}

void WasmCode::Disassemble(const char* name, std::ostream& os,
                           Address current_pc) const {
  if (name) os << "name: " << name << "\n";
  if (!IsAnonymous()) os << "index: " << index() << "\n";
  os << "kind: " << GetWasmCodeKindAsString(kind_) << "\n";
  os << "compiler: " << (is_liftoff() ? "Liftoff" : "TurboFan") << "\n";
  size_t body_size = instructions().size();
  os << "Body (size = " << body_size << ")\n";

#ifdef ENABLE_DISASSEMBLER
  size_t instruction_size = body_size;
  if (constant_pool_offset_ && constant_pool_offset_ < instruction_size) {
    instruction_size = constant_pool_offset_;
  }
  if (safepoint_table_offset_ && safepoint_table_offset_ < instruction_size) {
    instruction_size = safepoint_table_offset_;
  }
  if (handler_table_offset_ && handler_table_offset_ < instruction_size) {
    instruction_size = handler_table_offset_;
  }
  DCHECK_LT(0, instruction_size);
  os << "Instructions (size = " << instruction_size << ")\n";
  Disassembler::Decode(nullptr, &os, instructions().start(),
                       instructions().start() + instruction_size,
                       CodeReference(this), current_pc);
  os << "\n";

  if (handler_table_offset_ > 0) {
    HandlerTable table(instruction_start(), handler_table_offset_);
    os << "Exception Handler Table (size = " << table.NumberOfReturnEntries()
       << "):\n";
    table.HandlerTableReturnPrint(os);
    os << "\n";
  }

  if (!protected_instructions_.is_empty()) {
    os << "Protected instructions:\n pc offset  land pad\n";
    for (auto& data : protected_instructions()) {
      os << std::setw(10) << std::hex << data.instr_offset << std::setw(10)
         << std::hex << data.landing_offset << "\n";
    }
    os << "\n";
  }

  if (!source_positions().is_empty()) {
    os << "Source positions:\n pc offset  position\n";
    for (SourcePositionTableIterator it(source_positions()); !it.done();
         it.Advance()) {
      os << std::setw(10) << std::hex << it.code_offset() << std::dec
         << std::setw(10) << it.source_position().ScriptOffset()
         << (it.is_statement() ? "  statement" : "") << "\n";
    }
    os << "\n";
  }

  os << "RelocInfo (size = " << reloc_info_.size() << ")\n";
  for (RelocIterator it(instructions(), reloc_info(), constant_pool());
       !it.done(); it.next()) {
    it.rinfo()->Print(nullptr, os);
  }
  os << "\n";
#endif  // ENABLE_DISASSEMBLER
}

const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {
  switch (kind) {
    case WasmCode::kFunction:
      return "wasm function";
    case WasmCode::kWasmToJsWrapper:
      return "wasm-to-js";
    case WasmCode::kLazyStub:
      return "lazy-compile";
    case WasmCode::kRuntimeStub:
      return "runtime-stub";
    case WasmCode::kInterpreterEntry:
      return "interpreter entry";
    case WasmCode::kJumpTable:
      return "jump table";
  }
  return "unknown kind";
}

WasmCode::~WasmCode() {
  if (HasTrapHandlerIndex()) {
    CHECK_LT(trap_handler_index(),
             static_cast<size_t>(std::numeric_limits<int>::max()));
    trap_handler::ReleaseHandlerData(static_cast<int>(trap_handler_index()));
  }
}

NativeModule::NativeModule(Isolate* isolate, const WasmFeatures& enabled,
                           bool can_request_more, VirtualMemory* code_space,
                           WasmCodeManager* code_manager,
                           std::shared_ptr<const WasmModule> module,
                           const ModuleEnv& env)
    : enabled_features_(enabled),
      module_(std::move(module)),
      compilation_state_(NewCompilationState(isolate, env)),
      free_code_space_({code_space->address(), code_space->end()}),
      wasm_code_manager_(code_manager),
      can_request_more_memory_(can_request_more),
      use_trap_handler_(env.use_trap_handler) {
  DCHECK_EQ(module_.get(), env.module);
  DCHECK_NOT_NULL(module_);
  VirtualMemory my_mem;
  owned_code_space_.push_back(my_mem);
  owned_code_space_.back().TakeControl(code_space);
  owned_code_.reserve(num_functions());

  uint32_t num_wasm_functions = module_->num_declared_functions;
  if (num_wasm_functions > 0) {
    code_table_.reset(new WasmCode*[num_wasm_functions]);
    memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));

    jump_table_ = CreateEmptyJumpTable(num_wasm_functions);
  }
}

void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
  DCHECK_LE(num_functions(), max_functions);
  WasmCode** new_table = new WasmCode*[max_functions];
  memset(new_table, 0, max_functions * sizeof(*new_table));
  memcpy(new_table, code_table_.get(),
         module_->num_declared_functions * sizeof(*new_table));
  code_table_.reset(new_table);

  // Re-allocate jump table.
  jump_table_ = CreateEmptyJumpTable(max_functions);
}

void NativeModule::LogWasmCodes(Isolate* isolate) {
  if (!WasmCode::ShouldBeLogged(isolate)) return;

  // TODO(titzer): we skip the logging of the import wrappers
  // here, but they should be included somehow.
  for (WasmCode* code : code_table()) {
    if (code != nullptr) code->LogCode(isolate);
  }
}

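// Reserves space in the module's code region, wraps it in a new {WasmCode}
// object, and inserts that object into {owned_code_}, which is kept sorted by
// instruction start address so that {Lookup} can binary-search it. The actual
// instruction bytes are copied in after the allocation lock is released.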
WasmCode* NativeModule::AddOwnedCode(
    Maybe<uint32_t> index, Vector<const byte> instructions,
    uint32_t stack_slots, size_t safepoint_table_offset,
    size_t handler_table_offset, size_t constant_pool_offset,
    OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
    OwnedVector<const byte> reloc_info,
    OwnedVector<const byte> source_position_table, WasmCode::Kind kind,
    WasmCode::Tier tier) {
  WasmCode* code;
  {
    // Both allocation and insertion in owned_code_ happen in the same critical
    // section, thus ensuring owned_code_'s elements are rarely if ever moved.
    base::LockGuard<base::Mutex> lock(&allocation_mutex_);
    Address executable_buffer = AllocateForCode(instructions.size());
    if (executable_buffer == kNullAddress) {
      V8::FatalProcessOutOfMemory(nullptr, "NativeModule::AddOwnedCode");
      UNREACHABLE();
    }
    // Ownership will be transferred to {owned_code_} below.
    code = new WasmCode(
        this, index,
        {reinterpret_cast<byte*>(executable_buffer), instructions.size()},
        stack_slots, safepoint_table_offset, handler_table_offset,
        constant_pool_offset, std::move(protected_instructions),
        std::move(reloc_info), std::move(source_position_table), kind, tier);

    if (owned_code_.empty() ||
        code->instruction_start() > owned_code_.back()->instruction_start()) {
      // Common case.
      owned_code_.emplace_back(code);
    } else {
      // Slow but unlikely case.
      // TODO(mtrofin): We allocate in increasing address order, and
      // even if we end up with segmented memory, we may end up only with a few
      // large moves - if, for example, a new segment is below the current ones.
      auto insert_before = std::upper_bound(
          owned_code_.begin(), owned_code_.end(), code->instruction_start(),
          WasmCodeUniquePtrComparator{});
      owned_code_.emplace(insert_before, code);
    }
  }
  memcpy(reinterpret_cast<void*>(code->instruction_start()),
         instructions.start(), instructions.size());

  return code;
}

WasmCode* NativeModule::AddCodeCopy(Handle<Code> code, WasmCode::Kind kind,
                                    uint32_t index) {
  // TODO(wasm): Adding instance-specific wasm-to-js wrappers as owned code to
  // this NativeModule is a memory leak until the whole NativeModule dies.
  WasmCode* ret = AddAnonymousCode(code, kind);
  ret->index_ = Just(index);
  if (index >= module_->num_imported_functions) set_code(index, ret);
  return ret;
}

WasmCode* NativeModule::AddInterpreterEntry(Handle<Code> code, uint32_t index) {
  WasmCode* ret = AddAnonymousCode(code, WasmCode::kInterpreterEntry);
  ret->index_ = Just(index);
  base::LockGuard<base::Mutex> lock(&allocation_mutex_);
  PatchJumpTable(index, ret->instruction_start(), WasmCode::kFlushICache);
  set_code(index, ret);
  return ret;
}

void NativeModule::SetLazyBuiltin(Handle<Code> code) {
  uint32_t num_wasm_functions = module_->num_declared_functions;
  if (num_wasm_functions == 0) return;
  WasmCode* lazy_builtin = AddAnonymousCode(code, WasmCode::kLazyStub);
  // Fill the jump table with jumps to the lazy compile stub.
  Address lazy_compile_target = lazy_builtin->instruction_start();
  for (uint32_t i = 0; i < num_wasm_functions; ++i) {
    JumpTableAssembler::EmitLazyCompileJumpSlot(
        jump_table_->instruction_start(), i,
        i + module_->num_imported_functions, lazy_compile_target,
        WasmCode::kNoFlushICache);
  }
  Assembler::FlushICache(jump_table_->instructions().start(),
                         jump_table_->instructions().size());
}

void NativeModule::SetRuntimeStubs(Isolate* isolate) {
  DCHECK_NULL(runtime_stub_table_[0]);  // Only called once.
#define COPY_BUILTIN(Name)                                                     \
  runtime_stub_table_[WasmCode::k##Name] =                                     \
      AddAnonymousCode(isolate->builtins()->builtin_handle(Builtins::k##Name), \
                       WasmCode::kRuntimeStub);
#define COPY_BUILTIN_TRAP(Name) COPY_BUILTIN(ThrowWasm##Name)
  WASM_RUNTIME_STUB_LIST(COPY_BUILTIN, COPY_BUILTIN_TRAP);
#undef COPY_BUILTIN_TRAP
#undef COPY_BUILTIN
}

WasmCode* NativeModule::AddAnonymousCode(Handle<Code> code,
                                         WasmCode::Kind kind) {
  // For off-heap builtins, we create a copy of the off-heap instruction stream
  // instead of the on-heap code object containing the trampoline. Ensure that
  // we do not apply the on-heap reloc info to the off-heap instructions.
  const size_t relocation_size =
      code->is_off_heap_trampoline() ? 0 : code->relocation_size();
  OwnedVector<byte> reloc_info = OwnedVector<byte>::New(relocation_size);
  memcpy(reloc_info.start(), code->relocation_start(), relocation_size);
  Handle<ByteArray> source_pos_table(code->SourcePositionTable(),
                                     code->GetIsolate());
  OwnedVector<byte> source_pos =
      OwnedVector<byte>::New(source_pos_table->length());
  source_pos_table->copy_out(0, source_pos.start(), source_pos_table->length());
  Vector<const byte> instructions(
      reinterpret_cast<byte*>(code->InstructionStart()),
      static_cast<size_t>(code->InstructionSize()));
  int stack_slots = code->has_safepoint_info() ? code->stack_slots() : 0;
  int safepoint_table_offset =
      code->has_safepoint_info() ? code->safepoint_table_offset() : 0;
  WasmCode* ret =
      AddOwnedCode(Nothing<uint32_t>(),           // index
                   instructions,                  // instructions
                   stack_slots,                   // stack_slots
                   safepoint_table_offset,        // safepoint_table_offset
                   code->handler_table_offset(),  // handler_table_offset
                   code->constant_pool_offset(),  // constant_pool_offset
                   {},                            // protected_instructions
                   std::move(reloc_info),         // reloc_info
                   std::move(source_pos),         // source positions
                   kind,                          // kind
                   WasmCode::kOther);             // tier

  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = ret->instruction_start() - code->InstructionStart();
  int mode_mask = RelocInfo::kApplyMask |
                  RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  RelocIterator orig_it(*code, mode_mask);
  for (RelocIterator it(ret->instructions(), ret->reloc_info(),
                        ret->constant_pool(), mode_mask);
       !it.done(); it.next(), orig_it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmStubCall(mode)) {
      uint32_t stub_call_tag = orig_it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      WasmCode* code =
          runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
      it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
                                             SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }

  // Flush the i-cache here instead of in AddOwnedCode, to include the changes
  // made while iterating over the RelocInfo above.
  Assembler::FlushICache(ret->instructions().start(),
                         ret->instructions().size());
  if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
  ret->Validate();
  return ret;
}

WasmCode* NativeModule::AddCode(
    uint32_t index, const CodeDesc& desc, uint32_t stack_slots,
    size_t safepoint_table_offset, size_t handler_table_offset,
    OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
    OwnedVector<const byte> source_pos_table, WasmCode::Tier tier) {
  OwnedVector<byte> reloc_info = OwnedVector<byte>::New(desc.reloc_size);
  memcpy(reloc_info.start(), desc.buffer + desc.buffer_size - desc.reloc_size,
         desc.reloc_size);
  WasmCode* ret = AddOwnedCode(
      Just(index), {desc.buffer, static_cast<size_t>(desc.instr_size)},
      stack_slots, safepoint_table_offset, handler_table_offset,
      desc.instr_size - desc.constant_pool_size,
      std::move(protected_instructions), std::move(reloc_info),
      std::move(source_pos_table), WasmCode::kFunction, tier);

  // Apply the relocation delta by iterating over the RelocInfo.
  intptr_t delta = ret->instructions().start() - desc.buffer;
  int mode_mask = RelocInfo::kApplyMask |
                  RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
                  RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
  for (RelocIterator it(ret->instructions(), ret->reloc_info(),
                        ret->constant_pool(), mode_mask);
       !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (RelocInfo::IsWasmCall(mode)) {
      uint32_t call_tag = it.rinfo()->wasm_call_tag();
      Address target = GetCallTargetForFunction(call_tag);
      it.rinfo()->set_wasm_call_address(target, SKIP_ICACHE_FLUSH);
    } else if (RelocInfo::IsWasmStubCall(mode)) {
      uint32_t stub_call_tag = it.rinfo()->wasm_call_tag();
      DCHECK_LT(stub_call_tag, WasmCode::kRuntimeStubCount);
      WasmCode* code =
          runtime_stub(static_cast<WasmCode::RuntimeStubId>(stub_call_tag));
      it.rinfo()->set_wasm_stub_call_address(code->instruction_start(),
                                             SKIP_ICACHE_FLUSH);
    } else {
      it.rinfo()->apply(delta);
    }
  }

  // Flush the i-cache here instead of in AddOwnedCode, to include the changes
  // made while iterating over the RelocInfo above.
  Assembler::FlushICache(ret->instructions().start(),
                         ret->instructions().size());
  if (FLAG_print_code || FLAG_print_wasm_code) ret->Print();
  ret->Validate();
  return ret;
}

WasmCode* NativeModule::AddDeserializedCode(
    uint32_t index, Vector<const byte> instructions, uint32_t stack_slots,
    size_t safepoint_table_offset, size_t handler_table_offset,
    size_t constant_pool_offset,
    OwnedVector<trap_handler::ProtectedInstructionData> protected_instructions,
    OwnedVector<const byte> reloc_info,
    OwnedVector<const byte> source_position_table, WasmCode::Tier tier) {
  WasmCode* code = AddOwnedCode(
      Just(index), instructions, stack_slots, safepoint_table_offset,
      handler_table_offset, constant_pool_offset,
      std::move(protected_instructions), std::move(reloc_info),
      std::move(source_position_table), WasmCode::kFunction, tier);

  if (!code->protected_instructions_.is_empty()) {
    code->RegisterTrapHandlerData();
  }
  set_code(index, code);
  PatchJumpTable(index, code->instruction_start(), WasmCode::kFlushICache);
  // Note: we do not flush the i-cache here, since the code needs to be
  // relocated anyway. The caller is responsible for flushing the i-cache later.
  return code;
}

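// Installs {code} as the published implementation of its function: it is
// entered into the code table and the jump table slot is redirected to it.
// Publishing is skipped if the function is currently redirected to the
// interpreter, so that the redirection stays intact.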
void NativeModule::PublishCode(WasmCode* code) {
  base::LockGuard<base::Mutex> lock(&allocation_mutex_);
  // Skip publishing code if there is an active redirection to the interpreter
  // for the given function index, in order to preserve the redirection.
  if (has_code(code->index()) &&
      this->code(code->index())->kind() == WasmCode::kInterpreterEntry) {
    return;
  }
  if (!code->protected_instructions_.is_empty()) {
    code->RegisterTrapHandlerData();
  }
  DCHECK(!code->IsAnonymous());
  set_code(code->index(), code);
  PatchJumpTable(code->index(), code->instruction_start(),
                 WasmCode::kFlushICache);
}

std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
  base::LockGuard<base::Mutex> lock(&allocation_mutex_);
  std::vector<WasmCode*> result;
  result.reserve(code_table().size());
  for (WasmCode* code : code_table()) result.push_back(code);
  return result;
}

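// Creates the module's jump table: one slot per declared (non-imported)
// function, initially zero-filled. Slots are later pointed at the lazy compile
// stub, at published code, or at an interpreter entry via {PatchJumpTable}.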
WasmCode* NativeModule::CreateEmptyJumpTable(uint32_t num_wasm_functions) {
  // Only call this if we really need a jump table.
  DCHECK_LT(0, num_wasm_functions);
  OwnedVector<byte> instructions = OwnedVector<byte>::New(
      JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));
  memset(instructions.start(), 0, instructions.size());
  return AddOwnedCode(Nothing<uint32_t>(),       // index
                      instructions.as_vector(),  // instructions
                      0,                         // stack_slots
                      0,                         // safepoint_table_offset
                      0,                         // handler_table_offset
                      0,                         // constant_pool_offset
                      {},                        // protected_instructions
                      {},                        // reloc_info
                      {},                        // source_pos
                      WasmCode::kJumpTable,      // kind
                      WasmCode::kOther);         // tier
}

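// Redirects the jump table slot of {func_index} to {target}. Imported
// functions have no slot, hence the function index is translated to a slot
// index by subtracting the number of imports.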
void NativeModule::PatchJumpTable(uint32_t func_index, Address target,
                                  WasmCode::FlushICache flush_icache) {
  DCHECK_LE(module_->num_imported_functions, func_index);
  uint32_t slot_idx = func_index - module_->num_imported_functions;
  JumpTableAssembler::PatchJumpTableSlot(jump_table_->instruction_start(),
                                         slot_idx, target, flush_icache);
}

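// Carves {size} bytes (rounded up to {kCodeAlignment}) out of the reserved
// code space, growing the reservation if allowed and committing any pages
// that are not yet committed. Returns {kNullAddress} on failure.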
Address NativeModule::AllocateForCode(size_t size) {
  // This happens under a lock assumed by the caller.
  size = RoundUp(size, kCodeAlignment);
  AddressRange mem = free_code_space_.Allocate(size);
  if (mem.is_empty()) {
    if (!can_request_more_memory_) return kNullAddress;

    Address hint = owned_code_space_.empty() ? kNullAddress
                                             : owned_code_space_.back().end();
    VirtualMemory empty_mem;
    owned_code_space_.push_back(empty_mem);
    VirtualMemory& new_mem = owned_code_space_.back();
    wasm_code_manager_->TryAllocate(size, &new_mem,
                                    reinterpret_cast<void*>(hint));
    if (!new_mem.IsReserved()) return kNullAddress;
    base::LockGuard<base::Mutex> lock(
        &wasm_code_manager_->native_modules_mutex_);
    wasm_code_manager_->AssignRanges(new_mem.address(), new_mem.end(), this);

    free_code_space_.Merge({new_mem.address(), new_mem.end()});
    mem = free_code_space_.Allocate(size);
    if (mem.is_empty()) return kNullAddress;
  }
  Address commit_start = RoundUp(mem.start, AllocatePageSize());
  Address commit_end = RoundUp(mem.end, AllocatePageSize());
  // {commit_start} will be either mem.start or the start of the next page.
  // {commit_end} will be the start of the page after the one in which
  // the allocation ends.
  // We start from an aligned start, and we know we allocated vmem in
  // page multiples.
  // We just need to commit what's not committed. The page in which we
  // start is already committed (or we start at the beginning of a page).
  // The end needs to be committed all through the end of the page.
  if (commit_start < commit_end) {
#if V8_OS_WIN
    // On Windows, we cannot commit a range that straddles different
    // reservations of virtual memory. Because we bump-allocate, and because, if
    // we need more memory, we append that memory at the end of the
    // owned_code_space_ list, we traverse that list in reverse order to find
    // the reservation(s) that guide how to chunk the region to commit.
    for (auto it = owned_code_space_.crbegin(),
              rend = owned_code_space_.crend();
         it != rend && commit_start < commit_end; ++it) {
      if (commit_end > it->end() || it->address() >= commit_end) continue;
      Address start = std::max(commit_start, it->address());
      size_t commit_size = static_cast<size_t>(commit_end - start);
      DCHECK(IsAligned(commit_size, AllocatePageSize()));
      if (!wasm_code_manager_->Commit(start, commit_size)) {
        return kNullAddress;
      }
      committed_code_space_.fetch_add(commit_size);
      commit_end = start;
    }
#else
    size_t commit_size = static_cast<size_t>(commit_end - commit_start);
    DCHECK(IsAligned(commit_size, AllocatePageSize()));
    if (!wasm_code_manager_->Commit(commit_start, commit_size)) {
      return kNullAddress;
    }
    committed_code_space_.fetch_add(commit_size);
#endif
  }
  DCHECK(IsAligned(mem.start, kCodeAlignment));
  allocated_code_space_.Merge(std::move(mem));
  TRACE_HEAP("Code alloc for %p: %" PRIuPTR ",+%zu\n", this, mem.start, size);
  return mem.start;
}

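// Returns the {WasmCode} object whose instructions contain {pc}, or nullptr.
// Uses binary search over {owned_code_}, which is kept sorted by instruction
// start address.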
WasmCode* NativeModule::Lookup(Address pc) const {
  base::LockGuard<base::Mutex> lock(&allocation_mutex_);
  if (owned_code_.empty()) return nullptr;
  auto iter = std::upper_bound(owned_code_.begin(), owned_code_.end(), pc,
                               WasmCodeUniquePtrComparator());
  if (iter == owned_code_.begin()) return nullptr;
  --iter;
  WasmCode* candidate = iter->get();
  DCHECK_NOT_NULL(candidate);
  return candidate->contains(pc) ? candidate : nullptr;
}

Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
  // TODO(clemensh): Measure performance win of returning instruction start
  // directly if we have turbofan code. Downside: Redirecting functions (e.g.
  // for debugging) gets much harder.

  // Return the jump table slot for that function index.
  DCHECK_NOT_NULL(jump_table_);
  uint32_t slot_idx = func_index - module_->num_imported_functions;
  uint32_t slot_offset = JumpTableAssembler::SlotIndexToOffset(slot_idx);
  DCHECK_LT(slot_offset, jump_table_->instructions().size());
  return jump_table_->instruction_start() + slot_offset;
}

uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
    Address slot_address) const {
  DCHECK(is_jump_table_slot(slot_address));
  uint32_t slot_offset =
      static_cast<uint32_t>(slot_address - jump_table_->instruction_start());
  uint32_t slot_idx = JumpTableAssembler::SlotOffsetToIndex(slot_offset);
  DCHECK_LT(slot_idx, module_->num_declared_functions);
  return module_->num_imported_functions + slot_idx;
}

void NativeModule::DisableTrapHandler() {
  // Switch {use_trap_handler_} from true to false.
  DCHECK(use_trap_handler_);
  use_trap_handler_ = false;

  // Clear the code table (just to increase the chance of hitting an error if
  // we forget to re-add all code).
  uint32_t num_wasm_functions = module_->num_declared_functions;
  memset(code_table_.get(), 0, num_wasm_functions * sizeof(WasmCode*));

  // TODO(clemensh): Actually free the owned code, such that the memory can be
  // recycled.
}

NativeModule::~NativeModule() {
  TRACE_HEAP("Deleting native module: %p\n", reinterpret_cast<void*>(this));
  compilation_state_.reset();  // Cancels tasks, needs to be done first.
  wasm_code_manager_->FreeNativeModule(this);
}

WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
                                 size_t max_committed)
    : memory_tracker_(memory_tracker),
      remaining_uncommitted_code_space_(max_committed) {
  DCHECK_LE(max_committed, kMaxWasmCodeMemory);
}

bool WasmCodeManager::Commit(Address start, size_t size) {
  DCHECK(IsAligned(start, AllocatePageSize()));
  DCHECK(IsAligned(size, AllocatePageSize()));
  // Reserve the size. Use a CAS loop to avoid underflow of
  // {remaining_uncommitted_code_space_}. A temporary underflow would allow
  // concurrent threads to over-commit.
  while (true) {
    size_t old_value = remaining_uncommitted_code_space_.load();
    if (old_value < size) return false;
    if (remaining_uncommitted_code_space_.compare_exchange_weak(
            old_value, old_value - size)) {
      break;
    }
  }
  PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
                                             ? PageAllocator::kReadWrite
                                             : PageAllocator::kReadWriteExecute;

  bool ret = SetPermissions(start, size, permission);
  TRACE_HEAP("Setting rw permissions for %p:%p\n",
             reinterpret_cast<void*>(start),
             reinterpret_cast<void*>(start + size));

  if (!ret) {
    // Highly unlikely.
    remaining_uncommitted_code_space_.fetch_add(size);
    return false;
  }
  return ret;
}

void WasmCodeManager::AssignRanges(Address start, Address end,
                                   NativeModule* native_module) {
  lookup_map_.insert(std::make_pair(start, std::make_pair(end, native_module)));
}

void WasmCodeManager::TryAllocate(size_t size, VirtualMemory* ret, void* hint) {
  DCHECK_GT(size, 0);
  size = RoundUp(size, AllocatePageSize());
  DCHECK(!ret->IsReserved());
  if (!memory_tracker_->ReserveAddressSpace(size)) return;
  if (hint == nullptr) hint = GetRandomMmapAddr();

  if (!AlignedAllocVirtualMemory(size, static_cast<size_t>(AllocatePageSize()),
                                 hint, ret)) {
    DCHECK(!ret->IsReserved());
    memory_tracker_->ReleaseReservation(size);
  }
  TRACE_HEAP("VMem alloc: %p:%p (%zu)\n",
             reinterpret_cast<void*>(ret->address()),
             reinterpret_cast<void*>(ret->end()), ret->size());
}

void WasmCodeManager::SampleModuleSizes(Isolate* isolate) const {
  base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
  for (NativeModule* native_module : native_modules_) {
    int code_size =
        static_cast<int>(native_module->committed_code_space_.load() / MB);
    isolate->counters()->wasm_module_code_size_mb()->AddSample(code_size);
  }
}

namespace {

void ModuleSamplingCallback(v8::Isolate* v8_isolate, v8::GCType type,
                            v8::GCCallbackFlags flags, void* data) {
  Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
  isolate->wasm_engine()->code_manager()->SampleModuleSizes(isolate);
}

}  // namespace

// static
void WasmCodeManager::InstallSamplingGCCallback(Isolate* isolate) {
  isolate->heap()->AddGCEpilogueCallback(ModuleSamplingCallback,
                                         v8::kGCTypeMarkSweepCompact, nullptr);
}

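// Heuristic size estimate for a new native module: fixed per-module and
// per-import overheads plus roughly {kCodeSizeMultiplier} times the wire-bytes
// size of each function body.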
// static
size_t WasmCodeManager::EstimateNativeModuleSize(const WasmModule* module) {
  constexpr size_t kCodeSizeMultiplier = 4;
  constexpr size_t kImportSize = 32 * kPointerSize;

  uint32_t num_wasm_functions = module->num_declared_functions;

  size_t estimate =
      AllocatePageSize() /* TODO(titzer): 1 page spot bonus */ +
      sizeof(NativeModule) +
      (sizeof(WasmCode*) * num_wasm_functions /* code table size */) +
      (sizeof(WasmCode) * num_wasm_functions /* code object size */) +
      (kImportSize * module->num_imported_functions /* import size */) +
      (JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions));

  for (auto& function : module->functions) {
    estimate += kCodeSizeMultiplier * function.code.length();
  }

  return estimate;
}

bool WasmCodeManager::ShouldForceCriticalMemoryPressureNotification() {
  base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
  // TODO(titzer): we force a critical memory pressure notification
  // when the code space is almost exhausted, but only upon the next module
  // creation. This is only for one isolate, and it should really do this for
  // all isolates, at the point of commit.
  constexpr size_t kCriticalThreshold = 32 * 1024 * 1024;
  return native_modules_.size() > 1 &&
         remaining_uncommitted_code_space_.load() < kCriticalThreshold;
}

std::unique_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    Isolate* isolate, const WasmFeatures& enabled, size_t memory_estimate,
    bool can_request_more, std::shared_ptr<const WasmModule> module,
    const ModuleEnv& env) {
  if (ShouldForceCriticalMemoryPressureNotification()) {
    (reinterpret_cast<v8::Isolate*>(isolate))
        ->MemoryPressureNotification(MemoryPressureLevel::kCritical);
  }

  // If the code must be contiguous, reserve enough address space up front.
  size_t vmem_size = kRequiresCodeRange ? kMaxWasmCodeMemory : memory_estimate;
  // Try up to three times; getting rid of dead JSArrayBuffer allocations might
  // require two GCs because the first GC may be incremental and may have
  // floating garbage.
  static constexpr int kAllocationRetries = 2;
  VirtualMemory mem;
  for (int retries = 0;; ++retries) {
    TryAllocate(vmem_size, &mem);
    if (mem.IsReserved()) break;
    if (retries == kAllocationRetries) {
      V8::FatalProcessOutOfMemory(isolate, "WasmCodeManager::NewNativeModule");
      UNREACHABLE();
    }
    // Run one GC, then try the allocation again.
    isolate->heap()->MemoryPressureNotification(MemoryPressureLevel::kCritical,
                                                true);
  }

  Address start = mem.address();
  size_t size = mem.size();
  Address end = mem.end();
  std::unique_ptr<NativeModule> ret(new NativeModule(
      isolate, enabled, can_request_more, &mem, this, std::move(module), env));
  TRACE_HEAP("New NativeModule %p: Mem: %" PRIuPTR ",+%zu\n", ret.get(), start,
             size);
  base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
  AssignRanges(start, end, ret.get());
  native_modules_.emplace(ret.get());
  return ret;
}

bool NativeModule::SetExecutable(bool executable) {
  if (is_executable_ == executable) return true;
  TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);

  if (FLAG_wasm_write_protect_code_memory) {
    PageAllocator::Permission permission =
        executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
#if V8_OS_WIN
    // On Windows, we need to switch permissions per separate virtual memory
    // reservation. This is really only a problem when the NativeModule is
    // growable (meaning can_request_more_memory_), which is the case on 32-bit
    // platforms in production, and in unittests.
    // For now, in that case, we commit at reserved memory granularity.
    // Technically, that may be a waste, because we may reserve more than we
    // use. On 32-bit though, the scarce resource is the address space -
    // committed or not.
    if (can_request_more_memory_) {
      for (auto& vmem : owned_code_space_) {
        if (!SetPermissions(vmem.address(), vmem.size(), permission)) {
          return false;
        }
        TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
                   executable);
      }
      is_executable_ = executable;
      return true;
    }
#endif
    for (auto& range : allocated_code_space_.ranges()) {
      // allocated_code_space_ is fine-grained, so we need to
      // page-align it.
      size_t range_size = RoundUp(range.size(), AllocatePageSize());
      if (!SetPermissions(range.start, range_size, permission)) {
        return false;
      }
      TRACE_HEAP("Set %p:%p to executable:%d\n",
                 reinterpret_cast<void*>(range.start),
                 reinterpret_cast<void*>(range.end), executable);
    }
  }
  is_executable_ = executable;
  return true;
}

void WasmCodeManager::FreeNativeModule(NativeModule* native_module) {
  base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
  DCHECK_EQ(1, native_modules_.count(native_module));
  native_modules_.erase(native_module);
  TRACE_HEAP("Freeing NativeModule %p\n", native_module);
  for (auto& vmem : native_module->owned_code_space_) {
    lookup_map_.erase(vmem.address());
    Free(&vmem);
    DCHECK(!vmem.IsReserved());
  }
  native_module->owned_code_space_.clear();

  size_t code_size = native_module->committed_code_space_.load();
  DCHECK(IsAligned(code_size, AllocatePageSize()));
  remaining_uncommitted_code_space_.fetch_add(code_size);
}

// TODO(wasm): We can make this more efficient if needed. For
// example, we can preface the first instruction with a pointer to
// the WasmCode. In the meantime, we have a separate API so we can
// easily identify those places where we know we have the first
// instruction PC.
WasmCode* WasmCodeManager::GetCodeFromStartAddress(Address pc) const {
  WasmCode* code = LookupCode(pc);
  // This method can only be called for valid instruction start addresses.
  DCHECK_NOT_NULL(code);
  DCHECK_EQ(pc, code->instruction_start());
  return code;
}

NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {
  base::LockGuard<base::Mutex> lock(&native_modules_mutex_);
  if (lookup_map_.empty()) return nullptr;

  auto iter = lookup_map_.upper_bound(pc);
  if (iter == lookup_map_.begin()) return nullptr;
  --iter;
  Address range_start = iter->first;
  Address range_end = iter->second.first;
  NativeModule* candidate = iter->second.second;

  DCHECK_NOT_NULL(candidate);
  return range_start <= pc && pc < range_end ? candidate : nullptr;
}

WasmCode* WasmCodeManager::LookupCode(Address pc) const {
  NativeModule* candidate = LookupNativeModule(pc);
  return candidate ? candidate->Lookup(pc) : nullptr;
}

void WasmCodeManager::Free(VirtualMemory* mem) {
  DCHECK(mem->IsReserved());
  void* start = reinterpret_cast<void*>(mem->address());
  void* end = reinterpret_cast<void*>(mem->end());
  size_t size = mem->size();
  mem->Free();
  memory_tracker_->ReleaseReservation(size);
  TRACE_HEAP("VMem Release: %p:%p (%zu)\n", start, end, size);
}

size_t WasmCodeManager::remaining_uncommitted_code_space() const {
  return remaining_uncommitted_code_space_.load();
}

// TODO(v8:7424): Code protection scopes are not yet supported with shared code
// enabled and need to be revisited to work with --wasm-shared-code as well.
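// When --wasm-write-protect-code-memory is enabled, this scope flips the
// module's code memory to writable on construction and back to executable-only
// when the outermost scope is destroyed (nesting is tracked via a depth
// counter). A minimal usage sketch (assuming a NativeModule* {native_module}
// that is about to receive new code):
//
//   {
//     NativeModuleModificationScope scope(native_module);
//     // ... write or patch code while the pages are writable ...
//   }  // Leaving the outermost scope makes the code memory executable again.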
NativeModuleModificationScope::NativeModuleModificationScope(
    NativeModule* native_module)
    : native_module_(native_module) {
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_++) == 0) {
    bool success = native_module_->SetExecutable(false);
    CHECK(success);
  }
}

NativeModuleModificationScope::~NativeModuleModificationScope() {
  if (FLAG_wasm_write_protect_code_memory && native_module_ &&
      (native_module_->modification_scope_depth_--) == 1) {
    bool success = native_module_->SetExecutable(true);
    CHECK(success);
  }
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8
#undef TRACE_HEAP