// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/value-numbering-reducer.h"

#include <cstring>

#include "src/base/functional.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

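// Computes a structural hash of {node} from its operator's hash code, its
// input count, and the ids of its inputs, so that nodes which compare equal
// under Equals() below also hash to the same value.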
size_t HashCode(Node* node) {
  size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
  for (Node* input : node->inputs()) {
    h = base::hash_combine(h, input->id());
  }
  return h;
}


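// Checks whether {a} and {b} are structurally equivalent, i.e. whether they
// have equal operators and pairwise identical inputs.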
bool Equals(Node* a, Node* b) {
  DCHECK_NOT_NULL(a);
  DCHECK_NOT_NULL(b);
  DCHECK_NOT_NULL(a->op());
  DCHECK_NOT_NULL(b->op());
  if (!a->op()->Equals(b->op())) return false;
  if (a->InputCount() != b->InputCount()) return false;
  Node::Inputs aInputs = a->inputs();
  Node::Inputs bInputs = b->inputs();

  auto aIt = aInputs.begin();
  auto bIt = bInputs.begin();
  auto aEnd = aInputs.end();

  for (; aIt != aEnd; ++aIt, ++bIt) {
    DCHECK_NOT_NULL(*aIt);
    DCHECK_NOT_NULL(*bIt);
    if ((*aIt)->id() != (*bIt)->id()) return false;
  }
  return true;
}

}  // namespace

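// A minimal usage sketch: the reducer is typically registered with a
// GraphReducer, which then drives Reduce() over the graph's nodes. The exact
// GraphReducer constructor arguments differ between V8 versions, so the lines
// below are illustrative only, not the canonical setup:
//
//   GraphReducer graph_reducer(temp_zone, graph);
//   ValueNumberingReducer value_numbering(temp_zone, graph->zone());
//   graph_reducer.AddReducer(&value_numbering);
//   graph_reducer.ReduceGraph();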
ValueNumberingReducer::ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone)
    : entries_(nullptr),
      capacity_(0),
      size_(0),
      temp_zone_(temp_zone),
      graph_zone_(graph_zone) {}

ValueNumberingReducer::~ValueNumberingReducer() {}


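// Tries to replace {node} with an equivalent node that was seen earlier. Only
// idempotent operators are value-numbered. Seen nodes are stored in an
// open-addressed hash set ({entries_}) with linear probing; {capacity_} is
// always a power of two, so {capacity_ - 1} can serve as the probe mask.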
Reduction ValueNumberingReducer::Reduce(Node* node) {
  if (!node->op()->HasProperty(Operator::kIdempotent)) return NoChange();

  const size_t hash = HashCode(node);
  if (!entries_) {
    DCHECK(size_ == 0);
    DCHECK(capacity_ == 0);
    // Allocate the initial entries and insert the first entry.
    capacity_ = kInitialCapacity;
    entries_ = temp_zone()->NewArray<Node*>(kInitialCapacity);
    memset(entries_, 0, sizeof(*entries_) * kInitialCapacity);
    entries_[hash & (kInitialCapacity - 1)] = node;
    size_ = 1;
    return NoChange();
  }

  DCHECK(size_ < capacity_);
  DCHECK(size_ + size_ / 4 < capacity_);

  const size_t mask = capacity_ - 1;
  size_t dead = capacity_;

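  // Probe linearly from the hash bucket until we find {node} itself, an
  // equivalent node, or an empty slot; {dead} remembers the first dead entry
  // encountered along the way so that its slot can be reused.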
  for (size_t i = hash & mask;; i = (i + 1) & mask) {
    Node* entry = entries_[i];
    if (!entry) {
      if (dead != capacity_) {
        // Reuse dead entry that we discovered on the way.
        entries_[dead] = node;
      } else {
        // Have to insert a new entry.
        entries_[i] = node;
        size_++;

        // Resize to keep the load factor below 80%.
        if (size_ + size_ / 4 >= capacity_) Grow();
      }
      DCHECK(size_ + size_ / 4 < capacity_);
      return NoChange();
    }

    if (entry == node) {
      // We need to check for a certain class of collisions here. Imagine the
      // following scenario:
      //
      //  1. We insert node1 with op1 and certain inputs at index i.
      //  2. We insert node2 with op2 and certain inputs at index i+1.
      //  3. Some other reducer changes node1 to op2 and the inputs from node2.
      //
      // Now we are called again to reduce node1, and we would return NoChange
      // in this case because we find node1 first, but what we should actually
      // do is return Replace(node2) instead.
      for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
        Node* entry = entries_[j];
        if (!entry) {
          // No collision, {node} is fine.
          return NoChange();
        }
        if (entry->IsDead()) {
          continue;
        }
        if (entry == node) {
          // Collision with ourselves, doesn't count as a real collision.
          // Opportunistically clean up the duplicate entry if we're at the end
          // of a bucket.
          if (!entries_[(j + 1) & mask]) {
            entries_[j] = nullptr;
            size_--;
            return NoChange();
          }
          // Otherwise, keep searching for another collision.
          continue;
        }
        if (Equals(entry, node)) {
          Reduction reduction = ReplaceIfTypesMatch(node, entry);
          if (reduction.Changed()) {
            // Overwrite the colliding entry with the actual entry.
            entries_[i] = entry;
            // Opportunistically clean up the duplicate entry if we're at the
            // end of a bucket.
            if (!entries_[(j + 1) & mask]) {
              entries_[j] = nullptr;
              size_--;
            }
          }
          return reduction;
        }
      }
    }

    // Skip dead entries, but remember their indices so we can reuse them.
    if (entry->IsDead()) {
      dead = i;
      continue;
    }
    if (Equals(entry, node)) {
      return ReplaceIfTypesMatch(node, entry);
    }
  }
}

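// Replaces {node} with {replacement}, narrowing {replacement}'s type to
// {node}'s type if necessary. If the two types are not comparable, the
// replacement is skipped to avoid losing type information.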
Reduction ValueNumberingReducer::ReplaceIfTypesMatch(Node* node,
                                                     Node* replacement) {
  // Make sure the replacement has at least as good a type as the original
  // node.
  if (NodeProperties::IsTyped(replacement) && NodeProperties::IsTyped(node)) {
    Type* replacement_type = NodeProperties::GetType(replacement);
    Type* node_type = NodeProperties::GetType(node);
    if (!replacement_type->Is(node_type)) {
      // Ideally, we would set an intersection of {replacement_type} and
      // {node_type} here. However, typing of NumberConstants assigns different
      // types to constants with the same value (it creates a fresh heap
      // number), which would make the intersection empty. To be safe, we use
      // the smaller type if the types are comparable.
      if (node_type->Is(replacement_type)) {
        NodeProperties::SetType(replacement, node_type);
      } else {
        // Types are not comparable => do not replace.
        return NoChange();
      }
    }
  }
  return Replace(replacement);
}


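// Doubles the capacity of the hash set and re-inserts all live entries.
// Dead nodes and duplicate entries are dropped while rehashing.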
void ValueNumberingReducer::Grow() {
  // Allocate a new block of entries with double the previous capacity.
  Node** const old_entries = entries_;
  size_t const old_capacity = capacity_;
  capacity_ *= 2;
  entries_ = temp_zone()->NewArray<Node*>(capacity_);
  memset(entries_, 0, sizeof(*entries_) * capacity_);
  size_ = 0;
  size_t const mask = capacity_ - 1;

  // Insert the old entries into the new block (skipping dead nodes).
  for (size_t i = 0; i < old_capacity; ++i) {
    Node* const old_entry = old_entries[i];
    if (!old_entry || old_entry->IsDead()) continue;
    for (size_t j = HashCode(old_entry) & mask;; j = (j + 1) & mask) {
      Node* const entry = entries_[j];
      if (entry == old_entry) {
        // Skip duplicate of the old entry.
        break;
      }
      if (!entry) {
        entries_[j] = old_entry;
        size_++;
        break;
      }
    }
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8