// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/value-numbering-reducer.h"

#include <cstring>

#include "src/base/functional.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

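// Hashes a node by combining the hash of its operator with the ids of all of
// its inputs, so that nodes that compare equal under Equals() below always
// hash alike.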
size_t HashCode(Node* node) {
  size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
  for (int j = 0; j < node->InputCount(); ++j) {
    h = base::hash_combine(h, node->InputAt(j)->id());
  }
  return h;
}


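// Two nodes are considered equal for value numbering if they have equal
// operators and pairwise identical inputs.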
bool Equals(Node* a, Node* b) {
  DCHECK_NOT_NULL(a);
  DCHECK_NOT_NULL(b);
  DCHECK_NOT_NULL(a->op());
  DCHECK_NOT_NULL(b->op());
  if (!a->op()->Equals(b->op())) return false;
  if (a->InputCount() != b->InputCount()) return false;
  for (int j = 0; j < a->InputCount(); ++j) {
    DCHECK_NOT_NULL(a->InputAt(j));
    DCHECK_NOT_NULL(b->InputAt(j));
    if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
  }
  return true;
}

}  // namespace

ValueNumberingReducer::ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone)
    : entries_(nullptr),
      capacity_(0),
      size_(0),
      temp_zone_(temp_zone),
      graph_zone_(graph_zone) {}

ValueNumberingReducer::~ValueNumberingReducer() {}


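// Looks up {node} in the hash set of previously seen nodes. The set uses open
// addressing with linear probing; {capacity_} is always a power of two, so
// {hash & (capacity_ - 1)} maps a hash to a slot index.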
Reduction ValueNumberingReducer::Reduce(Node* node) {
  if (!node->op()->HasProperty(Operator::kIdempotent)) return NoChange();

  const size_t hash = HashCode(node);
  if (!entries_) {
    DCHECK(size_ == 0);
    DCHECK(capacity_ == 0);
    // Allocate the initial entries and insert the first entry.
    capacity_ = kInitialCapacity;
    entries_ = temp_zone()->NewArray<Node*>(kInitialCapacity);
    memset(entries_, 0, sizeof(*entries_) * kInitialCapacity);
    entries_[hash & (kInitialCapacity - 1)] = node;
    size_ = 1;
    return NoChange();
  }

  DCHECK(size_ < capacity_);
  DCHECK(size_ + size_ / 4 < capacity_);

  const size_t mask = capacity_ - 1;
  size_t dead = capacity_;  // Sentinel value: no dead entry seen so far.

  for (size_t i = hash & mask;; i = (i + 1) & mask) {
    Node* entry = entries_[i];
    if (!entry) {
      if (dead != capacity_) {
        // Reuse a dead entry that we discovered on the way.
        entries_[dead] = node;
      } else {
        // Have to insert a new entry.
        entries_[i] = node;
        size_++;

        // Resize to keep the load factor below 80%.
        if (size_ + size_ / 4 >= capacity_) Grow();
      }
      DCHECK(size_ + size_ / 4 < capacity_);
      return NoChange();
    }

    if (entry == node) {
      // We need to check for a certain class of collisions here. Imagine the
      // following scenario:
      //
      //  1. We insert node1 with op1 and certain inputs at index i.
      //  2. We insert node2 with op2 and certain inputs at index i+1.
      //  3. Some other reducer changes node1 to op2 and the inputs from node2.
      //
      // Now we are called again to reduce node1, and we would return NoChange
      // in this case because we find node1 first, but what we should actually
      // do is return Replace(node2) instead.
      for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
        Node* entry = entries_[j];
        if (!entry) {
          // No collision, {node} is fine.
          return NoChange();
        }
        if (entry->IsDead()) {
          continue;
        }
        if (entry == node) {
          // Collision with ourselves, doesn't count as a real collision.
          // Opportunistically clean up the duplicate entry if we're at the end
          // of a bucket.
          if (!entries_[(j + 1) & mask]) {
            entries_[j] = nullptr;
            size_--;
            return NoChange();
          }
          // Otherwise, keep searching for another collision.
          continue;
        }
        if (Equals(entry, node)) {
          Reduction reduction = ReplaceIfTypesMatch(node, entry);
          if (reduction.Changed()) {
            // Overwrite the colliding entry with the actual entry.
            entries_[i] = entry;
            // Opportunistically clean up the duplicate entry if we're at the
            // end of a bucket.
            if (!entries_[(j + 1) & mask]) {
              entries_[j] = nullptr;
              size_--;
            }
          }
          return reduction;
        }
      }
    }

    // Skip dead entries, but remember their indices so we can reuse them.
    if (entry->IsDead()) {
      dead = i;
      continue;
    }
    if (Equals(entry, node)) {
      return ReplaceIfTypesMatch(node, entry);
    }
  }
}

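// Returns Replace({replacement}) only if doing so does not lose type
// information: if the types are comparable, the smaller one is kept on
// {replacement}; otherwise the replacement is rejected with NoChange().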
Reduction ValueNumberingReducer::ReplaceIfTypesMatch(Node* node,
                                                     Node* replacement) {
  // Make sure the replacement has a type at least as good as that of the
  // original node.
  if (NodeProperties::IsTyped(replacement) && NodeProperties::IsTyped(node)) {
    Type* replacement_type = NodeProperties::GetType(replacement);
    Type* node_type = NodeProperties::GetType(node);
    if (!replacement_type->Is(node_type)) {
      // Ideally, we would set an intersection of {replacement_type} and
      // {node_type} here. However, typing of NumberConstants assigns different
      // types to constants with the same value (it creates a fresh heap
      // number), which would make the intersection empty. To be safe, we use
      // the smaller type if the types are comparable.
      if (node_type->Is(replacement_type)) {
        NodeProperties::SetType(replacement, node_type);
      } else {
        // Types are not comparable => do not replace.
        return NoChange();
      }
    }
  }
  return Replace(replacement);
}


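// Doubles the capacity of the hash set and reinserts all live entries. Dead
// nodes and the duplicate slots created by the collision handling in Reduce
// are dropped along the way.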
void ValueNumberingReducer::Grow() {
  // Allocate a new block of entries with double the previous capacity.
  Node** const old_entries = entries_;
  size_t const old_capacity = capacity_;
  capacity_ *= 2;
  entries_ = temp_zone()->NewArray<Node*>(capacity_);
  memset(entries_, 0, sizeof(*entries_) * capacity_);
  size_ = 0;
  size_t const mask = capacity_ - 1;

  // Insert the old entries into the new block (skipping dead nodes).
  for (size_t i = 0; i < old_capacity; ++i) {
    Node* const old_entry = old_entries[i];
    if (!old_entry || old_entry->IsDead()) continue;
    for (size_t j = HashCode(old_entry) & mask;; j = (j + 1) & mask) {
      Node* const entry = entries_[j];
      if (entry == old_entry) {
        // Skip a duplicate of the old entry.
        break;
      }
      if (!entry) {
        entries_[j] = old_entry;
        size_++;
        break;
      }
    }
  }
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8