// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/value-numbering-reducer.h"

#include <cstring>

#include "src/base/functional.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"

namespace v8 {
namespace internal {
namespace compiler {

ValueNumberingReducer(Zone * temp_zone,Zone * graph_zone)17 ValueNumberingReducer::ValueNumberingReducer(Zone* temp_zone, Zone* graph_zone)
18     : entries_(nullptr),
19       capacity_(0),
20       size_(0),
21       temp_zone_(temp_zone),
22       graph_zone_(graph_zone) {}
23 
~ValueNumberingReducer()24 ValueNumberingReducer::~ValueNumberingReducer() {}
25 
26 
Reduce(Node * node)27 Reduction ValueNumberingReducer::Reduce(Node* node) {
28   if (!node->op()->HasProperty(Operator::kIdempotent)) return NoChange();
29 
30   const size_t hash = NodeProperties::HashCode(node);
31   if (!entries_) {
32     DCHECK_EQ(0, size_);
33     DCHECK_EQ(0, capacity_);
34     // Allocate the initial entries and insert the first entry.
35     capacity_ = kInitialCapacity;
36     entries_ = temp_zone()->NewArray<Node*>(kInitialCapacity);
37     memset(entries_, 0, sizeof(*entries_) * kInitialCapacity);
38     entries_[hash & (kInitialCapacity - 1)] = node;
39     size_ = 1;
40     return NoChange();
41   }
42 
43   DCHECK(size_ < capacity_);
44   DCHECK(size_ + size_ / 4 < capacity_);
45 
46   const size_t mask = capacity_ - 1;
47   size_t dead = capacity_;
48 
49   for (size_t i = hash & mask;; i = (i + 1) & mask) {
50     Node* entry = entries_[i];
51     if (!entry) {
52       if (dead != capacity_) {
53         // Reuse dead entry that we discovered on the way.
54         entries_[dead] = node;
55       } else {
56         // Have to insert a new entry.
57         entries_[i] = node;
58         size_++;
59 
60         // Resize to keep load factor below 80%
61         if (size_ + size_ / 4 >= capacity_) Grow();
62       }
63       DCHECK(size_ + size_ / 4 < capacity_);
64       return NoChange();
65     }
66 
67     if (entry == node) {
68       // We need to check for a certain class of collisions here. Imagine the
69       // following scenario:
70       //
71       //  1. We insert node1 with op1 and certain inputs at index i.
72       //  2. We insert node2 with op2 and certain inputs at index i+1.
73       //  3. Some other reducer changes node1 to op2 and the inputs from node2.
74       //
75       // Now we are called again to reduce node1, and we would return NoChange
76       // in this case because we find node1 first, but what we should actually
77       // do is return Replace(node2) instead.
78       for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
79         Node* entry = entries_[j];
80         if (!entry) {
81           // No collision, {node} is fine.
82           return NoChange();
83         }
84         if (entry->IsDead()) {
85           continue;
86         }
87         if (entry == node) {
88           // Collision with ourselves, doesn't count as a real collision.
89           // Opportunistically clean-up the duplicate entry if we're at the end
90           // of a bucket.
91           if (!entries_[(j + 1) & mask]) {
92             entries_[j] = nullptr;
93             size_--;
94             return NoChange();
95           }
96           // Otherwise, keep searching for another collision.
97           continue;
98         }
99         if (NodeProperties::Equals(entry, node)) {
100           Reduction reduction = ReplaceIfTypesMatch(node, entry);
101           if (reduction.Changed()) {
102             // Overwrite the colliding entry with the actual entry.
103             entries_[i] = entry;
104             // Opportunistically clean-up the duplicate entry if we're at the
105             // end of a bucket.
106             if (!entries_[(j + 1) & mask]) {
107               entries_[j] = nullptr;
108               size_--;
109             }
110           }
111           return reduction;
112         }
113       }
114     }
115 
116     // Skip dead entries, but remember their indices so we can reuse them.
117     if (entry->IsDead()) {
118       dead = i;
119       continue;
120     }
121     if (NodeProperties::Equals(entry, node)) {
122       return ReplaceIfTypesMatch(node, entry);
123     }
124   }
125 }
126 
ReplaceIfTypesMatch(Node * node,Node * replacement)127 Reduction ValueNumberingReducer::ReplaceIfTypesMatch(Node* node,
128                                                      Node* replacement) {
129   // Make sure the replacement has at least as good type as the original node.
130   if (NodeProperties::IsTyped(replacement) && NodeProperties::IsTyped(node)) {
131     Type replacement_type = NodeProperties::GetType(replacement);
132     Type node_type = NodeProperties::GetType(node);
133     if (!replacement_type.Is(node_type)) {
134       // Ideally, we would set an intersection of {replacement_type} and
135       // {node_type} here. However, typing of NumberConstants assigns different
136       // types to constants with the same value (it creates a fresh heap
137       // number), which would make the intersection empty. To be safe, we use
138       // the smaller type if the types are comparable.
139       if (node_type.Is(replacement_type)) {
140         NodeProperties::SetType(replacement, node_type);
141       } else {
142         // Types are not comparable => do not replace.
143         return NoChange();
144       }
145     }
146   }
147   return Replace(replacement);
148 }
149 
150 
Grow()151 void ValueNumberingReducer::Grow() {
152   // Allocate a new block of entries double the previous capacity.
153   Node** const old_entries = entries_;
154   size_t const old_capacity = capacity_;
155   capacity_ *= 2;
156   entries_ = temp_zone()->NewArray<Node*>(capacity_);
157   memset(entries_, 0, sizeof(*entries_) * capacity_);
158   size_ = 0;
159   size_t const mask = capacity_ - 1;
160 
161   // Insert the old entries into the new block (skipping dead nodes).
162   for (size_t i = 0; i < old_capacity; ++i) {
163     Node* const old_entry = old_entries[i];
164     if (!old_entry || old_entry->IsDead()) continue;
165     for (size_t j = NodeProperties::HashCode(old_entry) & mask;;
166          j = (j + 1) & mask) {
167       Node* const entry = entries_[j];
168       if (entry == old_entry) {
169         // Skip duplicate of the old entry.
170         break;
171       }
172       if (!entry) {
173         entries_[j] = old_entry;
174         size_++;
175         break;
176       }
177     }
178   }
179 }
180 
}  // namespace compiler
}  // namespace internal
}  // namespace v8