/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "global_value_numbering.h"

#include "local_value_numbering.h"

namespace art {

GlobalValueNumbering::GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
    : cu_(cu),
      mir_graph_(cu->mir_graph.get()),
      allocator_(allocator),
      bbs_processed_(0u),
      max_bbs_to_process_(kMaxBbsToProcessMultiplyFactor * mir_graph_->GetNumReachableBlocks()),
      last_value_(0u),
      modifications_allowed_(false),
      global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
      field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
      field_index_reverse_map_(allocator->Adapter()),
      array_location_map_(ArrayLocationComparator(), allocator->Adapter()),
      array_location_reverse_map_(allocator->Adapter()),
      ref_set_map_(std::less<ValueNameSet>(), allocator->Adapter()),
      lvns_(mir_graph_->GetNumBlocks(), nullptr, allocator->Adapter()),
      work_lvn_(nullptr),
      merge_lvns_(allocator->Adapter()) {
}

GlobalValueNumbering::~GlobalValueNumbering() {
  STLDeleteElements(&lvns_);
}

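// Set up the work LVN for the given basic block, merging the LVNs of the relevant predecessors
// that have already been processed. Returns nullptr if the block should be skipped: bad GVN
// state, no data flow info, the exit block, or the processing limit has been reached.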
LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
                                                             ScopedArenaAllocator* allocator) {
  if (UNLIKELY(!Good())) {
    return nullptr;
  }
  if (UNLIKELY(bb->data_flow_info == nullptr)) {
    return nullptr;
  }
  if (UNLIKELY(bb->block_type == kExitBlock)) {
    DCHECK(bb->first_mir_insn == nullptr);
    return nullptr;
  }
  if (UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
    last_value_ = kNoValue;  // Make the GVN bad so that Good() fails from now on.
    return nullptr;
  }
  if (allocator == nullptr) {
    allocator = allocator_;
  }
  DCHECK(work_lvn_.get() == nullptr);
  work_lvn_.reset(new (allocator) LocalValueNumbering(this, bb->id, allocator));
  if (bb->block_type == kEntryBlock) {
    if ((cu_->access_flags & kAccStatic) == 0) {
      // If non-static method, mark "this" as non-null.
      int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
      uint16_t value_name = work_lvn_->GetSRegValueName(this_reg);
      work_lvn_->SetValueNameNullChecked(value_name);
    }
  } else {
    // To avoid repeated allocation on the ArenaStack, reuse a single vector kept as a member.
    DCHECK(merge_lvns_.empty());
    // If we're running the full GVN, the RepeatingTopologicalSortIterator keeps the loop
    // head stack in the MIRGraph up to date and for a loop head we need to check whether
    // we're making the initial computation and need to merge only preceding blocks in the
    // topological order, or we're recalculating a loop head and need to merge all incoming
    // LVNs. When we're not at a loop head (including having an empty loop head stack) all
    // predecessors should be preceding blocks and we shall merge all of them anyway.
    //
    // If we're running the modification phase of the full GVN, the loop head stack will be
    // empty and we need to merge all incoming LVNs. If we're running just a simple LVN,
    // the loop head stack will also be empty and there will be nothing to merge anyway.
    bool use_all_predecessors = true;
    uint16_t loop_head_idx = 0u;  // Used only if !use_all_predecessors.
    if (mir_graph_->GetTopologicalSortOrderLoopHeadStack()->Size() != 0) {
      // Full GVN inside a loop, see if we're at the loop head for the first time.
      auto top = mir_graph_->GetTopologicalSortOrderLoopHeadStack()->Peek();
      loop_head_idx = top.first;
      bool recalculating = top.second;
      use_all_predecessors = recalculating ||
          loop_head_idx != mir_graph_->GetTopologicalSortOrderIndexes()->Get(bb->id);
    }
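    // Collect the LVNs of the predecessors that need to be merged into the work LVN.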
    GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
    for (BasicBlock* pred_bb = mir_graph_->GetBasicBlock(iter.Next());
         pred_bb != nullptr; pred_bb = mir_graph_->GetBasicBlock(iter.Next())) {
      if (lvns_[pred_bb->id] != nullptr &&
          (use_all_predecessors ||
           mir_graph_->GetTopologicalSortOrderIndexes()->Get(pred_bb->id) < loop_head_idx)) {
        merge_lvns_.push_back(lvns_[pred_bb->id]);
      }
    }
    // Determine merge type.
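    // A catch entry block uses a kCatchMerge. A block that contains nothing but a return,
    // optionally preceded by up to two Phis, uses a kReturnMerge. Everything else uses the
    // default kNormalMerge.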
    LocalValueNumbering::MergeType merge_type = LocalValueNumbering::kNormalMerge;
    if (bb->catch_entry) {
      merge_type = LocalValueNumbering::kCatchMerge;
    } else if (bb->last_mir_insn != nullptr &&
        (bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_VOID ||
         bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN ||
         bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_OBJECT ||
         bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_WIDE) &&
        (bb->first_mir_insn == bb->last_mir_insn ||
         (static_cast<int>(bb->first_mir_insn->dalvikInsn.opcode) == kMirOpPhi &&
          (bb->first_mir_insn->next == bb->last_mir_insn ||
           (static_cast<int>(bb->first_mir_insn->next->dalvikInsn.opcode) == kMirOpPhi &&
            bb->first_mir_insn->next->next == bb->last_mir_insn))))) {
      merge_type = LocalValueNumbering::kReturnMerge;
    }
    // At least one predecessor must have been processed before this bb.
    CHECK(!merge_lvns_.empty());
    if (merge_lvns_.size() == 1u) {
      work_lvn_->MergeOne(*merge_lvns_[0], merge_type);
      BasicBlock* pred_bb = mir_graph_->GetBasicBlock(merge_lvns_[0]->Id());
      if (HasNullCheckLastInsn(pred_bb, bb->id)) {
        int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
        uint16_t value_name = merge_lvns_[0]->GetSRegValueName(s_reg);
        work_lvn_->SetValueNameNullChecked(value_name);
      }
    } else {
      work_lvn_->Merge(merge_type);
    }
  }
  return work_lvn_.get();
}

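// Record the work LVN as the result for the basic block. Returns true if it differs from the
// LVN stored for this block by a previous iteration (or if there was none).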
bool GlobalValueNumbering::FinishBasicBlock(BasicBlock* bb) {
  DCHECK(work_lvn_ != nullptr);
  DCHECK_EQ(bb->id, work_lvn_->Id());
  ++bbs_processed_;
  merge_lvns_.clear();

  bool change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
  if (change) {
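    // Keep the new LVN; the previous one (if any) is deleted when old_lvn goes out of scope.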
    std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
    lvns_[bb->id] = work_lvn_.release();
  } else {
    work_lvn_.reset();
  }
  return change;
}

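// Map a field reference, i.e. the (declaring dex file, declaring field index, type) triple,
// to a small unique id, assigning the next free id on first use.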
uint16_t GlobalValueNumbering::GetFieldId(const MirFieldInfo& field_info, uint16_t type) {
  FieldReference key = { field_info.DeclaringDexFile(), field_info.DeclaringFieldIndex(), type };
  auto lb = field_index_map_.lower_bound(key);
  if (lb != field_index_map_.end() && !field_index_map_.key_comp()(key, lb->first)) {
    return lb->second;
  }
  DCHECK_LT(field_index_map_.size(), kNoValue);
  uint16_t id = field_index_map_.size();
  auto it = field_index_map_.PutBefore(lb, key, id);
  field_index_reverse_map_.push_back(&*it);
  return id;
}

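// Map an (array base, index) pair to a small unique location id, assigning the next free id
// on first use.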
uint16_t GlobalValueNumbering::GetArrayLocation(uint16_t base, uint16_t index) {
  auto cmp = array_location_map_.key_comp();
  ArrayLocation key = { base, index };
  auto lb = array_location_map_.lower_bound(key);
  if (lb != array_location_map_.end() && !cmp(key, lb->first)) {
    return lb->second;
  }
  uint16_t location = static_cast<uint16_t>(array_location_reverse_map_.size());
  DCHECK_EQ(location, array_location_reverse_map_.size());  // No overflow.
  auto it = array_location_map_.PutBefore(lb, key, location);
  array_location_reverse_map_.push_back(&*it);
  return location;
}

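// Check whether pred_bb ends with an IF_EQZ/IF_NEZ whose edge to succ_id is taken only when
// the tested reference is non-null, i.e. the branch acts as a null check for that successor.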
bool GlobalValueNumbering::HasNullCheckLastInsn(const BasicBlock* pred_bb,
                                                BasicBlockId succ_id) {
  if (pred_bb->block_type != kDalvikByteCode || pred_bb->last_mir_insn == nullptr) {
    return false;
  }
  Instruction::Code last_opcode = pred_bb->last_mir_insn->dalvikInsn.opcode;
  return ((last_opcode == Instruction::IF_EQZ && pred_bb->fall_through == succ_id) ||
          (last_opcode == Instruction::IF_NEZ && pred_bb->taken == succ_id));
}

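// Check whether the merged value names (one per LVN in merge_lvns_) are known to be non-null
// in all predecessors, either because they were null-checked there or because the edge into
// the current block is guarded by an IF_EQZ/IF_NEZ on the same value.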
bool GlobalValueNumbering::NullCheckedInAllPredecessors(
    const ScopedArenaVector<uint16_t>& merge_names) const {
  // Implicit parameters:
  //   - *work_lvn: the LVN for which we're checking predecessors.
  //   - merge_lvns_: the predecessor LVNs.
  DCHECK_EQ(merge_lvns_.size(), merge_names.size());
  for (size_t i = 0, size = merge_lvns_.size(); i != size; ++i) {
    const LocalValueNumbering* pred_lvn = merge_lvns_[i];
    uint16_t value_name = merge_names[i];
    if (!pred_lvn->IsValueNullChecked(value_name)) {
      // Check if the predecessor has an IF_EQZ/IF_NEZ as the last insn.
      const BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_lvn->Id());
      if (!HasNullCheckLastInsn(pred_bb, work_lvn_->Id())) {
        return false;
      }
      // IF_EQZ/IF_NEZ checks some sreg, see if that sreg contains the value_name.
      int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
      if (!pred_lvn->IsSregValue(s_reg, value_name)) {
        return false;
      }
    }
  }
  return true;
}

}  // namespace art