/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "execution_subgraph.h"

#include <algorithm>
#include <unordered_set>

#include "android-base/macros.h"
#include "base/arena_allocator.h"
#include "base/arena_bit_vector.h"
#include "base/globals.h"
#include "base/scoped_arena_allocator.h"
#include "nodes.h"

namespace art {

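// A new subgraph allows every successor edge (all bits set in
// allowed_successors_) and considers every block reachable. Only graphs whose
// blocks each have at most kMaxFilterableSuccessors successors are supported.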
ExecutionSubgraph::ExecutionSubgraph(HGraph* graph, ScopedArenaAllocator* allocator)
    : graph_(graph),
      allocator_(allocator),
      allowed_successors_(graph_->GetBlocks().size(),
                          ~(std::bitset<kMaxFilterableSuccessors> {}),
                          allocator_->Adapter(kArenaAllocLSA)),
      unreachable_blocks_(
          allocator_, graph_->GetBlocks().size(), /*expandable=*/ false, kArenaAllocLSA),
      valid_(true),
      needs_prune_(false),
      finalized_(false) {
  if (valid_) {
    DCHECK(std::all_of(graph->GetBlocks().begin(), graph->GetBlocks().end(), [](HBasicBlock* it) {
      return it == nullptr || it->GetSuccessors().size() <= kMaxFilterableSuccessors;
    }));
  }
}

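// Removes 'to_remove' from the subgraph: marks the block unreachable and
// clears the corresponding successor bit in each of its predecessors. Prune()
// must be called afterwards to restore internal consistency.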
void ExecutionSubgraph::RemoveBlock(const HBasicBlock* to_remove) {
  if (!valid_) {
    return;
  }
  uint32_t id = to_remove->GetBlockId();
  if (unreachable_blocks_.IsBitSet(id)) {
    if (kIsDebugBuild) {
      // Not strictly needed, but setting this in debug builds lets the
      // DCHECK(!needs_prune_) in other methods verify that Prune() is always
      // called after removing any block.
      needs_prune_ = true;
    }
    return;
  }
  unreachable_blocks_.SetBit(id);
  for (HBasicBlock* pred : to_remove->GetPredecessors()) {
    std::bitset<kMaxFilterableSuccessors> allowed_successors {};
    // ZipCount iterates over both the successors and their indices at the same time.
    for (auto [succ, i] : ZipCount(MakeIterationRange(pred->GetSuccessors()))) {
      if (succ != to_remove) {
        allowed_successors.set(i);
      }
    }
    LimitBlockSuccessors(pred, allowed_successors);
  }
}

// Removes sink nodes: blocks that, given the current edge restrictions,
// cannot appear on any path from the entry block to the exit block.
void ExecutionSubgraph::Prune() {
  if (UNLIKELY(!valid_)) {
    return;
  }
  needs_prune_ = false;
  // 'results' below records, for each block, the successor edges that were
  // both (1) explored and (2) reached the exit node.
  {
    // Allocator for temporary values.
    ScopedArenaAllocator temporaries(graph_->GetArenaStack());
    ScopedArenaVector<std::bitset<kMaxFilterableSuccessors>> results(
        graph_->GetBlocks().size(), temporaries.Adapter(kArenaAllocLSA));
    unreachable_blocks_.ClearAllBits();
    // Fills up the 'results' map with what we need to add to update
    // allowed_successors in order to prune sink nodes.
    bool start_reaches_end = false;
    // This is basically a DFS of the graph with some edges skipped.
    {
      const size_t num_blocks = graph_->GetBlocks().size();
      constexpr ssize_t kUnvisitedSuccIdx = -1;
      ArenaBitVector visiting(&temporaries, num_blocks, false, kArenaAllocLSA);
      // How many of the successors of each block we have already examined. This
      // has three states.
      // (1) kUnvisitedSuccIdx: we have not examined any edges,
      // (2) 0 <= val < # of successors: we have examined 'val' successors and are
      // currently examining successors_[val],
      // (3) kMaxFilterableSuccessors: we have examined all of the successors of
      // the block (the 'result' is final).
      ScopedArenaVector<ssize_t> last_succ_seen(
          num_blocks, kUnvisitedSuccIdx, temporaries.Adapter(kArenaAllocLSA));
      // A stack of which blocks we are visiting in this DFS traversal. Does not
      // include the current block. Used with last_succ_seen to figure out which
      // bits to set if we find a path to the end/loop.
      ScopedArenaVector<uint32_t> current_path(temporaries.Adapter(kArenaAllocLSA));
      // Just ensure we have enough space. The allocator will be cleared shortly
      // anyway so this is fast.
      current_path.reserve(num_blocks);
      // Current block we are examining. Modified only by 'push_block' and 'pop_block'.
      const HBasicBlock* cur_block = graph_->GetEntryBlock();
      // Used to note a recursive step where we will start iterating on 'blk',
      // saving where we currently are. We must 'continue' immediately after
      // calling this.
      auto push_block = [&](const HBasicBlock* blk) {
        DCHECK(std::find(current_path.cbegin(), current_path.cend(), cur_block->GetBlockId()) ==
               current_path.cend());
        if (kIsDebugBuild) {
          std::for_each(current_path.cbegin(), current_path.cend(), [&](auto id) {
            DCHECK_GT(last_succ_seen[id], kUnvisitedSuccIdx) << id;
            DCHECK_LT(last_succ_seen[id], static_cast<ssize_t>(kMaxFilterableSuccessors)) << id;
          });
        }
        current_path.push_back(cur_block->GetBlockId());
        visiting.SetBit(cur_block->GetBlockId());
        cur_block = blk;
      };
      // Used to note that we have fully explored a block and should return back
      // up. Sets cur_block appropriately. We must 'continue' immediately after
      // calling this.
      auto pop_block = [&]() {
        if (UNLIKELY(current_path.empty())) {
          // Should only happen if the entry block's successors are exhausted.
          DCHECK_GE(last_succ_seen[graph_->GetEntryBlock()->GetBlockId()],
                    static_cast<ssize_t>(graph_->GetEntryBlock()->GetSuccessors().size()));
          cur_block = nullptr;
        } else {
          const HBasicBlock* last = graph_->GetBlocks()[current_path.back()];
          visiting.ClearBit(current_path.back());
          current_path.pop_back();
          cur_block = last;
        }
      };
      // Mark the current path as a path to the end. This is in contrast to paths
      // that end in (e.g.) removed blocks.
      auto propagate_true = [&]() {
        for (uint32_t id : current_path) {
          DCHECK_GT(last_succ_seen[id], kUnvisitedSuccIdx);
          DCHECK_LT(last_succ_seen[id], static_cast<ssize_t>(kMaxFilterableSuccessors));
          results[id].set(last_succ_seen[id]);
        }
      };
      ssize_t num_entry_succ = graph_->GetEntryBlock()->GetSuccessors().size();
      // As long as the entry block has not explored all successors we still have
      // work to do.
      const uint32_t entry_block_id = graph_->GetEntryBlock()->GetBlockId();
      while (num_entry_succ > last_succ_seen[entry_block_id]) {
        DCHECK(cur_block != nullptr);
        uint32_t id = cur_block->GetBlockId();
        DCHECK((current_path.empty() && cur_block == graph_->GetEntryBlock()) ||
               current_path.front() == graph_->GetEntryBlock()->GetBlockId())
            << "current path size: " << current_path.size()
            << " cur_block id: " << cur_block->GetBlockId() << " entry id "
            << graph_->GetEntryBlock()->GetBlockId();
        if (visiting.IsBitSet(id)) {
          // TODO We should support infinite loops as well.
          start_reaches_end = false;
          break;
        }
        std::bitset<kMaxFilterableSuccessors>& result = results[id];
        if (cur_block == graph_->GetExitBlock()) {
          start_reaches_end = true;
          propagate_true();
          pop_block();
          continue;
        } else if (last_succ_seen[id] == kMaxFilterableSuccessors) {
          // Already fully explored.
          if (result.any()) {
            propagate_true();
          }
          pop_block();
          continue;
        }
        // NB This is a pointer; modifications through it update last_succ_seen.
        ssize_t* cur_succ = &last_succ_seen[id];
        std::bitset<kMaxFilterableSuccessors> succ_bitmap = GetAllowedSuccessors(cur_block);
        // Advance to the next allowed successor.
        while (++(*cur_succ) < static_cast<ssize_t>(kMaxFilterableSuccessors) &&
               !succ_bitmap.test(*cur_succ)) {
          DCHECK_GE(*cur_succ, 0);
        }
        if (*cur_succ >= static_cast<ssize_t>(cur_block->GetSuccessors().size())) {
          // No more successors. Mark that we've checked everything. Later visits
          // to this node can use the existing data.
          DCHECK_LE(*cur_succ, static_cast<ssize_t>(kMaxFilterableSuccessors));
          *cur_succ = kMaxFilterableSuccessors;
          pop_block();
          continue;
        }
        const HBasicBlock* nxt = cur_block->GetSuccessors()[*cur_succ];
        DCHECK(nxt != nullptr) << "id: " << *cur_succ
                               << " max: " << cur_block->GetSuccessors().size();
        if (visiting.IsBitSet(nxt->GetBlockId())) {
          // This is a loop. Mark it and continue on. Also mark the allowed
          // successor in this block's results.
          result.set(*cur_succ);
          propagate_true();
        } else {
          // Not a loop yet. Recur.
          push_block(nxt);
        }
      }
    }
    // If we can't reach the end then there is no path through the graph without
    // hitting excluded blocks.
    if (UNLIKELY(!start_reaches_end)) {
      valid_ = false;
      return;
    }
    // Mark blocks we didn't see in the reaches-end DFS above.
    for (const HBasicBlock* blk : graph_->GetBlocks()) {
      if (blk != nullptr &&
          results[blk->GetBlockId()].none() &&
          blk != graph_->GetExitBlock() &&
          blk != graph_->GetEntryBlock()) {
        // We never visited this block, so it must be unreachable.
        unreachable_blocks_.SetBit(blk->GetBlockId());
      }
    }
    // Write the new data.
    memcpy(allowed_successors_.data(),
           results.data(),
           results.size() * sizeof(std::bitset<kMaxFilterableSuccessors>));
  }
  RecalculateExcludedCohort();
}
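// Removes any block that lies between excluded blocks, i.e. one that is both
// reachable from some unreachable block and able to reach some unreachable
// block. This makes each excluded region convex, with clean entry and exit
// frontiers.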
void ExecutionSubgraph::RemoveConcavity() {
  if (UNLIKELY(!valid_)) {
    return;
  }
  DCHECK(!needs_prune_);
  for (const HBasicBlock* blk : graph_->GetBlocks()) {
    if (blk == nullptr || unreachable_blocks_.IsBitSet(blk->GetBlockId())) {
      continue;
    }
    uint32_t blkid = blk->GetBlockId();
    if (std::any_of(unreachable_blocks_.Indexes().begin(),
                    unreachable_blocks_.Indexes().end(),
                    [&](uint32_t skipped) { return graph_->PathBetween(skipped, blkid); }) &&
        std::any_of(unreachable_blocks_.Indexes().begin(),
                    unreachable_blocks_.Indexes().end(),
                    [&](uint32_t skipped) { return graph_->PathBetween(blkid, skipped); })) {
      RemoveBlock(blk);
    }
  }
  Prune();
}
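// Groups the unreachable blocks into connected components ("cohorts") and
// records each cohort's entry blocks (those with a predecessor outside the
// cohort) and exit blocks (those with a successor outside the cohort).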
void ExecutionSubgraph::RecalculateExcludedCohort() {
  DCHECK(!needs_prune_);
  excluded_list_.emplace(allocator_->Adapter(kArenaAllocLSA));
  ScopedArenaVector<ExcludedCohort>& res = excluded_list_.value();
  // Make a copy of unreachable_blocks_.
  ArenaBitVector unreachable(allocator_, graph_->GetBlocks().size(), false, kArenaAllocLSA);
  unreachable.Copy(&unreachable_blocks_);
  // Split the unreachable blocks into connected cohorts with a flood-fill.
  while (unreachable.IsAnyBitSet()) {
    res.emplace_back(allocator_, graph_);
    ExcludedCohort& cohort = res.back();
    // Beyond this point we only allocate for the queue, so create another
    // arena to save memory.
    ScopedArenaAllocator alloc(graph_->GetArenaStack());
    ScopedArenaQueue<const HBasicBlock*> worklist(alloc.Adapter(kArenaAllocLSA));
    // Select an arbitrary node.
    const HBasicBlock* first = graph_->GetBlocks()[unreachable.GetHighestBitSet()];
    worklist.push(first);
    do {
      // Flood-fill both forwards and backwards.
      const HBasicBlock* cur = worklist.front();
      worklist.pop();
      if (!unreachable.IsBitSet(cur->GetBlockId())) {
        // Already visited or reachable somewhere else.
        continue;
      }
      unreachable.ClearBit(cur->GetBlockId());
      cohort.blocks_.SetBit(cur->GetBlockId());
      // Don't bother filtering here; the unreachable check above handles it on
      // the next go-around.
      for (const HBasicBlock* pred : cur->GetPredecessors()) {
        worklist.push(pred);
      }
      for (const HBasicBlock* succ : cur->GetSuccessors()) {
        worklist.push(succ);
      }
    } while (!worklist.empty());
  }
  // Figure out entry & exit nodes.
  for (ExcludedCohort& cohort : res) {
    DCHECK(cohort.blocks_.IsAnyBitSet());
    auto is_external = [&](const HBasicBlock* ext) -> bool {
      return !cohort.blocks_.IsBitSet(ext->GetBlockId());
    };
    for (const HBasicBlock* blk : cohort.Blocks()) {
      const auto& preds = blk->GetPredecessors();
      const auto& succs = blk->GetSuccessors();
      if (std::any_of(preds.cbegin(), preds.cend(), is_external)) {
        cohort.entry_blocks_.SetBit(blk->GetBlockId());
      }
      if (std::any_of(succs.cbegin(), succs.cend(), is_external)) {
        cohort.exit_blocks_.SetBit(blk->GetBlockId());
      }
    }
  }
}

std::ostream& operator<<(std::ostream& os, const ExecutionSubgraph::ExcludedCohort& ex) {
  ex.Dump(os);
  return os;
}
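// Prints the cohort as '{ entry: [...], interior: [...], exit: [...] }';
// interior blocks are those that are neither entry nor exit blocks.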
void ExecutionSubgraph::ExcludedCohort::Dump(std::ostream& os) const {
  auto dump = [&](BitVecBlockRange arr) {
    os << "[";
    bool first = true;
    for (const HBasicBlock* b : arr) {
      if (!first) {
        os << ", ";
      }
      first = false;
      os << b->GetBlockId();
    }
    os << "]";
  };
  auto dump_blocks = [&]() {
    os << "[";
    bool first = true;
    for (const HBasicBlock* b : Blocks()) {
      if (!entry_blocks_.IsBitSet(b->GetBlockId()) && !exit_blocks_.IsBitSet(b->GetBlockId())) {
        if (!first) {
          os << ", ";
        }
        first = false;
        os << b->GetBlockId();
      }
    }
    os << "]";
  };

  os << "{ entry: ";
  dump(EntryBlocks());
  os << ", interior: ";
  dump_blocks();
  os << ", exit: ";
  dump(ExitBlocks());
  os << "}";
}

}  // namespace art