/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_analysis.h"

#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "induction_var_range.h"

namespace art {

void LoopAnalysis::CalculateLoopBasicProperties(HLoopInformation* loop_info,
                                                LoopAnalysisInfo* analysis_results,
                                                int64_t trip_count) {
  analysis_results->trip_count_ = trip_count;

  for (HBlocksInLoopIterator block_it(*loop_info);
       !block_it.Done();
       block_it.Advance()) {
    HBasicBlock* block = block_it.Current();

    // Check whether one of the successors is a loop exit.
    for (HBasicBlock* successor : block->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        analysis_results->exits_num_++;

        // We track the number of invariant loop exits which correspond to an HIf instruction
        // and can be eliminated by loop peeling; other control flow instructions are ignored
        // and will not cause loop peeling to happen, as they either cannot be inside a loop,
        // by definition cannot be loop exits (unconditional instructions), or are not
        // beneficial for the optimization.
        HIf* hif = block->GetLastInstruction()->AsIf();
        if (hif != nullptr && !loop_info->Contains(*hif->InputAt(0)->GetBlock())) {
          analysis_results->invariant_exits_num_++;
        }
      }
    }

    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* instruction = it.Current();
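      // Record the presence of 64-bit (Int64) instructions; the default arch helper treats
      // loops containing them as non-beneficial for scalar peeling/unrolling (see
      // ArchDefaultLoopHelper::IsLoopNonBeneficialForScalarOpts below).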
      if (instruction->GetType() == DataType::Type::kInt64) {
        analysis_results->has_long_type_instructions_ = true;
      }
      if (MakesScalarPeelingUnrollingNonBeneficial(instruction)) {
        analysis_results->has_instructions_preventing_scalar_peeling_ = true;
        analysis_results->has_instructions_preventing_scalar_unrolling_ = true;
      }
      analysis_results->instr_num_++;
    }
    analysis_results->bb_num_++;
  }
}

int64_t LoopAnalysis::GetLoopTripCount(HLoopInformation* loop_info,
                                       const InductionVarRange* induction_range) {
  int64_t trip_count;
  if (!induction_range->HasKnownTripCount(loop_info, &trip_count)) {
    trip_count = LoopAnalysisInfo::kUnknownTripCount;
  }
  return trip_count;
}

// Default implementation of the loop helper; used for all targets unless a custom
// implementation is provided. Enables scalar loop peeling and unrolling with the most
// conservative heuristics.
class ArchDefaultLoopHelper : public ArchNoOptsLoopHelper {
 public:
  explicit ArchDefaultLoopHelper(const CodeGenerator& codegen) : ArchNoOptsLoopHelper(codegen) {}
  // Scalar loop unrolling parameters and heuristics.
  //
  // Maximum possible unrolling factor.
  static constexpr uint32_t kScalarMaxUnrollFactor = 2;
  // Loop's maximum instruction count. Loops with a higher count will not be peeled/unrolled.
  static constexpr uint32_t kScalarHeuristicMaxBodySizeInstr = 17;
  // Loop's maximum basic block count. Loops with a higher count will not be peeled/unrolled.
  static constexpr uint32_t kScalarHeuristicMaxBodySizeBlocks = 6;
  // Maximum number of instructions to be created as a result of full unrolling.
  static constexpr uint32_t kScalarHeuristicFullyUnrolledMaxInstrThreshold = 35;

  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* analysis_info) const override {
    return analysis_info->HasLongTypeInstructions() ||
           IsLoopTooBig(analysis_info,
                        kScalarHeuristicMaxBodySizeInstr,
                        kScalarHeuristicMaxBodySizeBlocks);
  }

  uint32_t GetScalarUnrollingFactor(const LoopAnalysisInfo* analysis_info) const override {
    int64_t trip_count = analysis_info->GetTripCount();
    // Unroll only loops with a known trip count.
    if (trip_count == LoopAnalysisInfo::kUnknownTripCount) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }
    uint32_t desired_unrolling_factor = kScalarMaxUnrollFactor;
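    // The static trip count must be a positive multiple of the factor so that the unrolled
    // loop needs no epilogue; e.g. trip_count == 7 with factor 2 is rejected below.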
    if (trip_count < desired_unrolling_factor || trip_count % desired_unrolling_factor != 0) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }

    return desired_unrolling_factor;
  }

  bool IsLoopPeelingEnabled() const override { return true; }

  bool IsFullUnrollingBeneficial(LoopAnalysisInfo* analysis_info) const override {
    int64_t trip_count = analysis_info->GetTripCount();
    // We assume that the trip count is known.
    DCHECK_NE(trip_count, LoopAnalysisInfo::kUnknownTripCount);
    size_t instr_num = analysis_info->GetNumberOfInstructions();
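    // Full unrolling creates roughly trip_count copies of the body, so the product below
    // estimates the resulting instruction count; e.g. 4 iterations of a 10-instruction body
    // (40 instructions) would exceed the threshold of 35 and be rejected.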
    return (trip_count * instr_num < kScalarHeuristicFullyUnrolledMaxInstrThreshold);
  }

 protected:
  bool IsLoopTooBig(LoopAnalysisInfo* loop_analysis_info,
                    size_t instr_threshold,
                    size_t bb_threshold) const {
    size_t instr_num = loop_analysis_info->GetNumberOfInstructions();
    size_t bb_num = loop_analysis_info->GetNumberOfBasicBlocks();
    return (instr_num >= instr_threshold || bb_num >= bb_threshold);
  }
};

// Custom implementation of the loop helper for the arm64 target. Enables heuristics for
// scalar loop peeling and unrolling and supports SIMD loop unrolling.
class Arm64LoopHelper : public ArchDefaultLoopHelper {
 public:
  explicit Arm64LoopHelper(const CodeGenerator& codegen) : ArchDefaultLoopHelper(codegen) {}
  // SIMD loop unrolling parameters and heuristics.
  //
  // Maximum possible unrolling factor.
  static constexpr uint32_t kArm64SimdMaxUnrollFactor = 8;
  // Loop's maximum instruction count. Loops with a higher count will not be unrolled.
  static constexpr uint32_t kArm64SimdHeuristicMaxBodySizeInstr = 50;

  // Loop's maximum instruction count. Loops with a higher count will not be peeled/unrolled.
  static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeInstr = 40;
  // Loop's maximum basic block count. Loops with a higher count will not be peeled/unrolled.
  static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;

  bool IsLoopNonBeneficialForScalarOpts(LoopAnalysisInfo* loop_analysis_info) const override {
    return IsLoopTooBig(loop_analysis_info,
                        kArm64ScalarHeuristicMaxBodySizeInstr,
                        kArm64ScalarHeuristicMaxBodySizeBlocks);
  }

  uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
                                  int64_t trip_count,
                                  uint32_t max_peel,
                                  uint32_t vector_length) const override {
    DCHECK_NE(vector_length, 0u);
    // TODO: Unroll loops in predicated vectorization mode.
    if (codegen_.SupportsPredicatedSIMD()) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }
    // Don't unroll with insufficient iterations: peeling consumes up to max_peel iterations,
    // and a 2x-unrolled vector loop needs at least 2 * vector_length iterations to run once.
    // TODO: Unroll loops with unknown trip count.
    if (trip_count < (2 * vector_length + max_peel)) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }
    // Don't unroll for large loop body size.
    uint32_t instruction_count = block->GetInstructions().CountSize();
    if (instruction_count >= kArm64SimdHeuristicMaxBodySizeInstr) {
      return LoopAnalysisInfo::kNoUnrollingFactor;
    }
    // Find a beneficial unroll factor with the following restrictions:
    //  - At least one iteration of the transformed loop should be executed.
    //  - The loop body shouldn't be "too big" (heuristic).

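    // uf1 caps the unrolled body size via the instruction-count heuristic; uf2 guarantees
    // that the unrolled vector loop executes at least once after peeling. For example, with
    // trip_count = 100, max_peel = 2, vector_length = 4 and a 10-instruction body:
    // uf1 = 50 / 10 = 5, uf2 = (100 - 2) / 4 = 24, and min(5, 24, 8) = 5 is truncated to 4.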
    uint32_t uf1 = kArm64SimdHeuristicMaxBodySizeInstr / instruction_count;
    uint32_t uf2 = (trip_count - max_peel) / vector_length;
    uint32_t unroll_factor =
        TruncToPowerOfTwo(std::min({uf1, uf2, kArm64SimdMaxUnrollFactor}));
    DCHECK_GE(unroll_factor, 1u);
    return unroll_factor;
  }
};

// Custom implementation of the loop helper for the X86_64 target. Enables heuristics for
// scalar loop peeling and unrolling and supports SIMD loop unrolling.
class X86_64LoopHelper : public ArchDefaultLoopHelper {
  // Mapping from the most commonly used IR instructions to their machine instruction counts.
  // A few IRs generate a different number of machine instructions depending on the input and
  // result types; we checked top Java apps and benchmarks and used the most frequently
  // generated instruction count.
  uint32_t GetMachineInstructionCount(HInstruction* inst) const {
    switch (inst->GetKind()) {
      case HInstruction::InstructionKind::kAbs:
        return 3;
      case HInstruction::InstructionKind::kAdd:
        return 1;
      case HInstruction::InstructionKind::kAnd:
        return 1;
      case HInstruction::InstructionKind::kArrayLength:
        return 1;
      case HInstruction::InstructionKind::kArrayGet:
        return 1;
      case HInstruction::InstructionKind::kArraySet:
        return 1;
      case HInstruction::InstructionKind::kBoundsCheck:
        return 2;
      case HInstruction::InstructionKind::kCheckCast:
        return 9;
      case HInstruction::InstructionKind::kDiv:
        return 8;
      case HInstruction::InstructionKind::kDivZeroCheck:
        return 2;
      case HInstruction::InstructionKind::kEqual:
        return 3;
      case HInstruction::InstructionKind::kGreaterThan:
        return 3;
      case HInstruction::InstructionKind::kGreaterThanOrEqual:
        return 3;
      case HInstruction::InstructionKind::kIf:
        return 2;
      case HInstruction::InstructionKind::kPredicatedInstanceFieldGet:
        // test + cond-jump + IFieldGet
        return 4;
      case HInstruction::InstructionKind::kInstanceFieldGet:
        return 2;
      case HInstruction::InstructionKind::kInstanceFieldSet:
        return 1;
      case HInstruction::InstructionKind::kLessThan:
        return 3;
      case HInstruction::InstructionKind::kLessThanOrEqual:
        return 3;
      case HInstruction::InstructionKind::kMax:
        return 2;
      case HInstruction::InstructionKind::kMin:
        return 2;
      case HInstruction::InstructionKind::kMul:
        return 1;
      case HInstruction::InstructionKind::kNotEqual:
        return 3;
      case HInstruction::InstructionKind::kOr:
        return 1;
      case HInstruction::InstructionKind::kRem:
        return 11;
      case HInstruction::InstructionKind::kSelect:
        return 2;
      case HInstruction::InstructionKind::kShl:
        return 1;
      case HInstruction::InstructionKind::kShr:
        return 1;
      case HInstruction::InstructionKind::kSub:
        return 1;
      case HInstruction::InstructionKind::kTypeConversion:
        return 1;
      case HInstruction::InstructionKind::kUShr:
        return 1;
      case HInstruction::InstructionKind::kVecReplicateScalar:
        return 2;
      case HInstruction::InstructionKind::kVecExtractScalar:
        return 1;
      case HInstruction::InstructionKind::kVecReduce:
        return 4;
      case HInstruction::InstructionKind::kVecNeg:
        return 2;
      case HInstruction::InstructionKind::kVecAbs:
        return 4;
      case HInstruction::InstructionKind::kVecNot:
        return 3;
      case HInstruction::InstructionKind::kVecAdd:
        return 1;
      case HInstruction::InstructionKind::kVecSub:
        return 1;
      case HInstruction::InstructionKind::kVecMul:
        return 1;
      case HInstruction::InstructionKind::kVecDiv:
        return 1;
      case HInstruction::InstructionKind::kVecMax:
        return 1;
      case HInstruction::InstructionKind::kVecMin:
        return 1;
      case HInstruction::InstructionKind::kVecOr:
        return 1;
      case HInstruction::InstructionKind::kVecXor:
        return 1;
      case HInstruction::InstructionKind::kVecShl:
        return 1;
      case HInstruction::InstructionKind::kVecShr:
        return 1;
      case HInstruction::InstructionKind::kVecLoad:
        return 1;
      case HInstruction::InstructionKind::kVecStore:
        return 1;
      case HInstruction::InstructionKind::kXor:
        return 1;
      default:
        return 1;
    }
  }

  // Maximum possible unrolling factor, expressed as a log2 exponent: pow(2, 2) = 4.
  static constexpr uint32_t kX86_64MaxUnrollFactor = 2;

  // According to the Intel® 64 and IA-32 Architectures Optimization Reference Manual,
  // excessive loop unrolling should be avoided so that the LSD (loop stream decoder) keeps
  // operating efficiently. This constant ensures that the unrolled loop's instructions do
  // not exceed the LSD size. For Intel Atom processors (Silvermont & Goldmont), the LSD
  // size is 28.
  // TODO: identify the architecture and LSD size at runtime.
  static constexpr uint32_t kX86_64UnrolledMaxBodySizeInstr = 28;

  // Loop's maximum basic block count. Loops with a higher count will not be partially
  // unrolled (unknown iteration count).
  static constexpr uint32_t kX86_64UnknownIterMaxBodySizeBlocks = 2;

  uint32_t GetUnrollingFactor(HLoopInformation* loop_info, HBasicBlock* header) const;

 public:
  explicit X86_64LoopHelper(const CodeGenerator& codegen) : ArchDefaultLoopHelper(codegen) {}

  uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
                                  int64_t trip_count,
                                  uint32_t max_peel,
                                  uint32_t vector_length) const override {
    DCHECK_NE(vector_length, 0u);
    HLoopInformation* loop_info = block->GetLoopInformation();
    DCHECK(loop_info);
    HBasicBlock* header = loop_info->GetHeader();
    DCHECK(header);
    uint32_t unroll_factor = 0;

    if ((trip_count == 0) || (trip_count == LoopAnalysisInfo::kUnknownTripCount)) {
      // Don't unroll for large loop body size.
      unroll_factor = GetUnrollingFactor(loop_info, header);
      if (unroll_factor <= 1) {
        return LoopAnalysisInfo::kNoUnrollingFactor;
      }
    } else {
      // Don't unroll with insufficient iterations.
      if (trip_count < (2 * vector_length + max_peel)) {
        return LoopAnalysisInfo::kNoUnrollingFactor;
      }

      // Don't unroll for large loop body size.
      uint32_t unroll_cnt = GetUnrollingFactor(loop_info, header);
      if (unroll_cnt <= 1) {
        return LoopAnalysisInfo::kNoUnrollingFactor;
      }

      // Find a beneficial unroll factor with the following restrictions:
      //  - At least one iteration of the transformed loop should be executed.
      //  - The loop body shouldn't be "too big" (heuristic).
      uint32_t uf2 = (trip_count - max_peel) / vector_length;
      unroll_factor = TruncToPowerOfTwo(std::min(uf2, unroll_cnt));
      DCHECK_GE(unroll_factor, 1u);
    }

    return unroll_factor;
  }
};

uint32_t X86_64LoopHelper::GetUnrollingFactor(HLoopInformation* loop_info,
                                              HBasicBlock* header) const {
  uint32_t num_inst = 0, num_inst_header = 0, num_inst_loop_body = 0;
  for (HBlocksInLoopIterator it(*loop_info); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    DCHECK(block);
    num_inst = 0;

    for (HInstructionIterator it1(block->GetInstructions()); !it1.Done(); it1.Advance()) {
      HInstruction* inst = it1.Current();
      DCHECK(inst);

      // A SuspendCheck inside the loop is handled together with the Goto. Ignore
      // SuspendCheck & Goto here, since the partially unrolled loop body will contain only
      // one Goto; the instruction count for that Goto is accounted for in the unroll factor
      // calculation below.
      if (inst->IsSuspendCheck() || inst->IsGoto()) {
        continue;
      }

      num_inst += GetMachineInstructionCount(inst);
    }

    if (block == header) {
      num_inst_header = num_inst;
    } else {
      num_inst_loop_body += num_inst;
    }
  }

  // Calculate the actual unroll factor.
  uint32_t unrolling_factor = kX86_64MaxUnrollFactor;
  uint32_t unrolling_inst = kX86_64UnrolledMaxBodySizeInstr;
  // "-3" for the single Goto instruction (which carries the SuspendCheck, see above).
  uint32_t desired_size = unrolling_inst - num_inst_header - 3;
  if (desired_size < (2 * num_inst_loop_body)) {
    return 1;
  }

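  // Pick the largest power-of-two factor, up to 2^kX86_64MaxUnrollFactor, such that
  // 2^unrolling_factor copies of the loop body still fit in the remaining LSD budget:
  // desired_size >> f >= body size is equivalent to body size * 2^f <= desired_size.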
  while (unrolling_factor > 0) {
    if ((desired_size >> unrolling_factor) >= num_inst_loop_body) {
      break;
    }
    unrolling_factor--;
  }

  return (1 << unrolling_factor);
}
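
// Factory for the per-ISA loop helper. The helper is arena-allocated via placement new, so
// its lifetime is tied to the arena and no explicit deletion is performed. A typical call
// site (a sketch; the real one lives in the loop optimization pass) might look like:
//
//   ArchNoOptsLoopHelper* helper =
//       ArchNoOptsLoopHelper::Create(codegen, graph->GetAllocator());
//   if (helper->IsLoopPeelingEnabled()) { /* consider peeling invariant exits */ }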
ArchNoOptsLoopHelper* ArchNoOptsLoopHelper::Create(const CodeGenerator& codegen,
                                                   ArenaAllocator* allocator) {
  InstructionSet isa = codegen.GetInstructionSet();
  switch (isa) {
    case InstructionSet::kArm64: {
      return new (allocator) Arm64LoopHelper(codegen);
    }
    case InstructionSet::kX86_64: {
      return new (allocator) X86_64LoopHelper(codegen);
    }
    default: {
      return new (allocator) ArchDefaultLoopHelper(codegen);
    }
  }
}

}  // namespace art