/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_optimization.h"

#include "arch/arm/instruction_set_features_arm.h"
#include "arch/arm64/instruction_set_features_arm64.h"
#include "arch/instruction_set.h"
#include "arch/mips/instruction_set_features_mips.h"
#include "arch/mips64/instruction_set_features_mips64.h"
#include "arch/x86/instruction_set_features_x86.h"
#include "arch/x86_64/instruction_set_features_x86_64.h"
#include "driver/compiler_options.h"
#include "linear_order.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"

namespace art {

// Enables vectorization (SIMDization) in the loop optimizer.
static constexpr bool kEnableVectorization = true;

//
// Static helpers.
//

// Base alignment for arrays/strings guaranteed by the Android runtime.
static uint32_t BaseAlignment() {
  return kObjectAlignment;
}

// Hidden offset for arrays/strings guaranteed by the Android runtime.
static uint32_t HiddenOffset(DataType::Type type, bool is_string_char_at) {
  return is_string_char_at
      ? mirror::String::ValueOffset().Uint32Value()
      : mirror::Array::DataOffset(DataType::Size(type)).Uint32Value();
}
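
// For instance, with these conventions the address of element i of a
// primitive array is computed as
//   base + HiddenOffset(type, /*is_string_char_at*/ false) + i * DataType::Size(type),
// where the hidden offset skips the object header and length field that
// precede the element data.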

// Remove the instruction from the graph. A bit more elaborate than the usual
// instruction removal, since there may be a cycle in the use structure.
static void RemoveFromCycle(HInstruction* instruction) {
  instruction->RemoveAsUserOfAllInputs();
  instruction->RemoveEnvironmentUsers();
  instruction->GetBlock()->RemoveInstructionOrPhi(instruction, /*ensure_safety=*/ false);
  RemoveEnvironmentUses(instruction);
  ResetEnvironmentInputRecords(instruction);
}

// Detects a goto block and sets succ to the single successor.
static bool IsGotoBlock(HBasicBlock* block, /*out*/ HBasicBlock** succ) {
  if (block->GetPredecessors().size() == 1 &&
      block->GetSuccessors().size() == 1 &&
      block->IsSingleGoto()) {
    *succ = block->GetSingleSuccessor();
    return true;
  }
  return false;
}

// Detect an early exit loop.
static bool IsEarlyExit(HLoopInformation* loop_info) {
  HBlocksInLoopReversePostOrderIterator it_loop(*loop_info);
  for (it_loop.Advance(); !it_loop.Done(); it_loop.Advance()) {
    for (HBasicBlock* successor : it_loop.Current()->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        return true;
      }
    }
  }
  return false;
}

// Forward declaration.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand);

// Detect a sign extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsSignExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by sign
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Uint8/Uint16 normally zero extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsInt<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsInt<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any signed expression sign-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kInt8:
      case DataType::Type::kInt16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of a signed expression sign-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && (from == DataType::Type::kInt8 ||
                             from == DataType::Type::kInt16 ||
                             from == DataType::Type::kInt32)) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kInt16:
        return type == DataType::Type::kUint16 &&
               from == DataType::Type::kUint16 &&
               IsZeroExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}
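
// Example for IsSignExtensionAndGet: with type == kInt8, the constant -1 is
// accepted (it fits in 8 signed bits) while the constant 255 is rejected,
// and an explicit conversion of an Int8 value to Int32 yields the narrow
// Int8 operand itself.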

// Detect a zero extension in instruction from the given type.
// Returns the promoted operand on success.
static bool IsZeroExtensionAndGet(HInstruction* instruction,
                                  DataType::Type type,
                                  /*out*/ HInstruction** operand) {
  // Accept any already wider constant that would be handled properly by zero
  // extension when represented in the *width* of the given narrower data type
  // (the fact that Int8/Int16 normally sign extend does not matter here).
  int64_t value = 0;
  if (IsInt64AndGet(instruction, /*out*/ &value)) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kInt8:
        if (IsUint<8>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
      case DataType::Type::kInt16:
        if (IsUint<16>(value)) {
          *operand = instruction;
          return true;
        }
        return false;
      default:
        return false;
    }
  }
  // An implicit widening conversion of any unsigned expression zero-extends.
  if (instruction->GetType() == type) {
    switch (type) {
      case DataType::Type::kUint8:
      case DataType::Type::kUint16:
        *operand = instruction;
        return true;
      default:
        return false;
    }
  }
  // An explicit widening conversion of an unsigned expression zero-extends.
  if (instruction->IsTypeConversion()) {
    HInstruction* conv = instruction->InputAt(0);
    DataType::Type from = conv->GetType();
    switch (instruction->GetType()) {
      case DataType::Type::kInt32:
      case DataType::Type::kInt64:
        if (type == from && from == DataType::Type::kUint16) {
          *operand = conv;
          return true;
        }
        return false;
      case DataType::Type::kUint16:
        return type == DataType::Type::kInt16 &&
               from == DataType::Type::kInt16 &&
               IsSignExtensionAndGet(instruction->InputAt(0), type, /*out*/ operand);
      default:
        return false;
    }
  }
  return false;
}

// Detect situations with same-extension narrower operands.
// Returns true on success and sets is_unsigned accordingly.
static bool IsNarrowerOperands(HInstruction* a,
                               HInstruction* b,
                               DataType::Type type,
                               /*out*/ HInstruction** r,
                               /*out*/ HInstruction** s,
                               /*out*/ bool* is_unsigned) {
  DCHECK(a != nullptr && b != nullptr);
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r) && IsSignExtensionAndGet(b, stype, s)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r) && IsZeroExtensionAndGet(b, utype, s)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}
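
// Example for IsNarrowerOperands: in Java code such as
//   byte[] b1, b2;  int s = b1[i] + b2[i];
// both addition operands are Int8 values implicitly sign-extended to Int32,
// so r and s are set to the narrow array loads and is_unsigned is false.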

// As above, single operand.
static bool IsNarrowerOperand(HInstruction* a,
                              DataType::Type type,
                              /*out*/ HInstruction** r,
                              /*out*/ bool* is_unsigned) {
  DCHECK(a != nullptr);
  // Look for a matching sign extension.
  DataType::Type stype = HVecOperation::ToSignedType(type);
  if (IsSignExtensionAndGet(a, stype, r)) {
    *is_unsigned = false;
    return true;
  }
  // Look for a matching zero extension.
  DataType::Type utype = HVecOperation::ToUnsignedType(type);
  if (IsZeroExtensionAndGet(a, utype, r)) {
    *is_unsigned = true;
    return true;
  }
  return false;
}

// Compute relative vector length based on type difference.
static uint32_t GetOtherVL(DataType::Type other_type, DataType::Type vector_type, uint32_t vl) {
  DCHECK(DataType::IsIntegralType(other_type));
  DCHECK(DataType::IsIntegralType(vector_type));
  DCHECK_GE(DataType::SizeShift(other_type), DataType::SizeShift(vector_type));
  return vl >> (DataType::SizeShift(other_type) - DataType::SizeShift(vector_type));
}
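
// Example for GetOtherVL: with vector_type kInt8 (size shift 0) and
// other_type kInt32 (size shift 2), a vector length of 16 byte lanes
// corresponds to 16 >> (2 - 0) = 4 int lanes in the same vector width.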

// Detect up to two added operands a and b and an accumulated constant c.
static bool IsAddConst(HInstruction* instruction,
                       /*out*/ HInstruction** a,
                       /*out*/ HInstruction** b,
                       /*out*/ int64_t* c,
                       int32_t depth = 8) {  // don't search too deep
  int64_t value = 0;
  // Enter add/sub while still within reasonable depth.
  if (depth > 0) {
    if (instruction->IsAdd()) {
      return IsAddConst(instruction->InputAt(0), a, b, c, depth - 1) &&
             IsAddConst(instruction->InputAt(1), a, b, c, depth - 1);
    } else if (instruction->IsSub() &&
               IsInt64AndGet(instruction->InputAt(1), &value)) {
      *c -= value;
      return IsAddConst(instruction->InputAt(0), a, b, c, depth - 1);
    }
  }
  // Otherwise, deal with leaf nodes.
  if (IsInt64AndGet(instruction, &value)) {
    *c += value;
    return true;
  } else if (*a == nullptr) {
    *a = instruction;
    return true;
  } else if (*b == nullptr) {
    *b = instruction;
    return true;
  }
  return false;  // too many operands
}
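
// Example for IsAddConst: for the expression (x + 1) + (y - 2), the
// traversal sets a = x and b = y, and accumulates c = 1 - 2 = -1.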

// Detect a + b + c with optional constant c.
static bool IsAddConst2(HGraph* graph,
                        HInstruction* instruction,
                        /*out*/ HInstruction** a,
                        /*out*/ HInstruction** b,
                        /*out*/ int64_t* c) {
  if (IsAddConst(instruction, a, b, c) && *a != nullptr) {
    if (*b == nullptr) {
      // Constant is usually already present, unless accumulated.
      *b = graph->GetConstant(instruction->GetType(), (*c));
      *c = 0;
    }
    return true;
  }
  return false;
}

// Detect a direct a - b or a hidden a - (-c).
static bool IsSubConst2(HGraph* graph,
                        HInstruction* instruction,
                        /*out*/ HInstruction** a,
                        /*out*/ HInstruction** b) {
  int64_t c = 0;
  if (instruction->IsSub()) {
    *a = instruction->InputAt(0);
    *b = instruction->InputAt(1);
    return true;
  } else if (IsAddConst(instruction, a, b, &c) && *a != nullptr && *b == nullptr) {
    // Constant for the hidden subtraction.
    *b = graph->GetConstant(instruction->GetType(), -c);
    return true;
  }
  return false;
}
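
// Example for IsSubConst2: the expression x + (-5) contains no explicit HSub,
// but IsAddConst leaves a = x, b = nullptr, c = -5, so b is materialized as
// the constant 5 and the whole expression is recognized as x - 5.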

// Detect reductions of the following forms,
//   x = x_phi + ..
//   x = x_phi - ..
static bool HasReductionFormat(HInstruction* reduction, HInstruction* phi) {
  if (reduction->IsAdd()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi) ||
           (reduction->InputAt(0) != phi && reduction->InputAt(1) == phi);
  } else if (reduction->IsSub()) {
    return (reduction->InputAt(0) == phi && reduction->InputAt(1) != phi);
  }
  return false;
}
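
// Example: the body of
//   for (int i = 0; i < n; i++) { sum += a[i]; }
// updates the reduction as sum_phi + a[i], which matches the first form.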

// Translates vector operation to reduction kind.
static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
  if (reduction->IsVecAdd() ||
      reduction->IsVecSub() ||
      reduction->IsVecSADAccumulate() ||
      reduction->IsVecDotProd()) {
    return HVecReduce::kSum;
  }
  LOG(FATAL) << "Unsupported SIMD reduction " << reduction->GetId();
  UNREACHABLE();
}

// Test vector restrictions.
static bool HasVectorRestrictions(uint64_t restrictions, uint64_t tested) {
  return (restrictions & tested) != 0;
}

// Insert an instruction.
static HInstruction* Insert(HBasicBlock* block, HInstruction* instruction) {
  DCHECK(block != nullptr);
  DCHECK(instruction != nullptr);
  block->InsertInstructionBefore(instruction, block->GetLastInstruction());
  return instruction;
}

// Check that instructions from the induction sets are fully removed: have no uses
// and no other instructions use them.
static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
  for (HInstruction* instr : *iset) {
    if (instr->GetBlock() != nullptr ||
        !instr->GetUses().empty() ||
        !instr->GetEnvUses().empty() ||
        HasEnvironmentUsedByOthers(instr)) {
      return false;
    }
  }
  return true;
}

// Tries to statically evaluate condition of the specified "HIf" for other condition checks.
static void TryToEvaluateIfCondition(HIf* instruction, HGraph* graph) {
  HInstruction* cond = instruction->InputAt(0);

  // If a condition 'cond' is evaluated in an HIf instruction then in the successors of the
  // IF_BLOCK we statically know the value of the condition 'cond' (TRUE in TRUE_SUCC, FALSE in
  // FALSE_SUCC). Using that we can replace another evaluation (use) EVAL of the same 'cond'
  // with TRUE value (FALSE value) if every path from the ENTRY_BLOCK to EVAL_BLOCK contains the
  // edge HIF_BLOCK->TRUE_SUCC (HIF_BLOCK->FALSE_SUCC).
  //     if (cond) {               if (cond) {
  //       if (cond) {}              if (1) {}
  //     } else {        =======>  } else {
  //       if (cond) {}              if (0) {}
  //     }                         }
  if (!cond->IsConstant()) {
    HBasicBlock* true_succ = instruction->IfTrueSuccessor();
    HBasicBlock* false_succ = instruction->IfFalseSuccessor();

    DCHECK_EQ(true_succ->GetPredecessors().size(), 1u);
    DCHECK_EQ(false_succ->GetPredecessors().size(), 1u);

    const HUseList<HInstruction*>& uses = cond->GetUses();
    for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
      HInstruction* user = it->GetUser();
      size_t index = it->GetIndex();
      HBasicBlock* user_block = user->GetBlock();
      // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput().
      ++it;
      if (true_succ->Dominates(user_block)) {
        user->ReplaceInput(graph->GetIntConstant(1), index);
      } else if (false_succ->Dominates(user_block)) {
        user->ReplaceInput(graph->GetIntConstant(0), index);
      }
    }
  }
}

// Peel the first 'count' iterations of the loop.
static void PeelByCount(HLoopInformation* loop_info,
                        int count,
                        InductionVarRange* induction_range) {
  for (int i = 0; i < count; i++) {
    // Perform peeling.
    PeelUnrollSimpleHelper helper(loop_info, induction_range);
    helper.DoPeeling();
  }
}

// Returns the narrower of the types of instructions a and b, looking through
// explicit type conversions.
static DataType::Type GetNarrowerType(HInstruction* a, HInstruction* b) {
  DataType::Type type = a->GetType();
  if (DataType::Size(b->GetType()) < DataType::Size(type)) {
    type = b->GetType();
  }
  if (a->IsTypeConversion() &&
      DataType::Size(a->InputAt(0)->GetType()) < DataType::Size(type)) {
    type = a->InputAt(0)->GetType();
  }
  if (b->IsTypeConversion() &&
      DataType::Size(b->InputAt(0)->GetType()) < DataType::Size(type)) {
    type = b->InputAt(0)->GetType();
  }
  return type;
}
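
// Example for GetNarrowerType: if a converts an Int8 load to Int32 and b is
// an Int16 load, the narrowest type involved is kInt8, which is returned.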

//
// Public methods.
//

HLoopOptimization::HLoopOptimization(HGraph* graph,
                                     const CompilerOptions* compiler_options,
                                     HInductionVarAnalysis* induction_analysis,
                                     OptimizingCompilerStats* stats,
                                     const char* name)
    : HOptimization(graph, name, stats),
      compiler_options_(compiler_options),
      induction_range_(induction_analysis),
      loop_allocator_(nullptr),
      global_allocator_(graph_->GetAllocator()),
      top_loop_(nullptr),
      last_loop_(nullptr),
      iset_(nullptr),
      reductions_(nullptr),
      simplified_(false),
      vector_length_(0),
      vector_refs_(nullptr),
      vector_static_peeling_factor_(0),
      vector_dynamic_peeling_candidate_(nullptr),
      vector_runtime_test_a_(nullptr),
      vector_runtime_test_b_(nullptr),
      vector_map_(nullptr),
      vector_permanent_map_(nullptr),
      vector_mode_(kSequential),
      vector_preheader_(nullptr),
      vector_header_(nullptr),
      vector_body_(nullptr),
      vector_index_(nullptr),
      arch_loop_helper_(ArchNoOptsLoopHelper::Create(compiler_options_ != nullptr
                                                          ? compiler_options_->GetInstructionSet()
                                                          : InstructionSet::kNone,
                                                      global_allocator_)) {
}

bool HLoopOptimization::Run() {
  // Skip if there is no loop or the graph has try-catch/irreducible loops.
  // TODO: make this less of a sledgehammer.
  if (!graph_->HasLoops() || graph_->HasTryCatch() || graph_->HasIrreducibleLoops()) {
    return false;
  }

  // Phase-local allocator.
  ScopedArenaAllocator allocator(graph_->GetArenaStack());
  loop_allocator_ = &allocator;

  // Perform loop optimizations.
  bool didLoopOpt = LocalRun();
  if (top_loop_ == nullptr) {
    graph_->SetHasLoops(false);  // no more loops
  }

  // Detach.
  loop_allocator_ = nullptr;
  last_loop_ = top_loop_ = nullptr;

  return didLoopOpt;
}

//
// Loop setup and traversal.
//

bool HLoopOptimization::LocalRun() {
  bool didLoopOpt = false;
  // Build the linear order using the phase-local allocator. This step enables building
  // a loop hierarchy that properly reflects the outer-inner and previous-next relation.
  ScopedArenaVector<HBasicBlock*> linear_order(loop_allocator_->Adapter(kArenaAllocLinearOrder));
  LinearizeGraph(graph_, &linear_order);

  // Build the loop hierarchy.
  for (HBasicBlock* block : linear_order) {
    if (block->IsLoopHeader()) {
      AddLoop(block->GetLoopInformation());
    }
  }

  // Traverse the loop hierarchy inner-to-outer and optimize. Traversal can use
  // temporary data structures using the phase-local allocator. All new HIR
  // should use the global allocator.
  if (top_loop_ != nullptr) {
    ScopedArenaSet<HInstruction*> iset(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> reds(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSet<ArrayReference> refs(loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> map(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    ScopedArenaSafeMap<HInstruction*, HInstruction*> perm(
        std::less<HInstruction*>(), loop_allocator_->Adapter(kArenaAllocLoopOptimization));
    // Attach.
    iset_ = &iset;
    reductions_ = &reds;
    vector_refs_ = &refs;
    vector_map_ = &map;
    vector_permanent_map_ = &perm;
    // Traverse.
    didLoopOpt = TraverseLoopsInnerToOuter(top_loop_);
    // Detach.
    iset_ = nullptr;
    reductions_ = nullptr;
    vector_refs_ = nullptr;
    vector_map_ = nullptr;
    vector_permanent_map_ = nullptr;
  }
  return didLoopOpt;
}

void HLoopOptimization::AddLoop(HLoopInformation* loop_info) {
  DCHECK(loop_info != nullptr);
  LoopNode* node = new (loop_allocator_) LoopNode(loop_info);
  if (last_loop_ == nullptr) {
    // First loop.
    DCHECK(top_loop_ == nullptr);
    last_loop_ = top_loop_ = node;
  } else if (loop_info->IsIn(*last_loop_->loop_info)) {
    // Inner loop.
    node->outer = last_loop_;
    DCHECK(last_loop_->inner == nullptr);
    last_loop_ = last_loop_->inner = node;
  } else {
    // Subsequent loop.
    while (last_loop_->outer != nullptr && !loop_info->IsIn(*last_loop_->outer->loop_info)) {
      last_loop_ = last_loop_->outer;
    }
    node->outer = last_loop_->outer;
    node->previous = last_loop_;
    DCHECK(last_loop_->next == nullptr);
    last_loop_ = last_loop_->next = node;
  }
}
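
// Example: for the nest
//   for (...) {        // A
//     for (...) { }    // B
//     for (...) { }    // C
//   }
// AddLoop records B as the inner child of A and C as the next sibling of B,
// with both B and C sharing A as their outer loop.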

void HLoopOptimization::RemoveLoop(LoopNode* node) {
  DCHECK(node != nullptr);
  DCHECK(node->inner == nullptr);
  if (node->previous != nullptr) {
    // Within sequence.
    node->previous->next = node->next;
    if (node->next != nullptr) {
      node->next->previous = node->previous;
    }
  } else {
    // First of sequence.
    if (node->outer != nullptr) {
      node->outer->inner = node->next;
    } else {
      top_loop_ = node->next;
    }
    if (node->next != nullptr) {
      node->next->outer = node->outer;
      node->next->previous = nullptr;
    }
  }
}

bool HLoopOptimization::TraverseLoopsInnerToOuter(LoopNode* node) {
  bool changed = false;
  for ( ; node != nullptr; node = node->next) {
    // Visit inner loops first. Recompute induction information for this
    // loop if the induction of any inner loop has changed.
    if (TraverseLoopsInnerToOuter(node->inner)) {
      induction_range_.ReVisit(node->loop_info);
      changed = true;
    }
    // Repeat simplifications in the loop-body until no more changes occur.
    // Note that since each simplification consists of eliminating code (without
    // introducing new code), this process is always finite.
    do {
      simplified_ = false;
      SimplifyInduction(node);
      SimplifyBlocks(node);
      changed = simplified_ || changed;
    } while (simplified_);
    // Optimize inner loop.
    if (node->inner == nullptr) {
      changed = OptimizeInnerLoop(node) || changed;
    }
  }
  return changed;
}

//
// Optimization.
//

void HLoopOptimization::SimplifyInduction(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Scan the phis in the header to find opportunities to simplify an induction
  // cycle that is only used outside the loop. Replace these uses, if any, with
  // the last value and remove the induction cycle.
  // Examples: for (int i = 0; x != null;   i++) { .... no i .... }
  //           for (int i = 0; i < 10; i++, k++) { .... no k .... } return k;
  for (HInstructionIterator it(header->GetPhis()); !it.Done(); it.Advance()) {
    HPhi* phi = it.Current()->AsPhi();
    if (TrySetPhiInduction(phi, /*restrict_uses*/ true) &&
        TryAssignLastValue(node->loop_info, phi, preheader, /*collect_loop_uses*/ false)) {
      // Note that it's ok to have replaced uses after the loop with the last value, without
      // being able to remove the cycle. Environment uses (which are the reason we may not be
      // able to remove the cycle) within the loop will still hold the right value. We must
      // have tried first, however, to replace outside uses.
      if (CanRemoveCycle()) {
        simplified_ = true;
        for (HInstruction* i : *iset_) {
          RemoveFromCycle(i);
        }
        DCHECK(CheckInductionSetFullyRemoved(iset_));
      }
    }
  }
}

void HLoopOptimization::SimplifyBlocks(LoopNode* node) {
  // Iterate over all basic blocks in the loop-body.
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();
    // Remove dead instructions from the loop-body.
    RemoveDeadInstructions(block->GetPhis());
    RemoveDeadInstructions(block->GetInstructions());
    // Remove trivial control flow blocks from the loop-body.
    if (block->GetPredecessors().size() == 1 &&
        block->GetSuccessors().size() == 1 &&
        block->GetSingleSuccessor()->GetPredecessors().size() == 1) {
      simplified_ = true;
      block->MergeWith(block->GetSingleSuccessor());
    } else if (block->GetSuccessors().size() == 2) {
      // Trivial if block can be bypassed to either branch.
      HBasicBlock* succ0 = block->GetSuccessors()[0];
      HBasicBlock* succ1 = block->GetSuccessors()[1];
      HBasicBlock* meet0 = nullptr;
      HBasicBlock* meet1 = nullptr;
      if (succ0 != succ1 &&
          IsGotoBlock(succ0, &meet0) &&
          IsGotoBlock(succ1, &meet1) &&
          meet0 == meet1 &&  // meets again
          meet0 != block &&  // no self-loop
          meet0->GetPhis().IsEmpty()) {  // not used for merging
        simplified_ = true;
        succ0->DisconnectAndDelete();
        if (block->Dominates(meet0)) {
          block->RemoveDominatedBlock(meet0);
          succ1->AddDominatedBlock(meet0);
          meet0->SetDominator(succ1);
        }
      }
    }
  }
}

bool HLoopOptimization::TryOptimizeInnerLoopFinite(LoopNode* node) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();
  // Ensure loop header logic is finite.
  int64_t trip_count = 0;
  if (!induction_range_.IsFinite(node->loop_info, &trip_count)) {
    return false;
  }
  // Ensure there is only a single loop-body (besides the header).
  HBasicBlock* body = nullptr;
  for (HBlocksInLoopIterator it(*node->loop_info); !it.Done(); it.Advance()) {
    if (it.Current() != header) {
      if (body != nullptr) {
        return false;
      }
      body = it.Current();
    }
  }
  CHECK(body != nullptr);
  // Ensure there is only a single exit point.
  if (header->GetSuccessors().size() != 2) {
    return false;
  }
  HBasicBlock* exit = (header->GetSuccessors()[0] == body)
      ? header->GetSuccessors()[1]
      : header->GetSuccessors()[0];
  // Ensure exit can only be reached by exiting loop.
  if (exit->GetPredecessors().size() != 1) {
    return false;
  }
  // Detect either an empty loop (no side effects other than plain iteration) or
  // a trivial loop (just iterating once). Replace subsequent index uses, if any,
  // with the last value and remove the loop, possibly after unrolling its body.
  HPhi* main_phi = nullptr;
  if (TrySetSimpleLoopHeader(header, &main_phi)) {
    bool is_empty = IsEmptyBody(body);
    if (reductions_->empty() &&  // TODO: possible with some effort
        (is_empty || trip_count == 1) &&
        TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
      if (!is_empty) {
        // Unroll the loop-body, which sees initial value of the index.
        main_phi->ReplaceWith(main_phi->InputAt(0));
        preheader->MergeInstructionsWith(body);
      }
      body->DisconnectAndDelete();
      exit->RemovePredecessor(header);
      header->RemoveSuccessor(exit);
      header->RemoveDominatedBlock(exit);
      header->DisconnectAndDelete();
      preheader->AddSuccessor(exit);
      preheader->AddInstruction(new (global_allocator_) HGoto());
      preheader->AddDominatedBlock(exit);
      exit->SetDominator(preheader);
      RemoveLoop(node);  // update hierarchy
      return true;
    }
  }
  // Vectorize loop, if possible and valid.
  if (kEnableVectorization &&
      TrySetSimpleLoopHeader(header, &main_phi) &&
      ShouldVectorize(node, body, trip_count) &&
      TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
    Vectorize(node, body, exit, trip_count);
    graph_->SetHasSIMD(true);  // flag SIMD usage
    MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorized);
    return true;
  }
  return false;
}

bool HLoopOptimization::OptimizeInnerLoop(LoopNode* node) {
  return TryOptimizeInnerLoopFinite(node) || TryPeelingAndUnrolling(node);
}
//
// Scalar loop peeling and unrolling: generic part methods.
//

bool HLoopOptimization::TryUnrollingForBranchPenaltyReduction(LoopAnalysisInfo* analysis_info,
                                                              bool generate_code) {
  if (analysis_info->GetNumberOfExits() > 1) {
    return false;
  }

  uint32_t unrolling_factor = arch_loop_helper_->GetScalarUnrollingFactor(analysis_info);
  if (unrolling_factor == LoopAnalysisInfo::kNoUnrollingFactor) {
    return false;
  }

  if (generate_code) {
    // TODO: support other unrolling factors.
    DCHECK_EQ(unrolling_factor, 2u);

    // Perform unrolling.
    HLoopInformation* loop_info = analysis_info->GetLoopInfo();
    PeelUnrollSimpleHelper helper(loop_info, &induction_range_);
    helper.DoUnrolling();

    // Remove the redundant loop check after unrolling.
    HIf* copy_hif =
        helper.GetBasicBlockMap()->Get(loop_info->GetHeader())->GetLastInstruction()->AsIf();
    int32_t constant = loop_info->Contains(*copy_hif->IfTrueSuccessor()) ? 1 : 0;
    copy_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
  }
  return true;
}

bool HLoopOptimization::TryPeelingForLoopInvariantExitsElimination(LoopAnalysisInfo* analysis_info,
                                                                   bool generate_code) {
  HLoopInformation* loop_info = analysis_info->GetLoopInfo();
  if (!arch_loop_helper_->IsLoopPeelingEnabled()) {
    return false;
  }

  if (analysis_info->GetNumberOfInvariantExits() == 0) {
    return false;
  }

  if (generate_code) {
    // Perform peeling.
    PeelUnrollSimpleHelper helper(loop_info, &induction_range_);
    helper.DoPeeling();

    // Statically evaluate loop check after peeling for loop invariant condition.
    const SuperblockCloner::HInstructionMap* hir_map = helper.GetInstructionMap();
    for (auto entry : *hir_map) {
      HInstruction* copy = entry.second;
      if (copy->IsIf()) {
        TryToEvaluateIfCondition(copy->AsIf(), graph_);
      }
    }
  }

  return true;
}

bool HLoopOptimization::TryFullUnrolling(LoopAnalysisInfo* analysis_info, bool generate_code) {
  // Fully unroll loops with a known and small trip count.
  int64_t trip_count = analysis_info->GetTripCount();
  if (!arch_loop_helper_->IsLoopPeelingEnabled() ||
      trip_count == LoopAnalysisInfo::kUnknownTripCount ||
      !arch_loop_helper_->IsFullUnrollingBeneficial(analysis_info)) {
    return false;
  }

  if (generate_code) {
    // Peeling the first N iterations (where N equals the trip count) will effectively
    // eliminate the loop: after peeling we will have N sequential iterations copied into the loop
    // preheader and the original loop. The trip count of this loop will be 0 as the sequential
    // iterations are executed first and there are exactly N of them. Thus we can statically
    // evaluate the loop exit condition to 'false' and fully eliminate it.
    //
    // Here is an example of full unrolling of a loop with a trip count of 2:
    //
    //                                           loop_cond_1
    //                                           loop_body_1        <- First iteration.
    //                                               |
    //                             \                 v
    //                            ==\            loop_cond_2
    //                            ==/            loop_body_2        <- Second iteration.
    //                             /                 |
    //               <-                              v     <-
    //     loop_cond   \                         loop_cond   \      <- This cond is always false.
    //     loop_body  _/                         loop_body  _/
    //
    HLoopInformation* loop_info = analysis_info->GetLoopInfo();
    PeelByCount(loop_info, trip_count, &induction_range_);
    HIf* loop_hif = loop_info->GetHeader()->GetLastInstruction()->AsIf();
    int32_t constant = loop_info->Contains(*loop_hif->IfTrueSuccessor()) ? 0 : 1;
    loop_hif->ReplaceInput(graph_->GetIntConstant(constant), 0u);
  }

  return true;
}

bool HLoopOptimization::TryPeelingAndUnrolling(LoopNode* node) {
  // Don't run peeling/unrolling if compiler_options_ is nullptr (i.e., running under tests)
  // as InstructionSet is needed.
  if (compiler_options_ == nullptr) {
    return false;
  }

  HLoopInformation* loop_info = node->loop_info;
  int64_t trip_count = LoopAnalysis::GetLoopTripCount(loop_info, &induction_range_);
  LoopAnalysisInfo analysis_info(loop_info);
  LoopAnalysis::CalculateLoopBasicProperties(loop_info, &analysis_info, trip_count);

  if (analysis_info.HasInstructionsPreventingScalarOpts() ||
      arch_loop_helper_->IsLoopNonBeneficialForScalarOpts(&analysis_info)) {
    return false;
  }

  if (!TryFullUnrolling(&analysis_info, /*generate_code*/ false) &&
      !TryPeelingForLoopInvariantExitsElimination(&analysis_info, /*generate_code*/ false) &&
      !TryUnrollingForBranchPenaltyReduction(&analysis_info, /*generate_code*/ false)) {
    return false;
  }

  // Run 'IsLoopClonable' last, as it might be time-consuming.
  if (!PeelUnrollHelper::IsLoopClonable(loop_info)) {
    return false;
  }

  return TryFullUnrolling(&analysis_info) ||
         TryPeelingForLoopInvariantExitsElimination(&analysis_info) ||
         TryUnrollingForBranchPenaltyReduction(&analysis_info);
}

//
// Loop vectorization. The implementation is based on the book by Aart J.C. Bik:
// "The Software Vectorization Handbook. Applying Multimedia Extensions for Maximum Performance."
// Intel Press, June, 2004 (http://www.aartbik.com/).
//

bool HLoopOptimization::ShouldVectorize(LoopNode* node, HBasicBlock* block, int64_t trip_count) {
  // Reset vector bookkeeping.
  vector_length_ = 0;
  vector_refs_->clear();
  vector_static_peeling_factor_ = 0;
  vector_dynamic_peeling_candidate_ = nullptr;
  vector_runtime_test_a_ =
  vector_runtime_test_b_ = nullptr;

  // Phis in the loop-body prevent vectorization.
  if (!block->GetPhis().IsEmpty()) {
    return false;
  }

  // Scan the loop-body, starting a right-hand-side tree traversal at each left-hand-side
  // occurrence, which allows passing attributes down the use tree.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!VectorizeDef(node, it.Current(), /*generate_code*/ false)) {
      return false;  // failure to vectorize a left-hand-side
    }
  }

  // Prepare alignment analysis:
  // (1) find desired alignment (SIMD vector size in bytes).
  // (2) initialize static loop peeling votes (peeling factor that will
  //     make one particular reference aligned), never to exceed (1).
  // (3) variable to record how many references share same alignment.
  // (4) variable to record suitable candidate for dynamic loop peeling.
  uint32_t desired_alignment = GetVectorSizeInBytes();
  DCHECK_LE(desired_alignment, 16u);
  uint32_t peeling_votes[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
  uint32_t max_num_same_alignment = 0;
  const ArrayReference* peeling_candidate = nullptr;

  // Data dependence analysis. Find each pair of references with same type, where
  // at least one is a write. Each such pair denotes a possible data dependence.
  // This analysis exploits the property that differently typed arrays cannot be
  // aliased, as well as the property that references either point to the same
  // array or to two completely disjoint arrays, i.e., no partial aliasing.
  // Other than a few simple heuristics, no detailed subscript analysis is done.
  // The scan over references also prepares finding a suitable alignment strategy.
  for (auto i = vector_refs_->begin(); i != vector_refs_->end(); ++i) {
    uint32_t num_same_alignment = 0;
    // Scan over all next references.
    for (auto j = i; ++j != vector_refs_->end(); ) {
      if (i->type == j->type && (i->lhs || j->lhs)) {
        // Found same-typed a[i+x] vs. b[i+y], where at least one is a write.
        HInstruction* a = i->base;
        HInstruction* b = j->base;
        HInstruction* x = i->offset;
        HInstruction* y = j->offset;
        if (a == b) {
          // Found a[i+x] vs. a[i+y]. Accept if x == y (loop-independent data dependence).
          // Conservatively assume a loop-carried data dependence otherwise, and reject.
          if (x != y) {
            return false;
          }
          // Count the number of references that have the same alignment (since
          // base and offset are the same) and where at least one is a write, so,
          // e.g., a[i] = a[i] + b[i] counts a[i] but not b[i].
          num_same_alignment++;
        } else {
          // Found a[i+x] vs. b[i+y]. Accept if x == y (at worst loop-independent data dependence).
          // Conservatively assume a potential loop-carried data dependence otherwise, avoided by
          // generating an explicit a != b disambiguation runtime test on the two references.
          if (x != y) {
            // To avoid excessive overhead, we only accept one a != b test.
            if (vector_runtime_test_a_ == nullptr) {
              // First test found.
              vector_runtime_test_a_ = a;
              vector_runtime_test_b_ = b;
            } else if ((vector_runtime_test_a_ != a || vector_runtime_test_b_ != b) &&
                       (vector_runtime_test_a_ != b || vector_runtime_test_b_ != a)) {
              return false;  // second test would be needed
            }
          }
        }
      }
    }
    // Update information for finding suitable alignment strategy:
    // (1) update votes for static loop peeling,
    // (2) update suitable candidate for dynamic loop peeling.
    Alignment alignment = ComputeAlignment(i->offset, i->type, i->is_string_char_at);
    if (alignment.Base() >= desired_alignment) {
      // If the array/string object has a known, sufficient alignment, use the
      // initial offset to compute the static loop peeling vote (this always
      // works, since elements have natural alignment).
      uint32_t offset = alignment.Offset() & (desired_alignment - 1u);
      uint32_t vote = (offset == 0)
          ? 0
          : ((desired_alignment - offset) >> DataType::SizeShift(i->type));
      DCHECK_LT(vote, 16u);
      ++peeling_votes[vote];
    } else if (BaseAlignment() >= desired_alignment &&
               num_same_alignment > max_num_same_alignment) {
      // Otherwise, if the array/string object has a known, sufficient alignment
      // for just the base but with an unknown offset, record the candidate with
      // the most occurrences for dynamic loop peeling (again, the peeling always
      // works, since elements have natural alignment).
      max_num_same_alignment = num_same_alignment;
      peeling_candidate = &(*i);
    }
  }  // for i

  // Find a suitable alignment strategy.
  SetAlignmentStrategy(peeling_votes, peeling_candidate);

  // Does vectorization seem profitable?
  if (!IsVectorizationProfitable(trip_count)) {
    return false;
  }

  // Success!
  return true;
}

void HLoopOptimization::Vectorize(LoopNode* node,
                                  HBasicBlock* block,
                                  HBasicBlock* exit,
                                  int64_t trip_count) {
  HBasicBlock* header = node->loop_info->GetHeader();
  HBasicBlock* preheader = node->loop_info->GetPreHeader();

  // Pick a loop unrolling factor for the vector loop.
  uint32_t unroll = arch_loop_helper_->GetSIMDUnrollingFactor(
      block, trip_count, MaxNumberPeeled(), vector_length_);
  uint32_t chunk = vector_length_ * unroll;

  DCHECK(trip_count == 0 || (trip_count >= MaxNumberPeeled() + chunk));

  // A cleanup loop is needed, at least, for any unknown trip count or
  // for a known trip count with remainder iterations after vectorization.
  bool needs_cleanup = trip_count == 0 ||
      ((trip_count - vector_static_peeling_factor_) % chunk) != 0;

  // Adjust vector bookkeeping.
  HPhi* main_phi = nullptr;
  bool is_simple_loop_header = TrySetSimpleLoopHeader(header, &main_phi);  // refills sets
  DCHECK(is_simple_loop_header);
  vector_header_ = header;
  vector_body_ = block;

  // Loop induction type.
  DataType::Type induc_type = main_phi->GetType();
  DCHECK(induc_type == DataType::Type::kInt32 || induc_type == DataType::Type::kInt64)
      << induc_type;

  // Generate the trip count for static or dynamic loop peeling, if needed:
  // ptc = <peeling factor>;
  HInstruction* ptc = nullptr;
  if (vector_static_peeling_factor_ != 0) {
    // Static loop peeling for SIMD alignment (using the most suitable
    // fixed peeling factor found during prior alignment analysis).
    DCHECK(vector_dynamic_peeling_candidate_ == nullptr);
    ptc = graph_->GetConstant(induc_type, vector_static_peeling_factor_);
  } else if (vector_dynamic_peeling_candidate_ != nullptr) {
    // Dynamic loop peeling for SIMD alignment (using the most suitable
    // candidate found during prior alignment analysis):
    // rem = offset % ALIGN;    // adjusted as #elements
    // ptc = rem == 0 ? 0 : (ALIGN - rem);
    uint32_t shift = DataType::SizeShift(vector_dynamic_peeling_candidate_->type);
    uint32_t align = GetVectorSizeInBytes() >> shift;
    uint32_t hidden_offset = HiddenOffset(vector_dynamic_peeling_candidate_->type,
                                          vector_dynamic_peeling_candidate_->is_string_char_at);
    HInstruction* adjusted_offset = graph_->GetConstant(induc_type, hidden_offset >> shift);
    HInstruction* offset = Insert(preheader, new (global_allocator_) HAdd(
        induc_type, vector_dynamic_peeling_candidate_->offset, adjusted_offset));
    HInstruction* rem = Insert(preheader, new (global_allocator_) HAnd(
        induc_type, offset, graph_->GetConstant(induc_type, align - 1u)));
    HInstruction* sub = Insert(preheader, new (global_allocator_) HSub(
        induc_type, graph_->GetConstant(induc_type, align), rem));
    HInstruction* cond = Insert(preheader, new (global_allocator_) HEqual(
        rem, graph_->GetConstant(induc_type, 0)));
    ptc = Insert(preheader, new (global_allocator_) HSelect(
        cond, graph_->GetConstant(induc_type, 0), sub, kNoDexPc));
    needs_cleanup = true;  // don't know the exact amount
  }
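
  // For example, for a 2-byte element type and a 16-byte SIMD width, align
  // is 8 elements; an adjusted element offset with rem = 3 then yields
  // ptc = 8 - 3 = 5 iterations to peel before reaching an aligned boundary.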

  // Generate loop control:
  // stc = <trip-count>;
  // ptc = min(stc, ptc);
  // vtc = stc - (stc - ptc) % chunk;
  // i = 0;
  HInstruction* stc = induction_range_.GenerateTripCount(node->loop_info, graph_, preheader);
  HInstruction* vtc = stc;
  if (needs_cleanup) {
    DCHECK(IsPowerOfTwo(chunk));
    HInstruction* diff = stc;
    if (ptc != nullptr) {
      if (trip_count == 0) {
        HInstruction* cond = Insert(preheader, new (global_allocator_) HAboveOrEqual(stc, ptc));
        ptc = Insert(preheader, new (global_allocator_) HSelect(cond, ptc, stc, kNoDexPc));
      }
      diff = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, ptc));
    }
    HInstruction* rem = Insert(
        preheader, new (global_allocator_) HAnd(induc_type,
                                                diff,
                                                graph_->GetConstant(induc_type, chunk - 1)));
    vtc = Insert(preheader, new (global_allocator_) HSub(induc_type, stc, rem));
  }
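
  // For example, with stc = 100, no peeling (ptc == nullptr) and chunk = 8,
  // rem = 100 & 7 = 4 and vtc = 100 - 4 = 96, so the vector loop covers 96
  // iterations and the cleanup loop handles the remaining 4.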
  vector_index_ = graph_->GetConstant(induc_type, 0);

  // Generate runtime disambiguation test:
  // vtc = a != b ? vtc : 0;
  if (vector_runtime_test_a_ != nullptr) {
    HInstruction* rt = Insert(
        preheader,
        new (global_allocator_) HNotEqual(vector_runtime_test_a_, vector_runtime_test_b_));
    vtc = Insert(preheader,
                 new (global_allocator_)
                 HSelect(rt, vtc, graph_->GetConstant(induc_type, 0), kNoDexPc));
    needs_cleanup = true;
  }

  // Generate alignment peeling loop, if needed:
  // for ( ; i < ptc; i += 1)
  //    <loop-body>
  //
  // NOTE: The alignment forced by the peeling loop is preserved even if data is
  //       moved around during suspend checks, since all analysis was based on
  //       nothing more than the Android runtime alignment conventions.
  if (ptc != nullptr) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    ptc,
                    graph_->GetConstant(induc_type, 1),
                    LoopAnalysisInfo::kNoUnrollingFactor);
  }

  // Generate vector loop, possibly further unrolled:
  // for ( ; i < vtc; i += chunk)
  //    <vectorized-loop-body>
  vector_mode_ = kVector;
  GenerateNewLoop(node,
                  block,
                  graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                  vector_index_,
                  vtc,
                  graph_->GetConstant(induc_type, vector_length_),  // increment per unroll
                  unroll);
  HLoopInformation* vloop = vector_header_->GetLoopInformation();

  // Generate cleanup loop, if needed:
  // for ( ; i < stc; i += 1)
  //    <loop-body>
  if (needs_cleanup) {
    vector_mode_ = kSequential;
    GenerateNewLoop(node,
                    block,
                    graph_->TransformLoopForVectorization(vector_header_, vector_body_, exit),
                    vector_index_,
                    stc,
                    graph_->GetConstant(induc_type, 1),
                    LoopAnalysisInfo::kNoUnrollingFactor);
  }

  // Link reductions to their final uses.
  for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
    if (i->first->IsPhi()) {
      HInstruction* phi = i->first;
      HInstruction* repl = ReduceAndExtractIfNeeded(i->second);
      // Deal with regular uses.
      for (const HUseListNode<HInstruction*>& use : phi->GetUses()) {
        induction_range_.Replace(use.GetUser(), phi, repl);  // update induction use
      }
      phi->ReplaceWith(repl);
    }
  }

  // Remove the original loop by disconnecting the body block
  // and removing all instructions from the header.
  block->DisconnectAndDelete();
  while (!header->GetFirstInstruction()->IsGoto()) {
    header->RemoveInstruction(header->GetFirstInstruction());
  }

  // Update loop hierarchy: the old header now resides in the same outer loop
  // as the old preheader. Note that we don't bother putting sequential
  // loops back in the hierarchy at this point.
  header->SetLoopInformation(preheader->GetLoopInformation());  // outward
  node->loop_info = vloop;
}
1209 
GenerateNewLoop(LoopNode * node,HBasicBlock * block,HBasicBlock * new_preheader,HInstruction * lo,HInstruction * hi,HInstruction * step,uint32_t unroll)1210 void HLoopOptimization::GenerateNewLoop(LoopNode* node,
1211                                         HBasicBlock* block,
1212                                         HBasicBlock* new_preheader,
1213                                         HInstruction* lo,
1214                                         HInstruction* hi,
1215                                         HInstruction* step,
1216                                         uint32_t unroll) {
1217   DCHECK(unroll == 1 || vector_mode_ == kVector);
1218   DataType::Type induc_type = lo->GetType();
1219   // Prepare new loop.
1220   vector_preheader_ = new_preheader,
1221   vector_header_ = vector_preheader_->GetSingleSuccessor();
1222   vector_body_ = vector_header_->GetSuccessors()[1];
1223   HPhi* phi = new (global_allocator_) HPhi(global_allocator_,
1224                                            kNoRegNumber,
1225                                            0,
1226                                            HPhi::ToPhiType(induc_type));
1227   // Generate header and prepare body.
1228   // for (i = lo; i < hi; i += step)
1229   //    <loop-body>
1230   HInstruction* cond = new (global_allocator_) HAboveOrEqual(phi, hi);
1231   vector_header_->AddPhi(phi);
1232   vector_header_->AddInstruction(cond);
1233   vector_header_->AddInstruction(new (global_allocator_) HIf(cond));
1234   vector_index_ = phi;
1235   vector_permanent_map_->clear();  // preserved over unrolling
1236   for (uint32_t u = 0; u < unroll; u++) {
1237     // Generate instruction map.
1238     vector_map_->clear();
1239     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
1240       bool vectorized_def = VectorizeDef(node, it.Current(), /*generate_code*/ true);
1241       DCHECK(vectorized_def);
1242     }
1243     // Generate body from the instruction map, but in original program order.
1244     HEnvironment* env = vector_header_->GetFirstInstruction()->GetEnvironment();
1245     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
1246       auto i = vector_map_->find(it.Current());
1247       if (i != vector_map_->end() && !i->second->IsInBlock()) {
1248         Insert(vector_body_, i->second);
1249         // Deal with instructions that need an environment, such as the scalar intrinsics.
1250         if (i->second->NeedsEnvironment()) {
1251           i->second->CopyEnvironmentFromWithLoopPhiAdjustment(env, vector_header_);
1252         }
1253       }
1254     }
1255     // Generate the induction.
1256     vector_index_ = new (global_allocator_) HAdd(induc_type, vector_index_, step);
1257     Insert(vector_body_, vector_index_);
1258   }
1259   // Finalize phi inputs for the reductions (if any).
1260   for (auto i = reductions_->begin(); i != reductions_->end(); ++i) {
1261     if (!i->first->IsPhi()) {
1262       DCHECK(i->second->IsPhi());
1263       GenerateVecReductionPhiInputs(i->second->AsPhi(), i->first);
1264     }
1265   }
1266   // Finalize phi inputs for the loop index.
1267   phi->AddInput(lo);
1268   phi->AddInput(vector_index_);
1269   vector_index_ = phi;
1270 }
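
// A rough sketch (for intuition only) of the loop generated above, in C-style
// pseudocode, where lo, hi, step and unroll refer to the method parameters and
// >=u denotes the unsigned HAboveOrEqual comparison:
//
//   i = lo;
//   while (!(i >=u hi)) {    // header: phi, HAboveOrEqual(phi, hi), HIf
//     <cloned-loop-body>     // one copy per unroll factor, drawn from vector_map_
//     i += step;             // one HAdd appended after each unrolled copy
//   }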
1271 
1272 bool HLoopOptimization::VectorizeDef(LoopNode* node,
1273                                      HInstruction* instruction,
1274                                      bool generate_code) {
1275   // Accept a left-hand-side array base[index] for
1276   // (1) supported vector type,
1277   // (2) loop-invariant base,
1278   // (3) unit stride index,
1279   // (4) vectorizable right-hand-side value.
1280   uint64_t restrictions = kNone;
1281   if (instruction->IsArraySet()) {
1282     DataType::Type type = instruction->AsArraySet()->GetComponentType();
1283     HInstruction* base = instruction->InputAt(0);
1284     HInstruction* index = instruction->InputAt(1);
1285     HInstruction* value = instruction->InputAt(2);
1286     HInstruction* offset = nullptr;
1287     // For narrow types, explicit type conversion may have been
1288     // optimized away, so set the no-hi-bits restriction here.
1289     if (DataType::Size(type) <= 2) {
1290       restrictions |= kNoHiBits;
1291     }
1292     if (TrySetVectorType(type, &restrictions) &&
1293         node->loop_info->IsDefinedOutOfTheLoop(base) &&
1294         induction_range_.IsUnitStride(instruction, index, graph_, &offset) &&
1295         VectorizeUse(node, value, generate_code, type, restrictions)) {
1296       if (generate_code) {
1297         GenerateVecSub(index, offset);
1298         GenerateVecMem(instruction, vector_map_->Get(index), vector_map_->Get(value), offset, type);
1299       } else {
1300         vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ true));
1301       }
1302       return true;
1303     }
1304     return false;
1305   }
1306   // Accept a left-hand-side reduction for
1307   // (1) supported vector type,
1308   // (2) vectorizable right-hand-side value.
1309   auto redit = reductions_->find(instruction);
1310   if (redit != reductions_->end()) {
1311     DataType::Type type = instruction->GetType();
1312     // Recognize SAD idiom or direct reduction.
1313     if (VectorizeSADIdiom(node, instruction, generate_code, type, restrictions) ||
1314         VectorizeDotProdIdiom(node, instruction, generate_code, type, restrictions) ||
1315         (TrySetVectorType(type, &restrictions) &&
1316          VectorizeUse(node, instruction, generate_code, type, restrictions))) {
1317       if (generate_code) {
1318         HInstruction* new_red = vector_map_->Get(instruction);
1319         vector_permanent_map_->Put(new_red, vector_map_->Get(redit->second));
1320         vector_permanent_map_->Overwrite(redit->second, new_red);
1321       }
1322       return true;
1323     }
1324     return false;
1325   }
1326   // Branch back okay.
1327   if (instruction->IsGoto()) {
1328     return true;
1329   }
1330   // Otherwise accept only expressions with no effects outside the immediate loop-body.
1331   // Note that actual uses are inspected during right-hand-side tree traversal.
1332   return !IsUsedOutsideLoop(node->loop_info, instruction) && !instruction->DoesAnyWrite();
1333 }
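
// For intuition only: a definition that VectorizeDef accepts corresponds to a
// source-level store such as (with hypothetical loop-invariant arrays a and b)
//
//   for (int i = 0; i < n; i++) {
//     a[i] = b[i] + 1;  // ArraySet with unit-stride index and vectorizable RHS
//   }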
1334 
1335 bool HLoopOptimization::VectorizeUse(LoopNode* node,
1336                                      HInstruction* instruction,
1337                                      bool generate_code,
1338                                      DataType::Type type,
1339                                      uint64_t restrictions) {
1340   // Accept anything for which code has already been generated.
1341   if (generate_code) {
1342     if (vector_map_->find(instruction) != vector_map_->end()) {
1343       return true;
1344     }
1345   }
1346   // Continue the right-hand-side tree traversal, passing in proper
1347   // types and vector restrictions along the way. During code generation,
1348   // all new nodes are drawn from the global allocator.
1349   if (node->loop_info->IsDefinedOutOfTheLoop(instruction)) {
1350     // Accept invariant use, using scalar expansion.
1351     if (generate_code) {
1352       GenerateVecInv(instruction, type);
1353     }
1354     return true;
1355   } else if (instruction->IsArrayGet()) {
1356     // Deal with vector restrictions.
1357     bool is_string_char_at = instruction->AsArrayGet()->IsStringCharAt();
1358     if (is_string_char_at && HasVectorRestrictions(restrictions, kNoStringCharAt)) {
1359       return false;
1360     }
1361     // Accept a right-hand-side array base[index] for
1362     // (1) matching vector type (exact match or signed/unsigned integral type of the same size),
1363     // (2) loop-invariant base,
1364     // (3) unit stride index,
1365     // (4) vectorizable right-hand-side value.
1366     HInstruction* base = instruction->InputAt(0);
1367     HInstruction* index = instruction->InputAt(1);
1368     HInstruction* offset = nullptr;
1369     if (HVecOperation::ToSignedType(type) == HVecOperation::ToSignedType(instruction->GetType()) &&
1370         node->loop_info->IsDefinedOutOfTheLoop(base) &&
1371         induction_range_.IsUnitStride(instruction, index, graph_, &offset)) {
1372       if (generate_code) {
1373         GenerateVecSub(index, offset);
1374         GenerateVecMem(instruction, vector_map_->Get(index), nullptr, offset, type);
1375       } else {
1376         vector_refs_->insert(ArrayReference(base, offset, type, /*lhs*/ false, is_string_char_at));
1377       }
1378       return true;
1379     }
1380   } else if (instruction->IsPhi()) {
1381     // Accept particular phi operations.
1382     if (reductions_->find(instruction) != reductions_->end()) {
1383       // Deal with vector restrictions.
1384       if (HasVectorRestrictions(restrictions, kNoReduction)) {
1385         return false;
1386       }
1387       // Accept a reduction.
1388       if (generate_code) {
1389         GenerateVecReductionPhi(instruction->AsPhi());
1390       }
1391       return true;
1392     }
1393     // TODO: accept right-hand-side induction?
1394     return false;
1395   } else if (instruction->IsTypeConversion()) {
1396     // Accept particular type conversions.
1397     HTypeConversion* conversion = instruction->AsTypeConversion();
1398     HInstruction* opa = conversion->InputAt(0);
1399     DataType::Type from = conversion->GetInputType();
1400     DataType::Type to = conversion->GetResultType();
1401     if (DataType::IsIntegralType(from) && DataType::IsIntegralType(to)) {
1402       uint32_t size_vec = DataType::Size(type);
1403       uint32_t size_from = DataType::Size(from);
1404       uint32_t size_to = DataType::Size(to);
1405       // Accept an integral conversion
1406       // (1a) narrowing into the vector type, where "wider" operations cannot bring in higher order bits, or
1407       // (1b) widening from at least the vector type, and
1408       // (2) vectorizable operand.
1409       if ((size_to < size_from &&
1410            size_to == size_vec &&
1411            VectorizeUse(node, opa, generate_code, type, restrictions | kNoHiBits)) ||
1412           (size_to >= size_from &&
1413            size_from >= size_vec &&
1414            VectorizeUse(node, opa, generate_code, type, restrictions))) {
1415         if (generate_code) {
1416           if (vector_mode_ == kVector) {
1417             vector_map_->Put(instruction, vector_map_->Get(opa));  // operand pass-through
1418           } else {
1419             GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
1420           }
1421         }
1422         return true;
1423       }
1424     } else if (to == DataType::Type::kFloat32 && from == DataType::Type::kInt32) {
1425       DCHECK_EQ(to, type);
1426       // Accept int to float conversion for
1427       // (1) supported int,
1428       // (2) vectorizable operand.
1429       if (TrySetVectorType(from, &restrictions) &&
1430           VectorizeUse(node, opa, generate_code, from, restrictions)) {
1431         if (generate_code) {
1432           GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
1433         }
1434         return true;
1435       }
1436     }
1437     return false;
1438   } else if (instruction->IsNeg() || instruction->IsNot() || instruction->IsBooleanNot()) {
1439     // Accept unary operator for vectorizable operand.
1440     HInstruction* opa = instruction->InputAt(0);
1441     if (VectorizeUse(node, opa, generate_code, type, restrictions)) {
1442       if (generate_code) {
1443         GenerateVecOp(instruction, vector_map_->Get(opa), nullptr, type);
1444       }
1445       return true;
1446     }
1447   } else if (instruction->IsAdd() || instruction->IsSub() ||
1448              instruction->IsMul() || instruction->IsDiv() ||
1449              instruction->IsAnd() || instruction->IsOr()  || instruction->IsXor()) {
1450     // Deal with vector restrictions.
1451     if ((instruction->IsMul() && HasVectorRestrictions(restrictions, kNoMul)) ||
1452         (instruction->IsDiv() && HasVectorRestrictions(restrictions, kNoDiv))) {
1453       return false;
1454     }
1455     // Accept binary operator for vectorizable operands.
1456     HInstruction* opa = instruction->InputAt(0);
1457     HInstruction* opb = instruction->InputAt(1);
1458     if (VectorizeUse(node, opa, generate_code, type, restrictions) &&
1459         VectorizeUse(node, opb, generate_code, type, restrictions)) {
1460       if (generate_code) {
1461         GenerateVecOp(instruction, vector_map_->Get(opa), vector_map_->Get(opb), type);
1462       }
1463       return true;
1464     }
1465   } else if (instruction->IsShl() || instruction->IsShr() || instruction->IsUShr()) {
1466     // Recognize halving add idiom.
1467     if (VectorizeHalvingAddIdiom(node, instruction, generate_code, type, restrictions)) {
1468       return true;
1469     }
1470     // Deal with vector restrictions.
1471     HInstruction* opa = instruction->InputAt(0);
1472     HInstruction* opb = instruction->InputAt(1);
1473     HInstruction* r = opa;
1474     bool is_unsigned = false;
1475     if ((HasVectorRestrictions(restrictions, kNoShift)) ||
1476         (instruction->IsShr() && HasVectorRestrictions(restrictions, kNoShr))) {
1477       return false;  // unsupported instruction
1478     } else if (HasVectorRestrictions(restrictions, kNoHiBits)) {
1479       // Shifts right need extra care to account for higher order bits.
1480       // TODO: the less likely shr/unsigned and ushr/signed cases can be handled by flipping signedness.
1481       if (instruction->IsShr() &&
1482           (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
1483         return false;  // reject, unless all operands are sign-extension narrower
1484       } else if (instruction->IsUShr() &&
1485                  (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || !is_unsigned)) {
1486         return false;  // reject, unless all operands are zero-extension narrower
1487       }
1488     }
1489     // Accept shift operator for vectorizable/invariant operands.
1490     // TODO: accept symbolic, albeit loop invariant shift factors.
1491     DCHECK(r != nullptr);
1492     if (generate_code && vector_mode_ != kVector) {  // de-idiom
1493       r = opa;
1494     }
1495     int64_t distance = 0;
1496     if (VectorizeUse(node, r, generate_code, type, restrictions) &&
1497         IsInt64AndGet(opb, /*out*/ &distance)) {
1498       // Restrict shift distance to packed data type width.
1499       int64_t max_distance = DataType::Size(type) * 8;
1500       if (0 <= distance && distance < max_distance) {
1501         if (generate_code) {
1502           GenerateVecOp(instruction, vector_map_->Get(r), opb, type);
1503         }
1504         return true;
1505       }
1506     }
1507   } else if (instruction->IsAbs()) {
1508     // Deal with vector restrictions.
1509     HInstruction* opa = instruction->InputAt(0);
1510     HInstruction* r = opa;
1511     bool is_unsigned = false;
1512     if (HasVectorRestrictions(restrictions, kNoAbs)) {
1513       return false;
1514     } else if (HasVectorRestrictions(restrictions, kNoHiBits) &&
1515                (!IsNarrowerOperand(opa, type, &r, &is_unsigned) || is_unsigned)) {
1516       return false;  // reject, unless operand is sign-extension narrower
1517     }
1518     // Accept ABS(x) for vectorizable operand.
1519     DCHECK(r != nullptr);
1520     if (generate_code && vector_mode_ != kVector) {  // de-idiom
1521       r = opa;
1522     }
1523     if (VectorizeUse(node, r, generate_code, type, restrictions)) {
1524       if (generate_code) {
1525         GenerateVecOp(instruction,
1526                       vector_map_->Get(r),
1527                       nullptr,
1528                       HVecOperation::ToProperType(type, is_unsigned));
1529       }
1530       return true;
1531     }
1532   }
1533   return false;
1534 }
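
// For intuition only: with a byte vector type (size_vec == 1), the integral
// conversion cases above accept, e.g.,
//
//   a[i] = (byte) (b[i] + c[i]);  // (1a) narrowing int->byte; kNoHiBits makes
//                                 //      the unobservable high bits harmless
//
// while a narrowing conversion whose target is still wider than the vector type
// is rejected, since its higher order bits would become observable.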
1535 
1536 uint32_t HLoopOptimization::GetVectorSizeInBytes() {
1537   switch (compiler_options_->GetInstructionSet()) {
1538     case InstructionSet::kArm:
1539     case InstructionSet::kThumb2:
1540       return 8;  // 64-bit SIMD
1541     default:
1542       return 16;  // 128-bit SIMD
1543   }
1544 }
1545 
1546 bool HLoopOptimization::TrySetVectorType(DataType::Type type, uint64_t* restrictions) {
1547   const InstructionSetFeatures* features = compiler_options_->GetInstructionSetFeatures();
1548   switch (compiler_options_->GetInstructionSet()) {
1549     case InstructionSet::kArm:
1550     case InstructionSet::kThumb2:
1551       // Allow vectorization for all ARM devices, because Android assumes that
1552       // ARM 32-bit always supports advanced SIMD (64-bit SIMD).
1553       switch (type) {
1554         case DataType::Type::kBool:
1555         case DataType::Type::kUint8:
1556         case DataType::Type::kInt8:
1557           *restrictions |= kNoDiv | kNoReduction | kNoDotProd;
1558           return TrySetVectorLength(8);
1559         case DataType::Type::kUint16:
1560         case DataType::Type::kInt16:
1561           *restrictions |= kNoDiv | kNoStringCharAt | kNoReduction | kNoDotProd;
1562           return TrySetVectorLength(4);
1563         case DataType::Type::kInt32:
1564           *restrictions |= kNoDiv | kNoWideSAD;
1565           return TrySetVectorLength(2);
1566         default:
1567           break;
1568       }
1569       return false;
1570     case InstructionSet::kArm64:
1571       // Allow vectorization for all ARM devices, because Android assumes that
1572       // ARMv8 AArch64 always supports advanced SIMD (128-bit SIMD).
1573       switch (type) {
1574         case DataType::Type::kBool:
1575         case DataType::Type::kUint8:
1576         case DataType::Type::kInt8:
1577           *restrictions |= kNoDiv;
1578           return TrySetVectorLength(16);
1579         case DataType::Type::kUint16:
1580         case DataType::Type::kInt16:
1581           *restrictions |= kNoDiv;
1582           return TrySetVectorLength(8);
1583         case DataType::Type::kInt32:
1584           *restrictions |= kNoDiv;
1585           return TrySetVectorLength(4);
1586         case DataType::Type::kInt64:
1587           *restrictions |= kNoDiv | kNoMul;
1588           return TrySetVectorLength(2);
1589         case DataType::Type::kFloat32:
1590           *restrictions |= kNoReduction;
1591           return TrySetVectorLength(4);
1592         case DataType::Type::kFloat64:
1593           *restrictions |= kNoReduction;
1594           return TrySetVectorLength(2);
1595         default:
1596           return false;
1597       }
1598     case InstructionSet::kX86:
1599     case InstructionSet::kX86_64:
1600       // Allow vectorization for SSE4.1-enabled X86 devices only (128-bit SIMD).
1601       if (features->AsX86InstructionSetFeatures()->HasSSE4_1()) {
1602         switch (type) {
1603           case DataType::Type::kBool:
1604           case DataType::Type::kUint8:
1605           case DataType::Type::kInt8:
1606             *restrictions |= kNoMul |
1607                              kNoDiv |
1608                              kNoShift |
1609                              kNoAbs |
1610                              kNoSignedHAdd |
1611                              kNoUnroundedHAdd |
1612                              kNoSAD |
1613                              kNoDotProd;
1614             return TrySetVectorLength(16);
1615           case DataType::Type::kUint16:
1616           case DataType::Type::kInt16:
1617             *restrictions |= kNoDiv |
1618                              kNoAbs |
1619                              kNoSignedHAdd |
1620                              kNoUnroundedHAdd |
1621                              kNoSAD |
1622                              kNoDotProd;
1623             return TrySetVectorLength(8);
1624           case DataType::Type::kInt32:
1625             *restrictions |= kNoDiv | kNoSAD;
1626             return TrySetVectorLength(4);
1627           case DataType::Type::kInt64:
1628             *restrictions |= kNoMul | kNoDiv | kNoShr | kNoAbs | kNoSAD;
1629             return TrySetVectorLength(2);
1630           case DataType::Type::kFloat32:
1631             *restrictions |= kNoReduction;
1632             return TrySetVectorLength(4);
1633           case DataType::Type::kFloat64:
1634             *restrictions |= kNoReduction;
1635             return TrySetVectorLength(2);
1636           default:
1637             break;
1638         }  // switch type
1639       }
1640       return false;
1641     case InstructionSet::kMips:
1642       if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
1643         switch (type) {
1644           case DataType::Type::kBool:
1645           case DataType::Type::kUint8:
1646           case DataType::Type::kInt8:
1647             *restrictions |= kNoDiv | kNoDotProd;
1648             return TrySetVectorLength(16);
1649           case DataType::Type::kUint16:
1650           case DataType::Type::kInt16:
1651             *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
1652             return TrySetVectorLength(8);
1653           case DataType::Type::kInt32:
1654             *restrictions |= kNoDiv;
1655             return TrySetVectorLength(4);
1656           case DataType::Type::kInt64:
1657             *restrictions |= kNoDiv;
1658             return TrySetVectorLength(2);
1659           case DataType::Type::kFloat32:
1660             *restrictions |= kNoReduction;
1661             return TrySetVectorLength(4);
1662           case DataType::Type::kFloat64:
1663             *restrictions |= kNoReduction;
1664             return TrySetVectorLength(2);
1665           default:
1666             break;
1667         }  // switch type
1668       }
1669       return false;
1670     case InstructionSet::kMips64:
1671       if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
1672         switch (type) {
1673           case DataType::Type::kBool:
1674           case DataType::Type::kUint8:
1675           case DataType::Type::kInt8:
1676             *restrictions |= kNoDiv | kNoDotProd;
1677             return TrySetVectorLength(16);
1678           case DataType::Type::kUint16:
1679           case DataType::Type::kInt16:
1680             *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
1681             return TrySetVectorLength(8);
1682           case DataType::Type::kInt32:
1683             *restrictions |= kNoDiv;
1684             return TrySetVectorLength(4);
1685           case DataType::Type::kInt64:
1686             *restrictions |= kNoDiv;
1687             return TrySetVectorLength(2);
1688           case DataType::Type::kFloat32:
1689             *restrictions |= kNoReduction;
1690             return TrySetVectorLength(4);
1691           case DataType::Type::kFloat64:
1692             *restrictions |= kNoReduction;
1693             return TrySetVectorLength(2);
1694           default:
1695             break;
1696         }  // switch type
1697       }
1698       return false;
1699     default:
1700       return false;
1701   }  // switch instruction set
1702 }
1703 
1704 bool HLoopOptimization::TrySetVectorLength(uint32_t length) {
1705   DCHECK(IsPowerOfTwo(length) && length >= 2u);
1706   // First time set?
1707   if (vector_length_ == 0) {
1708     vector_length_ = length;
1709   }
1710   // Different types are acceptable within a loop-body, as long as all the corresponding vector
1711   // lengths match exactly to obtain a uniform traversal through the vector iteration space
1712   // (idiomatic exceptions to this rule can be handled by further unrolling sub-expressions).
1713   return vector_length_ == length;
1714 }
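
// Worked example (for intuition only): on a 128-bit SIMD target, a loop body
// such as
//
//   for (int i = 0; i < n; i++) { x[i] += 1; f[i] *= 2.0f; }  // int[] x, float[] f
//
// is acceptable, since both element types map to vector length 4, whereas also
// accessing a byte array (vector length 16) in the same body makes the lengths
// disagree and vectorization is given up.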
1715 
1716 void HLoopOptimization::GenerateVecInv(HInstruction* org, DataType::Type type) {
1717   if (vector_map_->find(org) == vector_map_->end()) {
1718     // In scalar code, just use a self pass-through for scalar invariants
1719     // (viz. expression remains itself).
1720     if (vector_mode_ == kSequential) {
1721       vector_map_->Put(org, org);
1722       return;
1723     }
1724     // In vector code, explicit scalar expansion is needed.
1725     HInstruction* vector = nullptr;
1726     auto it = vector_permanent_map_->find(org);
1727     if (it != vector_permanent_map_->end()) {
1728       vector = it->second;  // reuse during unrolling
1729     } else {
1730       // Generates ReplicateScalar( (optional_type_conv) org ).
1731       HInstruction* input = org;
1732       DataType::Type input_type = input->GetType();
1733       if (type != input_type && (type == DataType::Type::kInt64 ||
1734                                  input_type == DataType::Type::kInt64)) {
1735         input = Insert(vector_preheader_,
1736                        new (global_allocator_) HTypeConversion(type, input, kNoDexPc));
1737       }
1738       vector = new (global_allocator_)
1739           HVecReplicateScalar(global_allocator_, input, type, vector_length_, kNoDexPc);
1740       vector_permanent_map_->Put(org, Insert(vector_preheader_, vector));
1741     }
1742     vector_map_->Put(org, vector);
1743   }
1744 }
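
// For intuition only: given a loop-invariant x in
//
//   for (int i = 0; i < n; i++) { a[i] = x + b[i]; }
//
// vector code expands x into all lanes, conceptually xv = [x, x, .., x] via an
// HVecReplicateScalar in the preheader, while sequential (cleanup) code simply
// keeps using x itself.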
1745 
1746 void HLoopOptimization::GenerateVecSub(HInstruction* org, HInstruction* offset) {
1747   if (vector_map_->find(org) == vector_map_->end()) {
1748     HInstruction* subscript = vector_index_;
1749     int64_t value = 0;
1750     if (!IsInt64AndGet(offset, &value) || value != 0) {
1751       subscript = new (global_allocator_) HAdd(DataType::Type::kInt32, subscript, offset);
1752       if (org->IsPhi()) {
1753         Insert(vector_body_, subscript);  // lacks layout placeholder
1754       }
1755     }
1756     vector_map_->Put(org, subscript);
1757   }
1758 }
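
// For intuition only: for a unit-stride reference such as a[i + 4], the offset 4
// yields the subscript vector_index_ + 4 (a single HAdd), whereas a plain a[i]
// has constant offset zero and reuses vector_index_ directly.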
1759 
1760 void HLoopOptimization::GenerateVecMem(HInstruction* org,
1761                                        HInstruction* opa,
1762                                        HInstruction* opb,
1763                                        HInstruction* offset,
1764                                        DataType::Type type) {
1765   uint32_t dex_pc = org->GetDexPc();
1766   HInstruction* vector = nullptr;
1767   if (vector_mode_ == kVector) {
1768     // Vector store or load.
1769     bool is_string_char_at = false;
1770     HInstruction* base = org->InputAt(0);
1771     if (opb != nullptr) {
1772       vector = new (global_allocator_) HVecStore(
1773           global_allocator_, base, opa, opb, type, org->GetSideEffects(), vector_length_, dex_pc);
1774     } else {
1775       is_string_char_at = org->AsArrayGet()->IsStringCharAt();
1776       vector = new (global_allocator_) HVecLoad(global_allocator_,
1777                                                 base,
1778                                                 opa,
1779                                                 type,
1780                                                 org->GetSideEffects(),
1781                                                 vector_length_,
1782                                                 is_string_char_at,
1783                                                 dex_pc);
1784     }
1785     // Known (forced/adjusted/original) alignment?
1786     if (vector_dynamic_peeling_candidate_ != nullptr) {
1787       if (vector_dynamic_peeling_candidate_->offset == offset &&  // TODO: diffs too?
1788           DataType::Size(vector_dynamic_peeling_candidate_->type) == DataType::Size(type) &&
1789           vector_dynamic_peeling_candidate_->is_string_char_at == is_string_char_at) {
1790         vector->AsVecMemoryOperation()->SetAlignment(  // forced
1791             Alignment(GetVectorSizeInBytes(), 0));
1792       }
1793     } else {
1794       vector->AsVecMemoryOperation()->SetAlignment(  // adjusted/original
1795           ComputeAlignment(offset, type, is_string_char_at, vector_static_peeling_factor_));
1796     }
1797   } else {
1798     // Scalar store or load.
1799     DCHECK(vector_mode_ == kSequential);
1800     if (opb != nullptr) {
1801       DataType::Type component_type = org->AsArraySet()->GetComponentType();
1802       vector = new (global_allocator_) HArraySet(
1803           org->InputAt(0), opa, opb, component_type, org->GetSideEffects(), dex_pc);
1804     } else {
1805       bool is_string_char_at = org->AsArrayGet()->IsStringCharAt();
1806       vector = new (global_allocator_) HArrayGet(
1807           org->InputAt(0), opa, org->GetType(), org->GetSideEffects(), dex_pc, is_string_char_at);
1808     }
1809   }
1810   vector_map_->Put(org, vector);
1811 }
1812 
1813 void HLoopOptimization::GenerateVecReductionPhi(HPhi* phi) {
1814   DCHECK(reductions_->find(phi) != reductions_->end());
1815   DCHECK(reductions_->Get(phi->InputAt(1)) == phi);
1816   HInstruction* vector = nullptr;
1817   if (vector_mode_ == kSequential) {
1818     HPhi* new_phi = new (global_allocator_) HPhi(
1819         global_allocator_, kNoRegNumber, 0, phi->GetType());
1820     vector_header_->AddPhi(new_phi);
1821     vector = new_phi;
1822   } else {
1823     // Link vector reduction back to prior unrolled update, or a first phi.
1824     auto it = vector_permanent_map_->find(phi);
1825     if (it != vector_permanent_map_->end()) {
1826       vector = it->second;
1827     } else {
1828       HPhi* new_phi = new (global_allocator_) HPhi(
1829           global_allocator_, kNoRegNumber, 0, HVecOperation::kSIMDType);
1830       vector_header_->AddPhi(new_phi);
1831       vector = new_phi;
1832     }
1833   }
1834   vector_map_->Put(phi, vector);
1835 }
1836 
1837 void HLoopOptimization::GenerateVecReductionPhiInputs(HPhi* phi, HInstruction* reduction) {
1838   HInstruction* new_phi = vector_map_->Get(phi);
1839   HInstruction* new_init = reductions_->Get(phi);
1840   HInstruction* new_red = vector_map_->Get(reduction);
1841   // Link unrolled vector loop back to new phi.
1842   for (; !new_phi->IsPhi(); new_phi = vector_permanent_map_->Get(new_phi)) {
1843     DCHECK(new_phi->IsVecOperation());
1844   }
1845   // Prepare the new initialization.
1846   if (vector_mode_ == kVector) {
1847     // Generate a [initial, 0, .., 0] vector for add or
1848     // a [initial, initial, .., initial] vector for min/max.
1849     HVecOperation* red_vector = new_red->AsVecOperation();
1850     HVecReduce::ReductionKind kind = GetReductionKind(red_vector);
1851     uint32_t vector_length = red_vector->GetVectorLength();
1852     DataType::Type type = red_vector->GetPackedType();
1853     if (kind == HVecReduce::ReductionKind::kSum) {
1854       new_init = Insert(vector_preheader_,
1855                         new (global_allocator_) HVecSetScalars(global_allocator_,
1856                                                                &new_init,
1857                                                                type,
1858                                                                vector_length,
1859                                                                1,
1860                                                                kNoDexPc));
1861     } else {
1862       new_init = Insert(vector_preheader_,
1863                         new (global_allocator_) HVecReplicateScalar(global_allocator_,
1864                                                                     new_init,
1865                                                                     type,
1866                                                                     vector_length,
1867                                                                     kNoDexPc));
1868     }
1869   } else {
1870     new_init = ReduceAndExtractIfNeeded(new_init);
1871   }
1872   // Set the phi inputs.
1873   DCHECK(new_phi->IsPhi());
1874   new_phi->AsPhi()->AddInput(new_init);
1875   new_phi->AsPhi()->AddInput(new_red);
1876   // New feed value for next phi (safe mutation in iteration).
1877   reductions_->find(phi)->second = new_phi;
1878 }
1879 
1880 HInstruction* HLoopOptimization::ReduceAndExtractIfNeeded(HInstruction* instruction) {
1881   if (instruction->IsPhi()) {
1882     HInstruction* input = instruction->InputAt(1);
1883     if (HVecOperation::ReturnsSIMDValue(input)) {
1884       DCHECK(!input->IsPhi());
1885       HVecOperation* input_vector = input->AsVecOperation();
1886       uint32_t vector_length = input_vector->GetVectorLength();
1887       DataType::Type type = input_vector->GetPackedType();
1888       HVecReduce::ReductionKind kind = GetReductionKind(input_vector);
1889       HBasicBlock* exit = instruction->GetBlock()->GetSuccessors()[0];
1890       // Generate a vector reduction and scalar extract
1891       //    x = REDUCE( [x_1, .., x_n] )
1892       //    y = x_1
1893       // along the exit of the defining loop.
1894       HInstruction* reduce = new (global_allocator_) HVecReduce(
1895           global_allocator_, instruction, type, vector_length, kind, kNoDexPc);
1896       exit->InsertInstructionBefore(reduce, exit->GetFirstInstruction());
1897       instruction = new (global_allocator_) HVecExtractScalar(
1898           global_allocator_, reduce, type, vector_length, 0, kNoDexPc);
1899       exit->InsertInstructionAfter(instruction, reduce);
1900     }
1901   }
1902   return instruction;
1903 }
1904 
1905 #define GENERATE_VEC(x, y) \
1906   if (vector_mode_ == kVector) { \
1907     vector = (x); \
1908   } else { \
1909     DCHECK(vector_mode_ == kSequential); \
1910     vector = (y); \
1911   } \
1912   break;
1913 
1914 void HLoopOptimization::GenerateVecOp(HInstruction* org,
1915                                       HInstruction* opa,
1916                                       HInstruction* opb,
1917                                       DataType::Type type) {
1918   uint32_t dex_pc = org->GetDexPc();
1919   HInstruction* vector = nullptr;
1920   DataType::Type org_type = org->GetType();
1921   switch (org->GetKind()) {
1922     case HInstruction::kNeg:
1923       DCHECK(opb == nullptr);
1924       GENERATE_VEC(
1925         new (global_allocator_) HVecNeg(global_allocator_, opa, type, vector_length_, dex_pc),
1926         new (global_allocator_) HNeg(org_type, opa, dex_pc));
1927     case HInstruction::kNot:
1928       DCHECK(opb == nullptr);
1929       GENERATE_VEC(
1930         new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
1931         new (global_allocator_) HNot(org_type, opa, dex_pc));
1932     case HInstruction::kBooleanNot:
1933       DCHECK(opb == nullptr);
1934       GENERATE_VEC(
1935         new (global_allocator_) HVecNot(global_allocator_, opa, type, vector_length_, dex_pc),
1936         new (global_allocator_) HBooleanNot(opa, dex_pc));
1937     case HInstruction::kTypeConversion:
1938       DCHECK(opb == nullptr);
1939       GENERATE_VEC(
1940         new (global_allocator_) HVecCnv(global_allocator_, opa, type, vector_length_, dex_pc),
1941         new (global_allocator_) HTypeConversion(org_type, opa, dex_pc));
1942     case HInstruction::kAdd:
1943       GENERATE_VEC(
1944         new (global_allocator_) HVecAdd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1945         new (global_allocator_) HAdd(org_type, opa, opb, dex_pc));
1946     case HInstruction::kSub:
1947       GENERATE_VEC(
1948         new (global_allocator_) HVecSub(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1949         new (global_allocator_) HSub(org_type, opa, opb, dex_pc));
1950     case HInstruction::kMul:
1951       GENERATE_VEC(
1952         new (global_allocator_) HVecMul(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1953         new (global_allocator_) HMul(org_type, opa, opb, dex_pc));
1954     case HInstruction::kDiv:
1955       GENERATE_VEC(
1956         new (global_allocator_) HVecDiv(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1957         new (global_allocator_) HDiv(org_type, opa, opb, dex_pc));
1958     case HInstruction::kAnd:
1959       GENERATE_VEC(
1960         new (global_allocator_) HVecAnd(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1961         new (global_allocator_) HAnd(org_type, opa, opb, dex_pc));
1962     case HInstruction::kOr:
1963       GENERATE_VEC(
1964         new (global_allocator_) HVecOr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1965         new (global_allocator_) HOr(org_type, opa, opb, dex_pc));
1966     case HInstruction::kXor:
1967       GENERATE_VEC(
1968         new (global_allocator_) HVecXor(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1969         new (global_allocator_) HXor(org_type, opa, opb, dex_pc));
1970     case HInstruction::kShl:
1971       GENERATE_VEC(
1972         new (global_allocator_) HVecShl(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1973         new (global_allocator_) HShl(org_type, opa, opb, dex_pc));
1974     case HInstruction::kShr:
1975       GENERATE_VEC(
1976         new (global_allocator_) HVecShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1977         new (global_allocator_) HShr(org_type, opa, opb, dex_pc));
1978     case HInstruction::kUShr:
1979       GENERATE_VEC(
1980         new (global_allocator_) HVecUShr(global_allocator_, opa, opb, type, vector_length_, dex_pc),
1981         new (global_allocator_) HUShr(org_type, opa, opb, dex_pc));
1982     case HInstruction::kAbs:
1983       DCHECK(opb == nullptr);
1984       GENERATE_VEC(
1985         new (global_allocator_) HVecAbs(global_allocator_, opa, type, vector_length_, dex_pc),
1986         new (global_allocator_) HAbs(org_type, opa, dex_pc));
1987     default:
1988       break;
1989   }  // switch
1990   CHECK(vector != nullptr) << "Unsupported SIMD operator";
1991   vector_map_->Put(org, vector);
1992 }
1993 
1994 #undef GENERATE_VEC
1995 
1996 //
1997 // Vectorization idioms.
1998 //
1999 
2000 // Method recognizes the following idioms:
2001 //   rounding  halving add (a + b + 1) >> 1 for unsigned/signed operands a, b
2002 //   truncated halving add (a + b)     >> 1 for unsigned/signed operands a, b
2003 // Provided that the operands are promoted to a wider form to do the arithmetic and
2004 // then cast back to the narrower form, the idioms can be mapped into an efficient SIMD
2005 // implementation that operates directly in the narrower form (plus one extra bit).
2006 // TODO: current version recognizes implicit byte/short/char widening only;
2007 //       explicit widening from int to long could be added later.
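// For example (source-level, for intuition only, with hypothetical byte arrays):
//   c[i] = (byte) ((a[i] + b[i] + 1) >> 1);  // rounding halving add
//   c[i] = (byte) ((a[i] + b[i]) >> 1);      // truncated halving add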
2008 bool HLoopOptimization::VectorizeHalvingAddIdiom(LoopNode* node,
2009                                                  HInstruction* instruction,
2010                                                  bool generate_code,
2011                                                  DataType::Type type,
2012                                                  uint64_t restrictions) {
2013   // Test for top level arithmetic shift right x >> 1 or logical shift right x >>> 1
2014   // (note that whether the sign bit in wider precision is shifted in has no effect
2015   // on the narrow precision computed by the idiom).
2016   if ((instruction->IsShr() ||
2017        instruction->IsUShr()) &&
2018       IsInt64Value(instruction->InputAt(1), 1)) {
2019     // Test for (a + b + c) >> 1 for optional constant c.
2020     HInstruction* a = nullptr;
2021     HInstruction* b = nullptr;
2022     int64_t       c = 0;
2023     if (IsAddConst2(graph_, instruction->InputAt(0), /*out*/ &a, /*out*/ &b, /*out*/ &c)) {
2024       // Accept c == 1 (rounded) or c == 0 (not rounded).
2025       bool is_rounded = false;
2026       if (c == 1) {
2027         is_rounded = true;
2028       } else if (c != 0) {
2029         return false;
2030       }
2031       // Accept consistent zero or sign extension on operands a and b.
2032       HInstruction* r = nullptr;
2033       HInstruction* s = nullptr;
2034       bool is_unsigned = false;
2035       if (!IsNarrowerOperands(a, b, type, &r, &s, &is_unsigned)) {
2036         return false;
2037       }
2038       // Deal with vector restrictions.
2039       if ((!is_unsigned && HasVectorRestrictions(restrictions, kNoSignedHAdd)) ||
2040           (!is_rounded && HasVectorRestrictions(restrictions, kNoUnroundedHAdd))) {
2041         return false;
2042       }
2043       // Accept recognized halving add for vectorizable operands. Vectorized code uses the
2044       // shorthand idiomatic operation. Sequential code uses the original scalar expressions.
2045       DCHECK(r != nullptr && s != nullptr);
2046       if (generate_code && vector_mode_ != kVector) {  // de-idiom
2047         r = instruction->InputAt(0);
2048         s = instruction->InputAt(1);
2049       }
2050       if (VectorizeUse(node, r, generate_code, type, restrictions) &&
2051           VectorizeUse(node, s, generate_code, type, restrictions)) {
2052         if (generate_code) {
2053           if (vector_mode_ == kVector) {
2054             vector_map_->Put(instruction, new (global_allocator_) HVecHalvingAdd(
2055                 global_allocator_,
2056                 vector_map_->Get(r),
2057                 vector_map_->Get(s),
2058                 HVecOperation::ToProperType(type, is_unsigned),
2059                 vector_length_,
2060                 is_rounded,
2061                 kNoDexPc));
2062             MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
2063           } else {
2064             GenerateVecOp(instruction, vector_map_->Get(r), vector_map_->Get(s), type);
2065           }
2066         }
2067         return true;
2068       }
2069     }
2070   }
2071   return false;
2072 }
2073 
2074 // Method recognizes the following idiom:
2075 //   q += ABS(a - b) for signed operands a, b
2076 // Provided that the operands have the same type or are promoted to a wider form.
2077 // Since this may involve a vector length change, the idiom is handled by going directly
2078 // to a sad-accumulate node (rather than relying on combining finer-grained nodes later).
2079 // TODO: unsigned SAD too?
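// For example (source-level, for intuition only, with hypothetical byte arrays):
//   int q = 0;
//   for (int i = 0; i < n; i++) {
//     q += Math.abs(a[i] - b[i]);  // maps to a single HVecSADAccumulate
//   }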
2080 bool HLoopOptimization::VectorizeSADIdiom(LoopNode* node,
2081                                           HInstruction* instruction,
2082                                           bool generate_code,
2083                                           DataType::Type reduction_type,
2084                                           uint64_t restrictions) {
2085   // Filter integral "q += ABS(a - b);" reduction, where ABS and SUB
2086   // are done in the same precision (either int or long).
2087   if (!instruction->IsAdd() ||
2088       (reduction_type != DataType::Type::kInt32 && reduction_type != DataType::Type::kInt64)) {
2089     return false;
2090   }
2091   HInstruction* q = instruction->InputAt(0);
2092   HInstruction* v = instruction->InputAt(1);
2093   HInstruction* a = nullptr;
2094   HInstruction* b = nullptr;
2095   if (v->IsAbs() &&
2096       v->GetType() == reduction_type &&
2097       IsSubConst2(graph_, v->InputAt(0), /*out*/ &a, /*out*/ &b)) {
2098     DCHECK(a != nullptr && b != nullptr);
2099   } else {
2100     return false;
2101   }
2102   // Accept the same type, or consistent sign extension of a narrower type, on operands a and b.
2103   // The same-type or narrower operands are called r (a or lower) and s (b or lower).
2104   // We inspect the operands carefully to pick the most suited type.
2105   HInstruction* r = a;
2106   HInstruction* s = b;
2107   bool is_unsigned = false;
2108   DataType::Type sub_type = GetNarrowerType(a, b);
2109   if (reduction_type != sub_type &&
2110       (!IsNarrowerOperands(a, b, sub_type, &r, &s, &is_unsigned) || is_unsigned)) {
2111     return false;
2112   }
2113   // Try same/narrower type and deal with vector restrictions.
2114   if (!TrySetVectorType(sub_type, &restrictions) ||
2115       HasVectorRestrictions(restrictions, kNoSAD) ||
2116       (reduction_type != sub_type && HasVectorRestrictions(restrictions, kNoWideSAD))) {
2117     return false;
2118   }
2119   // Accept SAD idiom for vectorizable operands. Vectorized code uses the shorthand
2120   // idiomatic operation. Sequential code uses the original scalar expressions.
2121   DCHECK(r != nullptr && s != nullptr);
2122   if (generate_code && vector_mode_ != kVector) {  // de-idiom
2123     r = s = v->InputAt(0);
2124   }
2125   if (VectorizeUse(node, q, generate_code, sub_type, restrictions) &&
2126       VectorizeUse(node, r, generate_code, sub_type, restrictions) &&
2127       VectorizeUse(node, s, generate_code, sub_type, restrictions)) {
2128     if (generate_code) {
2129       if (vector_mode_ == kVector) {
2130         vector_map_->Put(instruction, new (global_allocator_) HVecSADAccumulate(
2131             global_allocator_,
2132             vector_map_->Get(q),
2133             vector_map_->Get(r),
2134             vector_map_->Get(s),
2135             HVecOperation::ToProperType(reduction_type, is_unsigned),
2136             GetOtherVL(reduction_type, sub_type, vector_length_),
2137             kNoDexPc));
2138         MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
2139       } else {
2140         GenerateVecOp(v, vector_map_->Get(r), nullptr, reduction_type);
2141         GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
2142       }
2143     }
2144     return true;
2145   }
2146   return false;
2147 }
2148 
2149 // Method recognizes the following dot product idiom:
2150 //   q += a * b for operands a, b whose type is narrower than the reduction one.
2151 // Provided that the operands have the same type or are promoted to a wider form.
2152 // Since this may involve a vector length change, the idiom is handled by going directly
2153 // to a dot product node (rather than relying on combining finer-grained nodes later).
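// For example (source-level, for intuition only, with hypothetical byte arrays):
//   int q = 0;
//   for (int i = 0; i < n; i++) {
//     q += a[i] * b[i];  // maps to a single HVecDotProd
//   }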
2154 bool HLoopOptimization::VectorizeDotProdIdiom(LoopNode* node,
2155                                               HInstruction* instruction,
2156                                               bool generate_code,
2157                                               DataType::Type reduction_type,
2158                                               uint64_t restrictions) {
2159   if (!instruction->IsAdd() || (reduction_type != DataType::Type::kInt32)) {
2160     return false;
2161   }
2162 
2163   HInstruction* q = instruction->InputAt(0);
2164   HInstruction* v = instruction->InputAt(1);
2165   if (!v->IsMul() || v->GetType() != reduction_type) {
2166     return false;
2167   }
2168 
2169   HInstruction* a = v->InputAt(0);
2170   HInstruction* b = v->InputAt(1);
2171   HInstruction* r = a;
2172   HInstruction* s = b;
2173   DataType::Type op_type = GetNarrowerType(a, b);
2174   bool is_unsigned = false;
2175 
2176   if (!IsNarrowerOperands(a, b, op_type, &r, &s, &is_unsigned)) {
2177     return false;
2178   }
2179   op_type = HVecOperation::ToProperType(op_type, is_unsigned);
2180 
2181   if (!TrySetVectorType(op_type, &restrictions) ||
2182       HasVectorRestrictions(restrictions, kNoDotProd)) {
2183     return false;
2184   }
2185 
2186   DCHECK(r != nullptr && s != nullptr);
2187   // Accept dot product idiom for vectorizable operands. Vectorized code uses the shorthand
2188   // idiomatic operation. Sequential code uses the original scalar expressions.
2189   if (generate_code && vector_mode_ != kVector) {  // de-idiom
2190     r = a;
2191     s = b;
2192   }
2193   if (VectorizeUse(node, q, generate_code, op_type, restrictions) &&
2194       VectorizeUse(node, r, generate_code, op_type, restrictions) &&
2195       VectorizeUse(node, s, generate_code, op_type, restrictions)) {
2196     if (generate_code) {
2197       if (vector_mode_ == kVector) {
2198         vector_map_->Put(instruction, new (global_allocator_) HVecDotProd(
2199             global_allocator_,
2200             vector_map_->Get(q),
2201             vector_map_->Get(r),
2202             vector_map_->Get(s),
2203             reduction_type,
2204             is_unsigned,
2205             GetOtherVL(reduction_type, op_type, vector_length_),
2206             kNoDexPc));
2207         MaybeRecordStat(stats_, MethodCompilationStat::kLoopVectorizedIdiom);
2208       } else {
2209         GenerateVecOp(v, vector_map_->Get(r), vector_map_->Get(s), reduction_type);
2210         GenerateVecOp(instruction, vector_map_->Get(q), vector_map_->Get(v), reduction_type);
2211       }
2212     }
2213     return true;
2214   }
2215   return false;
2216 }
2217 
2218 //
2219 // Vectorization heuristics.
2220 //
2221 
2222 Alignment HLoopOptimization::ComputeAlignment(HInstruction* offset,
2223                                               DataType::Type type,
2224                                               bool is_string_char_at,
2225                                               uint32_t peeling) {
2226   // Combine the alignment and hidden offset that are guaranteed by
2227   // the Android runtime with a known starting index, adjusted to bytes.
2228   int64_t value = 0;
2229   if (IsInt64AndGet(offset, /*out*/ &value)) {
2230     uint32_t start_offset =
2231         HiddenOffset(type, is_string_char_at) + (value + peeling) * DataType::Size(type);
2232     return Alignment(BaseAlignment(), start_offset & (BaseAlignment() - 1u));
2233   }
2234   // Otherwise, the Android runtime guarantees at least natural alignment.
2235   return Alignment(DataType::Size(type), 0);
2236 }
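
// Worked example (for intuition only, assuming an 8-byte kObjectAlignment and a
// 12-byte hidden data offset for int arrays): an int array reference starting at
// index 0 with no peeling yields start_offset = 12, i.e. Alignment(8, 4), while
// peeling one iteration moves the start to byte 16, i.e. Alignment(8, 0). This is
// how peeling can restore full base alignment for vector memory operations.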
2237 
2238 void HLoopOptimization::SetAlignmentStrategy(uint32_t peeling_votes[],
2239                                              const ArrayReference* peeling_candidate) {
2240   // Current heuristic: pick the best static loop peeling factor, if any,
2241   // or otherwise use dynamic loop peeling on the suggested peeling candidate.
2242   uint32_t max_vote = 0;
2243   for (int32_t i = 0; i < 16; i++) {
2244     if (peeling_votes[i] > max_vote) {
2245       max_vote = peeling_votes[i];
2246       vector_static_peeling_factor_ = i;
2247     }
2248   }
2249   if (max_vote == 0) {
2250     vector_dynamic_peeling_candidate_ = peeling_candidate;
2251   }
2252 }
2253 
2254 uint32_t HLoopOptimization::MaxNumberPeeled() {
2255   if (vector_dynamic_peeling_candidate_ != nullptr) {
2256     return vector_length_ - 1u;  // worst-case
2257   }
2258   return vector_static_peeling_factor_;  // known exactly
2259 }
2260 
2261 bool HLoopOptimization::IsVectorizationProfitable(int64_t trip_count) {
2262   // Current heuristic: non-empty body with sufficient number of iterations (if known).
2263   // TODO: refine by looking at e.g. operation count, alignment, etc.
2264   // TODO: trip count is really unsigned entity, provided the guarding test
2265   //       is satisfied; deal with this more carefully later
2266   uint32_t max_peel = MaxNumberPeeled();
2267   if (vector_length_ == 0) {
2268     return false;  // nothing found
2269   } else if (trip_count < 0) {
2270     return false;  // guard against non-taken/large
2271   } else if ((0 < trip_count) && (trip_count < (vector_length_ + max_peel))) {
2272     return false;  // insufficient iterations
2273   }
2274   return true;
2275 }
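
// Worked example (for intuition only): with vector_length_ == 4 and a dynamic
// peeling candidate (max_peel == 3), a known trip count of 10 passes the test,
// since 10 >= 4 + 3, whereas a trip count of 6 is rejected: in the worst case
// peeling would leave too few iterations to enter the vector loop at all.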
2276 
2277 //
2278 // Helpers.
2279 //
2280 
2281 bool HLoopOptimization::TrySetPhiInduction(HPhi* phi, bool restrict_uses) {
2282   // Start with empty phi induction.
2283   iset_->clear();
2284 
2285   // Special case phis that have an equivalent in a debuggable setup. Our graph checker isn't
2286   // smart enough to follow strongly connected components (and it's probably not worth
2287   // it to make it so). See b/33775412.
2288   if (graph_->IsDebuggable() && phi->HasEquivalentPhi()) {
2289     return false;
2290   }
2291 
2292   // Lookup phi induction cycle.
2293   ArenaSet<HInstruction*>* set = induction_range_.LookupCycle(phi);
2294   if (set != nullptr) {
2295     for (HInstruction* i : *set) {
2296       // Check that, other than instructions that are no longer in the graph (removed earlier),
2297       // each instruction is removable and, when restricted uses are requested, all uses
2298       // other than for the phi are contained within the cycle.
2299       if (!i->IsInBlock()) {
2300         continue;
2301       } else if (!i->IsRemovable()) {
2302         return false;
2303       } else if (i != phi && restrict_uses) {
2304         // Deal with regular uses.
2305         for (const HUseListNode<HInstruction*>& use : i->GetUses()) {
2306           if (set->find(use.GetUser()) == set->end()) {
2307             return false;
2308           }
2309         }
2310       }
2311       iset_->insert(i);  // copy
2312     }
2313     return true;
2314   }
2315   return false;
2316 }
2317 
2318 bool HLoopOptimization::TrySetPhiReduction(HPhi* phi) {
2319   DCHECK(iset_->empty());
2320   // Only unclassified phi cycles are candidates for reductions.
2321   if (induction_range_.IsClassified(phi)) {
2322     return false;
2323   }
2324   // Accept operations like x = x + .., provided that the phi and the reduction are
2325   // used exactly once inside the loop, and by each other.
2326   HInputsRef inputs = phi->GetInputs();
2327   if (inputs.size() == 2) {
2328     HInstruction* reduction = inputs[1];
2329     if (HasReductionFormat(reduction, phi)) {
2330       HLoopInformation* loop_info = phi->GetBlock()->GetLoopInformation();
2331       uint32_t use_count = 0;
2332       bool single_use_inside_loop =
2333           // Reduction update only used by phi.
2334           reduction->GetUses().HasExactlyOneElement() &&
2335           !reduction->HasEnvironmentUses() &&
2336           // Reduction update is only use of phi inside the loop.
2337           IsOnlyUsedAfterLoop(loop_info, phi, /*collect_loop_uses*/ true, &use_count) &&
2338           iset_->size() == 1;
2339       iset_->clear();  // leave the way you found it
2340       if (single_use_inside_loop) {
2341         // Link reduction back, and start recording feed value.
2342         reductions_->Put(reduction, phi);
2343         reductions_->Put(phi, phi->InputAt(0));
2344         return true;
2345       }
2346     }
2347   }
2348   return false;
2349 }
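
// For intuition only: the accepted reduction shape corresponds to source such as
//
//   int sum = init;
//   for (int i = 0; i < n; i++) {
//     sum += a[i];  // phi(init, update); the update is the phi's only loop use
//   }
//
// where reductions_ records both directions: update -> phi and phi -> init.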
2350 
2351 bool HLoopOptimization::TrySetSimpleLoopHeader(HBasicBlock* block, /*out*/ HPhi** main_phi) {
2352   // Start with empty phi induction and reductions.
2353   iset_->clear();
2354   reductions_->clear();
2355 
2356   // Scan the phis to find the following (the induction structure has already
2357   // been optimized, so we don't need to worry about trivial cases):
2358   // (1) optional reductions in loop,
2359   // (2) the main induction, used in loop control.
2360   HPhi* phi = nullptr;
2361   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
2362     if (TrySetPhiReduction(it.Current()->AsPhi())) {
2363       continue;
2364     } else if (phi == nullptr) {
2365       // Found the first candidate for main induction.
2366       phi = it.Current()->AsPhi();
2367     } else {
2368       return false;
2369     }
2370   }
2371 
2372   // Then test for a typical loopheader:
2373   //   s:  SuspendCheck
2374   //   c:  Condition(phi, bound)
2375   //   i:  If(c)
2376   if (phi != nullptr && TrySetPhiInduction(phi, /*restrict_uses*/ false)) {
2377     HInstruction* s = block->GetFirstInstruction();
2378     if (s != nullptr && s->IsSuspendCheck()) {
2379       HInstruction* c = s->GetNext();
2380       if (c != nullptr &&
2381           c->IsCondition() &&
2382           c->GetUses().HasExactlyOneElement() &&  // only used for termination
2383           !c->HasEnvironmentUses()) {  // unlikely, but not impossible
2384         HInstruction* i = c->GetNext();
2385         if (i != nullptr && i->IsIf() && i->InputAt(0) == c) {
2386           iset_->insert(c);
2387           iset_->insert(s);
2388           *main_phi = phi;
2389           return true;
2390         }
2391       }
2392     }
2393   }
2394   return false;
2395 }
2396 
2397 bool HLoopOptimization::IsEmptyBody(HBasicBlock* block) {
2398   if (!block->GetPhis().IsEmpty()) {
2399     return false;
2400   }
2401   for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
2402     HInstruction* instruction = it.Current();
2403     if (!instruction->IsGoto() && iset_->find(instruction) == iset_->end()) {
2404       return false;
2405     }
2406   }
2407   return true;
2408 }
2409 
2410 bool HLoopOptimization::IsUsedOutsideLoop(HLoopInformation* loop_info,
2411                                           HInstruction* instruction) {
2412   // Deal with regular uses.
2413   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
2414     if (use.GetUser()->GetBlock()->GetLoopInformation() != loop_info) {
2415       return true;
2416     }
2417   }
2418   return false;
2419 }
2420 
2421 bool HLoopOptimization::IsOnlyUsedAfterLoop(HLoopInformation* loop_info,
2422                                             HInstruction* instruction,
2423                                             bool collect_loop_uses,
2424                                             /*out*/ uint32_t* use_count) {
2425   // Deal with regular uses.
2426   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
2427     HInstruction* user = use.GetUser();
2428     if (iset_->find(user) == iset_->end()) {  // not excluded?
2429       HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
2430       if (other_loop_info != nullptr && other_loop_info->IsIn(*loop_info)) {
2431         // If collect_loop_uses is set, simply keep adding those uses to the set.
2432         // Otherwise, reject uses inside the loop that were not already in the set.
2433         if (collect_loop_uses) {
2434           iset_->insert(user);
2435           continue;
2436         }
2437         return false;
2438       }
2439       ++*use_count;
2440     }
2441   }
2442   return true;
2443 }
2444 
2445 bool HLoopOptimization::TryReplaceWithLastValue(HLoopInformation* loop_info,
2446                                                 HInstruction* instruction,
2447                                                 HBasicBlock* block) {
2448   // Try to replace outside uses with the last value.
2449   if (induction_range_.CanGenerateLastValue(instruction)) {
2450     HInstruction* replacement = induction_range_.GenerateLastValue(instruction, graph_, block);
2451     // Deal with regular uses.
2452     const HUseList<HInstruction*>& uses = instruction->GetUses();
2453     for (auto it = uses.begin(), end = uses.end(); it != end;) {
2454       HInstruction* user = it->GetUser();
2455       size_t index = it->GetIndex();
2456       ++it;  // increment before replacing
2457       if (iset_->find(user) == iset_->end()) {  // not excluded?
2458         if (kIsDebugBuild) {
2459           // We have checked earlier in 'IsOnlyUsedAfterLoop' that the use is after the loop.
2460           HLoopInformation* other_loop_info = user->GetBlock()->GetLoopInformation();
2461           CHECK(other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info));
2462         }
2463         user->ReplaceInput(replacement, index);
2464         induction_range_.Replace(user, instruction, replacement);  // update induction
2465       }
2466     }
2467     // Deal with environment uses.
2468     const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
2469     for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
2470       HEnvironment* user = it->GetUser();
2471       size_t index = it->GetIndex();
2472       ++it;  // increment before replacing
2473       if (iset_->find(user->GetHolder()) == iset_->end()) {  // not excluded?
2474         // Only update environment uses after the loop.
2475         HLoopInformation* other_loop_info = user->GetHolder()->GetBlock()->GetLoopInformation();
2476         if (other_loop_info == nullptr || !other_loop_info->IsIn(*loop_info)) {
2477           user->RemoveAsUserOfInput(index);
2478           user->SetRawEnvAt(index, replacement);
2479           replacement->AddEnvUseAt(user, index);
2480         }
2481       }
2482     }
2483     return true;
2484   }
2485   return false;
2486 }
2487 
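// Tries to remove all uses of the instruction outside the loop by assigning
// its last value, combining IsOnlyUsedAfterLoop and TryReplaceWithLastValue
// above.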
bool HLoopOptimization::TryAssignLastValue(HLoopInformation* loop_info,
                                           HInstruction* instruction,
                                           HBasicBlock* block,
                                           bool collect_loop_uses) {
  // Assigning the last value is always successful if there are no uses.
  // Otherwise, it succeeds only for a loop without early exits, by generating
  // the proper last-value assignment.
  uint32_t use_count = 0;
  return IsOnlyUsedAfterLoop(loop_info, instruction, collect_loop_uses, &use_count) &&
      (use_count == 0 ||
       (!IsEarlyExit(loop_info) && TryReplaceWithLastValue(loop_info, instruction, block)));
}

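// Removes each instruction in the given list that is dead and removable
// (no side effects and no remaining uses), iterating backwards so that
// users are visited before their inputs; marks the graph as simplified.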
void HLoopOptimization::RemoveDeadInstructions(const HInstructionList& list) {
  for (HBackwardInstructionIterator i(list); !i.Done(); i.Advance()) {
    HInstruction* instruction = i.Current();
    if (instruction->IsDeadAndRemovable()) {
      simplified_ = true;
      instruction->GetBlock()->RemoveInstructionOrPhi(instruction);
    }
  }
}

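// Determines whether the induction cycle collected in iset_ can be removed
// safely: instructions with environment uses cannot be removed when compiling
// 'debuggable', and an environment input of a deoptimization must be kept.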
bool HLoopOptimization::CanRemoveCycle() {
  for (HInstruction* i : *iset_) {
    // We can never remove instructions that have environment
    // uses when we compile 'debuggable'.
    if (i->HasEnvironmentUses() && graph_->IsDebuggable()) {
      return false;
    }
    // A deoptimization should never have an environment input removed.
    for (const HUseListNode<HEnvironment*>& use : i->GetEnvUses()) {
      if (use.GetUser()->GetHolder()->IsDeoptimize()) {
        return false;
      }
    }
  }
  return true;
}

}  // namespace art