/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "instruction_simplifier_arm64.h"

#include "common_arm64.h"
#include "instruction_simplifier_shared.h"
#include "mirror/array-inl.h"

namespace art {
namespace arm64 {

using helpers::CanFitInShifterOperand;
using helpers::HasShifterOperand;
using helpers::ShifterOperandSupportsExtension;

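// Extracting the base address computation allows the memory access itself to
// use a register-offset addressing mode on the extracted address. For an int
// array the generated code may look roughly like (registers and exact
// encodings are illustrative only):
//   add x16, x_array, #data_offset           // HArm64IntermediateAddress
//   ldr w0, [x16, w_index, sxtw #2]          // HArrayGet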
void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstruction* access,
                                                                     HInstruction* array,
                                                                     HInstruction* index,
                                                                     int access_size) {
  if (kEmitCompilerReadBarrier) {
    // The read barrier instrumentation does not support the
    // HArm64IntermediateAddress instruction yet.
    //
    // TODO: Handle this case properly in the ARM64 code generator and
    // re-enable this optimization; otherwise, remove this TODO.
    // b/26601270
    return;
  }
  if (index->IsConstant() ||
      (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
    // When the index is a constant all the addressing can be fitted in the
    // memory access instruction, so do not split the access.
    return;
  }
  if (access->IsArraySet() &&
      access->AsArraySet()->GetValue()->GetType() == Primitive::kPrimNot) {
    // The access may require a runtime call or the original array pointer.
    return;
  }

  // Proceed to extract the base address computation.
  ArenaAllocator* arena = GetGraph()->GetArena();

  HIntConstant* offset =
      GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
  HArm64IntermediateAddress* address =
      new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
  address->SetReferenceTypeInfo(array->GetReferenceTypeInfo());
  access->GetBlock()->InsertInstructionBefore(address, access);
  access->ReplaceInput(address, 0);
  // Both instructions must depend on GC to prevent any instruction that can
  // trigger GC to be inserted between the two.
  access->AddSideEffects(SideEffects::DependsOnGC());
  DCHECK(address->GetSideEffects().Includes(SideEffects::DependsOnGC()));
  DCHECK(access->GetSideEffects().Includes(SideEffects::DependsOnGC()));
  // TODO: Code generation for HArrayGet and HArraySet will check whether the input address
  // is an HArm64IntermediateAddress and generate appropriate code.
  // We would like to replace the `HArrayGet` and `HArraySet` with custom instructions (maybe
  // `HArm64Load` and `HArm64Store`). We defer these changes because these new instructions would
  // not bring any advantages yet.
  // Also see the comments in
  // `InstructionCodeGeneratorARM64::VisitArrayGet()` and
  // `InstructionCodeGeneratorARM64::VisitArraySet()`.
  RecordSimplification();
}

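// Try to merge `bitfield_op` (a shift or extension that fits a shifter
// operand) into `use`. For example, merging an HShl by 5 into an HAdd yields a
// single HArm64DataProcWithShifterOp, for which codegen may emit roughly
// `add w0, w_a, w_b, lsl #5` instead of a separate shift and add (illustrative
// only). When `do_merge` is false, only the feasibility check is performed.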
bool InstructionSimplifierArm64Visitor::TryMergeIntoShifterOperand(HInstruction* use,
                                                                   HInstruction* bitfield_op,
                                                                   bool do_merge) {
  DCHECK(HasShifterOperand(use));
  DCHECK(use->IsBinaryOperation() || use->IsNeg());
  DCHECK(CanFitInShifterOperand(bitfield_op));
  DCHECK(!bitfield_op->HasEnvironmentUses());

  Primitive::Type type = use->GetType();
  if (type != Primitive::kPrimInt && type != Primitive::kPrimLong) {
    return false;
  }

  HInstruction* left;
  HInstruction* right;
  if (use->IsBinaryOperation()) {
    left = use->InputAt(0);
    right = use->InputAt(1);
  } else {
    DCHECK(use->IsNeg());
    right = use->AsNeg()->InputAt(0);
    left = GetGraph()->GetConstant(right->GetType(), 0);
  }
  DCHECK(left == bitfield_op || right == bitfield_op);

  if (left == right) {
    // TODO: Handle special transformations in this situation?
    // For example should we transform `(x << 1) + (x << 1)` into `(x << 2)`?
    // Or should this be part of a separate transformation logic?
    return false;
  }

  bool is_commutative = use->IsBinaryOperation() && use->AsBinaryOperation()->IsCommutative();
  HInstruction* other_input;
  if (bitfield_op == right) {
    other_input = left;
  } else {
    if (is_commutative) {
      other_input = right;
    } else {
      return false;
    }
  }

  HArm64DataProcWithShifterOp::OpKind op_kind;
  int shift_amount = 0;
  HArm64DataProcWithShifterOp::GetOpInfoFromInstruction(bitfield_op, &op_kind, &shift_amount);

  if (HArm64DataProcWithShifterOp::IsExtensionOp(op_kind) &&
      !ShifterOperandSupportsExtension(use)) {
    return false;
  }

  if (do_merge) {
    HArm64DataProcWithShifterOp* alu_with_op =
        new (GetGraph()->GetArena()) HArm64DataProcWithShifterOp(use,
                                                                 other_input,
                                                                 bitfield_op->InputAt(0),
                                                                 op_kind,
                                                                 shift_amount,
                                                                 use->GetDexPc());
    use->GetBlock()->ReplaceAndRemoveInstructionWith(use, alu_with_op);
    if (bitfield_op->GetUses().empty()) {
      bitfield_op->GetBlock()->RemoveInstruction(bitfield_op);
    }
    RecordSimplification();
  }

  return true;
}

// Merge a bitfield move instruction into its uses if it can be merged into all of them.
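// The merge is all-or-nothing: if any user cannot accept `bitfield_op` as a
// shifter operand, the bitfield move would have to stay live anyway, so no
// user is rewritten in that case.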
bool InstructionSimplifierArm64Visitor::TryMergeIntoUsersShifterOperand(HInstruction* bitfield_op) {
  DCHECK(CanFitInShifterOperand(bitfield_op));

  if (bitfield_op->HasEnvironmentUses()) {
    return false;
  }

  const HUseList<HInstruction*>& uses = bitfield_op->GetUses();

  // Check whether we can merge the instruction into all its users' shifter operands.
  for (const HUseListNode<HInstruction*>& use : uses) {
    HInstruction* user = use.GetUser();
    if (!HasShifterOperand(user)) {
      return false;
    }
    if (!CanMergeIntoShifterOperand(user, bitfield_op)) {
      return false;
    }
  }

  // Merge the instruction into its uses.
  for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) {
    HInstruction* user = it->GetUser();
    // Increment `it` now because `*it` will disappear thanks to MergeIntoShifterOperand().
    ++it;
    bool merged = MergeIntoShifterOperand(user, bitfield_op);
    DCHECK(merged);
  }

  return true;
}

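// TryMergeNegatedInput() (from instruction_simplifier_shared) recognizes a
// negated input of And/Or/Xor, e.g. `a & ~b`, `a | ~b` or `a ^ ~b`, so that
// codegen can use the ARM64 bic/orn/eon instructions; for instance `a & ~b`
// may be emitted roughly as `bic w0, w_a, w_b` (illustrative only).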
void InstructionSimplifierArm64Visitor::VisitAnd(HAnd* instruction) {
  if (TryMergeNegatedInput(instruction)) {
    RecordSimplification();
  }
}

void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
  TryExtractArrayAccessAddress(instruction,
                               instruction->GetArray(),
                               instruction->GetIndex(),
                               Primitive::ComponentSize(instruction->GetType()));
}

void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
  TryExtractArrayAccessAddress(instruction,
                               instruction->GetArray(),
                               instruction->GetIndex(),
                               Primitive::ComponentSize(instruction->GetComponentType()));
}

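// TryCombineMultiplyAccumulate() (from instruction_simplifier_shared) folds a
// multiply into an add or subtract, e.g. `d + a * b` or `d - a * b`, so that
// codegen can emit a single multiply-accumulate such as
// `madd w0, w_a, w_b, w_d` or `msub w0, w_a, w_b, w_d` (illustrative only).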
void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
  if (TryCombineMultiplyAccumulate(instruction, kArm64)) {
    RecordSimplification();
  }
}

void InstructionSimplifierArm64Visitor::VisitOr(HOr* instruction) {
  if (TryMergeNegatedInput(instruction)) {
    RecordSimplification();
  }
}

void InstructionSimplifierArm64Visitor::VisitShl(HShl* instruction) {
  if (instruction->InputAt(1)->IsConstant()) {
    TryMergeIntoUsersShifterOperand(instruction);
  }
}

void InstructionSimplifierArm64Visitor::VisitShr(HShr* instruction) {
  if (instruction->InputAt(1)->IsConstant()) {
    TryMergeIntoUsersShifterOperand(instruction);
  }
}

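// Integral conversions that fit a shifter operand (e.g. widening an int to a
// long) may be merged into their users as an extension, so that codegen can
// emit roughly `add x0, x_a, w_b, sxtw` for `a + (long) b` (illustrative only).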
void InstructionSimplifierArm64Visitor::VisitTypeConversion(HTypeConversion* instruction) {
  Primitive::Type result_type = instruction->GetResultType();
  Primitive::Type input_type = instruction->GetInputType();

  if (input_type == result_type) {
    // We let the arch-independent code handle this.
    return;
  }

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    TryMergeIntoUsersShifterOperand(instruction);
  }
}

void InstructionSimplifierArm64Visitor::VisitUShr(HUShr* instruction) {
  if (instruction->InputAt(1)->IsConstant()) {
    TryMergeIntoUsersShifterOperand(instruction);
  }
}

void InstructionSimplifierArm64Visitor::VisitXor(HXor* instruction) {
  if (TryMergeNegatedInput(instruction)) {
    RecordSimplification();
  }
}

}  // namespace arm64
}  // namespace art