/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/quick/mir_to_lir-inl.h"

#include "base/logging.h"

namespace art {

#define DEBUG_OPT(X)
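/* DEBUG_OPT compiles its argument away; redefine it as
 * "#define DEBUG_OPT(X) X" to emit the DumpDependentInsnPair traces below. */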

#define LOAD_STORE_CHECK_REG_DEP(mask, check) (mask.Intersects(*check->u.m.def_mask))

/* Check RAW, WAR, and WAW dependencies on the register operands */
#define CHECK_REG_DEP(use, def, check) (def.Intersects(*check->u.m.use_mask)) || \
                                       (use.Union(def).Intersects(*check->u.m.def_mask))
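/* For example, with use = {r0} and def = {r1}, CHECK_REG_DEP fires when the
 * checked LIR reads r1 (RAW) or writes r0 or r1 (WAR/WAW). */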

/* Load Store Elimination filter:
 * - Wide Load/Store
 * - Exclusive Load/Store
 * - Quad operand Load/Store
 * - List Load/Store
 * - IT blocks
 * - Branch
 * - Dmb
 */
#define LOAD_STORE_FILTER(flags) ((flags & (IS_QUAD_OP|IS_STORE)) == (IS_QUAD_OP|IS_STORE) || \
                                  (flags & (IS_QUAD_OP|IS_LOAD)) == (IS_QUAD_OP|IS_LOAD) || \
                                  (flags & REG_USE012) == REG_USE012 || \
                                  (flags & REG_DEF01) == REG_DEF01 || \
                                  (flags & REG_DEF_LIST0) || \
                                  (flags & REG_DEF_LIST1) || \
                                  (flags & REG_USE_LIST0) || \
                                  (flags & REG_USE_LIST1) || \
                                  (flags & REG_DEF_FPCS_LIST0) || \
                                  (flags & REG_DEF_FPCS_LIST2) || \
                                  (flags & REG_USE_FPCS_LIST0) || \
                                  (flags & REG_USE_FPCS_LIST2) || \
                                  (flags & IS_VOLATILE) || \
                                  (flags & IS_BRANCH) || \
                                  (flags & IS_IT))

/*
 * Scheduler heuristics: MAX_HOIST_DISTANCE caps how many instructions a load
 * may be hoisted past; LDLD_DISTANCE and LD_LATENCY model, in instruction
 * slots, the assumed penalty between back-to-back loads and between a load
 * and its first use.
 */
#define MAX_HOIST_DISTANCE 20
#define LDLD_DISTANCE 4
#define LD_LATENCY 2

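/*
 * Returns true when the Dalvik register ranges accessed by the two LIRs
 * overlap. DECODE_ALIAS_INFO_WIDE yields 1 for a wide (64-bit) access, so
 * regXHi is the high vreg of a wide pair and equals regXLo otherwise.
 */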
static bool IsDalvikRegisterClobbered(LIR* lir1, LIR* lir2) {
  int reg1Lo = DECODE_ALIAS_INFO_REG(lir1->flags.alias_info);
  int reg1Hi = reg1Lo + DECODE_ALIAS_INFO_WIDE(lir1->flags.alias_info);
  int reg2Lo = DECODE_ALIAS_INFO_REG(lir2->flags.alias_info);
  int reg2Hi = reg2Lo + DECODE_ALIAS_INFO_WIDE(lir2->flags.alias_info);

  return (reg1Lo == reg2Lo) || (reg1Lo == reg2Hi) || (reg1Hi == reg2Lo);
}

/* Convert a more expensive instruction (i.e., a load) into a move */
void Mir2Lir::ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src) {
  /* Insert a move to replace the load */
  LIR* move_lir;
  move_lir = OpRegCopyNoInsert(dest, src);
  move_lir->dalvik_offset = orig_lir->dalvik_offset;
  /*
   * Insert the converted instruction after the original since the
   * optimization is scanning in the top-down order and the new instruction
   * will need to be re-checked (e.g. the new dest clobbers the src used in
   * this_lir).
   */
  InsertLIRAfter(orig_lir, move_lir);
}

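/* Debugging aid: print a dependent instruction pair. Only reachable when
 * DEBUG_OPT is redefined to expand its argument. */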
void Mir2Lir::DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type) {
  LOG(INFO) << type;
  LOG(INFO) << "Check LIR:";
  DumpLIRInsn(check_lir, 0);
  LOG(INFO) << "This LIR:";
  DumpLIRInsn(this_lir, 0);
}

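/*
 * Eliminate a redundant memory access in lir once its value is known to be
 * live in reg_id. If lir already targets reg_id it is simply NOP'd;
 * otherwise it is rewritten as a register copy. A sketch of the second case
 * (ARM-style pseudo-assembly, register names illustrative):
 *
 *   ldr r1, [r5, #16]   @ earlier load, kept
 *   ...
 *   ldr r0, [r5, #16]   @ redundant load ...
 *   mov r0, r1          @ ... becomes a move and the ldr is NOP'd
 */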
inline void Mir2Lir::EliminateLoad(LIR* lir, int reg_id) {
  DCHECK(RegStorage::SameRegType(lir->operands[0], reg_id));
  RegStorage dest_reg, src_reg;

  /* Same register - the load is a nop */
  if (lir->operands[0] == reg_id) {
    NopLIR(lir);
    return;
  }

  /* Different register - insert a move, then nop the load */
  switch (reg_id & RegStorage::kShapeTypeMask) {
    case RegStorage::k32BitSolo | RegStorage::kCoreRegister:
      dest_reg = RegStorage::Solo32(lir->operands[0]);
      src_reg = RegStorage::Solo32(reg_id);
      break;
    case RegStorage::k64BitSolo | RegStorage::kCoreRegister:
      dest_reg = RegStorage::Solo64(lir->operands[0]);
      src_reg = RegStorage::Solo64(reg_id);
      break;
    case RegStorage::k32BitSolo | RegStorage::kFloatingPoint:
      dest_reg = RegStorage::FloatSolo32(lir->operands[0]);
      src_reg = RegStorage::FloatSolo32(reg_id);
      break;
    case RegStorage::k64BitSolo | RegStorage::kFloatingPoint:
      dest_reg = RegStorage::FloatSolo64(lir->operands[0]);
      src_reg = RegStorage::FloatSolo64(reg_id);
      break;
    default:
      LOG(INFO) << "Load Store: Unsupported register type!";
      return;
  }
  ConvertMemOpIntoMove(lir, dest_reg, src_reg);
  NopLIR(lir);
  return;
}

/*
 * Perform a pass of top-down walk, from the first to the last instruction in the
 * superblock, to eliminate redundant loads and stores.
 *
 * An earlier load can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * An earlier store can eliminate a later load iff
 *   1) They are must-aliases
 *   2) The native register is not clobbered in between
 *   3) The memory location is not written to in between
 *
 * An earlier store can eliminate a later store iff
 *   1) They are must-aliases
 *   2) The memory location is not written to in between
 */
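/*
 * Sketch of how the pass below applies these rules: each candidate memory
 * access (this_lir) is compared against the instructions that follow it
 * (check_lir); alias_list tracks registers holding the same base address
 * through register copies, and matching later accesses are rewritten into
 * moves or nops via EliminateLoad.
 */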
void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir, *check_lir;
  std::vector<int> alias_list;

  if (head_lir == tail_lir) {
    return;
  }

  for (this_lir = head_lir; this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
    if (this_lir->flags.is_nop || IsPseudoLirOp(this_lir->opcode)) {
      continue;
    }

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
    /* Target LIR - skip if instr is:
     *  - NOP
     *  - Branch
     *  - Load and store
     *  - Wide load
     *  - Wide store
     *  - Exclusive load/store
     */
    if (LOAD_STORE_FILTER(target_flags) ||
        ((target_flags & (IS_LOAD | IS_STORE)) == (IS_LOAD | IS_STORE)) ||
        !(target_flags & (IS_LOAD | IS_STORE))) {
      continue;
    }
    int native_reg_id = this_lir->operands[0];
    int dest_reg_id = this_lir->operands[1];
    bool is_this_lir_load = target_flags & IS_LOAD;
    ResourceMask this_mem_mask = kEncodeMem.Intersection(this_lir->u.m.use_mask->Union(
        *this_lir->u.m.def_mask));

    /* Memory region */
    if (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeDalvikReg)) &&
        (!this_mem_mask.Intersects(kEncodeLiteral.Union(kEncodeHeapRef)))) {
      continue;
    }

    /* Does not redefine the address */
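    /* (e.g. an indexed access with writeback, which both uses and defines
     * its base register, cannot be matched against later accesses) */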
    if (this_lir->u.m.def_mask->Intersects(*this_lir->u.m.use_mask)) {
      continue;
    }

    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);
    ResourceMask stop_use_reg_mask = this_lir->u.m.use_mask->Without(kEncodeMem);

    /* The ARM backend can load/store PC */
    ResourceMask uses_pc = GetPCUseDefEncoding();
    if (uses_pc.Intersects(this_lir->u.m.use_mask->Union(*this_lir->u.m.def_mask))) {
      continue;
    }

    /* Initialize alias list */
    alias_list.clear();
    ResourceMask alias_reg_list_mask = kEncodeNone;
    if (!this_mem_mask.Intersects(kEncodeMem) && !this_mem_mask.Intersects(kEncodeLiteral)) {
      alias_list.push_back(dest_reg_id);
      SetupRegMask(&alias_reg_list_mask, dest_reg_id);
    }
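    /* alias_list starts with the base (address) register of this access; the
     * scan below extends it with registers that are move-copies of it, so
     * accesses through a copied base register can still be matched. */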

    /* Scan through the BB for possible elimination candidates */
    for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
      if (check_lir->flags.is_nop || IsPseudoLirOp(check_lir->opcode)) {
        continue;
      }

      if (uses_pc.Intersects(check_lir->u.m.use_mask->Union(*check_lir->u.m.def_mask))) {
        break;
      }

      ResourceMask check_mem_mask = kEncodeMem.Intersection(check_lir->u.m.use_mask->Union(
          *check_lir->u.m.def_mask));
      ResourceMask alias_mem_mask = this_mem_mask.Intersection(check_mem_mask);
      uint64_t check_flags = GetTargetInstFlags(check_lir->opcode);
      bool stop_here = false;
      bool pass_over = false;

      /* Check LIR - skip if instr is:
       *  - Wide Load
       *  - Wide Store
       *  - Branch
       *  - Dmb
       *  - Exclusive load/store
       *  - IT blocks
       *  - Quad loads
       */
      if (LOAD_STORE_FILTER(check_flags)) {
        stop_here = true;
        /* Possible alias or result of earlier pass */
      } else if (check_flags & IS_MOVE) {
        for (auto& reg : alias_list) {
          if (RegStorage::RegNum(check_lir->operands[1]) == RegStorage::RegNum(reg)) {
            pass_over = true;
            alias_list.push_back(check_lir->operands[0]);
            SetupRegMask(&alias_reg_list_mask, check_lir->operands[0]);
          }
        }
        /* Memory regions */
      } else if (!alias_mem_mask.Equals(kEncodeNone)) {
        DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
        bool is_check_lir_load = check_flags & IS_LOAD;
        bool reg_compatible = RegStorage::SameRegType(check_lir->operands[0], native_reg_id);

        if (!alias_mem_mask.Intersects(kEncodeMem) && alias_mem_mask.Equals(kEncodeLiteral)) {
          DCHECK(check_flags & IS_LOAD);
          /* Same value && same register type */
          if (reg_compatible && (this_lir->target == check_lir->target)) {
            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LITERAL"));
            EliminateLoad(check_lir, native_reg_id);
          }
        } else if ((alias_mem_mask.Equals(kEncodeDalvikReg) || alias_mem_mask.Equals(kEncodeHeapRef)) &&
                   alias_reg_list_mask.Intersects((check_lir->u.m.use_mask)->Without(kEncodeMem))) {
          bool same_offset = (GetInstructionOffset(this_lir) == GetInstructionOffset(check_lir));
          if (same_offset && !is_check_lir_load) {
            if (check_lir->operands[0] != native_reg_id) {
              DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "STORE STOP"));
              stop_here = true;
              break;
            }
          }

          if (reg_compatible && same_offset &&
              ((is_this_lir_load && is_check_lir_load) /* LDR - LDR */ ||
               (!is_this_lir_load && is_check_lir_load) /* STR - LDR */ ||
               (!is_this_lir_load && !is_check_lir_load) /* STR - STR */)) {
            DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "LOAD STORE"));
            EliminateLoad(check_lir, native_reg_id);
          }
        } else {
          /* Unsupported memory region */
        }
      }

      if (pass_over) {
        continue;
      }

      if (!stop_here) {
        bool stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_list_mask, check_lir);
        if (stop_alias) {
          /* Scan the alias list and remove any alias the check LIR clobbers. */
          for (auto& reg : alias_list) {
            stop_alias = false;
            ResourceMask alias_reg_mask = kEncodeNone;
            SetupRegMask(&alias_reg_mask, reg);
            stop_alias = LOAD_STORE_CHECK_REG_DEP(alias_reg_mask, check_lir);
            if (stop_alias) {
              ClearRegMask(&alias_reg_list_mask, reg);
              alias_list.erase(std::remove(alias_list.begin(), alias_list.end(), reg),
                               alias_list.end());
            }
          }
        }
        ResourceMask stop_search_mask = stop_def_reg_mask.Union(stop_use_reg_mask);
        stop_search_mask = stop_search_mask.Union(alias_reg_list_mask);
        stop_here = LOAD_STORE_CHECK_REG_DEP(stop_search_mask, check_lir);
        if (stop_here) {
          break;
        }
      } else {
        break;
      }
    }
  }
}

/*
 * Perform a pass of bottom-up walk, from the second instruction in the
 * superblock, to try to hoist loads to earlier slots.
 */
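/*
 * A sketch of the transformation (ARM-style pseudo-assembly, registers
 * illustrative): the load moves up past independent instructions so its
 * result has time to arrive before it is consumed.
 *
 *   add r2, r3, r4            ldr r0, [r5, #8]
 *   add r2, r2, #1    ==>     add r2, r3, r4
 *   ldr r0, [r5, #8]          add r2, r2, #1
 *   add r6, r0, r2            add r6, r0, r2
 */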
void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
  LIR* this_lir, *check_lir;
  /*
   * Store the list of independent instructions that can be hoisted past.
   * Will decide the best place to insert later.
   */
  LIR* prev_inst_list[MAX_HOIST_DISTANCE];

  /* Empty block */
  if (head_lir == tail_lir) {
    return;
  }

  /* Start from the second instruction */
  for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
    if (IsPseudoLirOp(this_lir->opcode)) {
      continue;
    }

    uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
    /* Skip non-interesting instructions */
    if (!(target_flags & IS_LOAD) ||
        this_lir->flags.is_nop ||
        ((target_flags & (REG_DEF0 | REG_DEF1)) == (REG_DEF0 | REG_DEF1)) ||
        ((target_flags & (IS_STORE | IS_LOAD)) == (IS_STORE | IS_LOAD))) {
      continue;
    }

    ResourceMask stop_use_all_mask = *this_lir->u.m.use_mask;

    /*
     * Branches for null/range checks are marked with the true resource
     * bits, and loads to Dalvik registers, constant pools, and non-alias
     * locations are safe to be hoisted. So only mark the heap references
     * conservatively here.
     *
     * Note: on x86(-64) and Arm64 this will add kEncodeNone.
     * TODO: Sanity check. LoadStoreElimination uses kBranchBit to fake a PC.
     */
    if (stop_use_all_mask.HasBit(ResourceMask::kHeapRef)) {
      stop_use_all_mask.SetBits(GetPCUseDefEncoding());
    }

    /* Similar to the above, but check only for pure register dependencies */
    ResourceMask stop_use_reg_mask = stop_use_all_mask.Without(kEncodeMem);
    ResourceMask stop_def_reg_mask = this_lir->u.m.def_mask->Without(kEncodeMem);

    int next_slot = 0;
    bool stop_here = false;

    /* Try to hoist the load to a good spot */
    for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
      /*
       * Skip already dead instructions (whose dataflow information is
       * outdated and misleading).
       */
      if (check_lir->flags.is_nop) {
        continue;
      }

      ResourceMask check_mem_mask = check_lir->u.m.def_mask->Intersection(kEncodeMem);
      ResourceMask alias_condition = stop_use_all_mask.Intersection(check_mem_mask);
      stop_here = false;

      /* Potential WAR alias seen - check the exact relation */
      if (!check_mem_mask.Equals(kEncodeMem) && !alias_condition.Equals(kEncodeNone)) {
        /* We can fully disambiguate Dalvik references */
        if (alias_condition.Equals(kEncodeDalvikReg)) {
          /* Must alias or partially overlap */
          if ((check_lir->flags.alias_info == this_lir->flags.alias_info) ||
              IsDalvikRegisterClobbered(this_lir, check_lir)) {
            stop_here = true;
          }
          /* Conservatively treat all heap refs as may-alias */
        } else {
          DCHECK(alias_condition.Equals(kEncodeHeapRef));
          stop_here = true;
        }
        /* Memory content may be updated. Stop looking now. */
        if (stop_here) {
          prev_inst_list[next_slot++] = check_lir;
          break;
        }
      }

      if (!stop_here) {
        stop_here = CHECK_REG_DEP(stop_use_reg_mask, stop_def_reg_mask, check_lir);
      }

      /*
       * Store the dependent instruction, or any non-pseudo (hence
       * independent) instruction, in the list.
       */
      if (stop_here || !IsPseudoLirOp(check_lir->opcode)) {
        prev_inst_list[next_slot++] = check_lir;
        if (next_slot == MAX_HOIST_DISTANCE) {
          break;
        }
      }

      /* Dependency found - stop scanning; a hoist slot is chosen below */
      if (stop_here) {
        DEBUG_OPT(DumpDependentInsnPair(check_lir, this_lir, "HOIST STOP"));
        break;
      }
    }

    /*
     * Reached the top - use head_lir as the dependent marker as all labels
     * are barriers.
     */
    if (!stop_here && next_slot < MAX_HOIST_DISTANCE) {
      prev_inst_list[next_slot++] = head_lir;
    }

    /*
     * At least one independent instruction is found. Scan in the reversed
     * direction to find a beneficial slot.
     */
    if (next_slot >= 2) {
      int first_slot = next_slot - 2;
      int slot;
      LIR* dep_lir = prev_inst_list[next_slot-1];
      /* If there is a ld-ld dependency, wait LDLD_DISTANCE cycles */
      if (!IsPseudoLirOp(dep_lir->opcode) &&
          (GetTargetInstFlags(dep_lir->opcode) & IS_LOAD)) {
        first_slot -= LDLD_DISTANCE;
      }
      /*
       * Make sure we check slot >= 0 since first_slot may be negative
       * when the loop is first entered.
       */
      for (slot = first_slot; slot >= 0; slot--) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* prev_lir = prev_inst_list[slot+1];

        /* Check the highest instruction */
        if (prev_lir->u.m.def_mask->Equals(kEncodeAll)) {
          /*
           * If the first instruction is a load, don't hoist anything
           * above it since it is unlikely to be beneficial.
           */
          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) {
            continue;
          }
          /*
           * If the remaining number of slots is less than LD_LATENCY,
           * insert the hoisted load here.
           */
          if (slot < LD_LATENCY) {
            break;
          }
        }

        // Don't look across a barrier label
        if ((prev_lir->opcode == kPseudoTargetLabel) ||
            (prev_lir->opcode == kPseudoSafepointPC) ||
            (prev_lir->opcode == kPseudoBarrier)) {
          break;
        }

        /*
         * Try to find two instructions with load/use dependency until
         * the remaining instructions are less than LD_LATENCY.
         */
        bool prev_is_load = IsPseudoLirOp(prev_lir->opcode) ? false :
            (GetTargetInstFlags(prev_lir->opcode) & IS_LOAD);
        if ((prev_is_load && (cur_lir->u.m.use_mask->Intersects(*prev_lir->u.m.def_mask))) ||
            (slot < LD_LATENCY)) {
          break;
        }
      }

      /* Found a slot to hoist to */
      if (slot >= 0) {
        LIR* cur_lir = prev_inst_list[slot];
        LIR* prev_lir = PREV_LIR(this_lir);
        UnlinkLIR(this_lir);
        /*
         * Insertion is guaranteed to succeed since cur_lir
         * is never the first LIR on the list.
         */
        InsertLIRBefore(cur_lir, this_lir);
        this_lir = prev_lir;  // Continue the loop with the next LIR.
      }
    }
  }
}

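/* Entry point: run both peephole passes over [head_lir, tail_lir) unless
 * disabled via the corresponding bits in cu_->disable_opt. */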
void Mir2Lir::ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir) {
  if (!(cu_->disable_opt & (1 << kLoadStoreElimination))) {
    ApplyLoadStoreElimination(head_lir, tail_lir);
  }
  if (!(cu_->disable_opt & (1 << kLoadHoisting))) {
    ApplyLoadHoisting(head_lir, tail_lir);
  }
}

}  // namespace art