//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
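//
// As an illustration of canonicalization 6 above, a multiply by a power of
// two such as:
//    %Y = mul i32 %X, 8
// becomes the equivalent shift:
//    %Y = shl i32 %X, 3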
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm-c/Initialization.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor ,   "Number of factorizations");
STATISTIC(NumReassoc ,  "Number of reassociations");

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
}


/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

// Return true, if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow.  This function only handles the Add and Sub opcodes.  For
// all other opcodes, the function conservatively returns false.
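//
// For example (illustrative): when "(X +nsw 100) +nsw 27" is reassociated to
// "X +nsw 127", the nsw flag can be kept because 100 + 27 does not overflow
// as a signed i32; it could not be kept if the folded constant overflowed.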
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);

  if (!CB || !CC) {
    return false;
  }

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}

/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
//
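// For example (illustrative), transform 6 rewrites "(X + 3) + (Y + 7)" as
// "(X + Y) + 10", folding the two constants into one.
//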
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          if (MaintainNoSignedWrap(I, B, C) &&
              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
            // Note: this is only valid because SimplifyBinOp doesn't look at
            // the operands to Op0.
            I.clearSubclassOptionalData();
            I.setHasNoSignedWrap(true);
          } else {
            I.clearSubclassOptionalData();
          }

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}

/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
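/// For example, And distributes over Or: "X & (Y | Z)" always equals
/// "(X & Y) | (X & Z)", so this returns true for LOp == And, ROp == Or.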
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
/// a win).  Returns the simplified value, or null if it didn't simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op

  // Factorization.
  if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    // a common term.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Does "X op' Y" always equal "Y op' X"?
    bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

    // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
      // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
      // commutative case, "(A op' B) op (C op' A)"?
      if (A == C || (InnerCommutative && A == D)) {
        if (A != C)
          std::swap(C, D);
        // Consider forming "A op' (B op D)".
        // If "B op D" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
        // If "B op D" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, A, V);
          V->takeName(&I);
          return V;
        }
      }

    // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
      // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
      // commutative case, "(A op' B) op (B op' D)"?
      if (B == D || (InnerCommutative && B == C)) {
        if (B != D)
          std::swap(C, D);
        // Consider forming "(A op C) op' B".
        // If "A op C" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
        // If "A op C" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, V, B);
          V->takeName(&I);
          return V;
        }
      }
  }

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
        // They do!  Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
        // They do!  Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  return 0;
}

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the
// instruction if the LHS is a constant zero (which is the 'negate' form).
//
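// For example (illustrative): given "%neg = sub i32 0, %X" this returns %X,
// and given the constant i32 7 it returns the folded constant i32 -7.
//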
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
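// For example (illustrative IR):
//   %sel = select i1 %cond, i32 1, i32 2
//   %add = add i32 %sel, 4
// folds to:
//   %add = select i1 %cond, i32 5, i32 6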
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return 0;
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return 0;
}


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
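/// For example (illustrative IR):
///   %phi = phi i32 [ 1, %bb0 ], [ 2, %bb1 ]
///   %r   = add i32 %phi, 10
/// becomes:
///   %r = phi i32 [ 11, %bb0 ], [ 12, %bb1 ]
///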
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return 0;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (User != &I && !I.isIdenticalTo(User))
        return 0;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    if (NonConstBB) return 0;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return 0;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (NonConstBB == I.getParent())
      return 0;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before
  // the predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                  PN->getIncomingValue(i), I.getType(),
                                  "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}

/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us
/// at the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
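/// For example (illustrative, assuming i32 is four bytes and the struct has
/// no padding): for Ty = { i32, i32, i32 } and Offset = 8, NewIndices becomes
/// [0, 2] and the returned element type is i32.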
Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
                                        SmallVectorImpl<Value*> &NewIndices) {
  if (!TD) return 0;
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}].
  Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, return null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src.  If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(Ops, TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  if (TD) {
    bool MadeChange = false;
    Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is equivalent
      // to an index of zero, so replace it with zero if it is not zero already.
      if (SeqTy->getElementType()->isSized() &&
          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);
          MadeChange = true;
        }

      if ((*I)->getType() != IntPtrTy) {
        // If we are using a wider index than needed for this platform, shrink
        // it to what we need.  If narrower, sign-extend it to what we need.
        // This explicit cast can make subsequent optimizations more obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
        MadeChange = true;
      }
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
      return 0;

    // Note that if our source is a gep chain itself, we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    if (GEPOperator *SrcGEP =
          dyn_cast<GEPOperator>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero.
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
                                          GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
  if (StrippedPtr != PtrOp &&
      StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          return Res;
        }

        if (ArrayType *XATy =
            dyn_cast<ArrayType>(StrippedPtrTy->getElementType())) {
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      Type *SrcElTy = StrippedPtrTy->getElementType();
      Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst = dyn_cast<Instruction>(GEP.getOperand(1))) {
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale taken
        // out, perform the transformation.  Note, we don't know whether Scale
        // is signed or not.  We'll use the unsigned version of division/modulo
        // operation after making sure Scale doesn't have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Idx[1] = NewIdx;
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
            Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

      // Determine how much the GEP moves the pointer.  We are guaranteed to
      // get a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is
      // a field at Offset in 'A's type.  If so, we can pull the cast through
      // the GEP.
      SmallVector<Value*, 8> NewIndices;
      Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices);

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}


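/// IsOnlyNullComparedAndFreed - Return true if every user of V (looking
/// through bitcasts and GEPs, to a limited depth) is a free call, an equality
/// comparison against null, or a lifetime intrinsic.  An illustrative pattern
/// this enables visitMalloc to delete outright:
///   %p = call i8* @malloc(i64 16)
///   %c = icmp eq i8* %p, null
///   call void @free(i8* %p)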
IsOnlyNullComparedAndFreed(Value * V,SmallVectorImpl<WeakVH> & Users,int Depth=0)1095 static bool IsOnlyNullComparedAndFreed(Value *V, SmallVectorImpl<WeakVH> &Users,
1096 int Depth = 0) {
1097 if (Depth == 8)
1098 return false;
1099
1100 for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
1101 UI != UE; ++UI) {
1102 User *U = *UI;
1103 if (isFreeCall(U)) {
1104 Users.push_back(U);
1105 continue;
1106 }
1107 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
1108 if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) {
1109 Users.push_back(ICI);
1110 continue;
1111 }
1112 }
1113 if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
1114 if (IsOnlyNullComparedAndFreed(BCI, Users, Depth+1)) {
1115 Users.push_back(BCI);
1116 continue;
1117 }
1118 }
1119 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
1120 if (IsOnlyNullComparedAndFreed(GEPI, Users, Depth+1)) {
1121 Users.push_back(GEPI);
1122 continue;
1123 }
1124 }
1125 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
1126 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
1127 II->getIntrinsicID() == Intrinsic::lifetime_end) {
1128 Users.push_back(II);
1129 continue;
1130 }
1131 }
1132 return false;
1133 }
1134 return true;
1135 }
1136
visitMalloc(Instruction & MI)1137 Instruction *InstCombiner::visitMalloc(Instruction &MI) {
1138 // If we have a malloc call which is only used in any amount of comparisons
1139 // to null and free calls, delete the calls and replace the comparisons with
1140 // true or false as appropriate.
1141 SmallVector<WeakVH, 64> Users;
1142 if (IsOnlyNullComparedAndFreed(&MI, Users)) {
1143 for (unsigned i = 0, e = Users.size(); i != e; ++i) {
1144 Instruction *I = cast_or_null<Instruction>(&*Users[i]);
1145 if (!I) continue;
1146
1147 if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
1148 ReplaceInstUsesWith(*C,
1149 ConstantInt::get(Type::getInt1Ty(C->getContext()),
1150 C->isFalseWhenEqual()));
1151 } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
1152 ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
1153 }
1154 EraseInstFromFunction(*I);
1155 }
1156 return EraseInstFromFunction(MI);
1157 }
1158 return 0;
1159 }
1160
1161
1162
visitFree(CallInst & FI)1163 Instruction *InstCombiner::visitFree(CallInst &FI) {
1164 Value *Op = FI.getArgOperand(0);
1165
1166 // free undef -> unreachable.
1167 if (isa<UndefValue>(Op)) {
1168 // Insert a new store to null because we cannot modify the CFG here.
1169 Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
1170 UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
1171 return EraseInstFromFunction(FI);
1172 }
1173
1174 // If we have 'free null' delete the instruction. This can happen in stl code
1175 // when lots of inlining happens.
1176 if (isa<ConstantPointerNull>(Op))
1177 return EraseInstFromFunction(FI);
1178
1179 return 0;
1180 }
1181
1182
1183
visitBranchInst(BranchInst & BI)1184 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
1185 // Change br (not X), label True, label False to: br X, label False, True
1186 Value *X = 0;
1187 BasicBlock *TrueDest;
1188 BasicBlock *FalseDest;
1189 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
1190 !isa<Constant>(X)) {
1191 // Swap Destinations and condition...
1192 BI.setCondition(X);
1193 BI.swapSuccessors();
1194 return &BI;
1195 }
1196
1197 // Cannonicalize fcmp_one -> fcmp_oeq
1198 FCmpInst::Predicate FPred; Value *Y;
1199 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
1200 TrueDest, FalseDest)) &&
1201 BI.getCondition()->hasOneUse())
1202 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
1203 FPred == FCmpInst::FCMP_OGE) {
1204 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
1205 Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
1206
1207 // Swap Destinations and condition.
1208 BI.swapSuccessors();
1209 Worklist.Add(Cond);
1210 return &BI;
1211 }
1212
1213 // Cannonicalize icmp_ne -> icmp_eq
1214 ICmpInst::Predicate IPred;
1215 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
1216 TrueDest, FalseDest)) &&
1217 BI.getCondition()->hasOneUse())
1218 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
1219 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
1220 IPred == ICmpInst::ICMP_SGE) {
1221 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
1222 Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
1223 // Swap Destinations and condition.
1224 BI.swapSuccessors();
1225 Worklist.Add(Cond);
1226 return &BI;
1227 }
1228
1229 return 0;
1230 }
1231
visitSwitchInst(SwitchInst & SI)1232 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
1233 Value *Cond = SI.getCondition();
1234 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
1235 if (I->getOpcode() == Instruction::Add)
1236 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1237 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
1238 unsigned NumCases = SI.getNumCases();
1239 // Skip the first item since that's the default case.
1240 for (unsigned i = 1; i < NumCases; ++i) {
1241 ConstantInt* CaseVal = SI.getCaseValue(i);
1242 Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
1243 AddRHS);
1244 assert(isa<ConstantInt>(NewCaseVal) &&
1245 "Result of expression should be constant");
1246 SI.setSuccessorValue(i, cast<ConstantInt>(NewCaseVal));
1247 }
1248 SI.setCondition(I->getOperand(0));
1249 Worklist.Add(I);
1250 return &SI;
1251 }
1252 }
1253 return 0;
1254 }
1255
visitExtractValueInst(ExtractValueInst & EV)1256 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
1257 Value *Agg = EV.getAggregateOperand();
1258
1259 if (!EV.hasIndices())
1260 return ReplaceInstUsesWith(EV, Agg);
1261
1262 if (Constant *C = dyn_cast<Constant>(Agg)) {
1263 if (isa<UndefValue>(C))
1264 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
1265
1266 if (isa<ConstantAggregateZero>(C))
1267 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
1268
1269 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
1270 // Extract the element indexed by the first index out of the constant
1271 Value *V = C->getOperand(*EV.idx_begin());
1272 if (EV.getNumIndices() > 1)
1273 // Extract the remaining indices out of the constant indexed by the
1274 // first index
1275 return ExtractValueInst::Create(V, EV.getIndices().slice(1));
1276 else
1277 return ReplaceInstUsesWith(EV, V);
1278 }
1279 return 0; // Can't handle other constants
1280 }
1281 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
1282 // We're extracting from an insertvalue instruction, compare the indices
1283 const unsigned *exti, *exte, *insi, *inse;
1284 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
1285 exte = EV.idx_end(), inse = IV->idx_end();
1286 exti != exte && insi != inse;
1287 ++exti, ++insi) {
1288 if (*insi != *exti)
1289 // The insert and extract both reference distinctly different elements.
1290 // This means the extract is not influenced by the insert, and we can
1291 // replace the aggregate operand of the extract with the aggregate
1292 // operand of the insert. i.e., replace
1293 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
1294 // %E = extractvalue { i32, { i32 } } %I, 0
1295 // with
1296 // %E = extractvalue { i32, { i32 } } %A, 0
1297 return ExtractValueInst::Create(IV->getAggregateOperand(),
1298 EV.getIndices());
1299 }
1300 if (exti == exte && insi == inse)
1301 // Both iterators are at the end: Index lists are identical. Replace
1302 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
1303 // %C = extractvalue { i32, { i32 } } %B, 1, 0
1304 // with "i32 42"
1305 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
1306 if (exti == exte) {
1307 // The extract list is a prefix of the insert list. i.e. replace
1308 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
1309 // %E = extractvalue { i32, { i32 } } %I, 1
1310 // with
1311 // %X = extractvalue { i32, { i32 } } %A, 1
1312 // %E = insertvalue { i32 } %X, i32 42, 0
1313 // by switching the order of the insert and extract (though the
1314 // insertvalue should be left in, since it may have other uses).
1315 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
1316 EV.getIndices());
1317 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
1318 makeArrayRef(insi, inse));
1319 }
1320 if (insi == inse)
1321 // The insert list is a prefix of the extract list
1322 // We can simply remove the common indices from the extract and make it
1323 // operate on the inserted value instead of the insertvalue result.
1324 // i.e., replace
1325 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
1326 // %E = extractvalue { i32, { i32 } } %I, 1, 0
1327 // with
1328 // %E extractvalue { i32 } { i32 42 }, 0
1329 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
1330 makeArrayRef(exti, exte));
1331 }
1332 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
1333 // We're extracting from an intrinsic, see if we're the only user, which
1334 // allows us to simplify multiple result intrinsics to simpler things that
1335 // just get one value.
1336 if (II->hasOneUse()) {
1337 // Check if we're grabbing the overflow bit or the result of a 'with
1338 // overflow' intrinsic. If it's the latter we can remove the intrinsic
1339 // and replace it with a traditional binary instruction.
1340 switch (II->getIntrinsicID()) {
1341 case Intrinsic::uadd_with_overflow:
1342 case Intrinsic::sadd_with_overflow:
1343 if (*EV.idx_begin() == 0) { // Normal result.
1344 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1345 ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1346 EraseInstFromFunction(*II);
1347 return BinaryOperator::CreateAdd(LHS, RHS);
1348 }
1349
1350 // If the normal result of the add is dead, and the RHS is a constant,
1351 // we can transform this into a range comparison.
1352 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3
1353 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
1354 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
1355 return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
1356 ConstantExpr::getNot(CI));
1357 break;
1358 case Intrinsic::usub_with_overflow:
1359 case Intrinsic::ssub_with_overflow:
1360 if (*EV.idx_begin() == 0) { // Normal result.
1361 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1362 ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1363 EraseInstFromFunction(*II);
1364 return BinaryOperator::CreateSub(LHS, RHS);
1365 }
1366 break;
1367 case Intrinsic::umul_with_overflow:
1368 case Intrinsic::smul_with_overflow:
1369 if (*EV.idx_begin() == 0) { // Normal result.
1370 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
1371 ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
1372 EraseInstFromFunction(*II);
1373 return BinaryOperator::CreateMul(LHS, RHS);
1374 }
1375 break;
1376 default:
1377 break;
1378 }
1379 }
1380 }
1381 if (LoadInst *L = dyn_cast<LoadInst>(Agg))
1382 // If the (non-volatile) load only has one use, we can rewrite this to a
1383 // load from a GEP. This reduces the size of the load.
1384 // FIXME: If a load is used only by extractvalue instructions then this
1385 // could be done regardless of having multiple uses.
    if (L->isSimple() && L->hasOneUse()) {
      // extractvalue has integer indices, getelementptr has Value*s. Convert.
      SmallVector<Value*, 4> Indices;
      // Prefix an i32 0 since we need the first element.
      Indices.push_back(Builder->getInt32(0));
      for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
           I != E; ++I)
        Indices.push_back(Builder->getInt32(*I));

      // We need to insert these at the location of the old load, not at that
      // of the extractvalue.
      Builder->SetInsertPoint(L->getParent(), L);
      Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices);
      // Returning the load directly will cause the main loop to insert it in
      // the wrong spot, so use ReplaceInstUsesWith().
      return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));
    }
  // We could simplify extracts from other values. Note that nested extracts may
  // already be simplified implicitly by the above: extract (extract (insert) )
  // will be translated into extract ( insert ( extract ) ) first and then just
  // the value inserted, if appropriate. Similarly for extracts from single-use
  // loads: extract (extract (load)) will be translated to extract (load (gep))
  // and if again single-use then via load (gep (gep)) to load (gep).
  // However, double extracts from e.g. function arguments or return values
  // aren't handled yet.
  return 0;
}

enum Personality_Type {
  Unknown_Personality,
  GNU_Ada_Personality,
  GNU_CXX_Personality
};

/// RecognizePersonality - See if the given exception handling personality
/// function is one that we understand.  If so, return a description of it;
/// otherwise return Unknown_Personality.
static Personality_Type RecognizePersonality(Value *Pers) {
  Function *F = dyn_cast<Function>(Pers->stripPointerCasts());
  if (!F)
    return Unknown_Personality;
  return StringSwitch<Personality_Type>(F->getName())
    .Case("__gnat_eh_personality", GNU_Ada_Personality)
    .Case("__gxx_personality_v0", GNU_CXX_Personality)
    .Default(Unknown_Personality);
}

/// isCatchAll - Return 'true' if the given typeinfo will match anything.
static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) {
  switch (Personality) {
  case Unknown_Personality:
    return false;
  case GNU_Ada_Personality:
    // While __gnat_all_others_value will match any Ada exception, it doesn't
    // match foreign exceptions (or didn't, before gcc-4.7).
    return false;
  case GNU_CXX_Personality:
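    // In the C++ personality a null typeinfo represents catch (...), i.e. a
    // handler that matches any exception.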
    return TypeInfo->isNullValue();
  }
  llvm_unreachable("Unknown personality!");
}

static bool shorter_filter(const Value *LHS, const Value *RHS) {
  return
    cast<ArrayType>(LHS->getType())->getNumElements()
    <
    cast<ArrayType>(RHS->getType())->getNumElements();
}

Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) {
  // The logic here should be correct for any real-world personality function.
  // However, if that turns out not to be true, the offending logic can always
  // be conditioned on the personality function, like the catch-all logic is.
  Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn());

  // Simplify the list of clauses, e.g. by removing repeated catch clauses
  // (these are often created by inlining).
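  // For example, inlining can produce a landingpad whose clause list reads
  //   catch i8* @typeinfoA
  //   catch i8* @typeinfoA
  // (the name is illustrative); the second clause can never be selected and
  // is dropped.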
  bool MakeNewInstruction = false;   // If true, recreate using the following:
  SmallVector<Value *, 16> NewClauses; // - Clauses for the new instruction;
  bool CleanupFlag = LI.isCleanup();   // - The new instruction is a cleanup.

  SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already.
  for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) {
    bool isLastClause = i + 1 == e;
    if (LI.isCatch(i)) {
      // A catch clause.
      Value *CatchClause = LI.getClause(i);
      Constant *TypeInfo = cast<Constant>(CatchClause->stripPointerCasts());

      // If we already saw this clause, there is no point in having a second
      // copy of it.
      if (AlreadyCaught.insert(TypeInfo)) {
        // This catch clause was not already seen.
        NewClauses.push_back(CatchClause);
      } else {
        // Repeated catch clause - drop the redundant copy.
        MakeNewInstruction = true;
      }

      // If this is a catch-all then there is no point in keeping any following
      // clauses or marking the landingpad as having a cleanup.
      if (isCatchAll(Personality, TypeInfo)) {
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }
    } else {
      // A filter clause.  If any of the filter elements were already caught
      // then they can be dropped from the filter.  It is tempting to try to
      // exploit the filter further by saying that any typeinfo that does not
      // occur in the filter can't be caught later (and thus can be dropped).
      // However this would be wrong, since typeinfos can match without being
      // equal (for example if one represents a C++ class, and the other some
      // class derived from it).
      assert(LI.isFilter(i) && "Unsupported landingpad clause!");
      Value *FilterClause = LI.getClause(i);
      ArrayType *FilterType = cast<ArrayType>(FilterClause->getType());
      unsigned NumTypeInfos = FilterType->getNumElements();

      // An empty filter catches everything, so there is no point in keeping
      // any following clauses or marking the landingpad as having a cleanup.
      // By dealing with this case here the following code is made a bit
      // simpler.
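      // (In IR an empty filter is typically written
      // "filter [0 x i8*] zeroinitializer".)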
      if (!NumTypeInfos) {
        NewClauses.push_back(FilterClause);
        if (!isLastClause)
          MakeNewInstruction = true;
        CleanupFlag = false;
        break;
      }

      bool MakeNewFilter = false; // If true, make a new filter.
      SmallVector<Constant *, 16> NewFilterElts; // New elements.
      if (isa<ConstantAggregateZero>(FilterClause)) {
        // Not an empty filter - it contains at least one null typeinfo.
        assert(NumTypeInfos > 0 && "Should have handled empty filter already!");
        Constant *TypeInfo =
          Constant::getNullValue(FilterType->getElementType());
        // If this typeinfo is a catch-all then the filter can never match.
        if (isCatchAll(Personality, TypeInfo)) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // There is no point in having multiple copies of this typeinfo, so
        // discard all but the first copy if there is more than one.
        NewFilterElts.push_back(TypeInfo);
        if (NumTypeInfos > 1)
          MakeNewFilter = true;
      } else {
        ConstantArray *Filter = cast<ConstantArray>(FilterClause);
        SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements.
        NewFilterElts.reserve(NumTypeInfos);

        // Remove any filter elements that were already caught or that already
        // occurred in the filter.  While there, see if any of the elements are
        // catch-alls.  If so, the filter can be discarded.
        bool SawCatchAll = false;
        for (unsigned j = 0; j != NumTypeInfos; ++j) {
          Value *Elt = Filter->getOperand(j);
          Constant *TypeInfo = cast<Constant>(Elt->stripPointerCasts());
          if (isCatchAll(Personality, TypeInfo)) {
            // This element is a catch-all.  Bail out, noting this fact.
            SawCatchAll = true;
            break;
          }
          if (AlreadyCaught.count(TypeInfo))
            // Already caught by an earlier clause, so having it in the filter
            // is pointless.
            continue;
          // There is no point in having multiple copies of the same typeinfo
          // in a filter, so only add it if we didn't already.
          if (SeenInFilter.insert(TypeInfo))
            NewFilterElts.push_back(cast<Constant>(Elt));
        }
        // A filter containing a catch-all cannot match anything by definition.
        if (SawCatchAll) {
          // Throw the filter away.
          MakeNewInstruction = true;
          continue;
        }

        // If we dropped something from the filter, make a new one.
        if (NewFilterElts.size() < NumTypeInfos)
          MakeNewFilter = true;
      }
      if (MakeNewFilter) {
        FilterType = ArrayType::get(FilterType->getElementType(),
                                    NewFilterElts.size());
        FilterClause = ConstantArray::get(FilterType, NewFilterElts);
        MakeNewInstruction = true;
      }

      NewClauses.push_back(FilterClause);

      // If the new filter is empty then it will catch everything so there is
      // no point in keeping any following clauses or marking the landingpad
      // as having a cleanup.  The case of the original filter being empty was
      // already handled above.
      if (MakeNewFilter && !NewFilterElts.size()) {
        assert(MakeNewInstruction && "New filter but not a new instruction!");
        CleanupFlag = false;
        break;
      }
    }
  }

  // If several filters occur in a row then reorder them so that the shortest
  // filters come first (those with the smallest number of elements).  This is
  // advantageous because shorter filters are more likely to match, speeding up
  // unwinding, but mostly because it increases the effectiveness of the other
  // filter optimizations below.
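  // For example, filters with 3, 1 and 2 elements are reordered to 1, 2, 3;
  // shortest-first order also lets the subset rule below remove a longer
  // filter that repeats a shorter one's elements.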
  for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) {
    unsigned j;
    // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters.
    for (j = i; j != e; ++j)
      if (!isa<ArrayType>(NewClauses[j]->getType()))
        break;

    // Check whether the filters are already sorted by length.  We need to know
    // if sorting them is actually going to do anything so that we only make a
    // new landingpad instruction if it does.
    for (unsigned k = i; k + 1 < j; ++k)
      if (shorter_filter(NewClauses[k+1], NewClauses[k])) {
        // Not sorted, so sort the filters now.  Doing an unstable sort would
        // be correct too but reordering filters pointlessly might confuse
        // users.
        std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j,
                         shorter_filter);
        MakeNewInstruction = true;
        break;
      }

    // Look for the next batch of filters.
    i = j + 1;
  }

  // If typeinfos matched if and only if equal, then the elements of a filter L
  // that occurs later than a filter F could be replaced by the intersection of
  // the elements of F and L.  In reality two typeinfos can match without being
  // equal (for example if one represents a C++ class, and the other some class
  // derived from it) so it would be wrong to perform this transform in general.
  // However the transform is correct and useful if F is a subset of L.  In
  // that case L can be replaced by F, and thus removed altogether since
  // repeating a filter is pointless.  So here we look at all pairs of filters
  // F and L where L follows F in the list of clauses, and remove L if every
  // element of F is an element of L.  This can occur when inlining C++
  // functions with exception specifications.
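  // For example, given
  //   filter [1 x i8*] [i8* @typeinfoA]
  //   filter [2 x i8*] [i8* @typeinfoA, i8* @typeinfoB]
  // (illustrative names), every element of the first filter occurs in the
  // second, so the second can never be reached and is deleted.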
  for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) {
    // Examine each filter in turn.
    Value *Filter = NewClauses[i];
    ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType());
    if (!FTy)
      // Not a filter - skip it.
      continue;
    unsigned FElts = FTy->getNumElements();
    // Examine each filter following this one.  Doing this backwards means that
    // we don't have to worry about filters disappearing under us when removed.
    for (unsigned j = NewClauses.size() - 1; j != i; --j) {
      Value *LFilter = NewClauses[j];
      ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType());
      if (!LTy)
        // Not a filter - skip it.
        continue;
      // If Filter is a subset of LFilter, i.e. every element of Filter is also
      // an element of LFilter, then discard LFilter.
      SmallVector<Value *, 16>::iterator J = NewClauses.begin() + j;
      // If Filter is empty then it is a subset of LFilter.
      if (!FElts) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
        // Move on to the next filter.
        continue;
      }
      unsigned LElts = LTy->getNumElements();
      // If Filter is longer than LFilter then it cannot be a subset of it.
      if (FElts > LElts)
        // Move on to the next filter.
        continue;
      // At this point we know that LFilter has at least one element.
      if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros.
        // Filter is a subset of LFilter iff Filter contains only zeros (as we
        // already know that Filter is not longer than LFilter).
        if (isa<ConstantAggregateZero>(Filter)) {
          assert(FElts <= LElts && "Should have handled this case earlier!");
          // Discard LFilter.
          NewClauses.erase(J);
          MakeNewInstruction = true;
        }
        // Move on to the next filter.
        continue;
      }
      ConstantArray *LArray = cast<ConstantArray>(LFilter);
      if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros.
        // Since Filter is non-empty and contains only zeros, it is a subset of
        // LFilter iff LFilter contains a zero.
        assert(FElts > 0 && "Should have eliminated the empty filter earlier!");
        for (unsigned l = 0; l != LElts; ++l)
          if (LArray->getOperand(l)->isNullValue()) {
            // LFilter contains a zero - discard it.
            NewClauses.erase(J);
            MakeNewInstruction = true;
            break;
          }
        // Move on to the next filter.
        continue;
      }
      // At this point we know that both filters are ConstantArrays.  Loop over
      // operands to see whether every element of Filter is also an element of
      // LFilter.  Since filters tend to be short this is probably faster than
      // using a method that scales nicely.
      ConstantArray *FArray = cast<ConstantArray>(Filter);
      bool AllFound = true;
      for (unsigned f = 0; f != FElts; ++f) {
        Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts();
        AllFound = false;
        for (unsigned l = 0; l != LElts; ++l) {
          Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts();
          if (LTypeInfo == FTypeInfo) {
            AllFound = true;
            break;
          }
        }
        if (!AllFound)
          break;
      }
      if (AllFound) {
        // Discard LFilter.
        NewClauses.erase(J);
        MakeNewInstruction = true;
      }
      // Move on to the next filter.
    }
  }

  // If we changed any of the clauses, replace the old landingpad instruction
  // with a new one.
  if (MakeNewInstruction) {
    LandingPadInst *NLI = LandingPadInst::Create(LI.getType(),
                                                 LI.getPersonalityFn(),
                                                 NewClauses.size());
    for (unsigned i = 0, e = NewClauses.size(); i != e; ++i)
      NLI->addClause(NewClauses[i]);
    // A landing pad with no clauses must have the cleanup flag set.  It is
    // theoretically possible, though highly unlikely, that we eliminated all
    // clauses.  If so, force the cleanup flag to true.
    if (NewClauses.empty())
      CleanupFlag = true;
    NLI->setCleanup(CleanupFlag);
    return NLI;
  }

  // Even if none of the clauses changed, we may nonetheless have understood
  // that the cleanup flag is pointless.  Clear it if so.
  if (LI.isCleanup() != CleanupFlag) {
    assert(!CleanupFlag && "Adding a cleanup, not removing one?!");
    LI.setCleanup(CleanupFlag);
    return &LI;
  }

  return 0;
}

/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if
/// it's safe to move the instruction past all of the instructions between it
/// and the end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving instructions, or instructions with
  // side effects such as volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() ||
      isa<TerminatorInst>(I))
    return false;

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())
    return false;

  // We can only sink load instructions if there is nothing between the load
  // and the end of block that could change the value.
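  // For example, a load cannot be sunk past a later store or call in its
  // block that might write to the loaded address.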
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
         Scan != E; ++Scan)
      if (Scan->mayWriteToMemory())
        return false;
  }

  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
  I->moveBefore(InsertPos);
  ++NumSunkInst;
  return true;
}

/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid
/// adding them to the worklist (this significantly speeds up instcombine on
/// code where many instructions are dead or constant).  Additionally, if we
/// find a branch whose condition is a known constant, we only visit the
/// reachable successors.
///
static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
  DenseMap<ConstantExpr*, Constant*> FoldedConstants;

  do {
    BB = Worklist.pop_back_val();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        ++NumDeadInst;
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();
        continue;
      }

      // Constant-fold the instruction if it is trivially constant.
      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
        if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
          DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
                       << *Inst << '\n');
          Inst->replaceAllUsesWith(C);
          ++NumConstProp;
          Inst->eraseFromParent();
          continue;
        }

      if (TD) {
        // See if we can constant fold its operands.
        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
             i != e; ++i) {
          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
          if (CE == 0) continue;

          Constant*& FoldRes = FoldedConstants[CE];
          if (!FoldRes)
            FoldRes = ConstantFoldConstantExpression(CE, TD);
          if (!FoldRes)
            FoldRes = CE;

          if (FoldRes != CE) {
            *i = FoldRes;
            MadeIRChange = true;
          }
        }
      }

      InstrsForInstCombineWorklist.push_back(Inst);
    }

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
        continue;
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if the condition selects an explicit case; otherwise the branch
        // goes to the default destination (successor 0).  Note that a plain
        // 'continue' inside the scanning loop would only continue that loop,
        // so compute the single reachable successor first and push it once.
        BasicBlock *ReachableBB = SI->getSuccessor(0);
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            ReachableBB = SI->getSuccessor(i);
            break;
          }
        Worklist.push_back(ReachableBB);
        continue;
      }
    }

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order.  This way instcombine will visit from the top
  // of the function down.  This jibes well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus
  // avoiding some N^2 behavior in pathological cases.
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;
}

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getNameStr() << "\n");

  {
    // Do a depth-first traversal of the function, populating the worklist with
    // the reachable instructions.  Ignore blocks that are not reachable.  Keep
    // track of which blocks we visit.
    SmallPtrSet<BasicBlock*, 64> Visited;
    MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

    // Do a quick scan over the function.  If we find any blocks that are
    // unreachable, remove any instructions inside of them.  This prevents
    // the instcombine code from having to deal with some bad special cases.
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      if (Visited.count(BB)) continue;

      // Delete the instructions backwards, as this reduces the number of
      // def-use and use-def chains that need updating.
      Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
      while (EndInst != BB->begin()) {
        // Delete the next to last instruction.
        BasicBlock::iterator I = EndInst;
        Instruction *Inst = --I;
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
        if (isa<LandingPadInst>(Inst)) {
          EndInst = Inst;
          continue;
        }
        if (!isa<DbgInfoIntrinsic>(Inst)) {
          ++NumDeadInst;
          MadeIRChange = true;
        }
        Inst->eraseFromParent();
      }
    }
  }

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      ++NumDeadInst;
      MadeIRChange = true;
      continue;
    }

    // Instruction isn't dead, see if we can constant propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        ++NumConstProp;
        EraseInstFromFunction(*I);
        MadeIRChange = true;
        continue;
      }

    // See if we can trivially sink this instruction to a successor basic
    // block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
      else
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;
            break;
          }

        // If the user is one of our immediate successors, and if that
        // successor only has us as a predecessor (we'd have to split the
        // critical edge otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getSinglePredecessor())
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);
      }
    }

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I->getParent(), I);
    Builder->SetCurrentDebugLocation(I->getDebugLoc());

#ifndef NDEBUG
    std::string OrigI;
#endif
    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      ++NumCombined;
      // Should we replace the old instruction with a new one?
      if (Result != I) {
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        if (!I->getDebugLoc().isUnknown())
          Result->setDebugLoc(I->getDebugLoc());
        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        // If we replace a PHI with something that isn't a PHI, fix up the
        // insertion point.
        if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos))
          InsertPos = InstParent->getFirstInsertionPt();

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);
      } else {
#ifndef NDEBUG
        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');
#endif

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
        } else {
          Worklist.Add(I);
          Worklist.AddUsersToWorkList(*I);
        }
      }
      MadeIRChange = true;
    }
  }

  Worklist.Zap();
  return MadeIRChange;
}

bool InstCombiner::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<true, TargetFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), TargetFolder(TD),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Lower dbg.declare intrinsics, otherwise their value may be clobbered
  // by the instcombiner.
  EverMadeChange = LowerDbgDeclare(F);

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  Builder = 0;
  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}