//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/GlobalOpt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");

/// Is this global variable possibly used by a leak checker as a root?  If so,
/// we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.
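  //
  // For example (illustrative, not exhaustive): a global of type {i32, i8*}
  // is a root because of the pointer member, and a global of type [16 x i8]
  // may be a root if the front-end lowered a union containing a pointer to a
  // byte array.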

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(GV->getValueType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
      default: break;
      case Type::PointerTyID: return true;
      case Type::ArrayTyID:
      case Type::VectorTyID: {
        SequentialType *STy = cast<SequentialType>(Ty);
        Types.push_back(STy->getElementType());
        break;
      }
      case Type::StructTyID: {
        StructType *STy = cast<StructType>(Ty);
        if (STy->isOpaque()) return true;
        for (StructType::element_iterator I = STy->element_begin(),
                 E = STy->element_end(); I != E; ++I) {
          Type *InnerTy = *I;
          if (isa<PointerType>(InnerTy)) return true;
          if (isa<CompositeType>(InnerTy))
            Types.push_back(InnerTy);
        }
        break;
      }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (1);
}
/// This GV is a pointer root.  Loop over all users of the global and clean up
/// any that obviously assign the global a value that is not dynamically
/// allocated.
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals.  To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.
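  //
  // Illustrative IR we must preserve: if the only store to the global is
  //   %p = call i8* @malloc(i64 42)
  //   store i8* %p, i8** @singleton
  // then deleting the store would make a leak checker report the allocation
  // as leaked, even though the program follows this convention.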

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
    }
  }

  return Changed;
}

/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout &DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getResultElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}

/// Return true if the specified instruction is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (!GEPI) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (User *U : GEPI->users())
    if (!isSafeSROAElementUse(U))
      return false;
  return true;
}


/// U is a direct user of the specified global value.  Look at it and its uses
/// and decide whether it is safe to SROA this global.
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
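  //
  // Illustrative accepted form (constant first and second indices):
  //   getelementptr {i32, [4 x float]}, {i32, [4 x float]}* @GV, i32 0, i32 1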
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI; // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (User *UU : U->users())
    if (!isSafeSROAElementUse(UU))
      return false;

  return true;
}

/// Look at all uses of the global and decide whether it is safe for us to
/// perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users())
    if (!IsUserOfGlobalSafeForSRA(U, GV))
      return false;

  return true;
}


/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way.  We have determined that this
/// transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
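///
/// Illustrative example: @G = internal global {i32, float} {i32 1, float 2.0}
/// is split into @G.0 = internal global i32 1 and @G.1 = internal global
/// float 2.0, and every 'gep @G, 0, i, ...' is rewritten in terms of @G.i.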
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return nullptr; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  Constant *NullInt =
      Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore operand 1, which has to be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];
    Type *NewTy = NewGlobals[Val]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
            ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}

/// Return true if all users of the specified value will trap if the value is
/// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users())
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }

  return true;
}

/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null.  Note that this also permits comparisons of the loaded value
/// against null, as a special case.
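///
/// Illustrative example: if every use of '%fp = load void()*, void()** @G' is
/// either 'call void %fp()' or 'icmp eq void()* %fp, null', this returns true,
/// because the call traps whenever %fp is null.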
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(nullptr, NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}


/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that they cannot be reachable with a null
/// value, so we can optimize away the load.
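///
/// Illustrative example: if the only store to @G writes the non-null function
/// @F, a 'call (load @G)()' would trap when the loaded value is null, so the
/// load can be folded to @F and the call devirtualized to 'call @F()'.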
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout &DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
    }
    if (GV->use_empty()) {
      DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and
/// rewrite any loads of GV as uses of the new global.
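///
/// Illustrative before/after sketch:
///   @G = internal global i32* null          @G.body = internal global i32 undef
///   store i32* (malloc), i32** @G      -->  (store deleted)
///   %p = load i32*, i32** @G                uses of %p become uses of @G.body
/// If the pointer is ever compared against null, a companion boolean global
/// (@G.init) is introduced to track whether the "allocation" has happened.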
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getValueType())
    RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
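  // E.g. (illustrative) 'icmp ne (load @G), null' becomes a load of the
  // boolean instead: 'load i1, i1* @G.init'.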
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSynchScope(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// Scan the use-list of V checking to make sure that there are no complex uses
/// of V.  We permit simple things like dereferencing the pointer, but not
/// storing through the address, unless it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// The Alloc pointer is stored into GV somewhere.  Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV.  This assumes that the values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
/// perform heap SRA on.  This permits GEP's that index through the array and
/// struct field, icmps of null, and PHIs.
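///
/// Illustrative accepted uses of '%p = load %pair*, %pair** @G' (where %pair
/// is a hypothetical struct type):
///   icmp eq %pair* %p, null
///   getelementptr %pair, %pair* %p, i32 %i, i32 1
///   phi nodes whose own uses are again of these forms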
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}


/// If all users of values loaded from GV are simple enough to perform HeapSRA,
/// return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or StoredVal itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
      PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                     PN->getNumIncomingValues(),
                     PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
             DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// We are performing Heap SRoA on a global.  Ptr is a value loaded from the
/// global.  Eliminate all uses of Ptr, making them use FieldGlobals instead.
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// CI is an allocation of an array of structures.  Break it up into multiple
/// allocations of arrays of the fields.
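///
/// Illustrative example: a '%p = malloc(N * sizeof({i32, double}))' stored
/// into @G becomes two allocations, one of N x i32 and one of N x double,
/// whose results are stored into new globals @G.f0 and @G.f1.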
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  SmallVector<OperandBundleDef, 1> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;
       ++FieldNo) {
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    NGV->copyAttributesFrom(GV);
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, OpBundles, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
1324                                          Cmp, NullPtrBlock);
1325 
1326     // Fill in FreeBlock.
1327     CallInst::CreateFree(GVVal, OpBundles, BI);
1328     new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
1329                   FreeBlock);
1330     BranchInst::Create(NextBlock, FreeBlock);
1331 
1332     NullPtrBlock = NextBlock;
1333   }
1334 
1335   BranchInst::Create(ContBB, NullPtrBlock);
1336 
1337   // CI is no longer needed, remove it.
1338   CI->eraseFromParent();
1339 
1340   /// As we process loads, if we can't immediately update all uses of the load,
1341   /// keep track of what scalarized loads are inserted for a given load.
1342   DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
1343   InsertedScalarizedValues[GV] = FieldGlobals;
1344 
1345   std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;
1346 
1347   // Okay, the malloc site is completely handled.  All of the uses of GV are now
1348   // loads, and all uses of those loads are simple.  Rewrite them to use loads
1349   // of the per-field globals instead.
1350   for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
1351     Instruction *User = cast<Instruction>(*UI++);
1352 
1353     if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1354       RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
1355       continue;
1356     }
1357 
1358     // Must be a store of null.
1359     StoreInst *SI = cast<StoreInst>(User);
1360     assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
1361            "Unexpected heap-sra user!");
1362 
1363     // Insert a store of null into each global.
1364     for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
1365       Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
1366       Constant *Null = Constant::getNullValue(ValTy);
1367       new StoreInst(Null, FieldGlobals[i], SI);
1368     }
1369     // Erase the original store.
1370     SI->eraseFromParent();
1371   }
1372 
1373   // While we have PHIs that are interesting to rewrite, do it.
1374   while (!PHIsToRewrite.empty()) {
1375     PHINode *PN = PHIsToRewrite.back().first;
1376     unsigned FieldNo = PHIsToRewrite.back().second;
1377     PHIsToRewrite.pop_back();
1378     PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
1379     assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");
1380 
1381     // Add all the incoming values.  This can materialize more phis.
1382     for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1383       Value *InVal = PN->getIncomingValue(i);
1384       InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
1385                                PHIsToRewrite);
1386       FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
1387     }
1388   }
1389 
1390   // Drop all inter-phi links and any loads that made it this far.
1391   for (DenseMap<Value*, std::vector<Value*> >::iterator
1392        I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1393        I != E; ++I) {
1394     if (PHINode *PN = dyn_cast<PHINode>(I->first))
1395       PN->dropAllReferences();
1396     else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1397       LI->dropAllReferences();
1398   }
1399 
1400   // Delete all the phis and loads now that inter-references are dead.
1401   for (DenseMap<Value*, std::vector<Value*> >::iterator
1402        I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
1403        I != E; ++I) {
1404     if (PHINode *PN = dyn_cast<PHINode>(I->first))
1405       PN->eraseFromParent();
1406     else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
1407       LI->eraseFromParent();
1408   }
1409 
1410   // The old global is now dead, remove it.
1411   GV->eraseFromParent();
1412 
1413   ++NumHeapSRA;
1414   return cast<GlobalVariable>(FieldGlobals[0]);
1415 }
1416 
1417 /// This function is called when we see a pointer global variable with a single
1418 /// value stored into it that is a malloc or a cast of a malloc.
1419 static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
1420                                                Type *AllocTy,
1421                                                AtomicOrdering Ordering,
1422                                                const DataLayout &DL,
1423                                                TargetLibraryInfo *TLI) {
1424   // If this is a malloc of an abstract type, don't touch it.
1425   if (!AllocTy->isSized())
1426     return false;
1427 
1428   // We can't optimize this global unless all uses of it are *known* to be
1429   // of the malloc value, not of the null initializer value (consider a use
1430   // that compares the global's value against zero to see if the malloc has
1431   // been reached).  To do this, we check to see if all uses of the global
1432   // would trap if the global were null: this proves that they must all
1433   // happen after the malloc.
1434   if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
1435     return false;
1436 
1437   // We can't optimize this if the malloc itself is used in a complex way,
1438   // for example, being stored into multiple globals.  This check allows the
1439   // malloc to be stored into the specified global, loaded, icmp'd, and
1440   // GEP'd.  These are all uses we can rewrite to go through the global
1441   // instead.
1442   SmallPtrSet<const PHINode*, 8> PHIs;
1443   if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
1444     return false;
1445 
1446   // If we have a global that is only initialized with a fixed size malloc,
1447   // transform the program to use global memory instead of malloc'd memory.
1448   // This eliminates dynamic allocation, avoids an indirection accessing the
1449   // data, and exposes the resultant global to further GlobalOpt.
1450   // We cannot optimize the malloc if we cannot determine malloc array size.
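  // A sketch of the intended end state (illustrative IR, names hypothetical):
  //   @G = internal global i32* null    ; its only store is the malloc result
  // becomes static storage, with uses of the loaded pointer redirected to it:
  //   @G.body = internal global i32 undef
  // A separate i1 flag global may be introduced to answer null comparisons.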
1451   Value *NElems = getMallocArraySize(CI, DL, TLI, true);
1452   if (!NElems)
1453     return false;
1454 
1455   if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1456     // Restrict this transformation to only working on small allocations
1457     // (2048 bytes currently), as we don't want to introduce a 16M global or
1458     // something.
1459     if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
1460       OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
1461       return true;
1462     }
1463 
1464   // If the allocation is an array of structures, consider transforming this
1465   // into multiple malloc'd arrays, one for each field.  This is basically
1466   // SRoA for malloc'd memory.
1467 
1468   if (Ordering != AtomicOrdering::NotAtomic)
1469     return false;
1470 
1471   // If this is an allocation of a fixed size array of structs, analyze as a
1472   // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
1473   if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1474     if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1475       AllocTy = AT->getElementType();
1476 
1477   StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1478   if (!AllocSTy)
1479     return false;
1480 
1481   // If the structure has an unreasonable number of fields, leave it
1482   // alone.
1483   if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1484       AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
1485 
1486     // If this is a fixed size array, transform the Malloc to be an alloc of
1487     // structs.  malloc [100 x struct],1 -> malloc struct, 100
1488     if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
1489       Type *IntPtrTy = DL.getIntPtrType(CI->getType());
1490       unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
1491       Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1492       Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1493       SmallVector<OperandBundleDef, 1> OpBundles;
1494       CI->getOperandBundlesAsDefs(OpBundles);
1495       Instruction *Malloc =
1496           CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
1497                                  OpBundles, nullptr, CI->getName());
1498       Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1499       CI->replaceAllUsesWith(Cast);
1500       CI->eraseFromParent();
1501       if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
1502         CI = cast<CallInst>(BCI->getOperand(0));
1503       else
1504         CI = cast<CallInst>(Malloc);
1505     }
1506 
1507     PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
1508                          TLI);
1509     return true;
1510   }
1511 
1512   return false;
1513 }
1514 
1515 // Try to optimize globals based on the knowledge that only one value (besides
1516 // its initializer) is ever stored to the global.
1517 static bool optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1518                                      AtomicOrdering Ordering,
1519                                      const DataLayout &DL,
1520                                      TargetLibraryInfo *TLI) {
1521   // Ignore no-op GEPs and bitcasts.
1522   StoredOnceVal = StoredOnceVal->stripPointerCasts();
1523 
1524   // If we are dealing with a pointer global that is initialized to null and
1525   // only has one (non-null) value stored into it, then we can optimize any
1526   // users of the loaded value (often calls and loads) that would trap if the
1527   // value was null.
1528   if (GV->getInitializer()->getType()->isPointerTy() &&
1529       GV->getInitializer()->isNullValue()) {
1530     if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1531       if (GV->getInitializer()->getType() != SOVC->getType())
1532         SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1533 
1534       // Optimize away any trapping uses of the loaded value.
1535       if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
1536         return true;
1537     } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
1538       Type *MallocType = getMallocAllocatedType(CI, TLI);
1539       if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
1540                                                            Ordering, DL, TLI))
1541         return true;
1542     }
1543   }
1544 
1545   return false;
1546 }
1547 
1548 /// At this point, we have learned that the only two values ever stored into GV
1549 /// are its initializer and OtherVal.  See if we can shrink the global into a
1550 /// boolean and select between the two values whenever it is used.  This exposes
1551 /// the values to other scalar optimizations.
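/// For example (an IR sketch): given "@G = internal global i32 0" whose only
/// other stored value is 42, we create "@G.b = internal global i1 false",
/// rewrite every store as a store of i1 true/false, and rewrite every load as
///   %b = load i1, i1* @G.b
///   %v = select i1 %b, i32 42, i32 0
/// (a zext replaces the select when the two values are exactly one and zero).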
1552 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1553   Type *GVElType = GV->getValueType();
1554 
1555   // If GVElType is already i1, it is already shrunk.  If the type of the GV is
1556   // an FP value, pointer or vector, don't do this optimization because a select
1557   // between them is very expensive and unlikely to lead to later
1558   // simplification.  In these cases, we typically end up with "cond ? v1 : v2"
1559   // where v1 and v2 both require constant pool loads, a big loss.
1560   if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1561       GVElType->isFloatingPointTy() ||
1562       GVElType->isPointerTy() || GVElType->isVectorTy())
1563     return false;
1564 
1565   // Walk the use list of the global, checking that all the uses are loads or
1566   // stores.  If there is anything else, bail out.
1567   for (User *U : GV->users())
1568     if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1569       return false;
1570 
1571   DEBUG(dbgs() << "   *** SHRINKING TO BOOL: " << *GV << "\n");
1572 
1573   // Create the new global, initializing it to false.
1574   GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1575                                              false,
1576                                              GlobalValue::InternalLinkage,
1577                                         ConstantInt::getFalse(GV->getContext()),
1578                                              GV->getName()+".b",
1579                                              GV->getThreadLocalMode(),
1580                                              GV->getType()->getAddressSpace());
1581   NewGV->copyAttributesFrom(GV);
1582   GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);
1583 
1584   Constant *InitVal = GV->getInitializer();
1585   assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1586          "No reason to shrink to bool!");
1587 
1588   // If initialized to zero and storing one into the global, we can use a cast
1589   // instead of a select to synthesize the desired value.
1590   bool IsOneZero = false;
1591   if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
1592     IsOneZero = InitVal->isNullValue() && CI->isOne();
1593 
1594   while (!GV->use_empty()) {
1595     Instruction *UI = cast<Instruction>(GV->user_back());
1596     if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1597       // Change the store into a boolean store.
1598       bool StoringOther = SI->getOperand(0) == OtherVal;
1599       // Only do this if we weren't storing a loaded value.
1600       Value *StoreVal;
1601       if (StoringOther || SI->getOperand(0) == InitVal) {
1602         StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1603                                     StoringOther);
1604       } else {
1605         // Otherwise, we are storing a previously loaded copy.  To do this,
1606         // change the copy from copying the original value to just copying the
1607         // bool.
1608         Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1609 
1610         // If we've already replaced the input, StoredVal will be a cast or
1611         // select instruction.  If not, it will be a load of the original
1612         // global.
1613         if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1614           assert(LI->getOperand(0) == GV && "Not a copy!");
1615           // Insert a new load, to preserve the saved value.
1616           StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1617                                   LI->getOrdering(), LI->getSynchScope(), LI);
1618         } else {
1619           assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1620                  "This is not a form that we understand!");
1621           StoreVal = StoredVal->getOperand(0);
1622           assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1623         }
1624       }
1625       new StoreInst(StoreVal, NewGV, false, 0,
1626                     SI->getOrdering(), SI->getSynchScope(), SI);
1627     } else {
1628       // Change the load into a load of bool then a select.
1629       LoadInst *LI = cast<LoadInst>(UI);
1630       LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1631                                    LI->getOrdering(), LI->getSynchScope(), LI);
1632       Value *NSI;
1633       if (IsOneZero)
1634         NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1635       else
1636         NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1637       NSI->takeName(LI);
1638       LI->replaceAllUsesWith(NSI);
1639     }
1640     UI->eraseFromParent();
1641   }
1642 
1643   // Retain the name of the old global variable. People who are debugging their
1644   // programs may expect these variables to be named the same.
1645   NewGV->takeName(GV);
1646   GV->eraseFromParent();
1647   return true;
1648 }
1649 
1650 static bool deleteIfDead(GlobalValue &GV,
1651                          SmallSet<const Comdat *, 8> &NotDiscardableComdats) {
1652   GV.removeDeadConstantUsers();
1653 
1654   if (!GV.isDiscardableIfUnused())
1655     return false;
1656 
1657   if (const Comdat *C = GV.getComdat())
1658     if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
1659       return false;
1660 
1661   bool Dead;
1662   if (auto *F = dyn_cast<Function>(&GV))
1663     Dead = F->isDefTriviallyDead();
1664   else
1665     Dead = GV.use_empty();
1666   if (!Dead)
1667     return false;
1668 
1669   DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
1670   GV.eraseFromParent();
1671   ++NumDeleted;
1672   return true;
1673 }
1674 
1675 static bool isPointerValueDeadOnEntryToFunction(
1676     const Function *F, GlobalValue *GV,
1677     function_ref<DominatorTree &(Function &)> LookupDomTree) {
1678   // Find all uses of GV. We expect them all to be in F, and if we can't
1679   // identify any of the uses we bail out.
1680   //
1681   // On each of these uses, identify if the memory that GV points to is
1682   // used/required/live at the start of the function. If it is not, for example
1683   // if the first thing the function does is store to the GV, the GV can
1684   // possibly be demoted.
1685   //
1686   // We don't do an exhaustive search for memory operations - simply look
1687   // through bitcasts as they're quite common and benign.
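  // For example (a sketch): in
  //   define void @f() { store i32 0, i32* @G  ...loads of @G follow... }
  // every load of @G is dominated by the entry store, so the memory @G points
  // to is dead on entry to @f and @G is a candidate for demotion.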
1688   const DataLayout &DL = GV->getParent()->getDataLayout();
1689   SmallVector<LoadInst *, 4> Loads;
1690   SmallVector<StoreInst *, 4> Stores;
1691   for (auto *U : GV->users()) {
1692     if (Operator::getOpcode(U) == Instruction::BitCast) {
1693       for (auto *UU : U->users()) {
1694         if (auto *LI = dyn_cast<LoadInst>(UU))
1695           Loads.push_back(LI);
1696         else if (auto *SI = dyn_cast<StoreInst>(UU))
1697           Stores.push_back(SI);
1698         else
1699           return false;
1700       }
1701       continue;
1702     }
1703 
1704     Instruction *I = dyn_cast<Instruction>(U);
1705     if (!I)
1706       return false;
1707     assert(I->getParent()->getParent() == F);
1708 
1709     if (auto *LI = dyn_cast<LoadInst>(I))
1710       Loads.push_back(LI);
1711     else if (auto *SI = dyn_cast<StoreInst>(I))
1712       Stores.push_back(SI);
1713     else
1714       return false;
1715   }
1716 
1717   // We have identified all uses of GV into loads and stores. Now check if all
1718   // of them are known not to depend on the value of the global at the function
1719   // entry point. We do this by ensuring that every load is dominated by at
1720   // least one store.
1721   auto &DT = LookupDomTree(*const_cast<Function *>(F));
1722 
1723   // The check below is quadratic; make sure we won't do too many tests.
1724   // FIXME: Even though this will always have worst-case quadratic time, we
1725   // could put effort into minimizing the average time by putting stores that
1726   // have been shown to dominate at least one load at the beginning of the
1727   // Stores array, making subsequent dominance checks more likely to succeed
1728   // early.
1729   //
1730   // The threshold here is fairly large because global->local demotion is a
1731   // very powerful optimization should it fire.
1732   const unsigned Threshold = 100;
1733   if (Loads.size() * Stores.size() > Threshold)
1734     return false;
1735 
1736   for (auto *L : Loads) {
1737     auto *LTy = L->getType();
1738     if (!std::any_of(Stores.begin(), Stores.end(), [&](StoreInst *S) {
1739           auto *STy = S->getValueOperand()->getType();
1740           // The load is only dominated by the store if DomTree says so
1741           // and the number of bits loaded in L is less than or equal to
1742           // the number of bits stored in S.
1743           return DT.dominates(S, L) &&
1744                  DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
1745         }))
1746       return false;
1747   }
1748   // All loads have known dependences inside F, so the global can be localized.
1749   return true;
1750 }
1751 
1752 /// C may have non-instruction users. Can all of those users be turned into
1753 /// instructions?
1754 static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
1755   // We don't do this exhaustively. The most common patterns we really need
1756   // to care about are a constant GEP or constant bitcast - so we just look
1757   // through a single ConstantExpr.
1758   //
1759   // The set of constants that this function returns true for must be able to be
1760   // handled by makeAllConstantUsesInstructions.
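  // For example (a sketch): an operand such as
  //   getelementptr inbounds ([4 x i32], [4 x i32]* @G, i64 0, i64 1)
  // used directly by an instruction qualifies, while the same ConstantExpr
  // buried inside another constant does not.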
1761   for (auto *U : C->users()) {
1762     if (isa<Instruction>(U))
1763       continue;
1764     if (!isa<ConstantExpr>(U))
1765       // Non-instruction, non-ConstantExpr user; cannot convert this.
1766       return false;
1767     for (auto *UU : U->users())
1768       if (!isa<Instruction>(UU))
1769         // A ConstantExpr used by another constant. We don't try to recurse any
1770         // further; we just bail out at this point.
1771         return false;
1772   }
1773 
1774   return true;
1775 }
1776 
1777 /// C may have non-instruction users, and
1778 /// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the
1779 /// non-instruction users to instructions.
1780 static void makeAllConstantUsesInstructions(Constant *C) {
1781   SmallVector<ConstantExpr*,4> Users;
1782   for (auto *U : C->users()) {
1783     if (isa<ConstantExpr>(U))
1784       Users.push_back(cast<ConstantExpr>(U));
1785     else
1786       // We should never get here; allNonInstructionUsersCanBeMadeInstructions
1787       // should not have returned true for C.
1788       assert(
1789           isa<Instruction>(U) &&
1790           "Can't transform non-constantexpr non-instruction to instruction!");
1791   }
1792 
1793   SmallVector<Value*,4> UUsers;
1794   for (auto *U : Users) {
1795     UUsers.clear();
1796     for (auto *UU : U->users())
1797       UUsers.push_back(UU);
1798     for (auto *UU : UUsers) {
1799       Instruction *UI = cast<Instruction>(UU);
1800       Instruction *NewU = U->getAsInstruction();
1801       NewU->insertBefore(UI);
1802       UI->replaceUsesOfWith(U, NewU);
1803     }
1804     U->dropAllReferences();
1805   }
1806 }
1807 
1808 /// Analyze the specified global variable and optimize
1809 /// it if possible.  If we make a change, return true.
1810 static bool processInternalGlobal(
1811     GlobalVariable *GV, const GlobalStatus &GS, TargetLibraryInfo *TLI,
1812     function_ref<DominatorTree &(Function &)> LookupDomTree) {
1813   auto &DL = GV->getParent()->getDataLayout();
1814   // If this is a first class global and has only one accessing function and
1815   // this function is non-recursive, we replace the global with a local alloca
1816   // in this function.
1817   //
1818   // NOTE: It doesn't make sense to promote non-single-value types since we
1819   // would just be replacing static memory with stack memory.
1820   //
1821   // If the global is in a different address space, don't bring it to stack.
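  // A minimal sketch of the rewrite (hypothetical IR):
  //   @G = internal global i32 5   ; only accessed from the norecurse @f
  // becomes, at the entry of @f:
  //   %G = alloca i32
  //   store i32 5, i32* %G
  // after which every use of @G is replaced with the alloca.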
1822   if (!GS.HasMultipleAccessingFunctions &&
1823       GS.AccessingFunction &&
1824       GV->getValueType()->isSingleValueType() &&
1825       GV->getType()->getAddressSpace() == 0 &&
1826       !GV->isExternallyInitialized() &&
1827       allNonInstructionUsersCanBeMadeInstructions(GV) &&
1828       GS.AccessingFunction->doesNotRecurse() &&
1829       isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
1830                                           LookupDomTree)) {
1831     DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
1832     Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1833                                                    ->getEntryBlock().begin());
1834     Type *ElemTy = GV->getValueType();
1835     // FIXME: Pass Global's alignment when globals have alignment
1836     AllocaInst *Alloca = new AllocaInst(ElemTy, nullptr,
1837                                         GV->getName(), &FirstI);
1838     if (!isa<UndefValue>(GV->getInitializer()))
1839       new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1840 
1841     makeAllConstantUsesInstructions(GV);
1842 
1843     GV->replaceAllUsesWith(Alloca);
1844     GV->eraseFromParent();
1845     ++NumLocalized;
1846     return true;
1847   }
1848 
1849   // If the global is never loaded (but may be stored to), it is dead.
1850   // Delete it now.
1851   if (!GS.IsLoaded) {
1852     DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
1853 
1854     bool Changed;
1855     if (isLeakCheckerRoot(GV)) {
1856       // Delete any constant stores to the global.
1857       Changed = CleanupPointerRootUsers(GV, TLI);
1858     } else {
1859       // Delete any stores we can find to the global.  We may not be able to
1860       // make it completely dead though.
1861       Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1862     }
1863 
1864     // If the global is dead now, delete it.
1865     if (GV->use_empty()) {
1866       GV->eraseFromParent();
1867       ++NumDeleted;
1868       Changed = true;
1869     }
1870     return Changed;
1871 
1872   }
1873   if (GS.StoredType <= GlobalStatus::InitializerStored) {
1874     DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
1875     GV->setConstant(true);
1876 
1877     // Clean up any obviously simplifiable users now.
1878     CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1879 
1880     // If the global is dead now, just nuke it.
1881     if (GV->use_empty()) {
1882       DEBUG(dbgs() << "   *** Marking constant allowed us to simplify "
1883             << "all users and delete global!\n");
1884       GV->eraseFromParent();
1885       ++NumDeleted;
1886       return true;
1887     }
1888 
1889     // Fall through to the next check; see if we can optimize further.
1890     ++NumMarked;
1891   }
1892   if (!GV->getInitializer()->getType()->isSingleValueType()) {
1894     if (SRAGlobal(GV, DL))
1895       return true;
1896   }
1897   if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) {
1898     // If the initial value for the global was an undef value, and if only
1899     // one other value was stored into it, we can just change the
1900     // initializer to be the stored value, then delete all stores to the
1901     // global.  This allows us to mark it constant.
1902     if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
1903       if (isa<UndefValue>(GV->getInitializer())) {
1904         // Change the initial value here.
1905         GV->setInitializer(SOVConstant);
1906 
1907         // Clean up any obviously simplifiable users now.
1908         CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1909 
1910         if (GV->use_empty()) {
1911           DEBUG(dbgs() << "   *** Substituting initializer allowed us to "
1912                        << "simplify all users and delete global!\n");
1913           GV->eraseFromParent();
1914           ++NumDeleted;
1915         }
1916         ++NumSubstitute;
1917         return true;
1918       }
1919 
1920     // Try to optimize globals based on the knowledge that only one value
1921     // (besides its initializer) is ever stored to the global.
1922     if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, TLI))
1923       return true;
1924 
1925     // Otherwise, if the global was not a boolean, we can shrink it to be a
1926     // boolean.
1927     if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
1928       if (GS.Ordering == AtomicOrdering::NotAtomic) {
1929         if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
1930           ++NumShrunkToBool;
1931           return true;
1932         }
1933       }
1934     }
1935   }
1936 
1937   return false;
1938 }
1939 
1940 /// Analyze the specified global variable and optimize it if possible.  If we
1941 /// make a change, return true.
1942 static bool
1943 processGlobal(GlobalValue &GV, TargetLibraryInfo *TLI,
1944               function_ref<DominatorTree &(Function &)> LookupDomTree) {
1945   if (GV.getName().startswith("llvm."))
1946     return false;
1947 
1948   GlobalStatus GS;
1949 
1950   if (GlobalStatus::analyzeGlobal(&GV, GS))
1951     return false;
1952 
1953   bool Changed = false;
1954   if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) {
1955     auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global
1956                                                : GlobalValue::UnnamedAddr::Local;
1957     if (NewUnnamedAddr != GV.getUnnamedAddr()) {
1958       GV.setUnnamedAddr(NewUnnamedAddr);
1959       NumUnnamed++;
1960       Changed = true;
1961     }
1962   }
1963 
1964   // Do more involved optimizations if the global is internal.
1965   if (!GV.hasLocalLinkage())
1966     return Changed;
1967 
1968   auto *GVar = dyn_cast<GlobalVariable>(&GV);
1969   if (!GVar)
1970     return Changed;
1971 
1972   if (GVar->isConstant() || !GVar->hasInitializer())
1973     return Changed;
1974 
1975   return processInternalGlobal(GVar, GS, TLI, LookupDomTree) || Changed;
1976 }
1977 
1978 /// Walk all of the direct calls of the specified function, changing them to
1979 /// FastCC.
1980 static void ChangeCalleesToFastCall(Function *F) {
1981   for (User *U : F->users()) {
1982     if (isa<BlockAddress>(U))
1983       continue;
1984     CallSite CS(cast<Instruction>(U));
1985     CS.setCallingConv(CallingConv::Fast);
1986   }
1987 }
1988 
1989 static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) {
1990   for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
1991     unsigned Index = Attrs.getSlotIndex(i);
1992     if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest))
1993       continue;
1994 
1995     // There can be only one.
1996     return Attrs.removeAttribute(C, Index, Attribute::Nest);
1997   }
1998 
1999   return Attrs;
2000 }
2001 
2002 static void RemoveNestAttribute(Function *F) {
2003   F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
2004   for (User *U : F->users()) {
2005     if (isa<BlockAddress>(U))
2006       continue;
2007     CallSite CS(cast<Instruction>(U));
2008     CS.setAttributes(StripNest(F->getContext(), CS.getAttributes()));
2009   }
2010 }
2011 
2012 /// Return true if this is a calling convention that we'd like to change.  The
2013 /// idea here is that we don't want to mess with the convention if the user
2014 /// explicitly requested something with performance implications like coldcc,
2015 /// GHC, or anyregcc.
2016 static bool isProfitableToMakeFastCC(Function *F) {
2017   CallingConv::ID CC = F->getCallingConv();
2018   // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
2019   return CC == CallingConv::C || CC == CallingConv::X86_ThisCall;
2020 }
2021 
2022 static bool
2023 OptimizeFunctions(Module &M, TargetLibraryInfo *TLI,
2024                   function_ref<DominatorTree &(Function &)> LookupDomTree,
2025                   SmallSet<const Comdat *, 8> &NotDiscardableComdats) {
2026   bool Changed = false;
2027   // Optimize functions.
2028   for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2029     Function *F = &*FI++;
2030     // Functions without names cannot be referenced outside this module.
2031     if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
2032       F->setLinkage(GlobalValue::InternalLinkage);
2033 
2034     if (deleteIfDead(*F, NotDiscardableComdats)) {
2035       Changed = true;
2036       continue;
2037     }
2038 
2039     Changed |= processGlobal(*F, TLI, LookupDomTree);
2040 
2041     if (!F->hasLocalLinkage())
2042       continue;
2043     if (isProfitableToMakeFastCC(F) && !F->isVarArg() &&
2044         !F->hasAddressTaken()) {
2045       // If this function has a calling convention worth changing, is not a
2046       // varargs function, and is only called directly, promote it to use the
2047       // Fast calling convention.
2048       F->setCallingConv(CallingConv::Fast);
2049       ChangeCalleesToFastCall(F);
2050       ++NumFastCallFns;
2051       Changed = true;
2052     }
2053 
2054     if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
2055         !F->hasAddressTaken()) {
2056       // The function is not used by a trampoline intrinsic, so it is safe
2057       // to remove the 'nest' attribute.
2058       RemoveNestAttribute(F);
2059       ++NumNestRemoved;
2060       Changed = true;
2061     }
2062   }
2063   return Changed;
2064 }
2065 
2066 static bool
2067 OptimizeGlobalVars(Module &M, TargetLibraryInfo *TLI,
2068                    function_ref<DominatorTree &(Function &)> LookupDomTree,
2069                    SmallSet<const Comdat *, 8> &NotDiscardableComdats) {
2070   bool Changed = false;
2071 
2072   for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
2073        GVI != E; ) {
2074     GlobalVariable *GV = &*GVI++;
2075     // Global variables without names cannot be referenced outside this module.
2076     if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
2077       GV->setLinkage(GlobalValue::InternalLinkage);
2078     // Simplify the initializer.
2079     if (GV->hasInitializer())
2080       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
2081         auto &DL = M.getDataLayout();
2082         Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
2083         if (New && New != CE)
2084           GV->setInitializer(New);
2085       }
2086 
2087     if (deleteIfDead(*GV, NotDiscardableComdats)) {
2088       Changed = true;
2089       continue;
2090     }
2091 
2092     Changed |= processGlobal(*GV, TLI, LookupDomTree);
2093   }
2094   return Changed;
2095 }
2096 
2097 /// Evaluate a piece of a constantexpr store into a global initializer.  This
2098 /// returns 'Init' modified to reflect 'Val' stored into it.  At this point, the
2099 /// GEP operands of Addr [0, OpNo) have been stepped into.
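/// For example (a sketch): storing i32 7 through
///   getelementptr ([2 x i32], [2 x i32]* @G, i64 0, i64 1)
/// into "@G = global [2 x i32] [i32 1, i32 2]" recurses into element 1 and
/// rebuilds the initializer as "[i32 1, i32 7]".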
2100 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2101                                    ConstantExpr *Addr, unsigned OpNo) {
2102   // Base case of the recursion.
2103   if (OpNo == Addr->getNumOperands()) {
2104     assert(Val->getType() == Init->getType() && "Type mismatch!");
2105     return Val;
2106   }
2107 
2108   SmallVector<Constant*, 32> Elts;
2109   if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2110     // Break up the constant into its elements.
2111     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2112       Elts.push_back(Init->getAggregateElement(i));
2113 
2114     // Replace the element that we are supposed to.
2115     ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2116     unsigned Idx = CU->getZExtValue();
2117     assert(Idx < STy->getNumElements() && "Struct index out of range!");
2118     Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2119 
2120     // Return the modified struct.
2121     return ConstantStruct::get(STy, Elts);
2122   }
2123 
2124   ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2125   SequentialType *InitTy = cast<SequentialType>(Init->getType());
2126 
2127   uint64_t NumElts;
2128   if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
2129     NumElts = ATy->getNumElements();
2130   else
2131     NumElts = InitTy->getVectorNumElements();
2132 
2133   // Break up the array into elements.
2134   for (uint64_t i = 0, e = NumElts; i != e; ++i)
2135     Elts.push_back(Init->getAggregateElement(i));
2136 
2137   assert(CI->getZExtValue() < NumElts);
2138   Elts[CI->getZExtValue()] =
2139     EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2140 
2141   if (Init->getType()->isArrayTy())
2142     return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2143   return ConstantVector::get(Elts);
2144 }
2145 
2146 /// We have decided that Addr (which satisfies the predicate
2147 /// isSimpleEnoughPointerToCommit) should get Val as its value.  Make it happen.
2148 static void CommitValueTo(Constant *Val, Constant *Addr) {
2149   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2150     assert(GV->hasInitializer());
2151     GV->setInitializer(Val);
2152     return;
2153   }
2154 
2155   ConstantExpr *CE = cast<ConstantExpr>(Addr);
2156   GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2157   GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2158 }
2159 
2160 /// Evaluate static constructors in the function, if we can.  Return true if we
2161 /// can, false otherwise.
2162 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
2163                                       TargetLibraryInfo *TLI) {
2164   // Call the function.
2165   Evaluator Eval(DL, TLI);
2166   Constant *RetValDummy;
2167   bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2168                                            SmallVector<Constant*, 0>());
2169 
2170   if (EvalSuccess) {
2171     ++NumCtorsEvaluated;
2172 
2173     // We succeeded at evaluation: commit the result.
2174     DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2175           << F->getName() << "' to " << Eval.getMutatedMemory().size()
2176           << " stores.\n");
2177     for (const auto &I : Eval.getMutatedMemory())
2178       CommitValueTo(I.second, I.first);
2179     for (GlobalVariable *GV : Eval.getInvariants())
2180       GV->setConstant(true);
2181   }
2182 
2183   return EvalSuccess;
2184 }
2185 
2186 static int compareNames(Constant *const *A, Constant *const *B) {
2187   Value *AStripped = (*A)->stripPointerCastsNoFollowAliases();
2188   Value *BStripped = (*B)->stripPointerCastsNoFollowAliases();
2189   return AStripped->getName().compare(BStripped->getName());
2190 }
2191 
2192 static void setUsedInitializer(GlobalVariable &V,
2193                                const SmallPtrSet<GlobalValue *, 8> &Init) {
2194   if (Init.empty()) {
2195     V.eraseFromParent();
2196     return;
2197   }
2198 
2199   // The new llvm.used array stores its pointers as i8*.
2200   PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
2201 
2202   SmallVector<llvm::Constant *, 8> UsedArray;
2203   for (GlobalValue *GV : Init) {
2204     Constant *Cast
2205       = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
2206     UsedArray.push_back(Cast);
2207   }
2208   // Sort to get deterministic order.
2209   array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2210   ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
2211 
2212   Module *M = V.getParent();
2213   V.removeFromParent();
2214   GlobalVariable *NV =
2215       new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage,
2216                          llvm::ConstantArray::get(ATy, UsedArray), "");
2217   NV->takeName(&V);
2218   NV->setSection("llvm.metadata");
2219   delete &V;
2220 }
2221 
2222 namespace {
2223 /// An easy to access representation of llvm.used and llvm.compiler.used.
2224 class LLVMUsed {
2225   SmallPtrSet<GlobalValue *, 8> Used;
2226   SmallPtrSet<GlobalValue *, 8> CompilerUsed;
2227   GlobalVariable *UsedV;
2228   GlobalVariable *CompilerUsedV;
2229 
2230 public:
2231   LLVMUsed(Module &M) {
2232     UsedV = collectUsedGlobalVariables(M, Used, false);
2233     CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
2234   }
2235   typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator;
2236   typedef iterator_range<iterator> used_iterator_range;
2237   iterator usedBegin() { return Used.begin(); }
2238   iterator usedEnd() { return Used.end(); }
2239   used_iterator_range used() {
2240     return used_iterator_range(usedBegin(), usedEnd());
2241   }
2242   iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2243   iterator compilerUsedEnd() { return CompilerUsed.end(); }
2244   used_iterator_range compilerUsed() {
2245     return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
2246   }
2247   bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2248   bool compilerUsedCount(GlobalValue *GV) const {
2249     return CompilerUsed.count(GV);
2250   }
2251   bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2252   bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2253   bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
2254   bool compilerUsedInsert(GlobalValue *GV) {
2255     return CompilerUsed.insert(GV).second;
2256   }
2257 
2258   void syncVariablesAndSets() {
2259     if (UsedV)
2260       setUsedInitializer(*UsedV, Used);
2261     if (CompilerUsedV)
2262       setUsedInitializer(*CompilerUsedV, CompilerUsed);
2263   }
2264 };
2265 }
2266 
2267 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2268   if (GA.use_empty()) // No use at all.
2269     return false;
2270 
2271   assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2272          "We should have removed the duplicated "
2273          "element from llvm.compiler.used");
2274   if (!GA.hasOneUse())
2275     // Strictly more than one use, so at least one use is in neither llvm.used
2276     // nor llvm.compiler.used.
2277     return true;
2278 
2279   // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2280   return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2281 }
2282 
2283 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
2284                                                const LLVMUsed &U) {
2285   unsigned N = 2;
2286   assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
2287          "We should have removed the duplicated "
2288          "element from llvm.compiler.used");
2289   if (U.usedCount(&V) || U.compilerUsedCount(&V))
2290     ++N;
2291   return V.hasNUsesOrMore(N);
2292 }
2293 
2294 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
2295   if (!GA.hasLocalLinkage())
2296     return true;
2297 
2298   return U.usedCount(&GA) || U.compilerUsedCount(&GA);
2299 }
2300 
2301 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
2302                              bool &RenameTarget) {
2303   RenameTarget = false;
2304   bool Ret = false;
2305   if (hasUseOtherThanLLVMUsed(GA, U))
2306     Ret = true;
2307 
2308   // If the alias is externally visible, we may still be able to simplify it.
2309   if (!mayHaveOtherReferences(GA, U))
2310     return Ret;
2311 
2312   // If the aliasee has internal linkage, give it the name and linkage
2313   // of the alias, and delete the alias.  This turns:
2314   //   define internal ... @f(...)
2315   //   @a = alias ... @f
2316   // into:
2317   //   define ... @a(...)
2318   Constant *Aliasee = GA.getAliasee();
2319   GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2320   if (!Target->hasLocalLinkage())
2321     return Ret;
2322 
2323   // Do not perform the transform if multiple aliases potentially target the
2324   // aliasee. This check also ensures that it is safe to replace the section
2325   // and other attributes of the aliasee with those of the alias.
2326   if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
2327     return Ret;
2328 
2329   RenameTarget = true;
2330   return true;
2331 }
2332 
2333 static bool
2334 OptimizeGlobalAliases(Module &M,
2335                       SmallSet<const Comdat *, 8> &NotDiscardableComdats) {
2336   bool Changed = false;
2337   LLVMUsed Used(M);
2338 
2339   for (GlobalValue *GV : Used.used())
2340     Used.compilerUsedErase(GV);
2341 
2342   for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
2343        I != E;) {
2344     GlobalAlias *J = &*I++;
2345 
2346     // Aliases without names cannot be referenced outside this module.
2347     if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
2348       J->setLinkage(GlobalValue::InternalLinkage);
2349 
2350     if (deleteIfDead(*J, NotDiscardableComdats)) {
2351       Changed = true;
2352       continue;
2353     }
2354 
2355     // If the aliasee may change at link time, nothing can be done - bail out.
2356     if (J->isInterposable())
2357       continue;
2358 
2359     Constant *Aliasee = J->getAliasee();
2360     GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
2361     // We can't trivially replace the alias with the aliasee if the aliasee is
2362     // non-trivial in some way.
2363     // TODO: Try to handle non-zero GEPs of local aliasees.
2364     if (!Target)
2365       continue;
2366     Target->removeDeadConstantUsers();
2367 
2368     // Make all users of the alias use the aliasee instead.
2369     bool RenameTarget;
2370     if (!hasUsesToReplace(*J, Used, RenameTarget))
2371       continue;
2372 
2373     J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
2374     ++NumAliasesResolved;
2375     Changed = true;
2376 
2377     if (RenameTarget) {
2378       // Give the aliasee the name, linkage and other attributes of the alias.
2379       Target->takeName(&*J);
2380       Target->setLinkage(J->getLinkage());
2381       Target->setVisibility(J->getVisibility());
2382       Target->setDLLStorageClass(J->getDLLStorageClass());
2383 
2384       if (Used.usedErase(&*J))
2385         Used.usedInsert(Target);
2386 
2387       if (Used.compilerUsedErase(&*J))
2388         Used.compilerUsedInsert(Target);
2389     } else if (mayHaveOtherReferences(*J, Used))
2390       continue;
2391 
2392     // Delete the alias.
2393     M.getAliasList().erase(J);
2394     ++NumAliasesRemoved;
2395     Changed = true;
2396   }
2397 
2398   Used.syncVariablesAndSets();
2399 
2400   return Changed;
2401 }
2402 
2403 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
2404   LibFunc::Func F = LibFunc::cxa_atexit;
2405   if (!TLI->has(F))
2406     return nullptr;
2407 
2408   Function *Fn = M.getFunction(TLI->getName(F));
2409   if (!Fn)
2410     return nullptr;
2411 
2412   // Make sure that the function has the correct prototype.
2413   if (!TLI->getLibFunc(*Fn, F) || F != LibFunc::cxa_atexit)
2414     return nullptr;
2415 
2416   return Fn;
2417 }
2418 
2419 /// Returns whether the given function is an empty C++ destructor and can
2420 /// therefore be eliminated.
2421 /// Note that we assume that other optimization passes have already simplified
2422 /// the code so we only look for a function with a single basic block, where
2423 /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
2424 /// other side-effect free instructions.
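/// For example (a sketch): a destructor body of the form
///   define linkonce_odr void @_ZN1AD2Ev(%struct.A* %this) { ret void }
/// is considered empty, as is one whose only non-debug call is to another
/// empty destructor.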
2425 static bool cxxDtorIsEmpty(const Function &Fn,
2426                            SmallPtrSet<const Function *, 8> &CalledFunctions) {
2427   // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
2428   // nounwind, but that doesn't seem worth doing.
2429   if (Fn.isDeclaration())
2430     return false;
2431 
2432   if (++Fn.begin() != Fn.end())
2433     return false;
2434 
2435   const BasicBlock &EntryBlock = Fn.getEntryBlock();
2436   for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
2437        I != E; ++I) {
2438     if (const CallInst *CI = dyn_cast<CallInst>(I)) {
2439       // Ignore debug intrinsics.
2440       if (isa<DbgInfoIntrinsic>(CI))
2441         continue;
2442 
2443       const Function *CalledFn = CI->getCalledFunction();
2444 
2445       if (!CalledFn)
2446         return false;
2447 
2448       SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
2449 
2450       // Don't treat recursive functions as empty.
2451       if (!NewCalledFunctions.insert(CalledFn).second)
2452         return false;
2453 
2454       if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
2455         return false;
2456     } else if (isa<ReturnInst>(*I))
2457       return true; // We're done.
2458     else if (I->mayHaveSideEffects())
2459       return false; // Destructor with side effects, bail.
2460   }
2461 
2462   return false;
2463 }
2464 
2465 static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
2466   /// Itanium C++ ABI p3.3.5:
2467   ///
2468   ///   After constructing a global (or local static) object, that will require
2469   ///   destruction on exit, a termination function is registered as follows:
2470   ///
2471   ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
2472   ///
2473   ///   This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
2474   ///   call f(p) when DSO d is unloaded, before all such termination calls
2475   ///   registered before this one. It returns zero if registration is
2476   ///   successful, nonzero on failure.
2477 
2478   // This pass will look for calls to __cxa_atexit where the function is trivial
2479   // and remove them.
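  // For example (a sketch): a registration such as
  //   %r = call i32 @__cxa_atexit(void (i8*)* @_ZN1AD2Ev, i8* @p, i8* @d)
  // where @_ZN1AD2Ev is empty is erased, and %r is replaced with zero.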
2480   bool Changed = false;
2481 
2482   for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
2483        I != E;) {
2484     // We're only interested in calls. Theoretically, we could handle invoke
2485     // instructions as well, but neither llvm-gcc nor clang generates invokes
2486     // to __cxa_atexit.
2487     CallInst *CI = dyn_cast<CallInst>(*I++);
2488     if (!CI)
2489       continue;
2490 
2491     Function *DtorFn =
2492       dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
2493     if (!DtorFn)
2494       continue;
2495 
2496     SmallPtrSet<const Function *, 8> CalledFunctions;
2497     if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
2498       continue;
2499 
2500     // Just remove the call.
2501     CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
2502     CI->eraseFromParent();
2503 
2504     ++NumCXXDtorsRemoved;
2505 
2506     Changed = true;
2507   }
2508 
2509   return Changed;
2510 }
2511 
2512 static bool optimizeGlobalsInModule(
2513     Module &M, const DataLayout &DL, TargetLibraryInfo *TLI,
2514     function_ref<DominatorTree &(Function &)> LookupDomTree) {
2515   SmallSet<const Comdat *, 8> NotDiscardableComdats;
2516   bool Changed = false;
2517   bool LocalChange = true;
2518   while (LocalChange) {
2519     LocalChange = false;
2520 
2521     NotDiscardableComdats.clear();
2522     for (const GlobalVariable &GV : M.globals())
2523       if (const Comdat *C = GV.getComdat())
2524         if (!GV.isDiscardableIfUnused() || !GV.use_empty())
2525           NotDiscardableComdats.insert(C);
2526     for (Function &F : M)
2527       if (const Comdat *C = F.getComdat())
2528         if (!F.isDefTriviallyDead())
2529           NotDiscardableComdats.insert(C);
2530     for (GlobalAlias &GA : M.aliases())
2531       if (const Comdat *C = GA.getComdat())
2532         if (!GA.isDiscardableIfUnused() || !GA.use_empty())
2533           NotDiscardableComdats.insert(C);
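
    // Sketch of why this matters: comdat group members are kept or discarded
    // together by the linker.  Given (hypothetical IR)
    //
    //   $foo = comdat any
    //   @foo_data = global i32 0, comdat($foo)    ; still referenced
    //   define void @foo() comdat($foo) { ... }   ; dead on its own
    //
    // deleting @foo could let the whole group, including the live @foo_data,
    // be discarded, so such comdats are recorded as not discardable.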

    // Delete functions that are trivially dead, and convert eligible internal
    // functions from the ccc to the fastcc calling convention.
    LocalChange |=
        OptimizeFunctions(M, TLI, LookupDomTree, NotDiscardableComdats);
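
    // As a sketch, an internal function whose address is never taken, e.g.
    //
    //   static int helper(int x) { return x + 1; }   // hypothetical
    //
    // has every call site visible in the module, so OptimizeFunctions can
    // retag it (and its callers) as fastcc.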

    // Optimize global_ctors list.
    LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
      return EvaluateStaticConstructor(F, DL, TLI);
    });
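
    // A constructor that can typically be evaluated away looks like
    // (hypothetical names):
    //
    //   int g;
    //   struct Init { Init() { g = 42; } } TheInit;
    //
    // The store is committed into g's initializer and the corresponding
    // llvm.global_ctors entry is dropped.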

    // Optimize non-address-taken globals.
    LocalChange |= OptimizeGlobalVars(M, TLI, LookupDomTree,
                                      NotDiscardableComdats);
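
    // For instance, an internal global that is never stored to after its
    // initialization, e.g. (hypothetical)
    //
    //   static int Limit = 100;   // only ever loaded
    //
    // is marked constant here, after which loads of it fold to 100.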

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);
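
    // E.g. an alias such as (hypothetical IR)
    //
    //   @bar = internal alias void (), void ()* @foo
    //
    // has its uses redirected to @foo and, once unused, is removed.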

    // Try to remove trivial global destructors that have not been removed
    // already.
    Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.

  return Changed;
}

PreservedAnalyses GlobalOptPass::run(Module &M, AnalysisManager<Module> &AM) {
  auto &DL = M.getDataLayout();
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(M);
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
    return FAM.getResult<DominatorTreeAnalysis>(F);
  };
  if (!optimizeGlobalsInModule(M, DL, &TLI, LookupDomTree))
    return PreservedAnalyses::all();
  return PreservedAnalyses::none();
}
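
// With the new pass manager, this pass is typically exercised via, e.g.:
//
//   opt -passes=globalopt input.ll -S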

namespace {
struct GlobalOptLegacyPass : public ModulePass {
  static char ID; // Pass identification, replacement for typeid
  GlobalOptLegacyPass() : ModulePass(ID) {
    initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    auto &DL = M.getDataLayout();
    auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto LookupDomTree = [this](Function &F) -> DominatorTree & {
      return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
    };
    return optimizeGlobalsInModule(M, DL, TLI, LookupDomTree);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
  }
};
} // end anonymous namespace

char GlobalOptLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() {
  return new GlobalOptLegacyPass();
}
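
// Legacy pass manager clients can schedule the pass with, e.g.:
//
//   legacy::PassManager PM;
//   PM.add(createGlobalOptimizerPass());
//   PM.run(M);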