1 //===-- DataFlowSanitizer.cpp - dynamic data flow analysis ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
11 /// analysis.
12 ///
13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14 /// class of bugs on its own. Instead, it provides a generic dynamic data flow
15 /// analysis framework to be used by clients to help detect application-specific
16 /// issues within their own code.
17 ///
18 /// The analysis is based on automatic propagation of data flow labels (also
19 /// known as taint labels) through a program as it performs computation. Each
20 /// byte of application memory is backed by two bytes of shadow memory which
21 /// hold the label. On Linux/x86_64, memory is laid out as follows:
22 ///
23 /// +--------------------+ 0x800000000000 (top of memory)
24 /// | application memory |
25 /// +--------------------+ 0x700000008000 (kAppAddr)
26 /// | |
27 /// | unused |
28 /// | |
29 /// +--------------------+ 0x200200000000 (kUnusedAddr)
30 /// | union table |
31 /// +--------------------+ 0x200000000000 (kUnionTableAddr)
32 /// | shadow memory |
33 /// +--------------------+ 0x000000010000 (kShadowAddr)
34 /// | reserved by kernel |
35 /// +--------------------+ 0x000000000000
36 ///
37 /// To derive a shadow memory address from an application memory address,
38 /// bits 44-46 are cleared to bring the address into the range
39 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
40 /// account for the double byte representation of shadow labels and move the
41 /// address into the shadow memory range. See the function
42 /// DataFlowSanitizer::getShadowAddress below.
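///
/// As an illustrative example (a sketch using the x86-64 constants above):
/// for the application address 0x7fff12345678, clearing bits 44-46 gives
/// 0x0fff12345678, and multiplying by 2 (two shadow bytes per application
/// byte) gives the shadow address 0x1ffe2468acf0, which falls between
/// kShadowAddr and kUnionTableAddr.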
43 ///
44 /// For more information, please refer to the design document:
45 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
46
47 #include "llvm/Transforms/Instrumentation.h"
48 #include "llvm/ADT/DenseMap.h"
49 #include "llvm/ADT/DenseSet.h"
50 #include "llvm/ADT/DepthFirstIterator.h"
51 #include "llvm/ADT/StringExtras.h"
52 #include "llvm/ADT/Triple.h"
53 #include "llvm/Analysis/ValueTracking.h"
54 #include "llvm/IR/Dominators.h"
55 #include "llvm/IR/DebugInfo.h"
56 #include "llvm/IR/IRBuilder.h"
57 #include "llvm/IR/InlineAsm.h"
58 #include "llvm/IR/InstVisitor.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/MDBuilder.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/Pass.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/SpecialCaseList.h"
66 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include <algorithm>
69 #include <iterator>
70 #include <set>
71 #include <utility>
72
73 using namespace llvm;
74
75 // External symbol to be used when generating the shadow address for
76 // architectures with multiple VMAs. Instead of using a constant integer,
77 // the runtime will set the external mask based on the VMA range.
78 static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask";
79
80 // The -dfsan-preserve-alignment flag controls whether this pass assumes that
81 // alignment requirements provided by the input IR are correct. For example,
82 // if the input IR contains a load with alignment 8, this flag will cause
83 // the shadow load to have alignment 16. This flag is disabled by default as
84 // we have unfortunately encountered too much code (including Clang itself;
85 // see PR14291) which performs misaligned access.
86 static cl::opt<bool> ClPreserveAlignment(
87 "dfsan-preserve-alignment",
88 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
89 cl::init(false));
90
91 // The ABI list files control how shadow parameters are passed. The pass treats
92 // every function labelled "uninstrumented" in the ABI list file as conforming
93 // to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
94 // additional annotations for those functions, a call to one of those functions
95 // will produce a warning message, as the labelling behaviour of the function is
96 // unknown. The other supported annotations are "functional" and "discard",
97 // which are described below under DataFlowSanitizer::WrapperKind.
98 static cl::list<std::string> ClABIListFiles(
99 "dfsan-abilist",
100 cl::desc("File listing native ABI functions and how the pass treats them"),
101 cl::Hidden);
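
// For example, an ABI list file might contain entries such as the following
// (an illustrative sketch of the format, not an exhaustive specification):
//   fun:getenv=uninstrumented
//   fun:getenv=discard
//   fun:memcmp=uninstrumented
//   fun:memcmp=custom
// where the category after '=' selects the treatment described above.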
102
103 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
104 // functions (see DataFlowSanitizer::InstrumentedABI below).
105 static cl::opt<bool> ClArgsABI(
106 "dfsan-args-abi",
107 cl::desc("Use the argument ABI rather than the TLS ABI"),
108 cl::Hidden);
109
110 // Controls whether the pass includes or ignores the labels of pointers in load
111 // instructions.
112 static cl::opt<bool> ClCombinePointerLabelsOnLoad(
113 "dfsan-combine-pointer-labels-on-load",
114 cl::desc("Combine the label of the pointer with the label of the data when "
115 "loading from memory."),
116 cl::Hidden, cl::init(true));
117
118 // Controls whether the pass includes or ignores the labels of pointers in
119 // store instructions.
120 static cl::opt<bool> ClCombinePointerLabelsOnStore(
121 "dfsan-combine-pointer-labels-on-store",
122 cl::desc("Combine the label of the pointer with the label of the data when "
123 "storing in memory."),
124 cl::Hidden, cl::init(false));
125
126 static cl::opt<bool> ClDebugNonzeroLabels(
127 "dfsan-debug-nonzero-labels",
128 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
129 "load or return with a nonzero label"),
130 cl::Hidden);
131
132
133 namespace {
134
135 StringRef GetGlobalTypeString(const GlobalValue &G) {
136 // Types of GlobalVariables are always pointer types.
137 Type *GType = G.getType()->getElementType();
138 // For now we support blacklisting struct types only.
139 if (StructType *SGType = dyn_cast<StructType>(GType)) {
140 if (!SGType->isLiteral())
141 return SGType->getName();
142 }
143 return "<unknown type>";
144 }
145
146 class DFSanABIList {
147 std::unique_ptr<SpecialCaseList> SCL;
148
149 public:
150 DFSanABIList() {}
151
152 void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
153
154 /// Returns whether either this function or its source file is listed in the
155 /// given category.
156 bool isIn(const Function &F, StringRef Category) const {
157 return isIn(*F.getParent(), Category) ||
158 SCL->inSection("fun", F.getName(), Category);
159 }
160
161 /// Returns whether this global alias is listed in the given category.
162 ///
163 /// If GA aliases a function, the alias's name is matched as a function name
164 /// would be. Similarly, aliases of globals are matched like globals.
165 bool isIn(const GlobalAlias &GA, StringRef Category) const {
166 if (isIn(*GA.getParent(), Category))
167 return true;
168
169 if (isa<FunctionType>(GA.getType()->getElementType()))
170 return SCL->inSection("fun", GA.getName(), Category);
171
172 return SCL->inSection("global", GA.getName(), Category) ||
173 SCL->inSection("type", GetGlobalTypeString(GA), Category);
174 }
175
176 /// Returns whether this module is listed in the given category.
177 bool isIn(const Module &M, StringRef Category) const {
178 return SCL->inSection("src", M.getModuleIdentifier(), Category);
179 }
180 };
181
182 class DataFlowSanitizer : public ModulePass {
183 friend struct DFSanFunction;
184 friend class DFSanVisitor;
185
186 enum {
187 ShadowWidth = 16
188 };
189
190 /// Which ABI should be used for instrumented functions?
191 enum InstrumentedABI {
192 /// Argument and return value labels are passed through additional
193 /// arguments and by modifying the return type.
194 IA_Args,
195
196 /// Argument and return value labels are passed through TLS variables
197 /// __dfsan_arg_tls and __dfsan_retval_tls.
198 IA_TLS
199 };
200
201 /// How should calls to uninstrumented functions be handled?
202 enum WrapperKind {
203 /// This function is present in an uninstrumented form but we don't know
204 /// how it should be handled. Print a warning and call the function anyway.
205 /// Don't label the return value.
206 WK_Warning,
207
208 /// This function does not write to (user-accessible) memory, and its return
209 /// value is unlabelled.
210 WK_Discard,
211
212 /// This function does not write to (user-accessible) memory, and the label
213 /// of its return value is the union of the label of its arguments.
214 WK_Functional,
215
216 /// Instead of calling the function, a custom wrapper __dfsw_F is called,
217 /// where F is the name of the function. This function may wrap the
218 /// original function or provide its own implementation. This is similar to
219 /// the IA_Args ABI, except that IA_Args uses a struct return type to
220 /// pass the return value shadow in a register, while WK_Custom uses an
221 /// extra pointer argument to return the shadow. This allows the wrapped
222 /// form of the function type to be expressed in C.
223 WK_Custom
224 };
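
// As a rough illustration of WK_Custom (a sketch, not a normative signature):
// for a C function "int f(int x)", the custom wrapper would look roughly like
//   int __dfsw_f(int x, dfsan_label x_label, dfsan_label *ret_label);
// i.e. the original arguments, one label argument per original argument, and a
// pointer through which the return value's label is written.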
225
226 Module *Mod;
227 LLVMContext *Ctx;
228 IntegerType *ShadowTy;
229 PointerType *ShadowPtrTy;
230 IntegerType *IntptrTy;
231 ConstantInt *ZeroShadow;
232 ConstantInt *ShadowPtrMask;
233 ConstantInt *ShadowPtrMul;
234 Constant *ArgTLS;
235 Constant *RetvalTLS;
236 void *(*GetArgTLSPtr)();
237 void *(*GetRetvalTLSPtr)();
238 Constant *GetArgTLS;
239 Constant *GetRetvalTLS;
240 Constant *ExternalShadowMask;
241 FunctionType *DFSanUnionFnTy;
242 FunctionType *DFSanUnionLoadFnTy;
243 FunctionType *DFSanUnimplementedFnTy;
244 FunctionType *DFSanSetLabelFnTy;
245 FunctionType *DFSanNonzeroLabelFnTy;
246 FunctionType *DFSanVarargWrapperFnTy;
247 Constant *DFSanUnionFn;
248 Constant *DFSanCheckedUnionFn;
249 Constant *DFSanUnionLoadFn;
250 Constant *DFSanUnimplementedFn;
251 Constant *DFSanSetLabelFn;
252 Constant *DFSanNonzeroLabelFn;
253 Constant *DFSanVarargWrapperFn;
254 MDNode *ColdCallWeights;
255 DFSanABIList ABIList;
256 DenseMap<Value *, Function *> UnwrappedFnMap;
257 AttributeSet ReadOnlyNoneAttrs;
258 bool DFSanRuntimeShadowMask;
259
260 Value *getShadowAddress(Value *Addr, Instruction *Pos);
261 bool isInstrumented(const Function *F);
262 bool isInstrumented(const GlobalAlias *GA);
263 FunctionType *getArgsFunctionType(FunctionType *T);
264 FunctionType *getTrampolineFunctionType(FunctionType *T);
265 FunctionType *getCustomFunctionType(FunctionType *T);
266 InstrumentedABI getInstrumentedABI();
267 WrapperKind getWrapperKind(Function *F);
268 void addGlobalNamePrefix(GlobalValue *GV);
269 Function *buildWrapperFunction(Function *F, StringRef NewFName,
270 GlobalValue::LinkageTypes NewFLink,
271 FunctionType *NewFT);
272 Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
273
274 public:
275 DataFlowSanitizer(
276 const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
277 void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
278 static char ID;
279 bool doInitialization(Module &M) override;
280 bool runOnModule(Module &M) override;
281 };
282
283 struct DFSanFunction {
284 DataFlowSanitizer &DFS;
285 Function *F;
286 DominatorTree DT;
287 DataFlowSanitizer::InstrumentedABI IA;
288 bool IsNativeABI;
289 Value *ArgTLSPtr;
290 Value *RetvalTLSPtr;
291 AllocaInst *LabelReturnAlloca;
292 DenseMap<Value *, Value *> ValShadowMap;
293 DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
294 std::vector<std::pair<PHINode *, PHINode *> > PHIFixups;
295 DenseSet<Instruction *> SkipInsts;
296 std::vector<Value *> NonZeroChecks;
297 bool AvoidNewBlocks;
298
299 struct CachedCombinedShadow {
300 BasicBlock *Block;
301 Value *Shadow;
302 };
303 DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
304 CachedCombinedShadows;
305 DenseMap<Value *, std::set<Value *>> ShadowElements;
306
307 DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
308 : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()),
309 IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr),
310 LabelReturnAlloca(nullptr) {
311 DT.recalculate(*F);
312 // FIXME: Need to track down the register allocator issue which causes poor
313 // performance in pathological cases with large numbers of basic blocks.
314 AvoidNewBlocks = F->size() > 1000;
315 }
316 Value *getArgTLSPtr();
317 Value *getArgTLS(unsigned Index, Instruction *Pos);
318 Value *getRetvalTLS();
319 Value *getShadow(Value *V);
320 void setShadow(Instruction *I, Value *Shadow);
321 Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
322 Value *combineOperandShadows(Instruction *Inst);
323 Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
324 Instruction *Pos);
325 void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow,
326 Instruction *Pos);
327 };
328
329 class DFSanVisitor : public InstVisitor<DFSanVisitor> {
330 public:
331 DFSanFunction &DFSF;
332 DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
333
334 void visitOperandShadowInst(Instruction &I);
335
336 void visitBinaryOperator(BinaryOperator &BO);
337 void visitCastInst(CastInst &CI);
338 void visitCmpInst(CmpInst &CI);
339 void visitGetElementPtrInst(GetElementPtrInst &GEPI);
340 void visitLoadInst(LoadInst &LI);
341 void visitStoreInst(StoreInst &SI);
342 void visitReturnInst(ReturnInst &RI);
343 void visitCallSite(CallSite CS);
344 void visitPHINode(PHINode &PN);
345 void visitExtractElementInst(ExtractElementInst &I);
346 void visitInsertElementInst(InsertElementInst &I);
347 void visitShuffleVectorInst(ShuffleVectorInst &I);
348 void visitExtractValueInst(ExtractValueInst &I);
349 void visitInsertValueInst(InsertValueInst &I);
350 void visitAllocaInst(AllocaInst &I);
351 void visitSelectInst(SelectInst &I);
352 void visitMemSetInst(MemSetInst &I);
353 void visitMemTransferInst(MemTransferInst &I);
354 };
355
356 }
357
358 char DataFlowSanitizer::ID;
359 INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
360 "DataFlowSanitizer: dynamic data flow analysis.", false, false)
361
362 ModulePass *
363 llvm::createDataFlowSanitizerPass(const std::vector<std::string> &ABIListFiles,
364 void *(*getArgTLS)(),
365 void *(*getRetValTLS)()) {
366 return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS);
367 }
368
369 DataFlowSanitizer::DataFlowSanitizer(
370 const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(),
371 void *(*getRetValTLS)())
372 : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS),
373 DFSanRuntimeShadowMask(false) {
374 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
375 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(),
376 ClABIListFiles.end());
377 ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles));
378 }
379
380 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
381 llvm::SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
382 ArgTypes.append(T->getNumParams(), ShadowTy);
383 if (T->isVarArg())
384 ArgTypes.push_back(ShadowPtrTy);
385 Type *RetType = T->getReturnType();
386 if (!RetType->isVoidTy())
387 RetType = StructType::get(RetType, ShadowTy, (Type *)nullptr);
388 return FunctionType::get(RetType, ArgTypes, T->isVarArg());
389 }
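
// For illustration (assuming ShadowWidth == 16, so ShadowTy is i16): under
// IA_Args a function of IR type "i32 (i32, i32)" becomes
// "{ i32, i16 } (i32, i32, i16, i16)" -- one shadow argument appended per
// original argument, and the return value paired with its shadow in a struct.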
390
391 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
392 assert(!T->isVarArg());
393 llvm::SmallVector<Type *, 4> ArgTypes;
394 ArgTypes.push_back(T->getPointerTo());
395 ArgTypes.append(T->param_begin(), T->param_end());
396 ArgTypes.append(T->getNumParams(), ShadowTy);
397 Type *RetType = T->getReturnType();
398 if (!RetType->isVoidTy())
399 ArgTypes.push_back(ShadowPtrTy);
400 return FunctionType::get(T->getReturnType(), ArgTypes, false);
401 }
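
// Illustrative sketch: for FT == "i32 (i32)" the trampoline type built here is
// "i32 (i32 (i32)*, i32, i16, i16*)" -- a pointer to the wrapped function, the
// original argument, its shadow, and a pointer through which the return shadow
// is written (again assuming i16 shadows).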
402
403 FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
404 llvm::SmallVector<Type *, 4> ArgTypes;
405 for (FunctionType::param_iterator i = T->param_begin(), e = T->param_end();
406 i != e; ++i) {
407 FunctionType *FT;
408 if (isa<PointerType>(*i) && (FT = dyn_cast<FunctionType>(cast<PointerType>(
409 *i)->getElementType()))) {
410 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
411 ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
412 } else {
413 ArgTypes.push_back(*i);
414 }
415 }
416 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
417 ArgTypes.push_back(ShadowTy);
418 if (T->isVarArg())
419 ArgTypes.push_back(ShadowPtrTy);
420 Type *RetType = T->getReturnType();
421 if (!RetType->isVoidTy())
422 ArgTypes.push_back(ShadowPtrTy);
423 return FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg());
424 }
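
// Illustrative sketch (i16 shadows assumed): for FT == "i32 (i32)" the custom
// (__dfsw_) type is "i32 (i32, i16, i16*)"; for a vararg "i32 (i32, ...)" it is
// "i32 (i32, i16, i16*, i16*, ...)", where the first i16* points to the labels
// of the variadic arguments and the last i16* receives the return label.
// Function-pointer parameters are expanded into a (trampoline pointer, i8*
// context) pair as handled above.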
425
426 bool DataFlowSanitizer::doInitialization(Module &M) {
427 llvm::Triple TargetTriple(M.getTargetTriple());
428 bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
429 bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
430 TargetTriple.getArch() == llvm::Triple::mips64el;
431 bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64 ||
432 TargetTriple.getArch() == llvm::Triple::aarch64_be;
433
434 const DataLayout &DL = M.getDataLayout();
435
436 Mod = &M;
437 Ctx = &M.getContext();
438 ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
439 ShadowPtrTy = PointerType::getUnqual(ShadowTy);
440 IntptrTy = DL.getIntPtrType(*Ctx);
441 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
442 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
443 if (IsX86_64)
444 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
445 else if (IsMIPS64)
446 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
447 // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
448 else if (IsAArch64)
449 DFSanRuntimeShadowMask = true;
450 else
451 report_fatal_error("unsupported triple");
452
453 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
454 DFSanUnionFnTy =
455 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
456 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
457 DFSanUnionLoadFnTy =
458 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
459 DFSanUnimplementedFnTy = FunctionType::get(
460 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
461 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
462 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
463 DFSanSetLabelArgs, /*isVarArg=*/false);
464 DFSanNonzeroLabelFnTy = FunctionType::get(
465 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
466 DFSanVarargWrapperFnTy = FunctionType::get(
467 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
468
469 if (GetArgTLSPtr) {
470 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
471 ArgTLS = nullptr;
472 GetArgTLS = ConstantExpr::getIntToPtr(
473 ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)),
474 PointerType::getUnqual(
475 FunctionType::get(PointerType::getUnqual(ArgTLSTy),
476 (Type *)nullptr)));
477 }
478 if (GetRetvalTLSPtr) {
479 RetvalTLS = nullptr;
480 GetRetvalTLS = ConstantExpr::getIntToPtr(
481 ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)),
482 PointerType::getUnqual(
483 FunctionType::get(PointerType::getUnqual(ShadowTy),
484 (Type *)nullptr)));
485 }
486
487 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
488 return true;
489 }
490
491 bool DataFlowSanitizer::isInstrumented(const Function *F) {
492 return !ABIList.isIn(*F, "uninstrumented");
493 }
494
495 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
496 return !ABIList.isIn(*GA, "uninstrumented");
497 }
498
499 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
500 return ClArgsABI ? IA_Args : IA_TLS;
501 }
502
503 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
504 if (ABIList.isIn(*F, "functional"))
505 return WK_Functional;
506 if (ABIList.isIn(*F, "discard"))
507 return WK_Discard;
508 if (ABIList.isIn(*F, "custom"))
509 return WK_Custom;
510
511 return WK_Warning;
512 }
513
514 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
515 std::string GVName = GV->getName(), Prefix = "dfs$";
516 GV->setName(Prefix + GVName);
517
518 // Try to change the name of the function in module inline asm. We only do
519 // this for specific asm directives, currently only ".symver", to try to avoid
520 // corrupting asm which happens to contain the symbol name as a substring.
521 // Note that the substitution for .symver assumes that the versioned symbol
522 // also has an instrumented name.
523 std::string Asm = GV->getParent()->getModuleInlineAsm();
524 std::string SearchStr = ".symver " + GVName + ",";
525 size_t Pos = Asm.find(SearchStr);
526 if (Pos != std::string::npos) {
527 Asm.replace(Pos, SearchStr.size(),
528 ".symver " + Prefix + GVName + "," + Prefix);
529 GV->getParent()->setModuleInlineAsm(Asm);
530 }
531 }
532
533 Function *
534 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
535 GlobalValue::LinkageTypes NewFLink,
536 FunctionType *NewFT) {
537 FunctionType *FT = F->getFunctionType();
538 Function *NewF = Function::Create(NewFT, NewFLink, NewFName,
539 F->getParent());
540 NewF->copyAttributesFrom(F);
541 NewF->removeAttributes(
542 AttributeSet::ReturnIndex,
543 AttributeSet::get(F->getContext(), AttributeSet::ReturnIndex,
544 AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
545
546 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
547 if (F->isVarArg()) {
548 NewF->removeAttributes(
549 AttributeSet::FunctionIndex,
550 AttributeSet().addAttribute(*Ctx, AttributeSet::FunctionIndex,
551 "split-stack"));
552 CallInst::Create(DFSanVarargWrapperFn,
553 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
554 BB);
555 new UnreachableInst(*Ctx, BB);
556 } else {
557 std::vector<Value *> Args;
558 unsigned n = FT->getNumParams();
559 for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
560 Args.push_back(&*ai);
561 CallInst *CI = CallInst::Create(F, Args, "", BB);
562 if (FT->getReturnType()->isVoidTy())
563 ReturnInst::Create(*Ctx, BB);
564 else
565 ReturnInst::Create(*Ctx, CI, BB);
566 }
567
568 return NewF;
569 }
570
571 Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
572 StringRef FName) {
573 FunctionType *FTT = getTrampolineFunctionType(FT);
574 Constant *C = Mod->getOrInsertFunction(FName, FTT);
575 Function *F = dyn_cast<Function>(C);
576 if (F && F->isDeclaration()) {
577 F->setLinkage(GlobalValue::LinkOnceODRLinkage);
578 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
579 std::vector<Value *> Args;
580 Function::arg_iterator AI = F->arg_begin(); ++AI;
581 for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
582 Args.push_back(&*AI);
583 CallInst *CI =
584 CallInst::Create(&F->getArgumentList().front(), Args, "", BB);
585 ReturnInst *RI;
586 if (FT->getReturnType()->isVoidTy())
587 RI = ReturnInst::Create(*Ctx, BB);
588 else
589 RI = ReturnInst::Create(*Ctx, CI, BB);
590
591 DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
592 Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
593 for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
594 DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
595 DFSanVisitor(DFSF).visitCallInst(*CI);
596 if (!FT->getReturnType()->isVoidTy())
597 new StoreInst(DFSF.getShadow(RI->getReturnValue()),
598 &F->getArgumentList().back(), RI);
599 }
600
601 return C;
602 }
603
604 bool DataFlowSanitizer::runOnModule(Module &M) {
605 if (ABIList.isIn(M, "skip"))
606 return false;
607
608 if (!GetArgTLSPtr) {
609 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
610 ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
611 if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS))
612 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
613 }
614 if (!GetRetvalTLSPtr) {
615 RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
616 if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS))
617 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
618 }
619
620 ExternalShadowMask =
621 Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);
622
623 DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy);
624 if (Function *F = dyn_cast<Function>(DFSanUnionFn)) {
625 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
626 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
627 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
628 F->addAttribute(1, Attribute::ZExt);
629 F->addAttribute(2, Attribute::ZExt);
630 }
631 DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy);
632 if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) {
633 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
634 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
635 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
636 F->addAttribute(1, Attribute::ZExt);
637 F->addAttribute(2, Attribute::ZExt);
638 }
639 DFSanUnionLoadFn =
640 Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
641 if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
642 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
643 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
644 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
645 }
646 DFSanUnimplementedFn =
647 Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
648 DFSanSetLabelFn =
649 Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy);
650 if (Function *F = dyn_cast<Function>(DFSanSetLabelFn)) {
651 F->addAttribute(1, Attribute::ZExt);
652 }
653 DFSanNonzeroLabelFn =
654 Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
655 DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
656 DFSanVarargWrapperFnTy);
657
658 std::vector<Function *> FnsToInstrument;
659 llvm::SmallPtrSet<Function *, 2> FnsWithNativeABI;
660 for (Function &i : M) {
661 if (!i.isIntrinsic() &&
662 &i != DFSanUnionFn &&
663 &i != DFSanCheckedUnionFn &&
664 &i != DFSanUnionLoadFn &&
665 &i != DFSanUnimplementedFn &&
666 &i != DFSanSetLabelFn &&
667 &i != DFSanNonzeroLabelFn &&
668 &i != DFSanVarargWrapperFn)
669 FnsToInstrument.push_back(&i);
670 }
671
672 // Give function aliases prefixes when necessary, and build wrappers where the
673 // instrumentedness is inconsistent.
674 for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
675 GlobalAlias *GA = &*i;
676 ++i;
677 // Don't stop on weak. We assume people aren't playing games with the
678 // instrumentedness of overridden weak aliases.
679 if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
680 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
681 if (GAInst && FInst) {
682 addGlobalNamePrefix(GA);
683 } else if (GAInst != FInst) {
684 // Non-instrumented alias of an instrumented function, or vice versa.
685 // Replace the alias with a native-ABI wrapper of the aliasee. The pass
686 // below will take care of instrumenting it.
687 Function *NewF =
688 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
689 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
690 NewF->takeName(GA);
691 GA->eraseFromParent();
692 FnsToInstrument.push_back(NewF);
693 }
694 }
695 }
696
697 AttrBuilder B;
698 B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
699 ReadOnlyNoneAttrs = AttributeSet::get(*Ctx, AttributeSet::FunctionIndex, B);
700
701 // First, change the ABI of every function in the module. ABI-listed
702 // functions keep their original ABI and get a wrapper function.
703 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
704 e = FnsToInstrument.end();
705 i != e; ++i) {
706 Function &F = **i;
707 FunctionType *FT = F.getFunctionType();
708
709 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
710 FT->getReturnType()->isVoidTy());
711
712 if (isInstrumented(&F)) {
713 // Instrumented functions get a 'dfs$' prefix. This allows us to more
714 // easily identify cases of mismatching ABIs.
715 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
716 FunctionType *NewFT = getArgsFunctionType(FT);
717 Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M);
718 NewF->copyAttributesFrom(&F);
719 NewF->removeAttributes(
720 AttributeSet::ReturnIndex,
721 AttributeSet::get(NewF->getContext(), AttributeSet::ReturnIndex,
722 AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
723 for (Function::arg_iterator FArg = F.arg_begin(),
724 NewFArg = NewF->arg_begin(),
725 FArgEnd = F.arg_end();
726 FArg != FArgEnd; ++FArg, ++NewFArg) {
727 FArg->replaceAllUsesWith(&*NewFArg);
728 }
729 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
730
731 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
732 UI != UE;) {
733 BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
734 ++UI;
735 if (BA) {
736 BA->replaceAllUsesWith(
737 BlockAddress::get(NewF, BA->getBasicBlock()));
738 delete BA;
739 }
740 }
741 F.replaceAllUsesWith(
742 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
743 NewF->takeName(&F);
744 F.eraseFromParent();
745 *i = NewF;
746 addGlobalNamePrefix(NewF);
747 } else {
748 addGlobalNamePrefix(&F);
749 }
750 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
751 // Build a wrapper function for F. The wrapper simply calls F, and is
752 // added to FnsToInstrument so that any instrumentation according to its
753 // WrapperKind is done in the second pass below.
754 FunctionType *NewFT = getInstrumentedABI() == IA_Args
755 ? getArgsFunctionType(FT)
756 : FT;
757 Function *NewF = buildWrapperFunction(
758 &F, std::string("dfsw$") + std::string(F.getName()),
759 GlobalValue::LinkOnceODRLinkage, NewFT);
760 if (getInstrumentedABI() == IA_TLS)
761 NewF->removeAttributes(AttributeSet::FunctionIndex, ReadOnlyNoneAttrs);
762
763 Value *WrappedFnCst =
764 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
765 F.replaceAllUsesWith(WrappedFnCst);
766
767 UnwrappedFnMap[WrappedFnCst] = &F;
768 *i = NewF;
769
770 if (!F.isDeclaration()) {
771 // This function is probably defining an interposition of an
772 // uninstrumented function and hence needs to keep the original ABI.
773 // But any functions it may call need to use the instrumented ABI, so
774 // we instrument it in a mode which preserves the original ABI.
775 FnsWithNativeABI.insert(&F);
776
777 // This code needs to rebuild the iterators, as they may be invalidated
778 // by the push_back, taking care that the new range does not include
779 // any functions added by this code.
780 size_t N = i - FnsToInstrument.begin(),
781 Count = e - FnsToInstrument.begin();
782 FnsToInstrument.push_back(&F);
783 i = FnsToInstrument.begin() + N;
784 e = FnsToInstrument.begin() + Count;
785 }
786 // Hopefully, nobody will try to indirectly call a vararg
787 // function... yet.
788 } else if (FT->isVarArg()) {
789 UnwrappedFnMap[&F] = &F;
790 *i = nullptr;
791 }
792 }
793
794 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
795 e = FnsToInstrument.end();
796 i != e; ++i) {
797 if (!*i || (*i)->isDeclaration())
798 continue;
799
800 removeUnreachableBlocks(**i);
801
802 DFSanFunction DFSF(*this, *i, FnsWithNativeABI.count(*i));
803
804 // DFSanVisitor may create new basic blocks, which confuses df_iterator.
805 // Build a copy of the list before iterating over it.
806 llvm::SmallVector<BasicBlock *, 4> BBList(
807 depth_first(&(*i)->getEntryBlock()));
808
809 for (llvm::SmallVector<BasicBlock *, 4>::iterator i = BBList.begin(),
810 e = BBList.end();
811 i != e; ++i) {
812 Instruction *Inst = &(*i)->front();
813 while (1) {
814 // DFSanVisitor may split the current basic block, changing the current
815 // instruction's next pointer and moving the next instruction to the
816 // tail block from which we should continue.
817 Instruction *Next = Inst->getNextNode();
818 // DFSanVisitor may delete Inst, so keep track of whether it was a
819 // terminator.
820 bool IsTerminator = isa<TerminatorInst>(Inst);
821 if (!DFSF.SkipInsts.count(Inst))
822 DFSanVisitor(DFSF).visit(Inst);
823 if (IsTerminator)
824 break;
825 Inst = Next;
826 }
827 }
828
829 // We will not necessarily be able to compute the shadow for every phi node
830 // until we have visited every block. Therefore, the code that handles phi
831 // nodes adds them to the PHIFixups list so that they can be properly
832 // handled here.
833 for (std::vector<std::pair<PHINode *, PHINode *> >::iterator
834 i = DFSF.PHIFixups.begin(),
835 e = DFSF.PHIFixups.end();
836 i != e; ++i) {
837 for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
838 ++val) {
839 i->second->setIncomingValue(
840 val, DFSF.getShadow(i->first->getIncomingValue(val)));
841 }
842 }
843
844 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
845 // places (i.e. instructions in basic blocks we haven't even begun visiting
846 // yet). To make our life easier, do this work in a pass after the main
847 // instrumentation.
848 if (ClDebugNonzeroLabels) {
849 for (Value *V : DFSF.NonZeroChecks) {
850 Instruction *Pos;
851 if (Instruction *I = dyn_cast<Instruction>(V))
852 Pos = I->getNextNode();
853 else
854 Pos = &DFSF.F->getEntryBlock().front();
855 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
856 Pos = Pos->getNextNode();
857 IRBuilder<> IRB(Pos);
858 Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
859 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
860 Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
861 IRBuilder<> ThenIRB(BI);
862 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
863 }
864 }
865 }
866
867 return false;
868 }
869
870 Value *DFSanFunction::getArgTLSPtr() {
871 if (ArgTLSPtr)
872 return ArgTLSPtr;
873 if (DFS.ArgTLS)
874 return ArgTLSPtr = DFS.ArgTLS;
875
876 IRBuilder<> IRB(&F->getEntryBlock().front());
877 return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {});
878 }
879
880 Value *DFSanFunction::getRetvalTLS() {
881 if (RetvalTLSPtr)
882 return RetvalTLSPtr;
883 if (DFS.RetvalTLS)
884 return RetvalTLSPtr = DFS.RetvalTLS;
885
886 IRBuilder<> IRB(&F->getEntryBlock().front());
887 return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {});
888 }
889
890 Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
891 IRBuilder<> IRB(Pos);
892 return IRB.CreateConstGEP2_64(getArgTLSPtr(), 0, Idx);
893 }
894
895 Value *DFSanFunction::getShadow(Value *V) {
896 if (!isa<Argument>(V) && !isa<Instruction>(V))
897 return DFS.ZeroShadow;
898 Value *&Shadow = ValShadowMap[V];
899 if (!Shadow) {
900 if (Argument *A = dyn_cast<Argument>(V)) {
901 if (IsNativeABI)
902 return DFS.ZeroShadow;
903 switch (IA) {
904 case DataFlowSanitizer::IA_TLS: {
905 Value *ArgTLSPtr = getArgTLSPtr();
906 Instruction *ArgTLSPos =
907 DFS.ArgTLS ? &*F->getEntryBlock().begin()
908 : cast<Instruction>(ArgTLSPtr)->getNextNode();
909 IRBuilder<> IRB(ArgTLSPos);
910 Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos));
911 break;
912 }
913 case DataFlowSanitizer::IA_Args: {
914 unsigned ArgIdx = A->getArgNo() + F->getArgumentList().size() / 2;
915 Function::arg_iterator i = F->arg_begin();
916 while (ArgIdx--)
917 ++i;
918 Shadow = &*i;
919 assert(Shadow->getType() == DFS.ShadowTy);
920 break;
921 }
922 }
923 NonZeroChecks.push_back(Shadow);
924 } else {
925 Shadow = DFS.ZeroShadow;
926 }
927 }
928 return Shadow;
929 }
930
931 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
932 assert(!ValShadowMap.count(I));
933 assert(Shadow->getType() == DFS.ShadowTy);
934 ValShadowMap[I] = Shadow;
935 }
936
937 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
938 assert(Addr != RetvalTLS && "Reinstrumenting?");
939 IRBuilder<> IRB(Pos);
940 Value *ShadowPtrMaskValue;
941 if (DFSanRuntimeShadowMask)
942 ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
943 else
944 ShadowPtrMaskValue = ShadowPtrMask;
945 return IRB.CreateIntToPtr(
946 IRB.CreateMul(
947 IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
948 IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
949 ShadowPtrMul),
950 ShadowPtrTy);
951 }
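
// In effect the address computed above is
//   ShadowAddr = (Addr & ShadowPtrMask) * (ShadowWidth / 8)
// with ShadowPtrMask loaded from __dfsan_shadow_ptr_mask at run time on
// targets (currently AArch64) whose mask depends on the VMA size.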
952
953 // Generates IR to compute the union of the two given shadows, inserting it
954 // before Pos. Returns the computed union Value.
955 Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
956 if (V1 == DFS.ZeroShadow)
957 return V2;
958 if (V2 == DFS.ZeroShadow)
959 return V1;
960 if (V1 == V2)
961 return V1;
962
963 auto V1Elems = ShadowElements.find(V1);
964 auto V2Elems = ShadowElements.find(V2);
965 if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
966 if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
967 V2Elems->second.begin(), V2Elems->second.end())) {
968 return V1;
969 } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
970 V1Elems->second.begin(), V1Elems->second.end())) {
971 return V2;
972 }
973 } else if (V1Elems != ShadowElements.end()) {
974 if (V1Elems->second.count(V2))
975 return V1;
976 } else if (V2Elems != ShadowElements.end()) {
977 if (V2Elems->second.count(V1))
978 return V2;
979 }
980
981 auto Key = std::make_pair(V1, V2);
982 if (V1 > V2)
983 std::swap(Key.first, Key.second);
984 CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
985 if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
986 return CCS.Shadow;
987
988 IRBuilder<> IRB(Pos);
989 if (AvoidNewBlocks) {
990 CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
991 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
992 Call->addAttribute(1, Attribute::ZExt);
993 Call->addAttribute(2, Attribute::ZExt);
994
995 CCS.Block = Pos->getParent();
996 CCS.Shadow = Call;
997 } else {
998 BasicBlock *Head = Pos->getParent();
999 Value *Ne = IRB.CreateICmpNE(V1, V2);
1000 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1001 Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
1002 IRBuilder<> ThenIRB(BI);
1003 CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
1004 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1005 Call->addAttribute(1, Attribute::ZExt);
1006 Call->addAttribute(2, Attribute::ZExt);
1007
1008 BasicBlock *Tail = BI->getSuccessor(0);
1009 PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1010 Phi->addIncoming(Call, Call->getParent());
1011 Phi->addIncoming(V1, Head);
1012
1013 CCS.Block = Tail;
1014 CCS.Shadow = Phi;
1015 }
1016
1017 std::set<Value *> UnionElems;
1018 if (V1Elems != ShadowElements.end()) {
1019 UnionElems = V1Elems->second;
1020 } else {
1021 UnionElems.insert(V1);
1022 }
1023 if (V2Elems != ShadowElements.end()) {
1024 UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
1025 } else {
1026 UnionElems.insert(V2);
1027 }
1028 ShadowElements[CCS.Shadow] = std::move(UnionElems);
1029
1030 return CCS.Shadow;
1031 }
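
// For reference, the slow (block-splitting) path above produces IR shaped
// roughly like this sketch, with %v1/%v2 standing for the incoming shadows:
//   %ne = icmp ne i16 %v1, %v2
//   br i1 %ne, label %union, label %tail   ; cold branch weights
// union:
//   %u = call zeroext i16 @__dfsan_union(i16 zeroext %v1, i16 zeroext %v2)
//   br label %tail
// tail:
//   %shadow = phi i16 [ %u, %union ], [ %v1, %head ]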
1032
1033 // A convenience function which folds the shadows of each of the operands
1034 // of the provided instruction Inst, inserting the IR before Inst. Returns
1035 // the computed union Value.
1036 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
1037 if (Inst->getNumOperands() == 0)
1038 return DFS.ZeroShadow;
1039
1040 Value *Shadow = getShadow(Inst->getOperand(0));
1041 for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
1042 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
1043 }
1044 return Shadow;
1045 }
1046
1047 void DFSanVisitor::visitOperandShadowInst(Instruction &I) {
1048 Value *CombinedShadow = DFSF.combineOperandShadows(&I);
1049 DFSF.setShadow(&I, CombinedShadow);
1050 }
1051
1052 // Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
1053 // Addr has alignment Align, and take the union of each of those shadows.
1054 Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
1055 Instruction *Pos) {
1056 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1057 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
1058 AllocaShadowMap.find(AI);
1059 if (i != AllocaShadowMap.end()) {
1060 IRBuilder<> IRB(Pos);
1061 return IRB.CreateLoad(i->second);
1062 }
1063 }
1064
1065 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
1066 SmallVector<Value *, 2> Objs;
1067 GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
1068 bool AllConstants = true;
1069 for (SmallVector<Value *, 2>::iterator i = Objs.begin(), e = Objs.end();
1070 i != e; ++i) {
1071 if (isa<Function>(*i) || isa<BlockAddress>(*i))
1072 continue;
1073 if (isa<GlobalVariable>(*i) && cast<GlobalVariable>(*i)->isConstant())
1074 continue;
1075
1076 AllConstants = false;
1077 break;
1078 }
1079 if (AllConstants)
1080 return DFS.ZeroShadow;
1081
1082 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1083 switch (Size) {
1084 case 0:
1085 return DFS.ZeroShadow;
1086 case 1: {
1087 LoadInst *LI = new LoadInst(ShadowAddr, "", Pos);
1088 LI->setAlignment(ShadowAlign);
1089 return LI;
1090 }
1091 case 2: {
1092 IRBuilder<> IRB(Pos);
1093 Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
1094 ConstantInt::get(DFS.IntptrTy, 1));
1095 return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
1096 IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
1097 }
1098 }
1099 if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
1100 // Fast path for the common case where each byte has identical shadow: load
1101 // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
1102 // shadow is non-equal.
1103 BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
1104 IRBuilder<> FallbackIRB(FallbackBB);
1105 CallInst *FallbackCall = FallbackIRB.CreateCall(
1106 DFS.DFSanUnionLoadFn,
1107 {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1108 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1109
1110 // Compare each of the shadows stored in the loaded 64 bits to each other,
1111 // by computing (WideShadow rotl ShadowWidth) == WideShadow.
1112 IRBuilder<> IRB(Pos);
1113 Value *WideAddr =
1114 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1115 Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1116 Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
1117 Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
1118 Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
1119 Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
1120 Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
1121
1122 BasicBlock *Head = Pos->getParent();
1123 BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
1124
1125 if (DomTreeNode *OldNode = DT.getNode(Head)) {
1126 std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
1127
1128 DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
1129 for (auto Child : Children)
1130 DT.changeImmediateDominator(Child, NewNode);
1131 }
1132
1133 // In the following code LastBr will refer to the previous basic block's
1134 // conditional branch instruction, whose true successor is fixed up to point
1135 // to the next block during the loop below or to the tail after the final
1136 // iteration.
1137 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
1138 ReplaceInstWithInst(Head->getTerminator(), LastBr);
1139 DT.addNewBlock(FallbackBB, Head);
1140
1141 for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
1142 Ofs += 64 / DFS.ShadowWidth) {
1143 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
1144 DT.addNewBlock(NextBB, LastBr->getParent());
1145 IRBuilder<> NextIRB(NextBB);
1146 WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1147 ConstantInt::get(DFS.IntptrTy, 1));
1148 Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1149 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
1150 LastBr->setSuccessor(0, NextBB);
1151 LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
1152 }
1153
1154 LastBr->setSuccessor(0, Tail);
1155 FallbackIRB.CreateBr(Tail);
1156 PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1157 Shadow->addIncoming(FallbackCall, FallbackBB);
1158 Shadow->addIncoming(TruncShadow, LastBr->getParent());
1159 return Shadow;
1160 }
1161
1162 IRBuilder<> IRB(Pos);
1163 CallInst *FallbackCall = IRB.CreateCall(
1164 DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1165 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1166 return FallbackCall;
1167 }
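
// A worked example of the fast path above, assuming ShadowWidth == 16: one
// 64-bit load covers four i16 shadows. If the wide value is, say,
// 0x000a000a000a000a, rotating left by 16 bits reproduces the same value, so
// the equality check succeeds and the truncated shadow 0x000a is used
// directly; if any of the four shadows differ, the rotated value differs and
// control falls through to the __dfsan_union_load fallback.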
1168
1169 void DFSanVisitor::visitLoadInst(LoadInst &LI) {
1170 auto &DL = LI.getModule()->getDataLayout();
1171 uint64_t Size = DL.getTypeStoreSize(LI.getType());
1172 if (Size == 0) {
1173 DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
1174 return;
1175 }
1176
1177 uint64_t Align;
1178 if (ClPreserveAlignment) {
1179 Align = LI.getAlignment();
1180 if (Align == 0)
1181 Align = DL.getABITypeAlignment(LI.getType());
1182 } else {
1183 Align = 1;
1184 }
1185 IRBuilder<> IRB(&LI);
1186 Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
1187 if (ClCombinePointerLabelsOnLoad) {
1188 Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
1189 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
1190 }
1191 if (Shadow != DFSF.DFS.ZeroShadow)
1192 DFSF.NonZeroChecks.push_back(Shadow);
1193
1194 DFSF.setShadow(&LI, Shadow);
1195 }
1196
1197 void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
1198 Value *Shadow, Instruction *Pos) {
1199 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1200 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
1201 AllocaShadowMap.find(AI);
1202 if (i != AllocaShadowMap.end()) {
1203 IRBuilder<> IRB(Pos);
1204 IRB.CreateStore(Shadow, i->second);
1205 return;
1206 }
1207 }
1208
1209 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
1210 IRBuilder<> IRB(Pos);
1211 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1212 if (Shadow == DFS.ZeroShadow) {
1213 IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth);
1214 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
1215 Value *ExtShadowAddr =
1216 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
1217 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
1218 return;
1219 }
1220
1221 const unsigned ShadowVecSize = 128 / DFS.ShadowWidth;
1222 uint64_t Offset = 0;
1223 if (Size >= ShadowVecSize) {
1224 VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize);
1225 Value *ShadowVec = UndefValue::get(ShadowVecTy);
1226 for (unsigned i = 0; i != ShadowVecSize; ++i) {
1227 ShadowVec = IRB.CreateInsertElement(
1228 ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
1229 }
1230 Value *ShadowVecAddr =
1231 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
1232 do {
1233 Value *CurShadowVecAddr =
1234 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
1235 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
1236 Size -= ShadowVecSize;
1237 ++Offset;
1238 } while (Size >= ShadowVecSize);
1239 Offset *= ShadowVecSize;
1240 }
1241 while (Size > 0) {
1242 Value *CurShadowAddr =
1243 IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
1244 IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
1245 --Size;
1246 ++Offset;
1247 }
1248 }
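
// A worked example of the store strategy above, assuming ShadowWidth == 16:
// ShadowVecSize is 128 / 16 == 8, so storing the shadow for a 20-byte object
// issues two <8 x i16> vector stores followed by four scalar i16 stores.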
1249
1250 void DFSanVisitor::visitStoreInst(StoreInst &SI) {
1251 auto &DL = SI.getModule()->getDataLayout();
1252 uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
1253 if (Size == 0)
1254 return;
1255
1256 uint64_t Align;
1257 if (ClPreserveAlignment) {
1258 Align = SI.getAlignment();
1259 if (Align == 0)
1260 Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
1261 } else {
1262 Align = 1;
1263 }
1264
1265 Value* Shadow = DFSF.getShadow(SI.getValueOperand());
1266 if (ClCombinePointerLabelsOnStore) {
1267 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
1268 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
1269 }
1270 DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
1271 }
1272
1273 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
1274 visitOperandShadowInst(BO);
1275 }
1276
1277 void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }
1278
1279 void DFSanVisitor::visitCmpInst(CmpInst &CI) { visitOperandShadowInst(CI); }
1280
1281 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1282 visitOperandShadowInst(GEPI);
1283 }
1284
1285 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
1286 visitOperandShadowInst(I);
1287 }
1288
1289 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
1290 visitOperandShadowInst(I);
1291 }
1292
1293 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
1294 visitOperandShadowInst(I);
1295 }
1296
1297 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
1298 visitOperandShadowInst(I);
1299 }
1300
1301 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
1302 visitOperandShadowInst(I);
1303 }
1304
1305 void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
1306 bool AllLoadsStores = true;
1307 for (User *U : I.users()) {
1308 if (isa<LoadInst>(U))
1309 continue;
1310
1311 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1312 if (SI->getPointerOperand() == &I)
1313 continue;
1314 }
1315
1316 AllLoadsStores = false;
1317 break;
1318 }
1319 if (AllLoadsStores) {
1320 IRBuilder<> IRB(&I);
1321 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
1322 }
1323 DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
1324 }
1325
1326 void DFSanVisitor::visitSelectInst(SelectInst &I) {
1327 Value *CondShadow = DFSF.getShadow(I.getCondition());
1328 Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
1329 Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
1330
1331 if (isa<VectorType>(I.getCondition()->getType())) {
1332 DFSF.setShadow(
1333 &I,
1334 DFSF.combineShadows(
1335 CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
1336 } else {
1337 Value *ShadowSel;
1338 if (TrueShadow == FalseShadow) {
1339 ShadowSel = TrueShadow;
1340 } else {
1341 ShadowSel =
1342 SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
1343 }
1344 DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
1345 }
1346 }
1347
1348 void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
1349 IRBuilder<> IRB(&I);
1350 Value *ValShadow = DFSF.getShadow(I.getValue());
1351 IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
1352 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
1353 *DFSF.DFS.Ctx)),
1354 IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
1355 }
1356
1357 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
1358 IRBuilder<> IRB(&I);
1359 Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
1360 Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
1361 Value *LenShadow = IRB.CreateMul(
1362 I.getLength(),
1363 ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
1364 Value *AlignShadow;
1365 if (ClPreserveAlignment) {
1366 AlignShadow = IRB.CreateMul(I.getAlignmentCst(),
1367 ConstantInt::get(I.getAlignmentCst()->getType(),
1368 DFSF.DFS.ShadowWidth / 8));
1369 } else {
1370 AlignShadow = ConstantInt::get(I.getAlignmentCst()->getType(),
1371 DFSF.DFS.ShadowWidth / 8);
1372 }
1373 Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
1374 DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
1375 SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
1376 IRB.CreateCall(I.getCalledValue(), {DestShadow, SrcShadow, LenShadow,
1377 AlignShadow, I.getVolatileCst()});
1378 }
1379
1380 void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
1381 if (!DFSF.IsNativeABI && RI.getReturnValue()) {
1382 switch (DFSF.IA) {
1383 case DataFlowSanitizer::IA_TLS: {
1384 Value *S = DFSF.getShadow(RI.getReturnValue());
1385 IRBuilder<> IRB(&RI);
1386 IRB.CreateStore(S, DFSF.getRetvalTLS());
1387 break;
1388 }
1389 case DataFlowSanitizer::IA_Args: {
1390 IRBuilder<> IRB(&RI);
1391 Type *RT = DFSF.F->getFunctionType()->getReturnType();
1392 Value *InsVal =
1393 IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
1394 Value *InsShadow =
1395 IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
1396 RI.setOperand(0, InsShadow);
1397 break;
1398 }
1399 }
1400 }
1401 }
1402
1403 void DFSanVisitor::visitCallSite(CallSite CS) {
1404 Function *F = CS.getCalledFunction();
1405 if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) {
1406 visitOperandShadowInst(*CS.getInstruction());
1407 return;
1408 }
1409
1410 // Calls to this function are synthesized in wrappers, and we shouldn't
1411 // instrument them.
1412 if (F == DFSF.DFS.DFSanVarargWrapperFn)
1413 return;
1414
1415 assert(!(cast<FunctionType>(
1416 CS.getCalledValue()->getType()->getPointerElementType())->isVarArg() &&
1417 dyn_cast<InvokeInst>(CS.getInstruction())));
1418
1419 IRBuilder<> IRB(CS.getInstruction());
1420
1421 DenseMap<Value *, Function *>::iterator i =
1422 DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue());
1423 if (i != DFSF.DFS.UnwrappedFnMap.end()) {
1424 Function *F = i->second;
1425 switch (DFSF.DFS.getWrapperKind(F)) {
1426 case DataFlowSanitizer::WK_Warning: {
1427 CS.setCalledFunction(F);
1428 IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
1429 IRB.CreateGlobalStringPtr(F->getName()));
1430 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
1431 return;
1432 }
1433 case DataFlowSanitizer::WK_Discard: {
1434 CS.setCalledFunction(F);
1435 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
1436 return;
1437 }
1438 case DataFlowSanitizer::WK_Functional: {
1439 CS.setCalledFunction(F);
1440 visitOperandShadowInst(*CS.getInstruction());
1441 return;
1442 }
1443 case DataFlowSanitizer::WK_Custom: {
1444 // Don't try to handle invokes of custom functions, it's too complicated.
1445 // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
1446 // wrapper.
1447 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
1448 FunctionType *FT = F->getFunctionType();
1449 FunctionType *CustomFT = DFSF.DFS.getCustomFunctionType(FT);
1450 std::string CustomFName = "__dfsw_";
1451 CustomFName += F->getName();
1452 Constant *CustomF =
1453 DFSF.DFS.Mod->getOrInsertFunction(CustomFName, CustomFT);
1454 if (Function *CustomFn = dyn_cast<Function>(CustomF)) {
1455 CustomFn->copyAttributesFrom(F);
1456
1457 // Custom functions returning non-void will write to the return label.
1458 if (!FT->getReturnType()->isVoidTy()) {
1459 CustomFn->removeAttributes(AttributeSet::FunctionIndex,
1460 DFSF.DFS.ReadOnlyNoneAttrs);
1461 }
1462 }
1463
1464 std::vector<Value *> Args;
1465
1466 CallSite::arg_iterator i = CS.arg_begin();
1467 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
1468 Type *T = (*i)->getType();
1469 FunctionType *ParamFT;
1470 if (isa<PointerType>(T) &&
1471 (ParamFT = dyn_cast<FunctionType>(
1472 cast<PointerType>(T)->getElementType()))) {
1473 std::string TName = "dfst";
1474 TName += utostr(FT->getNumParams() - n);
1475 TName += "$";
1476 TName += F->getName();
1477 Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
1478 Args.push_back(T);
1479 Args.push_back(
1480 IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
1481 } else {
1482 Args.push_back(*i);
1483 }
1484 }
1485
1486 i = CS.arg_begin();
1487 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1488 Args.push_back(DFSF.getShadow(*i));
1489
1490 if (FT->isVarArg()) {
1491 auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
1492 CS.arg_size() - FT->getNumParams());
1493 auto *LabelVAAlloca = new AllocaInst(
1494 LabelVATy, "labelva", &DFSF.F->getEntryBlock().front());
1495
1496 for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
1497 auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
1498 IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
1499 }
1500
1501 Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
1502 }
1503
1504 if (!FT->getReturnType()->isVoidTy()) {
1505 if (!DFSF.LabelReturnAlloca) {
1506 DFSF.LabelReturnAlloca =
1507 new AllocaInst(DFSF.DFS.ShadowTy, "labelreturn",
1508 &DFSF.F->getEntryBlock().front());
1509 }
1510 Args.push_back(DFSF.LabelReturnAlloca);
1511 }
1512
1513 for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i)
1514 Args.push_back(*i);
1515
1516 CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
1517 CustomCI->setCallingConv(CI->getCallingConv());
1518 CustomCI->setAttributes(CI->getAttributes());
1519
1520 if (!FT->getReturnType()->isVoidTy()) {
1521 LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca);
1522 DFSF.setShadow(CustomCI, LabelLoad);
1523 }
1524
1525 CI->replaceAllUsesWith(CustomCI);
1526 CI->eraseFromParent();
1527 return;
1528 }
1529 break;
1530 }
1531 }
1532 }
1533
1534 FunctionType *FT = cast<FunctionType>(
1535 CS.getCalledValue()->getType()->getPointerElementType());
1536 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
1537 for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
1538 IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)),
1539 DFSF.getArgTLS(i, CS.getInstruction()));
1540 }
1541 }
1542
1543 Instruction *Next = nullptr;
1544 if (!CS.getType()->isVoidTy()) {
1545 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
1546 if (II->getNormalDest()->getSinglePredecessor()) {
1547 Next = &II->getNormalDest()->front();
1548 } else {
1549 BasicBlock *NewBB =
1550 SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
1551 Next = &NewBB->front();
1552 }
1553 } else {
1554 assert(CS->getIterator() != CS->getParent()->end());
1555 Next = CS->getNextNode();
1556 }
1557
1558 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
1559 IRBuilder<> NextIRB(Next);
1560 LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
1561 DFSF.SkipInsts.insert(LI);
1562 DFSF.setShadow(CS.getInstruction(), LI);
1563 DFSF.NonZeroChecks.push_back(LI);
1564 }
1565 }
1566
1567 // Do all instrumentation for IA_Args down here to defer tampering with the
1568 // CFG in a way that SplitEdge may be able to detect.
1569 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
1570 FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
1571 Value *Func =
1572 IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT));
1573 std::vector<Value *> Args;
1574
1575 CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1576 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1577 Args.push_back(*i);
1578
1579 i = CS.arg_begin();
1580 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1581 Args.push_back(DFSF.getShadow(*i));
1582
1583 if (FT->isVarArg()) {
1584 unsigned VarArgSize = CS.arg_size() - FT->getNumParams();
1585 ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
1586 AllocaInst *VarArgShadow =
1587 new AllocaInst(VarArgArrayTy, "", &DFSF.F->getEntryBlock().front());
1588 Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
1589 for (unsigned n = 0; i != e; ++i, ++n) {
1590 IRB.CreateStore(
1591 DFSF.getShadow(*i),
1592 IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
1593 Args.push_back(*i);
1594 }
1595 }
1596
1597 CallSite NewCS;
1598 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
1599 NewCS = IRB.CreateInvoke(Func, II->getNormalDest(), II->getUnwindDest(),
1600 Args);
1601 } else {
1602 NewCS = IRB.CreateCall(Func, Args);
1603 }
1604 NewCS.setCallingConv(CS.getCallingConv());
1605 NewCS.setAttributes(CS.getAttributes().removeAttributes(
1606 *DFSF.DFS.Ctx, AttributeSet::ReturnIndex,
1607 AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType())));
1608
1609 if (Next) {
1610 ExtractValueInst *ExVal =
1611 ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next);
1612 DFSF.SkipInsts.insert(ExVal);
1613 ExtractValueInst *ExShadow =
1614 ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next);
1615 DFSF.SkipInsts.insert(ExShadow);
1616 DFSF.setShadow(ExVal, ExShadow);
1617 DFSF.NonZeroChecks.push_back(ExShadow);
1618
1619 CS.getInstruction()->replaceAllUsesWith(ExVal);
1620 }
1621
1622 CS.getInstruction()->eraseFromParent();
1623 }
1624 }
1625
1626 void DFSanVisitor::visitPHINode(PHINode &PN) {
1627 PHINode *ShadowPN =
1628 PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);
1629
1630 // Give the shadow phi node valid predecessors to fool SplitEdge into working.
1631 Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
1632 for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
1633 ++i) {
1634 ShadowPN->addIncoming(UndefShadow, *i);
1635 }
1636
1637 DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
1638 DFSF.setShadow(&PN, ShadowPN);
1639 }
1640