1 //===- DataFlowSanitizer.cpp - dynamic data flow analysis -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
12 /// analysis.
13 ///
14 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific
15 /// class of bugs on its own. Instead, it provides a generic dynamic data flow
16 /// analysis framework to be used by clients to help detect application-specific
17 /// issues within their own code.
18 ///
19 /// The analysis is based on automatic propagation of data flow labels (also
20 /// known as taint labels) through a program as it performs computation. Each
21 /// byte of application memory is backed by two bytes of shadow memory which
22 /// hold the label. On Linux/x86_64, memory is laid out as follows:
23 ///
24 /// +--------------------+ 0x800000000000 (top of memory)
25 /// | application memory |
26 /// +--------------------+ 0x700000008000 (kAppAddr)
27 /// | |
28 /// | unused |
29 /// | |
30 /// +--------------------+ 0x200200000000 (kUnusedAddr)
31 /// | union table |
32 /// +--------------------+ 0x200000000000 (kUnionTableAddr)
33 /// | shadow memory |
34 /// +--------------------+ 0x000000010000 (kShadowAddr)
35 /// | reserved by kernel |
36 /// +--------------------+ 0x000000000000
37 ///
38 /// To derive a shadow memory address from an application memory address,
39 /// bits 44-46 are cleared to bring the address into the range
40 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
41 /// account for the double byte representation of shadow labels and move the
42 /// address into the shadow memory range. See the function
43 /// DataFlowSanitizer::getShadowAddress below.
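///
/// For example, with the x86_64 constants set up in doInitialization
/// (ShadowPtrMask = ~0x700000000000, ShadowPtrMul = 2), the lowest
/// application address maps to the lowest shadow address:
///
///   0x700000008000 & ~0x700000000000 = 0x000000008000   (clear bits 44-46)
///   0x000000008000 * 2               = 0x000000010000   (kShadowAddr)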
44 ///
45 /// For more information, please refer to the design document:
46 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
47 //
48 //===----------------------------------------------------------------------===//
49
50 #include "llvm/ADT/DenseMap.h"
51 #include "llvm/ADT/DenseSet.h"
52 #include "llvm/ADT/DepthFirstIterator.h"
53 #include "llvm/ADT/None.h"
54 #include "llvm/ADT/SmallPtrSet.h"
55 #include "llvm/ADT/SmallVector.h"
56 #include "llvm/ADT/StringExtras.h"
57 #include "llvm/ADT/StringRef.h"
58 #include "llvm/ADT/Triple.h"
59 #include "llvm/Transforms/Utils/Local.h"
60 #include "llvm/Analysis/ValueTracking.h"
61 #include "llvm/IR/Argument.h"
62 #include "llvm/IR/Attributes.h"
63 #include "llvm/IR/BasicBlock.h"
64 #include "llvm/IR/CallSite.h"
65 #include "llvm/IR/Constant.h"
66 #include "llvm/IR/Constants.h"
67 #include "llvm/IR/DataLayout.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/Dominators.h"
70 #include "llvm/IR/Function.h"
71 #include "llvm/IR/GlobalAlias.h"
72 #include "llvm/IR/GlobalValue.h"
73 #include "llvm/IR/GlobalVariable.h"
74 #include "llvm/IR/IRBuilder.h"
75 #include "llvm/IR/InlineAsm.h"
76 #include "llvm/IR/InstVisitor.h"
77 #include "llvm/IR/InstrTypes.h"
78 #include "llvm/IR/Instruction.h"
79 #include "llvm/IR/Instructions.h"
80 #include "llvm/IR/IntrinsicInst.h"
81 #include "llvm/IR/LLVMContext.h"
82 #include "llvm/IR/MDBuilder.h"
83 #include "llvm/IR/Module.h"
84 #include "llvm/IR/Type.h"
85 #include "llvm/IR/User.h"
86 #include "llvm/IR/Value.h"
87 #include "llvm/Pass.h"
88 #include "llvm/Support/Casting.h"
89 #include "llvm/Support/CommandLine.h"
90 #include "llvm/Support/ErrorHandling.h"
91 #include "llvm/Support/SpecialCaseList.h"
92 #include "llvm/Transforms/Instrumentation.h"
93 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstddef>
97 #include <cstdint>
98 #include <iterator>
99 #include <memory>
100 #include <set>
101 #include <string>
102 #include <utility>
103 #include <vector>
104
105 using namespace llvm;
106
// External symbol to be used when generating the shadow address for
// architectures with multiple VMAs. Instead of using a constant integer,
// the runtime sets this external mask based on the VMA range.
110 static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask";
111
// The -dfsan-preserve-alignment flag controls whether this pass assumes that
// alignment requirements provided by the input IR are correct. For example,
// if the input IR contains a load with alignment 8 and this flag is enabled,
// the shadow load will be given alignment 16. This flag is disabled by
// default as we have unfortunately encountered too much code (including
// Clang itself; see PR14291) which performs misaligned accesses.
118 static cl::opt<bool> ClPreserveAlignment(
119 "dfsan-preserve-alignment",
120 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
121 cl::init(false));
122
123 // The ABI list files control how shadow parameters are passed. The pass treats
124 // every function labelled "uninstrumented" in the ABI list file as conforming
125 // to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
126 // additional annotations for those functions, a call to one of those functions
127 // will produce a warning message, as the labelling behaviour of the function is
128 // unknown. The other supported annotations are "functional" and "discard",
129 // which are described below under DataFlowSanitizer::WrapperKind.
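//
// As an illustrative sketch, an ABI list file uses the SpecialCaseList
// "fun:<name>=<category>" syntax; the entries below are examples only:
//
//   fun:main=uninstrumented
//   fun:main=discard
//   fun:memcmp=uninstrumented
//   fun:memcmp=custom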
130 static cl::list<std::string> ClABIListFiles(
131 "dfsan-abilist",
132 cl::desc("File listing native ABI functions and how the pass treats them"),
133 cl::Hidden);
134
135 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
136 // functions (see DataFlowSanitizer::InstrumentedABI below).
137 static cl::opt<bool> ClArgsABI(
138 "dfsan-args-abi",
139 cl::desc("Use the argument ABI rather than the TLS ABI"),
140 cl::Hidden);
141
142 // Controls whether the pass includes or ignores the labels of pointers in load
143 // instructions.
144 static cl::opt<bool> ClCombinePointerLabelsOnLoad(
145 "dfsan-combine-pointer-labels-on-load",
146 cl::desc("Combine the label of the pointer with the label of the data when "
147 "loading from memory."),
148 cl::Hidden, cl::init(true));
149
// Controls whether the pass includes or ignores the labels of pointers in
// store instructions.
152 static cl::opt<bool> ClCombinePointerLabelsOnStore(
153 "dfsan-combine-pointer-labels-on-store",
154 cl::desc("Combine the label of the pointer with the label of the data when "
155 "storing in memory."),
156 cl::Hidden, cl::init(false));
157
158 static cl::opt<bool> ClDebugNonzeroLabels(
159 "dfsan-debug-nonzero-labels",
160 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
161 "load or return with a nonzero label"),
162 cl::Hidden);
163
static StringRef GetGlobalTypeString(const GlobalValue &G) {
165 // Types of GlobalVariables are always pointer types.
166 Type *GType = G.getValueType();
167 // For now we support blacklisting struct types only.
168 if (StructType *SGType = dyn_cast<StructType>(GType)) {
169 if (!SGType->isLiteral())
170 return SGType->getName();
171 }
172 return "<unknown type>";
173 }
174
175 namespace {
176
177 class DFSanABIList {
178 std::unique_ptr<SpecialCaseList> SCL;
179
180 public:
181 DFSanABIList() = default;
182
void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
184
/// Returns whether either this function or its source file is listed in the
/// given category.
bool isIn(const Function &F, StringRef Category) const {
188 return isIn(*F.getParent(), Category) ||
189 SCL->inSection("dataflow", "fun", F.getName(), Category);
190 }
191
192 /// Returns whether this global alias is listed in the given category.
193 ///
194 /// If GA aliases a function, the alias's name is matched as a function name
195 /// would be. Similarly, aliases of globals are matched like globals.
bool isIn(const GlobalAlias &GA, StringRef Category) const {
197 if (isIn(*GA.getParent(), Category))
198 return true;
199
200 if (isa<FunctionType>(GA.getValueType()))
201 return SCL->inSection("dataflow", "fun", GA.getName(), Category);
202
203 return SCL->inSection("dataflow", "global", GA.getName(), Category) ||
204 SCL->inSection("dataflow", "type", GetGlobalTypeString(GA),
205 Category);
206 }
207
208 /// Returns whether this module is listed in the given category.
bool isIn(const Module &M, StringRef Category) const {
210 return SCL->inSection("dataflow", "src", M.getModuleIdentifier(), Category);
211 }
212 };
213
214 /// TransformedFunction is used to express the result of transforming one
215 /// function type into another. This struct is immutable. It holds metadata
216 /// useful for updating calls of the old function to the new type.
217 struct TransformedFunction {
TransformedFunction(FunctionType* OriginalType,
219 FunctionType* TransformedType,
220 std::vector<unsigned> ArgumentIndexMapping)
221 : OriginalType(OriginalType),
222 TransformedType(TransformedType),
223 ArgumentIndexMapping(ArgumentIndexMapping) {}
224
225 // Disallow copies.
226 TransformedFunction(const TransformedFunction&) = delete;
227 TransformedFunction& operator=(const TransformedFunction&) = delete;
228
229 // Allow moves.
230 TransformedFunction(TransformedFunction&&) = default;
231 TransformedFunction& operator=(TransformedFunction&&) = default;
232
233 /// Type of the function before the transformation.
234 FunctionType *OriginalType;
235
236 /// Type of the function after the transformation.
237 FunctionType *TransformedType;
238
239 /// Transforming a function may change the position of arguments. This
240 /// member records the mapping from each argument's old position to its new
241 /// position. Argument positions are zero-indexed. If the transformation
242 /// from F to F' made the first argument of F into the third argument of F',
243 /// then ArgumentIndexMapping[0] will equal 2.
244 std::vector<unsigned> ArgumentIndexMapping;
245 };
246
247 /// Given function attributes from a call site for the original function,
248 /// return function attributes appropriate for a call to the transformed
249 /// function.
AttributeList TransformFunctionAttributes(
251 const TransformedFunction& TransformedFunction,
252 LLVMContext& Ctx, AttributeList CallSiteAttrs) {
253
254 // Construct a vector of AttributeSet for each function argument.
255 std::vector<llvm::AttributeSet> ArgumentAttributes(
256 TransformedFunction.TransformedType->getNumParams());
257
258 // Copy attributes from the parameter of the original function to the
259 // transformed version. 'ArgumentIndexMapping' holds the mapping from
260 // old argument position to new.
261 for (unsigned i=0, ie = TransformedFunction.ArgumentIndexMapping.size();
262 i < ie; ++i) {
263 unsigned TransformedIndex = TransformedFunction.ArgumentIndexMapping[i];
264 ArgumentAttributes[TransformedIndex] = CallSiteAttrs.getParamAttributes(i);
265 }
266
267 // Copy annotations on varargs arguments.
268 for (unsigned i = TransformedFunction.OriginalType->getNumParams(),
269 ie = CallSiteAttrs.getNumAttrSets(); i<ie; ++i) {
270 ArgumentAttributes.push_back(CallSiteAttrs.getParamAttributes(i));
271 }
272
273 return AttributeList::get(
274 Ctx,
275 CallSiteAttrs.getFnAttributes(),
276 CallSiteAttrs.getRetAttributes(),
277 llvm::makeArrayRef(ArgumentAttributes));
278 }
279
280 class DataFlowSanitizer : public ModulePass {
281 friend struct DFSanFunction;
282 friend class DFSanVisitor;
283
284 enum {
285 ShadowWidth = 16
286 };
287
288 /// Which ABI should be used for instrumented functions?
289 enum InstrumentedABI {
290 /// Argument and return value labels are passed through additional
291 /// arguments and by modifying the return type.
292 IA_Args,
293
294 /// Argument and return value labels are passed through TLS variables
295 /// __dfsan_arg_tls and __dfsan_retval_tls.
296 IA_TLS
297 };
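
  // A sketch of the IA_Args rewriting performed by getArgsFunctionType: a
  // function of IR type "i32 (i32, i32)" (the name "f" below is hypothetical)
  // becomes
  //   { i32, i16 } @dfs$f(i32, i32, i16, i16)
  // i.e. one 16-bit shadow argument is appended per parameter and the return
  // shadow is returned in a struct alongside the original return value.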
298
299 /// How should calls to uninstrumented functions be handled?
300 enum WrapperKind {
301 /// This function is present in an uninstrumented form but we don't know
302 /// how it should be handled. Print a warning and call the function anyway.
303 /// Don't label the return value.
304 WK_Warning,
305
306 /// This function does not write to (user-accessible) memory, and its return
307 /// value is unlabelled.
308 WK_Discard,
309
310 /// This function does not write to (user-accessible) memory, and the label
311 /// of its return value is the union of the label of its arguments.
312 WK_Functional,
313
314 /// Instead of calling the function, a custom wrapper __dfsw_F is called,
315 /// where F is the name of the function. This function may wrap the
316 /// original function or provide its own implementation. This is similar to
317 /// the IA_Args ABI, except that IA_Args uses a struct return type to
318 /// pass the return value shadow in a register, while WK_Custom uses an
319 /// extra pointer argument to return the shadow. This allows the wrapped
320 /// form of the function type to be expressed in C.
321 WK_Custom
322 };
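
  // To illustrate WK_Custom with a hypothetical "int f(int)": calls are
  // redirected to a wrapper whose C-expressible type (see
  // getCustomFunctionType) is roughly
  //   int __dfsw_f(int x, dfsan_label x_label, dfsan_label *ret_label);
  // i.e. one shadow argument per original parameter, plus a pointer
  // out-parameter for the return value shadow when the return type is
  // non-void.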
323
324 Module *Mod;
325 LLVMContext *Ctx;
326 IntegerType *ShadowTy;
327 PointerType *ShadowPtrTy;
328 IntegerType *IntptrTy;
329 ConstantInt *ZeroShadow;
330 ConstantInt *ShadowPtrMask;
331 ConstantInt *ShadowPtrMul;
332 Constant *ArgTLS;
333 Constant *RetvalTLS;
334 void *(*GetArgTLSPtr)();
335 void *(*GetRetvalTLSPtr)();
336 Constant *GetArgTLS;
337 Constant *GetRetvalTLS;
338 Constant *ExternalShadowMask;
339 FunctionType *DFSanUnionFnTy;
340 FunctionType *DFSanUnionLoadFnTy;
341 FunctionType *DFSanUnimplementedFnTy;
342 FunctionType *DFSanSetLabelFnTy;
343 FunctionType *DFSanNonzeroLabelFnTy;
344 FunctionType *DFSanVarargWrapperFnTy;
345 Constant *DFSanUnionFn;
346 Constant *DFSanCheckedUnionFn;
347 Constant *DFSanUnionLoadFn;
348 Constant *DFSanUnimplementedFn;
349 Constant *DFSanSetLabelFn;
350 Constant *DFSanNonzeroLabelFn;
351 Constant *DFSanVarargWrapperFn;
352 MDNode *ColdCallWeights;
353 DFSanABIList ABIList;
354 DenseMap<Value *, Function *> UnwrappedFnMap;
355 AttrBuilder ReadOnlyNoneAttrs;
356 bool DFSanRuntimeShadowMask = false;
357
358 Value *getShadowAddress(Value *Addr, Instruction *Pos);
359 bool isInstrumented(const Function *F);
360 bool isInstrumented(const GlobalAlias *GA);
361 FunctionType *getArgsFunctionType(FunctionType *T);
362 FunctionType *getTrampolineFunctionType(FunctionType *T);
363 TransformedFunction getCustomFunctionType(FunctionType *T);
364 InstrumentedABI getInstrumentedABI();
365 WrapperKind getWrapperKind(Function *F);
366 void addGlobalNamePrefix(GlobalValue *GV);
367 Function *buildWrapperFunction(Function *F, StringRef NewFName,
368 GlobalValue::LinkageTypes NewFLink,
369 FunctionType *NewFT);
370 Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
371
372 public:
373 static char ID;
374
375 DataFlowSanitizer(
376 const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
377 void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
378
379 bool doInitialization(Module &M) override;
380 bool runOnModule(Module &M) override;
381 };
382
383 struct DFSanFunction {
384 DataFlowSanitizer &DFS;
385 Function *F;
386 DominatorTree DT;
387 DataFlowSanitizer::InstrumentedABI IA;
388 bool IsNativeABI;
389 Value *ArgTLSPtr = nullptr;
390 Value *RetvalTLSPtr = nullptr;
391 AllocaInst *LabelReturnAlloca = nullptr;
392 DenseMap<Value *, Value *> ValShadowMap;
393 DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
394 std::vector<std::pair<PHINode *, PHINode *>> PHIFixups;
395 DenseSet<Instruction *> SkipInsts;
396 std::vector<Value *> NonZeroChecks;
397 bool AvoidNewBlocks;
398
399 struct CachedCombinedShadow {
400 BasicBlock *Block;
401 Value *Shadow;
402 };
403 DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
404 CachedCombinedShadows;
405 DenseMap<Value *, std::set<Value *>> ShadowElements;
406
DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
408 : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()), IsNativeABI(IsNativeABI) {
409 DT.recalculate(*F);
410 // FIXME: Need to track down the register allocator issue which causes poor
411 // performance in pathological cases with large numbers of basic blocks.
412 AvoidNewBlocks = F->size() > 1000;
413 }
414
415 Value *getArgTLSPtr();
416 Value *getArgTLS(unsigned Index, Instruction *Pos);
417 Value *getRetvalTLS();
418 Value *getShadow(Value *V);
419 void setShadow(Instruction *I, Value *Shadow);
420 Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
421 Value *combineOperandShadows(Instruction *Inst);
422 Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
423 Instruction *Pos);
424 void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow,
425 Instruction *Pos);
426 };
427
428 class DFSanVisitor : public InstVisitor<DFSanVisitor> {
429 public:
430 DFSanFunction &DFSF;
431
DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
433
const DataLayout &getDataLayout() const {
435 return DFSF.F->getParent()->getDataLayout();
436 }
437
438 void visitOperandShadowInst(Instruction &I);
439 void visitBinaryOperator(BinaryOperator &BO);
440 void visitCastInst(CastInst &CI);
441 void visitCmpInst(CmpInst &CI);
442 void visitGetElementPtrInst(GetElementPtrInst &GEPI);
443 void visitLoadInst(LoadInst &LI);
444 void visitStoreInst(StoreInst &SI);
445 void visitReturnInst(ReturnInst &RI);
446 void visitCallSite(CallSite CS);
447 void visitPHINode(PHINode &PN);
448 void visitExtractElementInst(ExtractElementInst &I);
449 void visitInsertElementInst(InsertElementInst &I);
450 void visitShuffleVectorInst(ShuffleVectorInst &I);
451 void visitExtractValueInst(ExtractValueInst &I);
452 void visitInsertValueInst(InsertValueInst &I);
453 void visitAllocaInst(AllocaInst &I);
454 void visitSelectInst(SelectInst &I);
455 void visitMemSetInst(MemSetInst &I);
456 void visitMemTransferInst(MemTransferInst &I);
457 };
458
459 } // end anonymous namespace
460
461 char DataFlowSanitizer::ID;
462
463 INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
464 "DataFlowSanitizer: dynamic data flow analysis.", false, false)
465
466 ModulePass *
llvm::createDataFlowSanitizerPass(const std::vector<std::string> &ABIListFiles,
468 void *(*getArgTLS)(),
469 void *(*getRetValTLS)()) {
470 return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS);
471 }
472
DataFlowSanitizer::DataFlowSanitizer(
474 const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(),
475 void *(*getRetValTLS)())
476 : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS) {
477 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
478 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(),
479 ClABIListFiles.end());
480 ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles));
481 }
482
FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
484 SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
485 ArgTypes.append(T->getNumParams(), ShadowTy);
486 if (T->isVarArg())
487 ArgTypes.push_back(ShadowPtrTy);
488 Type *RetType = T->getReturnType();
489 if (!RetType->isVoidTy())
490 RetType = StructType::get(RetType, ShadowTy);
491 return FunctionType::get(RetType, ArgTypes, T->isVarArg());
492 }
493
FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
495 assert(!T->isVarArg());
496 SmallVector<Type *, 4> ArgTypes;
497 ArgTypes.push_back(T->getPointerTo());
498 ArgTypes.append(T->param_begin(), T->param_end());
499 ArgTypes.append(T->getNumParams(), ShadowTy);
500 Type *RetType = T->getReturnType();
501 if (!RetType->isVoidTy())
502 ArgTypes.push_back(ShadowPtrTy);
503 return FunctionType::get(T->getReturnType(), ArgTypes, false);
504 }
505
TransformedFunction DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
507 SmallVector<Type *, 4> ArgTypes;
508
509 // Some parameters of the custom function being constructed are
510 // parameters of T. Record the mapping from parameters of T to
511 // parameters of the custom function, so that parameter attributes
512 // at call sites can be updated.
513 std::vector<unsigned> ArgumentIndexMapping;
514 for (unsigned i = 0, ie = T->getNumParams(); i != ie; ++i) {
515 Type* param_type = T->getParamType(i);
516 FunctionType *FT;
517 if (isa<PointerType>(param_type) && (FT = dyn_cast<FunctionType>(
518 cast<PointerType>(param_type)->getElementType()))) {
519 ArgumentIndexMapping.push_back(ArgTypes.size());
520 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
521 ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
522 } else {
523 ArgumentIndexMapping.push_back(ArgTypes.size());
524 ArgTypes.push_back(param_type);
525 }
526 }
527 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
528 ArgTypes.push_back(ShadowTy);
529 if (T->isVarArg())
530 ArgTypes.push_back(ShadowPtrTy);
531 Type *RetType = T->getReturnType();
532 if (!RetType->isVoidTy())
533 ArgTypes.push_back(ShadowPtrTy);
534 return TransformedFunction(
535 T, FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg()),
536 ArgumentIndexMapping);
537 }
538
bool DataFlowSanitizer::doInitialization(Module &M) {
540 Triple TargetTriple(M.getTargetTriple());
541 bool IsX86_64 = TargetTriple.getArch() == Triple::x86_64;
542 bool IsMIPS64 = TargetTriple.isMIPS64();
543 bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64 ||
544 TargetTriple.getArch() == Triple::aarch64_be;
545
546 const DataLayout &DL = M.getDataLayout();
547
548 Mod = &M;
549 Ctx = &M.getContext();
550 ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
551 ShadowPtrTy = PointerType::getUnqual(ShadowTy);
552 IntptrTy = DL.getIntPtrType(*Ctx);
553 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
554 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
555 if (IsX86_64)
556 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
557 else if (IsMIPS64)
558 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
559 // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
560 else if (IsAArch64)
561 DFSanRuntimeShadowMask = true;
562 else
563 report_fatal_error("unsupported triple");
564
565 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
566 DFSanUnionFnTy =
567 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
568 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
569 DFSanUnionLoadFnTy =
570 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
571 DFSanUnimplementedFnTy = FunctionType::get(
572 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
573 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
574 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
575 DFSanSetLabelArgs, /*isVarArg=*/false);
576 DFSanNonzeroLabelFnTy = FunctionType::get(
577 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
578 DFSanVarargWrapperFnTy = FunctionType::get(
579 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
580
581 if (GetArgTLSPtr) {
582 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
583 ArgTLS = nullptr;
584 GetArgTLS = ConstantExpr::getIntToPtr(
585 ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)),
586 PointerType::getUnqual(
587 FunctionType::get(PointerType::getUnqual(ArgTLSTy), false)));
588 }
589 if (GetRetvalTLSPtr) {
590 RetvalTLS = nullptr;
591 GetRetvalTLS = ConstantExpr::getIntToPtr(
592 ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)),
593 PointerType::getUnqual(
594 FunctionType::get(PointerType::getUnqual(ShadowTy), false)));
595 }
596
597 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
598 return true;
599 }
600
bool DataFlowSanitizer::isInstrumented(const Function *F) {
602 return !ABIList.isIn(*F, "uninstrumented");
603 }
604
bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
606 return !ABIList.isIn(*GA, "uninstrumented");
607 }
608
DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
610 return ClArgsABI ? IA_Args : IA_TLS;
611 }
612
DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
614 if (ABIList.isIn(*F, "functional"))
615 return WK_Functional;
616 if (ABIList.isIn(*F, "discard"))
617 return WK_Discard;
618 if (ABIList.isIn(*F, "custom"))
619 return WK_Custom;
620
621 return WK_Warning;
622 }
623
void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
625 std::string GVName = GV->getName(), Prefix = "dfs$";
626 GV->setName(Prefix + GVName);
627
628 // Try to change the name of the function in module inline asm. We only do
629 // this for specific asm directives, currently only ".symver", to try to avoid
630 // corrupting asm which happens to contain the symbol name as a substring.
631 // Note that the substitution for .symver assumes that the versioned symbol
632 // also has an instrumented name.
633 std::string Asm = GV->getParent()->getModuleInlineAsm();
634 std::string SearchStr = ".symver " + GVName + ",";
635 size_t Pos = Asm.find(SearchStr);
636 if (Pos != std::string::npos) {
637 Asm.replace(Pos, SearchStr.size(),
638 ".symver " + Prefix + GVName + "," + Prefix);
639 GV->getParent()->setModuleInlineAsm(Asm);
640 }
641 }
642
643 Function *
DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
645 GlobalValue::LinkageTypes NewFLink,
646 FunctionType *NewFT) {
647 FunctionType *FT = F->getFunctionType();
648 Function *NewF = Function::Create(NewFT, NewFLink, NewFName,
649 F->getParent());
650 NewF->copyAttributesFrom(F);
651 NewF->removeAttributes(
652 AttributeList::ReturnIndex,
653 AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
654
655 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
656 if (F->isVarArg()) {
657 NewF->removeAttributes(AttributeList::FunctionIndex,
658 AttrBuilder().addAttribute("split-stack"));
659 CallInst::Create(DFSanVarargWrapperFn,
660 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
661 BB);
662 new UnreachableInst(*Ctx, BB);
663 } else {
664 std::vector<Value *> Args;
665 unsigned n = FT->getNumParams();
666 for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
667 Args.push_back(&*ai);
668 CallInst *CI = CallInst::Create(F, Args, "", BB);
669 if (FT->getReturnType()->isVoidTy())
670 ReturnInst::Create(*Ctx, BB);
671 else
672 ReturnInst::Create(*Ctx, CI, BB);
673 }
674
675 return NewF;
676 }
677
Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
679 StringRef FName) {
680 FunctionType *FTT = getTrampolineFunctionType(FT);
681 Constant *C = Mod->getOrInsertFunction(FName, FTT);
682 Function *F = dyn_cast<Function>(C);
683 if (F && F->isDeclaration()) {
684 F->setLinkage(GlobalValue::LinkOnceODRLinkage);
685 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
686 std::vector<Value *> Args;
687 Function::arg_iterator AI = F->arg_begin(); ++AI;
688 for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
689 Args.push_back(&*AI);
690 CallInst *CI = CallInst::Create(&*F->arg_begin(), Args, "", BB);
691 ReturnInst *RI;
692 if (FT->getReturnType()->isVoidTy())
693 RI = ReturnInst::Create(*Ctx, BB);
694 else
695 RI = ReturnInst::Create(*Ctx, CI, BB);
696
697 DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
698 Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
699 for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
700 DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
701 DFSanVisitor(DFSF).visitCallInst(*CI);
702 if (!FT->getReturnType()->isVoidTy())
703 new StoreInst(DFSF.getShadow(RI->getReturnValue()),
704 &*std::prev(F->arg_end()), RI);
705 }
706
707 return C;
708 }
709
bool DataFlowSanitizer::runOnModule(Module &M) {
711 if (ABIList.isIn(M, "skip"))
712 return false;
713
714 if (!GetArgTLSPtr) {
715 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
716 ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
717 if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS))
718 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
719 }
720 if (!GetRetvalTLSPtr) {
721 RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
722 if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS))
723 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
724 }
725
726 ExternalShadowMask =
727 Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);
728
729 DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy);
730 if (Function *F = dyn_cast<Function>(DFSanUnionFn)) {
731 F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
732 F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
733 F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
734 F->addParamAttr(0, Attribute::ZExt);
735 F->addParamAttr(1, Attribute::ZExt);
736 }
737 DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy);
738 if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) {
739 F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
740 F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadNone);
741 F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
742 F->addParamAttr(0, Attribute::ZExt);
743 F->addParamAttr(1, Attribute::ZExt);
744 }
745 DFSanUnionLoadFn =
746 Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
747 if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
748 F->addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
749 F->addAttribute(AttributeList::FunctionIndex, Attribute::ReadOnly);
750 F->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
751 }
752 DFSanUnimplementedFn =
753 Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
754 DFSanSetLabelFn =
755 Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy);
756 if (Function *F = dyn_cast<Function>(DFSanSetLabelFn)) {
757 F->addParamAttr(0, Attribute::ZExt);
758 }
759 DFSanNonzeroLabelFn =
760 Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
761 DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
762 DFSanVarargWrapperFnTy);
763
764 std::vector<Function *> FnsToInstrument;
765 SmallPtrSet<Function *, 2> FnsWithNativeABI;
766 for (Function &i : M) {
767 if (!i.isIntrinsic() &&
768 &i != DFSanUnionFn &&
769 &i != DFSanCheckedUnionFn &&
770 &i != DFSanUnionLoadFn &&
771 &i != DFSanUnimplementedFn &&
772 &i != DFSanSetLabelFn &&
773 &i != DFSanNonzeroLabelFn &&
774 &i != DFSanVarargWrapperFn)
775 FnsToInstrument.push_back(&i);
776 }
777
778 // Give function aliases prefixes when necessary, and build wrappers where the
779 // instrumentedness is inconsistent.
780 for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
781 GlobalAlias *GA = &*i;
782 ++i;
783 // Don't stop on weak. We assume people aren't playing games with the
784 // instrumentedness of overridden weak aliases.
785 if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
786 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
787 if (GAInst && FInst) {
788 addGlobalNamePrefix(GA);
789 } else if (GAInst != FInst) {
790 // Non-instrumented alias of an instrumented function, or vice versa.
791 // Replace the alias with a native-ABI wrapper of the aliasee. The pass
792 // below will take care of instrumenting it.
793 Function *NewF =
794 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
795 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
796 NewF->takeName(GA);
797 GA->eraseFromParent();
798 FnsToInstrument.push_back(NewF);
799 }
800 }
801 }
802
803 ReadOnlyNoneAttrs.addAttribute(Attribute::ReadOnly)
804 .addAttribute(Attribute::ReadNone);
805
806 // First, change the ABI of every function in the module. ABI-listed
807 // functions keep their original ABI and get a wrapper function.
808 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
809 e = FnsToInstrument.end();
810 i != e; ++i) {
811 Function &F = **i;
812 FunctionType *FT = F.getFunctionType();
813
814 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
815 FT->getReturnType()->isVoidTy());
816
817 if (isInstrumented(&F)) {
818 // Instrumented functions get a 'dfs$' prefix. This allows us to more
819 // easily identify cases of mismatching ABIs.
820 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
821 FunctionType *NewFT = getArgsFunctionType(FT);
822 Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M);
823 NewF->copyAttributesFrom(&F);
824 NewF->removeAttributes(
825 AttributeList::ReturnIndex,
826 AttributeFuncs::typeIncompatible(NewFT->getReturnType()));
827 for (Function::arg_iterator FArg = F.arg_begin(),
828 NewFArg = NewF->arg_begin(),
829 FArgEnd = F.arg_end();
830 FArg != FArgEnd; ++FArg, ++NewFArg) {
831 FArg->replaceAllUsesWith(&*NewFArg);
832 }
833 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
834
835 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
836 UI != UE;) {
837 BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
838 ++UI;
839 if (BA) {
840 BA->replaceAllUsesWith(
841 BlockAddress::get(NewF, BA->getBasicBlock()));
842 delete BA;
843 }
844 }
845 F.replaceAllUsesWith(
846 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
847 NewF->takeName(&F);
848 F.eraseFromParent();
849 *i = NewF;
850 addGlobalNamePrefix(NewF);
851 } else {
852 addGlobalNamePrefix(&F);
853 }
854 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
855 // Build a wrapper function for F. The wrapper simply calls F, and is
856 // added to FnsToInstrument so that any instrumentation according to its
857 // WrapperKind is done in the second pass below.
858 FunctionType *NewFT = getInstrumentedABI() == IA_Args
859 ? getArgsFunctionType(FT)
860 : FT;
861
862 // If the function being wrapped has local linkage, then preserve the
863 // function's linkage in the wrapper function.
864 GlobalValue::LinkageTypes wrapperLinkage =
865 F.hasLocalLinkage()
866 ? F.getLinkage()
867 : GlobalValue::LinkOnceODRLinkage;
868
869 Function *NewF = buildWrapperFunction(
870 &F, std::string("dfsw$") + std::string(F.getName()),
871 wrapperLinkage, NewFT);
872 if (getInstrumentedABI() == IA_TLS)
873 NewF->removeAttributes(AttributeList::FunctionIndex, ReadOnlyNoneAttrs);
874
875 Value *WrappedFnCst =
876 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
877 F.replaceAllUsesWith(WrappedFnCst);
878
879 UnwrappedFnMap[WrappedFnCst] = &F;
880 *i = NewF;
881
882 if (!F.isDeclaration()) {
883 // This function is probably defining an interposition of an
884 // uninstrumented function and hence needs to keep the original ABI.
885 // But any functions it may call need to use the instrumented ABI, so
886 // we instrument it in a mode which preserves the original ABI.
887 FnsWithNativeABI.insert(&F);
888
889 // This code needs to rebuild the iterators, as they may be invalidated
890 // by the push_back, taking care that the new range does not include
891 // any functions added by this code.
892 size_t N = i - FnsToInstrument.begin(),
893 Count = e - FnsToInstrument.begin();
894 FnsToInstrument.push_back(&F);
895 i = FnsToInstrument.begin() + N;
896 e = FnsToInstrument.begin() + Count;
897 }
898 // Hopefully, nobody will try to indirectly call a vararg
899 // function... yet.
900 } else if (FT->isVarArg()) {
901 UnwrappedFnMap[&F] = &F;
902 *i = nullptr;
903 }
904 }
905
906 for (Function *i : FnsToInstrument) {
907 if (!i || i->isDeclaration())
908 continue;
909
910 removeUnreachableBlocks(*i);
911
912 DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));
913
914 // DFSanVisitor may create new basic blocks, which confuses df_iterator.
915 // Build a copy of the list before iterating over it.
916 SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));
917
918 for (BasicBlock *i : BBList) {
919 Instruction *Inst = &i->front();
920 while (true) {
921 // DFSanVisitor may split the current basic block, changing the current
922 // instruction's next pointer and moving the next instruction to the
923 // tail block from which we should continue.
924 Instruction *Next = Inst->getNextNode();
925 // DFSanVisitor may delete Inst, so keep track of whether it was a
926 // terminator.
927 bool IsTerminator = isa<TerminatorInst>(Inst);
928 if (!DFSF.SkipInsts.count(Inst))
929 DFSanVisitor(DFSF).visit(Inst);
930 if (IsTerminator)
931 break;
932 Inst = Next;
933 }
934 }
935
936 // We will not necessarily be able to compute the shadow for every phi node
937 // until we have visited every block. Therefore, the code that handles phi
938 // nodes adds them to the PHIFixups list so that they can be properly
939 // handled here.
940 for (std::vector<std::pair<PHINode *, PHINode *>>::iterator
941 i = DFSF.PHIFixups.begin(),
942 e = DFSF.PHIFixups.end();
943 i != e; ++i) {
944 for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
945 ++val) {
946 i->second->setIncomingValue(
947 val, DFSF.getShadow(i->first->getIncomingValue(val)));
948 }
949 }
950
951 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
952 // places (i.e. instructions in basic blocks we haven't even begun visiting
953 // yet). To make our life easier, do this work in a pass after the main
954 // instrumentation.
955 if (ClDebugNonzeroLabels) {
956 for (Value *V : DFSF.NonZeroChecks) {
957 Instruction *Pos;
958 if (Instruction *I = dyn_cast<Instruction>(V))
959 Pos = I->getNextNode();
960 else
961 Pos = &DFSF.F->getEntryBlock().front();
962 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
963 Pos = Pos->getNextNode();
964 IRBuilder<> IRB(Pos);
965 Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
966 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
967 Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
968 IRBuilder<> ThenIRB(BI);
969 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
970 }
971 }
972 }
973
974 return false;
975 }
976
Value *DFSanFunction::getArgTLSPtr() {
978 if (ArgTLSPtr)
979 return ArgTLSPtr;
980 if (DFS.ArgTLS)
981 return ArgTLSPtr = DFS.ArgTLS;
982
983 IRBuilder<> IRB(&F->getEntryBlock().front());
984 return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {});
985 }
986
Value *DFSanFunction::getRetvalTLS() {
988 if (RetvalTLSPtr)
989 return RetvalTLSPtr;
990 if (DFS.RetvalTLS)
991 return RetvalTLSPtr = DFS.RetvalTLS;
992
993 IRBuilder<> IRB(&F->getEntryBlock().front());
994 return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {});
995 }
996
Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
998 IRBuilder<> IRB(Pos);
999 return IRB.CreateConstGEP2_64(getArgTLSPtr(), 0, Idx);
1000 }
1001
Value *DFSanFunction::getShadow(Value *V) {
1003 if (!isa<Argument>(V) && !isa<Instruction>(V))
1004 return DFS.ZeroShadow;
1005 Value *&Shadow = ValShadowMap[V];
1006 if (!Shadow) {
1007 if (Argument *A = dyn_cast<Argument>(V)) {
1008 if (IsNativeABI)
1009 return DFS.ZeroShadow;
1010 switch (IA) {
1011 case DataFlowSanitizer::IA_TLS: {
1012 Value *ArgTLSPtr = getArgTLSPtr();
1013 Instruction *ArgTLSPos =
1014 DFS.ArgTLS ? &*F->getEntryBlock().begin()
1015 : cast<Instruction>(ArgTLSPtr)->getNextNode();
1016 IRBuilder<> IRB(ArgTLSPos);
1017 Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos));
1018 break;
1019 }
1020 case DataFlowSanitizer::IA_Args: {
1021 unsigned ArgIdx = A->getArgNo() + F->arg_size() / 2;
1022 Function::arg_iterator i = F->arg_begin();
1023 while (ArgIdx--)
1024 ++i;
1025 Shadow = &*i;
1026 assert(Shadow->getType() == DFS.ShadowTy);
1027 break;
1028 }
1029 }
1030 NonZeroChecks.push_back(Shadow);
1031 } else {
1032 Shadow = DFS.ZeroShadow;
1033 }
1034 }
1035 return Shadow;
1036 }
1037
void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
1039 assert(!ValShadowMap.count(I));
1040 assert(Shadow->getType() == DFS.ShadowTy);
1041 ValShadowMap[I] = Shadow;
1042 }
1043
Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
1045 assert(Addr != RetvalTLS && "Reinstrumenting?");
1046 IRBuilder<> IRB(Pos);
1047 Value *ShadowPtrMaskValue;
1048 if (DFSanRuntimeShadowMask)
1049 ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
1050 else
1051 ShadowPtrMaskValue = ShadowPtrMask;
1052 return IRB.CreateIntToPtr(
1053 IRB.CreateMul(
1054 IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
1055 IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
1056 ShadowPtrMul),
1057 ShadowPtrTy);
1058 }
1059
1060 // Generates IR to compute the union of the two given shadows, inserting it
1061 // before Pos. Returns the computed union Value.
Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
1063 if (V1 == DFS.ZeroShadow)
1064 return V2;
1065 if (V2 == DFS.ZeroShadow)
1066 return V1;
1067 if (V1 == V2)
1068 return V1;
1069
1070 auto V1Elems = ShadowElements.find(V1);
1071 auto V2Elems = ShadowElements.find(V2);
1072 if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
1073 if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
1074 V2Elems->second.begin(), V2Elems->second.end())) {
1075 return V1;
1076 } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
1077 V1Elems->second.begin(), V1Elems->second.end())) {
1078 return V2;
1079 }
1080 } else if (V1Elems != ShadowElements.end()) {
1081 if (V1Elems->second.count(V2))
1082 return V1;
1083 } else if (V2Elems != ShadowElements.end()) {
1084 if (V2Elems->second.count(V1))
1085 return V2;
1086 }
1087
1088 auto Key = std::make_pair(V1, V2);
1089 if (V1 > V2)
1090 std::swap(Key.first, Key.second);
1091 CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
1092 if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
1093 return CCS.Shadow;
1094
1095 IRBuilder<> IRB(Pos);
1096 if (AvoidNewBlocks) {
1097 CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
1098 Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1099 Call->addParamAttr(0, Attribute::ZExt);
1100 Call->addParamAttr(1, Attribute::ZExt);
1101
1102 CCS.Block = Pos->getParent();
1103 CCS.Shadow = Call;
1104 } else {
1105 BasicBlock *Head = Pos->getParent();
1106 Value *Ne = IRB.CreateICmpNE(V1, V2);
1107 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
1108 Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
1109 IRBuilder<> ThenIRB(BI);
1110 CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
1111 Call->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1112 Call->addParamAttr(0, Attribute::ZExt);
1113 Call->addParamAttr(1, Attribute::ZExt);
1114
1115 BasicBlock *Tail = BI->getSuccessor(0);
1116 PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1117 Phi->addIncoming(Call, Call->getParent());
1118 Phi->addIncoming(V1, Head);
1119
1120 CCS.Block = Tail;
1121 CCS.Shadow = Phi;
1122 }
1123
1124 std::set<Value *> UnionElems;
1125 if (V1Elems != ShadowElements.end()) {
1126 UnionElems = V1Elems->second;
1127 } else {
1128 UnionElems.insert(V1);
1129 }
1130 if (V2Elems != ShadowElements.end()) {
1131 UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
1132 } else {
1133 UnionElems.insert(V2);
1134 }
1135 ShadowElements[CCS.Shadow] = std::move(UnionElems);
1136
1137 return CCS.Shadow;
1138 }
1139
1140 // A convenience function which folds the shadows of each of the operands
1141 // of the provided instruction Inst, inserting the IR before Inst. Returns
1142 // the computed union Value.
Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
1144 if (Inst->getNumOperands() == 0)
1145 return DFS.ZeroShadow;
1146
1147 Value *Shadow = getShadow(Inst->getOperand(0));
1148 for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
1149 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
1150 }
1151 return Shadow;
1152 }
1153
void DFSanVisitor::visitOperandShadowInst(Instruction &I) {
1155 Value *CombinedShadow = DFSF.combineOperandShadows(&I);
1156 DFSF.setShadow(&I, CombinedShadow);
1157 }
1158
1159 // Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
1160 // Addr has alignment Align, and take the union of each of those shadows.
Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
1162 Instruction *Pos) {
1163 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1164 const auto i = AllocaShadowMap.find(AI);
1165 if (i != AllocaShadowMap.end()) {
1166 IRBuilder<> IRB(Pos);
1167 return IRB.CreateLoad(i->second);
1168 }
1169 }
1170
1171 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
1172 SmallVector<Value *, 2> Objs;
1173 GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
1174 bool AllConstants = true;
1175 for (Value *Obj : Objs) {
1176 if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
1177 continue;
1178 if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
1179 continue;
1180
1181 AllConstants = false;
1182 break;
1183 }
1184 if (AllConstants)
1185 return DFS.ZeroShadow;
1186
1187 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1188 switch (Size) {
1189 case 0:
1190 return DFS.ZeroShadow;
1191 case 1: {
1192 LoadInst *LI = new LoadInst(ShadowAddr, "", Pos);
1193 LI->setAlignment(ShadowAlign);
1194 return LI;
1195 }
1196 case 2: {
1197 IRBuilder<> IRB(Pos);
1198 Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
1199 ConstantInt::get(DFS.IntptrTy, 1));
1200 return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
1201 IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
1202 }
1203 }
1204 if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
1205 // Fast path for the common case where each byte has identical shadow: load
1206 // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
1207 // shadow is non-equal.
1208 BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
1209 IRBuilder<> FallbackIRB(FallbackBB);
1210 CallInst *FallbackCall = FallbackIRB.CreateCall(
1211 DFS.DFSanUnionLoadFn,
1212 {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1213 FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1214
1215 // Compare each of the shadows stored in the loaded 64 bits to each other,
1216 // by computing (WideShadow rotl ShadowWidth) == WideShadow.
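// For example, with ShadowWidth == 16 the 64-bit word holds four labels
// (L3:L2:L1:L0); rotating left by 16 yields (L2:L1:L0:L3), which equals the
// original word iff all four labels are equal.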
1217 IRBuilder<> IRB(Pos);
1218 Value *WideAddr =
1219 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1220 Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1221 Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
1222 Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
1223 Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
1224 Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
1225 Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
1226
1227 BasicBlock *Head = Pos->getParent();
1228 BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
1229
1230 if (DomTreeNode *OldNode = DT.getNode(Head)) {
1231 std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
1232
1233 DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
1234 for (auto Child : Children)
1235 DT.changeImmediateDominator(Child, NewNode);
1236 }
1237
1238 // In the following code LastBr will refer to the previous basic block's
1239 // conditional branch instruction, whose true successor is fixed up to point
1240 // to the next block during the loop below or to the tail after the final
1241 // iteration.
1242 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
1243 ReplaceInstWithInst(Head->getTerminator(), LastBr);
1244 DT.addNewBlock(FallbackBB, Head);
1245
1246 for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
1247 Ofs += 64 / DFS.ShadowWidth) {
1248 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
1249 DT.addNewBlock(NextBB, LastBr->getParent());
1250 IRBuilder<> NextIRB(NextBB);
1251 WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1252 ConstantInt::get(DFS.IntptrTy, 1));
1253 Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1254 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
1255 LastBr->setSuccessor(0, NextBB);
1256 LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
1257 }
1258
1259 LastBr->setSuccessor(0, Tail);
1260 FallbackIRB.CreateBr(Tail);
1261 PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1262 Shadow->addIncoming(FallbackCall, FallbackBB);
1263 Shadow->addIncoming(TruncShadow, LastBr->getParent());
1264 return Shadow;
1265 }
1266
1267 IRBuilder<> IRB(Pos);
1268 CallInst *FallbackCall = IRB.CreateCall(
1269 DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1270 FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
1271 return FallbackCall;
1272 }
1273
void DFSanVisitor::visitLoadInst(LoadInst &LI) {
1275 auto &DL = LI.getModule()->getDataLayout();
1276 uint64_t Size = DL.getTypeStoreSize(LI.getType());
1277 if (Size == 0) {
1278 DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
1279 return;
1280 }
1281
1282 uint64_t Align;
1283 if (ClPreserveAlignment) {
1284 Align = LI.getAlignment();
1285 if (Align == 0)
1286 Align = DL.getABITypeAlignment(LI.getType());
1287 } else {
1288 Align = 1;
1289 }
1290 IRBuilder<> IRB(&LI);
1291 Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
1292 if (ClCombinePointerLabelsOnLoad) {
1293 Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
1294 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
1295 }
1296 if (Shadow != DFSF.DFS.ZeroShadow)
1297 DFSF.NonZeroChecks.push_back(Shadow);
1298
1299 DFSF.setShadow(&LI, Shadow);
1300 }
1301
void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
1303 Value *Shadow, Instruction *Pos) {
1304 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1305 const auto i = AllocaShadowMap.find(AI);
1306 if (i != AllocaShadowMap.end()) {
1307 IRBuilder<> IRB(Pos);
1308 IRB.CreateStore(Shadow, i->second);
1309 return;
1310 }
1311 }
1312
1313 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
1314 IRBuilder<> IRB(Pos);
1315 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1316 if (Shadow == DFS.ZeroShadow) {
1317 IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth);
1318 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
1319 Value *ExtShadowAddr =
1320 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
1321 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
1322 return;
1323 }
1324
1325 const unsigned ShadowVecSize = 128 / DFS.ShadowWidth;
1326 uint64_t Offset = 0;
1327 if (Size >= ShadowVecSize) {
1328 VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize);
1329 Value *ShadowVec = UndefValue::get(ShadowVecTy);
1330 for (unsigned i = 0; i != ShadowVecSize; ++i) {
1331 ShadowVec = IRB.CreateInsertElement(
1332 ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
1333 }
1334 Value *ShadowVecAddr =
1335 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
1336 do {
1337 Value *CurShadowVecAddr =
1338 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
1339 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
1340 Size -= ShadowVecSize;
1341 ++Offset;
1342 } while (Size >= ShadowVecSize);
1343 Offset *= ShadowVecSize;
1344 }
1345 while (Size > 0) {
1346 Value *CurShadowAddr =
1347 IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
1348 IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
1349 --Size;
1350 ++Offset;
1351 }
1352 }
1353
void DFSanVisitor::visitStoreInst(StoreInst &SI) {
1355 auto &DL = SI.getModule()->getDataLayout();
1356 uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
1357 if (Size == 0)
1358 return;
1359
1360 uint64_t Align;
1361 if (ClPreserveAlignment) {
1362 Align = SI.getAlignment();
1363 if (Align == 0)
1364 Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
1365 } else {
1366 Align = 1;
1367 }
1368
1369 Value* Shadow = DFSF.getShadow(SI.getValueOperand());
1370 if (ClCombinePointerLabelsOnStore) {
1371 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
1372 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
1373 }
1374 DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
1375 }
1376
void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
1378 visitOperandShadowInst(BO);
1379 }
1380
void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }
1382
void DFSanVisitor::visitCmpInst(CmpInst &CI) { visitOperandShadowInst(CI); }
1384
void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1386 visitOperandShadowInst(GEPI);
1387 }
1388
void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
1390 visitOperandShadowInst(I);
1391 }
1392
void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
1394 visitOperandShadowInst(I);
1395 }
1396
void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
1398 visitOperandShadowInst(I);
1399 }
1400
void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
1402 visitOperandShadowInst(I);
1403 }
1404
void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
1406 visitOperandShadowInst(I);
1407 }
1408
void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
1410 bool AllLoadsStores = true;
1411 for (User *U : I.users()) {
1412 if (isa<LoadInst>(U))
1413 continue;
1414
1415 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1416 if (SI->getPointerOperand() == &I)
1417 continue;
1418 }
1419
1420 AllLoadsStores = false;
1421 break;
1422 }
1423 if (AllLoadsStores) {
1424 IRBuilder<> IRB(&I);
1425 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
1426 }
1427 DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
1428 }
1429
void DFSanVisitor::visitSelectInst(SelectInst &I) {
1431 Value *CondShadow = DFSF.getShadow(I.getCondition());
1432 Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
1433 Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
1434
1435 if (isa<VectorType>(I.getCondition()->getType())) {
1436 DFSF.setShadow(
1437 &I,
1438 DFSF.combineShadows(
1439 CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
1440 } else {
1441 Value *ShadowSel;
1442 if (TrueShadow == FalseShadow) {
1443 ShadowSel = TrueShadow;
1444 } else {
1445 ShadowSel =
1446 SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
1447 }
1448 DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
1449 }
1450 }
1451
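// memset is instrumented with a call to the runtime's set-label function,
// which labels the destination range with the stored value's label.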
void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
  IRBuilder<> IRB(&I);
  Value *ValShadow = DFSF.getShadow(I.getValue());
  IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
                 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
                                                   *DFSF.DFS.Ctx)),
                  IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
}

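// memcpy/memmove are instrumented by issuing a second transfer of the same
// kind over the corresponding shadow region, with the length (and, when
// ClPreserveAlignment is set, the alignment) scaled by the shadow width in
// bytes.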
void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
  IRBuilder<> IRB(&I);
  Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
  Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
  Value *LenShadow = IRB.CreateMul(
      I.getLength(),
      ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
  Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
  DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
  SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
  auto *MTI = cast<MemTransferInst>(
      IRB.CreateCall(I.getCalledValue(),
                     {DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
  if (ClPreserveAlignment) {
    MTI->setDestAlignment(I.getDestAlignment() * (DFSF.DFS.ShadowWidth / 8));
    MTI->setSourceAlignment(I.getSourceAlignment() * (DFSF.DFS.ShadowWidth / 8));
  } else {
    MTI->setDestAlignment(DFSF.DFS.ShadowWidth / 8);
    MTI->setSourceAlignment(DFSF.DFS.ShadowWidth / 8);
  }
}

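// Return the label of the return value to the caller: via the retval TLS slot
// under the TLS ABI, or as the second element of the aggregate return value
// under the args ABI.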
void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
  if (!DFSF.IsNativeABI && RI.getReturnValue()) {
    switch (DFSF.IA) {
    case DataFlowSanitizer::IA_TLS: {
      Value *S = DFSF.getShadow(RI.getReturnValue());
      IRBuilder<> IRB(&RI);
      IRB.CreateStore(S, DFSF.getRetvalTLS());
      break;
    }
    case DataFlowSanitizer::IA_Args: {
      IRBuilder<> IRB(&RI);
      Type *RT = DFSF.F->getFunctionType()->getReturnType();
      Value *InsVal =
          IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
      Value *InsShadow =
          IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
      RI.setOperand(0, InsShadow);
      break;
    }
    }
  }
}

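// Instrument a call or invoke. Intrinsics and inline asm fall back to plain
// operand shadow propagation; functions with a known wrapper kind are
// dispatched accordingly; all other calls pass argument and return labels
// according to the instrumented ABI in use.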
void DFSanVisitor::visitCallSite(CallSite CS) {
  Function *F = CS.getCalledFunction();
  if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) {
    visitOperandShadowInst(*CS.getInstruction());
    return;
  }

  // Calls to this function are synthesized in wrappers, and we shouldn't
  // instrument them.
  if (F == DFSF.DFS.DFSanVarargWrapperFn)
    return;

  IRBuilder<> IRB(CS.getInstruction());

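  // Calls that resolve through UnwrappedFnMap are handled according to the
  // target's wrapper kind: warn about unimplemented propagation, discard
  // labels, treat the result as a pure function of the operands' labels, or
  // redirect the call to its __dfsw_ custom wrapper.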
  DenseMap<Value *, Function *>::iterator i =
      DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue());
  if (i != DFSF.DFS.UnwrappedFnMap.end()) {
    Function *F = i->second;
    switch (DFSF.DFS.getWrapperKind(F)) {
    case DataFlowSanitizer::WK_Warning:
      CS.setCalledFunction(F);
      IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
                     IRB.CreateGlobalStringPtr(F->getName()));
      DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Discard:
      CS.setCalledFunction(F);
      DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
      return;
    case DataFlowSanitizer::WK_Functional:
      CS.setCalledFunction(F);
      visitOperandShadowInst(*CS.getInstruction());
      return;
    case DataFlowSanitizer::WK_Custom:
      // Don't try to handle invokes of custom functions, it's too complicated.
      // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
      // wrapper.
      if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
        FunctionType *FT = F->getFunctionType();
        TransformedFunction CustomFn = DFSF.DFS.getCustomFunctionType(FT);
        std::string CustomFName = "__dfsw_";
        CustomFName += F->getName();
        Constant *CustomF = DFSF.DFS.Mod->getOrInsertFunction(
            CustomFName, CustomFn.TransformedType);
        if (Function *CustomFn = dyn_cast<Function>(CustomF)) {
          CustomFn->copyAttributesFrom(F);

          // Custom functions returning non-void will write to the return label.
          if (!FT->getReturnType()->isVoidTy()) {
            CustomFn->removeAttributes(AttributeList::FunctionIndex,
                                       DFSF.DFS.ReadOnlyNoneAttrs);
          }
        }

        std::vector<Value *> Args;

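        // Pass through the original arguments, replacing any function-pointer
        // argument with a (trampoline, callee) pair so the custom wrapper can
        // call it back using the instrumented ABI.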
        CallSite::arg_iterator i = CS.arg_begin();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
          Type *T = (*i)->getType();
          FunctionType *ParamFT;
          if (isa<PointerType>(T) &&
              (ParamFT = dyn_cast<FunctionType>(
                   cast<PointerType>(T)->getElementType()))) {
            std::string TName = "dfst";
            TName += utostr(FT->getNumParams() - n);
            TName += "$";
            TName += F->getName();
            Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
            Args.push_back(T);
            Args.push_back(
                IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
          } else {
            Args.push_back(*i);
          }
        }

        i = CS.arg_begin();
        const unsigned ShadowArgStart = Args.size();
        for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
          Args.push_back(DFSF.getShadow(*i));

        if (FT->isVarArg()) {
          auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
                                           CS.arg_size() - FT->getNumParams());
          auto *LabelVAAlloca = new AllocaInst(
              LabelVATy, getDataLayout().getAllocaAddrSpace(),
              "labelva", &DFSF.F->getEntryBlock().front());

          for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
            auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
            IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
          }

          Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
        }

        if (!FT->getReturnType()->isVoidTy()) {
          if (!DFSF.LabelReturnAlloca) {
            DFSF.LabelReturnAlloca =
                new AllocaInst(DFSF.DFS.ShadowTy,
                               getDataLayout().getAllocaAddrSpace(),
                               "labelreturn", &DFSF.F->getEntryBlock().front());
          }
          Args.push_back(DFSF.LabelReturnAlloca);
        }

        for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i)
          Args.push_back(*i);

        CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
        CustomCI->setCallingConv(CI->getCallingConv());
        CustomCI->setAttributes(TransformFunctionAttributes(CustomFn,
            CI->getContext(), CI->getAttributes()));

        // Update the parameter attributes of the custom call instruction to
        // zero extend the shadow parameters. This is required for targets
        // which consider ShadowTy an illegal type.
        for (unsigned n = 0; n < FT->getNumParams(); n++) {
          const unsigned ArgNo = ShadowArgStart + n;
          if (CustomCI->getArgOperand(ArgNo)->getType() == DFSF.DFS.ShadowTy)
            CustomCI->addParamAttr(ArgNo, Attribute::ZExt);
        }

        if (!FT->getReturnType()->isVoidTy()) {
          LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca);
          DFSF.setShadow(CustomCI, LabelLoad);
        }

        CI->replaceAllUsesWith(CustomCI);
        CI->eraseFromParent();
        return;
      }
      break;
    }
  }

  FunctionType *FT = cast<FunctionType>(
      CS.getCalledValue()->getType()->getPointerElementType());
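  // Under the TLS ABI, argument labels are passed to the callee through the
  // per-thread argument TLS slots.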
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
    for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
      IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)),
                      DFSF.getArgTLS(i, CS.getInstruction()));
    }
  }

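  // For calls that produce a value, find the point just after the call where
  // the return label can be read (splitting the invoke's normal-destination
  // edge if necessary); under the TLS ABI the label is reloaded there from the
  // retval TLS slot.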
  Instruction *Next = nullptr;
  if (!CS.getType()->isVoidTy()) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      if (II->getNormalDest()->getSinglePredecessor()) {
        Next = &II->getNormalDest()->front();
      } else {
        BasicBlock *NewBB =
            SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
        Next = &NewBB->front();
      }
    } else {
      assert(CS->getIterator() != CS->getParent()->end());
      Next = CS->getNextNode();
    }

    if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
      IRBuilder<> NextIRB(Next);
      LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
      DFSF.SkipInsts.insert(LI);
      DFSF.setShadow(CS.getInstruction(), LI);
      DFSF.NonZeroChecks.push_back(LI);
    }
  }

  // Do all instrumentation for IA_Args down here to defer tampering with the
  // CFG in a way that SplitEdge may be able to detect.
  if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
    FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
    Value *Func =
        IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT));
    std::vector<Value *> Args;

    CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(*i);

    i = CS.arg_begin();
    for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
      Args.push_back(DFSF.getShadow(*i));

    if (FT->isVarArg()) {
      unsigned VarArgSize = CS.arg_size() - FT->getNumParams();
      ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
      AllocaInst *VarArgShadow =
          new AllocaInst(VarArgArrayTy, getDataLayout().getAllocaAddrSpace(),
                         "", &DFSF.F->getEntryBlock().front());
      Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
      for (unsigned n = 0; i != e; ++i, ++n) {
        IRB.CreateStore(
            DFSF.getShadow(*i),
            IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
        Args.push_back(*i);
      }
    }

    CallSite NewCS;
    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      NewCS = IRB.CreateInvoke(Func, II->getNormalDest(), II->getUnwindDest(),
                               Args);
    } else {
      NewCS = IRB.CreateCall(Func, Args);
    }
    NewCS.setCallingConv(CS.getCallingConv());
    NewCS.setAttributes(CS.getAttributes().removeAttributes(
        *DFSF.DFS.Ctx, AttributeList::ReturnIndex,
        AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType())));

    if (Next) {
      ExtractValueInst *ExVal =
          ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next);
      DFSF.SkipInsts.insert(ExVal);
      ExtractValueInst *ExShadow =
          ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next);
      DFSF.SkipInsts.insert(ExShadow);
      DFSF.setShadow(ExVal, ExShadow);
      DFSF.NonZeroChecks.push_back(ExShadow);

      CS.getInstruction()->replaceAllUsesWith(ExVal);
    }

    CS.getInstruction()->eraseFromParent();
  }
}

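// Create a shadow phi mirroring the original phi. Its incoming values start as
// undef and are patched up later via DFSF.PHIFixups, once shadows exist for
// all incoming values.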
void DFSanVisitor::visitPHINode(PHINode &PN) {
  PHINode *ShadowPN =
      PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);

  // Give the shadow phi node valid predecessors to fool SplitEdge into working.
  Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
  for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
       ++i) {
    ShadowPN->addIncoming(UndefShadow, *i);
  }

  DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
  DFSF.setShadow(&PN, ShadowPN);
}
