//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}
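
// Illustrative note (example, not in the original source): for a K&R-style
// declaration `int f();`, a call `f(1, 2.0f)` is arranged through the
// variadic type `int (...)` with no required arguments, so the arguments
// undergo the C default argument promotions (the float is passed as double).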

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
                                 const CanQual<FunctionProtoType> &FPT,
                                 const FunctionDecl *FD) {
  // Fast path: unknown target.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}
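
// Illustrative example (not from the original source): a declaration like
//   void fill(char *buf __attribute__((pass_object_size(0))));
// is arranged as if it were `void fill(char *buf, size_t)`; the statically
// known object size is passed as the extra trailing parameter.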

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null value of RD means we don't have any meaningful "this" argument
/// type, so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}
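
// For illustration (example not in the original source): for a method
// `int C::scale(float f)`, the arranged argument list is (C*, float) -
// the implicit 'this' pointer is prepended ahead of the formal parameters.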

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  appendParameterTypes(*this, argTypes, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}
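
// Worked example (illustrative, not from the original source): for a call
// `printf("%d %s", n, s)` against `int printf(const char *, ...)`, the
// prototype has one parameter, so required == RequiredArgs(1) and the two
// trailing arguments are passed under the variadic convention.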

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}
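
// Illustrative example (not from the original source): a type like
//   struct S { int a[2]; _Complex float c; };
// expands to the flat sequence (i32, i32, float, float), so
// getExpansionSize() below returns 4 for it.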

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
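
// Illustrative example (not from the original source): given
//   %outer = type { %inner }   and   %inner = type { i32 }
// a coerced access of 4 bytes through an %outer* dives twice, yielding a
// "coerce.dive" GEP that points directly at the inner i32.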

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
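
// Worked example (illustrative, not from the original source): coercing the
// i64 value 0xAABBCCDD11223344 to i32 yields 0xAABBCCDD on a big-endian
// target (lshr by 32, then trunc) but 0x11223344 on a little-endian target
// (plain trunc) - exactly what a store followed by a narrower load through
// memory would produce on each target.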


/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
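
// Illustrative example (not from the original source): storing a value of
// type { i32, double } this way emits two extractvalue/store pairs (at
// offsets 0 and 8) instead of a single first-class aggregate store.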

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(
        addr, CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
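
// Illustrative mapping (hypothetical target ABI, not from the original
// source): for `Big f(int x)` where Big is returned indirectly, the IR
// signature is `void @f(%struct.Big* sret, i32)`; here getSRetArgNo() == 0,
// getIRArgs(0) == {1, 1}, and totalIRArgs() == 2.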
1246 } // namespace
1247
1248 /***/
1249
ReturnTypeUsesSRet(const CGFunctionInfo & FI)1250 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1251 return FI.getReturnInfo().isIndirect();
1252 }
1253
ReturnSlotInterferesWithArgs(const CGFunctionInfo & FI)1254 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1255 return ReturnTypeUsesSRet(FI) &&
1256 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1257 }
1258
ReturnTypeUsesFPRet(QualType ResultType)1259 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1260 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1261 switch (BT->getKind()) {
1262 default:
1263 return false;
1264 case BuiltinType::Float:
1265 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1266 case BuiltinType::Double:
1267 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1268 case BuiltinType::LongDouble:
1269 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1270 }
1271 }
1272
1273 return false;
1274 }
1275
ReturnTypeUsesFP2Ret(QualType ResultType)1276 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1277 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1278 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1279 if (BT->getKind() == BuiltinType::LongDouble)
1280 return getTarget().useObjCFP2RetForComplexLongDouble();
1281 }
1282 }
1283
1284 return false;
1285 }
1286
GetFunctionType(GlobalDecl GD)1287 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1288 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1289 return GetFunctionType(FI);
1290 }
1291
1292 llvm::FunctionType *
GetFunctionType(const CGFunctionInfo & FI)1293 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1294
1295 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1296 (void)Inserted;
1297 assert(Inserted && "Recursively being processed?");
1298
1299 llvm::Type *resultType = nullptr;
1300 const ABIArgInfo &retAI = FI.getReturnInfo();
1301 switch (retAI.getKind()) {
1302 case ABIArgInfo::Expand:
1303 llvm_unreachable("Invalid ABI kind for return argument");
1304
1305 case ABIArgInfo::Extend:
1306 case ABIArgInfo::Direct:
1307 resultType = retAI.getCoerceToType();
1308 break;
1309
1310 case ABIArgInfo::InAlloca:
1311 if (retAI.getInAllocaSRet()) {
1312 // sret things on win32 aren't void, they return the sret pointer.
1313 QualType ret = FI.getReturnType();
1314 llvm::Type *ty = ConvertType(ret);
1315 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1316 resultType = llvm::PointerType::get(ty, addressSpace);
1317 } else {
1318 resultType = llvm::Type::getVoidTy(getLLVMContext());
1319 }
1320 break;
1321
1322 case ABIArgInfo::Indirect:
1323 case ABIArgInfo::Ignore:
1324 resultType = llvm::Type::getVoidTy(getLLVMContext());
1325 break;
1326 }
1327
1328 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1329 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1330
1331 // Add type for sret argument.
1332 if (IRFunctionArgs.hasSRetArg()) {
1333 QualType Ret = FI.getReturnType();
1334 llvm::Type *Ty = ConvertType(Ret);
1335 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1336 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1337 llvm::PointerType::get(Ty, AddressSpace);
1338 }
1339
1340 // Add type for inalloca argument.
1341 if (IRFunctionArgs.hasInallocaArg()) {
1342 auto ArgStruct = FI.getArgStruct();
1343 assert(ArgStruct);
1344 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1345 }
1346
1347 // Add in all of the required arguments.
1348 unsigned ArgNo = 0;
1349 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1350 ie = it + FI.getNumRequiredArgs();
1351 for (; it != ie; ++it, ++ArgNo) {
1352 const ABIArgInfo &ArgInfo = it->info;
1353
1354 // Insert a padding type to ensure proper alignment.
1355 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1356 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1357 ArgInfo.getPaddingType();
1358
1359 unsigned FirstIRArg, NumIRArgs;
1360 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1361
1362 switch (ArgInfo.getKind()) {
1363 case ABIArgInfo::Ignore:
1364 case ABIArgInfo::InAlloca:
1365 assert(NumIRArgs == 0);
1366 break;
1367
1368 case ABIArgInfo::Indirect: {
1369 assert(NumIRArgs == 1);
1370 // indirect arguments are always on the stack, which is addr space #0.
1371 llvm::Type *LTy = ConvertTypeForMem(it->type);
1372 ArgTypes[FirstIRArg] = LTy->getPointerTo();
1373 break;
1374 }
1375
1376 case ABIArgInfo::Extend:
1377 case ABIArgInfo::Direct: {
1378 // Fast-isel and the optimizer generally like scalar values better than
1379 // FCAs, so we flatten them if this is safe to do for this argument.
1380 llvm::Type *argType = ArgInfo.getCoerceToType();
1381 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1382 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1383 assert(NumIRArgs == st->getNumElements());
1384 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1385 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1386 } else {
1387 assert(NumIRArgs == 1);
1388 ArgTypes[FirstIRArg] = argType;
1389 }
1390 break;
1391 }
1392
1393 case ABIArgInfo::Expand:
1394 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1395 getExpandedTypes(it->type, ArgTypesIter);
1396 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1397 break;
1398 }
1399 }
1400
1401 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1402 assert(Erased && "Not in set?");
1403
1404 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1405 }
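
// Illustrative sketch of the resulting signatures on a typical x86-64 SysV
// target (assumed here purely for exposition): given
//
//   struct S { double x, y; };
//   struct S f(int n);
//
// the return is Direct, coerced to { double, double }, so the IR signature
// is roughly
//
//   { double, double } @f(i32)
//
// whereas a struct too large for registers would instead be returned
// Indirect, i.e. roughly void @f(%struct.S* sret, i32).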
1406
1407 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1408 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1409 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1410
1411 if (!isFuncTypeConvertible(FPT))
1412 return llvm::StructType::get(getLLVMContext());
1413
1414 const CGFunctionInfo *Info;
1415 if (isa<CXXDestructorDecl>(MD))
1416 Info =
1417 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1418 else
1419 Info = &arrangeCXXMethodDeclaration(MD);
1420 return GetFunctionType(*Info);
1421 }
1422
1423 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1424 llvm::AttrBuilder &FuncAttrs,
1425 const FunctionProtoType *FPT) {
1426 if (!FPT)
1427 return;
1428
1429 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1430 FPT->isNothrow(Ctx))
1431 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1432 }
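
// Illustrative: a prototype whose exception specification provably cannot
// throw, e.g.
//   void f() noexcept;   // or: void f() throw();
// is what makes isNothrow() hold above, and it maps onto the IR-level
// 'nounwind' attribute.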
1433
1434 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
1435 CGCalleeInfo CalleeInfo,
1436 AttributeListType &PAL,
1437 unsigned &CallingConv,
1438 bool AttrOnCallSite) {
1439 llvm::AttrBuilder FuncAttrs;
1440 llvm::AttrBuilder RetAttrs;
1441 bool HasOptnone = false;
1442
1443 CallingConv = FI.getEffectiveCallingConvention();
1444
1445 if (FI.isNoReturn())
1446 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1447
1448 // If we have information about the function prototype, we can learn
1449 // attributes from there.
1450 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1451 CalleeInfo.getCalleeFunctionProtoType());
1452
1453 const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1454
1455 // FIXME: handle sseregparm someday...
1456 if (TargetDecl) {
1457 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1458 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1459 if (TargetDecl->hasAttr<NoThrowAttr>())
1460 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1461 if (TargetDecl->hasAttr<NoReturnAttr>())
1462 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1463 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1464 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1465
1466 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1467 AddAttributesFromFunctionProtoType(
1468 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1469 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1470 // These attributes are not inherited by overriders.
1471 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1472 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1473 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1474 }
1475
1476 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1477 if (TargetDecl->hasAttr<ConstAttr>()) {
1478 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1479 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1480 } else if (TargetDecl->hasAttr<PureAttr>()) {
1481 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1482 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1483 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1484 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1485 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1486 }
1487 if (TargetDecl->hasAttr<RestrictAttr>())
1488 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1489 if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1490 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1491
1492 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1493 }
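
// A rough summary of the decl-attribute mapping above (illustrative, not
// exhaustive):
//   __attribute__((const))  int f(int);  -> readnone + nounwind
//   __attribute__((pure))   int g(int);  -> readonly + nounwind
//   __attribute__((malloc)) void *h();   -> noalias on the return value
//   __attribute__((returns_nonnull)) void *k(); -> nonnull on the return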
1494
1495 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1496 if (!HasOptnone) {
1497 if (CodeGenOpts.OptimizeSize)
1498 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1499 if (CodeGenOpts.OptimizeSize == 2)
1500 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1501 }
1502
1503 if (CodeGenOpts.DisableRedZone)
1504 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1505 if (CodeGenOpts.NoImplicitFloat)
1506 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1507 if (CodeGenOpts.EnableSegmentedStacks &&
1508 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1509 FuncAttrs.addAttribute("split-stack");
1510
1511 if (AttrOnCallSite) {
1512 // Attributes that should go on the call site only.
1513 if (!CodeGenOpts.SimplifyLibCalls)
1514 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1515 if (!CodeGenOpts.TrapFuncName.empty())
1516 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1517 } else {
1518 // Attributes that should go on the function, but not the call site.
1519 if (!CodeGenOpts.DisableFPElim) {
1520 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1521 } else if (CodeGenOpts.OmitLeafFramePointer) {
1522 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1523 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1524 } else {
1525 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1526 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1527 }
1528
1529 bool DisableTailCalls =
1530 CodeGenOpts.DisableTailCalls ||
1531 (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
1532 FuncAttrs.addAttribute("disable-tail-calls",
1533 llvm::toStringRef(DisableTailCalls));
1534
1535 FuncAttrs.addAttribute("less-precise-fpmad",
1536 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1537 FuncAttrs.addAttribute("no-infs-fp-math",
1538 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1539 FuncAttrs.addAttribute("no-nans-fp-math",
1540 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1541 FuncAttrs.addAttribute("unsafe-fp-math",
1542 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1543 FuncAttrs.addAttribute("use-soft-float",
1544 llvm::toStringRef(CodeGenOpts.SoftFloat));
1545 FuncAttrs.addAttribute("stack-protector-buffer-size",
1546 llvm::utostr(CodeGenOpts.SSPBufferSize));
1547
1548 if (CodeGenOpts.StackRealignment)
1549 FuncAttrs.addAttribute("stackrealign");
1550
1551 // Add target-cpu and target-features attributes to functions. If
1552 // we have a decl for the function and it has a target attribute then
1553 // parse that and add it to the feature set.
1554 StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1555 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1556 if (FD && FD->hasAttr<TargetAttr>()) {
1557 llvm::StringMap<bool> FeatureMap;
1558 getFunctionFeatureMap(FeatureMap, FD);
1559
1560 // Produce the canonical string for this set of features.
1561 std::vector<std::string> Features;
1562 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1563 ie = FeatureMap.end();
1564 it != ie; ++it)
1565 Features.push_back((it->second ? "+" : "-") + it->first().str());
1566
1567 // Now add the target-cpu and target-features to the function.
1568 // While we populated the feature map above, we still need to
1569 // get and parse the target attribute so we can get the cpu for
1570 // the function.
1571 const auto *TD = FD->getAttr<TargetAttr>();
1572 TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1573 if (ParsedAttr.second != "")
1574 TargetCPU = ParsedAttr.second;
1575 if (TargetCPU != "")
1576 FuncAttrs.addAttribute("target-cpu", TargetCPU);
1577 if (!Features.empty()) {
1578 std::sort(Features.begin(), Features.end());
1579 FuncAttrs.addAttribute(
1580 "target-features",
1581 llvm::join(Features.begin(), Features.end(), ","));
1582 }
1583 } else {
1584 // Otherwise just add the existing target cpu and target features to the
1585 // function.
1586 std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
1587 if (TargetCPU != "")
1588 FuncAttrs.addAttribute("target-cpu", TargetCPU);
1589 if (!Features.empty()) {
1590 std::sort(Features.begin(), Features.end());
1591 FuncAttrs.addAttribute(
1592 "target-features",
1593 llvm::join(Features.begin(), Features.end(), ","));
1594 }
1595 }
1596 }
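
// Illustrative result of the strings built above, assuming a hypothetical
// invocation with -target-cpu corei7 and an __attribute__((target("avx")))
// function:
//   "target-cpu"="corei7"
//   "target-features"="+avx,+sse4.2,..."   (sorted, '+'/'-' prefixed)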
1597
1598 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1599
1600 QualType RetTy = FI.getReturnType();
1601 const ABIArgInfo &RetAI = FI.getReturnInfo();
1602 switch (RetAI.getKind()) {
1603 case ABIArgInfo::Extend:
1604 if (RetTy->hasSignedIntegerRepresentation())
1605 RetAttrs.addAttribute(llvm::Attribute::SExt);
1606 else if (RetTy->hasUnsignedIntegerRepresentation())
1607 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1608 // FALL THROUGH
1609 case ABIArgInfo::Direct:
1610 if (RetAI.getInReg())
1611 RetAttrs.addAttribute(llvm::Attribute::InReg);
1612 break;
1613 case ABIArgInfo::Ignore:
1614 break;
1615
1616 case ABIArgInfo::InAlloca:
1617 case ABIArgInfo::Indirect: {
1618 // inalloca and sret disable readnone and readonly
1619 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1620 .removeAttribute(llvm::Attribute::ReadNone);
1621 break;
1622 }
1623
1624 case ABIArgInfo::Expand:
1625 llvm_unreachable("Invalid ABI kind for return argument");
1626 }
1627
1628 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1629 QualType PTy = RefTy->getPointeeType();
1630 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1631 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1632 .getQuantity());
1633 else if (getContext().getTargetAddressSpace(PTy) == 0)
1634 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1635 }
1636
1637 // Attach return attributes.
1638 if (RetAttrs.hasAttributes()) {
1639 PAL.push_back(llvm::AttributeSet::get(
1640 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1641 }
1642
1643 // Attach attributes to sret.
1644 if (IRFunctionArgs.hasSRetArg()) {
1645 llvm::AttrBuilder SRETAttrs;
1646 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1647 if (RetAI.getInReg())
1648 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1649 PAL.push_back(llvm::AttributeSet::get(
1650 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1651 }
1652
1653 // Attach attributes to inalloca argument.
1654 if (IRFunctionArgs.hasInallocaArg()) {
1655 llvm::AttrBuilder Attrs;
1656 Attrs.addAttribute(llvm::Attribute::InAlloca);
1657 PAL.push_back(llvm::AttributeSet::get(
1658 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1659 }
1660
1661 unsigned ArgNo = 0;
1662 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1663 E = FI.arg_end();
1664 I != E; ++I, ++ArgNo) {
1665 QualType ParamType = I->type;
1666 const ABIArgInfo &AI = I->info;
1667 llvm::AttrBuilder Attrs;
1668
1669 // Add attribute for padding argument, if necessary.
1670 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1671 if (AI.getPaddingInReg())
1672 PAL.push_back(llvm::AttributeSet::get(
1673 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1674 llvm::Attribute::InReg));
1675 }
1676
1677 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1678 // have the corresponding parameter variable. It doesn't make
1679 // sense to do it here because parameters are so messed up.
1680 switch (AI.getKind()) {
1681 case ABIArgInfo::Extend:
1682 if (ParamType->isSignedIntegerOrEnumerationType())
1683 Attrs.addAttribute(llvm::Attribute::SExt);
1684 else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1685 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1686 Attrs.addAttribute(llvm::Attribute::SExt);
1687 else
1688 Attrs.addAttribute(llvm::Attribute::ZExt);
1689 }
1690 // FALL THROUGH
1691 case ABIArgInfo::Direct:
1692 if (ArgNo == 0 && FI.isChainCall())
1693 Attrs.addAttribute(llvm::Attribute::Nest);
1694 else if (AI.getInReg())
1695 Attrs.addAttribute(llvm::Attribute::InReg);
1696 break;
1697
1698 case ABIArgInfo::Indirect: {
1699 if (AI.getInReg())
1700 Attrs.addAttribute(llvm::Attribute::InReg);
1701
1702 if (AI.getIndirectByVal())
1703 Attrs.addAttribute(llvm::Attribute::ByVal);
1704
1705 CharUnits Align = AI.getIndirectAlign();
1706
1707 // In a byval argument, it is important that the required
1708 // alignment of the type is honored, as LLVM might be creating a
1709 // *new* stack object, and needs to know what alignment to give
1710 // it. (Sometimes it can deduce a sensible alignment on its own,
1711 // but not if clang decides it must emit a packed struct, or the
1712 // user specifies increased alignment requirements.)
1713 //
1714 // This is different from indirect *not* byval, where the object
1715 // exists already, and the align attribute is purely
1716 // informative.
1717 assert(!Align.isZero());
1718
1719 // For now, only add this when we have a byval argument.
1720 // TODO: be less lazy about updating test cases.
1721 if (AI.getIndirectByVal())
1722 Attrs.addAlignmentAttr(Align.getQuantity());
1723
1724 // byval disables readnone and readonly.
1725 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1726 .removeAttribute(llvm::Attribute::ReadNone);
1727 break;
1728 }
1729 case ABIArgInfo::Ignore:
1730 case ABIArgInfo::Expand:
1731 continue;
1732
1733 case ABIArgInfo::InAlloca:
1734 // inalloca disables readnone and readonly.
1735 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1736 .removeAttribute(llvm::Attribute::ReadNone);
1737 continue;
1738 }
1739
1740 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1741 QualType PTy = RefTy->getPointeeType();
1742 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1743 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1744 .getQuantity());
1745 else if (getContext().getTargetAddressSpace(PTy) == 0)
1746 Attrs.addAttribute(llvm::Attribute::NonNull);
1747 }
1748
1749 if (Attrs.hasAttributes()) {
1750 unsigned FirstIRArg, NumIRArgs;
1751 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1752 for (unsigned i = 0; i < NumIRArgs; i++)
1753 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
1754 FirstIRArg + i + 1, Attrs));
1755 }
1756 }
1757 assert(ArgNo == FI.arg_size());
1758
1759 if (FuncAttrs.hasAttributes())
1760 PAL.push_back(llvm::
1761 AttributeSet::get(getLLVMContext(),
1762 llvm::AttributeSet::FunctionIndex,
1763 FuncAttrs));
1764 }
1765
1766 /// An argument came in as a promoted argument; demote it back to its
1767 /// declared type.
1768 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1769 const VarDecl *var,
1770 llvm::Value *value) {
1771 llvm::Type *varType = CGF.ConvertType(var->getType());
1772
1773 // This can happen with promotions that actually don't change the
1774 // underlying type, like the enum promotions.
1775 if (value->getType() == varType) return value;
1776
1777 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1778 && "unexpected promotion type");
1779
1780 if (isa<llvm::IntegerType>(varType))
1781 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1782
1783 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1784 }
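
// Illustrative: given the K&R-style definition
//   float f(x) float x; { return x; }
// the caller promotes the argument to double, so the prologue receives a
// double and the FPCast above truncates it back to the declared float.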
1785
1786 /// Returns the attribute (either parameter attribute, or function
1787 /// attribute), which declares argument ArgNo to be non-null.
1788 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
1789 QualType ArgType, unsigned ArgNo) {
1790 // FIXME: __attribute__((nonnull)) can also be applied to:
1791 // - references to pointers, where the pointee is known to be
1792 // nonnull (apparently a Clang extension)
1793 // - transparent unions containing pointers
1794 // In the former case, LLVM IR cannot represent the constraint. In
1795 // the latter case, we have no guarantee that the transparent union
1796 // is in fact passed as a pointer.
1797 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
1798 return nullptr;
1799 // First, check attribute on parameter itself.
1800 if (PVD) {
1801 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
1802 return ParmNNAttr;
1803 }
1804 // Check function attributes.
1805 if (!FD)
1806 return nullptr;
1807 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
1808 if (NNAttr->isNonNull(ArgNo))
1809 return NNAttr;
1810 }
1811 return nullptr;
1812 }
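
// Illustrative: both spellings below mark the first parameter non-null;
//   void f(void *p) __attribute__((nonnull(1)));   // on the function
//   void g(__attribute__((nonnull)) void *p);      // on the parameter
// Note that isNonNull() takes the 0-based function-scope index here, even
// though the source-level nonnull(N) spelling is 1-based.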
1813
1814 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1815 llvm::Function *Fn,
1816 const FunctionArgList &Args) {
1817 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
1818 // Naked functions don't have prologues.
1819 return;
1820
1821 // If this is an implicit-return-zero function, go ahead and
1822 // initialize the return value. TODO: it might be nice to have
1823 // a more general mechanism for this that didn't require synthesized
1824 // return statements.
1825 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1826 if (FD->hasImplicitReturnZero()) {
1827 QualType RetTy = FD->getReturnType().getUnqualifiedType();
1828 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1829 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1830 Builder.CreateStore(Zero, ReturnValue);
1831 }
1832 }
1833
1834 // FIXME: We no longer need the types from FunctionArgList; lift up and
1835 // simplify.
1836
1837 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
1838 // Flattened function arguments.
1839 SmallVector<llvm::Argument *, 16> FnArgs;
1840 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
1841 for (auto &Arg : Fn->args()) {
1842 FnArgs.push_back(&Arg);
1843 }
1844 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
1845
1846 // If we're using inalloca, all the memory arguments are GEPs off of the last
1847 // parameter, which is a pointer to the complete memory area.
1848 Address ArgStruct = Address::invalid();
1849 const llvm::StructLayout *ArgStructLayout = nullptr;
1850 if (IRFunctionArgs.hasInallocaArg()) {
1851 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
1852 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
1853 FI.getArgStructAlignment());
1854
1855 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
1856 }
1857
1858 // Name the struct return parameter.
1859 if (IRFunctionArgs.hasSRetArg()) {
1860 auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
1861 AI->setName("agg.result");
1862 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
1863 llvm::Attribute::NoAlias));
1864 }
1865
1866 // Track if we received the parameter as a pointer (indirect, byval, or
1867 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
1868 // into a local alloca for us.
1869 SmallVector<ParamValue, 16> ArgVals;
1870 ArgVals.reserve(Args.size());
1871
1872 // Create a pointer value for every parameter declaration. This usually
1873 // entails copying one or more LLVM IR arguments into an alloca. Don't push
1874 // any cleanups or do anything that might unwind. We do that separately, so
1875 // we can push the cleanups in the correct order for the ABI.
1876 assert(FI.arg_size() == Args.size() &&
1877 "Mismatch between function signature & arguments.");
1878 unsigned ArgNo = 0;
1879 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1880 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1881 i != e; ++i, ++info_it, ++ArgNo) {
1882 const VarDecl *Arg = *i;
1883 QualType Ty = info_it->type;
1884 const ABIArgInfo &ArgI = info_it->info;
1885
1886 bool isPromoted =
1887 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1888
1889 unsigned FirstIRArg, NumIRArgs;
1890 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1891
1892 switch (ArgI.getKind()) {
1893 case ABIArgInfo::InAlloca: {
1894 assert(NumIRArgs == 0);
1895 auto FieldIndex = ArgI.getInAllocaFieldIndex();
1896 CharUnits FieldOffset =
1897 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
1898 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
1899 Arg->getName());
1900 ArgVals.push_back(ParamValue::forIndirect(V));
1901 break;
1902 }
1903
1904 case ABIArgInfo::Indirect: {
1905 assert(NumIRArgs == 1);
1906 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
1907
1908 if (!hasScalarEvaluationKind(Ty)) {
1909 // Aggregates and complex variables are accessed by reference. All we
1910 // need to do is realign the value, if requested.
1911 Address V = ParamAddr;
1912 if (ArgI.getIndirectRealign()) {
1913 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
1914
1915 // Copy from the incoming argument pointer to the temporary with the
1916 // appropriate alignment.
1917 //
1918 // FIXME: We should have a common utility for generating an aggregate
1919 // copy.
1920 CharUnits Size = getContext().getTypeSizeInChars(Ty);
1921 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
1922 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
1923 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
1924 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
1925 V = AlignedTemp;
1926 }
1927 ArgVals.push_back(ParamValue::forIndirect(V));
1928 } else {
1929 // Load scalar value from indirect argument.
1930 llvm::Value *V =
1931 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
1932
1933 if (isPromoted)
1934 V = emitArgumentDemotion(*this, Arg, V);
1935 ArgVals.push_back(ParamValue::forDirect(V));
1936 }
1937 break;
1938 }
1939
1940 case ABIArgInfo::Extend:
1941 case ABIArgInfo::Direct: {
1942
1943 // If we have the trivial case, handle it with no muss and fuss.
1944 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1945 ArgI.getCoerceToType() == ConvertType(Ty) &&
1946 ArgI.getDirectOffset() == 0) {
1947 assert(NumIRArgs == 1);
1948 auto AI = FnArgs[FirstIRArg];
1949 llvm::Value *V = AI;
1950
1951 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
1952 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
1953 PVD->getFunctionScopeIndex()))
1954 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1955 AI->getArgNo() + 1,
1956 llvm::Attribute::NonNull));
1957
1958 QualType OTy = PVD->getOriginalType();
1959 if (const auto *ArrTy =
1960 getContext().getAsConstantArrayType(OTy)) {
1961 // A C99 array parameter declaration with the static keyword also
1962 // indicates dereferenceability, and if the size is constant we can
1963 // use the dereferenceable attribute (which requires the size in
1964 // bytes).
1965 if (ArrTy->getSizeModifier() == ArrayType::Static) {
1966 QualType ETy = ArrTy->getElementType();
1967 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
1968 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
1969 ArrSize) {
1970 llvm::AttrBuilder Attrs;
1971 Attrs.addDereferenceableAttr(
1972 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
1973 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1974 AI->getArgNo() + 1, Attrs));
1975 } else if (getContext().getTargetAddressSpace(ETy) == 0) {
1976 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1977 AI->getArgNo() + 1,
1978 llvm::Attribute::NonNull));
1979 }
1980 }
1981 } else if (const auto *ArrTy =
1982 getContext().getAsVariableArrayType(OTy)) {
1983 // For C99 VLAs with the static keyword, we don't know the size so
1984 // we can't use the dereferenceable attribute, but in addrspace(0)
1985 // we know that it must be nonnull.
1986 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
1987 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
1988 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1989 AI->getArgNo() + 1,
1990 llvm::Attribute::NonNull));
1991 }
1992
1993 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
1994 if (!AVAttr)
1995 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
1996 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
1997 if (AVAttr) {
1998 llvm::Value *AlignmentValue =
1999 EmitScalarExpr(AVAttr->getAlignment());
2000 llvm::ConstantInt *AlignmentCI =
2001 cast<llvm::ConstantInt>(AlignmentValue);
2002 unsigned Alignment =
2003 std::min((unsigned) AlignmentCI->getZExtValue(),
2004 +llvm::Value::MaximumAlignment);
2005
2006 llvm::AttrBuilder Attrs;
2007 Attrs.addAlignmentAttr(Alignment);
2008 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2009 AI->getArgNo() + 1, Attrs));
2010 }
2011 }
2012
2013 if (Arg->getType().isRestrictQualified())
2014 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2015 AI->getArgNo() + 1,
2016 llvm::Attribute::NoAlias));
2017
2018 // Ensure the argument is the correct type.
2019 if (V->getType() != ArgI.getCoerceToType())
2020 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2021
2022 if (isPromoted)
2023 V = emitArgumentDemotion(*this, Arg, V);
2024
2025 if (const CXXMethodDecl *MD =
2026 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
2027 if (MD->isVirtual() && Arg == CXXABIThisDecl)
2028 V = CGM.getCXXABI().
2029 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
2030 }
2031
2032 // Because of merging of function types from multiple decls it is
2033 // possible for the type of an argument to not match the corresponding
2034 // type in the function type. Since we are codegening the callee
2035 // here, add a cast to the argument type.
2036 llvm::Type *LTy = ConvertType(Arg->getType());
2037 if (V->getType() != LTy)
2038 V = Builder.CreateBitCast(V, LTy);
2039
2040 ArgVals.push_back(ParamValue::forDirect(V));
2041 break;
2042 }
2043
2044 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2045 Arg->getName());
2046
2047 // Pointer to store into.
2048 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2049
2050 // Fast-isel and the optimizer generally like scalar values better than
2051 // FCAs, so we flatten them if this is safe to do for this argument.
2052 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2053 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2054 STy->getNumElements() > 1) {
2055 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2056 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2057 llvm::Type *DstTy = Ptr.getElementType();
2058 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2059
2060 Address AddrToStoreInto = Address::invalid();
2061 if (SrcSize <= DstSize) {
2062 AddrToStoreInto =
2063 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2064 } else {
2065 AddrToStoreInto =
2066 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2067 }
2068
2069 assert(STy->getNumElements() == NumIRArgs);
2070 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2071 auto AI = FnArgs[FirstIRArg + i];
2072 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2073 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2074 Address EltPtr =
2075 Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2076 Builder.CreateStore(AI, EltPtr);
2077 }
2078
2079 if (SrcSize > DstSize) {
2080 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2081 }
2082
2083 } else {
2084 // Simple case, just do a coerced store of the argument into the alloca.
2085 assert(NumIRArgs == 1);
2086 auto AI = FnArgs[FirstIRArg];
2087 AI->setName(Arg->getName() + ".coerce");
2088 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2089 }
2090
2091 // Match to what EmitParmDecl is expecting for this type.
2092 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2093 llvm::Value *V =
2094 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2095 if (isPromoted)
2096 V = emitArgumentDemotion(*this, Arg, V);
2097 ArgVals.push_back(ParamValue::forDirect(V));
2098 } else {
2099 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2100 }
2101 break;
2102 }
2103
2104 case ABIArgInfo::Expand: {
2105 // If this structure was expanded into multiple arguments then
2106 // we need to create a temporary and reconstruct it from the
2107 // arguments.
2108 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2109 LValue LV = MakeAddrLValue(Alloca, Ty);
2110 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2111
2112 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2113 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2114 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2115 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2116 auto AI = FnArgs[FirstIRArg + i];
2117 AI->setName(Arg->getName() + "." + Twine(i));
2118 }
2119 break;
2120 }
2121
2122 case ABIArgInfo::Ignore:
2123 assert(NumIRArgs == 0);
2124 // Initialize the local variable appropriately.
2125 if (!hasScalarEvaluationKind(Ty)) {
2126 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2127 } else {
2128 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2129 ArgVals.push_back(ParamValue::forDirect(U));
2130 }
2131 break;
2132 }
2133 }
2134
2135 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2136 for (int I = Args.size() - 1; I >= 0; --I)
2137 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2138 } else {
2139 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2140 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2141 }
2142 }
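
// Illustrative prologue for a flattened Direct argument on a typical
// x86-64 SysV target (assumed for exposition): given
//   struct P { int a; double b; };
//   void use(struct P p);
// the parameter arrives as two IR arguments (i32 %p.coerce0,
// double %p.coerce1), which the Direct case above stores element-by-element
// into the local alloca for 'p' before EmitParmDecl runs.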
2143
2144 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2145 while (insn->use_empty()) {
2146 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2147 if (!bitcast) return;
2148
2149 // This is "safe" because we would have used a ConstantExpr otherwise.
2150 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2151 bitcast->eraseFromParent();
2152 }
2153 }
2154
2155 /// Try to emit a fused autorelease of a return result.
2156 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2157 llvm::Value *result) {
2158 // We must be immediately followed by the cast.
2159 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2160 if (BB->empty()) return nullptr;
2161 if (&BB->back() != result) return nullptr;
2162
2163 llvm::Type *resultType = result->getType();
2164
2165 // result is in a BasicBlock and is therefore an Instruction.
2166 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2167
2168 SmallVector<llvm::Instruction*,4> insnsToKill;
2169
2170 // Look for:
2171 // %generator = bitcast %type1* %generator2 to %type2*
2172 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2173 // We would have emitted this as a constant if the operand weren't
2174 // an Instruction.
2175 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2176
2177 // Require the generator to be immediately followed by the cast.
2178 if (generator->getNextNode() != bitcast)
2179 return nullptr;
2180
2181 insnsToKill.push_back(bitcast);
2182 }
2183
2184 // Look for:
2185 // %generator = call i8* @objc_retain(i8* %originalResult)
2186 // or
2187 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2188 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2189 if (!call) return nullptr;
2190
2191 bool doRetainAutorelease;
2192
2193 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2194 doRetainAutorelease = true;
2195 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2196 .objc_retainAutoreleasedReturnValue) {
2197 doRetainAutorelease = false;
2198
2199 // If we emitted an assembly marker for this call (and the
2200 // ARCEntrypoints field should have been set if so), go looking
2201 // for that call. If we can't find it, we can't do this
2202 // optimization. But it should always be the immediately previous
2203 // instruction, unless we needed bitcasts around the call.
2204 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2205 llvm::Instruction *prev = call->getPrevNode();
2206 assert(prev);
2207 if (isa<llvm::BitCastInst>(prev)) {
2208 prev = prev->getPrevNode();
2209 assert(prev);
2210 }
2211 assert(isa<llvm::CallInst>(prev));
2212 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2213 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2214 insnsToKill.push_back(prev);
2215 }
2216 } else {
2217 return nullptr;
2218 }
2219
2220 result = call->getArgOperand(0);
2221 insnsToKill.push_back(call);
2222
2223 // Keep killing bitcasts, for sanity. Note that we no longer care
2224 // about precise ordering as long as there's exactly one use.
2225 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2226 if (!bitcast->hasOneUse()) break;
2227 insnsToKill.push_back(bitcast);
2228 result = bitcast->getOperand(0);
2229 }
2230
2231 // Delete all the unnecessary instructions, from latest to earliest.
2232 for (SmallVectorImpl<llvm::Instruction*>::iterator
2233 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2234 (*i)->eraseFromParent();
2235
2236 // Do the fused retain/autorelease if we were asked to.
2237 if (doRetainAutorelease)
2238 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2239
2240 // Cast back to the result type.
2241 return CGF.Builder.CreateBitCast(result, resultType);
2242 }
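
// Illustrative: when the fusion above succeeds (at -O0), a tail sequence
// that would have been, roughly,
//   %retained = call i8* @objc_retain(i8* %val)
//   ... autorelease of %retained on return ...
// collapses into a single
//   %result = call i8* @objc_retainAutoreleaseReturnValue(i8* %val)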
2243
2244 /// If this is a +1 of the value of an immutable 'self', remove it.
2245 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2246 llvm::Value *result) {
2247 // This is only applicable to a method with an immutable 'self'.
2248 const ObjCMethodDecl *method =
2249 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2250 if (!method) return nullptr;
2251 const VarDecl *self = method->getSelfDecl();
2252 if (!self->getType().isConstQualified()) return nullptr;
2253
2254 // Look for a retain call.
2255 llvm::CallInst *retainCall =
2256 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2257 if (!retainCall ||
2258 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2259 return nullptr;
2260
2261 // Look for an ordinary load of 'self'.
2262 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2263 llvm::LoadInst *load =
2264 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2265 if (!load || load->isAtomic() || load->isVolatile() ||
2266 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2267 return nullptr;
2268
2269 // Okay! Burn it all down. This relies for correctness on the
2270 // assumption that the retain is emitted as part of the return and
2271 // that thereafter everything is used "linearly".
2272 llvm::Type *resultType = result->getType();
2273 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2274 assert(retainCall->use_empty());
2275 retainCall->eraseFromParent();
2276 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2277
2278 return CGF.Builder.CreateBitCast(load, resultType);
2279 }
2280
2281 /// Emit an ARC autorelease of the result of a function.
2282 ///
2283 /// \return the value to actually return from the function
2284 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2285 llvm::Value *result) {
2286 // If we're returning 'self', kill the initial retain. This is a
2287 // heuristic attempt to "encourage correctness" in the really unfortunate
2288 // case where we have a return of self during a dealloc and we desperately
2289 // need to avoid the possible autorelease.
2290 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2291 return self;
2292
2293 // At -O0, try to emit a fused retain/autorelease.
2294 if (CGF.shouldUseFusedARCCalls())
2295 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2296 return fused;
2297
2298 return CGF.EmitARCAutoreleaseReturnValue(result);
2299 }
2300
2301 /// Heuristically search for a dominating store to the return-value slot.
2302 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2303 // Check whether a User is a store whose pointer operand is the ReturnValue.
2304 // We are looking for stores to the ReturnValue, not for stores of the
2305 // ReturnValue to some other location.
2306 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2307 auto *SI = dyn_cast<llvm::StoreInst>(U);
2308 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2309 return nullptr;
2310 // These aren't actually possible for non-coerced returns, and we
2311 // only care about non-coerced returns on this code path.
2312 assert(!SI->isAtomic() && !SI->isVolatile());
2313 return SI;
2314 };
2315 // If there are multiple uses of the return-value slot, just check
2316 // for something immediately preceding the IP. Sometimes this can
2317 // happen with how we generate implicit-returns; it can also happen
2318 // with noreturn cleanups.
2319 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2320 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2321 if (IP->empty()) return nullptr;
2322 llvm::Instruction *I = &IP->back();
2323
2324 // Skip lifetime markers
2325 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2326 IE = IP->rend();
2327 II != IE; ++II) {
2328 if (llvm::IntrinsicInst *Intrinsic =
2329 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2330 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2331 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2332 ++II;
2333 if (II == IE)
2334 break;
2335 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2336 continue;
2337 }
2338 }
2339 I = &*II;
2340 break;
2341 }
2342
2343 return GetStoreIfValid(I);
2344 }
2345
2346 llvm::StoreInst *store =
2347 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2348 if (!store) return nullptr;
2349
2350 // Now do a quick-and-dirty dominance check: just walk up the
2351 // single-predecessors chain from the current insertion point.
2352 llvm::BasicBlock *StoreBB = store->getParent();
2353 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2354 while (IP != StoreBB) {
2355 if (!(IP = IP->getSinglePredecessor()))
2356 return nullptr;
2357 }
2358
2359 // Okay, the store's basic block dominates the insertion point; we
2360 // can do our thing.
2361 return store;
2362 }
2363
2364 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2365 bool EmitRetDbgLoc,
2366 SourceLocation EndLoc) {
2367 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2368 // Naked functions don't have epilogues.
2369 Builder.CreateUnreachable();
2370 return;
2371 }
2372
2373 // Functions with no result always return void.
2374 if (!ReturnValue.isValid()) {
2375 Builder.CreateRetVoid();
2376 return;
2377 }
2378
2379 llvm::DebugLoc RetDbgLoc;
2380 llvm::Value *RV = nullptr;
2381 QualType RetTy = FI.getReturnType();
2382 const ABIArgInfo &RetAI = FI.getReturnInfo();
2383
2384 switch (RetAI.getKind()) {
2385 case ABIArgInfo::InAlloca:
2386 // Aggregates get evaluated directly into the destination. Sometimes we
2387 // need to return the sret value in a register, though.
2388 assert(hasAggregateEvaluationKind(RetTy));
2389 if (RetAI.getInAllocaSRet()) {
2390 llvm::Function::arg_iterator EI = CurFn->arg_end();
2391 --EI;
2392 llvm::Value *ArgStruct = &*EI;
2393 llvm::Value *SRet = Builder.CreateStructGEP(
2394 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2395 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2396 }
2397 break;
2398
2399 case ABIArgInfo::Indirect: {
2400 auto AI = CurFn->arg_begin();
2401 if (RetAI.isSRetAfterThis())
2402 ++AI;
2403 switch (getEvaluationKind(RetTy)) {
2404 case TEK_Complex: {
2405 ComplexPairTy RT =
2406 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2407 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2408 /*isInit*/ true);
2409 break;
2410 }
2411 case TEK_Aggregate:
2412 // Do nothing; aggregates get evaluated directly into the destination.
2413 break;
2414 case TEK_Scalar:
2415 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2416 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2417 /*isInit*/ true);
2418 break;
2419 }
2420 break;
2421 }
2422
2423 case ABIArgInfo::Extend:
2424 case ABIArgInfo::Direct:
2425 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2426 RetAI.getDirectOffset() == 0) {
2427 // The internal return value temp will always have pointer-to-return-type
2428 // type; just do a load.
2429
2430 // If there is a dominating store to ReturnValue, we can elide
2431 // the load, zap the store, and usually zap the alloca.
2432 if (llvm::StoreInst *SI =
2433 findDominatingStoreToReturnValue(*this)) {
2434 // Reuse the debug location from the store unless there is
2435 // cleanup code to be emitted between the store and return
2436 // instruction.
2437 if (EmitRetDbgLoc && !AutoreleaseResult)
2438 RetDbgLoc = SI->getDebugLoc();
2439 // Get the stored value and nuke the now-dead store.
2440 RV = SI->getValueOperand();
2441 SI->eraseFromParent();
2442
2443 // If that was the only use of the return value, nuke it as well now.
2444 auto returnValueInst = ReturnValue.getPointer();
2445 if (returnValueInst->use_empty()) {
2446 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2447 alloca->eraseFromParent();
2448 ReturnValue = Address::invalid();
2449 }
2450 }
2451
2452 // Otherwise, we have to do a simple load.
2453 } else {
2454 RV = Builder.CreateLoad(ReturnValue);
2455 }
2456 } else {
2457 // If the value is offset in memory, apply the offset now.
2458 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2459
2460 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2461 }
2462
2463 // In ARC, end functions that return a retainable type with a call
2464 // to objc_autoreleaseReturnValue.
2465 if (AutoreleaseResult) {
2466 assert(getLangOpts().ObjCAutoRefCount &&
2467 !FI.isReturnsRetained() &&
2468 RetTy->isObjCRetainableType());
2469 RV = emitAutoreleaseOfResult(*this, RV);
2470 }
2471
2472 break;
2473
2474 case ABIArgInfo::Ignore:
2475 break;
2476
2477 case ABIArgInfo::Expand:
2478 llvm_unreachable("Invalid ABI kind for return argument");
2479 }
2480
2481 llvm::Instruction *Ret;
2482 if (RV) {
2483 if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2484 if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
2485 SanitizerScope SanScope(this);
2486 llvm::Value *Cond = Builder.CreateICmpNE(
2487 RV, llvm::Constant::getNullValue(RV->getType()));
2488 llvm::Constant *StaticData[] = {
2489 EmitCheckSourceLocation(EndLoc),
2490 EmitCheckSourceLocation(RetNNAttr->getLocation()),
2491 };
2492 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2493 "nonnull_return", StaticData, None);
2494 }
2495 }
2496 Ret = Builder.CreateRet(RV);
2497 } else {
2498 Ret = Builder.CreateRetVoid();
2499 }
2500
2501 if (RetDbgLoc)
2502 Ret->setDebugLoc(std::move(RetDbgLoc));
2503 }
2504
2505 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2506 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2507 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2508 }
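
// Illustrative: under the 32-bit Microsoft C++ ABI, a type such as
//   struct NonTrivial { NonTrivial(const NonTrivial &); int x; };
// gets RAA_DirectInMemory and must therefore be constructed directly into
// the callee's argument memory (inalloca) rather than passed byval.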
2509
2510 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2511 QualType Ty) {
2512 // FIXME: Generate IR in one pass, rather than going back and fixing up these
2513 // placeholders.
2514 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2515 llvm::Value *Placeholder =
2516 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2517 Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
2518
2519 // FIXME: When we generate this IR in one pass, we shouldn't need
2520 // this win32-specific alignment hack.
2521 CharUnits Align = CharUnits::fromQuantity(4);
2522
2523 return AggValueSlot::forAddr(Address(Placeholder, Align),
2524 Ty.getQualifiers(),
2525 AggValueSlot::IsNotDestructed,
2526 AggValueSlot::DoesNotNeedGCBarriers,
2527 AggValueSlot::IsNotAliased);
2528 }
2529
2530 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2531 const VarDecl *param,
2532 SourceLocation loc) {
2533 // StartFunction converted the ABI-lowered parameter(s) into a
2534 // local alloca. We need to turn that into an r-value suitable
2535 // for EmitCall.
2536 Address local = GetAddrOfLocalVar(param);
2537
2538 QualType type = param->getType();
2539
2540 // For the most part, we just need to load the alloca, except:
2541 // 1) aggregate r-values are actually pointers to temporaries, and
2542 // 2) references to non-scalars are pointers directly to the aggregate.
2543 // I don't know why references to scalars are different here.
2544 if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
2545 if (!hasScalarEvaluationKind(ref->getPointeeType()))
2546 return args.add(RValue::getAggregate(local), type);
2547
2548 // Locals which are references to scalars are represented
2549 // with allocas holding the pointer.
2550 return args.add(RValue::get(Builder.CreateLoad(local)), type);
2551 }
2552
2553 assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2554 "cannot emit delegate call arguments for inalloca arguments!");
2555
2556 args.add(convertTempToRValue(local, type, loc), type);
2557 }
2558
2559 static bool isProvablyNull(llvm::Value *addr) {
2560 return isa<llvm::ConstantPointerNull>(addr);
2561 }
2562
2563 static bool isProvablyNonNull(llvm::Value *addr) {
2564 return isa<llvm::AllocaInst>(addr);
2565 }
2566
2567 /// Emit the actual writing-back of a writeback.
2568 static void emitWriteback(CodeGenFunction &CGF,
2569 const CallArgList::Writeback &writeback) {
2570 const LValue &srcLV = writeback.Source;
2571 Address srcAddr = srcLV.getAddress();
2572 assert(!isProvablyNull(srcAddr.getPointer()) &&
2573 "shouldn't have writeback for provably null argument");
2574
2575 llvm::BasicBlock *contBB = nullptr;
2576
2577 // If the argument wasn't provably non-null, we need to null check
2578 // before doing the store.
2579 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2580 if (!provablyNonNull) {
2581 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2582 contBB = CGF.createBasicBlock("icr.done");
2583
2584 llvm::Value *isNull =
2585 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2586 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2587 CGF.EmitBlock(writebackBB);
2588 }
2589
2590 // Load the value to write back.
2591 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2592
2593 // Cast it back, in case we're writing an id to a Foo* or something.
2594 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2595 "icr.writeback-cast");
2596
2597 // Perform the writeback.
2598
2599 // If we have a "to use" value, it's something we need to emit a use
2600 // of. This has to be carefully threaded in: if it's done after the
2601 // release it's potentially undefined behavior (and the optimizer
2602 // will ignore it), and if it happens before the retain then the
2603 // optimizer could move the release there.
2604 if (writeback.ToUse) {
2605 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2606
2607 // Retain the new value. No need to block-copy here: the block's
2608 // being passed up the stack.
2609 value = CGF.EmitARCRetainNonBlock(value);
2610
2611 // Emit the intrinsic use here.
2612 CGF.EmitARCIntrinsicUse(writeback.ToUse);
2613
2614 // Load the old value (primitively).
2615 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2616
2617 // Put the new value in place (primitively).
2618 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2619
2620 // Release the old value.
2621 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2622
2623 // Otherwise, we can just do a normal lvalue store.
2624 } else {
2625 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2626 }
2627
2628 // Jump to the continuation block.
2629 if (!provablyNonNull)
2630 CGF.EmitBlock(contBB);
2631 }
2632
2633 static void emitWritebacks(CodeGenFunction &CGF,
2634 const CallArgList &args) {
2635 for (const auto &I : args.writebacks())
2636 emitWriteback(CGF, I);
2637 }
2638
2639 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2640 const CallArgList &CallArgs) {
2641 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2642 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2643 CallArgs.getCleanupsToDeactivate();
2644 // Iterate in reverse to increase the likelihood of popping the cleanup.
2645 for (const auto &I : llvm::reverse(Cleanups)) {
2646 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
2647 I.IsActiveIP->eraseFromParent();
2648 }
2649 }
2650
2651 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2652 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2653 if (uop->getOpcode() == UO_AddrOf)
2654 return uop->getSubExpr();
2655 return nullptr;
2656 }
2657
2658 /// Emit an argument that's being passed call-by-writeback. That is,
2659 /// we are passing the address of an __autoreleased temporary; it
2660 /// might be copy-initialized with the current value of the given
2661 /// address, but it will definitely be copied out of after the call.
2662 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2663 const ObjCIndirectCopyRestoreExpr *CRE) {
2664 LValue srcLV;
2665
2666 // Make an optimistic effort to emit the address as an l-value.
2667 // This can fail if the argument expression is more complicated.
2668 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2669 srcLV = CGF.EmitLValue(lvExpr);
2670
2671 // Otherwise, just emit it as a scalar.
2672 } else {
2673 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
2674
2675 QualType srcAddrType =
2676 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2677 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
2678 }
2679 Address srcAddr = srcLV.getAddress();
2680
2681 // The dest and src types don't necessarily match in LLVM terms
2682 // because of the crazy ObjC compatibility rules.
2683
2684 llvm::PointerType *destType =
2685 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2686
2687 // If the address is a constant null, just pass the appropriate null.
2688 if (isProvablyNull(srcAddr.getPointer())) {
2689 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2690 CRE->getType());
2691 return;
2692 }
2693
2694 // Create the temporary.
2695 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
2696 CGF.getPointerAlign(),
2697 "icr.temp");
2698 // Loading an l-value can introduce a cleanup if the l-value is __weak,
2699 // and that cleanup will be conditional if we can't prove that the l-value
2700 // isn't null, so we need to register a dominating point so that the cleanups
2701 // system will make valid IR.
2702 CodeGenFunction::ConditionalEvaluation condEval(CGF);
2703
2704 // Zero-initialize it if we're not doing a copy-initialization.
2705 bool shouldCopy = CRE->shouldCopy();
2706 if (!shouldCopy) {
2707 llvm::Value *null =
2708 llvm::ConstantPointerNull::get(
2709 cast<llvm::PointerType>(destType->getElementType()));
2710 CGF.Builder.CreateStore(null, temp);
2711 }
2712
2713 llvm::BasicBlock *contBB = nullptr;
2714 llvm::BasicBlock *originBB = nullptr;
2715
2716 // If the address is *not* known to be non-null, we need to switch.
2717 llvm::Value *finalArgument;
2718
2719 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2720 if (provablyNonNull) {
2721 finalArgument = temp.getPointer();
2722 } else {
2723 llvm::Value *isNull =
2724 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2725
2726 finalArgument = CGF.Builder.CreateSelect(isNull,
2727 llvm::ConstantPointerNull::get(destType),
2728 temp.getPointer(), "icr.argument");
2729
2730 // If we need to copy, then the load has to be conditional, which
2731 // means we need control flow.
2732 if (shouldCopy) {
2733 originBB = CGF.Builder.GetInsertBlock();
2734 contBB = CGF.createBasicBlock("icr.cont");
2735 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
2736 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
2737 CGF.EmitBlock(copyBB);
2738 condEval.begin(CGF);
2739 }
2740 }
2741
2742 llvm::Value *valueToUse = nullptr;
2743
2744 // Perform a copy if necessary.
2745 if (shouldCopy) {
2746 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
2747 assert(srcRV.isScalar());
2748
2749 llvm::Value *src = srcRV.getScalarVal();
2750 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
2751 "icr.cast");
2752
2753 // Use an ordinary store, not a store-to-lvalue.
2754 CGF.Builder.CreateStore(src, temp);
2755
2756 // If optimization is enabled, and the value was held in a
2757 // __strong variable, we need to tell the optimizer that this
2758 // value has to stay alive until we're doing the store back.
2759 // This is because the temporary is effectively unretained,
2760 // and so otherwise we can violate the high-level semantics.
2761 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2762 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
2763 valueToUse = src;
2764 }
2765 }
2766
2767 // Finish the control flow if we needed it.
2768 if (shouldCopy && !provablyNonNull) {
2769 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
2770 CGF.EmitBlock(contBB);
2771
2772 // Make a phi for the value to intrinsically use.
2773 if (valueToUse) {
2774 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
2775 "icr.to-use");
2776 phiToUse->addIncoming(valueToUse, copyBB);
2777 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
2778 originBB);
2779 valueToUse = phiToUse;
2780 }
2781
2782 condEval.end(CGF);
2783 }
2784
2785 args.addWriteback(srcLV, temp, valueToUse);
2786 args.add(RValue::get(finalArgument), CRE->getType());
2787 }
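
// Illustrative source pattern for the writeback machinery above (ObjC ARC;
// the selector is hypothetical):
//   NSError *err = nil;                 // __strong local
//   [obj doThingAndReturnError:&err];   // parameter: NSError *__autoreleasing *
// We pass the address of the fresh 'icr.temp' temporary instead of &err,
// and emitWriteback() copies the temporary back into 'err' after the call.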
2788
2789 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
2790 assert(!StackBase && !StackCleanup.isValid());
2791
2792 // Save the stack.
2793 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
2794 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
2795 }
2796
2797 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
2798 if (StackBase) {
2799 // Restore the stack after the call.
2800 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
2801 CGF.Builder.CreateCall(F, StackBase);
2802 }
2803 }
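
// Illustrative IR shape for an inalloca call bracketed by these two
// helpers (a sketch; types elided):
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)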
2804
2805 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
2806 SourceLocation ArgLoc,
2807 const FunctionDecl *FD,
2808 unsigned ParmNum) {
2809 if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
2810 return;
2811 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
2812 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
2813 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
2814 if (!NNAttr)
2815 return;
2816 SanitizerScope SanScope(this);
2817 assert(RV.isScalar());
2818 llvm::Value *V = RV.getScalarVal();
2819 llvm::Value *Cond =
2820 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
2821 llvm::Constant *StaticData[] = {
2822 EmitCheckSourceLocation(ArgLoc),
2823 EmitCheckSourceLocation(NNAttr->getLocation()),
2824 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
2825 };
2826 EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
2827 "nonnull_arg", StaticData, None);
2828 }
2829
2830 void CodeGenFunction::EmitCallArgs(
2831 CallArgList &Args, ArrayRef<QualType> ArgTypes,
2832 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
2833 const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
2834 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
2835
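// Handles parameters declared with pass_object_size. For a declaration
// like (invented for illustration):
//   void fill(char *buf __attribute__((pass_object_size(0))), char c);
// every call site passes an extra size_t, computed as if by
// __builtin_object_size(buf, 0), immediately after the pointer argument.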
2836 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
2837 if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
2838 return;
2839 auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
2840 if (PS == nullptr)
2841 return;
2842
2843 const auto &Context = getContext();
2844 auto SizeTy = Context.getSizeType();
2845 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
2846 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
2847 Args.add(RValue::get(V), SizeTy);
2848 };
2849
2850 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
2851 // because arguments are destroyed left to right in the callee.
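// (Right-to-left evaluation means the first parameter is constructed last
// and, with the callee destroying left to right, destroyed first; this
// preserves the usual reverse-of-construction destruction order.)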
2852 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2853 // Insert a stack save if we're going to need any inalloca args.
2854 bool HasInAllocaArgs = false;
2855 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
2856 I != E && !HasInAllocaArgs; ++I)
2857 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
2858 if (HasInAllocaArgs) {
2859 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2860 Args.allocateArgumentMemory(*this);
2861 }
2862
2863 // Evaluate each argument.
2864 size_t CallArgsStart = Args.size();
2865 for (int I = ArgTypes.size() - 1; I >= 0; --I) {
2866 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
2867 EmitCallArg(Args, *Arg, ArgTypes[I]);
2868 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
2869 CalleeDecl, ParamsToSkip + I);
2870 MaybeEmitImplicitObjectSize(I, *Arg);
2871 }
2872
2873 // Un-reverse the arguments we just evaluated so they match up with the LLVM
2874 // IR function.
2875 std::reverse(Args.begin() + CallArgsStart, Args.end());
2876 return;
2877 }
2878
2879 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
2880 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
2881 assert(Arg != ArgRange.end());
2882 EmitCallArg(Args, *Arg, ArgTypes[I]);
2883 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
2884 CalleeDecl, ParamsToSkip + I);
2885 MaybeEmitImplicitObjectSize(I, *Arg);
2886 }
2887 }
2888
2889 namespace {
2890
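// Cleanup that destroys an aggregate argument that was evaluated into its
// slot but never actually passed to the callee (e.g. because evaluating a
// later argument threw). Used when the MS C++ ABI makes the callee
// responsible for destroying its arguments.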
2891 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
2892 DestroyUnpassedArg(Address Addr, QualType Ty)
2893 : Addr(Addr), Ty(Ty) {}
2894
2895 Address Addr;
2896 QualType Ty;
2897
2898 void Emit(CodeGenFunction &CGF, Flags flags) override {
2899 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
2900 assert(!Dtor->isTrivial());
2901 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
2902 /*Delegating=*/false, Addr);
2903 }
2904 };
2905
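// RAII helper that disables debug info while a default argument expression
// (CXXDefaultArgExpr) is emitted, so that locations from the default
// argument's definition do not leak into the call site's line table.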
2906 struct DisableDebugLocationUpdates {
2907 CodeGenFunction &CGF;
2908 bool disabledDebugInfo;
2909 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
2910 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
2911 CGF.disableDebugInfo();
2912 }
2913 ~DisableDebugLocationUpdates() {
2914 if (disabledDebugInfo)
2915 CGF.enableDebugInfo();
2916 }
2917 };
2918
2919 } // end anonymous namespace
2920
2921 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2922 QualType type) {
2923 DisableDebugLocationUpdates Dis(*this, E);
2924 if (const ObjCIndirectCopyRestoreExpr *CRE
2925 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2926 assert(getLangOpts().ObjCAutoRefCount);
2927 assert(getContext().hasSameType(E->getType(), type));
2928 return emitWritebackArg(*this, args, CRE);
2929 }
2930
2931 assert(type->isReferenceType() == E->isGLValue() &&
2932 "reference binding to unmaterialized r-value!");
2933
2934 if (E->isGLValue()) {
2935 assert(E->getObjectKind() == OK_Ordinary);
2936 return args.add(EmitReferenceBindingToExpr(E), type);
2937 }
2938
2939 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2940
2941 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2942 // However, we still have to push an EH-only cleanup in case we unwind before
2943 // we make it to the call.
2944 if (HasAggregateEvalKind &&
2945 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2946 // If we're using inalloca, use the argument memory. Otherwise, use a
2947 // temporary.
2948 AggValueSlot Slot;
2949 if (args.isUsingInAlloca())
2950 Slot = createPlaceholderSlot(*this, type);
2951 else
2952 Slot = CreateAggTemp(type, "agg.tmp");
2953
2954 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2955 bool DestroyedInCallee =
2956 RD && RD->hasNonTrivialDestructor() &&
2957 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
2958 if (DestroyedInCallee)
2959 Slot.setExternallyDestructed();
2960
2961 EmitAggExpr(E, Slot);
2962 RValue RV = Slot.asRValue();
2963 args.add(RV, type);
2964
2965 if (DestroyedInCallee) {
2966 // Push an EH-only cleanup that destroys the argument if an exception
2967 // is thrown before the call is made. The marker emitted below records
2968 // the first instruction where the cleanup is active.
2969 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
2970 type);
2971 // This unreachable is a temporary marker which will be removed later.
2972 llvm::Instruction *IsActive = Builder.CreateUnreachable();
2973 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2974 }
2975 return;
2976 }
2977
2978 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2979 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2980 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2981 assert(L.isSimple());
2982 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2983 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2984 } else {
2985 // We can't represent a misaligned lvalue in the CallArgList, so copy
2986 // to an aligned temporary now.
2987 Address tmp = CreateMemTemp(type);
2988 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
2989 args.add(RValue::getAggregate(tmp), type);
2990 }
2991 return;
2992 }
2993
2994 args.add(EmitAnyExprToTemp(E), type);
2995 }
2996
2997 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
2998 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
2999 // implicitly widens null pointer constants that are arguments to varargs
3000 // functions to pointer-sized ints.
3001 if (!getTarget().getTriple().isOSWindows())
3002 return Arg->getType();
3003
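// A sketch of the case being handled: on Win64, NULL is a plain int 0, so
// a variadic call such as printf("%p", NULL) must widen the constant to
// the pointer-sized intptr type; otherwise the callee would read a
// partially-initialized argument slot.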
3004 if (Arg->getType()->isIntegerType() &&
3005 getContext().getTypeSize(Arg->getType()) <
3006 getContext().getTargetInfo().getPointerWidth(0) &&
3007 Arg->isNullPointerConstant(getContext(),
3008 Expr::NPC_ValueDependentIsNotNull)) {
3009 return getContext().getIntPtrType();
3010 }
3011
3012 return Arg->getType();
3013 }
3014
3015 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3016 // optimizer it can aggressively ignore unwind edges.
3017 void
3018 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3019 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3020 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3021 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3022 CGM.getNoObjCARCExceptionsMetadata());
3023 }
3024
3025 /// Emits a call to the given no-arguments nounwind runtime function.
3026 llvm::CallInst *
3027 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3028 const llvm::Twine &name) {
3029 return EmitNounwindRuntimeCall(callee, None, name);
3030 }
3031
3032 /// Emits a call to the given nounwind runtime function.
3033 llvm::CallInst *
3034 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3035 ArrayRef<llvm::Value*> args,
3036 const llvm::Twine &name) {
3037 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3038 call->setDoesNotThrow();
3039 return call;
3040 }
3041
3042 /// Emits a simple call (never an invoke) to the given no-arguments
3043 /// runtime function.
3044 llvm::CallInst *
3045 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3046 const llvm::Twine &name) {
3047 return EmitRuntimeCall(callee, None, name);
3048 }
3049
3050 /// Emits a simple call (never an invoke) to the given runtime
3051 /// function.
3052 llvm::CallInst *
3053 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3054 ArrayRef<llvm::Value*> args,
3055 const llvm::Twine &name) {
3056 llvm::CallInst *call = Builder.CreateCall(callee, args, name);
3057 call->setCallingConv(getRuntimeCC());
3058 return call;
3059 }
3060
3061 // Calls which may throw must have operand bundles indicating which funclet
3062 // they are nested within.
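// In IR this appears as an operand bundle on the call site, e.g. (roughly):
//   call void @g() [ "funclet"(token %cleanuppad) ]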
3063 static void
3064 getBundlesForFunclet(llvm::Value *Callee,
3065 llvm::Instruction *CurrentFuncletPad,
3066 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3067 // There is no need for a funclet operand bundle if we aren't inside a funclet.
3068 if (!CurrentFuncletPad)
3069 return;
3070
3071 // Skip intrinsics which cannot throw.
3072 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3073 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3074 return;
3075
3076 BundleList.emplace_back("funclet", CurrentFuncletPad);
3077 }
3078
3079 /// Emits a call or invoke to the given noreturn runtime function.
3080 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3081 ArrayRef<llvm::Value*> args) {
3082 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3083 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3084
3085 if (getInvokeDest()) {
3086 llvm::InvokeInst *invoke =
3087 Builder.CreateInvoke(callee,
3088 getUnreachableBlock(),
3089 getInvokeDest(),
3090 args,
3091 BundleList);
3092 invoke->setDoesNotReturn();
3093 invoke->setCallingConv(getRuntimeCC());
3094 } else {
3095 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3096 call->setDoesNotReturn();
3097 call->setCallingConv(getRuntimeCC());
3098 Builder.CreateUnreachable();
3099 }
3100 }
3101
3102 /// Emits a call or invoke instruction to the given nullary runtime
3103 /// function.
3104 llvm::CallSite
3105 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3106 const Twine &name) {
3107 return EmitRuntimeCallOrInvoke(callee, None, name);
3108 }
3109
3110 /// Emits a call or invoke instruction to the given runtime function.
3111 llvm::CallSite
3112 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3113 ArrayRef<llvm::Value*> args,
3114 const Twine &name) {
3115 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3116 callSite.setCallingConv(getRuntimeCC());
3117 return callSite;
3118 }
3119
3120 /// Emits a call or invoke instruction to the given function, depending
3121 /// on the current state of the EH stack.
3122 llvm::CallSite
3123 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3124 ArrayRef<llvm::Value *> Args,
3125 const Twine &Name) {
3126 llvm::BasicBlock *InvokeDest = getInvokeDest();
3127
3128 llvm::Instruction *Inst;
3129 if (!InvokeDest)
3130 Inst = Builder.CreateCall(Callee, Args, Name);
3131 else {
3132 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3133 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
3134 EmitBlock(ContBB);
3135 }
3136
3137 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3138 // optimizer it can aggressively ignore unwind edges.
3139 if (CGM.getLangOpts().ObjCAutoRefCount)
3140 AddObjCARCExceptionMetadata(Inst);
3141
3142 return llvm::CallSite(Inst);
3143 }
3144
3145 /// \brief Store a non-aggregate value to an address to initialize it. For
3146 /// initialization, a non-atomic store will be used.
3147 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3148 LValue Dst) {
3149 if (Src.isScalar())
3150 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3151 else
3152 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3153 }
3154
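// Queue a placeholder instruction to be replaced (RAUW'd) with its real
// value; the replacement is applied later, once it is safe to rewrite all
// of the placeholder's uses.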
3155 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3156 llvm::Value *New) {
3157 DeferredReplacements.push_back(std::make_pair(Old, New));
3158 }
3159
3160 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3161 llvm::Value *Callee,
3162 ReturnValueSlot ReturnValue,
3163 const CallArgList &CallArgs,
3164 CGCalleeInfo CalleeInfo,
3165 llvm::Instruction **callOrInvoke) {
3166 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3167
3168 // Handle struct-return functions by passing a pointer to the
3169 // location that we would like to return into.
3170 QualType RetTy = CallInfo.getReturnType();
3171 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3172
3173 llvm::FunctionType *IRFuncTy =
3174 cast<llvm::FunctionType>(
3175 cast<llvm::PointerType>(Callee->getType())->getElementType());
3176
3177 // If we're using inalloca, insert the allocation after the stack save.
3178 // FIXME: Do this earlier rather than hacking it in here!
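// The result is a single struct-typed alloca, e.g. (illustrative):
//   %argmem = alloca <{ i32, %struct.A }>, align 4
// which is passed to the callee with the 'inalloca' parameter attribute;
// its fields are filled in below via createInAllocaStructGEP.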
3179 Address ArgMemory = Address::invalid();
3180 const llvm::StructLayout *ArgMemoryLayout = nullptr;
3181 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3182 ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
3183 llvm::Instruction *IP = CallArgs.getStackBase();
3184 llvm::AllocaInst *AI;
3185 if (IP) {
3186 IP = IP->getNextNode();
3187 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
3188 } else {
3189 AI = CreateTempAlloca(ArgStruct, "argmem");
3190 }
3191 auto Align = CallInfo.getArgStructAlignment();
3192 AI->setAlignment(Align.getQuantity());
3193 AI->setUsedWithInAlloca(true);
3194 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3195 ArgMemory = Address(AI, Align);
3196 }
3197
3198 // Helper function to drill into the inalloca allocation.
3199 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3200 auto FieldOffset =
3201 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3202 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3203 };
3204
3205 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3206 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3207
3208 // If the call returns a temporary with struct return, create a temporary
3209 // alloca to hold the result, unless one is given to us.
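// That is, a call returning a non-trivial aggregate is lowered roughly as
//   call void @f(%struct.S* sret %tmp, ...)
// with the caller-provided (or freshly created) temporary passed as a
// hidden first argument.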
3210 Address SRetPtr = Address::invalid();
3211 size_t UnusedReturnSize = 0;
3212 if (RetAI.isIndirect() || RetAI.isInAlloca()) {
3213 if (!ReturnValue.isNull()) {
3214 SRetPtr = ReturnValue.getValue();
3215 } else {
3216 SRetPtr = CreateMemTemp(RetTy);
3217 if (HaveInsertPoint() && ReturnValue.isUnused()) {
3218 uint64_t size =
3219 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3220 if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3221 UnusedReturnSize = size;
3222 }
3223 }
3224 if (IRFunctionArgs.hasSRetArg()) {
3225 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3226 } else {
3227 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3228 Builder.CreateStore(SRetPtr.getPointer(), Addr);
3229 }
3230 }
3231
3232 assert(CallInfo.arg_size() == CallArgs.size() &&
3233 "Mismatch between function signature & arguments.");
3234 unsigned ArgNo = 0;
3235 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3236 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3237 I != E; ++I, ++info_it, ++ArgNo) {
3238 const ABIArgInfo &ArgInfo = info_it->info;
3239 RValue RV = I->RV;
3240
3241 // Insert a padding argument to ensure proper alignment.
3242 if (IRFunctionArgs.hasPaddingArg(ArgNo))
3243 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3244 llvm::UndefValue::get(ArgInfo.getPaddingType());
3245
3246 unsigned FirstIRArg, NumIRArgs;
3247 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3248
3249 switch (ArgInfo.getKind()) {
3250 case ABIArgInfo::InAlloca: {
3251 assert(NumIRArgs == 0);
3252 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3253 if (RV.isAggregate()) {
3254 // Replace the placeholder with the appropriate argument slot GEP.
3255 llvm::Instruction *Placeholder =
3256 cast<llvm::Instruction>(RV.getAggregatePointer());
3257 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3258 Builder.SetInsertPoint(Placeholder);
3259 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3260 Builder.restoreIP(IP);
3261 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3262 } else {
3263 // Store the RValue into the argument struct.
3264 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3265 unsigned AS = Addr.getType()->getPointerAddressSpace();
3266 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3267 // There are some cases where a trivial bitcast is not avoidable. The
3268 // definition of a type later in a translation unit may change its type
3269 // from {}* to (%struct.foo*)*.
3270 if (Addr.getType() != MemType)
3271 Addr = Builder.CreateBitCast(Addr, MemType);
3272 LValue argLV = MakeAddrLValue(Addr, I->Ty);
3273 EmitInitStoreOfNonAggregate(*this, RV, argLV);
3274 }
3275 break;
3276 }
3277
3278 case ABIArgInfo::Indirect: {
3279 assert(NumIRArgs == 1);
3280 if (RV.isScalar() || RV.isComplex()) {
3281 // Make a temporary alloca to pass the argument.
3282 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3283 IRCallArgs[FirstIRArg] = Addr.getPointer();
3284
3285 LValue argLV = MakeAddrLValue(Addr, I->Ty);
3286 EmitInitStoreOfNonAggregate(*this, RV, argLV);
3287 } else {
3288 // We want to avoid creating an unnecessary temporary+copy here;
3289 // however, we need one in three cases:
3290 // 1. If the argument is not byval, and we are required to copy the
3291 // source. (This case doesn't occur on any common architecture.)
3292 // 2. If the argument is byval, RV is not sufficiently aligned, and
3293 // we cannot force it to be sufficiently aligned.
3294 // 3. If the argument is byval, but RV is located in an address space
3295 // different than that of the argument (0).
3296 Address Addr = RV.getAggregateAddress();
3297 CharUnits Align = ArgInfo.getIndirectAlign();
3298 const llvm::DataLayout *TD = &CGM.getDataLayout();
3299 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3300 const unsigned ArgAddrSpace =
3301 (FirstIRArg < IRFuncTy->getNumParams()
3302 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
3303 : 0);
3304 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3305 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3306 llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3307 Align.getQuantity(), *TD)
3308 < Align.getQuantity()) ||
3309 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3310 // Create an aligned temporary, and copy to it.
3311 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3312 IRCallArgs[FirstIRArg] = AI.getPointer();
3313 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
3314 } else {
3315 // Skip the extra memcpy call.
3316 IRCallArgs[FirstIRArg] = Addr.getPointer();
3317 }
3318 }
3319 break;
3320 }
3321
3322 case ABIArgInfo::Ignore:
3323 assert(NumIRArgs == 0);
3324 break;
3325
3326 case ABIArgInfo::Extend:
3327 case ABIArgInfo::Direct: {
3328 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3329 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3330 ArgInfo.getDirectOffset() == 0) {
3331 assert(NumIRArgs == 1);
3332 llvm::Value *V;
3333 if (RV.isScalar())
3334 V = RV.getScalarVal();
3335 else
3336 V = Builder.CreateLoad(RV.getAggregateAddress());
3337
3338 // We might have to widen integers, but we should never truncate.
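// (For example, a bool emitted as an i1 may need to be widened to the i8
// the ABI expects; only zero-extension is ever performed here.)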
3339 if (ArgInfo.getCoerceToType() != V->getType() &&
3340 V->getType()->isIntegerTy())
3341 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3342
3343 // If the argument doesn't match, perform a bitcast to coerce it. This
3344 // can happen due to trivial type mismatches.
3345 if (FirstIRArg < IRFuncTy->getNumParams() &&
3346 V->getType() != IRFuncTy->getParamType(FirstIRArg))
3347 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
3348 IRCallArgs[FirstIRArg] = V;
3349 break;
3350 }
3351
3352 // FIXME: Avoid the conversion through memory if possible.
3353 Address Src = Address::invalid();
3354 if (RV.isScalar() || RV.isComplex()) {
3355 Src = CreateMemTemp(I->Ty, "coerce");
3356 LValue SrcLV = MakeAddrLValue(Src, I->Ty);
3357 EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
3358 } else {
3359 Src = RV.getAggregateAddress();
3360 }
3361
3362 // If the value is offset in memory, apply the offset now.
3363 Src = emitAddressAtOffset(*this, Src, ArgInfo);
3364
3365 // Fast-isel and the optimizer generally like scalar values better than
3366 // FCAs, so we flatten them if this is safe to do for this argument.
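// For example, an argument coerced to { i64, i64 } is passed as two
// separate i64 values rather than as one first-class aggregate (FCA).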
3367 llvm::StructType *STy =
3368 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3369 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3370 llvm::Type *SrcTy = Src.getType()->getElementType();
3371 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3372 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3373
3374 // If the source type is smaller than the destination type of the
3375 // coerce-to logic, copy the source value into a temp alloca the size
3376 // of the destination type to allow loading all of it. The bits past
3377 // the source value are left undef.
3378 if (SrcSize < DstSize) {
3379 Address TempAlloca
3380 = CreateTempAlloca(STy, Src.getAlignment(),
3381 Src.getName() + ".coerce");
3382 Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
3383 Src = TempAlloca;
3384 } else {
3385 Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
3386 }
3387
3388 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3389 assert(NumIRArgs == STy->getNumElements());
3390 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3391 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3392 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3393 llvm::Value *LI = Builder.CreateLoad(EltPtr);
3394 IRCallArgs[FirstIRArg + i] = LI;
3395 }
3396 } else {
3397 // In the simple case, just pass the coerced loaded value.
3398 assert(NumIRArgs == 1);
3399 IRCallArgs[FirstIRArg] =
3400 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
3401 }
3402
3403 break;
3404 }
3405
3406 case ABIArgInfo::Expand:
3407 unsigned IRArgPos = FirstIRArg;
3408 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
3409 assert(IRArgPos == FirstIRArg + NumIRArgs);
3410 break;
3411 }
3412 }
3413
3414 if (ArgMemory.isValid()) {
3415 llvm::Value *Arg = ArgMemory.getPointer();
3416 if (CallInfo.isVariadic()) {
3417 // When passing non-POD arguments by value to variadic functions, we will
3418 // end up with a variadic prototype and an inalloca call site. In such
3419 // cases, we can't do any parameter mismatch checks. Give up and bitcast
3420 // the callee.
3421 unsigned CalleeAS =
3422 cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
3423 Callee = Builder.CreateBitCast(
3424 Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
3425 } else {
3426 llvm::Type *LastParamTy =
3427 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
3428 if (Arg->getType() != LastParamTy) {
3429 #ifndef NDEBUG
3430 // Assert that these structs have equivalent element types.
3431 llvm::StructType *FullTy = CallInfo.getArgStruct();
3432 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
3433 cast<llvm::PointerType>(LastParamTy)->getElementType());
3434 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
3435 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
3436 DE = DeclaredTy->element_end(),
3437 FI = FullTy->element_begin();
3438 DI != DE; ++DI, ++FI)
3439 assert(*DI == *FI);
3440 #endif
3441 Arg = Builder.CreateBitCast(Arg, LastParamTy);
3442 }
3443 }
3444 assert(IRFunctionArgs.hasInallocaArg());
3445 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
3446 }
3447
3448 if (!CallArgs.getCleanupsToDeactivate().empty())
3449 deactivateArgCleanupsBeforeCall(*this, CallArgs);
3450
3451 // If the callee is a bitcast of a function to a varargs pointer to function
3452 // type, check to see if we can remove the bitcast. This handles some cases
3453 // with unprototyped functions.
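// For example, a K&R-style 'void f();' called as 'f(1)' is emitted as a
// call through a bitcast of f; when the parameter types line up as checked
// below, the bitcast can be stripped and f called directly.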
3454 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
3455 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
3456 llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
3457 llvm::FunctionType *CurFT =
3458 cast<llvm::FunctionType>(CurPT->getElementType());
3459 llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
3460
3461 if (CE->getOpcode() == llvm::Instruction::BitCast &&
3462 ActualFT->getReturnType() == CurFT->getReturnType() &&
3463 ActualFT->getNumParams() == CurFT->getNumParams() &&
3464 ActualFT->getNumParams() == IRCallArgs.size() &&
3465 (CurFT->isVarArg() || !ActualFT->isVarArg())) {
3466 bool ArgsMatch = true;
3467 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
3468 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
3469 ArgsMatch = false;
3470 break;
3471 }
3472
3473 // Strip the cast if we can get away with it. This is a nice cleanup,
3474 // but also allows us to inline the function at -O0 if it is marked
3475 // always_inline.
3476 if (ArgsMatch)
3477 Callee = CalleeF;
3478 }
3479 }
3480
3481 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
3482 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
3483 // The inalloca argument can have a different type.
3484 if (IRFunctionArgs.hasInallocaArg() &&
3485 i == IRFunctionArgs.getInallocaArgNo())
3486 continue;
3487 if (i < IRFuncTy->getNumParams())
3488 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
3489 }
3490
3491 unsigned CallingConv;
3492 CodeGen::AttributeListType AttributeList;
3493 CGM.ConstructAttributeList(CallInfo, CalleeInfo, AttributeList, CallingConv,
3494 true);
3495 llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
3496 AttributeList);
3497
3498 bool CannotThrow;
3499 if (currentFunctionUsesSEHTry()) {
3500 // SEH cares about asynchronous exceptions, so everything can "throw."
3501 CannotThrow = false;
3502 } else if (isCleanupPadScope() &&
3503 EHPersonality::get(*this).isMSVCXXPersonality()) {
3504 // The MSVC++ personality will implicitly terminate the program if an
3505 // exception is thrown. An unwind edge cannot be reached.
3506 CannotThrow = true;
3507 } else {
3508 // Otherwise, nounwind call sites will never throw.
3509 CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
3510 llvm::Attribute::NoUnwind);
3511 }
3512 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
3513
3514 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3515 getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
3516
3517 llvm::CallSite CS;
3518 if (!InvokeDest) {
3519 CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
3520 } else {
3521 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
3522 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
3523 BundleList);
3524 EmitBlock(Cont);
3525 }
3526 if (callOrInvoke)
3527 *callOrInvoke = CS.getInstruction();
3528
3529 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
3530 !CS.hasFnAttr(llvm::Attribute::NoInline))
3531 Attrs =
3532 Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3533 llvm::Attribute::AlwaysInline);
3534
3535 // Disable inlining inside SEH __try blocks.
3536 if (isSEHTryScope())
3537 Attrs =
3538 Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
3539 llvm::Attribute::NoInline);
3540
3541 CS.setAttributes(Attrs);
3542 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
3543
3544 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3545 // optimizer it can aggressively ignore unwind edges.
3546 if (CGM.getLangOpts().ObjCAutoRefCount)
3547 AddObjCARCExceptionMetadata(CS.getInstruction());
3548
3549 // If the call doesn't return, finish the basic block and clear the
3550 // insertion point; this allows the rest of IRgen to discard
3551 // unreachable code.
3552 if (CS.doesNotReturn()) {
3553 if (UnusedReturnSize)
3554 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3555 SRetPtr.getPointer());
3556
3557 Builder.CreateUnreachable();
3558 Builder.ClearInsertionPoint();
3559
3560 // FIXME: For now, emit a dummy basic block because expr emitters in
3561 // general are not ready to handle emitting expressions at unreachable
3562 // points.
3563 EnsureInsertPoint();
3564
3565 // Return a reasonable RValue.
3566 return GetUndefRValue(RetTy);
3567 }
3568
3569 llvm::Instruction *CI = CS.getInstruction();
3570 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
3571 CI->setName("call");
3572
3573 // Emit any writebacks immediately. Arguably this should happen
3574 // after any return-value munging.
3575 if (CallArgs.hasWritebacks())
3576 emitWritebacks(*this, CallArgs);
3577
3578 // The stack cleanup for inalloca arguments has to run out of the normal
3579 // lexical order, so deactivate it and run it manually here.
3580 CallArgs.freeArgumentMemory(*this);
3581
3582 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
3583 const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
3584 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
3585 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
3586 }
3587
3588 RValue Ret = [&] {
3589 switch (RetAI.getKind()) {
3590 case ABIArgInfo::InAlloca:
3591 case ABIArgInfo::Indirect: {
3592 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
3593 if (UnusedReturnSize)
3594 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
3595 SRetPtr.getPointer());
3596 return ret;
3597 }
3598
3599 case ABIArgInfo::Ignore:
3600 // The callee's return value is being ignored, but we still have to
3601 // construct an appropriate return value for our caller.
3602 return GetUndefRValue(RetTy);
3603
3604 case ABIArgInfo::Extend:
3605 case ABIArgInfo::Direct: {
3606 llvm::Type *RetIRTy = ConvertType(RetTy);
3607 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
3608 switch (getEvaluationKind(RetTy)) {
3609 case TEK_Complex: {
3610 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
3611 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
3612 return RValue::getComplex(std::make_pair(Real, Imag));
3613 }
3614 case TEK_Aggregate: {
3615 Address DestPtr = ReturnValue.getValue();
3616 bool DestIsVolatile = ReturnValue.isVolatile();
3617
3618 if (!DestPtr.isValid()) {
3619 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
3620 DestIsVolatile = false;
3621 }
3622 BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
3623 return RValue::getAggregate(DestPtr);
3624 }
3625 case TEK_Scalar: {
3626 // If the argument doesn't match, perform a bitcast to coerce it. This
3627 // can happen due to trivial type mismatches.
3628 llvm::Value *V = CI;
3629 if (V->getType() != RetIRTy)
3630 V = Builder.CreateBitCast(V, RetIRTy);
3631 return RValue::get(V);
3632 }
3633 }
3634 llvm_unreachable("bad evaluation kind");
3635 }
3636
3637 Address DestPtr = ReturnValue.getValue();
3638 bool DestIsVolatile = ReturnValue.isVolatile();
3639
3640 if (!DestPtr.isValid()) {
3641 DestPtr = CreateMemTemp(RetTy, "coerce");
3642 DestIsVolatile = false;
3643 }
3644
3645 // If the value is offset in memory, apply the offset now.
3646 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
3647 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
3648
3649 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
3650 }
3651
3652 case ABIArgInfo::Expand:
3653 llvm_unreachable("Invalid ABI kind for return argument");
3654 }
3655
3656 llvm_unreachable("Unhandled ABIArgInfo::Kind");
3657 } ();
3658
3659 const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
3660
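// Honor assume_aligned on the callee's return value. For a declaration
// like (invented for illustration):
//   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
// we emit an llvm.assume-based alignment assumption on the returned
// pointer.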
3661 if (Ret.isScalar() && TargetDecl) {
3662 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
3663 llvm::Value *OffsetValue = nullptr;
3664 if (const auto *Offset = AA->getOffset())
3665 OffsetValue = EmitScalarExpr(Offset);
3666
3667 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
3668 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
3669 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
3670 OffsetValue);
3671 }
3672 }
3673
3674 return Ret;
3675 }
3676
3677 /* VarArg handling */
3678
3679 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
3680 VAListAddr = VE->isMicrosoftABI()
3681 ? EmitMSVAListRef(VE->getSubExpr())
3682 : EmitVAListRef(VE->getSubExpr());
3683 QualType Ty = VE->getType();
3684 if (VE->isMicrosoftABI())
3685 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
3686 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
3687 }
3688