//===-- NVPTXAsmPrinter.cpp - NVPTX LLVM assembly writer ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a printer that converts from our internal representation
// of machine-dependent LLVM code to NVPTX assembly language.
//
//===----------------------------------------------------------------------===//

#include "NVPTXAsmPrinter.h"
#include "InstPrinter/NVPTXInstPrinter.h"
#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "MCTargetDesc/NVPTXMCAsmInfo.h"
#include "NVPTX.h"
#include "NVPTXMCExpr.h"
#include "NVPTXMachineFunctionInfo.h"
#include "NVPTXRegisterInfo.h"
#include "NVPTXSubtarget.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
#include "cl_common_defines.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <new>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

#define DEPOTNAME "__local_depot"

/// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V
/// depends.
static void
DiscoverDependentGlobals(const Value *V,
                         DenseSet<const GlobalVariable *> &Globals) {
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    Globals.insert(GV);
  else {
    if (const User *U = dyn_cast<User>(V)) {
      for (unsigned i = 0, e = U->getNumOperands(); i != e; ++i) {
        DiscoverDependentGlobals(U->getOperand(i), Globals);
      }
    }
  }
}

/// VisitGlobalVariableForEmission - Add \p GV to the list of GlobalVariable
/// instances to be emitted, but only after any dependents have been added
/// first.
static void
VisitGlobalVariableForEmission(const GlobalVariable *GV,
                               SmallVectorImpl<const GlobalVariable *> &Order,
                               DenseSet<const GlobalVariable *> &Visited,
                               DenseSet<const GlobalVariable *> &Visiting) {
  // Have we already visited this one?
  if (Visited.count(GV))
    return;

  // Do we have a circular dependency?
  if (!Visiting.insert(GV).second)
    report_fatal_error("Circular dependency found in global variable set");

  // Make sure we visit all dependents first
  DenseSet<const GlobalVariable *> Others;
  for (unsigned i = 0, e = GV->getNumOperands(); i != e; ++i)
    DiscoverDependentGlobals(GV->getOperand(i), Others);

  for (DenseSet<const GlobalVariable *>::iterator I = Others.begin(),
                                                  E = Others.end();
       I != E; ++I)
    VisitGlobalVariableForEmission(*I, Order, Visited, Visiting);

  // Now we can visit ourself
  Order.push_back(GV);
  Visited.insert(GV);
  Visiting.erase(GV);
}
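// For illustration (hypothetical IR, not taken from this file): given
//   @init = global i32* @data
// DiscoverDependentGlobals() reports @data as a dependent of @init, so the
// emission order produced above is @data first, then @init, matching ptxas's
// requirement that globals be declared before they are referenced.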

void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) {
  MCInst Inst;
  lowerToMCInst(MI, Inst);
  EmitToStreamer(*OutStreamer, Inst);
}

// Handle symbol backtracking for targets that do not support image handles
bool NVPTXAsmPrinter::lowerImageHandleOperand(const MachineInstr *MI,
                                              unsigned OpNo, MCOperand &MCOp) {
  const MachineOperand &MO = MI->getOperand(OpNo);
  const MCInstrDesc &MCID = MI->getDesc();

  if (MCID.TSFlags & NVPTXII::IsTexFlag) {
    // This is a texture fetch, so operand 4 is a texref and operand 5 is
    // a samplerref
    if (OpNo == 4 && MO.isImm()) {
      lowerImageHandleSymbol(MO.getImm(), MCOp);
      return true;
    }
    if (OpNo == 5 && MO.isImm() && !(MCID.TSFlags & NVPTXII::IsTexModeUnifiedFlag)) {
      lowerImageHandleSymbol(MO.getImm(), MCOp);
      return true;
    }

    return false;
  } else if (MCID.TSFlags & NVPTXII::IsSuldMask) {
    unsigned VecSize =
      1 << (((MCID.TSFlags & NVPTXII::IsSuldMask) >> NVPTXII::IsSuldShift) - 1);

    // For a surface load of vector size N, the Nth operand will be the surfref
    if (OpNo == VecSize && MO.isImm()) {
      lowerImageHandleSymbol(MO.getImm(), MCOp);
      return true;
    }

    return false;
  } else if (MCID.TSFlags & NVPTXII::IsSustFlag) {
    // This is a surface store, so operand 0 is a surfref
    if (OpNo == 0 && MO.isImm()) {
      lowerImageHandleSymbol(MO.getImm(), MCOp);
      return true;
    }

    return false;
  } else if (MCID.TSFlags & NVPTXII::IsSurfTexQueryFlag) {
    // This is a query, so operand 1 is a surfref/texref
    if (OpNo == 1 && MO.isImm()) {
      lowerImageHandleSymbol(MO.getImm(), MCOp);
      return true;
    }

    return false;
  }

  return false;
}

void NVPTXAsmPrinter::lowerImageHandleSymbol(unsigned Index, MCOperand &MCOp) {
  // Ewwww
  TargetMachine &TM = const_cast<TargetMachine&>(MF->getTarget());
  NVPTXTargetMachine &nvTM = static_cast<NVPTXTargetMachine&>(TM);
  const NVPTXMachineFunctionInfo *MFI = MF->getInfo<NVPTXMachineFunctionInfo>();
  const char *Sym = MFI->getImageHandleSymbol(Index);
  std::string *SymNamePtr =
      nvTM.getManagedStrPool()->getManagedString(Sym);
  MCOp = GetSymbolRef(OutContext.getOrCreateSymbol(StringRef(*SymNamePtr)));
}

void NVPTXAsmPrinter::lowerToMCInst(const MachineInstr *MI, MCInst &OutMI) {
  OutMI.setOpcode(MI->getOpcode());
  // Special: Do not mangle symbol operand of CALL_PROTOTYPE
  if (MI->getOpcode() == NVPTX::CALL_PROTOTYPE) {
    const MachineOperand &MO = MI->getOperand(0);
    OutMI.addOperand(GetSymbolRef(
        OutContext.getOrCreateSymbol(Twine(MO.getSymbolName()))));
    return;
  }

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    MCOperand MCOp;
    if (!nvptxSubtarget->hasImageHandles()) {
      if (lowerImageHandleOperand(MI, i, MCOp)) {
        OutMI.addOperand(MCOp);
        continue;
      }
    }

    if (lowerOperand(MO, MCOp))
      OutMI.addOperand(MCOp);
  }
}

bool NVPTXAsmPrinter::lowerOperand(const MachineOperand &MO,
                                   MCOperand &MCOp) {
  switch (MO.getType()) {
  default: llvm_unreachable("unknown operand type");
  case MachineOperand::MO_Register:
    MCOp = MCOperand::createReg(encodeVirtualRegister(MO.getReg()));
    break;
  case MachineOperand::MO_Immediate:
    MCOp = MCOperand::createImm(MO.getImm());
    break;
  case MachineOperand::MO_MachineBasicBlock:
    MCOp = MCOperand::createExpr(MCSymbolRefExpr::create(
        MO.getMBB()->getSymbol(), OutContext));
    break;
  case MachineOperand::MO_ExternalSymbol:
    MCOp = GetSymbolRef(GetExternalSymbolSymbol(MO.getSymbolName()));
    break;
  case MachineOperand::MO_GlobalAddress:
    MCOp = GetSymbolRef(getSymbol(MO.getGlobal()));
    break;
  case MachineOperand::MO_FPImmediate: {
    const ConstantFP *Cnt = MO.getFPImm();
    const APFloat &Val = Cnt->getValueAPF();

    switch (Cnt->getType()->getTypeID()) {
    default: report_fatal_error("Unsupported FP type"); break;
    case Type::HalfTyID:
      MCOp = MCOperand::createExpr(
          NVPTXFloatMCExpr::createConstantFPHalf(Val, OutContext));
      break;
    case Type::FloatTyID:
      MCOp = MCOperand::createExpr(
          NVPTXFloatMCExpr::createConstantFPSingle(Val, OutContext));
      break;
    case Type::DoubleTyID:
      MCOp = MCOperand::createExpr(
          NVPTXFloatMCExpr::createConstantFPDouble(Val, OutContext));
      break;
    }
    break;
  }
  }
  return true;
}

unsigned NVPTXAsmPrinter::encodeVirtualRegister(unsigned Reg) {
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    const TargetRegisterClass *RC = MRI->getRegClass(Reg);

    DenseMap<unsigned, unsigned> &RegMap = VRegMapping[RC];
    unsigned RegNum = RegMap[Reg];

    // Encode the register class in the upper 4 bits
    // Must be kept in sync with NVPTXInstPrinter::printRegName
    unsigned Ret = 0;
    if (RC == &NVPTX::Int1RegsRegClass) {
      Ret = (1 << 28);
    } else if (RC == &NVPTX::Int16RegsRegClass) {
      Ret = (2 << 28);
    } else if (RC == &NVPTX::Int32RegsRegClass) {
      Ret = (3 << 28);
    } else if (RC == &NVPTX::Int64RegsRegClass) {
      Ret = (4 << 28);
    } else if (RC == &NVPTX::Float32RegsRegClass) {
      Ret = (5 << 28);
    } else if (RC == &NVPTX::Float64RegsRegClass) {
      Ret = (6 << 28);
    } else if (RC == &NVPTX::Float16RegsRegClass) {
      Ret = (7 << 28);
    } else if (RC == &NVPTX::Float16x2RegsRegClass) {
      Ret = (8 << 28);
    } else {
      report_fatal_error("Bad register class");
    }

    // Insert the vreg number
    Ret |= (RegNum & 0x0FFFFFFF);
    return Ret;
  } else {
    // Some special-use registers are actually physical registers.
    // Encode this as the register class ID of 0 and the real register ID.
    return Reg & 0x0FFFFFFF;
  }
}
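// Worked example of the encoding above (illustrative values): a virtual
// register that was assigned per-class number 5 in Int32RegsRegClass is
// encoded as (3 << 28) | 5 == 0x30000005. NVPTXInstPrinter::printRegName is
// expected to decode the upper 4 bits back into the class prefix and print
// the register as, e.g., "%r5".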

MCOperand NVPTXAsmPrinter::GetSymbolRef(const MCSymbol *Symbol) {
  const MCExpr *Expr;
  Expr = MCSymbolRefExpr::create(Symbol, MCSymbolRefExpr::VK_None,
                                 OutContext);
  return MCOperand::createExpr(Expr);
}

void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) {
  const DataLayout &DL = getDataLayout();
  const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();

  Type *Ty = F->getReturnType();

  bool isABI = (nvptxSubtarget->getSmVersion() >= 20);

  if (Ty->getTypeID() == Type::VoidTyID)
    return;

  O << " (";

  if (isABI) {
    if (Ty->isFloatingPointTy() || (Ty->isIntegerTy() && !Ty->isIntegerTy(128))) {
      unsigned size = 0;
      if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
        size = ITy->getBitWidth();
      } else {
        assert(Ty->isFloatingPointTy() && "Floating point type expected here");
        size = Ty->getPrimitiveSizeInBits();
      }
      // PTX ABI requires all scalar return values to be at least 32
      // bits in size. fp16 normally uses .b16 as its storage type in
      // PTX, so its size must be adjusted here, too.
      if (size < 32)
        size = 32;

      O << ".param .b" << size << " func_retval0";
    } else if (isa<PointerType>(Ty)) {
      O << ".param .b" << TLI->getPointerTy(DL).getSizeInBits()
        << " func_retval0";
    } else if (Ty->isAggregateType() || Ty->isVectorTy() || Ty->isIntegerTy(128)) {
      unsigned totalsz = DL.getTypeAllocSize(Ty);
      unsigned retAlignment = 0;
      if (!getAlign(*F, 0, retAlignment))
        retAlignment = DL.getABITypeAlignment(Ty);
      O << ".param .align " << retAlignment << " .b8 func_retval0[" << totalsz
        << "]";
    } else
      llvm_unreachable("Unknown return type");
  } else {
    SmallVector<EVT, 16> vtparts;
    ComputeValueVTs(*TLI, DL, Ty, vtparts);
    unsigned idx = 0;
    for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
      unsigned elems = 1;
      EVT elemtype = vtparts[i];
      if (vtparts[i].isVector()) {
        elems = vtparts[i].getVectorNumElements();
        elemtype = vtparts[i].getVectorElementType();
      }

      for (unsigned j = 0, je = elems; j != je; ++j) {
        unsigned sz = elemtype.getSizeInBits();
        if (elemtype.isInteger() && (sz < 32))
          sz = 32;
        O << ".reg .b" << sz << " func_retval" << idx;
        if (j < je - 1)
          O << ", ";
        ++idx;
      }
      if (i < e - 1)
        O << ", ";
    }
  }
  O << ") ";
}
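// For illustration of the ABI path above (hypothetical inputs): a function
// returning i16 produces " (.param .b32 func_retval0) " because scalar
// returns are widened to at least 32 bits, while a 12-byte aggregate with
// 4-byte alignment produces " (.param .align 4 .b8 func_retval0[12]) ".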

void NVPTXAsmPrinter::printReturnValStr(const MachineFunction &MF,
                                        raw_ostream &O) {
  const Function &F = MF.getFunction();
  printReturnValStr(&F, O);
}

// Return true if MBB is the header of a loop marked with
// llvm.loop.unroll.disable.
// TODO: consider "#pragma unroll 1" which is equivalent to "#pragma nounroll".
bool NVPTXAsmPrinter::isLoopHeaderOfNoUnroll(
    const MachineBasicBlock &MBB) const {
  MachineLoopInfo &LI = getAnalysis<MachineLoopInfo>();
  // We insert .pragma "nounroll" only at the loop header.
  if (!LI.isLoopHeader(&MBB))
    return false;

  // llvm.loop.unroll.disable is marked on the back edges of a loop. Therefore,
  // we iterate through each back edge of the loop with header MBB, and check
  // whether its metadata contains llvm.loop.unroll.disable.
  for (auto I = MBB.pred_begin(); I != MBB.pred_end(); ++I) {
    const MachineBasicBlock *PMBB = *I;
    if (LI.getLoopFor(PMBB) != LI.getLoopFor(&MBB)) {
      // Edges from other loops to MBB are not back edges.
      continue;
    }
    if (const BasicBlock *PBB = PMBB->getBasicBlock()) {
      if (MDNode *LoopID =
              PBB->getTerminator()->getMetadata(LLVMContext::MD_loop)) {
        if (GetUnrollMetadata(LoopID, "llvm.loop.unroll.disable"))
          return true;
      }
    }
  }
  return false;
}

void NVPTXAsmPrinter::EmitBasicBlockStart(const MachineBasicBlock &MBB) const {
  AsmPrinter::EmitBasicBlockStart(MBB);
  if (isLoopHeaderOfNoUnroll(MBB))
    OutStreamer->EmitRawText(StringRef("\t.pragma \"nounroll\";\n"));
}

void NVPTXAsmPrinter::EmitFunctionEntryLabel() {
  SmallString<128> Str;
  raw_svector_ostream O(Str);

  if (!GlobalsEmitted) {
    emitGlobals(*MF->getFunction().getParent());
    GlobalsEmitted = true;
  }

  // Set up
  MRI = &MF->getRegInfo();
  F = &MF->getFunction();
  emitLinkageDirective(F, O);
  if (isKernelFunction(*F))
    O << ".entry ";
  else {
    O << ".func ";
    printReturnValStr(*MF, O);
  }

  CurrentFnSym->print(O, MAI);

  emitFunctionParamList(*MF, O);

  if (isKernelFunction(*F))
    emitKernelFunctionDirectives(*F, O);

  OutStreamer->EmitRawText(O.str());

  VRegMapping.clear();
  // Emit open brace for function body.
  OutStreamer->EmitRawText(StringRef("{\n"));
  setAndEmitFunctionVirtualRegisters(*MF);
}

bool NVPTXAsmPrinter::runOnMachineFunction(MachineFunction &F) {
  nvptxSubtarget = &F.getSubtarget<NVPTXSubtarget>();
  bool Result = AsmPrinter::runOnMachineFunction(F);
  // Emit the closing brace for the body of function F. It must be emitted
  // here rather than from a body-end hook, because additional debug
  // labels/data may still be emitted after the last basic block and there is
  // no callback that runs once emission of the function body has finished.
  OutStreamer->EmitRawText(StringRef("}\n"));
  return Result;
}

void NVPTXAsmPrinter::EmitFunctionBodyStart() {
  SmallString<128> Str;
  raw_svector_ostream O(Str);
  emitDemotedVars(&MF->getFunction(), O);
  OutStreamer->EmitRawText(O.str());
}

void NVPTXAsmPrinter::EmitFunctionBodyEnd() {
  VRegMapping.clear();
}

const MCSymbol *NVPTXAsmPrinter::getFunctionFrameSymbol() const {
  SmallString<128> Str;
  raw_svector_ostream(Str) << DEPOTNAME << getFunctionNumber();
  return OutContext.getOrCreateSymbol(Str);
}

void NVPTXAsmPrinter::emitImplicitDef(const MachineInstr *MI) const {
  unsigned RegNo = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(RegNo)) {
    OutStreamer->AddComment(Twine("implicit-def: ") +
                            getVirtualRegisterName(RegNo));
  } else {
    OutStreamer->AddComment(Twine("implicit-def: ") +
                            nvptxSubtarget->getRegisterInfo()->getName(RegNo));
  }
  OutStreamer->AddBlankLine();
}

void NVPTXAsmPrinter::emitKernelFunctionDirectives(const Function &F,
                                                   raw_ostream &O) const {
  // If the NVVM IR has some of reqntid* specified, then output
  // the reqntid directive, and set the unspecified ones to 1.
  // If none of reqntid* is specified, don't output reqntid directive.
  unsigned reqntidx, reqntidy, reqntidz;
  bool specified = false;
  if (!getReqNTIDx(F, reqntidx))
    reqntidx = 1;
  else
    specified = true;
  if (!getReqNTIDy(F, reqntidy))
    reqntidy = 1;
  else
    specified = true;
  if (!getReqNTIDz(F, reqntidz))
    reqntidz = 1;
  else
    specified = true;

  if (specified)
    O << ".reqntid " << reqntidx << ", " << reqntidy << ", " << reqntidz
      << "\n";

  // If the NVVM IR has some of maxntid* specified, then output
  // the maxntid directive, and set the unspecified ones to 1.
  // If none of maxntid* is specified, don't output maxntid directive.
  unsigned maxntidx, maxntidy, maxntidz;
  specified = false;
  if (!getMaxNTIDx(F, maxntidx))
    maxntidx = 1;
  else
    specified = true;
  if (!getMaxNTIDy(F, maxntidy))
    maxntidy = 1;
  else
    specified = true;
  if (!getMaxNTIDz(F, maxntidz))
    maxntidz = 1;
  else
    specified = true;

  if (specified)
    O << ".maxntid " << maxntidx << ", " << maxntidy << ", " << maxntidz
      << "\n";

  unsigned mincta;
  if (getMinCTASm(F, mincta))
    O << ".minnctapersm " << mincta << "\n";

  unsigned maxnreg;
  if (getMaxNReg(F, maxnreg))
    O << ".maxnreg " << maxnreg << "\n";
}
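// As an illustration, a kernel whose NVVM annotations specify a maxntid of
// (128, 1, 1) and a minimum of 2 CTAs per SM would cause the code above to
// emit (values are examples only):
//   .maxntid 128, 1, 1
//   .minnctapersm 2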

std::string
NVPTXAsmPrinter::getVirtualRegisterName(unsigned Reg) const {
  const TargetRegisterClass *RC = MRI->getRegClass(Reg);

  std::string Name;
  raw_string_ostream NameStr(Name);

  VRegRCMap::const_iterator I = VRegMapping.find(RC);
  assert(I != VRegMapping.end() && "Bad register class");
  const DenseMap<unsigned, unsigned> &RegMap = I->second;

  VRegMap::const_iterator VI = RegMap.find(Reg);
  assert(VI != RegMap.end() && "Bad virtual register");
  unsigned MappedVR = VI->second;

  NameStr << getNVPTXRegClassStr(RC) << MappedVR;

  NameStr.flush();
  return Name;
}

void NVPTXAsmPrinter::emitVirtualRegister(unsigned int vr,
                                          raw_ostream &O) {
  O << getVirtualRegisterName(vr);
}

void NVPTXAsmPrinter::printVecModifiedImmediate(
    const MachineOperand &MO, const char *Modifier, raw_ostream &O) {
  static const char vecelem[] = { '0', '1', '2', '3', '0', '1', '2', '3' };
  int Imm = (int) MO.getImm();
  if (0 == strcmp(Modifier, "vecelem"))
    O << "_" << vecelem[Imm];
  else if (0 == strcmp(Modifier, "vecv4comm1")) {
    if ((Imm < 0) || (Imm > 3))
      O << "//";
  } else if (0 == strcmp(Modifier, "vecv4comm2")) {
    if ((Imm < 4) || (Imm > 7))
      O << "//";
  } else if (0 == strcmp(Modifier, "vecv4pos")) {
    if (Imm < 0)
      Imm = 0;
    O << "_" << vecelem[Imm % 4];
  } else if (0 == strcmp(Modifier, "vecv2comm1")) {
    if ((Imm < 0) || (Imm > 1))
      O << "//";
  } else if (0 == strcmp(Modifier, "vecv2comm2")) {
    if ((Imm < 2) || (Imm > 3))
      O << "//";
  } else if (0 == strcmp(Modifier, "vecv2pos")) {
    if (Imm < 0)
      Imm = 0;
    O << "_" << vecelem[Imm % 2];
  } else
    llvm_unreachable("Unknown Modifier on immediate operand");
}

void NVPTXAsmPrinter::emitDeclaration(const Function *F, raw_ostream &O) {
  emitLinkageDirective(F, O);
  if (isKernelFunction(*F))
    O << ".entry ";
  else
    O << ".func ";
  printReturnValStr(F, O);
  getSymbol(F)->print(O, MAI);
  O << "\n";
  emitFunctionParamList(F, O);
  O << ";\n";
}

static bool usedInGlobalVarDef(const Constant *C) {
  if (!C)
    return false;

  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
    return GV->getName() != "llvm.used";
  }

  for (const User *U : C->users())
    if (const Constant *C = dyn_cast<Constant>(U))
      if (usedInGlobalVarDef(C))
        return true;

  return false;
}

static bool usedInOneFunc(const User *U, Function const *&oneFunc) {
  if (const GlobalVariable *othergv = dyn_cast<GlobalVariable>(U)) {
    if (othergv->getName() == "llvm.used")
      return true;
  }

  if (const Instruction *instr = dyn_cast<Instruction>(U)) {
    if (instr->getParent() && instr->getParent()->getParent()) {
      const Function *curFunc = instr->getParent()->getParent();
      if (oneFunc && (curFunc != oneFunc))
        return false;
      oneFunc = curFunc;
      return true;
    } else
      return false;
  }

  for (const User *UU : U->users())
    if (!usedInOneFunc(UU, oneFunc))
      return false;

  return true;
}

/* Find out if a global variable can be demoted to local scope.
 * Currently, this is valid for CUDA shared variables, which have local
 * scope and global lifetime. So the conditions to check are :
 * 1. Is the global variable in shared address space?
 * 2. Does it have internal linkage?
 * 3. Is the global variable referenced only in one function?
 */
static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
  if (!gv->hasInternalLinkage())
    return false;
  PointerType *Pty = gv->getType();
  if (Pty->getAddressSpace() != ADDRESS_SPACE_SHARED)
    return false;

  const Function *oneFunc = nullptr;

  bool flag = usedInOneFunc(gv, oneFunc);
  if (!flag)
    return false;
  if (!oneFunc)
    return false;
  f = oneFunc;
  return true;
}
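// For example, a CUDA __shared__ array declared at file scope with internal
// linkage and referenced from exactly one kernel satisfies all three
// conditions; printModuleLevelGV() then skips it at module scope and
// emitDemotedVars() prints it inside that kernel's body instead.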

static bool useFuncSeen(const Constant *C,
                        DenseMap<const Function *, bool> &seenMap) {
  for (const User *U : C->users()) {
    if (const Constant *cu = dyn_cast<Constant>(U)) {
      if (useFuncSeen(cu, seenMap))
        return true;
    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
      const BasicBlock *bb = I->getParent();
      if (!bb)
        continue;
      const Function *caller = bb->getParent();
      if (!caller)
        continue;
      if (seenMap.find(caller) != seenMap.end())
        return true;
    }
  }
  return false;
}

void NVPTXAsmPrinter::emitDeclarations(const Module &M, raw_ostream &O) {
  DenseMap<const Function *, bool> seenMap;
  for (Module::const_iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) {
    const Function *F = &*FI;

    if (F->isDeclaration()) {
      if (F->use_empty())
        continue;
      if (F->getIntrinsicID())
        continue;
      emitDeclaration(F, O);
      continue;
    }
    for (const User *U : F->users()) {
      if (const Constant *C = dyn_cast<Constant>(U)) {
        if (usedInGlobalVarDef(C)) {
          // The use is in the initialization of a global variable
          // that is a function pointer, so print a declaration
          // for the original function
          emitDeclaration(F, O);
          break;
        }
        // Emit a declaration of this function if the function that
        // uses this constant expr has already been seen.
        if (useFuncSeen(C, seenMap)) {
          emitDeclaration(F, O);
          break;
        }
      }

      if (!isa<Instruction>(U))
        continue;
      const Instruction *instr = cast<Instruction>(U);
      const BasicBlock *bb = instr->getParent();
      if (!bb)
        continue;
      const Function *caller = bb->getParent();
      if (!caller)
        continue;

      // If a caller has already been seen, then the caller is appearing in
      // the module before the callee, so print out a declaration for the
      // callee.
      if (seenMap.find(caller) != seenMap.end()) {
        emitDeclaration(F, O);
        break;
      }
    }
    seenMap[F] = true;
  }
}

static bool isEmptyXXStructor(GlobalVariable *GV) {
  if (!GV) return true;
  const ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!InitList) return true; // Not an array; we don't know how to parse.
  return InitList->getNumOperands() == 0;
}

bool NVPTXAsmPrinter::doInitialization(Module &M) {
  // Construct a default subtarget off of the TargetMachine defaults. The
  // rest of NVPTX isn't friendly to change subtargets per function and
  // so the default TargetMachine will have all of the options.
  const Triple &TT = TM.getTargetTriple();
  StringRef CPU = TM.getTargetCPU();
  StringRef FS = TM.getTargetFeatureString();
  const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
  const NVPTXSubtarget STI(TT, CPU, FS, NTM);

  if (M.alias_size()) {
    report_fatal_error("Module has aliases, which NVPTX does not support.");
    return true; // error
  }
  if (!isEmptyXXStructor(M.getNamedGlobal("llvm.global_ctors"))) {
    report_fatal_error(
        "Module has a nontrivial global ctor, which NVPTX does not support.");
    return true; // error
  }
  if (!isEmptyXXStructor(M.getNamedGlobal("llvm.global_dtors"))) {
    report_fatal_error(
        "Module has a nontrivial global dtor, which NVPTX does not support.");
    return true; // error
  }

  SmallString<128> Str1;
  raw_svector_ostream OS1(Str1);

  // We need to call the parent's one explicitly.
  bool Result = AsmPrinter::doInitialization(M);

  // Emit header before any dwarf directives are emitted below.
  emitHeader(M, OS1, STI);
  OutStreamer->EmitRawText(OS1.str());

  // Emit module-level inline asm if it exists.
  if (!M.getModuleInlineAsm().empty()) {
    OutStreamer->AddComment("Start of file scope inline assembly");
    OutStreamer->AddBlankLine();
    OutStreamer->EmitRawText(StringRef(M.getModuleInlineAsm()));
    OutStreamer->AddBlankLine();
    OutStreamer->AddComment("End of file scope inline assembly");
    OutStreamer->AddBlankLine();
  }

  GlobalsEmitted = false;

  return Result;
}

void NVPTXAsmPrinter::emitGlobals(const Module &M) {
  SmallString<128> Str2;
  raw_svector_ostream OS2(Str2);

  emitDeclarations(M, OS2);

  // As ptxas does not support forward references of globals, we need to first
  // sort the list of module-level globals in def-use order. We visit each
  // global variable in order, and ensure that we emit it *after* its dependent
  // globals. We use a little extra memory maintaining both a set and a list to
  // have fast searches while maintaining a strict ordering.
  SmallVector<const GlobalVariable *, 8> Globals;
  DenseSet<const GlobalVariable *> GVVisited;
  DenseSet<const GlobalVariable *> GVVisiting;

  // Visit each global variable, in order
  for (const GlobalVariable &I : M.globals())
    VisitGlobalVariableForEmission(&I, Globals, GVVisited, GVVisiting);

  assert(GVVisited.size() == M.getGlobalList().size() &&
         "Missed a global variable");
  assert(GVVisiting.size() == 0 && "Did not fully process a global variable");

  // Print out module-level global variables in proper order
  for (unsigned i = 0, e = Globals.size(); i != e; ++i)
    printModuleLevelGV(Globals[i], OS2);

  OS2 << '\n';

  OutStreamer->EmitRawText(OS2.str());
}

void NVPTXAsmPrinter::emitHeader(Module &M, raw_ostream &O,
                                 const NVPTXSubtarget &STI) {
  O << "//\n";
  O << "// Generated by LLVM NVPTX Back-End\n";
  O << "//\n";
  O << "\n";

  unsigned PTXVersion = STI.getPTXVersion();
  O << ".version " << (PTXVersion / 10) << "." << (PTXVersion % 10) << "\n";

  O << ".target ";
  O << STI.getTargetName();

  const NVPTXTargetMachine &NTM = static_cast<const NVPTXTargetMachine &>(TM);
  if (NTM.getDrvInterface() == NVPTX::NVCL)
    O << ", texmode_independent";

  // FIXME: remove comment once debug info is properly supported.
  if (MMI && MMI->hasDebugInfo())
    O << "//, debug";

  O << "\n";

  O << ".address_size ";
  if (NTM.is64Bit())
    O << "64";
  else
    O << "32";
  O << "\n";

  O << "\n";
}
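// Illustrative module header produced by emitHeader() for a 64-bit CUDA
// module (the version and target strings depend on the subtarget, so these
// values are examples only):
//   .version 6.0
//   .target sm_30
//   .address_size 64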

bool NVPTXAsmPrinter::doFinalization(Module &M) {
  bool HasDebugInfo = MMI && MMI->hasDebugInfo();

  // If we did not emit any functions, then the global declarations have not
  // yet been emitted.
  if (!GlobalsEmitted) {
    emitGlobals(M);
    GlobalsEmitted = true;
  }

  // XXX Temporarily remove global variables so that doFinalization() will not
  // emit them again (global variables are emitted at the beginning).

  Module::GlobalListType &global_list = M.getGlobalList();
  int i, n = global_list.size();
  GlobalVariable **gv_array = new GlobalVariable *[n];

  // first, back-up GlobalVariable in gv_array
  i = 0;
  for (Module::global_iterator I = global_list.begin(), E = global_list.end();
       I != E; ++I)
    gv_array[i++] = &*I;

  // second, empty global_list
  while (!global_list.empty())
    global_list.remove(global_list.begin());

  // call doFinalization
  bool ret = AsmPrinter::doFinalization(M);

  // now we restore global variables
  for (i = 0; i < n; i++)
    global_list.insert(global_list.end(), gv_array[i]);

  clearAnnotationCache(&M);

  delete[] gv_array;
  // FIXME: remove comment once debug info is properly supported.
  // Close the last emitted section
  if (HasDebugInfo)
    OutStreamer->EmitRawText("//\t}");

  return ret;

  //bool Result = AsmPrinter::doFinalization(M);
  // Instead of calling the parent's doFinalization, we may clone the parent's
  // doFinalization and customize it here. Currently, that would require
  // conditionally compiling out (if-NVISA) the EmitGlobals() call in the
  // parent's doFinalization, which is too intrusive.
  //
  // Same for the doInitialization.
  //return Result;
}

// This function emits appropriate linkage directives for
// functions and global variables.
//
// extern function declaration -> .extern
// extern function definition -> .visible
// external global variable with init -> .visible
// external without init -> .extern
// appending -> not allowed, assert.
// for any linkage other than
// internal, private, linker_private,
// linker_private_weak, linker_private_weak_def_auto,
// we emit -> .weak.

void NVPTXAsmPrinter::emitLinkageDirective(const GlobalValue *V,
                                           raw_ostream &O) {
  if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() == NVPTX::CUDA) {
    if (V->hasExternalLinkage()) {
      if (isa<GlobalVariable>(V)) {
        const GlobalVariable *GVar = cast<GlobalVariable>(V);
        if (GVar) {
          if (GVar->hasInitializer())
            O << ".visible ";
          else
            O << ".extern ";
        }
      } else if (V->isDeclaration())
        O << ".extern ";
      else
        O << ".visible ";
    } else if (V->hasAppendingLinkage()) {
      std::string msg;
      msg.append("Error: ");
      msg.append("Symbol ");
      if (V->hasName())
        msg.append(V->getName());
      msg.append(" has unsupported appending linkage type");
      llvm_unreachable(msg.c_str());
    } else if (!V->hasInternalLinkage() &&
               !V->hasPrivateLinkage()) {
      O << ".weak ";
    }
  }
}

void NVPTXAsmPrinter::printModuleLevelGV(const GlobalVariable *GVar,
                                         raw_ostream &O,
                                         bool processDemoted) {
  // Skip meta data
  if (GVar->hasSection()) {
    if (GVar->getSection() == "llvm.metadata")
      return;
  }

  // Skip LLVM intrinsic global variables
  if (GVar->getName().startswith("llvm.") ||
      GVar->getName().startswith("nvvm."))
    return;

  const DataLayout &DL = getDataLayout();

  // GlobalVariables are always constant pointers themselves.
  PointerType *PTy = GVar->getType();
  Type *ETy = GVar->getValueType();

  if (GVar->hasExternalLinkage()) {
    if (GVar->hasInitializer())
      O << ".visible ";
    else
      O << ".extern ";
  } else if (GVar->hasLinkOnceLinkage() || GVar->hasWeakLinkage() ||
             GVar->hasAvailableExternallyLinkage() ||
             GVar->hasCommonLinkage()) {
    O << ".weak ";
  }

  if (isTexture(*GVar)) {
    O << ".global .texref " << getTextureName(*GVar) << ";\n";
    return;
  }

  if (isSurface(*GVar)) {
    O << ".global .surfref " << getSurfaceName(*GVar) << ";\n";
    return;
  }

  if (GVar->isDeclaration()) {
    // (extern) declarations, no definition or initializer
    // Currently the only known declaration is for an automatic __local
    // (.shared) promoted to global.
    emitPTXGlobalVariable(GVar, O);
    O << ";\n";
    return;
  }

  if (isSampler(*GVar)) {
    O << ".global .samplerref " << getSamplerName(*GVar);

    const Constant *Initializer = nullptr;
    if (GVar->hasInitializer())
      Initializer = GVar->getInitializer();
    const ConstantInt *CI = nullptr;
    if (Initializer)
      CI = dyn_cast<ConstantInt>(Initializer);
    if (CI) {
      unsigned sample = CI->getZExtValue();

      O << " = { ";

      for (int i = 0,
               addr = ((sample & __CLK_ADDRESS_MASK) >> __CLK_ADDRESS_BASE);
           i < 3; i++) {
        O << "addr_mode_" << i << " = ";
        switch (addr) {
        case 0:
          O << "wrap";
          break;
        case 1:
          O << "clamp_to_border";
          break;
        case 2:
          O << "clamp_to_edge";
          break;
        case 3:
          O << "wrap";
          break;
        case 4:
          O << "mirror";
          break;
        }
        O << ", ";
      }
      O << "filter_mode = ";
      switch ((sample & __CLK_FILTER_MASK) >> __CLK_FILTER_BASE) {
      case 0:
        O << "nearest";
        break;
      case 1:
        O << "linear";
        break;
      case 2:
        llvm_unreachable("Anisotropic filtering is not supported");
      default:
        O << "nearest";
        break;
      }
      if (!((sample & __CLK_NORMALIZED_MASK) >> __CLK_NORMALIZED_BASE)) {
        O << ", force_unnormalized_coords = 1";
      }
      O << " }";
    }

    O << ";\n";
    return;
  }

  if (GVar->hasPrivateLinkage()) {
    if (strncmp(GVar->getName().data(), "unrollpragma", 12) == 0)
      return;

    // FIXME - need better way (e.g. Metadata) to avoid generating this global
    if (strncmp(GVar->getName().data(), "filename", 8) == 0)
      return;
    if (GVar->use_empty())
      return;
  }

  const Function *demotedFunc = nullptr;
  if (!processDemoted && canDemoteGlobalVar(GVar, demotedFunc)) {
    O << "// " << GVar->getName() << " has been demoted\n";
    if (localDecls.find(demotedFunc) != localDecls.end())
      localDecls[demotedFunc].push_back(GVar);
    else {
      std::vector<const GlobalVariable *> temp;
      temp.push_back(GVar);
      localDecls[demotedFunc] = temp;
    }
    return;
  }

  O << ".";
  emitPTXAddressSpace(PTy->getAddressSpace(), O);

  if (isManaged(*GVar)) {
    O << " .attribute(.managed)";
  }

  if (GVar->getAlignment() == 0)
    O << " .align " << (int)DL.getPrefTypeAlignment(ETy);
  else
    O << " .align " << GVar->getAlignment();

  if (ETy->isFloatingPointTy() || ETy->isPointerTy() ||
      (ETy->isIntegerTy() && ETy->getScalarSizeInBits() <= 64)) {
    O << " .";
    // Special case: ABI requires that we use .u8 for predicates
    if (ETy->isIntegerTy(1))
      O << "u8";
    else
      O << getPTXFundamentalTypeStr(ETy, false);
    O << " ";
    getSymbol(GVar)->print(O, MAI);

    // PTX allows variable initialization only for the constant and global
    // state spaces.
    if (GVar->hasInitializer()) {
      if ((PTy->getAddressSpace() == ADDRESS_SPACE_GLOBAL) ||
          (PTy->getAddressSpace() == ADDRESS_SPACE_CONST)) {
        const Constant *Initializer = GVar->getInitializer();
        // 'undef' is treated as there is no value specified.
        if (!Initializer->isNullValue() && !isa<UndefValue>(Initializer)) {
          O << " = ";
          printScalarConstant(Initializer, O);
        }
      } else {
        // The frontend adds zero-initializer to device and constant variables
        // that don't have an initial value, and UndefValue to shared
        // variables, so skip warning for this case.
        if (!GVar->getInitializer()->isNullValue() &&
            !isa<UndefValue>(GVar->getInitializer())) {
          report_fatal_error("initial value of '" + GVar->getName() +
                             "' is not allowed in addrspace(" +
                             Twine(PTy->getAddressSpace()) + ")");
        }
      }
    }
  } else {
    unsigned int ElementSize = 0;

    // Although PTX has direct support for struct and array types, and LLVM IR
    // is very similar to PTX in this respect, LLVM CodeGen does not support
    // such high-level field accesses. Structs, arrays and vectors are lowered
    // into arrays of bytes.
    switch (ETy->getTypeID()) {
    case Type::IntegerTyID: // Integers larger than 64 bits
    case Type::StructTyID:
    case Type::ArrayTyID:
    case Type::VectorTyID:
      ElementSize = DL.getTypeStoreSize(ETy);
      // PTX allows variable initialization only for the constant and
      // global state spaces.
      if (((PTy->getAddressSpace() == ADDRESS_SPACE_GLOBAL) ||
           (PTy->getAddressSpace() == ADDRESS_SPACE_CONST)) &&
          GVar->hasInitializer()) {
        const Constant *Initializer = GVar->getInitializer();
        if (!isa<UndefValue>(Initializer) && !Initializer->isNullValue()) {
          AggBuffer aggBuffer(ElementSize, O, *this);
          bufferAggregateConstant(Initializer, &aggBuffer);
          if (aggBuffer.numSymbols) {
            if (static_cast<const NVPTXTargetMachine &>(TM).is64Bit()) {
              O << " .u64 ";
              getSymbol(GVar)->print(O, MAI);
              O << "[";
              O << ElementSize / 8;
            } else {
              O << " .u32 ";
              getSymbol(GVar)->print(O, MAI);
              O << "[";
              O << ElementSize / 4;
            }
            O << "]";
          } else {
            O << " .b8 ";
            getSymbol(GVar)->print(O, MAI);
            O << "[";
            O << ElementSize;
            O << "]";
          }
          O << " = {";
          aggBuffer.print();
          O << "}";
        } else {
          O << " .b8 ";
          getSymbol(GVar)->print(O, MAI);
          if (ElementSize) {
            O << "[";
            O << ElementSize;
            O << "]";
          }
        }
      } else {
        O << " .b8 ";
        getSymbol(GVar)->print(O, MAI);
        if (ElementSize) {
          O << "[";
          O << ElementSize;
          O << "]";
        }
      }
      break;
    default:
      llvm_unreachable("type not supported yet");
    }
  }
  O << ";\n";
}
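// As an example of the scalar path above (hypothetical IR, CUDA mode):
//   @gv = addrspace(1) global i32 42, align 4
// would be printed as
//   .visible .global .align 4 .u32 gv = 42;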

void NVPTXAsmPrinter::emitDemotedVars(const Function *f, raw_ostream &O) {
  if (localDecls.find(f) == localDecls.end())
    return;

  std::vector<const GlobalVariable *> &gvars = localDecls[f];

  for (unsigned i = 0, e = gvars.size(); i != e; ++i) {
    O << "\t// demoted variable\n\t";
    printModuleLevelGV(gvars[i], O, true);
  }
}

void NVPTXAsmPrinter::emitPTXAddressSpace(unsigned int AddressSpace,
                                          raw_ostream &O) const {
  switch (AddressSpace) {
  case ADDRESS_SPACE_LOCAL:
    O << "local";
    break;
  case ADDRESS_SPACE_GLOBAL:
    O << "global";
    break;
  case ADDRESS_SPACE_CONST:
    O << "const";
    break;
  case ADDRESS_SPACE_SHARED:
    O << "shared";
    break;
  default:
    report_fatal_error("Bad address space found while emitting PTX: " +
                       llvm::Twine(AddressSpace));
    break;
  }
}

std::string
NVPTXAsmPrinter::getPTXFundamentalTypeStr(Type *Ty, bool useB4PTR) const {
  switch (Ty->getTypeID()) {
  default:
    llvm_unreachable("unexpected type");
    break;
  case Type::IntegerTyID: {
    unsigned NumBits = cast<IntegerType>(Ty)->getBitWidth();
    if (NumBits == 1)
      return "pred";
    else if (NumBits <= 64) {
      std::string name = "u";
      return name + utostr(NumBits);
    } else {
      llvm_unreachable("Integer too large");
      break;
    }
    break;
  }
  case Type::HalfTyID:
    // fp16 is stored as .b16 for compatibility with pre-sm_53 PTX assembly.
    return "b16";
  case Type::FloatTyID:
    return "f32";
  case Type::DoubleTyID:
    return "f64";
  case Type::PointerTyID:
    if (static_cast<const NVPTXTargetMachine &>(TM).is64Bit())
      if (useB4PTR)
        return "b64";
      else
        return "u64";
    else if (useB4PTR)
      return "b32";
    else
      return "u32";
  }
  llvm_unreachable("unexpected type");
  return nullptr;
}
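// Summary of the mapping above, for reference: i1 -> "pred", iN (N <= 64) ->
// "uN", half -> "b16", float -> "f32", double -> "f64"; pointers map to
// "u64"/"b64" on 64-bit targets and "u32"/"b32" otherwise, with the "b" forms
// selected when useB4PTR is set.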

void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable *GVar,
                                            raw_ostream &O) {
  const DataLayout &DL = getDataLayout();

  // GlobalVariables are always constant pointers themselves.
  Type *ETy = GVar->getValueType();

  O << ".";
  emitPTXAddressSpace(GVar->getType()->getAddressSpace(), O);
  if (GVar->getAlignment() == 0)
    O << " .align " << (int)DL.getPrefTypeAlignment(ETy);
  else
    O << " .align " << GVar->getAlignment();

  // Special case for i128
  if (ETy->isIntegerTy(128)) {
    O << " .b8 ";
    getSymbol(GVar)->print(O, MAI);
    O << "[16]";
    return;
  }

  if (ETy->isFloatingPointTy() || ETy->isIntOrPtrTy()) {
    O << " .";
    O << getPTXFundamentalTypeStr(ETy);
    O << " ";
    getSymbol(GVar)->print(O, MAI);
    return;
  }

  int64_t ElementSize = 0;

  // Although PTX has direct support for struct and array types, and LLVM IR
  // is very similar to PTX in this respect, LLVM CodeGen does not support
  // such high-level field accesses. Structs and arrays are lowered into
  // arrays of bytes.
  switch (ETy->getTypeID()) {
  case Type::StructTyID:
  case Type::ArrayTyID:
  case Type::VectorTyID:
    ElementSize = DL.getTypeStoreSize(ETy);
    O << " .b8 ";
    getSymbol(GVar)->print(O, MAI);
    O << "[";
    if (ElementSize) {
      O << ElementSize;
    }
    O << "]";
    break;
  default:
    llvm_unreachable("type not supported yet");
  }
}

static unsigned int getOpenCLAlignment(const DataLayout &DL, Type *Ty) {
  if (Ty->isSingleValueType())
    return DL.getPrefTypeAlignment(Ty);

  auto *ATy = dyn_cast<ArrayType>(Ty);
  if (ATy)
    return getOpenCLAlignment(DL, ATy->getElementType());

  auto *STy = dyn_cast<StructType>(Ty);
  if (STy) {
    unsigned int alignStruct = 1;
    // Go through each element of the struct and find the
    // largest alignment.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
      Type *ETy = STy->getElementType(i);
      unsigned int align = getOpenCLAlignment(DL, ETy);
      if (align > alignStruct)
        alignStruct = align;
    }
    return alignStruct;
  }

  auto *FTy = dyn_cast<FunctionType>(Ty);
  if (FTy)
    return DL.getPointerPrefAlignment();
  return DL.getPrefTypeAlignment(Ty);
}
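// For instance, given the hypothetical type {[4 x float], i8},
// getOpenCLAlignment() recurses into the array's element type and takes the
// maximum over the struct members, so it returns the preferred alignment of
// float (typically 4) for this example.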

void NVPTXAsmPrinter::printParamName(Function::const_arg_iterator I,
                                     int paramIndex, raw_ostream &O) {
  getSymbol(I->getParent())->print(O, MAI);
  O << "_param_" << paramIndex;
}

void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) {
  const DataLayout &DL = getDataLayout();
  const AttributeList &PAL = F->getAttributes();
  const TargetLowering *TLI = nvptxSubtarget->getTargetLowering();
  Function::const_arg_iterator I, E;
  unsigned paramIndex = 0;
  bool first = true;
  bool isKernelFunc = isKernelFunction(*F);
  bool isABI = (nvptxSubtarget->getSmVersion() >= 20);
  MVT thePointerTy = TLI->getPointerTy(DL);

  if (F->arg_empty()) {
    O << "()\n";
    return;
  }

  O << "(\n";

  for (I = F->arg_begin(), E = F->arg_end(); I != E; ++I, paramIndex++) {
    Type *Ty = I->getType();

    if (!first)
      O << ",\n";

    first = false;

    // Handle image/sampler parameters
    if (isKernelFunction(*F)) {
      if (isSampler(*I) || isImage(*I)) {
        if (isImage(*I)) {
          std::string sname = I->getName();
          if (isImageWriteOnly(*I) || isImageReadWrite(*I)) {
            if (nvptxSubtarget->hasImageHandles())
              O << "\t.param .u64 .ptr .surfref ";
            else
              O << "\t.param .surfref ";
            CurrentFnSym->print(O, MAI);
            O << "_param_" << paramIndex;
          }
          else { // Default image is read_only
            if (nvptxSubtarget->hasImageHandles())
              O << "\t.param .u64 .ptr .texref ";
            else
              O << "\t.param .texref ";
            CurrentFnSym->print(O, MAI);
            O << "_param_" << paramIndex;
          }
        } else {
          if (nvptxSubtarget->hasImageHandles())
            O << "\t.param .u64 .ptr .samplerref ";
          else
            O << "\t.param .samplerref ";
          CurrentFnSym->print(O, MAI);
          O << "_param_" << paramIndex;
        }
        continue;
      }
    }

    if (!PAL.hasParamAttribute(paramIndex, Attribute::ByVal)) {
      if (Ty->isAggregateType() || Ty->isVectorTy() || Ty->isIntegerTy(128)) {
        // Just print .param .align <a> .b8 .param[size];
        // <a> = PAL.getparamalignment
        // size = typeallocsize of element type
        unsigned align = PAL.getParamAlignment(paramIndex);
        if (align == 0)
          align = DL.getABITypeAlignment(Ty);

        unsigned sz = DL.getTypeAllocSize(Ty);
        O << "\t.param .align " << align << " .b8 ";
        printParamName(I, paramIndex, O);
        O << "[" << sz << "]";

        continue;
      }
      // Just a scalar
      auto *PTy = dyn_cast<PointerType>(Ty);
      if (isKernelFunc) {
        if (PTy) {
          // Special handling for pointer arguments to kernel
          O << "\t.param .u" << thePointerTy.getSizeInBits() << " ";

          if (static_cast<NVPTXTargetMachine &>(TM).getDrvInterface() !=
              NVPTX::CUDA) {
            Type *ETy = PTy->getElementType();
            int addrSpace = PTy->getAddressSpace();
            switch (addrSpace) {
            default:
              O << ".ptr ";
              break;
            case ADDRESS_SPACE_CONST:
              O << ".ptr .const ";
              break;
            case ADDRESS_SPACE_SHARED:
              O << ".ptr .shared ";
              break;
            case ADDRESS_SPACE_GLOBAL:
              O << ".ptr .global ";
              break;
            }
            O << ".align " << (int)getOpenCLAlignment(DL, ETy) << " ";
          }
          printParamName(I, paramIndex, O);
          continue;
        }

        // non-pointer scalar to kernel func
        O << "\t.param .";
        // Special case: predicate operands become .u8 types
        if (Ty->isIntegerTy(1))
          O << "u8";
        else
          O << getPTXFundamentalTypeStr(Ty);
        O << " ";
        printParamName(I, paramIndex, O);
        continue;
      }
      // Non-kernel function, just print .param .b<size> for ABI
      // and .reg .b<size> for non-ABI
      unsigned sz = 0;
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
        if (sz < 32)
          sz = 32;
      } else if (isa<PointerType>(Ty))
        sz = thePointerTy.getSizeInBits();
      else if (Ty->isHalfTy())
        // PTX ABI requires all scalar parameters to be at least 32
        // bits in size. fp16 normally uses .b16 as its storage type
        // in PTX, so its size must be adjusted here, too.
        sz = 32;
      else
        sz = Ty->getPrimitiveSizeInBits();
      if (isABI)
        O << "\t.param .b" << sz << " ";
      else
        O << "\t.reg .b" << sz << " ";
      printParamName(I, paramIndex, O);
      continue;
    }

    // param has byVal attribute. So should be a pointer
    auto *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy && "Param with byval attribute should be a pointer type");
    Type *ETy = PTy->getElementType();

    if (isABI || isKernelFunc) {
      // Just print .param .align <a> .b8 .param[size];
      // <a> = PAL.getparamalignment
      // size = typeallocsize of element type
      unsigned align = PAL.getParamAlignment(paramIndex);
      if (align == 0)
        align = DL.getABITypeAlignment(ETy);
      // Work around a bug in ptxas. When PTX code takes address of
      // byval parameter with alignment < 4, ptxas generates code to
      // spill argument into memory. Alas on sm_50+ ptxas generates
      // SASS code that fails with misaligned access. To work around
      // the problem, make sure that we align byval parameters by at
      // least 4. Matching change must be made in LowerCall() where we
      // prepare parameters for the call.
      //
      // TODO: this will need to be undone when we get to support multi-TU
      // device-side compilation as it breaks ABI compatibility with nvcc.
      // Hopefully ptxas bug is fixed by then.
      if (!isKernelFunc && align < 4)
        align = 4;
      unsigned sz = DL.getTypeAllocSize(ETy);
      O << "\t.param .align " << align << " .b8 ";
      printParamName(I, paramIndex, O);
      O << "[" << sz << "]";
      continue;
    } else {
      // Split the ETy into constituent parts and
      // print .param .b<size> <name> for each part.
      // Further, if a part is vector, print the above for
      // each vector element.
      SmallVector<EVT, 16> vtparts;
      ComputeValueVTs(*TLI, DL, ETy, vtparts);
      for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
        unsigned elems = 1;
        EVT elemtype = vtparts[i];
        if (vtparts[i].isVector()) {
          elems = vtparts[i].getVectorNumElements();
          elemtype = vtparts[i].getVectorElementType();
        }

        for (unsigned j = 0, je = elems; j != je; ++j) {
          unsigned sz = elemtype.getSizeInBits();
          if (elemtype.isInteger() && (sz < 32))
            sz = 32;
          O << "\t.reg .b" << sz << " ";
          printParamName(I, paramIndex, O);
          if (j < je - 1)
            O << ",\n";
          ++paramIndex;
        }
        if (i < e - 1)
          O << ",\n";
      }
      --paramIndex;
      continue;
    }
  }

  O << "\n)\n";
}
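// Illustrative output of emitFunctionParamList() for a hypothetical 64-bit
// CUDA kernel foo(float *p, int n) (symbol name shown unmangled, whitespace
// approximate):
//   (
//     .param .u64 foo_param_0,
//     .param .u32 foo_param_1
//   )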

void NVPTXAsmPrinter::emitFunctionParamList(const MachineFunction &MF,
                                            raw_ostream &O) {
  const Function &F = MF.getFunction();
  emitFunctionParamList(&F, O);
}

void NVPTXAsmPrinter::setAndEmitFunctionVirtualRegisters(
    const MachineFunction &MF) {
  SmallString<128> Str;
  raw_svector_ostream O(Str);

  // Map the global virtual register number to a register class specific
  // virtual register number starting from 1 within that class.
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  //unsigned numRegClasses = TRI->getNumRegClasses();

  // Emit the Fake Stack Object
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  int NumBytes = (int) MFI.getStackSize();
  if (NumBytes) {
    O << "\t.local .align " << MFI.getMaxAlignment() << " .b8 \t" << DEPOTNAME
      << getFunctionNumber() << "[" << NumBytes << "];\n";
    if (static_cast<const NVPTXTargetMachine &>(MF.getTarget()).is64Bit()) {
      O << "\t.reg .b64 \t%SP;\n";
      O << "\t.reg .b64 \t%SPL;\n";
    } else {
      O << "\t.reg .b32 \t%SP;\n";
      O << "\t.reg .b32 \t%SPL;\n";
    }
  }

  // Go through all virtual registers to establish the mapping between the
  // global virtual register number and the per-class virtual register number.
  // We use the per-class virtual register number in the ptx output.
  unsigned int numVRs = MRI->getNumVirtRegs();
  for (unsigned i = 0; i < numVRs; i++) {
    unsigned int vr = TRI->index2VirtReg(i);
    const TargetRegisterClass *RC = MRI->getRegClass(vr);
    DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
    int n = regmap.size();
    regmap.insert(std::make_pair(vr, n + 1));
  }

  // Emit register declarations
  // @TODO: Extract out the real register usage
  // O << "\t.reg .pred %p<" << NVPTXNumRegisters << ">;\n";
  // O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
  // O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
  // O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
  // O << "\t.reg .s64 %rd<" << NVPTXNumRegisters << ">;\n";
  // O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
  // O << "\t.reg .f64 %fd<" << NVPTXNumRegisters << ">;\n";

  // Emit declaration of the virtual registers or 'physical' registers for
  // each register class
  for (unsigned i = 0; i < TRI->getNumRegClasses(); i++) {
    const TargetRegisterClass *RC = TRI->getRegClass(i);
    DenseMap<unsigned, unsigned> &regmap = VRegMapping[RC];
    std::string rcname = getNVPTXRegClassName(RC);
    std::string rcStr = getNVPTXRegClassStr(RC);
    int n = regmap.size();

    // Only declare those registers that may be used.
    if (n) {
      O << "\t.reg " << rcname << " \t" << rcStr << "<" << (n+1)
        << ">;\n";
    }
  }

  OutStreamer->EmitRawText(O.str());
}
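// Typical declarations emitted by the loop above for a function that happens
// to use three Int32 and one Float32 virtual registers (counts illustrative;
// the class and prefix strings come from getNVPTXRegClassName/Str, whitespace
// approximate):
//   .reg .b32 %r<4>;
//   .reg .f32 %f<2>;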

void NVPTXAsmPrinter::printFPConstant(const ConstantFP *Fp, raw_ostream &O) {
  APFloat APF = APFloat(Fp->getValueAPF()); // make a copy
  bool ignored;
  unsigned int numHex;
  const char *lead;

  if (Fp->getType()->getTypeID() == Type::FloatTyID) {
    numHex = 8;
    lead = "0f";
    APF.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &ignored);
  } else if (Fp->getType()->getTypeID() == Type::DoubleTyID) {
    numHex = 16;
    lead = "0d";
    APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &ignored);
  } else
    llvm_unreachable("unsupported fp type");

  APInt API = APF.bitcastToAPInt();
  O << lead << format_hex_no_prefix(API.getZExtValue(), numHex, /*Upper=*/true);
}
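// Example of the encoding produced above: the float constant 1.0 prints as
// "0f3F800000" and the double constant 1.0 as "0d3FF0000000000000", i.e. a
// "0f"/"0d" prefix followed by the value's IEEE bit pattern in upper-case hex.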

void NVPTXAsmPrinter::printScalarConstant(const Constant *CPV, raw_ostream &O) {
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(CPV)) {
    O << CI->getValue();
    return;
  }
  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CPV)) {
    printFPConstant(CFP, O);
    return;
  }
  if (isa<ConstantPointerNull>(CPV)) {
    O << "0";
    return;
  }
  if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
    bool IsNonGenericPointer = false;
    if (GVar->getType()->getAddressSpace() != 0) {
      IsNonGenericPointer = true;
    }
    if (EmitGeneric && !isa<Function>(CPV) && !IsNonGenericPointer) {
      O << "generic(";
      getSymbol(GVar)->print(O, MAI);
      O << ")";
    } else {
      getSymbol(GVar)->print(O, MAI);
    }
    return;
  }
  if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
    const Value *v = Cexpr->stripPointerCasts();
    PointerType *PTy = dyn_cast<PointerType>(Cexpr->getType());
    bool IsNonGenericPointer = false;
    if (PTy && PTy->getAddressSpace() != 0) {
      IsNonGenericPointer = true;
    }
    if (const GlobalValue *GVar = dyn_cast<GlobalValue>(v)) {
      if (EmitGeneric && !isa<Function>(v) && !IsNonGenericPointer) {
        O << "generic(";
        getSymbol(GVar)->print(O, MAI);
        O << ")";
      } else {
        getSymbol(GVar)->print(O, MAI);
      }
      return;
    } else {
      lowerConstant(CPV)->print(O, MAI);
      return;
    }
  }
  llvm_unreachable("Non-scalar type found in printScalarConstant()");
}
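// For example (symbol names are illustrative): when EmitGeneric is set, an
// initializer referencing a module-level global @gbl in address space 0 prints
// as "generic(gbl)", while a function symbol or a global that already lives in
// a specific address space prints as the bare symbol name.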

// These utility functions ensure we get the right sequence of bytes for a
// given type even for big-endian machines.
template <typename T> static void ConvertIntToBytes(unsigned char *p, T val) {
  int64_t vp = (int64_t)val;
  for (unsigned i = 0; i < sizeof(T); ++i) {
    p[i] = (unsigned char)vp;
    vp >>= 8;
  }
}
static void ConvertFloatToBytes(unsigned char *p, float val) {
  // Copy the bit pattern into an integer first to avoid type punning through
  // an incompatible pointer, then reuse the little-endian integer path.
  int32_t Bits;
  std::memcpy(&Bits, &val, sizeof(Bits));
  ConvertIntToBytes<>(p, Bits);
}
static void ConvertDoubleToBytes(unsigned char *p, double val) {
  int64_t Bits;
  std::memcpy(&Bits, &val, sizeof(Bits));
  ConvertIntToBytes<>(p, Bits);
}
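// Quick sanity check of the byte order (illustrative): ConvertIntToBytes on
// the 32-bit value 0x0A0B0C0D fills p[0]=0x0D, p[1]=0x0C, p[2]=0x0B,
// p[3]=0x0A, i.e. least-significant byte first regardless of host endianness.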

void NVPTXAsmPrinter::bufferLEByte(const Constant *CPV, int Bytes,
                                   AggBuffer *aggBuffer) {
  const DataLayout &DL = getDataLayout();

  if (isa<UndefValue>(CPV) || CPV->isNullValue()) {
    int s = DL.getTypeAllocSize(CPV->getType());
    if (s < Bytes)
      s = Bytes;
    aggBuffer->addZeros(s);
    return;
  }

  unsigned char ptr[8];
  switch (CPV->getType()->getTypeID()) {

  case Type::IntegerTyID: {
    Type *ETy = CPV->getType();
    if (ETy == Type::getInt8Ty(CPV->getContext())) {
      unsigned char c = (unsigned char)cast<ConstantInt>(CPV)->getZExtValue();
      ConvertIntToBytes<>(ptr, c);
      aggBuffer->addBytes(ptr, 1, Bytes);
    } else if (ETy == Type::getInt16Ty(CPV->getContext())) {
      short int16 = (short)cast<ConstantInt>(CPV)->getZExtValue();
      ConvertIntToBytes<>(ptr, int16);
      aggBuffer->addBytes(ptr, 2, Bytes);
    } else if (ETy == Type::getInt32Ty(CPV->getContext())) {
      if (const ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
        int int32 = (int)(constInt->getZExtValue());
        ConvertIntToBytes<>(ptr, int32);
        aggBuffer->addBytes(ptr, 4, Bytes);
        break;
      } else if (const auto *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
        if (const auto *constInt = dyn_cast_or_null<ConstantInt>(
                ConstantFoldConstant(Cexpr, DL))) {
          int int32 = (int)(constInt->getZExtValue());
          ConvertIntToBytes<>(ptr, int32);
          aggBuffer->addBytes(ptr, 4, Bytes);
          break;
        }
        if (Cexpr->getOpcode() == Instruction::PtrToInt) {
          Value *v = Cexpr->getOperand(0)->stripPointerCasts();
          aggBuffer->addSymbol(v, Cexpr->getOperand(0));
          aggBuffer->addZeros(4);
          break;
        }
      }
      llvm_unreachable("unsupported integer const type");
    } else if (ETy == Type::getInt64Ty(CPV->getContext())) {
      if (const ConstantInt *constInt = dyn_cast<ConstantInt>(CPV)) {
        long long int64 = (long long)(constInt->getZExtValue());
        ConvertIntToBytes<>(ptr, int64);
        aggBuffer->addBytes(ptr, 8, Bytes);
        break;
      } else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
        if (const auto *constInt = dyn_cast_or_null<ConstantInt>(
                ConstantFoldConstant(Cexpr, DL))) {
          long long int64 = (long long)(constInt->getZExtValue());
          ConvertIntToBytes<>(ptr, int64);
          aggBuffer->addBytes(ptr, 8, Bytes);
          break;
        }
        if (Cexpr->getOpcode() == Instruction::PtrToInt) {
          Value *v = Cexpr->getOperand(0)->stripPointerCasts();
          aggBuffer->addSymbol(v, Cexpr->getOperand(0));
          aggBuffer->addZeros(8);
          break;
        }
      }
      llvm_unreachable("unsupported integer const type");
    } else
      llvm_unreachable("unsupported integer const type");
    break;
  }
  case Type::HalfTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID: {
    const ConstantFP *CFP = cast<ConstantFP>(CPV);
    Type *Ty = CFP->getType();
    if (Ty == Type::getHalfTy(CPV->getContext())) {
      APInt API = CFP->getValueAPF().bitcastToAPInt();
      uint16_t float16 = API.getLoBits(16).getZExtValue();
      ConvertIntToBytes<>(ptr, float16);
      aggBuffer->addBytes(ptr, 2, Bytes);
    } else if (Ty == Type::getFloatTy(CPV->getContext())) {
      float float32 = (float) CFP->getValueAPF().convertToFloat();
      ConvertFloatToBytes(ptr, float32);
      aggBuffer->addBytes(ptr, 4, Bytes);
    } else if (Ty == Type::getDoubleTy(CPV->getContext())) {
      double float64 = CFP->getValueAPF().convertToDouble();
      ConvertDoubleToBytes(ptr, float64);
      aggBuffer->addBytes(ptr, 8, Bytes);
    } else {
      llvm_unreachable("unsupported fp const type");
    }
    break;
  }
  case Type::PointerTyID: {
    if (const GlobalValue *GVar = dyn_cast<GlobalValue>(CPV)) {
      aggBuffer->addSymbol(GVar, GVar);
    } else if (const ConstantExpr *Cexpr = dyn_cast<ConstantExpr>(CPV)) {
      const Value *v = Cexpr->stripPointerCasts();
      aggBuffer->addSymbol(v, Cexpr);
    }
    unsigned int s = DL.getTypeAllocSize(CPV->getType());
    aggBuffer->addZeros(s);
    break;
  }

  case Type::ArrayTyID:
  case Type::VectorTyID:
  case Type::StructTyID: {
    if (isa<ConstantAggregate>(CPV) || isa<ConstantDataSequential>(CPV)) {
      int ElementSize = DL.getTypeAllocSize(CPV->getType());
      bufferAggregateConstant(CPV, aggBuffer);
      if (Bytes > ElementSize)
        aggBuffer->addZeros(Bytes - ElementSize);
    } else if (isa<ConstantAggregateZero>(CPV))
      aggBuffer->addZeros(Bytes);
    else
      llvm_unreachable("Unexpected Constant type");
    break;
  }

  default:
    llvm_unreachable("unsupported type");
  }
}
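// Worked example (assuming AggBuffer::addBytes zero-fills up to the Bytes it
// is given, as the call sites above imply): an i16 constant 0x1234 buffered
// into a 4-byte slot contributes 0x34 0x12 followed by two bytes of padding.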

void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
                                              AggBuffer *aggBuffer) {
  const DataLayout &DL = getDataLayout();
  int Bytes;

  // Integers of arbitrary width
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(CPV)) {
    APInt Val = CI->getValue();
    for (unsigned I = 0, E = DL.getTypeAllocSize(CPV->getType()); I < E; ++I) {
      uint8_t Byte = Val.getLoBits(8).getZExtValue();
      aggBuffer->addBytes(&Byte, 1, 1);
      Val.lshrInPlace(8);
    }
    return;
  }

  // Old constants
  if (isa<ConstantArray>(CPV) || isa<ConstantVector>(CPV)) {
    if (CPV->getNumOperands())
      for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i)
        bufferLEByte(cast<Constant>(CPV->getOperand(i)), 0, aggBuffer);
    return;
  }

  if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(CPV)) {
    if (CDS->getNumElements())
      for (unsigned i = 0; i < CDS->getNumElements(); ++i)
        bufferLEByte(cast<Constant>(CDS->getElementAsConstant(i)), 0,
                     aggBuffer);
    return;
  }

  if (isa<ConstantStruct>(CPV)) {
    if (CPV->getNumOperands()) {
      StructType *ST = cast<StructType>(CPV->getType());
      for (unsigned i = 0, e = CPV->getNumOperands(); i != e; ++i) {
        if (i == (e - 1))
          Bytes = DL.getStructLayout(ST)->getElementOffset(0) +
                  DL.getTypeAllocSize(ST) -
                  DL.getStructLayout(ST)->getElementOffset(i);
        else
          Bytes = DL.getStructLayout(ST)->getElementOffset(i + 1) -
                  DL.getStructLayout(ST)->getElementOffset(i);
        bufferLEByte(cast<Constant>(CPV->getOperand(i)), Bytes, aggBuffer);
      }
    }
    return;
  }
  llvm_unreachable("unsupported constant type in bufferAggregateConstant()");
}
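// Illustrative layout example: for a ConstantStruct of type { i32, i8 } with
// the usual 4-byte alignment, the last field gets
//   Bytes = elementOffset(0) + allocSize - elementOffset(1) = 0 + 8 - 4 = 4,
// so the single i8 byte is emitted with three bytes of tail padding, matching
// the in-memory size of the struct.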

/// lowerConstantForGV - Return an MCExpr for the given Constant. This is mostly
/// a copy from AsmPrinter::lowerConstant, except customized to only handle
/// expressions that are representable in PTX and create
/// NVPTXGenericMCSymbolRefExpr nodes for addrspacecast instructions.
const MCExpr *
NVPTXAsmPrinter::lowerConstantForGV(const Constant *CV, bool ProcessingGeneric) {
  MCContext &Ctx = OutContext;

  if (CV->isNullValue() || isa<UndefValue>(CV))
    return MCConstantExpr::create(0, Ctx);

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(CV))
    return MCConstantExpr::create(CI->getZExtValue(), Ctx);

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
    const MCSymbolRefExpr *Expr =
        MCSymbolRefExpr::create(getSymbol(GV), Ctx);
    if (ProcessingGeneric) {
      return NVPTXGenericMCSymbolRefExpr::create(Expr, Ctx);
    } else {
      return Expr;
    }
  }

  const ConstantExpr *CE = dyn_cast<ConstantExpr>(CV);
  if (!CE) {
    llvm_unreachable("Unknown constant value to lower!");
  }

  switch (CE->getOpcode()) {
  default:
    // If the code isn't optimized, there may be outstanding folding
    // opportunities. Attempt to fold the expression using DataLayout as a
    // last resort before giving up.
    if (Constant *C = ConstantFoldConstant(CE, getDataLayout()))
      if (C != CE)
        return lowerConstantForGV(C, ProcessingGeneric);

    // Otherwise report the problem to the user.
    {
      std::string S;
      raw_string_ostream OS(S);
      OS << "Unsupported expression in static initializer: ";
      CE->printAsOperand(OS, /*PrintType=*/false,
                         !MF ? nullptr : MF->getFunction().getParent());
      report_fatal_error(OS.str());
    }

  case Instruction::AddrSpaceCast: {
    // Strip the addrspacecast and pass along the operand
    PointerType *DstTy = cast<PointerType>(CE->getType());
    if (DstTy->getAddressSpace() == 0) {
      return lowerConstantForGV(cast<const Constant>(CE->getOperand(0)), true);
    }
    std::string S;
    raw_string_ostream OS(S);
    OS << "Unsupported expression in static initializer: ";
    CE->printAsOperand(OS, /*PrintType=*/ false,
                       !MF ? nullptr : MF->getFunction().getParent());
    report_fatal_error(OS.str());
  }

  case Instruction::GetElementPtr: {
    const DataLayout &DL = getDataLayout();

    // Generate a symbolic expression for the byte address
    APInt OffsetAI(DL.getPointerTypeSizeInBits(CE->getType()), 0);
    cast<GEPOperator>(CE)->accumulateConstantOffset(DL, OffsetAI);

    const MCExpr *Base = lowerConstantForGV(CE->getOperand(0),
                                            ProcessingGeneric);
    if (!OffsetAI)
      return Base;

    int64_t Offset = OffsetAI.getSExtValue();
    return MCBinaryExpr::createAdd(Base, MCConstantExpr::create(Offset, Ctx),
                                   Ctx);
  }

  case Instruction::Trunc:
    // We emit the value and depend on the assembler to truncate the generated
    // expression properly. This is important for differences between
    // blockaddress labels. Since the two labels are in the same function, it
    // is reasonable to treat their delta as a 32-bit value.
    LLVM_FALLTHROUGH;
  case Instruction::BitCast:
    return lowerConstantForGV(CE->getOperand(0), ProcessingGeneric);

  case Instruction::IntToPtr: {
    const DataLayout &DL = getDataLayout();

    // Handle casts to pointers by changing them into casts to the appropriate
    // integer type. This promotes constant folding and simplifies this code.
    Constant *Op = CE->getOperand(0);
    Op = ConstantExpr::getIntegerCast(Op, DL.getIntPtrType(CV->getType()),
                                      false/*ZExt*/);
    return lowerConstantForGV(Op, ProcessingGeneric);
  }

  case Instruction::PtrToInt: {
    const DataLayout &DL = getDataLayout();

    // Support only foldable casts to/from pointers that can be eliminated by
    // changing the pointer to the appropriately sized integer type.
    Constant *Op = CE->getOperand(0);
    Type *Ty = CE->getType();

    const MCExpr *OpExpr = lowerConstantForGV(Op, ProcessingGeneric);

    // We can emit the pointer value into this slot if the slot is an
    // integer slot equal to the size of the pointer.
    if (DL.getTypeAllocSize(Ty) == DL.getTypeAllocSize(Op->getType()))
      return OpExpr;

    // Otherwise the pointer is smaller than the resultant integer, mask off
    // the high bits so we are sure to get a proper truncation if the input is
    // a constant expr.
    unsigned InBits = DL.getTypeAllocSizeInBits(Op->getType());
    const MCExpr *MaskExpr = MCConstantExpr::create(~0ULL >> (64-InBits), Ctx);
    return MCBinaryExpr::createAnd(OpExpr, MaskExpr, Ctx);
  }

  // The MC library also has a right-shift operator, but it isn't consistently
  // signed or unsigned between different targets.
  case Instruction::Add: {
    const MCExpr *LHS = lowerConstantForGV(CE->getOperand(0), ProcessingGeneric);
    const MCExpr *RHS = lowerConstantForGV(CE->getOperand(1), ProcessingGeneric);
    switch (CE->getOpcode()) {
    default: llvm_unreachable("Unknown binary operator constant cast expr");
    case Instruction::Add: return MCBinaryExpr::createAdd(LHS, RHS, Ctx);
    }
  }
  }
}
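// Illustrative example for the GetElementPtr case above: an initializer such
// as getelementptr ([4 x i32], [4 x i32]* @arr, i32 0, i32 3) accumulates a
// constant byte offset of 12, so it lowers to add(arr, 12) and is later
// printed by printMCExpr as "arr+12".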

// Copy of MCExpr::print customized for NVPTX
void NVPTXAsmPrinter::printMCExpr(const MCExpr &Expr, raw_ostream &OS) {
  switch (Expr.getKind()) {
  case MCExpr::Target:
    return cast<MCTargetExpr>(&Expr)->printImpl(OS, MAI);
  case MCExpr::Constant:
    OS << cast<MCConstantExpr>(Expr).getValue();
    return;

  case MCExpr::SymbolRef: {
    const MCSymbolRefExpr &SRE = cast<MCSymbolRefExpr>(Expr);
    const MCSymbol &Sym = SRE.getSymbol();
    Sym.print(OS, MAI);
    return;
  }

  case MCExpr::Unary: {
    const MCUnaryExpr &UE = cast<MCUnaryExpr>(Expr);
    switch (UE.getOpcode()) {
    case MCUnaryExpr::LNot:  OS << '!'; break;
    case MCUnaryExpr::Minus: OS << '-'; break;
    case MCUnaryExpr::Not:   OS << '~'; break;
    case MCUnaryExpr::Plus:  OS << '+'; break;
    }
    printMCExpr(*UE.getSubExpr(), OS);
    return;
  }

  case MCExpr::Binary: {
    const MCBinaryExpr &BE = cast<MCBinaryExpr>(Expr);

    // Only print parens around the LHS if it is non-trivial.
    if (isa<MCConstantExpr>(BE.getLHS()) || isa<MCSymbolRefExpr>(BE.getLHS()) ||
        isa<NVPTXGenericMCSymbolRefExpr>(BE.getLHS())) {
      printMCExpr(*BE.getLHS(), OS);
    } else {
      OS << '(';
      printMCExpr(*BE.getLHS(), OS);
      OS << ')';
    }

    switch (BE.getOpcode()) {
    case MCBinaryExpr::Add:
      // Print "X-42" instead of "X+-42".
      if (const MCConstantExpr *RHSC = dyn_cast<MCConstantExpr>(BE.getRHS())) {
        if (RHSC->getValue() < 0) {
          OS << RHSC->getValue();
          return;
        }
      }

      OS << '+';
      break;
    default: llvm_unreachable("Unhandled binary operator");
    }

    // Only print parens around the RHS if it is non-trivial.
    if (isa<MCConstantExpr>(BE.getRHS()) || isa<MCSymbolRefExpr>(BE.getRHS())) {
      printMCExpr(*BE.getRHS(), OS);
    } else {
      OS << '(';
      printMCExpr(*BE.getRHS(), OS);
      OS << ')';
    }
    return;
  }
  }

  llvm_unreachable("Invalid expression kind!");
}
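// Examples of the rules above (illustrative): a symbol plus a positive
// constant prints as "sym+8"; a negative constant folds into the sign and
// prints as "sym-4"; a non-trivial left operand is parenthesized, giving
// "(sym+8)+16".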

/// PrintAsmOperand - Print out an operand for an inline asm expression.
///
bool NVPTXAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                      unsigned AsmVariant,
                                      const char *ExtraCode, raw_ostream &O) {
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      // See if this is a generic print operand
      return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
    case 'r':
      break;
    }
  }

  printOperand(MI, OpNo, O);

  return false;
}
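// For reference (illustrative): an inline-asm template operand written as
// "${0:r}" reaches this function with ExtraCode == "r" and simply falls
// through to printOperand, so a virtual register prints as its per-class PTX
// name (e.g. "%r1") with no extra decoration.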

bool NVPTXAsmPrinter::PrintAsmMemoryOperand(
    const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant,
    const char *ExtraCode, raw_ostream &O) {
  if (ExtraCode && ExtraCode[0])
    return true; // Unknown modifier

  O << '[';
  printMemOperand(MI, OpNo, O);
  O << ']';

  return false;
}

void NVPTXAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
                                   raw_ostream &O, const char *Modifier) {
  const MachineOperand &MO = MI->getOperand(opNum);
  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
      if (MO.getReg() == NVPTX::VRDepot)
        O << DEPOTNAME << getFunctionNumber();
      else
        O << NVPTXInstPrinter::getRegisterName(MO.getReg());
    } else {
      emitVirtualRegister(MO.getReg(), O);
    }
    return;

  case MachineOperand::MO_Immediate:
    if (!Modifier)
      O << MO.getImm();
    else if (strstr(Modifier, "vec") == Modifier)
      printVecModifiedImmediate(MO, Modifier, O);
    else
      llvm_unreachable(
          "Don't know how to handle modifier on immediate operand");
    return;

  case MachineOperand::MO_FPImmediate:
    printFPConstant(MO.getFPImm(), O);
    break;

  case MachineOperand::MO_GlobalAddress:
    getSymbol(MO.getGlobal())->print(O, MAI);
    break;

  case MachineOperand::MO_MachineBasicBlock:
    MO.getMBB()->getSymbol()->print(O, MAI);
    return;

  default:
    llvm_unreachable("Operand type not supported.");
  }
}

void NVPTXAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
                                      raw_ostream &O, const char *Modifier) {
  printOperand(MI, opNum, O);

  if (Modifier && strcmp(Modifier, "add") == 0) {
    O << ", ";
    printOperand(MI, opNum + 1, O);
  } else {
    if (MI->getOperand(opNum + 1).isImm() &&
        MI->getOperand(opNum + 1).getImm() == 0)
      return; // don't print ',0' or '+0'
    O << "+";
    printOperand(MI, opNum + 1, O);
  }
}
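// Examples (illustrative): with no modifier, a base register plus a zero
// immediate prints as just "%SP" and a nonzero offset as "%SP+8"; with the
// "add" modifier the two operands print as "%SP, 8". PrintAsmMemoryOperand
// above wraps whichever form in brackets, e.g. "[%SP+8]".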

// Force static initialization.
extern "C" void LLVMInitializeNVPTXAsmPrinter() {
  RegisterAsmPrinter<NVPTXAsmPrinter> X(getTheNVPTXTarget32());
  RegisterAsmPrinter<NVPTXAsmPrinter> Y(getTheNVPTXTarget64());
}