1 //===-- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information--------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 #include "AMDGPUBaseInfo.h"
10 #include "AMDGPU.h"
11 #include "llvm/IR/LLVMContext.h"
12 #include "llvm/IR/Function.h"
13 #include "llvm/IR/GlobalValue.h"
14 #include "llvm/MC/MCContext.h"
15 #include "llvm/MC/MCSectionELF.h"
16 #include "llvm/MC/MCSubtargetInfo.h"
17 #include "llvm/MC/SubtargetFeature.h"
18 
19 #define GET_SUBTARGETINFO_ENUM
20 #include "AMDGPUGenSubtargetInfo.inc"
21 #undef GET_SUBTARGETINFO_ENUM
22 
23 #define GET_REGINFO_ENUM
24 #include "AMDGPUGenRegisterInfo.inc"
25 #undef GET_REGINFO_ENUM
26 
27 namespace llvm {
28 namespace AMDGPU {
29 
getIsaVersion(const FeatureBitset & Features)30 IsaVersion getIsaVersion(const FeatureBitset &Features) {
31 
32   if (Features.test(FeatureISAVersion7_0_0))
33     return {7, 0, 0};
34 
35   if (Features.test(FeatureISAVersion7_0_1))
36     return {7, 0, 1};
37 
38   if (Features.test(FeatureISAVersion8_0_0))
39     return {8, 0, 0};
40 
41   if (Features.test(FeatureISAVersion8_0_1))
42     return {8, 0, 1};
43 
44   return {0, 0, 0};
45 }
46 
initDefaultAMDKernelCodeT(amd_kernel_code_t & Header,const FeatureBitset & Features)47 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
48                                const FeatureBitset &Features) {
49 
50   IsaVersion ISA = getIsaVersion(Features);
51 
52   memset(&Header, 0, sizeof(Header));
53 
54   Header.amd_kernel_code_version_major = 1;
55   Header.amd_kernel_code_version_minor = 0;
56   Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
57   Header.amd_machine_version_major = ISA.Major;
58   Header.amd_machine_version_minor = ISA.Minor;
59   Header.amd_machine_version_stepping = ISA.Stepping;
60   Header.kernel_code_entry_byte_offset = sizeof(Header);
61   // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
62   Header.wavefront_size = 6;
63   // These alignment values are specified in powers of two, so alignment =
64   // 2^n.  The minimum alignment is 2^4 = 16.
65   Header.kernarg_segment_alignment = 4;
66   Header.group_segment_alignment = 4;
67   Header.private_segment_alignment = 4;
68 }
69 
getHSATextSection(MCContext & Ctx)70 MCSection *getHSATextSection(MCContext &Ctx) {
71   return Ctx.getELFSection(".hsatext", ELF::SHT_PROGBITS,
72                            ELF::SHF_ALLOC | ELF::SHF_WRITE |
73                            ELF::SHF_EXECINSTR |
74                            ELF::SHF_AMDGPU_HSA_AGENT |
75                            ELF::SHF_AMDGPU_HSA_CODE);
76 }
77 
getHSADataGlobalAgentSection(MCContext & Ctx)78 MCSection *getHSADataGlobalAgentSection(MCContext &Ctx) {
79   return Ctx.getELFSection(".hsadata_global_agent", ELF::SHT_PROGBITS,
80                            ELF::SHF_ALLOC | ELF::SHF_WRITE |
81                            ELF::SHF_AMDGPU_HSA_GLOBAL |
82                            ELF::SHF_AMDGPU_HSA_AGENT);
83 }
84 
getHSADataGlobalProgramSection(MCContext & Ctx)85 MCSection *getHSADataGlobalProgramSection(MCContext &Ctx) {
86   return  Ctx.getELFSection(".hsadata_global_program", ELF::SHT_PROGBITS,
87                             ELF::SHF_ALLOC | ELF::SHF_WRITE |
88                             ELF::SHF_AMDGPU_HSA_GLOBAL);
89 }
90 
getHSARodataReadonlyAgentSection(MCContext & Ctx)91 MCSection *getHSARodataReadonlyAgentSection(MCContext &Ctx) {
92   return Ctx.getELFSection(".hsarodata_readonly_agent", ELF::SHT_PROGBITS,
93                            ELF::SHF_ALLOC | ELF::SHF_AMDGPU_HSA_READONLY |
94                            ELF::SHF_AMDGPU_HSA_AGENT);
95 }
96 
isGroupSegment(const GlobalValue * GV)97 bool isGroupSegment(const GlobalValue *GV) {
98   return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
99 }
100 
isGlobalSegment(const GlobalValue * GV)101 bool isGlobalSegment(const GlobalValue *GV) {
102   return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
103 }
104 
isReadOnlySegment(const GlobalValue * GV)105 bool isReadOnlySegment(const GlobalValue *GV) {
106   return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
107 }
108 
// Name of the function attribute whose string value encodes the shader
// type; parsed as an integer by getShaderType() below.
static const char ShaderTypeAttribute[] = "ShaderType";
110 
getShaderType(const Function & F)111 unsigned getShaderType(const Function &F) {
112   Attribute A = F.getFnAttribute(ShaderTypeAttribute);
113   unsigned ShaderType = ShaderType::COMPUTE;
114 
115   if (A.isStringAttribute()) {
116     StringRef Str = A.getValueAsString();
117     if (Str.getAsInteger(0, ShaderType)) {
118       LLVMContext &Ctx = F.getContext();
119       Ctx.emitError("can't parse shader type");
120     }
121   }
122   return ShaderType;
123 }
124 
isSI(const MCSubtargetInfo & STI)125 bool isSI(const MCSubtargetInfo &STI) {
126   return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
127 }
128 
isCI(const MCSubtargetInfo & STI)129 bool isCI(const MCSubtargetInfo &STI) {
130   return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
131 }
132 
isVI(const MCSubtargetInfo & STI)133 bool isVI(const MCSubtargetInfo &STI) {
134   return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
135 }
136 
getMCReg(unsigned Reg,const MCSubtargetInfo & STI)137 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
138 
139   switch(Reg) {
140   default: break;
141   case AMDGPU::FLAT_SCR:
142     assert(!isSI(STI));
143     return isCI(STI) ? AMDGPU::FLAT_SCR_ci : AMDGPU::FLAT_SCR_vi;
144 
145   case AMDGPU::FLAT_SCR_LO:
146     assert(!isSI(STI));
147     return isCI(STI) ? AMDGPU::FLAT_SCR_LO_ci : AMDGPU::FLAT_SCR_LO_vi;
148 
149   case AMDGPU::FLAT_SCR_HI:
150     assert(!isSI(STI));
151     return isCI(STI) ? AMDGPU::FLAT_SCR_HI_ci : AMDGPU::FLAT_SCR_HI_vi;
152   }
153   return Reg;
154 }
155 
156 } // End namespace AMDGPU
157 } // End namespace llvm
158