//===-- RISCVAsmBackend.cpp - RISCV Assembler Backend ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVFixupKinds.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class RISCVAsmBackend : public MCAsmBackend {
  const MCSubtargetInfo &STI;
  uint8_t OSABI;
  bool Is64Bit;

public:
  RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit)
      : MCAsmBackend(support::little), STI(STI), OSABI(OSABI),
        Is64Bit(Is64Bit) {}
  ~RISCVAsmBackend() override {}

  // Generate diff expression relocations if the relax feature is enabled;
  // otherwise it is safe for the assembler to calculate these internally.
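  // For example, with relaxation enabled a label difference such as
  // `.quad .Lend-.Lbegin` cannot be folded to a constant at assembly time,
  // because relaxation may shrink the code between the two labels; it is
  // instead described to the linker as a paired relocation
  // (R_RISCV_ADD64/R_RISCV_SUB64).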
  bool requiresDiffExpressionRelocations() const override {
    return STI.getFeatureBits()[RISCV::FeatureRelax];
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override;

  // If linker relaxation is enabled, always emit relocations even if the fixup
  // can be resolved. This is necessary for correctness as offsets may change
  // during relaxation.
  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override {
    return STI.getFeatureBits()[RISCV::FeatureRelax];
  }

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    llvm_unreachable("Handled by fixupNeedsRelaxationAdvanced");
  }

  bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
                                    uint64_t Value,
                                    const MCRelaxableFragment *DF,
                                    const MCAsmLayout &Layout,
                                    const bool WasForced) const override;

  unsigned getNumFixupKinds() const override {
    return RISCV::NumTargetFixupKinds;
  }

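  // Each table entry below records where the fixup lives inside the encoded
  // instruction: e.g. fixup_riscv_hi20 occupies 20 bits starting at bit 12,
  // which is the U-type immediate field imm[31:12]; applyFixup() shifts the
  // adjusted value left by this offset before patching the bytes.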
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in RISCVFixupKinds.h.
      //
      // name                      offset bits  flags
      { "fixup_riscv_hi20",         12,    20,  0 },
      { "fixup_riscv_lo12_i",       20,    12,  0 },
      { "fixup_riscv_lo12_s",        0,    32,  0 },
      { "fixup_riscv_pcrel_hi20",   12,    20,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_pcrel_lo12_i", 20,    12,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_pcrel_lo12_s",  0,    32,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_jal",          12,    20,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_branch",        0,    32,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_rvc_jump",      2,    11,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_rvc_branch",    0,    16,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_call",          0,    64,  MCFixupKindInfo::FKF_IsPCRel },
      { "fixup_riscv_relax",         0,     0,  0 }
    };
    static_assert((array_lengthof(Infos)) == RISCV::NumTargetFixupKinds,
                  "Not all fixup kinds added to Infos array");

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;
  unsigned getRelaxedOpcode(unsigned Op) const;

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;

  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
};

bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
                                                   bool Resolved,
                                                   uint64_t Value,
                                                   const MCRelaxableFragment *DF,
                                                   const MCAsmLayout &Layout,
                                                   const bool WasForced) const {
  // Return true if the symbol is actually unresolved.
  // Resolved may be false even for a resolvable fixup when
  // shouldForceRelocation returns true, so use !WasForced to distinguish a
  // genuinely unresolved symbol from one forced out by shouldForceRelocation.
  if (!Resolved && !WasForced)
    return true;

  int64_t Offset = int64_t(Value);
  switch ((unsigned)Fixup.getKind()) {
  default:
    return false;
  case RISCV::fixup_riscv_rvc_branch:
    // For compressed branch instructions the immediate must be
    // in the range [-256, 254].
    return Offset > 254 || Offset < -256;
  case RISCV::fixup_riscv_rvc_jump:
    // For compressed jump instructions the immediate must be
    // in the range [-2048, 2046].
    return Offset > 2046 || Offset < -2048;
  }
}

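// For example, a c.beqz whose target ends up 300 bytes away exceeds the
// [-256, 254] range above, so the fragment is relaxed and the instruction is
// rewritten to its full-size beq form below.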
void RISCVAsmBackend::relaxInstruction(const MCInst &Inst,
                                       const MCSubtargetInfo &STI,
                                       MCInst &Res) const {
  // TODO: replace this with a call to the auto-generated uncompressinstr()
  // function.
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Opcode not expected!");
  case RISCV::C_BEQZ:
    // c.beqz $rs1, $imm -> beq $rs1, X0, $imm.
    Res.setOpcode(RISCV::BEQ);
    Res.addOperand(Inst.getOperand(0));
    Res.addOperand(MCOperand::createReg(RISCV::X0));
    Res.addOperand(Inst.getOperand(1));
    break;
  case RISCV::C_BNEZ:
    // c.bnez $rs1, $imm -> bne $rs1, X0, $imm.
    Res.setOpcode(RISCV::BNE);
    Res.addOperand(Inst.getOperand(0));
    Res.addOperand(MCOperand::createReg(RISCV::X0));
    Res.addOperand(Inst.getOperand(1));
    break;
  case RISCV::C_J:
    // c.j $imm -> jal X0, $imm.
    Res.setOpcode(RISCV::JAL);
    Res.addOperand(MCOperand::createReg(RISCV::X0));
    Res.addOperand(Inst.getOperand(0));
    break;
  case RISCV::C_JAL:
    // c.jal $imm -> jal X1, $imm.
    Res.setOpcode(RISCV::JAL);
    Res.addOperand(MCOperand::createReg(RISCV::X1));
    Res.addOperand(Inst.getOperand(0));
    break;
  }
}

// Given a compressed control flow instruction this function returns
// the expanded instruction.
unsigned RISCVAsmBackend::getRelaxedOpcode(unsigned Op) const {
  switch (Op) {
  default:
    return Op;
  case RISCV::C_BEQZ:
    return RISCV::BEQ;
  case RISCV::C_BNEZ:
    return RISCV::BNE;
  case RISCV::C_J:
  case RISCV::C_JAL: // fall through.
    return RISCV::JAL;
  }
}

bool RISCVAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                        const MCSubtargetInfo &STI) const {
  return getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode();
}

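// Pad with 4-byte nops as far as possible, then finish with a single 2-byte
// c.nop when the C extension is available. For example, Count == 10 with RVC
// becomes two 4-byte nops followed by one c.nop; without RVC, any Count that
// is not a multiple of 4 is rejected. "\x13\0\0\0" is the little-endian
// encoding of addi x0, x0, 0, and "\x01\0" encodes c.nop.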
bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC];
  unsigned MinNopLen = HasStdExtC ? 2 : 4;

  if ((Count % MinNopLen) != 0)
    return false;

  // The canonical nop on RISC-V is addi x0, x0, 0.
  uint64_t Nop32Count = Count / 4;
  for (uint64_t i = Nop32Count; i != 0; --i)
    OS.write("\x13\0\0\0", 4);

  // The canonical nop on RVC is c.nop.
  if (HasStdExtC) {
    uint64_t Nop16Count = (Count - Nop32Count * 4) / 2;
    for (uint64_t i = Nop16Count; i != 0; --i)
      OS.write("\x01\0", 2);
  }

  return true;
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  unsigned Kind = Fixup.getKind();
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
    return Value;
  case RISCV::fixup_riscv_lo12_i:
  case RISCV::fixup_riscv_pcrel_lo12_i:
    return Value & 0xfff;
  case RISCV::fixup_riscv_lo12_s:
  case RISCV::fixup_riscv_pcrel_lo12_s:
    return (((Value >> 5) & 0x7f) << 25) | ((Value & 0x1f) << 7);
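  // The low 12 bits are sign-extended when consumed, so the hi20 part must
  // absorb the borrow: e.g. for Value == 0xfff, lo12 sign-extends to -1 and
  // hi20 becomes (0xfff + 0x800) >> 12 == 1, giving 0x1000 - 1 == 0xfff.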
  case RISCV::fixup_riscv_hi20:
  case RISCV::fixup_riscv_pcrel_hi20:
    // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
    return ((Value + 0x800) >> 12) & 0xfffff;
  case RISCV::fixup_riscv_jal: {
    if (!isInt<21>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
    // Need to produce imm[19|10:1|11|19:12] from the 21-bit Value.
    unsigned Sbit = (Value >> 20) & 0x1;
    unsigned Hi8 = (Value >> 12) & 0xff;
    unsigned Mid1 = (Value >> 11) & 0x1;
    unsigned Lo10 = (Value >> 1) & 0x3ff;
    // Inst{31} = Sbit;
    // Inst{30-21} = Lo10;
    // Inst{20} = Mid1;
    // Inst{19-12} = Hi8;
    Value = (Sbit << 19) | (Lo10 << 9) | (Mid1 << 8) | Hi8;
    return Value;
  }
  case RISCV::fixup_riscv_branch: {
    if (!isInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
    // Need to extract imm[12], imm[10:5], imm[4:1], imm[11] from the 13-bit
    // Value.
    unsigned Sbit = (Value >> 12) & 0x1;
    unsigned Hi1 = (Value >> 11) & 0x1;
    unsigned Mid6 = (Value >> 5) & 0x3f;
    unsigned Lo4 = (Value >> 1) & 0xf;
    // Inst{31} = Sbit;
    // Inst{30-25} = Mid6;
    // Inst{11-8} = Lo4;
    // Inst{7} = Hi1;
    Value = (Sbit << 31) | (Mid6 << 25) | (Lo4 << 8) | (Hi1 << 7);
    return Value;
  }
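  // The call fixup covers 64 bits: the auipc word in bits 0-31 and the
  // following jalr word in bits 32-63. UpperImm is already aligned to the
  // U-type imm[31:12] field of the auipc; LowerImm << 20 places the low 12
  // bits in the jalr's I-type immediate field, and the extra << 32 moves it
  // into the second instruction word.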
  case RISCV::fixup_riscv_call: {
    // Jalr adds the sign-extended 12-bit LowerImm to UpperImm, so add 0x800
    // before extracting the upper bits to compensate for that sign extension.
    uint64_t UpperImm = (Value + 0x800ULL) & 0xfffff000ULL;
    uint64_t LowerImm = Value & 0xfffULL;
    return UpperImm | ((LowerImm << 20) << 32);
  }
  case RISCV::fixup_riscv_rvc_jump: {
    // Need to produce offset[11|4|9:8|10|6|7|3:1|5] from the 11-bit Value.
    unsigned Bit11 = (Value >> 11) & 0x1;
    unsigned Bit4 = (Value >> 4) & 0x1;
    unsigned Bit9_8 = (Value >> 8) & 0x3;
    unsigned Bit10 = (Value >> 10) & 0x1;
    unsigned Bit6 = (Value >> 6) & 0x1;
    unsigned Bit7 = (Value >> 7) & 0x1;
    unsigned Bit3_1 = (Value >> 1) & 0x7;
    unsigned Bit5 = (Value >> 5) & 0x1;
    Value = (Bit11 << 10) | (Bit4 << 9) | (Bit9_8 << 7) | (Bit10 << 6) |
            (Bit6 << 5) | (Bit7 << 4) | (Bit3_1 << 1) | Bit5;
    return Value;
  }
  case RISCV::fixup_riscv_rvc_branch: {
    // Need to produce offset[8|4:3], [3-bit register field], offset[7:6|2:1|5]
    // from the 9-bit Value.
    unsigned Bit8 = (Value >> 8) & 0x1;
    unsigned Bit7_6 = (Value >> 6) & 0x3;
    unsigned Bit5 = (Value >> 5) & 0x1;
    unsigned Bit4_3 = (Value >> 3) & 0x3;
    unsigned Bit2_1 = (Value >> 1) & 0x3;
    Value = (Bit8 << 12) | (Bit4_3 << 10) | (Bit7_6 << 5) | (Bit2_1 << 3) |
            (Bit5 << 2);
    return Value;
  }
  }
}

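// Patch the fixup value into the fragment's bytes in place: the value is
// adjusted into the target encoding, shifted to the field's bit offset, then
// OR'd in byte by byte (little-endian, least significant byte first).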
void RISCVAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                 const MCValue &Target,
                                 MutableArrayRef<char> Data, uint64_t Value,
                                 bool IsResolved,
                                 const MCSubtargetInfo *STI) const {
  MCContext &Ctx = Asm.getContext();
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  if (!Value)
    return; // Doesn't change encoding.
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i) {
    Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

std::unique_ptr<MCObjectTargetWriter>
RISCVAsmBackend::createObjectTargetWriter() const {
  return createRISCVELFObjectWriter(OSABI, Is64Bit);
}

} // end anonymous namespace

MCAsmBackend *llvm::createRISCVAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new RISCVAsmBackend(STI, OSABI, TT.isArch64Bit());
}