xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp (revision e64bea71c21eb42e97aa615188ba91f6cce0d36d)
1 //===-- RISCVAsmBackend.cpp - RISC-V Assembler Backend --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "RISCVAsmBackend.h"
10 #include "RISCVFixupKinds.h"
11 #include "llvm/ADT/APInt.h"
12 #include "llvm/MC/MCAsmInfo.h"
13 #include "llvm/MC/MCAssembler.h"
14 #include "llvm/MC/MCContext.h"
15 #include "llvm/MC/MCELFObjectWriter.h"
16 #include "llvm/MC/MCExpr.h"
17 #include "llvm/MC/MCObjectWriter.h"
18 #include "llvm/MC/MCSymbol.h"
19 #include "llvm/MC/MCValue.h"
20 #include "llvm/Support/CommandLine.h"
21 #include "llvm/Support/EndianStream.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/LEB128.h"
24 #include "llvm/Support/raw_ostream.h"
25 
26 using namespace llvm;
27 
28 // Temporary workaround for old linkers that do not support ULEB128 relocations,
29 // which are abused by DWARF v5 DW_LLE_offset_pair/DW_RLE_offset_pair
30 // implemented in Clang/LLVM.
31 static cl::opt<bool> ULEB128Reloc(
32     "riscv-uleb128-reloc", cl::init(true), cl::Hidden,
33     cl::desc("Emit R_RISCV_SET_ULEB128/E_RISCV_SUB_ULEB128 if appropriate"));
34 
RISCVAsmBackend(const MCSubtargetInfo & STI,uint8_t OSABI,bool Is64Bit,const MCTargetOptions & Options)35 RISCVAsmBackend::RISCVAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI,
36                                  bool Is64Bit, const MCTargetOptions &Options)
37     : MCAsmBackend(llvm::endianness::little), STI(STI), OSABI(OSABI),
38       Is64Bit(Is64Bit), TargetOptions(Options) {
39   RISCVFeatures::validate(STI.getTargetTriple(), STI.getFeatureBits());
40 }
41 
// Map a relocation name as written in a `.reloc` directive to a fixup kind
// that forces emission of exactly that relocation type (encoded as
// FirstLiteralRelocationKind + ELF type). Recognizes all standard R_RISCV_*
// names, vendor-specific nonstandard relocation names, and a few BFD_RELOC_*
// aliases. Returns std::nullopt for unknown names or non-ELF object formats.
getFixupKind(StringRef Name) const42 std::optional<MCFixupKind> RISCVAsmBackend::getFixupKind(StringRef Name) const {
43   if (STI.getTargetTriple().isOSBinFormatELF()) {
44     unsigned Type;
45     Type = llvm::StringSwitch<unsigned>(Name)
// The .def files expand to one .Case per known relocation.
46 #define ELF_RELOC(NAME, ID) .Case(#NAME, ID)
47 #include "llvm/BinaryFormat/ELFRelocs/RISCV.def"
48 #undef ELF_RELOC
49 #define ELF_RISCV_NONSTANDARD_RELOC(_VENDOR, NAME, ID) .Case(#NAME, ID)
50 #include "llvm/BinaryFormat/ELFRelocs/RISCV_nonstandard.def"
51 #undef ELF_RISCV_NONSTANDARD_RELOC
52                .Case("BFD_RELOC_NONE", ELF::R_RISCV_NONE)
53                .Case("BFD_RELOC_32", ELF::R_RISCV_32)
54                .Case("BFD_RELOC_64", ELF::R_RISCV_64)
55                .Default(-1u);
// -1u is the miss sentinel; only recognized names produce a fixup kind.
56     if (Type != -1u)
57       return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
58   }
59   return std::nullopt;
60 }
61 
// Describe the bit-field layout (name, bit offset, width, flags) of each
// target-specific fixup kind. Raw-relocation kinds and generic MC kinds are
// delegated before the table is consulted.
getFixupKindInfo(MCFixupKind Kind) const62 MCFixupKindInfo RISCVAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
63   const static MCFixupKindInfo Infos[] = {
64       // This table *must* be in the order that the fixup_* kinds are defined in
65       // RISCVFixupKinds.h.
66       //
67       // name                      offset bits  flags
68       {"fixup_riscv_hi20", 12, 20, 0},
69       {"fixup_riscv_lo12_i", 20, 12, 0},
70       {"fixup_riscv_12_i", 20, 12, 0},
71       {"fixup_riscv_lo12_s", 0, 32, 0},
72       {"fixup_riscv_pcrel_hi20", 12, 20, 0},
73       {"fixup_riscv_pcrel_lo12_i", 20, 12, 0},
74       {"fixup_riscv_pcrel_lo12_s", 0, 32, 0},
75       {"fixup_riscv_jal", 12, 20, 0},
76       {"fixup_riscv_branch", 0, 32, 0},
77       {"fixup_riscv_rvc_jump", 2, 11, 0},
78       {"fixup_riscv_rvc_branch", 0, 16, 0},
79       {"fixup_riscv_rvc_imm", 0, 16, 0},
80       {"fixup_riscv_call", 0, 64, 0},
81       {"fixup_riscv_call_plt", 0, 64, 0},
82 
// Qualcomm (Xqci*) vendor fixups.
83       {"fixup_riscv_qc_e_branch", 0, 48, 0},
84       {"fixup_riscv_qc_e_32", 16, 32, 0},
85       {"fixup_riscv_qc_abs20_u", 0, 32, 0},
86       {"fixup_riscv_qc_e_call_plt", 0, 48, 0},
87 
88       // Andes fixups
89       {"fixup_riscv_nds_branch_10", 0, 32, 0},
90   };
// Compile-time guard that the table and RISCVFixupKinds.h stay in sync.
91   static_assert((std::size(Infos)) == RISCV::NumTargetFixupKinds,
92                 "Not all fixup kinds added to Infos array");
93 
94   // Fixup kinds from raw relocation types and .reloc directives force
95   // relocations and do not use these fields.
96   if (mc::isRelocation(Kind))
97     return {};
98 
// Generic (non-target) fixup kinds are described by the base class.
99   if (Kind < FirstTargetFixupKind)
100     return MCAsmBackend::getFixupKindInfo(Kind);
101 
102   assert(unsigned(Kind - FirstTargetFixupKind) < RISCV::NumTargetFixupKinds &&
103          "Invalid kind!");
104   return Infos[Kind - FirstTargetFixupKind];
105 }
106 
fixupNeedsRelaxationAdvanced(const MCFixup & Fixup,const MCValue &,uint64_t Value,bool Resolved) const107 bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
108                                                    const MCValue &,
109                                                    uint64_t Value,
110                                                    bool Resolved) const {
111   int64_t Offset = int64_t(Value);
112   auto Kind = Fixup.getKind();
113 
114   // Return true if the symbol is unresolved.
115   if (!Resolved)
116     return true;
117 
118   switch (Kind) {
119   default:
120     return false;
121   case RISCV::fixup_riscv_rvc_branch:
122     // For compressed branch instructions the immediate must be
123     // in the range [-256, 254].
124     return Offset > 254 || Offset < -256;
125   case RISCV::fixup_riscv_rvc_jump:
126     // For compressed jump instructions the immediate must be
127     // in the range [-2048, 2046].
128     return Offset > 2046 || Offset < -2048;
129   case RISCV::fixup_riscv_branch:
130   case RISCV::fixup_riscv_qc_e_branch:
131     // For conditional branch instructions the immediate must be
132     // in the range [-4096, 4094].
133     return Offset > 4094 || Offset < -4096;
134   case RISCV::fixup_riscv_jal:
135     // For jump instructions the immediate must be in the range
136     // [-1048576, 1048574]
137     return Offset > 1048574 || Offset < -1048576;
138   case RISCV::fixup_riscv_rvc_imm:
139     // This fixup can never be emitted as a relocation, so always needs to be
140     // relaxed.
141     return true;
142   }
143 }
144 
145 // Given a compressed control flow instruction this function returns
146 // the expanded instruction, or the original instruction code if no
147 // expansion is available.
//
// Relaxation proceeds one "step" at a time; callers may relax the result
// again if the next-larger form is still insufficient.
getRelaxedOpcode(unsigned Opcode,ArrayRef<MCOperand> Operands,const MCSubtargetInfo & STI)148 static unsigned getRelaxedOpcode(unsigned Opcode, ArrayRef<MCOperand> Operands,
149                                  const MCSubtargetInfo &STI) {
150   switch (Opcode) {
151   case RISCV::C_BEQZ:
152     return RISCV::BEQ;
153   case RISCV::C_BNEZ:
154     return RISCV::BNE;
155   case RISCV::C_J:
156   case RISCV::C_JAL: // fall through.
157     // This only relaxes one "step" - i.e. from C.J to JAL, not from C.J to
158     // QC.E.J, because we can always relax again if needed.
159     return RISCV::JAL;
160   case RISCV::C_LI:
161     if (!STI.hasFeature(RISCV::FeatureVendorXqcili))
162       break;
163     // We only need this because `QC.E.LI` can be compressed into a `C.LI`. This
164     // happens because the `simm6` MCOperandPredicate accepts bare symbols, and
165     // `QC.E.LI` is the only instruction that accepts bare symbols at parse-time
166     // and compresses to `C.LI`. `C.LI` does not itself accept bare symbols at
167     // parse time.
168     //
169     // If we have a bare symbol, we need to turn this back to a `QC.E.LI`, as we
170     // have no way to emit a relocation on a `C.LI` instruction.
171     return RISCV::QC_E_LI;
172   case RISCV::JAL: {
173     // We can only relax JAL if we have Xqcilb
174     if (!STI.hasFeature(RISCV::FeatureVendorXqcilb))
175       break;
176 
177     // And only if it is using X0 or X1 for rd.
178     MCRegister Reg = Operands[0].getReg();
179     if (Reg == RISCV::X0)
180       return RISCV::QC_E_J;
181     if (Reg == RISCV::X1)
182       return RISCV::QC_E_JAL;
183 
184     break;
185   }
// Conditional branches relax to "PseudoLong" forms (an inverted short branch
// around an unconditional jump), expanded later during encoding.
186   case RISCV::BEQ:
187     return RISCV::PseudoLongBEQ;
188   case RISCV::BNE:
189     return RISCV::PseudoLongBNE;
190   case RISCV::BLT:
191     return RISCV::PseudoLongBLT;
192   case RISCV::BGE:
193     return RISCV::PseudoLongBGE;
194   case RISCV::BLTU:
195     return RISCV::PseudoLongBLTU;
196   case RISCV::BGEU:
197     return RISCV::PseudoLongBGEU;
198   case RISCV::QC_BEQI:
199     return RISCV::PseudoLongQC_BEQI;
200   case RISCV::QC_BNEI:
201     return RISCV::PseudoLongQC_BNEI;
202   case RISCV::QC_BLTI:
203     return RISCV::PseudoLongQC_BLTI;
204   case RISCV::QC_BGEI:
205     return RISCV::PseudoLongQC_BGEI;
206   case RISCV::QC_BLTUI:
207     return RISCV::PseudoLongQC_BLTUI;
208   case RISCV::QC_BGEUI:
209     return RISCV::PseudoLongQC_BGEUI;
210   case RISCV::QC_E_BEQI:
211     return RISCV::PseudoLongQC_E_BEQI;
212   case RISCV::QC_E_BNEI:
213     return RISCV::PseudoLongQC_E_BNEI;
214   case RISCV::QC_E_BLTI:
215     return RISCV::PseudoLongQC_E_BLTI;
216   case RISCV::QC_E_BGEI:
217     return RISCV::PseudoLongQC_E_BGEI;
218   case RISCV::QC_E_BLTUI:
219     return RISCV::PseudoLongQC_E_BLTUI;
220   case RISCV::QC_E_BGEUI:
221     return RISCV::PseudoLongQC_E_BGEUI;
222   }
223 
224   // Returning the original opcode means we cannot relax the instruction.
225   return Opcode;
226 }
227 
// Rewrite \p Inst in place into its relaxed (longer-range) form, as chosen
// by getRelaxedOpcode. No-op under exact-assembly mode. Compressed branches
// are uncompressed via RISCVRVC; JAL and C.LI are rebuilt by hand because
// their QC.E.* counterparts are compression-only patterns.
relaxInstruction(MCInst & Inst,const MCSubtargetInfo & STI) const228 void RISCVAsmBackend::relaxInstruction(MCInst &Inst,
229                                        const MCSubtargetInfo &STI) const {
// Exact-assembly mode forbids any instruction transformation.
230   if (STI.hasFeature(RISCV::FeatureExactAssembly))
231     return;
232 
233   MCInst Res;
234   switch (Inst.getOpcode()) {
235   default:
236     llvm_unreachable("Opcode not expected!");
237   case RISCV::C_BEQZ:
238   case RISCV::C_BNEZ:
239   case RISCV::C_J:
240   case RISCV::C_JAL: {
241     [[maybe_unused]] bool Success = RISCVRVC::uncompress(Res, Inst, STI);
242     assert(Success && "Can't uncompress instruction");
// Sanity-check that uncompression agrees with getRelaxedOpcode.
243     assert(Res.getOpcode() ==
244                getRelaxedOpcode(Inst.getOpcode(), Inst.getOperands(), STI) &&
245            "Branch Relaxation Error");
246     break;
247   }
248   case RISCV::JAL: {
249     // This has to be written manually because the QC.E.J -> JAL is
250     // compression-only, so that it is not used when printing disassembly.
251     assert(STI.hasFeature(RISCV::FeatureVendorXqcilb) &&
252            "JAL is only relaxable with Xqcilb");
253     assert((Inst.getOperand(0).getReg() == RISCV::X0 ||
254             Inst.getOperand(0).getReg() == RISCV::X1) &&
255            "JAL only relaxable with rd=x0 or rd=x1");
// rd is implied by the QC.E.J/QC.E.JAL opcode, so only the target carries over.
256     Res.setOpcode(getRelaxedOpcode(Inst.getOpcode(), Inst.getOperands(), STI));
257     Res.addOperand(Inst.getOperand(1));
258     break;
259   }
260   case RISCV::C_LI: {
261     // This should only be hit when trying to relax a `C.LI` into a `QC.E.LI`
262     // because the `C.LI` has a bare symbol. We cannot use
263     // `RISCVRVC::uncompress` because it will use decompression patterns. The
264     // `QC.E.LI` compression pattern to `C.LI` is compression-only (because we
265     // don't want `c.li` ever printed as `qc.e.li`, which might be done if the
266     // pattern applied to decompression), but that doesn't help much becuase
267     // `C.LI` with a bare symbol will decompress to an `ADDI` anyway (because
268     // `simm12`'s MCOperandPredicate accepts a bare symbol and that pattern
269     // comes first), and we still cannot emit an `ADDI` with a bare symbol.
270     assert(STI.hasFeature(RISCV::FeatureVendorXqcili) &&
271            "C.LI is only relaxable with Xqcili");
272     Res.setOpcode(getRelaxedOpcode(Inst.getOpcode(), Inst.getOperands(), STI));
273     Res.addOperand(Inst.getOperand(0));
274     Res.addOperand(Inst.getOperand(1));
275     break;
276   }
// Conditional branches: same three operands, PseudoLong* opcode.
277   case RISCV::BEQ:
278   case RISCV::BNE:
279   case RISCV::BLT:
280   case RISCV::BGE:
281   case RISCV::BLTU:
282   case RISCV::BGEU:
283   case RISCV::QC_BEQI:
284   case RISCV::QC_BNEI:
285   case RISCV::QC_BLTI:
286   case RISCV::QC_BGEI:
287   case RISCV::QC_BLTUI:
288   case RISCV::QC_BGEUI:
289   case RISCV::QC_E_BEQI:
290   case RISCV::QC_E_BNEI:
291   case RISCV::QC_E_BLTI:
292   case RISCV::QC_E_BGEI:
293   case RISCV::QC_E_BLTUI:
294   case RISCV::QC_E_BGEUI:
295     Res.setOpcode(getRelaxedOpcode(Inst.getOpcode(), Inst.getOperands(), STI));
296     Res.addOperand(Inst.getOperand(0));
297     Res.addOperand(Inst.getOperand(1));
298     Res.addOperand(Inst.getOperand(2));
299     break;
300   }
301   Inst = std::move(Res);
302 }
303 
// Re-encode a DWARF line-table advance whose address delta is a
// symbol-difference expression, attaching SET/SUB relocation pairs so the
// linker can patch the delta after relaxation. Uses DW_LNS_fixed_advance_pc
// (uhalf) for small deltas and DW_LNE_set_address for large ones.
// Returns true (always handled); WasRelaxed reports whether the size changed.
relaxDwarfLineAddr(MCDwarfLineAddrFragment & DF,bool & WasRelaxed) const304 bool RISCVAsmBackend::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF,
305                                          bool &WasRelaxed) const {
306   MCContext &C = getContext();
307 
308   int64_t LineDelta = DF.getLineDelta();
309   const MCExpr &AddrDelta = DF.getAddrDelta();
310   SmallVector<MCFixup, 1> Fixups;
311   size_t OldSize = DF.getContents().size();
312 
313   int64_t Value;
// The delta must be computable ignoring pending linker relaxation.
314   [[maybe_unused]] bool IsAbsolute =
315       AddrDelta.evaluateKnownAbsolute(Value, *Asm);
316   assert(IsAbsolute && "CFA with invalid expression");
317 
318   Fixups.clear();
319   SmallVector<char> Data;
320   raw_svector_ostream OS(Data);
321 
322   // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence.
323   if (LineDelta != INT64_MAX) {
324     OS << uint8_t(dwarf::DW_LNS_advance_line);
325     encodeSLEB128(LineDelta, OS);
326   }
327 
328   unsigned Offset;
329   std::pair<MCFixupKind, MCFixupKind> Fixup;
330 
331   // According to the DWARF specification, the `DW_LNS_fixed_advance_pc` opcode
332   // takes a single unsigned half (unencoded) operand. The maximum encodable
333   // value is therefore 65535.  Set a conservative upper bound for relaxation.
334   if (Value > 60000) {
335     unsigned PtrSize = C.getAsmInfo()->getCodePointerSize();
336 
// Extended op: length byte covers the sub-opcode plus the address bytes.
337     OS << uint8_t(dwarf::DW_LNS_extended_op);
338     encodeULEB128(PtrSize + 1, OS);
339 
340     OS << uint8_t(dwarf::DW_LNE_set_address);
341     Offset = OS.tell();
342     assert((PtrSize == 4 || PtrSize == 8) && "Unexpected pointer size");
343     Fixup = RISCV::getRelocPairForSize(PtrSize);
344     OS.write_zeros(PtrSize);
345   } else {
346     OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc);
347     Offset = OS.tell();
348     Fixup = RISCV::getRelocPairForSize(2);
349     support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
350   }
351 
// AddrDelta is Hi - Lo; emit a SET fixup for the LHS and SUB for the RHS.
352   const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
353   Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(Fixup)));
354   Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(Fixup)));
355 
356   if (LineDelta == INT64_MAX) {
357     OS << uint8_t(dwarf::DW_LNS_extended_op);
358     OS << uint8_t(1);
359     OS << uint8_t(dwarf::DW_LNE_end_sequence);
360   } else {
361     OS << uint8_t(dwarf::DW_LNS_copy);
362   }
363 
364   DF.setContents(Data);
365   DF.setFixups(Fixups);
366   WasRelaxed = OldSize != Data.size();
367   return true;
368 }
369 
// Re-encode a DWARF CFA advance whose address delta is a symbol-difference
// expression, choosing the smallest DW_CFA_advance_loc* form that fits and
// attaching paired R_RISCV_SET*/R_RISCV_SUB* relocations so the linker can
// patch the delta after relaxation. Returns false when the delta is already
// a plain constant (nothing to do); otherwise true, with WasRelaxed
// reporting whether the fragment changed size.
relaxDwarfCFA(MCDwarfCallFrameFragment & DF,bool & WasRelaxed) const370 bool RISCVAsmBackend::relaxDwarfCFA(MCDwarfCallFrameFragment &DF,
371                                     bool &WasRelaxed) const {
372   const MCExpr &AddrDelta = DF.getAddrDelta();
373   SmallVector<MCFixup, 2> Fixups;
374   size_t OldSize = DF.getContents().size();
375 
376   int64_t Value;
// A fully-constant delta needs no relocations; leave the fragment alone.
377   if (AddrDelta.evaluateAsAbsolute(Value, *Asm))
378     return false;
379   [[maybe_unused]] bool IsAbsolute =
380       AddrDelta.evaluateKnownAbsolute(Value, *Asm);
381   assert(IsAbsolute && "CFA with invalid expression");
382 
383   assert(getContext().getAsmInfo()->getMinInstAlignment() == 1 &&
384          "expected 1-byte alignment");
// Zero delta: drop the advance entirely.
385   if (Value == 0) {
386     DF.clearContents();
387     DF.clearFixups();
388     WasRelaxed = OldSize != DF.getContents().size();
389     return true;
390   }
391 
// AddrDelta is Hi - Lo; attach SET for the LHS and SUB for the RHS at Offset.
392   auto AddFixups = [&Fixups, &AddrDelta](unsigned Offset,
393                                          std::pair<unsigned, unsigned> Fixup) {
394     const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
395     Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(Fixup)));
396     Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(Fixup)));
397   };
398 
399   SmallVector<char, 8> Data;
400   raw_svector_ostream OS(Data);
// DW_CFA_advance_loc packs a 6-bit delta into the opcode byte itself.
401   if (isUIntN(6, Value)) {
402     OS << uint8_t(dwarf::DW_CFA_advance_loc);
403     AddFixups(0, {ELF::R_RISCV_SET6, ELF::R_RISCV_SUB6});
404   } else if (isUInt<8>(Value)) {
405     OS << uint8_t(dwarf::DW_CFA_advance_loc1);
406     support::endian::write<uint8_t>(OS, 0, llvm::endianness::little);
407     AddFixups(1, {ELF::R_RISCV_SET8, ELF::R_RISCV_SUB8});
408   } else if (isUInt<16>(Value)) {
409     OS << uint8_t(dwarf::DW_CFA_advance_loc2);
410     support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
411     AddFixups(1, {ELF::R_RISCV_SET16, ELF::R_RISCV_SUB16});
412   } else if (isUInt<32>(Value)) {
413     OS << uint8_t(dwarf::DW_CFA_advance_loc4);
414     support::endian::write<uint32_t>(OS, 0, llvm::endianness::little);
415     AddFixups(1, {ELF::R_RISCV_SET32, ELF::R_RISCV_SUB32});
416   } else {
417     llvm_unreachable("unsupported CFA encoding");
418   }
419   DF.setContents(Data);
420   DF.setFixups(Fixups);
421 
422   WasRelaxed = OldSize != Data.size();
423   return true;
424 }
425 
relaxLEB128(MCLEBFragment & LF,int64_t & Value) const426 std::pair<bool, bool> RISCVAsmBackend::relaxLEB128(MCLEBFragment &LF,
427                                                    int64_t &Value) const {
428   if (LF.isSigned())
429     return std::make_pair(false, false);
430   const MCExpr &Expr = LF.getValue();
431   if (ULEB128Reloc) {
432     LF.addFixup(MCFixup::create(0, &Expr, FK_Data_leb128));
433   }
434   return std::make_pair(Expr.evaluateKnownAbsolute(Value, *Asm), false);
435 }
436 
mayNeedRelaxation(unsigned Opcode,ArrayRef<MCOperand> Operands,const MCSubtargetInfo & STI) const437 bool RISCVAsmBackend::mayNeedRelaxation(unsigned Opcode,
438                                         ArrayRef<MCOperand> Operands,
439                                         const MCSubtargetInfo &STI) const {
440   // This function has access to two STIs, the member of the AsmBackend, and the
441   // one passed as an argument. The latter is more specific, so we query it for
442   // specific features.
443   if (STI.hasFeature(RISCV::FeatureExactAssembly))
444     return false;
445 
446   return getRelaxedOpcode(Opcode, Operands, STI) != Opcode;
447 }
448 
writeNopData(raw_ostream & OS,uint64_t Count,const MCSubtargetInfo * STI) const449 bool RISCVAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
450                                    const MCSubtargetInfo *STI) const {
451   // We mostly follow binutils' convention here: align to even boundary with a
452   // 0-fill padding.  We emit up to 1 2-byte nop, though we use c.nop if RVC is
453   // enabled or 0-fill otherwise.  The remainder is now padded with 4-byte nops.
454 
455   // Instructions always are at even addresses.  We must be in a data area or
456   // be unaligned due to some other reason.
457   if (Count % 2) {
458     OS.write("\0", 1);
459     Count -= 1;
460   }
461 
462   if (Count % 4 == 2) {
463     // The canonical nop with Zca is c.nop.
464     OS.write(STI->hasFeature(RISCV::FeatureStdExtZca) ? "\x01\0" : "\0\0", 2);
465     Count -= 2;
466   }
467 
468   // The canonical nop on RISC-V is addi x0, x0, 0.
469   for (; Count >= 4; Count -= 4)
470     OS.write("\x13\0\0\0", 4);
471 
472   return true;
473 }
474 
// Scatter a resolved fixup value into the instruction's immediate bit
// positions (per the layouts in getFixupKindInfo), reporting range and
// alignment errors through \p Ctx. The returned word is OR'd into the
// instruction encoding by the caller.
adjustFixupValue(const MCFixup & Fixup,uint64_t Value,MCContext & Ctx)475 static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
476                                  MCContext &Ctx) {
477   switch (Fixup.getKind()) {
478   default:
479     llvm_unreachable("Unknown fixup kind!");
// Plain data fixups are stored verbatim.
480   case FK_Data_1:
481   case FK_Data_2:
482   case FK_Data_4:
483   case FK_Data_8:
484   case FK_Data_leb128:
485     return Value;
486   case RISCV::fixup_riscv_lo12_i:
487   case RISCV::fixup_riscv_pcrel_lo12_i:
488     return Value & 0xfff;
489   case RISCV::fixup_riscv_12_i:
490     if (!isInt<12>(Value)) {
491       Ctx.reportError(Fixup.getLoc(),
492                       "operand must be a constant 12-bit integer");
493     }
494     return Value & 0xfff;
// S-type stores split the 12-bit immediate into imm[11:5] and imm[4:0].
495   case RISCV::fixup_riscv_lo12_s:
496   case RISCV::fixup_riscv_pcrel_lo12_s:
497     return (((Value >> 5) & 0x7f) << 25) | ((Value & 0x1f) << 7);
498   case RISCV::fixup_riscv_hi20:
499   case RISCV::fixup_riscv_pcrel_hi20:
500     // Add 1 if bit 11 is 1, to compensate for low 12 bits being negative.
501     return ((Value + 0x800) >> 12) & 0xfffff;
502   case RISCV::fixup_riscv_jal: {
503     if (!isInt<21>(Value))
504       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
505     if (Value & 0x1)
506       Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
507     // Need to produce imm[19|10:1|11|19:12] from the 21-bit Value.
508     unsigned Sbit = (Value >> 20) & 0x1;
509     unsigned Hi8 = (Value >> 12) & 0xff;
510     unsigned Mid1 = (Value >> 11) & 0x1;
511     unsigned Lo10 = (Value >> 1) & 0x3ff;
512     // Inst{31} = Sbit;
513     // Inst{30-21} = Lo10;
514     // Inst{20} = Mid1;
515     // Inst{19-12} = Hi8;
516     Value = (Sbit << 19) | (Lo10 << 9) | (Mid1 << 8) | Hi8;
517     return Value;
518   }
519   case RISCV::fixup_riscv_qc_e_branch:
520   case RISCV::fixup_riscv_branch: {
521     if (!isInt<13>(Value))
522       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
523     if (Value & 0x1)
524       Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
525     // Need to extract imm[12], imm[10:5], imm[4:1], imm[11] from the 13-bit
526     // Value.
527     unsigned Sbit = (Value >> 12) & 0x1;
528     unsigned Hi1 = (Value >> 11) & 0x1;
529     unsigned Mid6 = (Value >> 5) & 0x3f;
530     unsigned Lo4 = (Value >> 1) & 0xf;
531     // Inst{31} = Sbit;
532     // Inst{30-25} = Mid6;
533     // Inst{11-8} = Lo4;
534     // Inst{7} = Hi1;
535     Value = (Sbit << 31) | (Mid6 << 25) | (Lo4 << 8) | (Hi1 << 7);
536     return Value;
537   }
// Call fixups cover an auipc+jalr pair: low word gets the U-type upper
// immediate, high word (<< 32) gets the I-type lower immediate.
538   case RISCV::fixup_riscv_call:
539   case RISCV::fixup_riscv_call_plt: {
540     // Jalr will add UpperImm with the sign-extended 12-bit LowerImm,
541     // we need to add 0x800ULL before extract upper bits to reflect the
542     // effect of the sign extension.
543     uint64_t UpperImm = (Value + 0x800ULL) & 0xfffff000ULL;
544     uint64_t LowerImm = Value & 0xfffULL;
545     return UpperImm | ((LowerImm << 20) << 32);
546   }
547   case RISCV::fixup_riscv_rvc_jump: {
548     if (!isInt<12>(Value))
549       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
550     // Need to produce offset[11|4|9:8|10|6|7|3:1|5] from the 11-bit Value.
551     unsigned Bit11  = (Value >> 11) & 0x1;
552     unsigned Bit4   = (Value >> 4) & 0x1;
553     unsigned Bit9_8 = (Value >> 8) & 0x3;
554     unsigned Bit10  = (Value >> 10) & 0x1;
555     unsigned Bit6   = (Value >> 6) & 0x1;
556     unsigned Bit7   = (Value >> 7) & 0x1;
557     unsigned Bit3_1 = (Value >> 1) & 0x7;
558     unsigned Bit5   = (Value >> 5) & 0x1;
559     Value = (Bit11 << 10) | (Bit4 << 9) | (Bit9_8 << 7) | (Bit10 << 6) |
560             (Bit6 << 5) | (Bit7 << 4) | (Bit3_1 << 1) | Bit5;
561     return Value;
562   }
563   case RISCV::fixup_riscv_rvc_branch: {
564     if (!isInt<9>(Value))
565       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
566     // Need to produce offset[8|4:3], [reg 3 bit], offset[7:6|2:1|5]
567     unsigned Bit8   = (Value >> 8) & 0x1;
568     unsigned Bit7_6 = (Value >> 6) & 0x3;
569     unsigned Bit5   = (Value >> 5) & 0x1;
570     unsigned Bit4_3 = (Value >> 3) & 0x3;
571     unsigned Bit2_1 = (Value >> 1) & 0x3;
572     Value = (Bit8 << 12) | (Bit4_3 << 10) | (Bit7_6 << 5) | (Bit2_1 << 3) |
573             (Bit5 << 2);
574     return Value;
575   }
// CI-format immediate: imm[5] in bit 12, imm[4:0] in bits 6:2.
576   case RISCV::fixup_riscv_rvc_imm: {
577     if (!isInt<6>(Value))
578       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
579     unsigned Bit5 = (Value >> 5) & 0x1;
580     unsigned Bit4_0 = Value & 0x1f;
581     Value = (Bit5 << 12) | (Bit4_0 << 2);
582     return Value;
583   }
584   case RISCV::fixup_riscv_qc_e_32: {
585     if (!isInt<32>(Value))
586       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
587     return Value & 0xffffffffu;
588   }
589   case RISCV::fixup_riscv_qc_abs20_u: {
590     if (!isInt<20>(Value))
591       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
592     unsigned Bit19 = (Value >> 19) & 0x1;
593     unsigned Bit14_0 = Value & 0x7fff;
594     unsigned Bit18_15 = (Value >> 15) & 0xf;
595     Value = (Bit19 << 31) | (Bit14_0 << 16) | (Bit18_15 << 12);
596     return Value;
597   }
598   case RISCV::fixup_riscv_qc_e_call_plt: {
599     if (!isInt<32>(Value))
600       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
601     if (Value & 0x1)
602       Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
603     uint64_t Bit31_16 = (Value >> 16) & 0xffff;
604     uint64_t Bit12 = (Value >> 12) & 0x1;
605     uint64_t Bit10_5 = (Value >> 5) & 0x3f;
606     uint64_t Bit15_13 = (Value >> 13) & 0x7;
607     uint64_t Bit4_1 = (Value >> 1) & 0xf;
608     uint64_t Bit11 = (Value >> 11) & 0x1;
609     Value = (Bit31_16 << 32ull) | (Bit12 << 31) | (Bit10_5 << 25) |
610             (Bit15_13 << 17) | (Bit4_1 << 8) | (Bit11 << 7);
611     return Value;
612   }
613   case RISCV::fixup_riscv_nds_branch_10: {
614     if (!isInt<11>(Value))
615       Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
616     if (Value & 0x1)
617       Ctx.reportError(Fixup.getLoc(), "fixup value must be 2-byte aligned");
618     // Need to extract imm[10], imm[9:5], imm[4:1] from the 11-bit Value.
619     unsigned Sbit = (Value >> 10) & 0x1;
620     unsigned Hi5 = (Value >> 5) & 0x1f;
621     unsigned Lo4 = (Value >> 1) & 0xf;
622     // Inst{31} = Sbit;
623     // Inst{29-25} = Hi5;
624     // Inst{11-8} = Lo4;
625     Value = (Sbit << 31) | (Hi5 << 25) | (Lo4 << 8);
626     return Value;
627   }
628   }
629 }
630 
// Determine whether a PC-relative fixup against \p SymA, located in fragment
// \p F, can be resolved at assembly time despite possible linker relaxation
// in the enclosing section.
isPCRelFixupResolved(const MCSymbol * SymA,const MCFragment & F)631 bool RISCVAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
632                                            const MCFragment &F) {
633   // If the section does not contain linker-relaxable fragments, PC-relative
634   // fixups can be resolved.
635   if (!F.getParent()->isLinkerRelaxable())
636     return true;
637 
638   // Otherwise, check if the offset between the symbol and fragment is fully
639   // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
640   // offset-affected MCAlignFragment). Complements the generic
641   // isSymbolRefDifferenceFullyResolvedImpl.
// PCRelTemp is a reusable scratch symbol; re-anchoring it to F lets us probe
// SymA - F via the generic symbolic-add evaluator.
642   if (!PCRelTemp)
643     PCRelTemp = getContext().createTempSymbol();
644   PCRelTemp->setFragment(const_cast<MCFragment *>(&F));
645   MCValue Res;
646   MCExpr::evaluateSymbolicAdd(Asm, false, MCValue::get(SymA),
647                               MCValue::get(nullptr, PCRelTemp), Res);
// A leftover subtracted symbol means the difference did not fold away.
648   return !Res.getSubSym();
649 }
650 
651 // Get the corresponding PC-relative HI fixup that a S_PCREL_LO points to, and
652 // optionally the fragment containing it.
653 //
654 // \returns nullptr if this isn't a S_PCREL_LO pointing to a known PC-relative
655 // HI fixup.
getPCRelHiFixup(const MCSpecifierExpr & Expr,const MCFragment ** DFOut)656 static const MCFixup *getPCRelHiFixup(const MCSpecifierExpr &Expr,
657                                       const MCFragment **DFOut) {
// The %pcrel_lo operand names a label placed on the auipc; resolve it first.
658   MCValue AUIPCLoc;
659   if (!Expr.getSubExpr()->evaluateAsRelocatable(AUIPCLoc, nullptr))
660     return nullptr;
661 
662   const MCSymbol *AUIPCSymbol = AUIPCLoc.getAddSym();
663   if (!AUIPCSymbol)
664     return nullptr;
665   const auto *DF = dyn_cast_or_null<MCDataFragment>(AUIPCSymbol->getFragment());
666 
667   if (!DF)
668     return nullptr;
669 
// The label may sit at the very end of its fragment, in which case the auipc
// (and its fixup) lives at offset 0 of the next data fragment.
670   uint64_t Offset = AUIPCSymbol->getOffset();
671   if (DF->getContents().size() == Offset) {
672     DF = dyn_cast_or_null<MCDataFragment>(DF->getNext());
673     if (!DF)
674       return nullptr;
675     Offset = 0;
676   }
677 
// Search that fragment for a HI-style fixup at the label's offset: either a
// pcrel_hi20 target fixup, or a raw GOT/TLS HI relocation kind.
678   for (const MCFixup &F : DF->getFixups()) {
679     if (F.getOffset() != Offset)
680       continue;
681     auto Kind = F.getKind();
682     if (!mc::isRelocation(F.getKind())) {
683       if (Kind == RISCV::fixup_riscv_pcrel_hi20) {
684         *DFOut = DF;
685         return &F;
686       }
687       break;
688     }
689     switch (Kind) {
690     case ELF::R_RISCV_GOT_HI20:
691     case ELF::R_RISCV_TLS_GOT_HI20:
692     case ELF::R_RISCV_TLS_GD_HI20:
693     case ELF::R_RISCV_TLSDESC_HI20:
694       *DFOut = DF;
695       return &F;
696     }
697   }
698 
699   return nullptr;
700 }
701 
// Target hook for fixup evaluation. Only %pcrel_lo fixups get special
// treatment: their value derives from the *paired* %pcrel_hi (auipc) fixup,
// not from their own expression. Returns an empty optional to request the
// default handling, otherwise the resolved-ness of the fixup (with \p Value
// set to the auipc-relative displacement when resolvable).
evaluateFixup(const MCFragment &,MCFixup & Fixup,MCValue & Target,uint64_t & Value)702 std::optional<bool> RISCVAsmBackend::evaluateFixup(const MCFragment &,
703                                                    MCFixup &Fixup,
704                                                    MCValue &Target,
705                                                    uint64_t &Value) {
706   const MCFixup *AUIPCFixup;
707   const MCFragment *AUIPCDF;
708   MCValue AUIPCTarget;
709   switch (Fixup.getKind()) {
710   default:
711     // Use default handling for `Value` and `IsResolved`.
712     return {};
713   case RISCV::fixup_riscv_pcrel_lo12_i:
714   case RISCV::fixup_riscv_pcrel_lo12_s: {
715     AUIPCFixup =
716         getPCRelHiFixup(cast<MCSpecifierExpr>(*Fixup.getValue()), &AUIPCDF);
717     if (!AUIPCFixup) {
718       getContext().reportError(Fixup.getLoc(),
719                                "could not find corresponding %pcrel_hi");
720       return true;
721     }
722 
723     // MCAssembler::evaluateFixup will emit an error for this case when it sees
724     // the %pcrel_hi, so don't duplicate it when also seeing the %pcrel_lo.
725     const MCExpr *AUIPCExpr = AUIPCFixup->getValue();
726     if (!AUIPCExpr->evaluateAsRelocatable(AUIPCTarget, Asm))
727       return true;
728     break;
729   }
730   }
731 
732   if (!AUIPCTarget.getAddSym())
733     return false;
734 
735   const MCSymbolELF &SA = cast<MCSymbolELF>(*AUIPCTarget.getAddSym());
736   if (SA.isUndefined())
737     return false;
738 
// Resolvable only for a local, non-ifunc symbol in the same section as the
// auipc; anything else must go through a relocation.
739   bool IsResolved = &SA.getSection() == AUIPCDF->getParent() &&
740                     SA.getBinding() == ELF::STB_LOCAL &&
741                     SA.getType() != ELF::STT_GNU_IFUNC;
742   if (!IsResolved)
743     return false;
744 
// Displacement is measured from the auipc's own location, not the lo12 use.
745   Value = Asm->getSymbolOffset(SA) + AUIPCTarget.getConstant();
746   Value -= Asm->getFragmentOffset(*AUIPCDF) + AUIPCFixup->getOffset();
747 
748   return AUIPCFixup->getKind() == RISCV::fixup_riscv_pcrel_hi20 &&
749          isPCRelFixupResolved(AUIPCTarget.getAddSym(), *AUIPCDF);
750 }
751 
maybeAddVendorReloc(const MCFragment & F,const MCFixup & Fixup)752 void RISCVAsmBackend::maybeAddVendorReloc(const MCFragment &F,
753                                           const MCFixup &Fixup) {
754   StringRef VendorIdentifier;
755   switch (Fixup.getKind()) {
756   default:
757     // No Vendor Relocation Required.
758     return;
759   case RISCV::fixup_riscv_qc_e_branch:
760   case RISCV::fixup_riscv_qc_abs20_u:
761   case RISCV::fixup_riscv_qc_e_32:
762   case RISCV::fixup_riscv_qc_e_call_plt:
763     VendorIdentifier = "QUALCOMM";
764     break;
765   case RISCV::fixup_riscv_nds_branch_10:
766     VendorIdentifier = "ANDES";
767     break;
768   }
769 
770   // Create a local symbol for the vendor relocation to reference. It's fine if
771   // the symbol has the same name as an existing symbol.
772   MCContext &Ctx = Asm->getContext();
773   MCSymbol *VendorSymbol = Ctx.createLocalSymbol(VendorIdentifier);
774   auto [It, Inserted] =
775       VendorSymbols.try_emplace(VendorIdentifier, VendorSymbol);
776 
777   if (Inserted) {
778     // Setup the just-created symbol
779     VendorSymbol->setVariableValue(MCConstantExpr::create(0, Ctx));
780     Asm->registerSymbol(*VendorSymbol);
781   } else {
782     // Fetch the existing symbol
783     VendorSymbol = It->getValue();
784   }
785 
786   MCFixup VendorFixup =
787       MCFixup::create(Fixup.getOffset(), nullptr, ELF::R_RISCV_VENDOR);
788   // Explicitly create MCValue rather than using an MCExpr and evaluating it so
789   // that the absolute vendor symbol is not evaluated to constant 0.
790   MCValue VendorTarget = MCValue::get(VendorSymbol);
791   uint64_t VendorValue;
792   Asm->getWriter().recordRelocation(F, VendorFixup, VendorTarget, VendorValue);
793 }
794 
relaxableFixupNeedsRelocation(const MCFixupKind Kind)795 static bool relaxableFixupNeedsRelocation(const MCFixupKind Kind) {
796   // Some Fixups are marked as LinkerRelaxable by
797   // `RISCVMCCodeEmitter::getImmOpValue` only because they may be
798   // (assembly-)relaxed into a linker-relaxable instruction. This function
799   // should return `false` for those fixups so they do not get a `R_RISCV_RELAX`
800   // relocation emitted in addition to the relocation.
801   switch (Kind) {
802   default:
803     break;
804   case RISCV::fixup_riscv_rvc_jump:
805   case RISCV::fixup_riscv_rvc_branch:
806   case RISCV::fixup_riscv_jal:
807     return false;
808   }
809   return true;
810 }
811 
// Record the relocation(s) a fixup requires, handling the RISC-V specifics:
// symbol-difference fixups become paired ADD/SUB (or SET/SUB_ULEB128)
// relocations; linker-relaxable fixups force a relocation and get a trailing
// R_RISCV_RELAX marker; vendor fixups get a leading R_RISCV_VENDOR marker.
// Always returns false, i.e. reports the fixup as not resolved in place.
bool RISCVAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
                               const MCValue &Target, uint64_t &FixedValue,
                               bool IsResolved) {
  uint64_t FixedValueA, FixedValueB;
  if (Target.getSubSym()) {
    // SymA-SymB difference: lower into a matched pair of relocations, one
    // adding SymA and one subtracting SymB, sized to the fixup's data width.
    assert(Target.getSpecifier() == 0 &&
           "relocatable SymA-SymB cannot have relocation specifier");
    unsigned TA = 0, TB = 0;
    switch (Fixup.getKind()) {
    case llvm::FK_Data_1:
      TA = ELF::R_RISCV_ADD8;
      TB = ELF::R_RISCV_SUB8;
      break;
    case llvm::FK_Data_2:
      TA = ELF::R_RISCV_ADD16;
      TB = ELF::R_RISCV_SUB16;
      break;
    case llvm::FK_Data_4:
      TA = ELF::R_RISCV_ADD32;
      TB = ELF::R_RISCV_SUB32;
      break;
    case llvm::FK_Data_8:
      TA = ELF::R_RISCV_ADD64;
      TB = ELF::R_RISCV_SUB64;
      break;
    case llvm::FK_Data_leb128:
      TA = ELF::R_RISCV_SET_ULEB128;
      TB = ELF::R_RISCV_SUB_ULEB128;
      break;
    default:
      llvm_unreachable("unsupported fixup size");
    }
    // The constant addend travels with the additive (ADD/SET) half.
    MCValue A = MCValue::get(Target.getAddSym(), nullptr, Target.getConstant());
    MCValue B = MCValue::get(Target.getSubSym());
    auto FA = MCFixup::create(Fixup.getOffset(), nullptr, TA);
    auto FB = MCFixup::create(Fixup.getOffset(), nullptr, TB);
    Asm->getWriter().recordRelocation(F, FA, A, FixedValueA);
    Asm->getWriter().recordRelocation(F, FB, B, FixedValueB);
    FixedValue = FixedValueA - FixedValueB;
    return false;
  }

  // If linker relaxation is enabled and supported by the current fixup, then we
  // always want to generate a relocation.
  bool NeedsRelax = Fixup.isLinkerRelaxable() &&
                    relaxableFixupNeedsRelocation(Fixup.getKind());
  if (NeedsRelax)
    IsResolved = false;

  // PC-relative fixups need an extra resolvability check (e.g. that the
  // target stays fixed relative to this fragment).
  if (IsResolved && Fixup.isPCRel())
    IsResolved = isPCRelFixupResolved(Target.getAddSym(), F);

  if (!IsResolved) {
    // Some Fixups require a VENDOR relocation, record it (directly) before we
    // add the relocation.
    maybeAddVendorReloc(F, Fixup);

    Asm->getWriter().recordRelocation(F, Fixup, Target, FixedValue);

    if (NeedsRelax) {
      // Some Fixups get a RELAX relocation, record it (directly) after we add
      // the relocation.
      MCFixup RelaxFixup =
          MCFixup::create(Fixup.getOffset(), nullptr, ELF::R_RISCV_RELAX);
      MCValue RelaxTarget = MCValue::get(nullptr);
      uint64_t RelaxValue;
      Asm->getWriter().recordRelocation(F, RelaxFixup, RelaxTarget, RelaxValue);
    }
  }

  return false;
}
884 
applyFixup(const MCFragment & F,const MCFixup & Fixup,const MCValue & Target,MutableArrayRef<char> Data,uint64_t Value,bool IsResolved)885 void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
886                                  const MCValue &Target,
887                                  MutableArrayRef<char> Data, uint64_t Value,
888                                  bool IsResolved) {
889   IsResolved = addReloc(F, Fixup, Target, Value, IsResolved);
890   MCFixupKind Kind = Fixup.getKind();
891   if (mc::isRelocation(Kind))
892     return;
893   MCContext &Ctx = getContext();
894   MCFixupKindInfo Info = getFixupKindInfo(Kind);
895   if (!Value)
896     return; // Doesn't change encoding.
897   // Apply any target-specific value adjustments.
898   Value = adjustFixupValue(Fixup, Value, Ctx);
899 
900   // Shift the value into position.
901   Value <<= Info.TargetOffset;
902 
903   unsigned Offset = Fixup.getOffset();
904   unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;
905 
906   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
907 
908   // For each byte of the fragment that the fixup touches, mask in the
909   // bits from the fixup value.
910   for (unsigned i = 0; i != NumBytes; ++i) {
911     Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
912   }
913 }
914 
// Linker relaxation may change code size, so when it is enabled we have to
// insert Nops for the .align directive; the linker can then satisfy the
// alignment by removing Nops.
// This function returns (via Size) the total number of Nop bytes to insert.
shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment & AF,unsigned & Size)919 bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
920     const MCAlignFragment &AF, unsigned &Size) {
921   // Calculate Nops Size only when linker relaxation enabled.
922   const MCSubtargetInfo *STI = AF.getSubtargetInfo();
923   if (!STI->hasFeature(RISCV::FeatureRelax))
924     return false;
925 
926   unsigned MinNopLen = STI->hasFeature(RISCV::FeatureStdExtZca) ? 2 : 4;
927 
928   if (AF.getAlignment() <= MinNopLen) {
929     return false;
930   } else {
931     Size = AF.getAlignment().value() - MinNopLen;
932     return true;
933   }
934 }
935 
// When linker relaxation is enabled we need to emit an R_RISCV_ALIGN
// relocation to indicate the position of the Nops and the total number of
// Nop bytes that have been inserted.
// This function records that R_RISCV_ALIGN relocation directly on the
// alignment fragment.
shouldInsertFixupForCodeAlign(MCAssembler & Asm,MCAlignFragment & AF)941 bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
942                                                     MCAlignFragment &AF) {
943   // Insert the fixup only when linker relaxation enabled.
944   const MCSubtargetInfo *STI = AF.getSubtargetInfo();
945   if (!STI->hasFeature(RISCV::FeatureRelax))
946     return false;
947 
948   // Calculate total Nops we need to insert. If there are none to insert
949   // then simply return.
950   unsigned Count;
951   if (!shouldInsertExtraNopBytesForCodeAlign(AF, Count) || (Count == 0))
952     return false;
953 
954   MCContext &Ctx = getContext();
955   const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
956   MCFixup Fixup = MCFixup::create(0, Dummy, ELF::R_RISCV_ALIGN);
957 
958   uint64_t FixedValue = 0;
959   MCValue NopBytes = MCValue::get(Count);
960   Asm.getWriter().recordRelocation(AF, Fixup, NopBytes, FixedValue);
961   return true;
962 }
963 
std::unique_ptr<MCObjectTargetWriter>
RISCVAsmBackend::createObjectTargetWriter() const {
  // Build the ELF writer configured with this backend's OS/ABI byte and
  // RV32/RV64 selection.
  return createRISCVELFObjectWriter(OSABI, Is64Bit);
}
968 
createRISCVAsmBackend(const Target & T,const MCSubtargetInfo & STI,const MCRegisterInfo & MRI,const MCTargetOptions & Options)969 MCAsmBackend *llvm::createRISCVAsmBackend(const Target &T,
970                                           const MCSubtargetInfo &STI,
971                                           const MCRegisterInfo &MRI,
972                                           const MCTargetOptions &Options) {
973   const Triple &TT = STI.getTargetTriple();
974   uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
975   return new RISCVAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
976 }
977