//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

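/// Generic AMDGPU assembler backend: resolves target fixups and implements
/// the branch relaxation needed to work around the gfx1010 offset-0x3f
/// hardware bug (see fixupNeedsRelaxation below).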
class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

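// Swap a SOPP branch for its relaxed counterpart, which additionally emits an
// s_nop after the branch so the problematic target offset is avoided (see
// fixupNeedsRelaxation below). The branch target operand is carried over
// unchanged.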
void AMDGPUAsmBackend::relaxInstruction(MCInst &Inst,
                                        const MCSubtargetInfo &STI) const {
  MCInst Res;
  unsigned RelaxedOpcode = AMDGPU::getSOPPWithRelaxation(Inst.getOpcode());
  Res.setOpcode(RelaxedOpcode);
  Res.addOperand(Inst.getOperand(0));
  Inst = std::move(Res);
}

bool AMDGPUAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                            uint64_t Value,
                                            const MCRelaxableFragment *DF,
                                            const MCAsmLayout &Layout) const {
  // If the branch target has an offset of 0x3f, the instruction needs to be
  // relaxed: an s_nop 0 is added immediately after the branch to effectively
  // increment the offset, as a workaround for a hardware bug in gfx1010.
  return (((int64_t(Value) / 4) - 1) == 0x3f);
}

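// Relaxation is only a concern on subtargets with the offset-0x3f hardware
// bug, and only for SOPP instructions that have a relaxed counterpart.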
bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  if (!STI.getFeatureBits()[AMDGPU::FeatureOffset3fBug])
    return false;

  return AMDGPU::getSOPPWithRelaxation(Inst.getOpcode()) >= 0;
}

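// Width in bytes of the field that the fixup patches.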
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

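// Convert the resolved fixup value into the form the encoding expects. For
// fixup_si_sopp_br, the SOPP simm16 is a signed dword offset relative to the
// instruction following the 4-byte branch, hence the (Value - 4) / 4 scaling.
// For example, a target 8 bytes past the fixup location (Value == 8) yields
// BrImm == 1.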
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (Fixup.getTargetKind()) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

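// Patch the adjusted fixup value into the fragment data. Bytes are OR'd in
// rather than overwritten, so a value of zero leaves the encoding untouched
// and can be skipped early.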
void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &
AMDGPUAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

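// GCN instructions are always a multiple of 4 bytes, so the smallest usable
// nop is the 4-byte s_nop.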
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

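// Fill padding with s_nop instructions, zero-padding any leading bytes that
// are not 4-byte aligned. For example, Count == 10 writes two zero bytes
// followed by two s_nop 0 encodings.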
bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

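/// ELF-specific backend: derives the ELF OSABI from the triple's OS and
/// forwards the code-object ABI version to the object writer.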
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;
  uint8_t ABIVersion = 0;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT, uint8_t ABIVersion) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA),
      ABIVersion(ABIVersion) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend,
                                       ABIVersion);
  }
};

} // end anonymous namespace

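// TargetRegistry entry point. AMDGPU only emits ELF objects, so this always
// returns the ELF backend.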
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple(),
                                 IsaInfo::hasCodeObjectV3(&STI) ? 1 : 0);
}
244