//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(support::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

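// When the gfx1010 "offset 0x3f" hardware bug is present, a SOPP branch whose
// encoded offset lands on the problematic value is rewritten to a relaxed
// variant that emits an s_nop 0 immediately after the branch, nudging the
// offset past the erratum. AMDGPU::getSOPPWithRelaxation maps an opcode to
// that relaxed form, returning a negative value when no relaxed form exists.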
void AMDGPUAsmBackend::relaxInstruction(const MCInst &Inst,
                                        const MCSubtargetInfo &STI,
                                        MCInst &Res) const {
  unsigned RelaxedOpcode = AMDGPU::getSOPPWithRelaxation(Inst.getOpcode());
  Res.setOpcode(RelaxedOpcode);
  Res.addOperand(Inst.getOperand(0));
}

bool AMDGPUAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                            uint64_t Value,
                                            const MCRelaxableFragment *DF,
                                            const MCAsmLayout &Layout) const {
  // If the branch target has an offset of 0x3f words, the branch needs to be
  // relaxed by adding an s_nop 0 immediately after it, which effectively
  // increments the offset; this works around a gfx1010 hardware bug.
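  // Example: a branch target 0x100 bytes past the fixup location gives
  // Value == 0x100, so (0x100 / 4) - 1 == 0x3f and relaxation is required.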
  return (((int64_t(Value) / 4) - 1) == 0x3f);
}

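// Relaxation is only ever considered on subtargets with FeatureOffset3fBug
// (the gfx1010 erratum above); on all other subtargets SOPP branches are
// emitted unchanged.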
bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  if (!STI.getFeatureBits()[AMDGPU::FeatureOffset3fBug])
    return false;

  if (AMDGPU::getSOPPWithRelaxation(Inst.getOpcode()) >= 0)
    return true;

  return false;
}

static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (Fixup.getTargetKind()) {
  case AMDGPU::fixup_si_sopp_br: {
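    // The simm16 operand of a SOPP branch counts 4-byte words relative to
    // the instruction following the branch, so drop the 4-byte branch itself
    // and convert bytes to words; e.g. SignedValue == 12 yields BrImm == 2.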
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
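  // The backend is little-endian, so for a 2-byte sopp fixup with
  // Value == 0x3f this ORs 0x3f into Data[Offset] and 0 into Data[Offset + 1].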
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &
AMDGPUAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  static const MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name                   offset bits  flags
    { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

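  // Bounds-check the table lookup, as other MC backends do before indexing
  // their target fixup tables.
  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");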
  return Infos[Kind - FirstTargetFixupKind];
}

unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
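  // Example: Count == 10 writes 2 zero bytes (10 % 4), then 2 s_nop
  // instructions (10 / 4), for 10 bytes total.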
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;
  uint8_t ABIVersion = 0;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT, uint8_t ABIVersion) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA),
      ABIVersion(ABIVersion) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend,
                                       ABIVersion);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn.
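  // Code object v3 objects are stamped with ELF ABI version 1; older code
  // objects use version 0.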
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple(),
                                 IsaInfo::hasCodeObjectV3(&STI) ? 1 : 0);
}