//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
/// \file
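/// The AMDGPU assembler backend: applies target fixups, relaxes short SOPP
/// branches to work around the gfx1010 offset-0x3f hardware bug, and emits
/// s_nop instructions for alignment padding.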
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/TargetParser/TargetParser.h"

using namespace llvm;
using namespace llvm::AMDGPU;

namespace {

class AMDGPUAsmBackend : public MCAsmBackend {
public:
  AMDGPUAsmBackend(const Target &T) : MCAsmBackend(llvm::endianness::little) {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value) const override;

  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;

  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  unsigned getMinimumNopSize() const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target,
                             const MCSubtargetInfo *STI) override;
};

} // end anonymous namespace

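// Relax a SOPP branch by switching to its relaxed variant from the
// relaxation table, which is encoded with a following s_nop 0 so the branch
// offset no longer lands on the value that triggers the gfx1010 hardware bug
// (see fixupNeedsRelaxation below).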
void AMDGPUAsmBackend::relaxInstruction(MCInst &Inst,
                                        const MCSubtargetInfo &STI) const {
  MCInst Res;
  unsigned RelaxedOpcode = AMDGPU::getSOPPWithRelaxation(Inst.getOpcode());
  Res.setOpcode(RelaxedOpcode);
  Res.addOperand(Inst.getOperand(0));
  Inst = std::move(Res);
}

bool AMDGPUAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                            uint64_t Value) const {
  // If the encoded branch offset is exactly 0x3f, the branch must be relaxed
  // so that an s_nop 0 follows it, effectively bumping the offset past the
  // value that triggers the gfx1010 hardware bug.
  return ((int64_t(Value) / 4) - 1) == 0x3f;
}

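// Only targets with the offset-0x3f hardware bug (gfx1010) ever need
// relaxation, and then only for SOPP branch instructions that have a relaxed
// variant in the relaxation table.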
bool AMDGPUAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  if (!STI.hasFeature(AMDGPU::FeatureOffset3fBug))
    return false;

  if (AMDGPU::getSOPPWithRelaxation(Inst.getOpcode()) >= 0)
    return true;

  return false;
}

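// Returns the number of bytes of object code that the given fixup kind
// patches.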
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

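// Converts a resolved fixup value into the form the instruction encoding
// expects. For SOPP branches, the byte distance becomes a dword count
// relative to the instruction following the branch, which is what the simm16
// field encodes.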
static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (Fixup.getTargetKind()) {
  case AMDGPU::fixup_si_sopp_br: {
    int64_t BrImm = (SignedValue - 4) / 4;

    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");

    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

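// Patches the resolved fixup value into the fragment's data. Fixup kinds at
// or above FirstLiteralRelocationKind come from .reloc directives and are
// emitted as raw relocations by the object writer, so there is nothing to
// patch here.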
void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved,
                                  const MCSubtargetInfo *STI) const {
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return;

  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

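// Maps an ELF relocation name (e.g. "R_AMDGPU_ABS32", as used by .reloc
// directives) to the corresponding literal fixup kind.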
std::optional<MCFixupKind>
AMDGPUAsmBackend::getFixupKind(StringRef Name) const {
  return StringSwitch<std::optional<MCFixupKind>>(Name)
#define ELF_RELOC(Name, Value)                                                 \
  .Case(#Name, MCFixupKind(FirstLiteralRelocationKind + Value))
#include "llvm/BinaryFormat/ELFRelocs/AMDGPU.def"
#undef ELF_RELOC
      .Default(std::nullopt);
}

const MCFixupKindInfo &
AMDGPUAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
      // name               offset bits  flags
      {"fixup_si_sopp_br",  0,     16,   MCFixupKindInfo::FKF_IsPCRel},
  };

  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

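// Fixups created from .reloc directives must always be emitted as
// relocations, even when the target could otherwise be resolved at assembly
// time.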
bool AMDGPUAsmBackend::shouldForceRelocation(const MCAssembler &,
                                             const MCFixup &Fixup,
                                             const MCValue &,
                                             const MCSubtargetInfo *STI) {
  return Fixup.getKind() >= FirstLiteralRelocationKind;
}

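// The smallest hardware NOP (s_nop) is a full 4-byte instruction, so that is
// the minimum unit of nop padding.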
unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                    const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;

  for (uint64_t I = 0; I != Count; ++I)
    support::endian::write<uint32_t>(OS, Encoded_S_NOP_0, Endian);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

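// ELF flavor of the backend: selects the ELF class (64-bit for amdgcn), the
// OS ABI byte, and whether RELA relocations are used (AMDHSA), then passes
// those choices to the ELF object writer.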
class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT)
      : AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
        HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend);
  }
};

} // end anonymous namespace

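// Entry point used when registering the backend with the TargetRegistry;
// AMDGPU only produces ELF objects, so this always builds the ELF variant.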
MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple());
}