//===- ARMTargetStreamer.cpp - ARMTargetStreamer class --*- C++ -*---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARMTargetStreamer class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

//
// ARMTargetStreamer Implementation
//

ARMTargetStreamer::ARMTargetStreamer(MCStreamer &S)
    : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}

ARMTargetStreamer::~ARMTargetStreamer() = default;

// The constant pool handling is shared by all ARMTargetStreamer
// implementations.
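// Typical flow (illustrative, assuming the usual MC assembly path): the
// "ldr <reg>, =<expr>" pseudo-instruction adds 4-byte entries through
// addConstantPoolEntry(), and the pool is flushed either explicitly (the
// ".ltorg" directive ends up in emitCurrentConstantPool()) or when the
// streamer finishes, via emitConstantPools().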
const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr, SMLoc Loc) {
  return ConstantPools->addEntry(Streamer, Expr, 4, Loc);
}

void ARMTargetStreamer::emitCurrentConstantPool() {
  ConstantPools->emitForCurrentSection(Streamer);
  ConstantPools->clearCacheForCurrentSection(Streamer);
}

// emitConstantPools() - write out any non-empty assembler constant pools.
void ARMTargetStreamer::emitConstantPools() {
  ConstantPools->emitAll(Streamer);
}

// reset() - Reset any state
void ARMTargetStreamer::reset() {}

void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {
  unsigned Size;
  char Buffer[4];
  const bool LittleEndian = getContext().getAsmInfo()->isLittleEndian();

  switch (Suffix) {
  case '\0':
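    // ARM mode (no suffix): the 32-bit instruction is emitted as a single
    // word in the target's endianness.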
    Size = 4;

    for (unsigned II = 0, IE = Size; II != IE; II++) {
      const unsigned I = LittleEndian ? (Size - II - 1) : II;
      Buffer[Size - II - 1] = uint8_t(Inst >> I * CHAR_BIT);
    }

    break;
  case 'n':
  case 'w':
    Size = (Suffix == 'n' ? 2 : 4);

    // Thumb wide instructions are emitted as a pair of 16-bit words of the
    // appropriate endianness.
    for (unsigned II = 0, IE = Size; II != IE; II = II + 2) {
      const unsigned I0 = LittleEndian ? II + 0 : II + 1;
      const unsigned I1 = LittleEndian ? II + 1 : II + 0;
      Buffer[Size - II - 2] = uint8_t(Inst >> I0 * CHAR_BIT);
      Buffer[Size - II - 1] = uint8_t(Inst >> I1 * CHAR_BIT);
    }

    break;
  default:
    llvm_unreachable("Invalid Suffix");
  }
  getStreamer().emitBytes(StringRef(Buffer, Size));
}
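// Rough illustration (assuming the usual MC assembly path): the ".inst"
// directive family forwards raw encodings to this hook, e.g.
//   .inst   0xe320f000  ->  emitInst(0xE320F000, '\0')   (ARM-mode word)
//   .inst.n 0xbf00      ->  emitInst(0x0000BF00, 'n')    (16-bit Thumb)
//   .inst.w 0xf3af8000  ->  emitInst(0xF3AF8000, 'w')    (32-bit Thumb)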

// The remaining callbacks should be handled separately by each
// streamer.
void ARMTargetStreamer::emitFnStart() {}
void ARMTargetStreamer::emitFnEnd() {}
void ARMTargetStreamer::emitCantUnwind() {}
void ARMTargetStreamer::emitPersonality(const MCSymbol *Personality) {}
void ARMTargetStreamer::emitPersonalityIndex(unsigned Index) {}
void ARMTargetStreamer::emitHandlerData() {}
void ARMTargetStreamer::emitSetFP(MCRegister FpReg, MCRegister SpReg,
                                  int64_t Offset) {}
void ARMTargetStreamer::emitMovSP(MCRegister Reg, int64_t Offset) {}
void ARMTargetStreamer::emitPad(int64_t Offset) {}
void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<MCRegister> &RegList,
                                    bool isVector) {}
void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset,
                                      const SmallVectorImpl<uint8_t> &Opcodes) {
}
void ARMTargetStreamer::switchVendor(StringRef Vendor) {}
void ARMTargetStreamer::emitAttribute(unsigned Attribute, unsigned Value) {}
void ARMTargetStreamer::emitTextAttribute(unsigned Attribute,
                                          StringRef String) {}
void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
                                             unsigned IntValue,
                                             StringRef StringValue) {}
void ARMTargetStreamer::emitArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitArchExtension(uint64_t ArchExt) {}
void ARMTargetStreamer::emitObjectArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitFPU(ARM::FPUKind FPU) {}
void ARMTargetStreamer::finishAttributeSection() {}
void ARMTargetStreamer::annotateTLSDescriptorSequence(
    const MCSymbolRefExpr *SRE) {}
void ARMTargetStreamer::emitSyntaxUnified() {}
void ARMTargetStreamer::emitCode16() {}
void ARMTargetStreamer::emitCode32() {}
void ARMTargetStreamer::emitThumbFunc(MCSymbol *Symbol) {}
void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}

void ARMTargetStreamer::emitARMWinCFIAllocStack(unsigned Size, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveRegMask(unsigned Mask, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveSP(unsigned Reg) {}
void ARMTargetStreamer::emitARMWinCFISaveFRegs(unsigned First, unsigned Last) {}
void ARMTargetStreamer::emitARMWinCFISaveLR(unsigned Offset) {}
void ARMTargetStreamer::emitARMWinCFINop(bool Wide) {}
void ARMTargetStreamer::emitARMWinCFIPrologEnd(bool Fragment) {}
void ARMTargetStreamer::emitARMWinCFIEpilogStart(unsigned Condition) {}
void ARMTargetStreamer::emitARMWinCFIEpilogEnd() {}
void ARMTargetStreamer::emitARMWinCFICustom(unsigned Opcode) {}

static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) {
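  // The checks run roughly from newest to oldest architecture: the Has*Ops
  // subtarget features generally imply their predecessors, so the first match
  // yields the most specific Tag_CPU_arch value.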
  if (STI.getCPU() == "xscale")
    return ARMBuildAttrs::v5TEJ;

  if (STI.hasFeature(ARM::HasV9_0aOps))
    return ARMBuildAttrs::v9_A;
  else if (STI.hasFeature(ARM::HasV8Ops)) {
    if (STI.hasFeature(ARM::FeatureRClass))
      return ARMBuildAttrs::v8_R;
    return ARMBuildAttrs::v8_A;
  } else if (STI.hasFeature(ARM::HasV8_1MMainlineOps))
    return ARMBuildAttrs::v8_1_M_Main;
  else if (STI.hasFeature(ARM::HasV8MMainlineOps))
    return ARMBuildAttrs::v8_M_Main;
  else if (STI.hasFeature(ARM::HasV7Ops)) {
    if (STI.hasFeature(ARM::FeatureMClass) && STI.hasFeature(ARM::FeatureDSP))
      return ARMBuildAttrs::v7E_M;
    return ARMBuildAttrs::v7;
  } else if (STI.hasFeature(ARM::HasV6T2Ops))
    return ARMBuildAttrs::v6T2;
  else if (STI.hasFeature(ARM::HasV8MBaselineOps))
    return ARMBuildAttrs::v8_M_Base;
  else if (STI.hasFeature(ARM::HasV6MOps))
    return ARMBuildAttrs::v6S_M;
  else if (STI.hasFeature(ARM::HasV6Ops))
    return ARMBuildAttrs::v6;
  else if (STI.hasFeature(ARM::HasV5TEOps))
    return ARMBuildAttrs::v5TE;
  else if (STI.hasFeature(ARM::HasV5TOps))
    return ARMBuildAttrs::v5T;
  else if (STI.hasFeature(ARM::HasV4TOps))
    return ARMBuildAttrs::v4T;
  else
    return ARMBuildAttrs::v4;
}

static bool isV8M(const MCSubtargetInfo &STI) {
  // Note that v8M Baseline is a subset of v6T2!
  return (STI.hasFeature(ARM::HasV8MBaselineOps) &&
          !STI.hasFeature(ARM::HasV6T2Ops)) ||
         STI.hasFeature(ARM::HasV8MMainlineOps);
}

/// Emit the build attributes that only depend on the hardware that we expect
/// to be available, and not on the ABI, or any source-language choices.
void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
  switchVendor("aeabi");

  const StringRef CPUString = STI.getCPU();
  if (!CPUString.empty() && !CPUString.starts_with("generic")) {
    // FIXME: remove krait check when GNU tools support krait cpu
    if (STI.hasFeature(ARM::ProcKrait)) {
      emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
      // We consider krait as a "cortex-a9" + hwdiv CPU
      // Enable hwdiv through ".arch_extension idiv"
      if (STI.hasFeature(ARM::FeatureHWDivThumb) ||
          STI.hasFeature(ARM::FeatureHWDivARM))
        emitArchExtension(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM);
    } else {
      emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
    }
  }

  emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(STI));
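  // As an illustration (assuming the usual assembly and ELF implementations
  // of these hooks): the calls above surface as directives such as
  // ".cpu cortex-a9" and ".eabi_attribute <tag>, <value>" in textual output,
  // or as entries in the .ARM.attributes section of an object file.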

  if (STI.hasFeature(ARM::FeatureAClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::ApplicationProfile);
  } else if (STI.hasFeature(ARM::FeatureRClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::RealTimeProfile);
  } else if (STI.hasFeature(ARM::FeatureMClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::MicroControllerProfile);
  }

  emitAttribute(ARMBuildAttrs::ARM_ISA_use, STI.hasFeature(ARM::FeatureNoARM)
                                                ? ARMBuildAttrs::Not_Allowed
                                                : ARMBuildAttrs::Allowed);

  if (isV8M(STI)) {
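    // v8-M cores only implement the Thumb instruction set; the "derived"
    // value tells consumers to infer the permitted Thumb subset from
    // Tag_CPU_arch.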
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
                  ARMBuildAttrs::AllowThumbDerived);
  } else if (STI.hasFeature(ARM::FeatureThumb2)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
                  ARMBuildAttrs::AllowThumb32);
  } else if (STI.hasFeature(ARM::HasV4TOps)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
  }

  if (STI.hasFeature(ARM::FeatureNEON)) {
    /* NEON is not exactly a VFP architecture, but GAS emits one of
     * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
    if (STI.hasFeature(ARM::FeatureFPARMv8)) {
      if (STI.hasFeature(ARM::FeatureCrypto))
        emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8);
      else
        emitFPU(ARM::FK_NEON_FP_ARMV8);
    } else if (STI.hasFeature(ARM::FeatureVFP4))
      emitFPU(ARM::FK_NEON_VFPV4);
    else
      emitFPU(STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_NEON_FP16
                                               : ARM::FK_NEON);
    // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
    if (STI.hasFeature(ARM::HasV8Ops))
      emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
                    STI.hasFeature(ARM::HasV8_1aOps)
                        ? ARMBuildAttrs::AllowNeonARMv8_1a
                        : ARMBuildAttrs::AllowNeonARMv8);
  } else {
    if (STI.hasFeature(ARM::FeatureFPARMv8_D16_SP)) {
      // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
      // FPU, but there are two different names for it depending on the CPU.
      if (STI.hasFeature(ARM::FeatureD32))
        emitFPU(ARM::FK_FP_ARMV8);
      else {
        emitFPU(STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_FPV5_D16
                                                 : ARM::FK_FPV5_SP_D16);
        if (STI.hasFeature(ARM::HasMVEFloatOps))
          emitArchExtension(ARM::AEK_MVE | ARM::AEK_DSP | ARM::AEK_FP);
      }
    } else if (STI.hasFeature(ARM::FeatureVFP4_D16_SP))
      emitFPU(STI.hasFeature(ARM::FeatureD32)
                  ? ARM::FK_VFPV4
                  : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_VFPV4_D16
                                                      : ARM::FK_FPV4_SP_D16));
    else if (STI.hasFeature(ARM::FeatureVFP3_D16_SP))
      emitFPU(
          STI.hasFeature(ARM::FeatureD32)
              // +d32
              ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16
                                                  : ARM::FK_VFPV3)
              // -d32
              : (STI.hasFeature(ARM::FeatureFP64)
                     ? (STI.hasFeature(ARM::FeatureFP16)
                            ? ARM::FK_VFPV3_D16_FP16
                            : ARM::FK_VFPV3_D16)
                     : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16
                                                         : ARM::FK_VFPV3XD)));
    else if (STI.hasFeature(ARM::FeatureVFP2_SP))
      emitFPU(ARM::FK_VFPV2);
  }

  // ABI_HardFP_use attribute to indicate single precision FP.
  if (STI.hasFeature(ARM::FeatureVFP2_SP) && !STI.hasFeature(ARM::FeatureFP64))
    emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
                  ARMBuildAttrs::HardFPSinglePrecision);

  if (STI.hasFeature(ARM::FeatureFP16))
    emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);

  if (STI.hasFeature(ARM::FeatureMP))
    emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);

  if (STI.hasFeature(ARM::HasMVEFloatOps))
    emitAttribute(ARMBuildAttrs::MVE_arch,
                  ARMBuildAttrs::AllowMVEIntegerAndFloat);
  else if (STI.hasFeature(ARM::HasMVEIntegerOps))
    emitAttribute(ARMBuildAttrs::MVE_arch, ARMBuildAttrs::AllowMVEInteger);

  // Hardware divide in ARM mode is part of base arch, starting from ARMv8.
  // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
  // It is not possible to produce DisallowDIV: if hwdiv is present in the base
  // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
  // AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
  // otherwise, the default value (AllowDIVIfExists) applies.
  if (STI.hasFeature(ARM::FeatureHWDivARM) && !STI.hasFeature(ARM::HasV8Ops))
    emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);

  if (STI.hasFeature(ARM::FeatureDSP) && isV8M(STI))
    emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureStrictAlign))
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
                  ARMBuildAttrs::Not_Allowed);
  else
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
                  ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureTrustZone) &&
      STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowTZVirtualization);
  else if (STI.hasFeature(ARM::FeatureTrustZone))
    emitAttribute(ARMBuildAttrs::Virtualization_use, ARMBuildAttrs::AllowTZ);
  else if (STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowVirtualization);

  if (STI.hasFeature(ARM::FeaturePACBTI)) {
    emitAttribute(ARMBuildAttrs::PAC_extension, ARMBuildAttrs::AllowPAC);
    emitAttribute(ARMBuildAttrs::BTI_extension, ARMBuildAttrs::AllowBTI);
  }
}

MCTargetStreamer *
llvm::createARMObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
  const Triple &TT = STI.getTargetTriple();
  if (TT.isOSBinFormatELF())
    return createARMObjectTargetELFStreamer(S);
  if (TT.isOSBinFormatCOFF())
    return createARMObjectTargetWinCOFFStreamer(S);
  if (TT.isOSBinFormatMachO())
    return createARMObjectTargetMachOStreamer(S);
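  // No recognised object format: fall back to the base ARMTargetStreamer,
  // whose target-specific hooks (defined above) are all no-ops.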
  return new ARMTargetStreamer(S);
}