//===- ARMTargetStreamer.cpp - ARMTargetStreamer class --*- C++ -*---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ARMTargetStreamer class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/MC/ConstantPools.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include <climits>

using namespace llvm;

//
// ARMTargetStreamer Implementation
//

ARMTargetStreamer::ARMTargetStreamer(MCStreamer &S)
    : MCTargetStreamer(S), ConstantPools(new AssemblerConstantPools()) {}

ARMTargetStreamer::~ARMTargetStreamer() = default;

// The constant pool handling is shared by all ARMTargetStreamer
// implementations.
const MCExpr *ARMTargetStreamer::addConstantPoolEntry(const MCExpr *Expr,
                                                      SMLoc Loc) {
  return ConstantPools->addEntry(Streamer, Expr, 4, Loc);
}

void ARMTargetStreamer::emitCurrentConstantPool() {
  ConstantPools->emitForCurrentSection(Streamer);
  ConstantPools->clearCacheForCurrentSection(Streamer);
}

// emitConstantPools() - write out any non-empty assembler constant pools.
void ARMTargetStreamer::emitConstantPools() {
  ConstantPools->emitAll(Streamer);
}

// reset() - Reset any state.
void ARMTargetStreamer::reset() {}

void ARMTargetStreamer::emitInst(uint32_t Inst, char Suffix) {
  unsigned Size;
  char Buffer[4];
  const bool LittleEndian =
      getStreamer().getContext().getAsmInfo()->isLittleEndian();

  switch (Suffix) {
  case '\0':
    Size = 4;

    for (unsigned II = 0, IE = Size; II != IE; II++) {
      const unsigned I = LittleEndian ? (Size - II - 1) : II;
      Buffer[Size - II - 1] = uint8_t(Inst >> I * CHAR_BIT);
    }

    break;
  case 'n':
  case 'w':
    Size = (Suffix == 'n' ? 2 : 4);

    // Thumb wide instructions are emitted as a pair of 16-bit halfwords of
    // the appropriate endianness.
    for (unsigned II = 0, IE = Size; II != IE; II = II + 2) {
      const unsigned I0 = LittleEndian ? II + 0 : II + 1;
      const unsigned I1 = LittleEndian ? II + 1 : II + 0;
      Buffer[Size - II - 2] = uint8_t(Inst >> I0 * CHAR_BIT);
      Buffer[Size - II - 1] = uint8_t(Inst >> I1 * CHAR_BIT);
    }

    break;
  default:
    llvm_unreachable("Invalid Suffix");
  }
  getStreamer().emitBytes(StringRef(Buffer, Size));
}
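
// For reference, emitInst backs the GNU-style .inst family of directives in
// the ARM assembler; the suffix selects the encoding width, e.g.:
//
//   .inst   0xe320f000   @ ARM encoding, Suffix == '\0', 4 bytes
//   .inst.n 0xbf00       @ Thumb narrow encoding, Suffix == 'n', 2 bytes
//   .inst.w 0xf3af8000   @ Thumb wide encoding, Suffix == 'w', 4 bytes
//
// (The example encodings are NOPs; they are illustrative, not exhaustive.)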

// The remaining callbacks should be handled separately by each
// streamer.
void ARMTargetStreamer::emitFnStart() {}
void ARMTargetStreamer::emitFnEnd() {}
void ARMTargetStreamer::emitCantUnwind() {}
void ARMTargetStreamer::emitPersonality(const MCSymbol *Personality) {}
void ARMTargetStreamer::emitPersonalityIndex(unsigned Index) {}
void ARMTargetStreamer::emitHandlerData() {}
void ARMTargetStreamer::emitSetFP(unsigned FpReg, unsigned SpReg,
                                  int64_t Offset) {}
void ARMTargetStreamer::emitMovSP(unsigned Reg, int64_t Offset) {}
void ARMTargetStreamer::emitPad(int64_t Offset) {}
void ARMTargetStreamer::emitRegSave(const SmallVectorImpl<unsigned> &RegList,
                                    bool isVector) {}
void ARMTargetStreamer::emitUnwindRaw(int64_t StackOffset,
                                      const SmallVectorImpl<uint8_t> &Opcodes) {
}
void ARMTargetStreamer::switchVendor(StringRef Vendor) {}
void ARMTargetStreamer::emitAttribute(unsigned Attribute, unsigned Value) {}
void ARMTargetStreamer::emitTextAttribute(unsigned Attribute,
                                          StringRef String) {}
void ARMTargetStreamer::emitIntTextAttribute(unsigned Attribute,
                                             unsigned IntValue,
                                             StringRef StringValue) {}
void ARMTargetStreamer::emitArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitArchExtension(uint64_t ArchExt) {}
void ARMTargetStreamer::emitObjectArch(ARM::ArchKind Arch) {}
void ARMTargetStreamer::emitFPU(ARM::FPUKind FPU) {}
void ARMTargetStreamer::finishAttributeSection() {}
void ARMTargetStreamer::annotateTLSDescriptorSequence(
    const MCSymbolRefExpr *SRE) {}
void ARMTargetStreamer::emitThumbSet(MCSymbol *Symbol, const MCExpr *Value) {}

void ARMTargetStreamer::emitARMWinCFIAllocStack(unsigned Size, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveRegMask(unsigned Mask, bool Wide) {}
void ARMTargetStreamer::emitARMWinCFISaveSP(unsigned Reg) {}
void ARMTargetStreamer::emitARMWinCFISaveFRegs(unsigned First, unsigned Last) {}
void ARMTargetStreamer::emitARMWinCFISaveLR(unsigned Offset) {}
void ARMTargetStreamer::emitARMWinCFINop(bool Wide) {}
void ARMTargetStreamer::emitARMWinCFIPrologEnd(bool Fragment) {}
void ARMTargetStreamer::emitARMWinCFIEpilogStart(unsigned Condition) {}
void ARMTargetStreamer::emitARMWinCFIEpilogEnd() {}
void ARMTargetStreamer::emitARMWinCFICustom(unsigned Opcode) {}
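
// As a point of reference (assumed typical output, abridged), the unwinding
// callbacks above correspond to the ARM EHABI assembler directives, e.g.:
//
//   .fnstart
//   .save   {r4, r5, lr}     @ emitRegSave
//   .setfp  r11, sp, #0      @ emitSetFP
//   .pad    #16              @ emitPad
//   .personality __gxx_personality_v0
//   .handlerdata
//   .fnend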

static ARMBuildAttrs::CPUArch getArchForCPU(const MCSubtargetInfo &STI) {
  if (STI.getCPU() == "xscale")
    return ARMBuildAttrs::v5TEJ;

  if (STI.hasFeature(ARM::HasV9_0aOps))
    return ARMBuildAttrs::v9_A;
  else if (STI.hasFeature(ARM::HasV8Ops)) {
    if (STI.hasFeature(ARM::FeatureRClass))
      return ARMBuildAttrs::v8_R;
    return ARMBuildAttrs::v8_A;
  } else if (STI.hasFeature(ARM::HasV8_1MMainlineOps))
    return ARMBuildAttrs::v8_1_M_Main;
  else if (STI.hasFeature(ARM::HasV8MMainlineOps))
    return ARMBuildAttrs::v8_M_Main;
  else if (STI.hasFeature(ARM::HasV7Ops)) {
    if (STI.hasFeature(ARM::FeatureMClass) && STI.hasFeature(ARM::FeatureDSP))
      return ARMBuildAttrs::v7E_M;
    return ARMBuildAttrs::v7;
  } else if (STI.hasFeature(ARM::HasV6T2Ops))
    return ARMBuildAttrs::v6T2;
  else if (STI.hasFeature(ARM::HasV8MBaselineOps))
    return ARMBuildAttrs::v8_M_Base;
  else if (STI.hasFeature(ARM::HasV6MOps))
    return ARMBuildAttrs::v6S_M;
  else if (STI.hasFeature(ARM::HasV6Ops))
    return ARMBuildAttrs::v6;
  else if (STI.hasFeature(ARM::HasV5TEOps))
    return ARMBuildAttrs::v5TE;
  else if (STI.hasFeature(ARM::HasV5TOps))
    return ARMBuildAttrs::v5T;
  else if (STI.hasFeature(ARM::HasV4TOps))
    return ARMBuildAttrs::v4T;
  else
    return ARMBuildAttrs::v4;
}

static bool isV8M(const MCSubtargetInfo &STI) {
  // Note that v8M Baseline is a subset of v6T2!
  return (STI.hasFeature(ARM::HasV8MBaselineOps) &&
          !STI.hasFeature(ARM::HasV6T2Ops)) ||
         STI.hasFeature(ARM::HasV8MMainlineOps);
}

/// Emit the build attributes that only depend on the hardware that we expect
/// to be available, and not on the ABI, or any source-language choices.
void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) {
  switchVendor("aeabi");

  const StringRef CPUString = STI.getCPU();
  if (!CPUString.empty() && !CPUString.starts_with("generic")) {
    // FIXME: remove krait check when GNU tools support krait cpu
    if (STI.hasFeature(ARM::ProcKrait)) {
      emitTextAttribute(ARMBuildAttrs::CPU_name, "cortex-a9");
      // We consider krait as a "cortex-a9" + hwdiv CPU
      // Enable hwdiv through ".arch_extension idiv"
      if (STI.hasFeature(ARM::FeatureHWDivThumb) ||
          STI.hasFeature(ARM::FeatureHWDivARM))
        emitArchExtension(ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM);
    } else {
      emitTextAttribute(ARMBuildAttrs::CPU_name, CPUString);
    }
  }

  emitAttribute(ARMBuildAttrs::CPU_arch, getArchForCPU(STI));

  if (STI.hasFeature(ARM::FeatureAClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::ApplicationProfile);
  } else if (STI.hasFeature(ARM::FeatureRClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::RealTimeProfile);
  } else if (STI.hasFeature(ARM::FeatureMClass)) {
    emitAttribute(ARMBuildAttrs::CPU_arch_profile,
                  ARMBuildAttrs::MicroControllerProfile);
  }

  emitAttribute(ARMBuildAttrs::ARM_ISA_use, STI.hasFeature(ARM::FeatureNoARM)
                                                ? ARMBuildAttrs::Not_Allowed
                                                : ARMBuildAttrs::Allowed);

  if (isV8M(STI)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use,
                  ARMBuildAttrs::AllowThumbDerived);
  } else if (STI.hasFeature(ARM::FeatureThumb2)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::AllowThumb32);
  } else if (STI.hasFeature(ARM::HasV4TOps)) {
    emitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed);
  }
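
  // The FPU selection below reaches textual assembly as a .fpu directive
  // (object streamers encode the equivalent build attributes instead), e.g.:
  //
  //   .fpu neon-fp-armv8
  //   .fpu fpv5-sp-d16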
  if (STI.hasFeature(ARM::FeatureNEON)) {
    /* NEON is not exactly a VFP architecture, but GAS emits one of
     * neon/neon-fp-armv8/neon-vfpv4/vfpv3/vfpv2 for .fpu parameters */
    if (STI.hasFeature(ARM::FeatureFPARMv8)) {
      if (STI.hasFeature(ARM::FeatureCrypto))
        emitFPU(ARM::FK_CRYPTO_NEON_FP_ARMV8);
      else
        emitFPU(ARM::FK_NEON_FP_ARMV8);
    } else if (STI.hasFeature(ARM::FeatureVFP4))
      emitFPU(ARM::FK_NEON_VFPV4);
    else
      emitFPU(STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_NEON_FP16
                                               : ARM::FK_NEON);
    // Emit Tag_Advanced_SIMD_arch for ARMv8 architecture
    if (STI.hasFeature(ARM::HasV8Ops))
      emitAttribute(ARMBuildAttrs::Advanced_SIMD_arch,
                    STI.hasFeature(ARM::HasV8_1aOps)
                        ? ARMBuildAttrs::AllowNeonARMv8_1a
                        : ARMBuildAttrs::AllowNeonARMv8);
  } else {
    if (STI.hasFeature(ARM::FeatureFPARMv8_D16_SP)) {
      // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one
      // FPU, but there are two different names for it depending on the CPU.
      if (STI.hasFeature(ARM::FeatureD32))
        emitFPU(ARM::FK_FP_ARMV8);
      else {
        emitFPU(STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_FPV5_D16
                                                 : ARM::FK_FPV5_SP_D16);
        if (STI.hasFeature(ARM::HasMVEFloatOps))
          emitArchExtension(ARM::AEK_SIMD | ARM::AEK_DSP | ARM::AEK_FP);
      }
    } else if (STI.hasFeature(ARM::FeatureVFP4_D16_SP))
      emitFPU(STI.hasFeature(ARM::FeatureD32)
                  ? ARM::FK_VFPV4
                  : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_VFPV4_D16
                                                      : ARM::FK_FPV4_SP_D16));
    else if (STI.hasFeature(ARM::FeatureVFP3_D16_SP))
      emitFPU(
          STI.hasFeature(ARM::FeatureD32)
              // +d32
              ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16
                                                  : ARM::FK_VFPV3)
              // -d32
              : (STI.hasFeature(ARM::FeatureFP64)
                     ? (STI.hasFeature(ARM::FeatureFP16)
                            ? ARM::FK_VFPV3_D16_FP16
                            : ARM::FK_VFPV3_D16)
                     : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16
                                                         : ARM::FK_VFPV3XD)));
    else if (STI.hasFeature(ARM::FeatureVFP2_SP))
      emitFPU(ARM::FK_VFPV2);
  }

  // ABI_HardFP_use attribute to indicate single precision FP.
  if (STI.hasFeature(ARM::FeatureVFP2_SP) && !STI.hasFeature(ARM::FeatureFP64))
    emitAttribute(ARMBuildAttrs::ABI_HardFP_use,
                  ARMBuildAttrs::HardFPSinglePrecision);

  if (STI.hasFeature(ARM::FeatureFP16))
    emitAttribute(ARMBuildAttrs::FP_HP_extension, ARMBuildAttrs::AllowHPFP);

  if (STI.hasFeature(ARM::FeatureMP))
    emitAttribute(ARMBuildAttrs::MPextension_use, ARMBuildAttrs::AllowMP);

  if (STI.hasFeature(ARM::HasMVEFloatOps))
    emitAttribute(ARMBuildAttrs::MVE_arch,
                  ARMBuildAttrs::AllowMVEIntegerAndFloat);
  else if (STI.hasFeature(ARM::HasMVEIntegerOps))
    emitAttribute(ARMBuildAttrs::MVE_arch, ARMBuildAttrs::AllowMVEInteger);
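
  // For illustration (tag number per the ARM ABI build attributes addenda;
  // assumed, check ARMBuildAttributes.h), each emitAttribute pair above
  // appears in textual assembly as an .eabi_attribute directive, e.g.:
  //
  //   .eabi_attribute 48, 2   @ Tag_MVE_arch: MVE integer and FP allowed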

  // Hardware divide in ARM mode is part of base arch, starting from ARMv8.
  // If only Thumb hwdiv is present, it must also be in base arch (ARMv7-R/M).
  // It is not possible to produce DisallowDIV: if hwdiv is present in the base
  // arch, supplying -hwdiv downgrades the effective arch, via ClearImpliedBits.
  // AllowDIVExt is only emitted if hwdiv isn't available in the base arch;
  // otherwise, the default value (AllowDIVIfExists) applies.
  if (STI.hasFeature(ARM::FeatureHWDivARM) && !STI.hasFeature(ARM::HasV8Ops))
    emitAttribute(ARMBuildAttrs::DIV_use, ARMBuildAttrs::AllowDIVExt);

  if (STI.hasFeature(ARM::FeatureDSP) && isV8M(STI))
    emitAttribute(ARMBuildAttrs::DSP_extension, ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureStrictAlign))
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access,
                  ARMBuildAttrs::Not_Allowed);
  else
    emitAttribute(ARMBuildAttrs::CPU_unaligned_access, ARMBuildAttrs::Allowed);

  if (STI.hasFeature(ARM::FeatureTrustZone) &&
      STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowTZVirtualization);
  else if (STI.hasFeature(ARM::FeatureTrustZone))
    emitAttribute(ARMBuildAttrs::Virtualization_use, ARMBuildAttrs::AllowTZ);
  else if (STI.hasFeature(ARM::FeatureVirtualization))
    emitAttribute(ARMBuildAttrs::Virtualization_use,
                  ARMBuildAttrs::AllowVirtualization);

  if (STI.hasFeature(ARM::FeaturePACBTI)) {
    emitAttribute(ARMBuildAttrs::PAC_extension, ARMBuildAttrs::AllowPAC);
    emitAttribute(ARMBuildAttrs::BTI_extension, ARMBuildAttrs::AllowBTI);
  }
}

MCTargetStreamer *
llvm::createARMObjectTargetStreamer(MCStreamer &S,
                                    const MCSubtargetInfo &STI) {
  const Triple &TT = STI.getTargetTriple();
  if (TT.isOSBinFormatELF())
    return createARMObjectTargetELFStreamer(S);
  if (TT.isOSBinFormatCOFF())
    return createARMObjectTargetWinCOFFStreamer(S);
  return new ARMTargetStreamer(S);
}
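
// Note: object formats other than ELF and COFF (e.g. Mach-O) fall through to
// the generic ARMTargetStreamer above, whose attribute and unwinding
// callbacks are intentionally no-ops.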