//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchAsmBackend.h"
#include "LoongArchFixupKinds.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "loongarch-asmbackend"

using namespace llvm;

std::optional<MCFixupKind>
LoongArchAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    auto Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
#undef ELF_RELOC
                    .Case("BFD_RELOC_NONE", ELF::R_LARCH_NONE)
                    .Case("BFD_RELOC_32", ELF::R_LARCH_32)
                    .Case("BFD_RELOC_64", ELF::R_LARCH_64)
                    .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}

const MCFixupKindInfo &
LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in LoongArchFixupKinds.h.
      //
      // {name, offset, bits, flags}
      {"fixup_loongarch_b16", 10, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b21", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b26", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_abs_hi20", 5, 20, 0},
      {"fixup_loongarch_abs_lo12", 10, 12, 0},
      {"fixup_loongarch_abs64_lo20", 5, 20, 0},
      {"fixup_loongarch_abs64_hi12", 10, 12, 0},
      {"fixup_loongarch_tls_le_hi20", 5, 20, 0},
      {"fixup_loongarch_tls_le_lo12", 10, 12, 0},
      {"fixup_loongarch_tls_le64_lo20", 5, 20, 0},
      {"fixup_loongarch_tls_le64_hi12", 10, 12, 0},
      // TODO: Add more fixup kinds.
  };

  static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from .reloc directive are like R_LARCH_NONE. They
  // do not require any extra processing.
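  // For example, an assembly input such as
  //   .reloc ., R_LARCH_ADD32, sym
  // arrives here as FirstLiteralRelocationKind + ELF::R_LARCH_ADD32 (see
  // getFixupKind above) and is emitted verbatim, so the generic FK_NONE info
  // is sufficient.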
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) {
  Ctx.reportError(Loc, "fixup value out of range [" + Twine(llvm::minIntN(N)) +
                           ", " + Twine(llvm::maxIntN(N)) + "]");
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return Value;
  case LoongArch::fixup_loongarch_b16: {
    if (!isInt<18>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 18);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return (Value >> 2) & 0xffff;
  }
  case LoongArch::fixup_loongarch_b21: {
    if (!isInt<23>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 23);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f);
  }
  case LoongArch::fixup_loongarch_b26: {
    if (!isInt<28>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 28);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff);
  }
  case LoongArch::fixup_loongarch_abs_hi20:
  case LoongArch::fixup_loongarch_tls_le_hi20:
    return (Value >> 12) & 0xfffff;
  case LoongArch::fixup_loongarch_abs_lo12:
  case LoongArch::fixup_loongarch_tls_le_lo12:
    return Value & 0xfff;
  case LoongArch::fixup_loongarch_abs64_lo20:
  case LoongArch::fixup_loongarch_tls_le64_lo20:
    return (Value >> 32) & 0xfffff;
  case LoongArch::fixup_loongarch_abs64_hi12:
  case LoongArch::fixup_loongarch_tls_le64_hi12:
    return (Value >> 52) & 0xfff;
  }
}

static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup,
                        MutableArrayRef<char> Data, uint64_t Value) {
  unsigned I;
  for (I = 0; I != Data.size() && Value; ++I, Value >>= 7)
    Data[I] |= uint8_t(Value & 0x7f);
  if (Value)
    Ctx.reportError(Fixup.getLoc(), "Invalid uleb128 value!");
}

void LoongArchAsmBackend::applyFixup(const MCAssembler &Asm,
                                     const MCFixup &Fixup,
                                     const MCValue &Target,
                                     MutableArrayRef<char> Data, uint64_t Value,
                                     bool IsResolved,
                                     const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKind Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  MCContext &Ctx = Asm.getContext();

  // Fixup leb128 separately.
  if (Fixup.getTargetKind() == FK_Data_leb128)
    return fixupLeb128(Ctx, Fixup, Data, Value);

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);
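  // For example, for fixup_loongarch_b26 a byte offset of 0x1000 is a word
  // offset of 0x400, so adjustFixupValue returns 0x100000: bits offs[15:0]
  // of the word offset land in instruction bits 25:10 and offs[25:16] land
  // in bits 9:0.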
  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned I = 0; I != NumBytes; ++I) {
    Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
  }
}

// Linker relaxation may change the code size, so we have to insert Nops for
// the .align directive when linker relaxation is enabled; the linker can then
// satisfy the alignment by removing Nops.
// The function returns, via \p Size, the total number of Nop bytes we need to
// insert.
bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
    const MCAlignFragment &AF, unsigned &Size) {
  // Calculate the Nops size only when linker relaxation is enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Ignore the alignment if MaxBytesToEmit is less than the minimum Nop size.
  const unsigned MinNopLen = 4;
  if (AF.getMaxBytesToEmit() < MinNopLen)
    return false;
  Size = AF.getAlignment().value() - MinNopLen;
  return AF.getAlignment() > MinNopLen;
}

// When linker relaxation is enabled, an R_LARCH_ALIGN relocation must be
// emitted to indicate the position of the Nops and the total number of Nop
// bytes that were inserted.
// This function inserts a fixup_loongarch_align fixup, which is eventually
// lowered to an R_LARCH_ALIGN relocation.
// The extended form of R_LARCH_ALIGN requires a symbol index. The lowest 8
// bits of the addend hold the log2 of the alignment and the remaining bits
// hold the maximum number of bytes to emit; a maximum of zero means the emit
// limit is ignored.
bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
                                                        MCAlignFragment &AF) {
  // Insert the fixup only when linker relaxation is enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Calculate the total number of Nops we need to insert. If there are none
  // to insert, simply return.
  unsigned InsertedNopBytes;
  if (!shouldInsertExtraNopBytesForCodeAlign(AF, InsertedNopBytes))
    return false;

  MCSection *Sec = AF.getParent();
  MCContext &Ctx = Asm.getContext();
  const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
  // Create the fixup_loongarch_align fixup.
  MCFixup Fixup =
      MCFixup::create(0, Dummy, MCFixupKind(LoongArch::fixup_loongarch_align));
  unsigned MaxBytesToEmit = AF.getMaxBytesToEmit();

  auto createExtendedValue = [&]() {
    const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
    if (MCSym == nullptr) {
      // Define a marker symbol at the section with an offset of 0.
      MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
      Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
      Asm.registerSymbol(*Sym);
      MCSym = MCSymbolRefExpr::create(Sym, Ctx);
      getSecToAlignSym()[Sec] = MCSym;
    }
    return MCValue::get(MCSym, nullptr,
                        MaxBytesToEmit << 8 | Log2(AF.getAlignment()));
  };

  uint64_t FixedValue = 0;
  MCValue Value = MaxBytesToEmit >= InsertedNopBytes
                      ? MCValue::get(InsertedNopBytes)
                      : createExtendedValue();
  Asm.getWriter().recordRelocation(Asm, &AF, Fixup, Value, FixedValue);

  return true;
}

bool LoongArchAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                                const MCFixup &Fixup,
                                                const MCValue &Target,
                                                const MCSubtargetInfo *STI) {
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return true;
  switch (Fixup.getTargetKind()) {
  default:
    return STI->hasFeature(LoongArch::FeatureRelax);
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return !Target.isAbsolute();
  }
}

static inline std::pair<MCFixupKind, MCFixupKind>
getRelocPairForSize(unsigned Size) {
  switch (Size) {
  default:
    llvm_unreachable("unsupported fixup size");
  case 6:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD6),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB6));
  case 8:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD8),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB8));
  case 16:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD16),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB16));
  case 32:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD32),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB32));
  case 64:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD64),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB64));
  case 128:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD_ULEB128),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB_ULEB128));
  }
}

std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(const MCAssembler &Asm,
                                                       MCLEBFragment &LF,
                                                       int64_t &Value) const {
  const MCExpr &Expr = LF.getValue();
  if (LF.isSigned() || !Expr.evaluateKnownAbsolute(Value, Asm))
    return std::make_pair(false, false);
  LF.getFixups().push_back(
      MCFixup::create(0, &Expr, FK_Data_leb128, Expr.getLoc()));
  return std::make_pair(true, true);
}

bool LoongArchAsmBackend::relaxDwarfLineAddr(const MCAssembler &Asm,
                                             MCDwarfLineAddrFragment &DF,
                                             bool &WasRelaxed) const {
  MCContext &C = Asm.getContext();

  int64_t LineDelta = DF.getLineDelta();
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  SmallVectorImpl<MCFixup> &Fixups = DF.getFixups();
  size_t OldSize = Data.size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, Asm))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, Asm);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  Data.clear();
  Fixups.clear();
  raw_svector_ostream OS(Data);

  // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence.
  if (LineDelta != INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_advance_line);
    encodeSLEB128(LineDelta, OS);
  }

  unsigned Offset;
  std::pair<MCFixupKind, MCFixupKind> FK;

  // According to the DWARF specification, the `DW_LNS_fixed_advance_pc`
  // opcode takes a single unsigned half (unencoded) operand. The maximum
  // encodable value is therefore 65535. Set a conservative upper bound for
  // relaxation.
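  // Because linker relaxation may still change the address delta after
  // assembly, a pair of ADD/SUB relocations (via getRelocPairForSize) is
  // attached to the operand so the linker can compute the final value: over
  // a pointer-sized DW_LNE_set_address operand for large deltas, or a 16-bit
  // DW_LNS_fixed_advance_pc operand for small ones.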
  if (Value > 60000) {
    unsigned PtrSize = C.getAsmInfo()->getCodePointerSize();

    OS << uint8_t(dwarf::DW_LNS_extended_op);
    encodeULEB128(PtrSize + 1, OS);

    OS << uint8_t(dwarf::DW_LNE_set_address);
    Offset = OS.tell();
    assert((PtrSize == 4 || PtrSize == 8) && "Unexpected pointer size");
    FK = getRelocPairForSize(PtrSize == 4 ? 32 : 64);
    OS.write_zeros(PtrSize);
  } else {
    OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc);
    Offset = OS.tell();
    FK = getRelocPairForSize(16);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
  }

  const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
  Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
  Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));

  if (LineDelta == INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_extended_op);
    OS << uint8_t(1);
    OS << uint8_t(dwarf::DW_LNE_end_sequence);
  } else {
    OS << uint8_t(dwarf::DW_LNS_copy);
  }

  WasRelaxed = OldSize != Data.size();
  return true;
}

bool LoongArchAsmBackend::relaxDwarfCFA(const MCAssembler &Asm,
                                        MCDwarfCallFrameFragment &DF,
                                        bool &WasRelaxed) const {
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  SmallVectorImpl<MCFixup> &Fixups = DF.getFixups();
  size_t OldSize = Data.size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, Asm))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, Asm);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  Data.clear();
  Fixups.clear();
  raw_svector_ostream OS(Data);

  assert(Asm.getContext().getAsmInfo()->getMinInstAlignment() == 1 &&
         "expected 1-byte alignment");
  if (Value == 0) {
    WasRelaxed = OldSize != Data.size();
    return true;
  }

  auto AddFixups = [&Fixups,
                    &AddrDelta](unsigned Offset,
                                std::pair<MCFixupKind, MCFixupKind> FK) {
    const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
    Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
    Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));
  };

  if (isUIntN(6, Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc);
    AddFixups(0, getRelocPairForSize(6));
  } else if (isUInt<8>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc1);
    support::endian::write<uint8_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(8));
  } else if (isUInt<16>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc2);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(16));
  } else if (isUInt<32>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc4);
    support::endian::write<uint32_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(32));
  } else {
    llvm_unreachable("unsupported CFA encoding");
  }

  WasRelaxed = OldSize != Data.size();
  return true;
}

bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                       const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to a 4-byte boundary
  // with 0-fill padding.
  OS.write_zeros(Count % 4);

  // The remainder is now padded with 4-byte nops.
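  // Encoding note: `andi r0, r0, 0` places the andi opcode (0b0000001101) in
  // bits 31:22 with all remaining fields zero, giving the word 0x03400000,
  // which is written little-endian below as 00 00 40 03.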
  // nop: andi r0, r0, 0
  for (; Count >= 4; Count -= 4)
    OS.write("\0\0\x40\x03", 4);

  return true;
}

bool LoongArchAsmBackend::handleAddSubRelocations(const MCAssembler &Asm,
                                                  const MCFragment &F,
                                                  const MCFixup &Fixup,
                                                  const MCValue &Target,
                                                  uint64_t &FixedValue) const {
  std::pair<MCFixupKind, MCFixupKind> FK;
  uint64_t FixedValueA, FixedValueB;
  const MCSymbol &SA = Target.getSymA()->getSymbol();
  const MCSymbol &SB = Target.getSymB()->getSymbol();

  bool force = !SA.isInSection() || !SB.isInSection();
  if (!force) {
    const MCSection &SecA = SA.getSection();
    const MCSection &SecB = SB.getSection();

    // We need to record a relocation if SecA != SecB. Usually SecB is the
    // same as the section of the Fixup, in which case the relocation is
    // recorded as PC-relative; if SecB differs from the Fixup's section, an
    // error is reported. Either way, just return false and let handleFixup
    // finish the work.
    if (&SecA != &SecB)
      return false;

    // In the SecA == SecB case: if linker relaxation is enabled, we need to
    // record the ADD/SUB relocations. Otherwise the FixedValue has already
    // been calculated in evaluateFixup, so return true and avoid recording
    // relocations.
    if (!STI.hasFeature(LoongArch::FeatureRelax))
      return true;
  }

  switch (Fixup.getKind()) {
  case llvm::FK_Data_1:
    FK = getRelocPairForSize(8);
    break;
  case llvm::FK_Data_2:
    FK = getRelocPairForSize(16);
    break;
  case llvm::FK_Data_4:
    FK = getRelocPairForSize(32);
    break;
  case llvm::FK_Data_8:
    FK = getRelocPairForSize(64);
    break;
  case llvm::FK_Data_leb128:
    FK = getRelocPairForSize(128);
    break;
  default:
    llvm_unreachable("unsupported fixup size");
  }
  MCValue A = MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
  MCValue B = MCValue::get(Target.getSymB());
  auto FA = MCFixup::create(Fixup.getOffset(), nullptr, std::get<0>(FK));
  auto FB = MCFixup::create(Fixup.getOffset(), nullptr, std::get<1>(FK));
  auto &Assembler = const_cast<MCAssembler &>(Asm);
  Asm.getWriter().recordRelocation(Assembler, &F, FA, A, FixedValueA);
  Asm.getWriter().recordRelocation(Assembler, &F, FB, B, FixedValueB);
  FixedValue = FixedValueA - FixedValueB;
  return true;
}

std::unique_ptr<MCObjectTargetWriter>
LoongArchAsmBackend::createObjectTargetWriter() const {
  return createLoongArchELFObjectWriter(
      OSABI, Is64Bit, STI.hasFeature(LoongArch::FeatureRelax));
}

MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
}
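
// Note: this factory is typically registered with the TargetRegistry (e.g.
// via TargetRegistry::RegisterMCAsmBackend in LoongArchMCTargetDesc.cpp) so
// that MC consumers can construct the backend from a target triple.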