//===-- LoongArchAsmBackend.cpp - LoongArch Assembler Backend -*- C++ -*---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LoongArchAsmBackend class.
//
//===----------------------------------------------------------------------===//

#include "LoongArchAsmBackend.h"
#include "LoongArchFixupKinds.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "loongarch-asmbackend"

using namespace llvm;

std::optional<MCFixupKind>
LoongArchAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    auto Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/LoongArch.def"
#undef ELF_RELOC
                    .Case("BFD_RELOC_NONE", ELF::R_LARCH_NONE)
                    .Case("BFD_RELOC_32", ELF::R_LARCH_32)
                    .Case("BFD_RELOC_64", ELF::R_LARCH_64)
                    .Default(-1u);
    if (Type != -1u)
      return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
  }
  return std::nullopt;
}

const MCFixupKindInfo &
LoongArchAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in LoongArchFixupKinds.h.
      //
      // {name, offset, bits, flags}
      {"fixup_loongarch_b16", 10, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b21", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_b26", 0, 26, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_loongarch_abs_hi20", 5, 20, 0},
      {"fixup_loongarch_abs_lo12", 10, 12, 0},
      {"fixup_loongarch_abs64_lo20", 5, 20, 0},
      {"fixup_loongarch_abs64_hi12", 10, 12, 0},
      {"fixup_loongarch_tls_le_hi20", 5, 20, 0},
      {"fixup_loongarch_tls_le_lo12", 10, 12, 0},
      {"fixup_loongarch_tls_le64_lo20", 5, 20, 0},
      {"fixup_loongarch_tls_le64_hi12", 10, 12, 0},
      // TODO: Add more fixup kinds.
  };

  static_assert((std::size(Infos)) == LoongArch::NumTargetFixupKinds,
                "Not all fixup kinds added to Infos array");

  // Fixup kinds from .reloc directive are like R_LARCH_NONE. They
  // do not require any extra processing.
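  // (Kinds at or above FirstLiteralRelocationKind encode a raw ELF relocation
  // type directly, as produced by getFixupKind above for `.reloc` directives,
  // e.g. `.reloc ., R_LARCH_32, sym`.)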
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return Infos[Kind - FirstTargetFixupKind];
}

static void reportOutOfRangeError(MCContext &Ctx, SMLoc Loc, unsigned N) {
  Ctx.reportError(Loc, "fixup value out of range [" + Twine(llvm::minIntN(N)) +
                           ", " + Twine(llvm::maxIntN(N)) + "]");
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext &Ctx) {
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return Value;
  case LoongArch::fixup_loongarch_b16: {
    if (!isInt<18>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 18);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return (Value >> 2) & 0xffff;
  }
  case LoongArch::fixup_loongarch_b21: {
    if (!isInt<23>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 23);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x1f);
  }
  case LoongArch::fixup_loongarch_b26: {
    if (!isInt<28>(Value))
      reportOutOfRangeError(Ctx, Fixup.getLoc(), 28);
    if (Value % 4)
      Ctx.reportError(Fixup.getLoc(), "fixup value must be 4-byte aligned");
    return ((Value & 0x3fffc) << 8) | ((Value >> 18) & 0x3ff);
  }
  case LoongArch::fixup_loongarch_abs_hi20:
  case LoongArch::fixup_loongarch_tls_le_hi20:
    return (Value >> 12) & 0xfffff;
  case LoongArch::fixup_loongarch_abs_lo12:
  case LoongArch::fixup_loongarch_tls_le_lo12:
    return Value & 0xfff;
  case LoongArch::fixup_loongarch_abs64_lo20:
  case LoongArch::fixup_loongarch_tls_le64_lo20:
    return (Value >> 32) & 0xfffff;
  case LoongArch::fixup_loongarch_abs64_hi12:
  case LoongArch::fixup_loongarch_tls_le64_hi12:
    return (Value >> 52) & 0xfff;
  }
}

static void fixupLeb128(MCContext &Ctx, const MCFixup &Fixup,
                        MutableArrayRef<char> Data, uint64_t Value) {
  unsigned I;
  for (I = 0; I != Data.size() && Value; ++I, Value >>= 7)
    Data[I] |= uint8_t(Value & 0x7f);
  if (Value)
    Ctx.reportError(Fixup.getLoc(), "Invalid uleb128 value!");
}

void LoongArchAsmBackend::applyFixup(const MCAssembler &Asm,
                                     const MCFixup &Fixup,
                                     const MCValue &Target,
                                     MutableArrayRef<char> Data, uint64_t Value,
                                     bool IsResolved,
                                     const MCSubtargetInfo *STI) const {
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKind Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCFixupKindInfo Info = getFixupKindInfo(Kind);
  MCContext &Ctx = Asm.getContext();

  // Fixup leb128 separately.
  if (Fixup.getTargetKind() == FK_Data_leb128)
    return fixupLeb128(Ctx, Fixup, Data, Value);

  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Value, Ctx);

  // Shift the value into position.
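  // (For example, fixup_loongarch_b16 has TargetOffset 10 and TargetSize 16,
  // so the 16-bit word offset computed by adjustFixupValue is shifted into
  // instruction bits [25:10], the offs16 field of branches like beq/bne.)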
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  unsigned NumBytes = alignTo(Info.TargetSize + Info.TargetOffset, 8) / 8;

  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  for (unsigned I = 0; I != NumBytes; ++I) {
    Data[Offset + I] |= uint8_t((Value >> (I * 8)) & 0xff);
  }
}

// Linker relaxation may change the code size. We have to insert Nops for the
// .align directive when linker relaxation is enabled, so that the linker can
// satisfy the alignment by removing Nops.
// This function returns the total size of the Nops we need to insert.
bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
    const MCAlignFragment &AF, unsigned &Size) {
  // Calculate the Nops Size only when linker relaxation is enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Ignore alignment if MaxBytesToEmit is less than the minimum Nop size.
  const unsigned MinNopLen = 4;
  if (AF.getMaxBytesToEmit() < MinNopLen)
    return false;
  Size = AF.getAlignment().value() - MinNopLen;
  return AF.getAlignment() > MinNopLen;
}

// We need to insert an R_LARCH_ALIGN relocation to indicate the position of
// the Nops and the total number of bytes inserted, when linker relaxation is
// enabled.
// This function inserts a fixup_loongarch_align fixup which is eventually
// translated into the R_LARCH_ALIGN relocation type.
// The improved R_LARCH_ALIGN requires a symbol index. The lowest 8 bits of
// the addend represent the alignment and the remaining bits represent the
// maximum number of bytes to emit; a maximum of zero means the emit limit is
// ignored.
bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(
    MCAssembler &Asm, const MCAsmLayout &Layout, MCAlignFragment &AF) {
  // Insert the fixup only when linker relaxation is enabled.
  if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
    return false;

  // Calculate the total Nops we need to insert. If there are none to insert,
  // simply return.
  unsigned Count;
  if (!shouldInsertExtraNopBytesForCodeAlign(AF, Count))
    return false;

  MCSection *Sec = AF.getParent();
  MCContext &Ctx = Asm.getContext();
  const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
  // Create the fixup_loongarch_align fixup.
  MCFixup Fixup =
      MCFixup::create(0, Dummy, MCFixupKind(LoongArch::fixup_loongarch_align));
  const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
  if (MCSym == nullptr) {
    // Create a symbol whose value is zero.
    MCSymbol *Sym = Ctx.createNamedTempSymbol("la-relax-align");
    Sym->setFragment(&*Sec->getBeginSymbol()->getFragment());
    Asm.registerSymbol(*Sym);
    MCSym = MCSymbolRefExpr::create(Sym, Ctx);
    getSecToAlignSym()[Sec] = MCSym;
  }

  uint64_t FixedValue = 0;
  unsigned Lo = Log2_64(Count) + 1;
  unsigned Hi = AF.getMaxBytesToEmit() >= Count ? 0 : AF.getMaxBytesToEmit();
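  // (Illustrative example: a request to align to 32 bytes reserves
  // Count = 32 - 4 = 28 bytes of Nops, so Lo = Log2_64(28) + 1 = 5 records
  // the 2^5 = 32-byte alignment in the addend's low 8 bits. Hi stays 0 unless
  // the directive's max-bytes bound is smaller than Count, assuming the usual
  // MC default where MaxBytesToEmit equals the alignment itself.)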
  MCValue Value = MCValue::get(MCSym, nullptr, Hi << 8 | Lo);
  Asm.getWriter().recordRelocation(Asm, Layout, &AF, Fixup, Value, FixedValue);

  return true;
}

bool LoongArchAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                                const MCFixup &Fixup,
                                                const MCValue &Target,
                                                const MCSubtargetInfo *STI) {
  if (Fixup.getKind() >= FirstLiteralRelocationKind)
    return true;
  switch (Fixup.getTargetKind()) {
  default:
    return STI->hasFeature(LoongArch::FeatureRelax);
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_Data_leb128:
    return !Target.isAbsolute();
  }
}

static inline std::pair<MCFixupKind, MCFixupKind>
getRelocPairForSize(unsigned Size) {
  switch (Size) {
  default:
    llvm_unreachable("unsupported fixup size");
  case 6:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD6),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB6));
  case 8:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD8),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB8));
  case 16:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD16),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB16));
  case 32:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD32),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB32));
  case 64:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD64),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB64));
  case 128:
    return std::make_pair(
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_ADD_ULEB128),
        MCFixupKind(FirstLiteralRelocationKind + ELF::R_LARCH_SUB_ULEB128));
  }
}

std::pair<bool, bool> LoongArchAsmBackend::relaxLEB128(MCLEBFragment &LF,
                                                       MCAsmLayout &Layout,
                                                       int64_t &Value) const {
  const MCExpr &Expr = LF.getValue();
  if (LF.isSigned() || !Expr.evaluateKnownAbsolute(Value, Layout))
    return std::make_pair(false, false);
  LF.getFixups().push_back(
      MCFixup::create(0, &Expr, FK_Data_leb128, Expr.getLoc()));
  return std::make_pair(true, true);
}

bool LoongArchAsmBackend::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF,
                                             MCAsmLayout &Layout,
                                             bool &WasRelaxed) const {
  MCContext &C = Layout.getAssembler().getContext();

  int64_t LineDelta = DF.getLineDelta();
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  SmallVectorImpl<MCFixup> &Fixups = DF.getFixups();
  size_t OldSize = Data.size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, Layout))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, Layout);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  Data.clear();
  Fixups.clear();
  raw_svector_ostream OS(Data);

  // INT64_MAX is a signal that this is actually a DW_LNE_end_sequence.
  if (LineDelta != INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_advance_line);
    encodeSLEB128(LineDelta, OS);
  }

  unsigned Offset;
  std::pair<MCFixupKind, MCFixupKind> FK;

  // According to the DWARF specification, the `DW_LNS_fixed_advance_pc` opcode
  // takes a single unsigned half (unencoded) operand. The maximum encodable
  // value is therefore 65535. Set a conservative upper bound for relaxation.
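  // (Values above 60000 fall back to DW_LNE_set_address with a pointer-sized
  // operand; in both branches the ADD/SUB relocation pair attached below lets
  // the linker recompute the address delta after relaxation.)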
  if (Value > 60000) {
    unsigned PtrSize = C.getAsmInfo()->getCodePointerSize();

    OS << uint8_t(dwarf::DW_LNS_extended_op);
    encodeULEB128(PtrSize + 1, OS);

    OS << uint8_t(dwarf::DW_LNE_set_address);
    Offset = OS.tell();
    assert((PtrSize == 4 || PtrSize == 8) && "Unexpected pointer size");
    FK = getRelocPairForSize(PtrSize == 4 ? 32 : 64);
    OS.write_zeros(PtrSize);
  } else {
    OS << uint8_t(dwarf::DW_LNS_fixed_advance_pc);
    Offset = OS.tell();
    FK = getRelocPairForSize(16);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
  }

  const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
  Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
  Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));

  if (LineDelta == INT64_MAX) {
    OS << uint8_t(dwarf::DW_LNS_extended_op);
    OS << uint8_t(1);
    OS << uint8_t(dwarf::DW_LNE_end_sequence);
  } else {
    OS << uint8_t(dwarf::DW_LNS_copy);
  }

  WasRelaxed = OldSize != Data.size();
  return true;
}

bool LoongArchAsmBackend::relaxDwarfCFA(MCDwarfCallFrameFragment &DF,
                                        MCAsmLayout &Layout,
                                        bool &WasRelaxed) const {
  const MCExpr &AddrDelta = DF.getAddrDelta();
  SmallVectorImpl<char> &Data = DF.getContents();
  SmallVectorImpl<MCFixup> &Fixups = DF.getFixups();
  size_t OldSize = Data.size();

  int64_t Value;
  if (AddrDelta.evaluateAsAbsolute(Value, Layout))
    return false;
  bool IsAbsolute = AddrDelta.evaluateKnownAbsolute(Value, Layout);
  assert(IsAbsolute && "CFA with invalid expression");
  (void)IsAbsolute;

  Data.clear();
  Fixups.clear();
  raw_svector_ostream OS(Data);

  assert(
      Layout.getAssembler().getContext().getAsmInfo()->getMinInstAlignment() ==
          1 &&
      "expected 1-byte alignment");
  if (Value == 0) {
    WasRelaxed = OldSize != Data.size();
    return true;
  }

  auto AddFixups = [&Fixups,
                    &AddrDelta](unsigned Offset,
                                std::pair<MCFixupKind, MCFixupKind> FK) {
    const MCBinaryExpr &MBE = cast<MCBinaryExpr>(AddrDelta);
    Fixups.push_back(MCFixup::create(Offset, MBE.getLHS(), std::get<0>(FK)));
    Fixups.push_back(MCFixup::create(Offset, MBE.getRHS(), std::get<1>(FK)));
  };

  if (isUIntN(6, Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc);
    AddFixups(0, getRelocPairForSize(6));
  } else if (isUInt<8>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc1);
    support::endian::write<uint8_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(8));
  } else if (isUInt<16>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc2);
    support::endian::write<uint16_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(16));
  } else if (isUInt<32>(Value)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc4);
    support::endian::write<uint32_t>(OS, 0, llvm::endianness::little);
    AddFixups(1, getRelocPairForSize(32));
  } else {
    llvm_unreachable("unsupported CFA encoding");
  }

  WasRelaxed = OldSize != Data.size();
  return true;
}

bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                       const MCSubtargetInfo *STI) const {
  // We mostly follow binutils' convention here: align to a 4-byte boundary
  // with 0-fill padding.
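  // (For example, Count = 7 emits three zero bytes of padding followed by one
  // 4-byte nop, 0x03400000, written below in little-endian byte order.)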
  OS.write_zeros(Count % 4);

  // The remainder is now padded with 4-byte nops.
  // nop: andi r0, r0, 0
  for (; Count >= 4; Count -= 4)
    OS.write("\0\0\x40\x03", 4);

  return true;
}

bool LoongArchAsmBackend::handleAddSubRelocations(const MCAsmLayout &Layout,
                                                  const MCFragment &F,
                                                  const MCFixup &Fixup,
                                                  const MCValue &Target,
                                                  uint64_t &FixedValue) const {
  std::pair<MCFixupKind, MCFixupKind> FK;
  uint64_t FixedValueA, FixedValueB;
  const MCSymbol &SA = Target.getSymA()->getSymbol();
  const MCSymbol &SB = Target.getSymB()->getSymbol();

  bool force = !SA.isInSection() || !SB.isInSection();
  if (!force) {
    const MCSection &SecA = SA.getSection();
    const MCSection &SecB = SB.getSection();

    // We need to record a relocation if SecA != SecB. Usually SecB is the
    // same section as the Fixup, in which case the relocation is recorded as
    // PC-relative; if SecB is a different section from the Fixup's, an error
    // is reported. Just return false and let handleFixup finish this work.
    if (&SecA != &SecB)
      return false;

    // In the SecA == SecB case, if linker relaxation is enabled we need to
    // record the ADD and SUB relocations. Otherwise, FixedValue has already
    // been computed in evaluateFixup, so return true and avoid recording
    // relocations.
    if (!STI.hasFeature(LoongArch::FeatureRelax))
      return true;
  }

  switch (Fixup.getKind()) {
  case llvm::FK_Data_1:
    FK = getRelocPairForSize(8);
    break;
  case llvm::FK_Data_2:
    FK = getRelocPairForSize(16);
    break;
  case llvm::FK_Data_4:
    FK = getRelocPairForSize(32);
    break;
  case llvm::FK_Data_8:
    FK = getRelocPairForSize(64);
    break;
  case llvm::FK_Data_leb128:
    FK = getRelocPairForSize(128);
    break;
  default:
    llvm_unreachable("unsupported fixup size");
  }
  MCValue A = MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
  MCValue B = MCValue::get(Target.getSymB());
  auto FA = MCFixup::create(Fixup.getOffset(), nullptr, std::get<0>(FK));
  auto FB = MCFixup::create(Fixup.getOffset(), nullptr, std::get<1>(FK));
  auto &Asm = Layout.getAssembler();
  Asm.getWriter().recordRelocation(Asm, Layout, &F, FA, A, FixedValueA);
  Asm.getWriter().recordRelocation(Asm, Layout, &F, FB, B, FixedValueB);
  FixedValue = FixedValueA - FixedValueB;
  return true;
}

std::unique_ptr<MCObjectTargetWriter>
LoongArchAsmBackend::createObjectTargetWriter() const {
  return createLoongArchELFObjectWriter(
      OSABI, Is64Bit, STI.hasFeature(LoongArch::FeatureRelax));
}

MCAsmBackend *llvm::createLoongArchAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TT = STI.getTargetTriple();
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TT.getOS());
  return new LoongArchAsmBackend(STI, OSABI, TT.isArch64Bit(), Options);
}