//===- utils/TableGen/X86FoldTablesEmitter.cpp - X86 backend-*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This tablegen backend is responsible for emitting the memory fold tables of
// the X86 backend instructions.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTarget.h"
#include "X86RecognizableInstr.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/TableGen/Error.h"
#include "llvm/TableGen/TableGenBackend.h"

using namespace llvm;
using namespace X86Disassembler;

namespace {

// The 3 possible strategies for the unfolding flag (TB_NO_REVERSE) of the
// manually added entries.
enum UnfoldStrategy {
  UNFOLD,     // Allow unfolding
  NO_UNFOLD,  // Prevent unfolding
  NO_STRATEGY // Make decision according to operands' sizes
};

// Represents an entry in the manually mapped instructions set.
struct ManualMapEntry {
  const char *RegInstStr;
  const char *MemInstStr;
  UnfoldStrategy Strategy;

  ManualMapEntry(const char *RegInstStr, const char *MemInstStr,
                 UnfoldStrategy Strategy = NO_STRATEGY)
      : RegInstStr(RegInstStr), MemInstStr(MemInstStr), Strategy(Strategy) {}
};

// List of instructions requiring explicitly aligned memory.
const char *ExplicitAlign[] = {"MOVDQA",  "MOVAPS",  "MOVAPD",  "MOVNTPS",
                               "MOVNTPD", "MOVNTDQ", "MOVNTDQA"};

// List of instructions NOT requiring explicit memory alignment.
const char *ExplicitUnalign[] = {"MOVDQU", "MOVUPS", "MOVUPD",
                                 "PCMPESTRM", "PCMPESTRI",
                                 "PCMPISTRM", "PCMPISTRI"};

// For manually mapping instructions that do not match by their encoding.
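// Illustrative note (assuming the usual X86 instruction definitions): the
// manual pair { "MOVLHPSrr", "MOVHPSrm", NO_UNFOLD } is expected to produce a
// fold-table entry carrying TB_NO_REVERSE, since unfolding it would reload a
// full XMM register while MOVHPSrm only loads 64 bits of memory.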
const ManualMapEntry ManualMapSet[] = {
    { "ADD16ri_DB",         "ADD16mi",          NO_UNFOLD },
    { "ADD16ri8_DB",        "ADD16mi8",         NO_UNFOLD },
    { "ADD16rr_DB",         "ADD16mr",          NO_UNFOLD },
    { "ADD32ri_DB",         "ADD32mi",          NO_UNFOLD },
    { "ADD32ri8_DB",        "ADD32mi8",         NO_UNFOLD },
    { "ADD32rr_DB",         "ADD32mr",          NO_UNFOLD },
    { "ADD64ri32_DB",       "ADD64mi32",        NO_UNFOLD },
    { "ADD64ri8_DB",        "ADD64mi8",         NO_UNFOLD },
    { "ADD64rr_DB",         "ADD64mr",          NO_UNFOLD },
    { "ADD8ri_DB",          "ADD8mi",           NO_UNFOLD },
    { "ADD8rr_DB",          "ADD8mr",           NO_UNFOLD },
    { "ADD16rr_DB",         "ADD16rm",          NO_UNFOLD },
    { "ADD32rr_DB",         "ADD32rm",          NO_UNFOLD },
    { "ADD64rr_DB",         "ADD64rm",          NO_UNFOLD },
    { "ADD8rr_DB",          "ADD8rm",           NO_UNFOLD },
    { "MMX_MOVD64from64rr", "MMX_MOVQ64mr",     UNFOLD },
    { "MMX_MOVD64grr",      "MMX_MOVD64mr",     UNFOLD },
    { "MOVLHPSrr",          "MOVHPSrm",         NO_UNFOLD },
    { "PUSH16r",            "PUSH16rmm",        UNFOLD },
    { "PUSH32r",            "PUSH32rmm",        UNFOLD },
    { "PUSH64r",            "PUSH64rmm",        UNFOLD },
    { "TAILJMPr",           "TAILJMPm",         UNFOLD },
    { "TAILJMPr64",         "TAILJMPm64",       UNFOLD },
    { "TAILJMPr64_REX",     "TAILJMPm64_REX",   UNFOLD },
    { "VMOVLHPSZrr",        "VMOVHPSZ128rm",    NO_UNFOLD },
    { "VMOVLHPSrr",         "VMOVHPSrm",        NO_UNFOLD },
};

static bool isExplicitAlign(const CodeGenInstruction *Inst) {
  return any_of(ExplicitAlign, [Inst](const char *InstStr) {
    return Inst->TheDef->getName().contains(InstStr);
  });
}

static bool isExplicitUnalign(const CodeGenInstruction *Inst) {
  return any_of(ExplicitUnalign, [Inst](const char *InstStr) {
    return Inst->TheDef->getName().contains(InstStr);
  });
}

class X86FoldTablesEmitter {
  RecordKeeper &Records;
  CodeGenTarget Target;

  // Represents an entry in the folding table.
  class X86FoldTableEntry {
    const CodeGenInstruction *RegInst;
    const CodeGenInstruction *MemInst;

  public:
    bool CannotUnfold = false;
    bool IsLoad = false;
    bool IsStore = false;
    bool IsAligned = false;
    unsigned int Alignment = 0;

    X86FoldTableEntry(const CodeGenInstruction *RegInst,
                      const CodeGenInstruction *MemInst)
        : RegInst(RegInst), MemInst(MemInst) {}

    void print(formatted_raw_ostream &OS) const {
      OS.indent(2);
      OS << "{ X86::" << RegInst->TheDef->getName() << ",";
      OS.PadToColumn(40);
      OS << "X86::" << MemInst->TheDef->getName() << ",";
      OS.PadToColumn(75);

      std::string Attrs;
      if (IsLoad)
        Attrs += "TB_FOLDED_LOAD | ";
      if (IsStore)
        Attrs += "TB_FOLDED_STORE | ";
      if (CannotUnfold)
        Attrs += "TB_NO_REVERSE | ";
      if (IsAligned)
        Attrs += "TB_ALIGN_" + std::to_string(Alignment) + " | ";

      StringRef SimplifiedAttrs = StringRef(Attrs).rtrim("| ");
      if (SimplifiedAttrs.empty())
        SimplifiedAttrs = "0";

      OS << SimplifiedAttrs << " },\n";
    }

    bool operator<(const X86FoldTableEntry &RHS) const {
      bool LHSpseudo = RegInst->TheDef->getValueAsBit("isPseudo");
      bool RHSpseudo = RHS.RegInst->TheDef->getValueAsBit("isPseudo");
      if (LHSpseudo != RHSpseudo)
        return LHSpseudo;

      return RegInst->TheDef->getName() < RHS.RegInst->TheDef->getName();
    }
  };

  typedef std::vector<X86FoldTableEntry> FoldTable;
  // std::vector for each folding table.
  // Table2Addr - Holds instructions whose memory form performs a load and a
  //              store.
  // Table#i    - Holds instructions whose memory form performs a load OR a
  //              store, and whose #i'th operand is folded.
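  // Illustrative example (assuming the usual SSE definitions): ADDPSrr is
  // (outs VR128:$dst), (ins VR128:$src1, VR128:$src2) and ADDPSrm is
  // (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2); the folded operand is
  // operand #2, so the pair is placed in Table2.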
  FoldTable Table2Addr;
  FoldTable Table0;
  FoldTable Table1;
  FoldTable Table2;
  FoldTable Table3;
  FoldTable Table4;

public:
  X86FoldTablesEmitter(RecordKeeper &R) : Records(R), Target(R) {}

  // run - Generate the 6 X86 memory fold tables.
  void run(formatted_raw_ostream &OS);

private:
  // Decides to which table to add the entry with the given instructions.
  // S sets the strategy of adding the TB_NO_REVERSE flag.
  void updateTables(const CodeGenInstruction *RegInstr,
                    const CodeGenInstruction *MemInstr,
                    const UnfoldStrategy S = NO_STRATEGY);

  // Generates an X86FoldTableEntry for the given instructions, fills it with
  // the appropriate flags, and then adds it to Table.
  void addEntryWithFlags(FoldTable &Table, const CodeGenInstruction *RegInstr,
                         const CodeGenInstruction *MemInstr,
                         const UnfoldStrategy S, const unsigned int FoldedInd);

  // Print the given table as a static const C++ array of type
  // X86MemoryFoldTableEntry.
  void printTable(const FoldTable &Table, StringRef TableName,
                  formatted_raw_ostream &OS) {
    OS << "static const X86MemoryFoldTableEntry MemoryFold" << TableName
       << "[] = {\n";

    for (const X86FoldTableEntry &E : Table)
      E.print(OS);

    OS << "};\n\n";
  }
};

// Return true if one of the instruction's operands is an RST register class.
static bool hasRSTRegClass(const CodeGenInstruction *Inst) {
  return any_of(Inst->Operands, [](const CGIOperandList::OperandInfo &OpIn) {
    return OpIn.Rec->getName() == "RST" || OpIn.Rec->getName() == "RSTi";
  });
}

// Return true if one of the instruction's operands is a ptr_rc_tailcall.
static bool hasPtrTailcallRegClass(const CodeGenInstruction *Inst) {
  return any_of(Inst->Operands, [](const CGIOperandList::OperandInfo &OpIn) {
    return OpIn.Rec->getName() == "ptr_rc_tailcall";
  });
}

// Calculates the integer value represented by the BitsInit object.
static inline uint64_t getValueFromBitsInit(const BitsInit *B) {
  assert(B->getNumBits() <= sizeof(uint64_t) * 8 && "BitsInit is too long!");

  uint64_t Value = 0;
  for (unsigned i = 0, e = B->getNumBits(); i != e; ++i) {
    BitInit *Bit = cast<BitInit>(B->getBit(i));
    Value |= uint64_t(Bit->getValue()) << i;
  }
  return Value;
}

// Return true if the instruction is defined with a register format.
static inline bool hasRegisterFormat(const Record *Inst) {
  const BitsInit *FormBits = Inst->getValueAsBitsInit("FormBits");
  uint64_t FormBitsNum = getValueFromBitsInit(FormBits);

  // Values from the X86Local namespace defined in X86RecognizableInstr.cpp.
  return FormBitsNum >= X86Local::MRMDestReg && FormBitsNum <= X86Local::MRM7r;
}

// Return true if the instruction is defined with a memory format.
static inline bool hasMemoryFormat(const Record *Inst) {
  const BitsInit *FormBits = Inst->getValueAsBitsInit("FormBits");
  uint64_t FormBitsNum = getValueFromBitsInit(FormBits);

  // Values from the X86Local namespace defined in X86RecognizableInstr.cpp.
  return FormBitsNum >= X86Local::MRMDestMem && FormBitsNum <= X86Local::MRM7m;
}

static inline bool isNOREXRegClass(const Record *Op) {
  return Op->getName().contains("_NOREX");
}
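// Illustrative note: getValueFromBitsInit() above treats bit 0 of the BitsInit
// as the least-significant bit, so a 3-bit value { 1, 0, 1 } (bits 0..2)
// decodes to 5. hasRegisterFormat()/hasMemoryFormat() compare the decoded Form
// value against the X86Local::MRM* ranges to classify an instruction as a
// register form or a memory form.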
// Get the alternative instruction pointed to by the "FoldGenRegForm" field.
static inline const CodeGenInstruction *
getAltRegInst(const CodeGenInstruction *I, const RecordKeeper &Records,
              const CodeGenTarget &Target) {

  StringRef AltRegInstStr = I->TheDef->getValueAsString("FoldGenRegForm");
  Record *AltRegInstRec = Records.getDef(AltRegInstStr);
  assert(AltRegInstRec &&
         "Alternative register form instruction def not found");
  CodeGenInstruction &AltRegInst = Target.getInstruction(AltRegInstRec);
  return &AltRegInst;
}

// Function object - Operator() returns true if the given register form
// instruction matches the memory form instruction of this object.
class IsMatch {
  const CodeGenInstruction *MemInst;
  unsigned Variant;

public:
  IsMatch(const CodeGenInstruction *Inst, unsigned V)
      : MemInst(Inst), Variant(V) {}

  bool operator()(const CodeGenInstruction *RegInst) {
    X86Disassembler::RecognizableInstrBase RegRI(*RegInst);
    X86Disassembler::RecognizableInstrBase MemRI(*MemInst);
    const Record *RegRec = RegInst->TheDef;
    const Record *MemRec = MemInst->TheDef;

    // EVEX_B means different things for memory and register forms.
    if (RegRI.HasEVEX_B != 0 || MemRI.HasEVEX_B != 0)
      return false;

    // Instruction's format - The register form's "Form" field should be
    // the opposite of the memory form's "Form" field.
    if (!areOppositeForms(RegRI.Form, MemRI.Form))
      return false;

    // X86 encoding is crazy, e.g.
    //
    //   f3 0f c7 30   vmxon (%rax)
    //   f3 0f c7 f0   senduipi %rax
    //
    // These two instructions have similar encoding fields but are unrelated.
    if (X86Disassembler::getMnemonic(MemInst, Variant) !=
        X86Disassembler::getMnemonic(RegInst, Variant))
      return false;

    // Return false if at least one of the encoding fields of the two
    // instructions does not match.
    if (RegRI.Encoding != MemRI.Encoding || RegRI.Opcode != MemRI.Opcode ||
        RegRI.OpPrefix != MemRI.OpPrefix || RegRI.OpMap != MemRI.OpMap ||
        RegRI.OpSize != MemRI.OpSize || RegRI.AdSize != MemRI.AdSize ||
        RegRI.HasREX_W != MemRI.HasREX_W ||
        RegRI.HasVEX_4V != MemRI.HasVEX_4V ||
        RegRI.HasVEX_L != MemRI.HasVEX_L ||
        RegRI.HasVEX_W != MemRI.HasVEX_W ||
        RegRI.IgnoresVEX_L != MemRI.IgnoresVEX_L ||
        RegRI.IgnoresVEX_W != MemRI.IgnoresVEX_W ||
        RegRI.HasEVEX_K != MemRI.HasEVEX_K ||
        RegRI.HasEVEX_KZ != MemRI.HasEVEX_KZ ||
        RegRI.HasEVEX_L2 != MemRI.HasEVEX_L2 ||
        RegRec->getValueAsBit("hasEVEX_RC") !=
            MemRec->getValueAsBit("hasEVEX_RC") ||
        RegRec->getValueAsBit("hasLockPrefix") !=
            MemRec->getValueAsBit("hasLockPrefix") ||
        RegRec->getValueAsBit("hasNoTrackPrefix") !=
            MemRec->getValueAsBit("hasNoTrackPrefix") ||
        RegRec->getValueAsBit("EVEX_W1_VEX_W0") !=
            MemRec->getValueAsBit("EVEX_W1_VEX_W0"))
      return false;

    // Make sure the sizes of the operands of both instructions suit each
    // other. This is needed for instructions with an intrinsic version
    // (_Int), where the only difference is the size of the operands.
    // For example: VUCOMISDZrm and Int_VUCOMISDrm.
    // It is also needed for instructions whose EVEX version was upgraded to
    // work with k-registers. For example VPCMPEQBrm (xmm output register) and
    // VPCMPEQBZ128rm (k register output register).
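    // Illustrative RMW example (assuming the usual definitions): ADD32rr is
    // (outs GR32:$dst), (ins GR32:$src1, GR32:$src2) while ADD32mr is
    // (outs), (ins i32mem:$dst, GR32:$src2); the register form has one extra
    // output but the same number of inputs, so RegStartIdx below becomes 1 and
    // memory form operand #i is compared against register form operand #i + 1.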
    bool ArgFolded = false;
    unsigned MemOutSize = MemRec->getValueAsDag("OutOperandList")->getNumArgs();
    unsigned RegOutSize = RegRec->getValueAsDag("OutOperandList")->getNumArgs();
    unsigned MemInSize = MemRec->getValueAsDag("InOperandList")->getNumArgs();
    unsigned RegInSize = RegRec->getValueAsDag("InOperandList")->getNumArgs();

    // If the register form has one more output than the memory form but the
    // same number of inputs, the memory form uses the folded memory operand as
    // both source and destination (Read-Modify-Write), so skip the register
    // form's extra output when comparing the remaining operands.
    unsigned RegStartIdx =
        (MemOutSize + 1 == RegOutSize) && (MemInSize == RegInSize) ? 1 : 0;

    for (unsigned i = 0, e = MemInst->Operands.size(); i < e; i++) {
      Record *MemOpRec = MemInst->Operands[i].Rec;
      Record *RegOpRec = RegInst->Operands[i + RegStartIdx].Rec;

      if (MemOpRec == RegOpRec)
        continue;

      if (isRegisterOperand(MemOpRec) && isRegisterOperand(RegOpRec)) {
        if (getRegOperandSize(MemOpRec) != getRegOperandSize(RegOpRec) ||
            isNOREXRegClass(MemOpRec) != isNOREXRegClass(RegOpRec))
          return false;
      } else if (isMemoryOperand(MemOpRec) && isMemoryOperand(RegOpRec)) {
        if (getMemOperandSize(MemOpRec) != getMemOperandSize(RegOpRec))
          return false;
      } else if (isImmediateOperand(MemOpRec) && isImmediateOperand(RegOpRec)) {
        if (MemOpRec->getValueAsDef("Type") != RegOpRec->getValueAsDef("Type"))
          return false;
      } else {
        // Only one operand can be folded.
        if (ArgFolded)
          return false;

        assert(isRegisterOperand(RegOpRec) && isMemoryOperand(MemOpRec));
        ArgFolded = true;
      }
    }

    return true;
  }

private:
  // Return true if the two given forms are the opposite of each other.
  bool areOppositeForms(unsigned RegForm, unsigned MemForm) {
    if ((MemForm == X86Local::MRM0m && RegForm == X86Local::MRM0r) ||
        (MemForm == X86Local::MRM1m && RegForm == X86Local::MRM1r) ||
        (MemForm == X86Local::MRM2m && RegForm == X86Local::MRM2r) ||
        (MemForm == X86Local::MRM3m && RegForm == X86Local::MRM3r) ||
        (MemForm == X86Local::MRM4m && RegForm == X86Local::MRM4r) ||
        (MemForm == X86Local::MRM5m && RegForm == X86Local::MRM5r) ||
        (MemForm == X86Local::MRM6m && RegForm == X86Local::MRM6r) ||
        (MemForm == X86Local::MRM7m && RegForm == X86Local::MRM7r) ||
        (MemForm == X86Local::MRMXm && RegForm == X86Local::MRMXr) ||
        (MemForm == X86Local::MRMXmCC && RegForm == X86Local::MRMXrCC) ||
        (MemForm == X86Local::MRMDestMem && RegForm == X86Local::MRMDestReg) ||
        (MemForm == X86Local::MRMSrcMem && RegForm == X86Local::MRMSrcReg) ||
        (MemForm == X86Local::MRMSrcMem4VOp3 &&
         RegForm == X86Local::MRMSrcReg4VOp3) ||
        (MemForm == X86Local::MRMSrcMemOp4 &&
         RegForm == X86Local::MRMSrcRegOp4) ||
        (MemForm == X86Local::MRMSrcMemCC && RegForm == X86Local::MRMSrcRegCC))
      return true;

    return false;
  }
};

} // end anonymous namespace

void X86FoldTablesEmitter::addEntryWithFlags(FoldTable &Table,
                                             const CodeGenInstruction *RegInstr,
                                             const CodeGenInstruction *MemInstr,
                                             const UnfoldStrategy S,
                                             const unsigned int FoldedInd) {

  X86FoldTableEntry Result = X86FoldTableEntry(RegInstr, MemInstr);
  Record *RegRec = RegInstr->TheDef;
  Record *MemRec = MemInstr->TheDef;

  // Only Table0 entries should explicitly specify a load or store flag.
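  // Illustrative examples (assuming the usual definitions): the CMP32rr /
  // CMP32mr pair has the same number of inputs in both forms, so it is marked
  // TB_FOLDED_LOAD, while the MOV32rr / MOV32mr pair has an extra input in the
  // memory form, so it is marked TB_FOLDED_STORE.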
  if (&Table == &Table0) {
    unsigned MemInOpsNum = MemRec->getValueAsDag("InOperandList")->getNumArgs();
    unsigned RegInOpsNum = RegRec->getValueAsDag("InOperandList")->getNumArgs();
    // If the instruction writes to the folded operand, it will appear as an
    // output in the register form instruction and as an input in the memory
    // form instruction.
    // If the instruction reads from the folded operand, it will appear as an
    // input in both forms.
    if (MemInOpsNum == RegInOpsNum)
      Result.IsLoad = true;
    else
      Result.IsStore = true;
  }

  Record *RegOpRec = RegInstr->Operands[FoldedInd].Rec;
  Record *MemOpRec = MemInstr->Operands[FoldedInd].Rec;

  // Unfolding code generates a load/store instruction according to the size of
  // the register in the register form instruction.
  // If the register's size is greater than the memory operand's size, do not
  // allow unfolding.
  if (S == UNFOLD)
    Result.CannotUnfold = false;
  else if (S == NO_UNFOLD)
    Result.CannotUnfold = true;
  else if (getRegOperandSize(RegOpRec) > getMemOperandSize(MemOpRec))
    Result.CannotUnfold = true; // S == NO_STRATEGY

  uint64_t Enc = getValueFromBitsInit(RegRec->getValueAsBitsInit("OpEncBits"));
  if (isExplicitAlign(RegInstr)) {
    // The instruction requires explicitly aligned memory.
    BitsInit *VectSize = RegRec->getValueAsBitsInit("VectSize");
    uint64_t Value = getValueFromBitsInit(VectSize);
    Result.IsAligned = true;
    Result.Alignment = Value;
  } else if (Enc != X86Local::XOP && Enc != X86Local::VEX &&
             Enc != X86Local::EVEX) {
    // Instructions with XOP/VEX/EVEX encoding do not require alignment.
    if (!isExplicitUnalign(RegInstr) && getMemOperandSize(MemOpRec) > 64) {
      // SSE packed vector instructions require a 16-byte alignment.
      Result.IsAligned = true;
      Result.Alignment = 16;
    }
  }

  Table.push_back(Result);
}

void X86FoldTablesEmitter::updateTables(const CodeGenInstruction *RegInstr,
                                        const CodeGenInstruction *MemInstr,
                                        const UnfoldStrategy S) {

  Record *RegRec = RegInstr->TheDef;
  Record *MemRec = MemInstr->TheDef;
  unsigned MemOutSize = MemRec->getValueAsDag("OutOperandList")->getNumArgs();
  unsigned RegOutSize = RegRec->getValueAsDag("OutOperandList")->getNumArgs();
  unsigned MemInSize = MemRec->getValueAsDag("InOperandList")->getNumArgs();
  unsigned RegInSize = RegRec->getValueAsDag("InOperandList")->getNumArgs();

  // Instructions whose memory form performs a Read-Modify-Write should be
  // added to Table2Addr.
  if (MemOutSize != RegOutSize && MemInSize == RegInSize) {
    addEntryWithFlags(Table2Addr, RegInstr, MemInstr, S, 0);
    return;
  }

  if (MemInSize == RegInSize && MemOutSize == RegOutSize) {
    // Load-Folding cases.
    // If the i'th register form operand is a register and the i'th memory form
    // operand is a memory operand, add the instructions to Table#i.
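    // Illustrative example (assuming the usual definitions): MOV32rr is
    // (outs GR32:$dst), (ins GR32:$src) and MOV32rm is
    // (outs GR32:$dst), (ins i32mem:$src); the operands first diverge at
    // index 1, so the pair goes to Table1.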
    for (unsigned i = RegOutSize, e = RegInstr->Operands.size(); i < e; i++) {
      Record *RegOpRec = RegInstr->Operands[i].Rec;
      Record *MemOpRec = MemInstr->Operands[i].Rec;
      // PointerLikeRegClass: For instructions like TAILJMPr, TAILJMPr64,
      // TAILJMPr64_REX.
      if ((isRegisterOperand(RegOpRec) ||
           RegOpRec->isSubClassOf("PointerLikeRegClass")) &&
          isMemoryOperand(MemOpRec)) {
        switch (i) {
        case 0:
          addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0);
          return;
        case 1:
          addEntryWithFlags(Table1, RegInstr, MemInstr, S, 1);
          return;
        case 2:
          addEntryWithFlags(Table2, RegInstr, MemInstr, S, 2);
          return;
        case 3:
          addEntryWithFlags(Table3, RegInstr, MemInstr, S, 3);
          return;
        case 4:
          addEntryWithFlags(Table4, RegInstr, MemInstr, S, 4);
          return;
        }
      }
    }
  } else if (MemInSize == RegInSize + 1 && MemOutSize + 1 == RegOutSize) {
    // Store-Folding cases.
    // If the memory form instruction performs a store, the *output* register
    // of the register form instruction disappears and a memory *input* operand
    // appears in its place in the memory form instruction.
    // For example:
    //   MOVAPSrr => (outs VR128:$dst), (ins VR128:$src)
    //   MOVAPSmr => (outs), (ins f128mem:$dst, VR128:$src)
    Record *RegOpRec = RegInstr->Operands[RegOutSize - 1].Rec;
    Record *MemOpRec = MemInstr->Operands[RegOutSize - 1].Rec;
    if (isRegisterOperand(RegOpRec) && isMemoryOperand(MemOpRec) &&
        getRegOperandSize(RegOpRec) == getMemOperandSize(MemOpRec))
      addEntryWithFlags(Table0, RegInstr, MemInstr, S, 0);
  }
}

void X86FoldTablesEmitter::run(formatted_raw_ostream &OS) {
  emitSourceFileHeader("X86 fold tables", OS);

  // Holds all memory form instructions.
  std::vector<const CodeGenInstruction *> MemInsts;
  // Holds all register form instructions, divided according to opcode.
  std::map<uint8_t, std::vector<const CodeGenInstruction *>> RegInsts;

  ArrayRef<const CodeGenInstruction *> NumberedInstructions =
      Target.getInstructionsByEnumValue();

  for (const CodeGenInstruction *Inst : NumberedInstructions) {
    const Record *Rec = Inst->TheDef;
    if (!Rec->isSubClassOf("X86Inst") || Rec->getValueAsBit("isAsmParserOnly"))
      continue;

    // - Do not proceed if the instruction is marked as notMemoryFoldable.
    // - Instructions including RST register class operands are not relevant
    //   for memory folding (for further details check the explanation in
    //   the lib/Target/X86/X86InstrFPStack.td file).
    // - Some instructions (listed in the manual map above) use the register
    //   class ptr_rc_tailcall, which can be of size 32 or 64. To ensure a safe
    //   mapping of these instructions, we map them manually and exclude them
    //   from the automatic matching.
    if (Rec->getValueAsBit("isMemoryFoldable") == false ||
        hasRSTRegClass(Inst) || hasPtrTailcallRegClass(Inst))
      continue;

    // Add all the memory form instructions to MemInsts, and all the register
    // form instructions to RegInsts[Opc], where Opc is the opcode of each
    // instruction. This helps reduce the runtime of the backend.
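    // Illustrative note (assuming the usual definitions): ADDPSrr, ADDPDrr,
    // ADDSSrr, and ADDSDrr all share opcode 0x58 and differ only in their
    // prefixes, so they land in the same RegInsts bucket and are compared only
    // against memory form instructions whose opcode is also 0x58.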
    if (hasMemoryFormat(Rec))
      MemInsts.push_back(Inst);
    else if (hasRegisterFormat(Rec)) {
      uint8_t Opc = getValueFromBitsInit(Rec->getValueAsBitsInit("Opcode"));
      RegInsts[Opc].push_back(Inst);
    }
  }

  Record *AsmWriter = Target.getAsmWriter();
  unsigned Variant = AsmWriter->getValueAsInt("Variant");
  // For each memory form instruction, try to find its register form
  // instruction.
  for (const CodeGenInstruction *MemInst : MemInsts) {
    uint8_t Opc =
        getValueFromBitsInit(MemInst->TheDef->getValueAsBitsInit("Opcode"));

    auto RegInstsIt = RegInsts.find(Opc);
    if (RegInstsIt == RegInsts.end())
      continue;

    // Two forms (memory & register) of the same instruction must have the same
    // opcode, so try matching only against register form instructions with the
    // same opcode.
    std::vector<const CodeGenInstruction *> &OpcRegInsts = RegInstsIt->second;

    auto Match = find_if(OpcRegInsts, IsMatch(MemInst, Variant));
    if (Match != OpcRegInsts.end()) {
      const CodeGenInstruction *RegInst = *Match;
      // If the matched instruction has its "FoldGenRegForm" field set, map the
      // memory form instruction to the register form instruction pointed to by
      // that field.
      if (RegInst->TheDef->isValueUnset("FoldGenRegForm")) {
        updateTables(RegInst, MemInst);
      } else {
        const CodeGenInstruction *AltRegInst =
            getAltRegInst(RegInst, Records, Target);
        updateTables(AltRegInst, MemInst);
      }
      OpcRegInsts.erase(Match);
    }
  }

  // Add the manually mapped instructions listed above.
  for (const ManualMapEntry &Entry : ManualMapSet) {
    Record *RegInstIter = Records.getDef(Entry.RegInstStr);
    Record *MemInstIter = Records.getDef(Entry.MemInstStr);

    updateTables(&(Target.getInstruction(RegInstIter)),
                 &(Target.getInstruction(MemInstIter)), Entry.Strategy);
  }

  // Sort the tables before printing.
  llvm::sort(Table2Addr);
  llvm::sort(Table0);
  llvm::sort(Table1);
  llvm::sort(Table2);
  llvm::sort(Table3);
  llvm::sort(Table4);

  // Print all tables.
  printTable(Table2Addr, "Table2Addr", OS);
  printTable(Table0, "Table0", OS);
  printTable(Table1, "Table1", OS);
  printTable(Table2, "Table2", OS);
  printTable(Table3, "Table3", OS);
  printTable(Table4, "Table4", OS);
}

namespace llvm {

void EmitX86FoldTables(RecordKeeper &RK, raw_ostream &o) {
  formatted_raw_ostream OS(o);
  X86FoldTablesEmitter(RK).run(OS);
}
} // namespace llvm