//===- X86AvoidStoreForwardingBlocks.cpp - Avoid HW Store Forward Block ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// If a load follows a store and reloads data that the store has written to
// memory, Intel microarchitectures can in many cases forward the data
// directly from the store to the load. This "store forwarding" saves cycles
// by enabling the load to directly obtain the data instead of accessing the
// data from cache or memory.
// A "store forward block" occurs in cases where a store cannot be forwarded
// to the load. The most typical case of a store forward block on Intel Core
// microarchitectures is when a small store cannot be forwarded to a large
// load. The estimated penalty for a store forward block is ~13 cycles.
//
// This pass tries to recognize and handle cases where a "store forward
// block" is created by the compiler when lowering memcpy calls to a sequence
// of a load and a store.
//
// The pass currently only handles cases where memcpy is lowered to
// XMM/YMM registers; it tries to break the memcpy into smaller copies.
// Breaking the memcpy should be possible since there is no atomicity
// guarantee for loads and stores to XMM/YMM.
//
// It could be better for performance to solve the problem by loading
// to XMM/YMM, then inserting the partial store before storing back from
// XMM/YMM to memory, but this would result in a more conservative
// optimization since it requires proving the aliasing relationship of every
// memory access between the blocking store and the load before the store can
// be moved, whereas the transformation done here is correct regardless of
// other memory accesses.
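//
// As an illustration (a hypothetical sequence, not taken from a specific
// compilation), a lowered 16-byte memcpy preceded by a 4-byte store into the
// copied range
//   movl    %ecx, 4(%rsp)    # small store inside the loaded range
//   vmovups (%rsp), %xmm0    # 16-byte load: forwarding from the store fails
//   vmovups %xmm0, 16(%rsp)
// is broken into smaller copies so that the 4 bytes written by the blocking
// store are loaded and stored on their own, and the surrounding bytes are
// copied with the widest moves that fit.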
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

#define DEBUG_TYPE "x86-avoid-SFB"

static cl::opt<bool> DisableX86AvoidStoreForwardBlocks(
    "x86-disable-avoid-SFB", cl::Hidden,
    cl::desc("X86: Disable Store Forwarding Blocks fixup."), cl::init(false));

static cl::opt<unsigned> X86AvoidSFBInspectionLimit(
    "x86-sfb-inspection-limit",
    cl::desc("X86: Number of instructions backward to "
             "inspect for store forwarding blocks."),
    cl::init(20), cl::Hidden);

namespace {

using DisplacementSizeMap = std::map<int64_t, unsigned>;

class X86AvoidSFBPass : public MachineFunctionPass {
public:
  static char ID;
  X86AvoidSFBPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override {
    return "X86 Avoid Store Forwarding Blocks";
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AAResultsWrapperPass>();
  }

private:
  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;
  const X86RegisterInfo *TRI = nullptr;
  SmallVector<std::pair<MachineInstr *, MachineInstr *>, 2>
      BlockedLoadsStoresPairs;
  SmallVector<MachineInstr *, 2> ForRemoval;
  AliasAnalysis *AA = nullptr;

  /// Returns pairs of a load followed by a store to memory which look
  /// like a memcpy.
  void findPotentiallyBlockedCopies(MachineFunction &MF);
  /// Break the memcpy's load and store into smaller copies
  /// such that each memory load that was blocked by a smaller store
  /// would now be copied separately.
  void breakBlockedCopies(MachineInstr *LoadInst, MachineInstr *StoreInst,
                          const DisplacementSizeMap &BlockingStoresDispSizeMap);
  /// Break a copy of size Size to smaller copies.
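  /// For example (an illustrative value, following the greedy loop in the
  /// implementation), Size == 12 is emitted as one 8-byte copy followed by
  /// one 4-byte copy.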
  void buildCopies(int Size, MachineInstr *LoadInst, int64_t LdDispImm,
                   MachineInstr *StoreInst, int64_t StDispImm,
                   int64_t LMMOffset, int64_t SMMOffset);

  void buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                 int64_t LoadDisp, MachineInstr *StoreInst,
                 unsigned NStoreOpcode, int64_t StoreDisp, unsigned Size,
                 int64_t LMMOffset, int64_t SMMOffset);

  bool alias(const MachineMemOperand &Op1, const MachineMemOperand &Op2) const;

  unsigned getRegSizeInBytes(MachineInstr *Inst);
};

} // end anonymous namespace

char X86AvoidSFBPass::ID = 0;

INITIALIZE_PASS_BEGIN(X86AvoidSFBPass, DEBUG_TYPE,
                      "X86 Avoid Store Forwarding Blocks", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(X86AvoidSFBPass, DEBUG_TYPE,
                    "X86 Avoid Store Forwarding Blocks", false, false)

FunctionPass *llvm::createX86AvoidStoreForwardingBlocks() {
  return new X86AvoidSFBPass();
}

static bool isXMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::MOVUPSrm || Opcode == X86::MOVAPSrm ||
         Opcode == X86::VMOVUPSrm || Opcode == X86::VMOVAPSrm ||
         Opcode == X86::VMOVUPDrm || Opcode == X86::VMOVAPDrm ||
         Opcode == X86::VMOVDQUrm || Opcode == X86::VMOVDQArm ||
         Opcode == X86::VMOVUPSZ128rm || Opcode == X86::VMOVAPSZ128rm ||
         Opcode == X86::VMOVUPDZ128rm || Opcode == X86::VMOVAPDZ128rm ||
         Opcode == X86::VMOVDQU64Z128rm || Opcode == X86::VMOVDQA64Z128rm ||
         Opcode == X86::VMOVDQU32Z128rm || Opcode == X86::VMOVDQA32Z128rm;
}
static bool isYMMLoadOpcode(unsigned Opcode) {
  return Opcode == X86::VMOVUPSYrm || Opcode == X86::VMOVAPSYrm ||
         Opcode == X86::VMOVUPDYrm || Opcode == X86::VMOVAPDYrm ||
         Opcode == X86::VMOVDQUYrm || Opcode == X86::VMOVDQAYrm ||
         Opcode == X86::VMOVUPSZ256rm || Opcode == X86::VMOVAPSZ256rm ||
         Opcode == X86::VMOVUPDZ256rm || Opcode == X86::VMOVAPDZ256rm ||
         Opcode == X86::VMOVDQU64Z256rm || Opcode == X86::VMOVDQA64Z256rm ||
         Opcode == X86::VMOVDQU32Z256rm || Opcode == X86::VMOVDQA32Z256rm;
}

static bool isPotentialBlockedMemCpyLd(unsigned Opcode) {
  return isXMMLoadOpcode(Opcode) || isYMMLoadOpcode(Opcode);
}

static bool isPotentialBlockedMemCpyPair(unsigned LdOpcode, unsigned StOpcode) {
  switch (LdOpcode) {
  case X86::MOVUPSrm:
  case X86::MOVAPSrm:
    return StOpcode == X86::MOVUPSmr || StOpcode == X86::MOVAPSmr;
  case X86::VMOVUPSrm:
  case X86::VMOVAPSrm:
    return StOpcode == X86::VMOVUPSmr || StOpcode == X86::VMOVAPSmr;
  case X86::VMOVUPDrm:
  case X86::VMOVAPDrm:
    return StOpcode == X86::VMOVUPDmr || StOpcode == X86::VMOVAPDmr;
  case X86::VMOVDQUrm:
  case X86::VMOVDQArm:
    return StOpcode == X86::VMOVDQUmr || StOpcode == X86::VMOVDQAmr;
  case X86::VMOVUPSZ128rm:
  case X86::VMOVAPSZ128rm:
    return StOpcode == X86::VMOVUPSZ128mr || StOpcode == X86::VMOVAPSZ128mr;
  case X86::VMOVUPDZ128rm:
  case X86::VMOVAPDZ128rm:
    return StOpcode == X86::VMOVUPDZ128mr || StOpcode == X86::VMOVAPDZ128mr;
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return StOpcode == X86::VMOVUPSYmr || StOpcode == X86::VMOVAPSYmr;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return StOpcode == X86::VMOVUPDYmr || StOpcode == X86::VMOVAPDYmr;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return StOpcode == X86::VMOVDQUYmr || StOpcode == X86::VMOVDQAYmr;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return StOpcode == X86::VMOVUPSZ256mr || StOpcode == X86::VMOVAPSZ256mr;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return StOpcode == X86::VMOVUPDZ256mr || StOpcode == X86::VMOVAPDZ256mr;
  case X86::VMOVDQU64Z128rm:
  case X86::VMOVDQA64Z128rm:
    return StOpcode == X86::VMOVDQU64Z128mr ||
           StOpcode == X86::VMOVDQA64Z128mr;
  case X86::VMOVDQU32Z128rm:
  case X86::VMOVDQA32Z128rm:
    return StOpcode == X86::VMOVDQU32Z128mr ||
           StOpcode == X86::VMOVDQA32Z128mr;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return StOpcode == X86::VMOVDQU64Z256mr ||
           StOpcode == X86::VMOVDQA64Z256mr;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return StOpcode == X86::VMOVDQU32Z256mr ||
           StOpcode == X86::VMOVDQA32Z256mr;
  default:
    return false;
  }
}
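// Returns true if Opcode is a store that can block forwarding to a load with
// opcode LoadOpcode: any 1/2/4/8-byte GPR store can block, and when the load
// is YMM-sized, 16-byte vector stores can block as well, since a store
// narrower than the load cannot be forwarded.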
static bool isPotentialBlockingStoreInst(unsigned Opcode, unsigned LoadOpcode) {
  bool PBlock = false;
  PBlock |= Opcode == X86::MOV64mr || Opcode == X86::MOV64mi32 ||
            Opcode == X86::MOV32mr || Opcode == X86::MOV32mi ||
            Opcode == X86::MOV16mr || Opcode == X86::MOV16mi ||
            Opcode == X86::MOV8mr || Opcode == X86::MOV8mi;
  if (isYMMLoadOpcode(LoadOpcode))
    PBlock |= Opcode == X86::VMOVUPSmr || Opcode == X86::VMOVAPSmr ||
              Opcode == X86::VMOVUPDmr || Opcode == X86::VMOVAPDmr ||
              Opcode == X86::VMOVDQUmr || Opcode == X86::VMOVDQAmr ||
              Opcode == X86::VMOVUPSZ128mr || Opcode == X86::VMOVAPSZ128mr ||
              Opcode == X86::VMOVUPDZ128mr || Opcode == X86::VMOVAPDZ128mr ||
              Opcode == X86::VMOVDQU64Z128mr ||
              Opcode == X86::VMOVDQA64Z128mr ||
              Opcode == X86::VMOVDQU32Z128mr || Opcode == X86::VMOVDQA32Z128mr;
  return PBlock;
}

static const int MOV128SZ = 16;
static const int MOV64SZ = 8;
static const int MOV32SZ = 4;
static const int MOV16SZ = 2;
static const int MOV8SZ = 1;

static unsigned getYMMtoXMMLoadOpcode(unsigned LoadOpcode) {
  switch (LoadOpcode) {
  case X86::VMOVUPSYrm:
  case X86::VMOVAPSYrm:
    return X86::VMOVUPSrm;
  case X86::VMOVUPDYrm:
  case X86::VMOVAPDYrm:
    return X86::VMOVUPDrm;
  case X86::VMOVDQUYrm:
  case X86::VMOVDQAYrm:
    return X86::VMOVDQUrm;
  case X86::VMOVUPSZ256rm:
  case X86::VMOVAPSZ256rm:
    return X86::VMOVUPSZ128rm;
  case X86::VMOVUPDZ256rm:
  case X86::VMOVAPDZ256rm:
    return X86::VMOVUPDZ128rm;
  case X86::VMOVDQU64Z256rm:
  case X86::VMOVDQA64Z256rm:
    return X86::VMOVDQU64Z128rm;
  case X86::VMOVDQU32Z256rm:
  case X86::VMOVDQA32Z256rm:
    return X86::VMOVDQU32Z128rm;
  default:
    llvm_unreachable("Unexpected Load Instruction Opcode");
  }
  return 0;
}

static unsigned getYMMtoXMMStoreOpcode(unsigned StoreOpcode) {
  switch (StoreOpcode) {
  case X86::VMOVUPSYmr:
  case X86::VMOVAPSYmr:
    return X86::VMOVUPSmr;
  case X86::VMOVUPDYmr:
  case X86::VMOVAPDYmr:
    return X86::VMOVUPDmr;
  case X86::VMOVDQUYmr:
  case X86::VMOVDQAYmr:
    return X86::VMOVDQUmr;
  case X86::VMOVUPSZ256mr:
  case X86::VMOVAPSZ256mr:
    return X86::VMOVUPSZ128mr;
  case X86::VMOVUPDZ256mr:
  case X86::VMOVAPDZ256mr:
    return X86::VMOVUPDZ128mr;
  case X86::VMOVDQU64Z256mr:
  case X86::VMOVDQA64Z256mr:
    return X86::VMOVDQU64Z128mr;
  case X86::VMOVDQU32Z256mr:
  case X86::VMOVDQA32Z256mr:
    return X86::VMOVDQU32Z128mr;
  default:
    llvm_unreachable("Unexpected Store Instruction Opcode");
  }
  return 0;
}
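// Returns the index of the first memory-reference operand of MI (base
// register, scale, index, displacement, segment), accounting for operands
// that precede the memory reference in this instruction's operand list.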
static int getAddrOffset(const MachineInstr *MI) {
  const MCInstrDesc &Desc = MI->getDesc();
  int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags);
  assert(AddrOffset != -1 && "Expected Memory Operand");
  AddrOffset += X86II::getOperandBias(Desc);
  return AddrOffset;
}

static MachineOperand &getBaseOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrBaseReg);
}

static MachineOperand &getDispOperand(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  return MI->getOperand(AddrOffset + X86::AddrDisp);
}

// Relevant addressing modes contain only a base register and an immediate
// displacement, or a frame index and an immediate displacement.
// TODO: Consider expanding to other addressing modes in the future.
static bool isRelevantAddressingMode(MachineInstr *MI) {
  int AddrOffset = getAddrOffset(MI);
  const MachineOperand &Base = getBaseOperand(MI);
  const MachineOperand &Disp = getDispOperand(MI);
  const MachineOperand &Scale = MI->getOperand(AddrOffset + X86::AddrScaleAmt);
  const MachineOperand &Index = MI->getOperand(AddrOffset + X86::AddrIndexReg);
  const MachineOperand &Segment =
      MI->getOperand(AddrOffset + X86::AddrSegmentReg);

  if (!((Base.isReg() && Base.getReg() != X86::NoRegister) || Base.isFI()))
    return false;
  if (!Disp.isImm())
    return false;
  if (Scale.getImm() != 1)
    return false;
  if (!(Index.isReg() && Index.getReg() == X86::NoRegister))
    return false;
  if (!(Segment.isReg() && Segment.getReg() == X86::NoRegister))
    return false;
  return true;
}

// Collect potentially blocking stores.
// Limit the number of instructions we inspect backwards, since the effect of
// a store forwarding block won't be visible if the store and load
// instructions have enough instructions in between to keep the core busy.
static SmallVector<MachineInstr *, 2>
findPotentialBlockers(MachineInstr *LoadInst) {
  SmallVector<MachineInstr *, 2> PotentialBlockers;
  unsigned BlockCount = 0;
  const unsigned InspectionLimit = X86AvoidSFBInspectionLimit;
  for (auto PBInst = std::next(MachineBasicBlock::reverse_iterator(LoadInst)),
            E = LoadInst->getParent()->rend();
       PBInst != E; ++PBInst) {
    if (PBInst->isMetaInstruction())
      continue;
    BlockCount++;
    if (BlockCount >= InspectionLimit)
      break;
    MachineInstr &MI = *PBInst;
    if (MI.getDesc().isCall())
      return PotentialBlockers;
    PotentialBlockers.push_back(&MI);
  }
  // If we didn't reach the instruction limit, try the predecessor blocks.
  // Ideally we should traverse the predecessor blocks in depth with some
  // coloring algorithm, but for now let's just look at the first order
  // predecessors.
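  // A store at the end of a predecessor block can still be in flight when
  // the load executes, so the tail of each predecessor is inspected with
  // whatever inspection budget remains.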
  if (BlockCount < InspectionLimit) {
    MachineBasicBlock *MBB = LoadInst->getParent();
    int LimitLeft = InspectionLimit - BlockCount;
    for (MachineBasicBlock::pred_iterator PB = MBB->pred_begin(),
                                          PE = MBB->pred_end();
         PB != PE; ++PB) {
      MachineBasicBlock *PMBB = *PB;
      int PredCount = 0;
      for (MachineBasicBlock::reverse_iterator PBInst = PMBB->rbegin(),
                                               PME = PMBB->rend();
           PBInst != PME; ++PBInst) {
        if (PBInst->isMetaInstruction())
          continue;
        PredCount++;
        if (PredCount >= LimitLeft)
          break;
        if (PBInst->getDesc().isCall())
          break;
        PotentialBlockers.push_back(&*PBInst);
      }
    }
  }
  return PotentialBlockers;
}

void X86AvoidSFBPass::buildCopy(MachineInstr *LoadInst, unsigned NLoadOpcode,
                                int64_t LoadDisp, MachineInstr *StoreInst,
                                unsigned NStoreOpcode, int64_t StoreDisp,
                                unsigned Size, int64_t LMMOffset,
                                int64_t SMMOffset) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  MachineBasicBlock *MBB = LoadInst->getParent();
  MachineMemOperand *LMMO = *LoadInst->memoperands_begin();
  MachineMemOperand *SMMO = *StoreInst->memoperands_begin();

  Register Reg1 = MRI->createVirtualRegister(
      TII->getRegClass(TII->get(NLoadOpcode), 0, TRI, *(MBB->getParent())));
  MachineInstr *NewLoad =
      BuildMI(*MBB, LoadInst, LoadInst->getDebugLoc(), TII->get(NLoadOpcode),
              Reg1)
          .add(LoadBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(LoadDisp)
          .addReg(X86::NoRegister)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(LMMO, LMMOffset, Size));
  if (LoadBase.isReg())
    getBaseOperand(NewLoad).setIsKill(false);
  LLVM_DEBUG(NewLoad->dump());
  // If the load and store are consecutive, use the LoadInst location to
  // reduce register pressure.
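  // Interleaving the new loads and stores (load, store, load, store, ...)
  // keeps at most one of the temporary registers live at a time.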
  MachineInstr *StInst = StoreInst;
  auto PrevInstrIt = prev_nodbg(MachineBasicBlock::instr_iterator(StoreInst),
                                MBB->instr_begin());
  if (PrevInstrIt.getNodePtr() == LoadInst)
    StInst = LoadInst;
  MachineInstr *NewStore =
      BuildMI(*MBB, StInst, StInst->getDebugLoc(), TII->get(NStoreOpcode))
          .add(StoreBase)
          .addImm(1)
          .addReg(X86::NoRegister)
          .addImm(StoreDisp)
          .addReg(X86::NoRegister)
          .addReg(Reg1)
          .addMemOperand(
              MBB->getParent()->getMachineMemOperand(SMMO, SMMOffset, Size));
  if (StoreBase.isReg())
    getBaseOperand(NewStore).setIsKill(false);
  MachineOperand &StoreSrcVReg = StoreInst->getOperand(X86::AddrNumOperands);
  assert(StoreSrcVReg.isReg() && "Expected virtual register");
  NewStore->getOperand(X86::AddrNumOperands).setIsKill(StoreSrcVReg.isKill());
  LLVM_DEBUG(NewStore->dump());
}

void X86AvoidSFBPass::buildCopies(int Size, MachineInstr *LoadInst,
                                  int64_t LdDispImm, MachineInstr *StoreInst,
                                  int64_t StDispImm, int64_t LMMOffset,
                                  int64_t SMMOffset) {
  int64_t LdDisp = LdDispImm;
  int64_t StDisp = StDispImm;
  while (Size > 0) {
    if ((Size - MOV128SZ >= 0) && isYMMLoadOpcode(LoadInst->getOpcode())) {
      Size = Size - MOV128SZ;
      buildCopy(LoadInst, getYMMtoXMMLoadOpcode(LoadInst->getOpcode()), LdDisp,
                StoreInst, getYMMtoXMMStoreOpcode(StoreInst->getOpcode()),
                StDisp, MOV128SZ, LMMOffset, SMMOffset);
      LdDisp += MOV128SZ;
      StDisp += MOV128SZ;
      LMMOffset += MOV128SZ;
      SMMOffset += MOV128SZ;
      continue;
    }
    if (Size - MOV64SZ >= 0) {
      Size = Size - MOV64SZ;
      buildCopy(LoadInst, X86::MOV64rm, LdDisp, StoreInst, X86::MOV64mr,
                StDisp, MOV64SZ, LMMOffset, SMMOffset);
      LdDisp += MOV64SZ;
      StDisp += MOV64SZ;
      LMMOffset += MOV64SZ;
      SMMOffset += MOV64SZ;
      continue;
    }
    if (Size - MOV32SZ >= 0) {
      Size = Size - MOV32SZ;
      buildCopy(LoadInst, X86::MOV32rm, LdDisp, StoreInst, X86::MOV32mr,
                StDisp, MOV32SZ, LMMOffset, SMMOffset);
      LdDisp += MOV32SZ;
      StDisp += MOV32SZ;
      LMMOffset += MOV32SZ;
      SMMOffset += MOV32SZ;
      continue;
    }
    if (Size - MOV16SZ >= 0) {
      Size = Size - MOV16SZ;
      buildCopy(LoadInst, X86::MOV16rm, LdDisp, StoreInst, X86::MOV16mr,
                StDisp, MOV16SZ, LMMOffset, SMMOffset);
      LdDisp += MOV16SZ;
      StDisp += MOV16SZ;
      LMMOffset += MOV16SZ;
      SMMOffset += MOV16SZ;
      continue;
    }
    if (Size - MOV8SZ >= 0) {
      Size = Size - MOV8SZ;
      buildCopy(LoadInst, X86::MOV8rm, LdDisp, StoreInst, X86::MOV8mr, StDisp,
                MOV8SZ, LMMOffset, SMMOffset);
      LdDisp += MOV8SZ;
      StDisp += MOV8SZ;
      LMMOffset += MOV8SZ;
      SMMOffset += MOV8SZ;
      continue;
    }
  }
  assert(Size == 0 && "Wrong size division");
}

static void updateKillStatus(MachineInstr *LoadInst, MachineInstr *StoreInst) {
  MachineOperand &LoadBase = getBaseOperand(LoadInst);
  MachineOperand &StoreBase = getBaseOperand(StoreInst);
  auto *StorePrevNonDbgInstr =
      prev_nodbg(MachineBasicBlock::instr_iterator(StoreInst),
                 LoadInst->getParent()->instr_begin())
          .getNodePtr();
  if (LoadBase.isReg()) {
    MachineInstr *LastLoad = LoadInst->getPrevNode();
    // If the original load and store to xmm/ymm were consecutive
    // then the partial copies were also created in
    // a consecutive order to reduce register pressure,
    // and the location of the last load is before the last store.
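    // In that case the node just before LoadInst is the last new store and
    // the node before that is the last new load.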
    if (StorePrevNonDbgInstr == LoadInst)
      LastLoad = LoadInst->getPrevNode()->getPrevNode();
    getBaseOperand(LastLoad).setIsKill(LoadBase.isKill());
  }
  if (StoreBase.isReg()) {
    MachineInstr *StInst = StoreInst;
    if (StorePrevNonDbgInstr == LoadInst)
      StInst = LoadInst;
    getBaseOperand(StInst->getPrevNode()).setIsKill(StoreBase.isKill());
  }
}

bool X86AvoidSFBPass::alias(const MachineMemOperand &Op1,
                            const MachineMemOperand &Op2) const {
  if (!Op1.getValue() || !Op2.getValue())
    return true;

  int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
  int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
  int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;

  return !AA->isNoAlias(
      MemoryLocation(Op1.getValue(), Overlapa, Op1.getAAInfo()),
      MemoryLocation(Op2.getValue(), Overlapb, Op2.getAAInfo()));
}

void X86AvoidSFBPass::findPotentiallyBlockedCopies(MachineFunction &MF) {
  for (auto &MBB : MF)
    for (auto &MI : MBB) {
      if (!isPotentialBlockedMemCpyLd(MI.getOpcode()))
        continue;
      Register DefVR = MI.getOperand(0).getReg();
      if (!MRI->hasOneNonDBGUse(DefVR))
        continue;
      for (auto UI = MRI->use_nodbg_begin(DefVR), UE = MRI->use_nodbg_end();
           UI != UE;) {
        MachineOperand &StoreMO = *UI++;
        MachineInstr &StoreMI = *StoreMO.getParent();
        // Skip cases where the memcpy may overlap.
        if (StoreMI.getParent() == MI.getParent() &&
            isPotentialBlockedMemCpyPair(MI.getOpcode(),
                                         StoreMI.getOpcode()) &&
            isRelevantAddressingMode(&MI) &&
            isRelevantAddressingMode(&StoreMI) && MI.hasOneMemOperand() &&
            StoreMI.hasOneMemOperand()) {
          if (!alias(**MI.memoperands_begin(), **StoreMI.memoperands_begin()))
            BlockedLoadsStoresPairs.push_back(std::make_pair(&MI, &StoreMI));
        }
      }
    }
}

unsigned X86AvoidSFBPass::getRegSizeInBytes(MachineInstr *LoadInst) {
  const auto *TRC = TII->getRegClass(TII->get(LoadInst->getOpcode()), 0, TRI,
                                     *LoadInst->getParent()->getParent());
  return TRI->getRegSizeInBits(*TRC) / 8;
}

void X86AvoidSFBPass::breakBlockedCopies(
    MachineInstr *LoadInst, MachineInstr *StoreInst,
    const DisplacementSizeMap &BlockingStoresDispSizeMap) {
  int64_t LdDispImm = getDispOperand(LoadInst).getImm();
  int64_t StDispImm = getDispOperand(StoreInst).getImm();
  int64_t LMMOffset = 0;
  int64_t SMMOffset = 0;

  int64_t LdDisp1 = LdDispImm;
  int64_t LdDisp2 = 0;
  int64_t StDisp1 = StDispImm;
  int64_t StDisp2 = 0;
  unsigned Size1 = 0;
  unsigned Size2 = 0;
  int64_t LdStDelta = StDispImm - LdDispImm;

  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    LdDisp2 = DispSizePair.first;
    StDisp2 = DispSizePair.first + LdStDelta;
    Size2 = DispSizePair.second;
    // Avoid copying overlapping areas.
    if (LdDisp2 < LdDisp1) {
      int OverlapDelta = LdDisp1 - LdDisp2;
      LdDisp2 += OverlapDelta;
      StDisp2 += OverlapDelta;
      Size2 -= OverlapDelta;
    }
    Size1 = LdDisp2 - LdDisp1;

    // Build a copy for the point until the current blocking store's
    // displacement.
    buildCopies(Size1, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
                SMMOffset);
    // Build a copy for the current blocking store.
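    // (If this store overlapped the previous region, its displacement and
    // size were already adjusted above.)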
    buildCopies(Size2, LoadInst, LdDisp2, StoreInst, StDisp2,
                LMMOffset + Size1, SMMOffset + Size1);
    LdDisp1 = LdDisp2 + Size2;
    StDisp1 = StDisp2 + Size2;
    LMMOffset += Size1 + Size2;
    SMMOffset += Size1 + Size2;
  }
  unsigned Size3 = (LdDispImm + getRegSizeInBytes(LoadInst)) - LdDisp1;
  buildCopies(Size3, LoadInst, LdDisp1, StoreInst, StDisp1, LMMOffset,
              SMMOffset);
}

static bool hasSameBaseOpValue(MachineInstr *LoadInst,
                               MachineInstr *StoreInst) {
  const MachineOperand &LoadBase = getBaseOperand(LoadInst);
  const MachineOperand &StoreBase = getBaseOperand(StoreInst);
  if (LoadBase.isReg() != StoreBase.isReg())
    return false;
  if (LoadBase.isReg())
    return LoadBase.getReg() == StoreBase.getReg();
  return LoadBase.getIndex() == StoreBase.getIndex();
}

static bool isBlockingStore(int64_t LoadDispImm, unsigned LoadSize,
                            int64_t StoreDispImm, unsigned StoreSize) {
  return ((StoreDispImm >= LoadDispImm) &&
          (StoreDispImm <= LoadDispImm + (LoadSize - StoreSize)));
}

// Keep track of all stores blocking a load.
static void
updateBlockingStoresDispSizeMap(DisplacementSizeMap &BlockingStoresDispSizeMap,
                                int64_t DispImm, unsigned Size) {
  if (BlockingStoresDispSizeMap.count(DispImm)) {
    // Choose the smallest blocking store starting at this displacement.
    if (BlockingStoresDispSizeMap[DispImm] > Size)
      BlockingStoresDispSizeMap[DispImm] = Size;
  } else
    BlockingStoresDispSizeMap[DispImm] = Size;
}

// Remove blocking stores contained in each other.
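// For example (illustrative values), if the map holds a 16-byte store at
// displacement 0 and a 4-byte store at displacement 4, the 4-byte store is
// contained in the 16-byte one, so only the {4, 4} entry is kept.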
static void
removeRedundantBlockingStores(DisplacementSizeMap &BlockingStoresDispSizeMap) {
  if (BlockingStoresDispSizeMap.size() <= 1)
    return;

  SmallVector<std::pair<int64_t, unsigned>, 0> DispSizeStack;
  for (auto DispSizePair : BlockingStoresDispSizeMap) {
    int64_t CurrDisp = DispSizePair.first;
    unsigned CurrSize = DispSizePair.second;
    while (DispSizeStack.size()) {
      int64_t PrevDisp = DispSizeStack.back().first;
      unsigned PrevSize = DispSizeStack.back().second;
      if (CurrDisp + CurrSize > PrevDisp + PrevSize)
        break;
      DispSizeStack.pop_back();
    }
    DispSizeStack.push_back(DispSizePair);
  }
  BlockingStoresDispSizeMap.clear();
  for (auto Disp : DispSizeStack)
    BlockingStoresDispSizeMap.insert(Disp);
}

bool X86AvoidSFBPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86AvoidStoreForwardBlocks || skipFunction(MF.getFunction()) ||
      !MF.getSubtarget<X86Subtarget>().is64Bit())
    return false;

  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  LLVM_DEBUG(dbgs() << "Start X86AvoidStoreForwardBlocks\n");
  // Look for a load then a store to XMM/YMM which look like a memcpy.
  findPotentiallyBlockedCopies(MF);

  for (auto LoadStoreInstPair : BlockedLoadsStoresPairs) {
    MachineInstr *LoadInst = LoadStoreInstPair.first;
    int64_t LdDispImm = getDispOperand(LoadInst).getImm();
    DisplacementSizeMap BlockingStoresDispSizeMap;

    SmallVector<MachineInstr *, 2> PotentialBlockers =
        findPotentialBlockers(LoadInst);
    for (auto *PBInst : PotentialBlockers) {
      if (!isPotentialBlockingStoreInst(PBInst->getOpcode(),
                                        LoadInst->getOpcode()) ||
          !isRelevantAddressingMode(PBInst) || !PBInst->hasOneMemOperand())
        continue;
      int64_t PBstDispImm = getDispOperand(PBInst).getImm();
      unsigned PBstSize = (*PBInst->memoperands_begin())->getSize();
      // This check doesn't cover all cases, but it will suffice for now.
      // TODO: Take branch probability into consideration; if the blocking
      // store is in an unreached block, breaking the memcpy could lose
      // performance.
      if (hasSameBaseOpValue(LoadInst, PBInst) &&
          isBlockingStore(LdDispImm, getRegSizeInBytes(LoadInst), PBstDispImm,
                          PBstSize))
        updateBlockingStoresDispSizeMap(BlockingStoresDispSizeMap, PBstDispImm,
                                        PBstSize);
    }

    if (BlockingStoresDispSizeMap.empty())
      continue;

    // We found a store forwarding block; break the memcpy's load and store
    // into smaller copies such that each smaller store that was causing
    // the block will now be copied separately.
    MachineInstr *StoreInst = LoadStoreInstPair.second;
    LLVM_DEBUG(dbgs() << "Blocked load and store instructions: \n");
    LLVM_DEBUG(LoadInst->dump());
    LLVM_DEBUG(StoreInst->dump());
    LLVM_DEBUG(dbgs() << "Replaced with:\n");
    removeRedundantBlockingStores(BlockingStoresDispSizeMap);
    breakBlockedCopies(LoadInst, StoreInst, BlockingStoresDispSizeMap);
    updateKillStatus(LoadInst, StoreInst);
    ForRemoval.push_back(LoadInst);
    ForRemoval.push_back(StoreInst);
    Changed = true;
  }
  for (auto *RemovedInst : ForRemoval) {
    RemovedInst->eraseFromParent();
  }
  ForRemoval.clear();
  BlockedLoadsStoresPairs.clear();
  LLVM_DEBUG(dbgs() << "End X86AvoidStoreForwardBlocks\n");

  return Changed;
}