//===-- RISCVExpandAtomicPseudoInsts.cpp - Expand atomic pseudo instrs. ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands atomic pseudo instructions into
// target instructions. This pass should be run at the last possible moment,
// avoiding the possibility for other passes to break the requirements for
// forward progress in the LR/SC block.
//
//===----------------------------------------------------------------------===//

#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVTargetMachine.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

#define RISCV_EXPAND_ATOMIC_PSEUDO_NAME                                       \
  "RISCV atomic pseudo instruction expansion pass"

namespace {

class RISCVExpandAtomicPseudo : public MachineFunctionPass {
public:
  const RISCVInstrInfo *TII;
  static char ID;

  RISCVExpandAtomicPseudo() : MachineFunctionPass(ID) {
    initializeRISCVExpandAtomicPseudoPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return RISCV_EXPAND_ATOMIC_PSEUDO_NAME;
  }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicBinOp(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
                         bool IsMasked, int Width,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            AtomicRMWInst::BinOp, bool IsMasked, int Width,
                            MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, bool IsMasked,
                           int Width, MachineBasicBlock::iterator &NextMBBI);
};

char RISCVExpandAtomicPseudo::ID = 0;

bool RISCVExpandAtomicPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

bool RISCVExpandAtomicPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool RISCVExpandAtomicPseudo::expandMI(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       MachineBasicBlock::iterator &NextMBBI) {
  // RISCVInstrInfo::getInstSizeInBytes hard-codes the number of expanded
  // instructions for each pseudo, and must be updated when adding new pseudos
  // or changing existing ones.
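  // For example, PseudoAtomicLoadNand32 below expands to a five-instruction
  // LR/SC loop (lr.w, and, xori, sc.w, bnez), i.e. 20 bytes that
  // getInstSizeInBytes is expected to report for that pseudo. (Illustrative
  // note; RISCVInstrInfo.cpp remains the authoritative source for the sizes.)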
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                             NextMBBI);
  case RISCV::PseudoAtomicLoadNand64:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 64,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
                                NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoCmpXchg64:
    return expandAtomicCmpXchg(MBB, MBBI, false, 64, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
  }

  return false;
}

static unsigned getLRForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_W;
  case AtomicOrdering::Acquire:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_W;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQ_RL;
  }
}

static unsigned getSCForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_W;
  case AtomicOrdering::Acquire:
    return RISCV::SC_W;
  case AtomicOrdering::Release:
    return RISCV::SC_W_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_AQ_RL;
  }
}

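// Note (illustrative summary of the mappings above and below): the acquire
// side of an ordering is attached to the LR and the release side to the SC.
// Acquire thus maps to lr.aq plus a plain sc, release to a plain lr plus
// sc.rl, acq_rel to lr.aq plus sc.rl, and seq_cst uses .aqrl on both
// instructions.
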
static unsigned getLRForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_D;
  case AtomicOrdering::Acquire:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_D;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_D_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_D_AQ_RL;
  }
}

static unsigned getSCForRMW64(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_D;
  case AtomicOrdering::Acquire:
    return RISCV::SC_D;
  case AtomicOrdering::Release:
    return RISCV::SC_D_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_D_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_D_AQ_RL;
  }
}

static unsigned getLRForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getLRForRMW32(Ordering);
  if (Width == 64)
    return getLRForRMW64(Ordering);
  llvm_unreachable("Unexpected LR width\n");
}

static unsigned getSCForRMW(AtomicOrdering Ordering, int Width) {
  if (Width == 32)
    return getSCForRMW32(Ordering);
  if (Width == 64)
    return getSCForRMW64(Ordering);
  llvm_unreachable("Unexpected SC width\n");
}

static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
                                   DebugLoc DL, MachineBasicBlock *ThisMBB,
                                   MachineBasicBlock *LoopMBB,
                                   MachineBasicBlock *DoneMBB,
                                   AtomicRMWInst::BinOp BinOp, int Width) {
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

  // .loop:
  //   lr.[w|d] dest, (addr)
  //   binop scratch, dest, val
  //   sc.[w|d] scratch, scratch, (addr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
                              MachineBasicBlock *MBB, Register DestReg,
                              Register OldValReg, Register NewValReg,
                              Register MaskReg, Register ScratchReg) {
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");

  // We select bits from newval and oldval using:
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
  // r = oldval ^ ((oldval ^ newval) & masktargetdata);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
      .addReg(OldValReg)
      .addReg(NewValReg);
  BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
      .addReg(ScratchReg)
      .addReg(MaskReg);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
      .addReg(OldValReg)
      .addReg(ScratchReg);
}

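// Worked example (illustrative): with oldval = 0xAABBCCDD,
// newval = 0x000000EE and mask = 0x000000FF, the three instructions compute
//   r = 0xAABBCCDD ^ ((0xAABBCCDD ^ 0x000000EE) & 0x000000FF) = 0xAABBCCEE,
// i.e. only the byte selected by the mask is replaced with the new value.
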
static void doMaskedAtomicBinOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");
  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register IncrReg = MI.getOperand(3).getReg();
  Register MaskReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(5).getImm());

  // .loop:
  //   lr.w destreg, (alignedaddr)
  //   binop scratch, destreg, incr
  //   xor scratch, destreg, scratch
  //   and scratch, scratch, masktargetdata
  //   xor scratch, destreg, scratch
  //   sc.w scratch, scratch, (alignedaddr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADDI), ScratchReg)
        .addReg(IncrReg)
        .addImm(0);
    break;
  case AtomicRMWInst::Add:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Sub:
    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }

  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
                    ScratchReg);

  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

bool RISCVExpandAtomicPseudo::expandAtomicBinOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopMBB);
  MF->insert(++LoopMBB->getIterator(), DoneMBB);

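  // The resulting control flow (illustrative sketch): MBB falls through into
  // LoopMBB, which branches back to itself while the SC fails and otherwise
  // falls through into DoneMBB, which receives all code after the pseudo.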
  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopMBB);

  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
                       MachineBasicBlock *MBB, Register ValReg,
                       Register ShamtReg) {
  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
}

bool RISCVExpandAtomicPseudo::expandAtomicMinMaxOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  assert(IsMasked == true &&
         "Should only need to expand masked atomic max/min");
  assert(Width == 32 && "Should never need to expand masked 64-bit operations");

  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register Scratch1Reg = MI.getOperand(1).getReg();
  Register Scratch2Reg = MI.getOperand(2).getReg();
  Register AddrReg = MI.getOperand(3).getReg();
  Register IncrReg = MI.getOperand(4).getReg();
  Register MaskReg = MI.getOperand(5).getReg();
  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());
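
  // Note (illustrative): after the AND below, Scratch2Reg holds the masked
  // field with all other bits zero. For the signed Min/Max cases it is
  // sign-extended first (insertSext shifts left, then arithmetic-shifts
  // right, by the shift amount in operand 6) so that the signed BGE
  // comparison sees the field's sign; UMax/UMin use BGEU on the
  // zero-extended value directly.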
  //
  // .loophead:
  //   lr.w destreg, (alignedaddr)
  //   and scratch2, destreg, mask
  //   mv scratch1, destreg
  //   [sext scratch2 if signed min/max]
  //   ifnochangeneeded scratch2, incr, .looptail
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
      .addReg(DestReg)
      .addReg(MaskReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
      .addReg(DestReg)
      .addImm(0);

  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Max: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::Min: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::UMax:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }

  // .loopifbody:
  //   xor scratch1, destreg, incr
  //   and scratch1, scratch1, mask
  //   xor scratch1, destreg, scratch1
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);

  // .looptail:
  //   sc.w scratch1, scratch1, (addr)
  //   bnez scratch1, loop
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
      .addReg(AddrReg)
      .addReg(Scratch1Reg);
  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
      .addReg(Scratch1Reg)
      .addReg(RISCV::X0)
      .addMBB(LoopHeadMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

bool RISCVExpandAtomicPseudo::expandAtomicCmpXchg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
    int Width, MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

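  // The resulting control flow (illustrative sketch): LoopHeadMBB branches to
  // DoneMBB on a compare mismatch and otherwise falls through into
  // LoopTailMBB, which branches back to LoopHeadMBB while the SC fails and
  // falls through into DoneMBB on success.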
  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopHeadMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  Register DestReg = MI.getOperand(0).getReg();
  Register ScratchReg = MI.getOperand(1).getReg();
  Register AddrReg = MI.getOperand(2).getReg();
  Register CmpValReg = MI.getOperand(3).getReg();
  Register NewValReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());

  if (!IsMasked) {
    // .loophead:
    //   lr.[w|d] dest, (addr)
    //   bne dest, cmpval, done
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(DestReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);
    // .looptail:
    //   sc.[w|d] scratch, newval, (addr)
    //   bnez scratch, loophead
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(NewValReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  } else {
    // .loophead:
    //   lr.w dest, (addr)
    //   and scratch, dest, mask
    //   bne scratch, cmpval, done
    Register MaskReg = MI.getOperand(5).getReg();
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW(Ordering, Width)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(MaskReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);

    // .looptail:
    //   xor scratch, dest, newval
    //   and scratch, scratch, mask
    //   xor scratch, dest, scratch
    //   sc.w scratch, scratch, (addr)
    //   bnez scratch, loophead
    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                      MaskReg, ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW(Ordering, Width)), ScratchReg)
        .addReg(AddrReg)
        .addReg(ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  }

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

} // end of anonymous namespace

INITIALIZE_PASS(RISCVExpandAtomicPseudo, "riscv-expand-atomic-pseudo",
                RISCV_EXPAND_ATOMIC_PSEUDO_NAME, false, false)

namespace llvm {

FunctionPass *createRISCVExpandAtomicPseudoPass() {
  return new RISCVExpandAtomicPseudo();
}

} // end of namespace llvm