//===- AArch64SIMDInstrOpt.cpp - AArch64 SIMD instruction optimizer ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs optimization on SIMD instructions
// with high latency by splitting them into more efficient series of
// instructions.
//
// 1. Rewrite certain SIMD instructions with vector element due to their
//    inefficiency on some targets.
//
//    For example:
//      fmla v0.4s, v1.4s, v2.s[1]
//
//    Is rewritten into:
//      dup  v3.4s, v2.s[1]
//      fmla v0.4s, v1.4s, v3.4s
//
// 2. Rewrite interleaved memory access instructions due to their
//    inefficiency on some targets.
//
//    For example:
//      st2 {v0.4s, v1.4s}, addr
//
//    Is rewritten into:
//      zip1 v2.4s, v0.4s, v1.4s
//      zip2 v3.4s, v0.4s, v1.4s
//      stp  q2, q3, addr
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include <map>
#include <unordered_map>

using namespace llvm;

#define DEBUG_TYPE "aarch64-simdinstr-opt"

STATISTIC(NumModifiedInstr,
          "Number of SIMD instructions modified");

#define AARCH64_VECTOR_BY_ELEMENT_OPT_NAME                                     \
  "AArch64 SIMD instructions optimization pass"

namespace {

struct AArch64SIMDInstrOpt : public MachineFunctionPass {
  static char ID;

  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;
  TargetSchedModel SchedModel;

  // The two maps below are used to cache decisions instead of recomputing
  // them.
  // This map caches instruction replacement decisions within a function and
  // across functions.
  std::map<std::pair<unsigned, std::string>, bool> SIMDInstrTable;
  // This map caches, for a particular target, the decision of whether to exit
  // the interleaved-store instruction replacement subpass early.
  std::unordered_map<std::string, bool> InterlEarlyExit;

  typedef enum {
    VectorElem,
    Interleave
  } Subpass;

  // Instruction represented by OrigOpc is replaced by instructions in ReplOpc.
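  // The RC field is the register class used to create the virtual destination
  // registers for the non-store instructions in ReplOpc.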
  struct InstReplInfo {
    unsigned OrigOpc;
    std::vector<unsigned> ReplOpc;
    const TargetRegisterClass RC;
  };

#define RuleST2(OpcOrg, OpcR0, OpcR1, OpcR2, RC) \
  {OpcOrg, {OpcR0, OpcR1, OpcR2}, RC}
#define RuleST4(OpcOrg, OpcR0, OpcR1, OpcR2, OpcR3, OpcR4, OpcR5, OpcR6, \
                OpcR7, OpcR8, OpcR9, RC) \
  {OpcOrg, \
   {OpcR0, OpcR1, OpcR2, OpcR3, OpcR4, OpcR5, OpcR6, OpcR7, OpcR8, OpcR9}, RC}

  // The Instruction Replacement Table:
  std::vector<InstReplInfo> IRT = {
    // ST2 instructions
    RuleST2(AArch64::ST2Twov2d, AArch64::ZIP1v2i64, AArch64::ZIP2v2i64,
            AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST2(AArch64::ST2Twov4s, AArch64::ZIP1v4i32, AArch64::ZIP2v4i32,
            AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST2(AArch64::ST2Twov2s, AArch64::ZIP1v2i32, AArch64::ZIP2v2i32,
            AArch64::STPDi, AArch64::FPR64RegClass),
    RuleST2(AArch64::ST2Twov8h, AArch64::ZIP1v8i16, AArch64::ZIP2v8i16,
            AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST2(AArch64::ST2Twov4h, AArch64::ZIP1v4i16, AArch64::ZIP2v4i16,
            AArch64::STPDi, AArch64::FPR64RegClass),
    RuleST2(AArch64::ST2Twov16b, AArch64::ZIP1v16i8, AArch64::ZIP2v16i8,
            AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST2(AArch64::ST2Twov8b, AArch64::ZIP1v8i8, AArch64::ZIP2v8i8,
            AArch64::STPDi, AArch64::FPR64RegClass),
    // ST4 instructions
    RuleST4(AArch64::ST4Fourv2d, AArch64::ZIP1v2i64, AArch64::ZIP2v2i64,
            AArch64::ZIP1v2i64, AArch64::ZIP2v2i64, AArch64::ZIP1v2i64,
            AArch64::ZIP2v2i64, AArch64::ZIP1v2i64, AArch64::ZIP2v2i64,
            AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST4(AArch64::ST4Fourv4s, AArch64::ZIP1v4i32, AArch64::ZIP2v4i32,
            AArch64::ZIP1v4i32, AArch64::ZIP2v4i32, AArch64::ZIP1v4i32,
            AArch64::ZIP2v4i32, AArch64::ZIP1v4i32, AArch64::ZIP2v4i32,
            AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST4(AArch64::ST4Fourv2s, AArch64::ZIP1v2i32, AArch64::ZIP2v2i32,
            AArch64::ZIP1v2i32, AArch64::ZIP2v2i32, AArch64::ZIP1v2i32,
            AArch64::ZIP2v2i32, AArch64::ZIP1v2i32, AArch64::ZIP2v2i32,
            AArch64::STPDi, AArch64::STPDi, AArch64::FPR64RegClass),
    RuleST4(AArch64::ST4Fourv8h, AArch64::ZIP1v8i16, AArch64::ZIP2v8i16,
            AArch64::ZIP1v8i16, AArch64::ZIP2v8i16, AArch64::ZIP1v8i16,
            AArch64::ZIP2v8i16, AArch64::ZIP1v8i16, AArch64::ZIP2v8i16,
            AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST4(AArch64::ST4Fourv4h, AArch64::ZIP1v4i16, AArch64::ZIP2v4i16,
            AArch64::ZIP1v4i16, AArch64::ZIP2v4i16, AArch64::ZIP1v4i16,
            AArch64::ZIP2v4i16, AArch64::ZIP1v4i16, AArch64::ZIP2v4i16,
            AArch64::STPDi, AArch64::STPDi, AArch64::FPR64RegClass),
    RuleST4(AArch64::ST4Fourv16b, AArch64::ZIP1v16i8, AArch64::ZIP2v16i8,
            AArch64::ZIP1v16i8, AArch64::ZIP2v16i8, AArch64::ZIP1v16i8,
            AArch64::ZIP2v16i8, AArch64::ZIP1v16i8, AArch64::ZIP2v16i8,
            AArch64::STPQi, AArch64::STPQi, AArch64::FPR128RegClass),
    RuleST4(AArch64::ST4Fourv8b, AArch64::ZIP1v8i8, AArch64::ZIP2v8i8,
            AArch64::ZIP1v8i8, AArch64::ZIP2v8i8, AArch64::ZIP1v8i8,
            AArch64::ZIP2v8i8, AArch64::ZIP1v8i8, AArch64::ZIP2v8i8,
            AArch64::STPDi, AArch64::STPDi, AArch64::FPR64RegClass)
  };

  // A costly instruction is replaced in this work by N efficient instructions.
  // The maximum of N is currently 10 and it is for the ST4 case.
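  // (A RuleST4 entry expands to eight ZIP1/ZIP2 instructions followed by two
  // STP instructions; a RuleST2 entry expands to two ZIP instructions followed
  // by one STP instruction.)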
  static const unsigned MaxNumRepl = 10;

  AArch64SIMDInstrOpt() : MachineFunctionPass(ID) {
    initializeAArch64SIMDInstrOptPass(*PassRegistry::getPassRegistry());
  }

  /// Based only on the latency of instructions, determine if it is cost
  /// efficient to replace the instruction InstDesc by the instructions
  /// stored in the array ReplInstrMCID.
  /// Return true if replacement is expected to be faster.
  bool shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
                         SmallVectorImpl<const MCInstrDesc *> &ReplInstrMCID);

  /// Determine if we need to exit the instruction replacement optimization
  /// subpasses early. This makes sure that no compile time is spent in this
  /// pass for targets with no need for any of these optimizations.
  /// Return true if early exit of the pass is recommended.
  bool shouldExitEarly(MachineFunction *MF, Subpass SP);

  /// Check whether an equivalent DUP instruction has already been
  /// created or not.
  /// Return true when the DUP instruction already exists. In this case,
  /// DestReg will point to the destination of the already created DUP.
  bool reuseDUP(MachineInstr &MI, unsigned DupOpcode, unsigned SrcReg,
                unsigned LaneNumber, unsigned *DestReg) const;

  /// Certain SIMD instructions with vector element operands are not efficient.
  /// Rewrite them into SIMD instructions with vector operands. This rewrite
  /// is driven by the latency of the instructions.
  /// Return true if the SIMD instruction is modified.
  bool optimizeVectElement(MachineInstr &MI);

  /// Process the REG_SEQUENCE instruction and extract the source
  /// operands of the ST2/ST4 instruction from it.
  /// Example of such an instruction:
  ///   %dest = REG_SEQUENCE %st2_src1, dsub0, %st2_src2, dsub1;
  /// Return true when the instruction is processed successfully.
  bool processSeqRegInst(MachineInstr *DefiningMI, unsigned *StReg,
                         unsigned *StRegKill, unsigned NumArg) const;

  /// Load/Store interleaving instructions are not always beneficial.
  /// Replace them by ZIP instructions and classical load/store.
  /// Return true if the SIMD instruction is modified.
  bool optimizeLdStInterleave(MachineInstr &MI);

  /// Return the number of useful source registers for this
  /// instruction (2 for ST2 and 4 for ST4).
  unsigned determineSrcReg(MachineInstr &MI) const;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override {
    return AARCH64_VECTOR_BY_ELEMENT_OPT_NAME;
  }
};

char AArch64SIMDInstrOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64SIMDInstrOpt, "aarch64-simdinstr-opt",
                AARCH64_VECTOR_BY_ELEMENT_OPT_NAME, false, false)

/// Based only on the latency of instructions, determine if it is cost
/// efficient to replace the instruction InstDesc by the instructions stored
/// in the array InstDescRepl.
/// Return true if replacement is expected to be faster.
bool AArch64SIMDInstrOpt::
shouldReplaceInst(MachineFunction *MF, const MCInstrDesc *InstDesc,
                  SmallVectorImpl<const MCInstrDesc *> &InstDescRepl) {
  // Check if the replacement decision is already available in the cached
  // table. If so, return it.
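  // The cache key pairs the opcode with the CPU name, so a decision cached
  // while compiling one function is only reused for code targeting the same
  // subtarget.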
  std::string Subtarget = std::string(SchedModel.getSubtargetInfo()->getCPU());
  auto InstID = std::make_pair(InstDesc->getOpcode(), Subtarget);
  auto It = SIMDInstrTable.find(InstID);
  if (It != SIMDInstrTable.end())
    return It->second;

  unsigned SCIdx = InstDesc->getSchedClass();
  const MCSchedClassDesc *SCDesc =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);

  // If the target does not define resources for the instructions of interest,
  // or defines them with variant scheduling classes, then conservatively
  // return false for no replacement.
  if (!SCDesc->isValid() || SCDesc->isVariant()) {
    SIMDInstrTable[InstID] = false;
    return false;
  }
  for (const MCInstrDesc *IDesc : InstDescRepl) {
    const MCSchedClassDesc *SCDescRepl =
        SchedModel.getMCSchedModel()->getSchedClassDesc(
            IDesc->getSchedClass());
    if (!SCDescRepl->isValid() || SCDescRepl->isVariant()) {
      SIMDInstrTable[InstID] = false;
      return false;
    }
  }

  // Replacement cost.
  unsigned ReplCost = 0;
  for (const MCInstrDesc *IDesc : InstDescRepl)
    ReplCost += SchedModel.computeInstrLatency(IDesc->getOpcode());

  bool Profitable =
      SchedModel.computeInstrLatency(InstDesc->getOpcode()) > ReplCost;
  SIMDInstrTable[InstID] = Profitable;
  return Profitable;
}

/// Determine if we need to exit this pass early for a kind of instruction
/// replacement. This makes sure that no compile time is spent in this pass
/// for targets with no need for any of these optimizations beyond performing
/// this check.
/// Return true if early exit of this pass for a kind of instruction
/// replacement is recommended for a target.
bool AArch64SIMDInstrOpt::shouldExitEarly(MachineFunction *MF, Subpass SP) {
  const MCInstrDesc *OriginalMCID;
  SmallVector<const MCInstrDesc *, MaxNumRepl> ReplInstrMCID;

  switch (SP) {
  // For this optimization, check by comparing the latency of a representative
  // instruction to that of the replacement instructions.
  // TODO: check for all concerned instructions.
  case VectorElem:
    OriginalMCID = &TII->get(AArch64::FMLAv4i32_indexed);
    ReplInstrMCID.push_back(&TII->get(AArch64::DUPv4i32lane));
    ReplInstrMCID.push_back(&TII->get(AArch64::FMLAv4f32));
    if (shouldReplaceInst(MF, OriginalMCID, ReplInstrMCID))
      return false;
    break;

  // For this optimization, check for all concerned instructions.
  case Interleave: {
    std::string Subtarget =
        std::string(SchedModel.getSubtargetInfo()->getCPU());
    auto It = InterlEarlyExit.find(Subtarget);
    if (It != InterlEarlyExit.end())
      return It->second;

    for (auto &I : IRT) {
      OriginalMCID = &TII->get(I.OrigOpc);
      for (auto &Repl : I.ReplOpc)
        ReplInstrMCID.push_back(&TII->get(Repl));
      if (shouldReplaceInst(MF, OriginalMCID, ReplInstrMCID)) {
        InterlEarlyExit[Subtarget] = false;
        return false;
      }
      ReplInstrMCID.clear();
    }
    InterlEarlyExit[Subtarget] = true;
    break;
  }
  }

  return true;
}

/// Check whether an equivalent DUP instruction has already been
/// created or not.
/// Return true when the DUP instruction already exists. In this case,
/// DestReg will point to the destination of the already created DUP.
bool AArch64SIMDInstrOpt::reuseDUP(MachineInstr &MI, unsigned DupOpcode,
                                   unsigned SrcReg, unsigned LaneNumber,
                                   unsigned *DestReg) const {
  // Scan backwards from MI to the beginning of the basic block, looking for
  // a DUP with the same opcode, source register, and lane number.
  for (MachineBasicBlock::iterator MII = MI, MIE = MI.getParent()->begin();
       MII != MIE;) {
    MII--;
    MachineInstr *CurrentMI = &*MII;

    if (CurrentMI->getOpcode() == DupOpcode &&
        CurrentMI->getNumOperands() == 3 &&
        CurrentMI->getOperand(1).getReg() == SrcReg &&
        CurrentMI->getOperand(2).getImm() == LaneNumber) {
      *DestReg = CurrentMI->getOperand(0).getReg();
      return true;
    }
  }

  return false;
}

/// Certain SIMD instructions with vector element operands are not efficient.
/// Rewrite them into SIMD instructions with vector operands. This rewrite
/// is driven by the latency of the instructions.
/// The instructions of concern are, for the time being, FMLA, FMLS, FMUL,
/// and FMULX; hence they are hardcoded.
///
/// For example:
///   fmla v0.4s, v1.4s, v2.s[1]
///
/// Is rewritten into:
///   dup  v3.4s, v2.s[1]  // DUP not necessary if redundant
///   fmla v0.4s, v1.4s, v3.4s
///
/// Return true if the SIMD instruction is modified.
bool AArch64SIMDInstrOpt::optimizeVectElement(MachineInstr &MI) {
  const MCInstrDesc *MulMCID, *DupMCID;
  const TargetRegisterClass *RC = &AArch64::FPR128RegClass;

  switch (MI.getOpcode()) {
  default:
    return false;

  // 4X32 instructions
  case AArch64::FMLAv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLAv4f32);
    break;
  case AArch64::FMLSv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLSv4f32);
    break;
  case AArch64::FMULXv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULXv4f32);
    break;
  case AArch64::FMULv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULv4f32);
    break;

  // 2X64 instructions
  case AArch64::FMLAv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLAv2f64);
    break;
  case AArch64::FMLSv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLSv2f64);
    break;
  case AArch64::FMULXv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULXv2f64);
    break;
  case AArch64::FMULv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULv2f64);
    break;

  // 2X32 instructions
  case AArch64::FMLAv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLAv2f32);
    break;
  case AArch64::FMLSv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLSv2f32);
    break;
  case AArch64::FMULXv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULXv2f32);
    break;
  case AArch64::FMULv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULv2f32);
    break;
  }

  SmallVector<const MCInstrDesc *, 2> ReplInstrMCID;
  ReplInstrMCID.push_back(DupMCID);
  ReplInstrMCID.push_back(MulMCID);
  if (!shouldReplaceInst(MI.getParent()->getParent(),
                         &TII->get(MI.getOpcode()), ReplInstrMCID))
    return false;
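  // At this point the rewrite is known to be profitable for this subtarget.
  // Build a DUP of the selected lane (reusing an equivalent earlier DUP when
  // possible), followed by the vector-operand form of the arithmetic
  // instruction.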
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Get the operands of the current SIMD arithmetic instruction.
  Register MulDest = MI.getOperand(0).getReg();
  Register SrcReg0 = MI.getOperand(1).getReg();
  unsigned Src0IsKill = getKillRegState(MI.getOperand(1).isKill());
  Register SrcReg1 = MI.getOperand(2).getReg();
  unsigned Src1IsKill = getKillRegState(MI.getOperand(2).isKill());
  unsigned DupDest;

  // Instructions of interest have either 4 or 5 operands.
  if (MI.getNumOperands() == 5) {
    Register SrcReg2 = MI.getOperand(3).getReg();
    unsigned Src2IsKill = getKillRegState(MI.getOperand(3).isKill());
    unsigned LaneNumber = MI.getOperand(4).getImm();
    // Create a new DUP instruction. Note that if an equivalent DUP instruction
    // has already been created before, then use that one instead of creating
    // a new one.
    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg2, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg2, Src2IsKill)
          .addImm(LaneNumber);
    }
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(SrcReg1, Src1IsKill)
        .addReg(DupDest, Src2IsKill);
  } else if (MI.getNumOperands() == 4) {
    unsigned LaneNumber = MI.getOperand(3).getImm();
    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg1, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg1, Src1IsKill)
          .addImm(LaneNumber);
    }
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(DupDest, Src1IsKill);
  } else {
    return false;
  }

  ++NumModifiedInstr;
  return true;
}

/// Load/Store interleaving instructions are not always beneficial.
/// Replace them by ZIP instructions and classical load/store.
///
/// For example:
///   st2 {v0.4s, v1.4s}, addr
///
/// Is rewritten into:
///   zip1 v2.4s, v0.4s, v1.4s
///   zip2 v3.4s, v0.4s, v1.4s
///   stp  q2, q3, addr
///
/// For example:
///   st4 {v0.4s, v1.4s, v2.4s, v3.4s}, addr
///
/// Is rewritten into:
///   zip1 v4.4s, v0.4s, v2.4s
///   zip2 v5.4s, v0.4s, v2.4s
///   zip1 v6.4s, v1.4s, v3.4s
///   zip2 v7.4s, v1.4s, v3.4s
///   zip1 v8.4s, v4.4s, v6.4s
///   zip2 v9.4s, v4.4s, v6.4s
///   zip1 v10.4s, v5.4s, v7.4s
///   zip2 v11.4s, v5.4s, v7.4s
///   stp  q8, q9, addr
///   stp  q10, q11, addr+32
///
/// Currently only instructions related to ST2 and ST4 are considered.
/// Others may be added later.
/// Return true if the SIMD instruction is modified.
bool AArch64SIMDInstrOpt::optimizeLdStInterleave(MachineInstr &MI) {
  unsigned SeqReg, AddrReg;
  unsigned StReg[4], StRegKill[4];
  MachineInstr *DefiningMI;
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock &MBB = *MI.getParent();
  SmallVector<unsigned, MaxNumRepl> ZipDest;
  SmallVector<const MCInstrDesc *, MaxNumRepl> ReplInstrMCID;

  // If the current instruction matches any of the rewriting rules, then
  // gather information about the parameters of the new instructions.
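  // A matching ST2/ST4 has two operands: a register tuple built by a
  // REG_SEQUENCE instruction (operand 0) and the base address register
  // (operand 1).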
  bool Match = false;
  for (auto &I : IRT) {
    if (MI.getOpcode() == I.OrigOpc) {
      SeqReg = MI.getOperand(0).getReg();
      AddrReg = MI.getOperand(1).getReg();
      DefiningMI = MRI->getUniqueVRegDef(SeqReg);
      unsigned NumReg = determineSrcReg(MI);
      if (!processSeqRegInst(DefiningMI, StReg, StRegKill, NumReg))
        return false;

      for (auto &Repl : I.ReplOpc) {
        ReplInstrMCID.push_back(&TII->get(Repl));
        // Generate destination registers but only for non-store instructions.
        if (Repl != AArch64::STPQi && Repl != AArch64::STPDi)
          ZipDest.push_back(MRI->createVirtualRegister(&I.RC));
      }
      Match = true;
      break;
    }
  }

  if (!Match)
    return false;

  // Determine if it is profitable to replace MI by the series of instructions
  // represented in ReplInstrMCID.
  if (!shouldReplaceInst(MI.getParent()->getParent(), &TII->get(MI.getOpcode()),
                         ReplInstrMCID))
    return false;

  // Generate the replacement instructions composed of ZIP1, ZIP2, and STP.
  // (At this point the code generation is hardcoded and does not rely on the
  // IRT table used above, given that code generation for the ST2 replacement
  // is somewhat different from that for the ST4 replacement. We could have
  // added more information to the table about how to build the new
  // instructions, but that may add more complexity than it is worth.)
  switch (MI.getOpcode()) {
  default:
    return false;

  case AArch64::ST2Twov16b:
  case AArch64::ST2Twov8b:
  case AArch64::ST2Twov8h:
  case AArch64::ST2Twov4h:
  case AArch64::ST2Twov4s:
  case AArch64::ST2Twov2s:
  case AArch64::ST2Twov2d:
    // ZIP instructions. Each source register is read twice, so any kill
    // flags are attached only to the second (last) use.
    BuildMI(MBB, MI, DL, *ReplInstrMCID[0], ZipDest[0])
        .addReg(StReg[0])
        .addReg(StReg[1]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[1], ZipDest[1])
        .addReg(StReg[0], StRegKill[0])
        .addReg(StReg[1], StRegKill[1]);
    // STP instructions
    BuildMI(MBB, MI, DL, *ReplInstrMCID[2])
        .addReg(ZipDest[0])
        .addReg(ZipDest[1])
        .addReg(AddrReg)
        .addImm(0);
    break;

  case AArch64::ST4Fourv16b:
  case AArch64::ST4Fourv8b:
  case AArch64::ST4Fourv8h:
  case AArch64::ST4Fourv4h:
  case AArch64::ST4Fourv4s:
  case AArch64::ST4Fourv2s:
  case AArch64::ST4Fourv2d:
    // ZIP instructions
    BuildMI(MBB, MI, DL, *ReplInstrMCID[0], ZipDest[0])
        .addReg(StReg[0])
        .addReg(StReg[2]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[1], ZipDest[1])
        .addReg(StReg[0], StRegKill[0])
        .addReg(StReg[2], StRegKill[2]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[2], ZipDest[2])
        .addReg(StReg[1])
        .addReg(StReg[3]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[3], ZipDest[3])
        .addReg(StReg[1], StRegKill[1])
        .addReg(StReg[3], StRegKill[3]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[4], ZipDest[4])
        .addReg(ZipDest[0])
        .addReg(ZipDest[2]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[5], ZipDest[5])
        .addReg(ZipDest[0])
        .addReg(ZipDest[2]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[6], ZipDest[6])
        .addReg(ZipDest[1])
        .addReg(ZipDest[3]);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[7], ZipDest[7])
        .addReg(ZipDest[1])
        .addReg(ZipDest[3]);
    // STP instructions. The STP immediate is scaled by the store size
    // (16 bytes for Q registers, 8 bytes for D registers), so an immediate
    // of 2 places the second pair right after the first one in memory.
    BuildMI(MBB, MI, DL, *ReplInstrMCID[8])
        .addReg(ZipDest[4])
        .addReg(ZipDest[5])
        .addReg(AddrReg)
        .addImm(0);
    BuildMI(MBB, MI, DL, *ReplInstrMCID[9])
        .addReg(ZipDest[6])
        .addReg(ZipDest[7])
        .addReg(AddrReg)
        .addImm(2);
    break;
  }

  ++NumModifiedInstr;
  return true;
}

/// Process the REG_SEQUENCE instruction and extract the source
/// operands of the ST2/ST4 instruction from it.
/// Example of such an instruction:
///   %dest = REG_SEQUENCE %st2_src1, dsub0, %st2_src2, dsub1;
/// Return true when the instruction is processed successfully.
bool AArch64SIMDInstrOpt::processSeqRegInst(MachineInstr *DefiningMI,
     unsigned *StReg, unsigned *StRegKill, unsigned NumArg) const {
  assert(DefiningMI != nullptr);
  if (DefiningMI->getOpcode() != AArch64::REG_SEQUENCE)
    return false;

  for (unsigned i = 0; i < NumArg; ++i) {
    StReg[i] = DefiningMI->getOperand(2 * i + 1).getReg();
    StRegKill[i] = getKillRegState(DefiningMI->getOperand(2 * i + 1).isKill());

    // Sanity check for the other arguments: each source register must be
    // paired with a D or Q subregister index.
    if (!DefiningMI->getOperand(2 * i + 2).isImm())
      return false;
    switch (DefiningMI->getOperand(2 * i + 2).getImm()) {
    default:
      return false;

    case AArch64::dsub0:
    case AArch64::dsub1:
    case AArch64::dsub2:
    case AArch64::dsub3:
    case AArch64::qsub0:
    case AArch64::qsub1:
    case AArch64::qsub2:
    case AArch64::qsub3:
      break;
    }
  }
  return true;
}

/// Return the number of useful source registers for this instruction
/// (2 for ST2 and 4 for ST4).
unsigned AArch64SIMDInstrOpt::determineSrcReg(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unsupported instruction for this pass");

  case AArch64::ST2Twov16b:
  case AArch64::ST2Twov8b:
  case AArch64::ST2Twov8h:
  case AArch64::ST2Twov4h:
  case AArch64::ST2Twov4s:
  case AArch64::ST2Twov2s:
  case AArch64::ST2Twov2d:
    return 2;

  case AArch64::ST4Fourv16b:
  case AArch64::ST4Fourv8b:
  case AArch64::ST4Fourv8h:
  case AArch64::ST4Fourv4h:
  case AArch64::ST4Fourv4s:
  case AArch64::ST4Fourv2s:
  case AArch64::ST4Fourv2d:
    return 4;
  }
}

bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  TII = MF.getSubtarget().getInstrInfo();
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  const AArch64InstrInfo *AAII =
      static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
  if (!AAII)
    return false;
  SchedModel.init(&ST);
  if (!SchedModel.hasInstrSchedModel())
    return false;

  bool Changed = false;
  for (auto OptimizationKind : {VectorElem, Interleave}) {
    if (!shouldExitEarly(&MF, OptimizationKind)) {
      SmallVector<MachineInstr *, 8> RemoveMIs;
      for (MachineBasicBlock &MBB : MF) {
        for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end();
             MII != MIE;) {
          MachineInstr &MI = *MII;
          bool InstRewrite;
          if (OptimizationKind == VectorElem)
            InstRewrite = optimizeVectElement(MI);
          else
            InstRewrite = optimizeLdStInterleave(MI);
          if (InstRewrite) {
            // Add MI to the list of instructions to be removed given that it
            // has been replaced.
            RemoveMIs.push_back(&MI);
            Changed = true;
          }
          ++MII;
        }
      }
      for (MachineInstr *MI : RemoveMIs)
        MI->eraseFromParent();
    }
  }

  return Changed;
}

/// Returns an instance of the high cost ASIMD instruction replacement
/// optimization pass.
FunctionPass *llvm::createAArch64SIMDInstrOptPass() {
  return new AArch64SIMDInstrOpt();
}