//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
    cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision",
    cl::desc("enable quad precision float support on ppc"), cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
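
  // Note: the setOperationAction calls throughout this constructor use the
  // standard TargetLowering legalization actions: Legal (the node is selected
  // as-is), Custom (the node is routed to this class's lowering hooks),
  // Expand (the legalizer rewrites it in terms of other operations), and
  // Promote (the node is retyped to another MVT first).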
"llvm/Target/TargetMachine.h" 93 #include "llvm/Target/TargetOptions.h" 94 #include <algorithm> 95 #include <cassert> 96 #include <cstdint> 97 #include <iterator> 98 #include <list> 99 #include <utility> 100 #include <vector> 101 102 using namespace llvm; 103 104 #define DEBUG_TYPE "ppc-lowering" 105 106 static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc", 107 cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden); 108 109 static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref", 110 cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden); 111 112 static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned", 113 cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden); 114 115 static cl::opt<bool> DisableSCO("disable-ppc-sco", 116 cl::desc("disable sibling call optimization on ppc"), cl::Hidden); 117 118 static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32", 119 cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden); 120 121 static cl::opt<bool> EnableQuadPrecision("enable-ppc-quad-precision", 122 cl::desc("enable quad precision float support on ppc"), cl::Hidden); 123 124 static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables", 125 cl::desc("use absolute jump tables on ppc"), cl::Hidden); 126 127 STATISTIC(NumTailCalls, "Number of tail calls"); 128 STATISTIC(NumSiblingCalls, "Number of sibling calls"); 129 130 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int); 131 132 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl); 133 134 // FIXME: Remove this once the bug has been fixed! 135 extern cl::opt<bool> ANDIGlueBug; 136 137 PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, 138 const PPCSubtarget &STI) 139 : TargetLowering(TM), Subtarget(STI) { 140 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all 141 // arguments are at least 4/8 bytes aligned. 142 bool isPPC64 = Subtarget.isPPC64(); 143 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4)); 144 145 // Set up the register classes. 146 addRegisterClass(MVT::i32, &PPC::GPRCRegClass); 147 if (!useSoftFloat()) { 148 if (hasSPE()) { 149 addRegisterClass(MVT::f32, &PPC::GPRCRegClass); 150 addRegisterClass(MVT::f64, &PPC::SPERCRegClass); 151 } else { 152 addRegisterClass(MVT::f32, &PPC::F4RCRegClass); 153 addRegisterClass(MVT::f64, &PPC::F8RCRegClass); 154 } 155 } 156 157 // Match BITREVERSE to customized fast code sequence in the td file. 158 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal); 159 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal); 160 161 // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended. 162 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom); 163 164 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD. 165 for (MVT VT : MVT::integer_valuetypes()) { 166 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote); 167 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand); 168 } 169 170 if (Subtarget.isISA3_0()) { 171 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal); 172 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal); 173 setTruncStoreAction(MVT::f64, MVT::f16, Legal); 174 setTruncStoreAction(MVT::f32, MVT::f16, Legal); 175 } else { 176 // No extending loads from f16 or HW conversions back and forth. 
  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9. On P9, we may
  // use a hardware instruction to compute the remainder. The instructions are
  // not legalized directly because, in the cases where the results of both
  // the remainder and the division are required, it is more efficient to
  // compute the remainder from the result of the division rather than use
  // the remainder instruction.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // We don't support sin/cos/sqrt/fmod/pow.
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA, MVT::f64, Legal);
    setOperationAction(ISD::FMA, MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we have neither a hardware square root nor the reciprocal-estimate
  // instructions usable under unsafe FP math, expand FSQRT.
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);
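
  // Note: when the reciprocal estimates are usable instead (FRE/FRSQRTE with
  // unsafe FP math), division and square root are formed from the estimate
  // plus Newton-Raphson refinement; see the FDIV/FSQRT DAG combines
  // registered near the end of this constructor.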

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64, Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ, MVT::i32, Legal);
    setOperationAction(ISD::CTTZ, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32, Legal);
    setOperationAction(ISD::CTPOP, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  // PowerPC does not have ROTR.
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);
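  // (PPC only provides rotate-left instructions, e.g. rlwnm/rldcl; with ROTR
  // marked Expand, the legalizer rewrites a rotate-right as a rotate-left by
  // the complemented amount.)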

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select.
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP.
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1). Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP support here is NOT intended to implement
  // SjLj exception handling; it is a lightweight setjmp/longjmp replacement
  // used for continuations, user-level threading, and the like. As a result,
  // no other SjLj exception interfaces are implemented; please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
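  // (Each predicate above needs two CR-bit tests on PPC; SETUEQ, for example,
  // is "unordered or equal". Marking them Expand lets the legalizer rewrite
  // them in terms of the single-condition predicates.)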

  if (Subtarget.has64BitSupport()) {
    // 64-bit implementations also have instructions for converting between
    // i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE())
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly.
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or.
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      } else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8.
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      } else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9.
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType(ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v4i32);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v4i32);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v4i32);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType(ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType(ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL, VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL, VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integer that fit in an Altivec/VSX register
    // are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
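    // (A truncate such as v8i16 -> v8i8 can then be a single vector
    // pack/permute instead of eight scalar truncations.)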

    setOperationAction(ISD::AND, MVT::v4i32, Legal);
    setOperationAction(ISD::OR, MVT::v4i32, Legal);
    setOperationAction(ISD::XOR, MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions.
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact exception
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      } else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      if (EnableQuadPrecision) {
        addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
        setOperationAction(ISD::FADD, MVT::f128, Legal);
        setOperationAction(ISD::FSUB, MVT::f128, Legal);
        setOperationAction(ISD::FDIV, MVT::f128, Legal);
        setOperationAction(ISD::FMUL, MVT::f128, Legal);
        setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
        // No extending loads to f128 on PPC.
        for (MVT FPT : MVT::fp_valuetypes())
          setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
        setOperationAction(ISD::FMA, MVT::f128, Legal);
        setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
        setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
        setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

        setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
        setOperationAction(ISD::FRINT, MVT::f128, Legal);
        setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
        setOperationAction(ISD::FCEIL, MVT::f128, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
        setOperationAction(ISD::FROUND, MVT::f128, Legal);

        setOperationAction(ISD::SELECT, MVT::f128, Expand);
        setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
        setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
        setTruncStoreAction(MVT::f128, MVT::f64, Expand);
        setTruncStoreAction(MVT::f128, MVT::f32, Expand);
        setOperationAction(ISD::BITCAST, MVT::i128, Custom);
        // No implementation for these ops for PowerPC.
        setOperationAction(ISD::FSIN, MVT::f128, Expand);
        setOperationAction(ISD::FCOS, MVT::f128, Expand);
        setOperationAction(ISD::FPOW, MVT::f128, Expand);
        setOperationAction(ISD::FPOWI, MVT::f128, Expand);
        setOperationAction(ISD::FREM, MVT::f128, Expand);
      }
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }
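
  // QPX is the quad-processing extension: a 256-bit, 4 x double vector unit
  // found on the A2-based Blue Gene/Q cores.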
  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f64, Custom);
    setOperationAction(ISD::STORE, MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG, MVT::v4f64, Legal);
    setOperationAction(ISD::FABS, MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD, MVT::v4f32, Custom);
    setOperationAction(ISD::STORE, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
    setOperationAction(ISD::FABS, MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND, MVT::v4i1, Legal);
    setOperationAction(ISD::OR, MVT::v4i1, Legal);
    setOperationAction(ISD::XOR, MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
    setOperationAction(ISD::STORE, MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }
  }

  if (Subtarget.has64BitSupport())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     isPPC64 ? Legal : Custom);

  if (!isPPC64) {
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasAltivec()) {
    // Altivec instructions set fields to all zeros or all ones.
    setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  }

  if (!isPPC64) {
    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);

  // We have target-specific DAG combine patterns for the following nodes:
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::BUILD_VECTOR);
  if (Subtarget.hasFPCVT())
    setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::BR_CC);
  if (Subtarget.useCRBits())
    setTargetDAGCombine(ISD::BRCOND);
  setTargetDAGCombine(ISD::BSWAP);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);

  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::ANY_EXTEND);

  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::VECTOR_SHUFFLE);

  if (Subtarget.useCRBits()) {
    setTargetDAGCombine(ISD::TRUNCATE);
    setTargetDAGCombine(ISD::SETCC);
    setTargetDAGCombine(ISD::SELECT_CC);
  }

  // Use reciprocal estimates.
  if (TM.Options.UnsafeFPMath) {
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::FSQRT);
  }

  if (Subtarget.hasP9Altivec()) {
    setTargetDAGCombine(ISD::ABS);
    setTargetDAGCombine(ISD::VSELECT);
  }
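
  // Note: setTargetDAGCombine only registers interest in an opcode; the
  // actual rewrites for these nodes live in
  // PPCTargetLowering::PerformDAGCombine.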

  // Darwin long double math library functions have $LDBL128 appended.
  if (Subtarget.isDarwin()) {
    setLibcallName(RTLIB::COS_PPCF128, "cosl$LDBL128");
    setLibcallName(RTLIB::POW_PPCF128, "powl$LDBL128");
    setLibcallName(RTLIB::REM_PPCF128, "fmodl$LDBL128");
    setLibcallName(RTLIB::SIN_PPCF128, "sinl$LDBL128");
    setLibcallName(RTLIB::SQRT_PPCF128, "sqrtl$LDBL128");
    setLibcallName(RTLIB::LOG_PPCF128, "logl$LDBL128");
    setLibcallName(RTLIB::LOG2_PPCF128, "log2l$LDBL128");
    setLibcallName(RTLIB::LOG10_PPCF128, "log10l$LDBL128");
    setLibcallName(RTLIB::EXP_PPCF128, "expl$LDBL128");
    setLibcallName(RTLIB::EXP2_PPCF128, "exp2l$LDBL128");
  }

  if (EnableQuadPrecision) {
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::REM_F128, "fmodf128");
  }

  // With 32 condition bits, we don't need to sink (and duplicate) compares
  // aggressively in CodeGenPrep.
  if (Subtarget.useCRBits()) {
    setHasMultipleConditionRegisters();
    setJumpIsExpensive();
  }

  setMinFunctionAlignment(Align(4));
  if (Subtarget.isDarwin())
    setPrefFunctionAlignment(Align(16));

  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE:
    setPrefLoopAlignment(Align(16));
    setPrefFunctionAlignment(Align(16));
    break;
  }

  if (Subtarget.enableMachineScheduler())
    setSchedulingPreference(Sched::Source);
  else
    setSchedulingPreference(Sched::Hybrid);

  computeRegisterProperties(STI.getRegisterInfo());

  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of a function call, even when warm, can be over
    // one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign,
                             unsigned MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
      MaxAlign = 32;
    else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
      MaxAlign = 16;
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    unsigned EltAlign = 0;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      unsigned EltAlign = 0;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}
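
// For example, a byval struct containing a 128-bit vector member yields
// MaxAlign = 16 on an Altivec subtarget, while a 256-bit QPX vector yields
// MaxAlign = 32; MaxMaxAlign caps the search at the widest supported vector.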
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
  case PPCISD::STXSIX:          return "PPCISD::STXSIX";
  case PPCISD::VEXTS:           return "PPCISD::VEXTS";
  case PPCISD::SExtVElems:      return "PPCISD::SExtVElems";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
    return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB:           return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD:           return "PPCISD::VABSD";
  case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT:           return "PPCISD::QBFLT";
  case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
  case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}
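// For illustration (example added for exposition): on a big-endian target
// with two distinct inputs (ShuffleKind 0), isVPKUHUMShuffleMask below
// accepts exactly the mask that keeps the odd bytes of the concatenated
// inputs, i.e. <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>.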
/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),   i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
        return false;
  }
  return true;
}

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8), i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
        return false;
  }
  return true;
}
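// For illustration (example added for exposition): the big-endian two-input
// form of isVPKUDUMShuffleMask below (ShuffleKind 0) keeps the low word of
// each doubleword across both inputs, so the accepted mask is
//   <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>.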
/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ), i*2)   ||
          !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
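// For illustration (example added for exposition): a big-endian two-input
// vmrglb (UnitSize 1, ShuffleKind 0) corresponds to isVMerge(N, 1, 8, 24),
// i.e. the byte-interleaving mask
//   <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>.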
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2). For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, thus each vector will contain 16
 * byte-sized elements. More info on the shuffle vector can be found in the
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 * Language Reference.
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStart value passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31.
 *     In this case, the RHSStart value passed should be 16 (indices 0-15
 *     specify elements in the first vector while indices 16 to 31 specify
 *     elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 *            input vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
  return false;
}
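// For illustration (example added for exposition): on big-endian with two
// different inputs (ShuffleKind 0), the consecutive mask
// <3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18> is a vsldoi by 3 bytes, so the
// routine below returns 3; for the same consecutive mask on little-endian
// (ShuffleKind 2), the byte order is reversed and it returns 16 - 3 = 13.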
/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2). For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements.  So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
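// For illustration (example added for exposition): with Width = 4 and
// StepLen = 1, isNByteElemShuffleMask below accepts the word-rearranging mask
// <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11>, since each group of four byte
// indices starts on a word boundary and increases consecutively.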
/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta between adjacent byte indices within an
/// element: 1 if the mask is in increasing order, -1 if decreasing.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; //  Width is never greater than 16
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}
bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}
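// For illustration (example added for exposition): isXXBRWShuffleMask accepts
// exactly the mask that byte-reverses each word,
//   <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12>:
// isNByteElemShuffleMask(N, 4, -1) verifies the descending byte order within
// each word, and the helper's final loop pins the leading index of element i
// to i + Width - 1.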
/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second
/// input (BE). Set \p DM to the calculated result (0-3) only if \p N can be
/// lowered.
/// \return true iff the given mask of shuffle node \p N is an XXPERMDI
/// shuffle mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will be
  // undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}
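// For illustration (example added for exposition): with EltSize = 4 on
// little-endian, a splat whose first mask byte is 4 (LE word 1) yields
// (16/4) - 1 - (4/4) = 2 above, i.e. the element number counted from the
// left of the register, which is what the vspltw mnemonic expects.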
/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its
  // elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case that we could fit the replicated bits into our
  // immediate field for would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();
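  // For illustration (example added for exposition): a v8i16 build_vector
  // whose lanes are all 0xFFFE reaches this point with Value = 0xFFFE;
  // sign-extending from 16 bits gives -2, which fits a 5-bit signed
  // immediate, so the code below returns -2 (i.e. vspltish(-2)).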
  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}

/// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the
/// shift amount, otherwise return -1.
int PPC::isQVALIGNIShuffleMask(SDNode *N) {
  EVT VT = N->getValueType(0);
  if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 4) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;
  ShiftAmt -= i;

  // Check the rest of the elements to see if they are consecutive.
  for (++i; i != 4; ++i)
    if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
      return -1;

  return ShiftAmt;
}

//===----------------------------------------------------------------------===//
// Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and sets Imm
/// to the immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
  if (!isa<ConstantSDNode>(N))
    return false;

  Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
  else
    return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
  return isIntS16Immediate(Op.getNode(), Imm);
}

/// SelectAddressEVXRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
       UI != E; ++UI) {
    if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }
  return false;
}
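// For illustration (example added for exposition): in SelectAddressRegReg
// below, N = (add X, 16388) with EncodingAlignment = 4 fits a signed 16-bit
// displacement and 16388 % 4 == 0, so the function returns false in favour
// of [r+imm]; with an offset of 16386 the alignment test fails and the
// address is selected as [r+r] instead.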
/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented as [r+imm].  If \p EncodingAlignment
/// is non-zero and N can be represented by a base register plus a signed
/// 16-bit displacement, make a more precise judgement by checking
/// (displacement % \p EncodingAlignment).
bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
                                            SDValue &Index, SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD) {
    // SPE f64 load/store can only handle 8-bit offsets, not the usual 16-bit
    // ones, so try to match an EVX [r+r] form first.
    if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
      return true;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i
    if (N.getOperand(1).getOpcode() == PPCISD::Lo)
      return false; // r+i

    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  } else if (N.getOpcode() == ISD::OR) {
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || !(imm % EncodingAlignment)))
      return false; // r+i if we can fold it.

    // If this is an or of disjoint bitfields, we can codegen this as an add
    // (for better address arithmetic) if the LHS and RHS of the OR are
    // provably disjoint.
    KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

    if (LHSKnown.Zero.getBoolValue()) {
      KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
      // If all of the bits are known zero on the LHS or RHS, the add won't
      // carry.
      if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
        Base = N.getOperand(0);
        Index = N.getOperand(1);
        return true;
      }
    }
  }

  return false;
}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
  // FIXME: This does not handle the LWA case.
  if (VT != MVT::i64)
    return;

  // NOTE: We'll exclude negative FIs here, which come from argument
  // lowering, because there are no known test cases triggering this problem
  // using packed structures (or similar). We can remove this exclusion if
  // we find such a test case. The reason why this is so test-case driven is
  // because this entire 'fixup' is only to prevent crashes (from the
  // register scavenger) on not-really-valid inputs. For example, if we have:
  //   %a = alloca i1
  //   %b = bitcast i1* %a to i64*
  //   store i64 0, i64* %b
  // then the store should really be marked as 'align 1', but is not. If it
  // were marked as 'align 1' then the indexed form would have been
  // instruction-selected initially, and the problem this 'fixup' is preventing
  // won't happen regardless.
  if (FrameIdx < 0)
    return;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  unsigned Align = MFI.getObjectAlignment(FrameIdx);
  if (Align >= 4)
    return;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasNonRISpills();
}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                            SDValue &Base,
                                            SelectionDAG &DAG,
                                            unsigned EncodingAlignment) const {
  // FIXME dl should come from parent load or store, not from address
  SDLoc dl(N);
  // If this can be more profitably realized as r+r, fail.
  if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
    return false;

  if (N.getOpcode() == ISD::ADD) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
      Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
        fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
      } else {
        Base = N.getOperand(0);
      }
      return true; // [r+i]
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      // Match LOAD (ADD (X, Lo(G))).
      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
             && "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);  // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      return true; // [&g+r]
    }
  } else if (N.getOpcode() == ISD::OR) {
    int16_t imm = 0;
    if (isIntS16Immediate(N.getOperand(1), imm) &&
        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
      // If this is an or of disjoint bitfields, we can codegen this as an add
      // (for better address arithmetic) if the LHS and RHS of the OR are
      // provably disjoint.
      KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));

      if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
        // If all of the bits are known zero on the LHS or RHS, the add won't
        // carry.
        if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        } else {
          Base = N.getOperand(0);
        }
        Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
        return true;
      }
    }
  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // Loading from a constant address.

    // If this address fits entirely in a 16-bit sext immediate field, codegen
    // this as "d, 0".
    int16_t Imm;
    if (isIntS16Immediate(CN, Imm) &&
        (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
      Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
      Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                             CN->getValueType(0));
      return true;
    }
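    // For illustration (example added for exposition): a constant address
    // such as 0x12348000 does not fit in 16 bits, so the next case splits it
    // as LIS 0x1235 (materializing 0x12350000) plus a signed displacement of
    // -32768 (0x8000), which adds back to 0x12348000.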
    // Handle 32-bit sext immediates with LIS + addr mode.
    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
      int Addr = (int)CN->getZExtValue();

      // Otherwise, break this down into an LIS + disp.
      Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);

      Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
                                   MVT::i32);
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
      Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
      return true;
    }
  }

  Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
    fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
  } else
    Base = N;
  return true; // [r+0]
}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {
  // Check to see if we can easily represent this as an [r+r] address.  This
  // will fail if it thinks that the address is more profitably represented as
  // reg+imm, e.g. where imm = 0.
  if (SelectAddressRegReg(N, Base, Index, DAG))
    return true;

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add.  However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register.  We only get rid of the add if it is not an add of a
  // value and a 16-bit signed constant and both have a single use.
  int16_t imm = 0;
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), imm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);
    Index = N.getOperand(1);
    return true;
  }

  // Otherwise, do it the hard way, using R0 as the base register.
  Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Index = N;
  return true;
}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {

  // If there are any uses other than scalar to vector, then we should
  // keep it as a scalar load -> direct move pattern to prevent multiple
  // loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD)
    return false;

  EVT MemVT = LD->getMemoryVT();
  if (!MemVT.isSimple())
    return false;
  switch(MemVT.getSimpleVT().SimpleTy) {
  case MVT::i64:
    break;
  case MVT::i32:
    if (!ST.hasP8Vector())
      return false;
    break;
  case MVT::i16:
  case MVT::i8:
    if (!ST.hasP9Vector())
      return false;
    break;
  default:
    return false;
  }

  SDValue LoadedVal(N, 0);
  if (!LoadedVal.hasOneUse())
    return false;

  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
       UI != UE; ++UI)
    if (UI.getUse().get().getResNo() == 0 &&
        UI->getOpcode() != ISD::SCALAR_TO_VECTOR)
      return false;

  return true;
}

/// getPreIndexedAddressParts - returns true by value, and sets the base
/// pointer, the offset pointer, and the addressing mode by reference, if the
/// node's address can be legally represented as a pre-indexed load / store
/// address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  if (DisablePPCPreinc) return false;

  bool isLoad = true;
  SDValue Ptr;
  EVT VT;
  unsigned Alignment;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlignment();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlignment();
    isLoad = false;
  } else
    return false;

  // Do not generate pre-inc forms for specific loads that feed
  // scalar_to_vector instructions, because we can fold these into a more
  // efficient instruction instead (such as LXSD).
  if (isLoad && usePartialVectorLoads(N, Subtarget)) {
    return false;
  }

  // PowerPC doesn't have preinc load/store instructions for vectors (except
  // for QPX, which does have preinc r+r forms).
  if (VT.isVector()) {
    if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
      return false;
    } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
      AM = ISD::PRE_INC;
      return true;
    }
  }

  if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
    // Common code will reject creating a pre-inc form if the base pointer
    // is a frame index, or if N is a store and the base pointer is either
    // the same as or a predecessor of the value being stored.  Check for
    // those situations here, and try with swapped Base/Offset instead.
    bool Swap = false;

    if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
      Swap = true;
    else if (!isLoad) {
      SDValue Val = cast<StoreSDNode>(N)->getValue();
      if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
        Swap = true;
    }

    if (Swap)
      std::swap(Base, Offset);

    AM = ISD::PRE_INC;
    return true;
  }

  // LDU/STU can only handle immediates that are a multiple of 4.
  if (VT != MVT::i64) {
    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
      return false;
  } else {
    // LDU/STU need an address with at least 4-byte alignment.
    if (Alignment < 4)
      return false;

    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
      return false;
  }

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
    // sext i32 to i64 when addr mode is r+i.
    if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
        LD->getExtensionType() == ISD::SEXTLOAD &&
        isa<ConstantSDNode>(Offset))
      return false;
  }

  AM = ISD::PRE_INC;
  return true;
}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Set the HiOpFlags and LoOpFlags to the target MO flags, marking them with
/// the PIC flag when labels must be referenced via the PIC base.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {
  HiOpFlags = PPCII::MO_HA;
  LoOpFlags = PPCII::MO_LO;

  // Don't use the pic base if not in PIC relocation model.
  if (IsPIC) {
    HiOpFlags |= PPCII::MO_PIC_FLAG;
    LoOpFlags |= PPCII::MO_PIC_FLAG;
  }

  // If this is a reference to a global value that requires a non-lazy-ptr,
  // make sure that instruction lowering adds it.
  if (GV && Subtarget.hasLazyResolverStub(GV)) {
    HiOpFlags |= PPCII::MO_NLP_FLAG;
    LoOpFlags |= PPCII::MO_NLP_FLAG;

    if (GV->hasHiddenVisibility()) {
      HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
      LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
    }
  }
}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {
  SDLoc DL(HiPart);
  EVT PtrVT = HiPart.getValueType();
  SDValue Zero = DAG.getConstant(0, DL, PtrVT);

  SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
  SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

  // With PIC, the first instruction is actually "GR+hi(&G)".
  if (isPIC)
    Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
                     DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

  // Generate non-pic code that has direct accesses to the constant pool.
  // The address of the global is just (hi(&g)+lo(&g)).
  return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
}

static void setUsesTOCBasePtr(MachineFunction &MF) {
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setUsesTOCBasePtr();
}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {
  setUsesTOCBasePtr(DAG.getMachineFunction());
}
SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue GA) const {
  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
  SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
                        : Subtarget.isAIXABI()
                              ? DAG.getRegister(PPC::R2, VT)
                              : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
  SDValue Ops[] = { GA, Reg };
  return DAG.getMemIntrinsicNode(
      PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()), 0,
      MachineMemOperand::MOLoad);
}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CP->getConstVal();

  // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
  // The actual address of the GlobalValue is stored in the TOC.
  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
    setUsesTOCBasePtr(DAG);
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  unsigned MOHiFlag, MOLoFlag;
  bool IsPIC = isPositionIndependent();
  getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);

  if (IsPIC && Subtarget.isSVR4ABI()) {
    SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(),
                                           PPCII::MO_PIC_FLAG);
    return getTOCEntry(DAG, SDLoc(CP), GA);
  }

  SDValue CPIHi =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOHiFlag);
  SDValue CPILo =
      DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0, MOLoFlag);
  return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
}

// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// on the jump site.
unsigned PPCTargetLowering::getJumpTableEncoding() const {
  if (isJumpTableRelative())
    return MachineJumpTableInfo::EK_LabelDifference32;

  return TargetLowering::getJumpTableEncoding();
}

bool PPCTargetLowering::isJumpTableRelative() const {
  if (UseAbsoluteJumpTables)
    return false;
  if (Subtarget.isPPC64() || Subtarget.isAIXABI())
    return true;
  return TargetLowering::isJumpTableRelative();
}

SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
  default:
    return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  }
}

const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {
  if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
  case CodeModel::Medium:
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
  default:
    return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
  }
}
2850 // The actual address of the GlobalValue is stored in the TOC. 2851 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2852 setUsesTOCBasePtr(DAG); 2853 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); 2854 return getTOCEntry(DAG, SDLoc(JT), GA); 2855 } 2856 2857 unsigned MOHiFlag, MOLoFlag; 2858 bool IsPIC = isPositionIndependent(); 2859 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2860 2861 if (IsPIC && Subtarget.isSVR4ABI()) { 2862 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, 2863 PPCII::MO_PIC_FLAG); 2864 return getTOCEntry(DAG, SDLoc(GA), GA); 2865 } 2866 2867 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); 2868 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag); 2869 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG); 2870 } 2871 2872 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, 2873 SelectionDAG &DAG) const { 2874 EVT PtrVT = Op.getValueType(); 2875 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op); 2876 const BlockAddress *BA = BASDN->getBlockAddress(); 2877 2878 // 64-bit SVR4 ABI and AIX ABI code are always position-independent. 2879 // The actual BlockAddress is stored in the TOC. 2880 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 2881 setUsesTOCBasePtr(DAG); 2882 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()); 2883 return getTOCEntry(DAG, SDLoc(BASDN), GA); 2884 } 2885 2886 // 32-bit position-independent ELF stores the BlockAddress in the .got. 2887 if (Subtarget.is32BitELFABI() && isPositionIndependent()) 2888 return getTOCEntry( 2889 DAG, SDLoc(BASDN), 2890 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset())); 2891 2892 unsigned MOHiFlag, MOLoFlag; 2893 bool IsPIC = isPositionIndependent(); 2894 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag); 2895 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); 2896 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); 2897 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG); 2898 } 2899 2900 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op, 2901 SelectionDAG &DAG) const { 2902 // FIXME: TLS addresses currently use medium model code sequences, 2903 // which is the most useful form. Eventually support for small and 2904 // large models could be added if users need it, at the cost of 2905 // additional complexity. 2906 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); 2907 if (DAG.getTarget().useEmulatedTLS()) 2908 return LowerToTLSEmulatedModel(GA, DAG); 2909 2910 SDLoc dl(GA); 2911 const GlobalValue *GV = GA->getGlobal(); 2912 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2913 bool is64bit = Subtarget.isPPC64(); 2914 const Module *M = DAG.getMachineFunction().getFunction().getParent(); 2915 PICLevel::Level picLevel = M->getPICLevel(); 2916 2917 const TargetMachine &TM = getTargetMachine(); 2918 TLSModel::Model Model = TM.getTLSModel(GV); 2919 2920 if (Model == TLSModel::LocalExec) { 2921 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2922 PPCII::MO_TPREL_HA); 2923 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2924 PPCII::MO_TPREL_LO); 2925 SDValue TLSReg = is64bit ? 
DAG.getRegister(PPC::X13, MVT::i64) 2926 : DAG.getRegister(PPC::R2, MVT::i32); 2927 2928 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg); 2929 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi); 2930 } 2931 2932 if (Model == TLSModel::InitialExec) { 2933 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2934 SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 2935 PPCII::MO_TLS); 2936 SDValue GOTPtr; 2937 if (is64bit) { 2938 setUsesTOCBasePtr(DAG); 2939 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2940 GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, 2941 PtrVT, GOTReg, TGA); 2942 } else { 2943 if (!TM.isPositionIndependent()) 2944 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT); 2945 else if (picLevel == PICLevel::SmallPIC) 2946 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2947 else 2948 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2949 } 2950 SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, 2951 PtrVT, TGA, GOTPtr); 2952 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS); 2953 } 2954 2955 if (Model == TLSModel::GeneralDynamic) { 2956 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2957 SDValue GOTPtr; 2958 if (is64bit) { 2959 setUsesTOCBasePtr(DAG); 2960 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2961 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT, 2962 GOTReg, TGA); 2963 } else { 2964 if (picLevel == PICLevel::SmallPIC) 2965 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2966 else 2967 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2968 } 2969 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT, 2970 GOTPtr, TGA, TGA); 2971 } 2972 2973 if (Model == TLSModel::LocalDynamic) { 2974 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0); 2975 SDValue GOTPtr; 2976 if (is64bit) { 2977 setUsesTOCBasePtr(DAG); 2978 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64); 2979 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT, 2980 GOTReg, TGA); 2981 } else { 2982 if (picLevel == PICLevel::SmallPIC) 2983 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT); 2984 else 2985 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT); 2986 } 2987 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl, 2988 PtrVT, GOTPtr, TGA, TGA); 2989 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, 2990 PtrVT, TLSAddr, TGA); 2991 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA); 2992 } 2993 2994 llvm_unreachable("Unknown TLS model!"); 2995 } 2996 2997 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op, 2998 SelectionDAG &DAG) const { 2999 EVT PtrVT = Op.getValueType(); 3000 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); 3001 SDLoc DL(GSDN); 3002 const GlobalValue *GV = GSDN->getGlobal(); 3003 3004 // 64-bit SVR4 ABI & AIX ABI code is always position-independent. 3005 // The actual address of the GlobalValue is stored in the TOC. 
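  // Illustrative sketch (not emitted here; the exact relocations are chosen
  // later during instruction selection and MC lowering): at the medium code
  // model the TOC_ENTRY node produced below typically becomes
  //   addis 3, 2, g@toc@ha
  //   ld    3, g@toc@l(3)
  // while the small code model can fold the access into a single
  // TOC-relative load off the TOC pointer in X2.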
3006 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) { 3007 setUsesTOCBasePtr(DAG); 3008 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset()); 3009 return getTOCEntry(DAG, DL, GA); 3010 } 3011 3012 unsigned MOHiFlag, MOLoFlag; 3013 bool IsPIC = isPositionIndependent(); 3014 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV); 3015 3016 if (IsPIC && Subtarget.isSVR4ABI()) { 3017 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 3018 GSDN->getOffset(), 3019 PPCII::MO_PIC_FLAG); 3020 return getTOCEntry(DAG, DL, GA); 3021 } 3022 3023 SDValue GAHi = 3024 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); 3025 SDValue GALo = 3026 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); 3027 3028 SDValue Ptr = LowerLabelRef(GAHi, GALo, IsPIC, DAG); 3029 3030 // If the global reference is actually to a non-lazy-pointer, we have to do an 3031 // extra load to get the address of the global. 3032 if (MOHiFlag & PPCII::MO_NLP_FLAG) 3033 Ptr = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo()); 3034 return Ptr; 3035 } 3036 3037 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { 3038 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 3039 SDLoc dl(Op); 3040 3041 if (Op.getValueType() == MVT::v2i64) { 3042 // When the operands themselves are v2i64 values, we need to do something 3043 // special because VSX has no underlying comparison operations for these. 3044 if (Op.getOperand(0).getValueType() == MVT::v2i64) { 3045 // Equality can be handled by casting to the legal type for Altivec 3046 // comparisons, everything else needs to be expanded. 3047 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 3048 return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, 3049 DAG.getSetCC(dl, MVT::v4i32, 3050 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)), 3051 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)), 3052 CC)); 3053 } 3054 3055 return SDValue(); 3056 } 3057 3058 // We handle most of these in the usual way. 3059 return Op; 3060 } 3061 3062 // If we're comparing for equality to zero, expose the fact that this is 3063 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can 3064 // fold the new nodes. 3065 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG)) 3066 return V; 3067 3068 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 3069 // Leave comparisons against 0 and -1 alone for now, since they're usually 3070 // optimized. FIXME: revisit this when we can custom lower all setcc 3071 // optimizations. 3072 if (C->isAllOnesValue() || C->isNullValue()) 3073 return SDValue(); 3074 } 3075 3076 // If we have an integer seteq/setne, turn it into a compare against zero 3077 // by xor'ing the rhs with the lhs, which is faster than setting a 3078 // condition register, reading it back out, and masking the correct bit. The 3079 // normal approach here uses sub to do this instead of xor. Using xor exposes 3080 // the result to other bit-twiddling opportunities. 
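  // For example, a 32-bit (a == b) can then be selected as
  //   xor    r, a, b
  //   cntlzw r, r
  //   srwi   r, r, 5
  // since cntlzw returns 32 exactly when its input is zero. (Illustrative
  // sequence only; the actual folding happens in later combines/selection.)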
  EVT LHSVT = Op.getOperand(0).getValueType();
  if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    EVT VT = Op.getValueType();
    SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
                              Op.getOperand(1));
    return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
  }
  return SDValue();
}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);

  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");

  // gpr_index
  SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    VAListPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = GprIndex.getValue(1);

  if (VT == MVT::i64) {
    // Check if GprIndex is even
    SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
                                 DAG.getConstant(1, dl, MVT::i32));
    SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
                                DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
    SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
                                          DAG.getConstant(1, dl, MVT::i32));
    // Align GprIndex to be even if it isn't
    GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
                           GprIndex);
  }

  // fpr index is 1 byte after gpr
  SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                               DAG.getConstant(1, dl, MVT::i32));

  // fpr
  SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
                                    FprPtr, MachinePointerInfo(SV), MVT::i8);
  InChain = FprIndex.getValue(1);

  SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                       DAG.getConstant(8, dl, MVT::i32));

  SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
                                        DAG.getConstant(4, dl, MVT::i32));

  // areas
  SDValue OverflowArea =
      DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
  InChain = OverflowArea.getValue(1);

  SDValue RegSaveArea =
      DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
  InChain = RegSaveArea.getValue(1);

  // CC is true exactly when the relevant index is still below 8, i.e. the
  // argument was passed in a register; otherwise it must be loaded from the
  // overflow area.
  SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
                            DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);

  // adjustment constant gpr_index * 4/8
  SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
                                    VT.isInteger() ? GprIndex : FprIndex,
                                    DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
                                                    MVT::i32));

  // OurReg = RegSaveArea + RegConstant
  SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
                               RegConstant);

  // Floating-point values start 32 bytes into the RegSaveArea, after the
  // eight 4-byte GPR slots.
  if (VT.isFloatingPoint())
    OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
                         DAG.getConstant(32, dl, MVT::i32));

  // increase {f,g}pr_index by 1 (or 2 if VT is i64)
  SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                                   VT.isInteger() ? GprIndex : FprIndex,
                                   DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
                                                   MVT::i32));

  InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
                              VT.isInteger() ? VAListPtr : FprPtr,
                              MachinePointerInfo(SV), MVT::i8);
  // determine if we should load from reg_save_area or overflow_area
  SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);

  // Bump overflow_area by 4/8 bytes so it points past this argument whenever
  // the argument was taken from the overflow area (index >= 8).
  SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
                                          DAG.getConstant(VT.isInteger() ? 4 : 8,
                                                          dl, MVT::i32));

  OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
                             OverflowAreaPlusN);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
                              MachinePointerInfo(), MVT::i32);

  return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  // We have to copy the entire va_list struct:
  // 2 * sizeof(char) + 2 bytes of padding + 2 * sizeof(char *) = 12 bytes
  return DAG.getMemcpy(Op.getOperand(0), Op,
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(12, SDLoc(Op), MVT::i32), 8, false, true,
                       false, MachinePointerInfo(), MachinePointerInfo());
}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {
  if (Subtarget.isAIXABI())
    report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");

  return Op.getOperand(0);
}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  if (Subtarget.isAIXABI())
    report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");

  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  bool isPPC64 = (PtrVT == MVT::i64);
  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp; Args.push_back(Entry);

  // TrampSize == (isPPC64 ? 48 : 40);
  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr; Args.push_back(Entry);
  Entry.Node = Nest; Args.push_back(Entry);

  // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getVoidTy(*DAG.getContext()),
      DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDLoc dl(Op);

  if (Subtarget.isDarwinABI() || Subtarget.isPPC64()) {
    // vastart just stores the address of the VarArgsFrameIndex slot into the
    // memory location argument.
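    // On these ABIs va_list is a plain pointer, so va_start reduces to the
    // single store built below; as a sketch, for
    //   va_list ap; va_start(ap, last);
    // the lowered code simply stores the address of the vararg frame slot
    // into ap.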
3258 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3259 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3260 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), 3261 MachinePointerInfo(SV)); 3262 } 3263 3264 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct. 3265 // We suppose the given va_list is already allocated. 3266 // 3267 // typedef struct { 3268 // char gpr; /* index into the array of 8 GPRs 3269 // * stored in the register save area 3270 // * gpr=0 corresponds to r3, 3271 // * gpr=1 to r4, etc. 3272 // */ 3273 // char fpr; /* index into the array of 8 FPRs 3274 // * stored in the register save area 3275 // * fpr=0 corresponds to f1, 3276 // * fpr=1 to f2, etc. 3277 // */ 3278 // char *overflow_arg_area; 3279 // /* location on stack that holds 3280 // * the next overflow argument 3281 // */ 3282 // char *reg_save_area; 3283 // /* where r3:r10 and f1:f8 (if saved) 3284 // * are stored 3285 // */ 3286 // } va_list[1]; 3287 3288 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32); 3289 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32); 3290 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(), 3291 PtrVT); 3292 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), 3293 PtrVT); 3294 3295 uint64_t FrameOffset = PtrVT.getSizeInBits()/8; 3296 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT); 3297 3298 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1; 3299 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT); 3300 3301 uint64_t FPROffset = 1; 3302 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT); 3303 3304 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); 3305 3306 // Store first byte : number of int regs 3307 SDValue firstStore = 3308 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1), 3309 MachinePointerInfo(SV), MVT::i8); 3310 uint64_t nextOffset = FPROffset; 3311 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1), 3312 ConstFPROffset); 3313 3314 // Store second byte : number of float regs 3315 SDValue secondStore = 3316 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr, 3317 MachinePointerInfo(SV, nextOffset), MVT::i8); 3318 nextOffset += StackOffset; 3319 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset); 3320 3321 // Store second word : arguments given on stack 3322 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr, 3323 MachinePointerInfo(SV, nextOffset)); 3324 nextOffset += FrameOffset; 3325 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset); 3326 3327 // Store third word : arguments given in registers 3328 return DAG.getStore(thirdStore, dl, FR, nextPtr, 3329 MachinePointerInfo(SV, nextOffset)); 3330 } 3331 3332 /// FPR - The set of FP registers that should be allocated for arguments 3333 /// on Darwin and AIX. 3334 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, 3335 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10, 3336 PPC::F11, PPC::F12, PPC::F13}; 3337 3338 /// QFPR - The set of QPX registers that should be allocated for arguments. 3339 static const MCPhysReg QFPR[] = { 3340 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7, 3341 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13}; 3342 3343 /// CalculateStackSlotSize - Calculates the size reserved for this argument on 3344 /// the stack. 
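/// For example, with an 8-byte pointer size, an i32 argument still reserves a
/// full 8-byte slot and a 13-byte byval aggregate reserves 16 bytes, while a
/// member of a split array (isInConsecutiveRegs) stays at its packed size.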
3345 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, 3346 unsigned PtrByteSize) { 3347 unsigned ArgSize = ArgVT.getStoreSize(); 3348 if (Flags.isByVal()) 3349 ArgSize = Flags.getByValSize(); 3350 3351 // Round up to multiples of the pointer size, except for array members, 3352 // which are always packed. 3353 if (!Flags.isInConsecutiveRegs()) 3354 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3355 3356 return ArgSize; 3357 } 3358 3359 /// CalculateStackSlotAlignment - Calculates the alignment of this argument 3360 /// on the stack. 3361 static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, 3362 ISD::ArgFlagsTy Flags, 3363 unsigned PtrByteSize) { 3364 unsigned Align = PtrByteSize; 3365 3366 // Altivec parameters are padded to a 16 byte boundary. 3367 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 || 3368 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 || 3369 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 || 3370 ArgVT == MVT::v1i128 || ArgVT == MVT::f128) 3371 Align = 16; 3372 // QPX vector types stored in double-precision are padded to a 32 byte 3373 // boundary. 3374 else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1) 3375 Align = 32; 3376 3377 // ByVal parameters are aligned as requested. 3378 if (Flags.isByVal()) { 3379 unsigned BVAlign = Flags.getByValAlign(); 3380 if (BVAlign > PtrByteSize) { 3381 if (BVAlign % PtrByteSize != 0) 3382 llvm_unreachable( 3383 "ByVal alignment is not a multiple of the pointer size"); 3384 3385 Align = BVAlign; 3386 } 3387 } 3388 3389 // Array members are always packed to their original alignment. 3390 if (Flags.isInConsecutiveRegs()) { 3391 // If the array member was split into multiple registers, the first 3392 // needs to be aligned to the size of the full type. (Except for 3393 // ppcf128, which is only aligned as its f64 components.) 3394 if (Flags.isSplit() && OrigVT != MVT::ppcf128) 3395 Align = OrigVT.getStoreSize(); 3396 else 3397 Align = ArgVT.getStoreSize(); 3398 } 3399 3400 return Align; 3401 } 3402 3403 /// CalculateStackSlotUsed - Return whether this argument will use its 3404 /// stack slot (instead of being passed in registers). ArgOffset, 3405 /// AvailableFPRs, and AvailableVRs must hold the current argument 3406 /// position, and will be updated to account for this argument. 3407 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, 3408 ISD::ArgFlagsTy Flags, 3409 unsigned PtrByteSize, 3410 unsigned LinkageSize, 3411 unsigned ParamAreaSize, 3412 unsigned &ArgOffset, 3413 unsigned &AvailableFPRs, 3414 unsigned &AvailableVRs, bool HasQPX) { 3415 bool UseMemory = false; 3416 3417 // Respect alignment of argument on the stack. 3418 unsigned Align = 3419 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize); 3420 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3421 // If there's no space left in the argument save area, we must 3422 // use memory (this check also catches zero-sized arguments). 3423 if (ArgOffset >= LinkageSize + ParamAreaSize) 3424 UseMemory = true; 3425 3426 // Allocate argument on the stack. 
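  // (For instance, with PtrByteSize == 8 a 20-byte byval argument advances
  // ArgOffset by 24, since CalculateStackSlotSize rounds up to a multiple of
  // the pointer size.)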
  ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
  // If we overran the argument save area, we must use memory
  // (this check catches arguments passed partially in memory).
  if (ArgOffset > LinkageSize + ParamAreaSize)
    UseMemory = true;

  // However, if the argument is actually passed in an FPR or a VR,
  // we don't use memory after all.
  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
        // QPX registers overlap with the scalar FP registers.
        (HasQPX && (ArgVT == MVT::v4f32 ||
                    ArgVT == MVT::v4f64 ||
                    ArgVT == MVT::v4i1)))
      if (AvailableFPRs > 0) {
        --AvailableFPRs;
        return false;
      }
    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
      if (AvailableVRs > 0) {
        --AvailableVRs;
        return false;
      }
  }

  return UseMemory;
}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  unsigned TargetAlign = Lowering->getStackAlignment();
  unsigned AlignMask = TargetAlign - 1;
  NumBytes = (NumBytes + AlignMask) & ~AlignMask;
  return NumBytes;
}

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isAIXABI())
    return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                    InVals);
  if (Subtarget.is64BitELFABI())
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                       InVals);
  if (Subtarget.is32BitELFABI())
    return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                       InVals);

  return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                     InVals);
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //
+-----------------------------------+ 3517 // 3518 // Specifications: 3519 // System V Application Binary Interface PowerPC Processor Supplement 3520 // AltiVec Technology Programming Interface Manual 3521 3522 MachineFunction &MF = DAG.getMachineFunction(); 3523 MachineFrameInfo &MFI = MF.getFrameInfo(); 3524 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3525 3526 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3527 // Potential tail calls could cause overwriting of argument stack slots. 3528 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3529 (CallConv == CallingConv::Fast)); 3530 unsigned PtrByteSize = 4; 3531 3532 // Assign locations to all of the incoming arguments. 3533 SmallVector<CCValAssign, 16> ArgLocs; 3534 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 3535 *DAG.getContext()); 3536 3537 // Reserve space for the linkage area on the stack. 3538 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3539 CCInfo.AllocateStack(LinkageSize, PtrByteSize); 3540 if (useSoftFloat()) 3541 CCInfo.PreAnalyzeFormalArguments(Ins); 3542 3543 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4); 3544 CCInfo.clearWasPPCF128(); 3545 3546 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 3547 CCValAssign &VA = ArgLocs[i]; 3548 3549 // Arguments stored in registers. 3550 if (VA.isRegLoc()) { 3551 const TargetRegisterClass *RC; 3552 EVT ValVT = VA.getValVT(); 3553 3554 switch (ValVT.getSimpleVT().SimpleTy) { 3555 default: 3556 llvm_unreachable("ValVT not supported by formal arguments Lowering"); 3557 case MVT::i1: 3558 case MVT::i32: 3559 RC = &PPC::GPRCRegClass; 3560 break; 3561 case MVT::f32: 3562 if (Subtarget.hasP8Vector()) 3563 RC = &PPC::VSSRCRegClass; 3564 else if (Subtarget.hasSPE()) 3565 RC = &PPC::GPRCRegClass; 3566 else 3567 RC = &PPC::F4RCRegClass; 3568 break; 3569 case MVT::f64: 3570 if (Subtarget.hasVSX()) 3571 RC = &PPC::VSFRCRegClass; 3572 else if (Subtarget.hasSPE()) 3573 // SPE passes doubles in GPR pairs. 3574 RC = &PPC::GPRCRegClass; 3575 else 3576 RC = &PPC::F8RCRegClass; 3577 break; 3578 case MVT::v16i8: 3579 case MVT::v8i16: 3580 case MVT::v4i32: 3581 RC = &PPC::VRRCRegClass; 3582 break; 3583 case MVT::v4f32: 3584 RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass; 3585 break; 3586 case MVT::v2f64: 3587 case MVT::v2i64: 3588 RC = &PPC::VRRCRegClass; 3589 break; 3590 case MVT::v4f64: 3591 RC = &PPC::QFRCRegClass; 3592 break; 3593 case MVT::v4i1: 3594 RC = &PPC::QBRCRegClass; 3595 break; 3596 } 3597 3598 SDValue ArgValue; 3599 // Transform the arguments stored in physical registers into 3600 // virtual ones. 3601 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) { 3602 assert(i + 1 < e && "No second half of double precision argument"); 3603 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC); 3604 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC); 3605 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32); 3606 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32); 3607 if (!Subtarget.isLittleEndian()) 3608 std::swap (ArgValueLo, ArgValueHi); 3609 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo, 3610 ArgValueHi); 3611 } else { 3612 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); 3613 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, 3614 ValVT == MVT::i1 ? 
MVT::i32 : ValVT); 3615 if (ValVT == MVT::i1) 3616 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue); 3617 } 3618 3619 InVals.push_back(ArgValue); 3620 } else { 3621 // Argument stored in memory. 3622 assert(VA.isMemLoc()); 3623 3624 // Get the extended size of the argument type in stack 3625 unsigned ArgSize = VA.getLocVT().getStoreSize(); 3626 // Get the actual size of the argument type 3627 unsigned ObjSize = VA.getValVT().getStoreSize(); 3628 unsigned ArgOffset = VA.getLocMemOffset(); 3629 // Stack objects in PPC32 are right justified. 3630 ArgOffset += ArgSize - ObjSize; 3631 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable); 3632 3633 // Create load nodes to retrieve arguments from the stack. 3634 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3635 InVals.push_back( 3636 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo())); 3637 } 3638 } 3639 3640 // Assign locations to all of the incoming aggregate by value arguments. 3641 // Aggregates passed by value are stored in the local variable space of the 3642 // caller's stack frame, right above the parameter list area. 3643 SmallVector<CCValAssign, 16> ByValArgLocs; 3644 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(), 3645 ByValArgLocs, *DAG.getContext()); 3646 3647 // Reserve stack space for the allocations in CCInfo. 3648 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize); 3649 3650 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal); 3651 3652 // Area that is at least reserved in the caller of this function. 3653 unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); 3654 MinReservedArea = std::max(MinReservedArea, LinkageSize); 3655 3656 // Set the size that is at least reserved in caller of this function. Tail 3657 // call optimized function's reserved stack space needs to be aligned so that 3658 // taking the difference between two stack areas will result in an aligned 3659 // stack. 3660 MinReservedArea = 3661 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 3662 FuncInfo->setMinReservedArea(MinReservedArea); 3663 3664 SmallVector<SDValue, 8> MemOps; 3665 3666 // If the function takes variable number of arguments, make a frame index for 3667 // the start of the first vararg value... for expansion of llvm.va_start. 3668 if (isVarArg) { 3669 static const MCPhysReg GPArgRegs[] = { 3670 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 3671 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 3672 }; 3673 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs); 3674 3675 static const MCPhysReg FPArgRegs[] = { 3676 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7, 3677 PPC::F8 3678 }; 3679 unsigned NumFPArgRegs = array_lengthof(FPArgRegs); 3680 3681 if (useSoftFloat() || hasSPE()) 3682 NumFPArgRegs = 0; 3683 3684 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs)); 3685 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs)); 3686 3687 // Make room for NumGPArgRegs and NumFPArgRegs. 
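    // For example, with all eight GPRs and eight FPRs still unallocated this
    // is 8 * 4 + 8 * 8 = 96 bytes on 32-bit SVR4.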
3688 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 + 3689 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8; 3690 3691 FuncInfo->setVarArgsStackOffset( 3692 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8, 3693 CCInfo.getNextStackOffset(), true)); 3694 3695 FuncInfo->setVarArgsFrameIndex(MFI.CreateStackObject(Depth, 8, false)); 3696 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 3697 3698 // The fixed integer arguments of a variadic function are stored to the 3699 // VarArgsFrameIndex on the stack so that they may be loaded by 3700 // dereferencing the result of va_next. 3701 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) { 3702 // Get an existing live-in vreg, or add a new one. 3703 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]); 3704 if (!VReg) 3705 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass); 3706 3707 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3708 SDValue Store = 3709 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3710 MemOps.push_back(Store); 3711 // Increment the address by four for the next argument to store 3712 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT); 3713 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3714 } 3715 3716 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6 3717 // is set. 3718 // The double arguments are stored to the VarArgsFrameIndex 3719 // on the stack. 3720 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) { 3721 // Get an existing live-in vreg, or add a new one. 3722 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]); 3723 if (!VReg) 3724 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass); 3725 3726 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64); 3727 SDValue Store = 3728 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 3729 MemOps.push_back(Store); 3730 // Increment the address by eight for the next argument to store 3731 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl, 3732 PtrVT); 3733 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 3734 } 3735 } 3736 3737 if (!MemOps.empty()) 3738 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 3739 3740 return Chain; 3741 } 3742 3743 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3744 // value to MVT::i64 and then truncate to the correct register size. 3745 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, 3746 EVT ObjectVT, SelectionDAG &DAG, 3747 SDValue ArgVal, 3748 const SDLoc &dl) const { 3749 if (Flags.isSExt()) 3750 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, 3751 DAG.getValueType(ObjectVT)); 3752 else if (Flags.isZExt()) 3753 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, 3754 DAG.getValueType(ObjectVT)); 3755 3756 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal); 3757 } 3758 3759 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4( 3760 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 3761 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 3762 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 3763 // TODO: add description of PPC stack frame format, or at least some docs. 
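  // (Roughly, per the 64-bit ELF ABI documents: the linkage area holds the
  // back chain, the CR and LR save words, and the TOC-pointer save slot; it
  // comes to 48 bytes under ELFv1, which also reserves two compiler/linker
  // words, and 32 bytes under ELFv2, with the parameter save area, when
  // present, following immediately after it.)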
3764 // 3765 bool isELFv2ABI = Subtarget.isELFv2ABI(); 3766 bool isLittleEndian = Subtarget.isLittleEndian(); 3767 MachineFunction &MF = DAG.getMachineFunction(); 3768 MachineFrameInfo &MFI = MF.getFrameInfo(); 3769 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 3770 3771 assert(!(CallConv == CallingConv::Fast && isVarArg) && 3772 "fastcc not supported on varargs functions"); 3773 3774 EVT PtrVT = getPointerTy(MF.getDataLayout()); 3775 // Potential tail calls could cause overwriting of argument stack slots. 3776 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 3777 (CallConv == CallingConv::Fast)); 3778 unsigned PtrByteSize = 8; 3779 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 3780 3781 static const MCPhysReg GPR[] = { 3782 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 3783 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 3784 }; 3785 static const MCPhysReg VR[] = { 3786 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 3787 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 3788 }; 3789 3790 const unsigned Num_GPR_Regs = array_lengthof(GPR); 3791 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13; 3792 const unsigned Num_VR_Regs = array_lengthof(VR); 3793 const unsigned Num_QFPR_Regs = Num_FPR_Regs; 3794 3795 // Do a first pass over the arguments to determine whether the ABI 3796 // guarantees that our caller has allocated the parameter save area 3797 // on its stack frame. In the ELFv1 ABI, this is always the case; 3798 // in the ELFv2 ABI, it is true if this is a vararg function or if 3799 // any parameter is located in a stack slot. 3800 3801 bool HasParameterArea = !isELFv2ABI || isVarArg; 3802 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize; 3803 unsigned NumBytes = LinkageSize; 3804 unsigned AvailableFPRs = Num_FPR_Regs; 3805 unsigned AvailableVRs = Num_VR_Regs; 3806 for (unsigned i = 0, e = Ins.size(); i != e; ++i) { 3807 if (Ins[i].Flags.isNest()) 3808 continue; 3809 3810 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags, 3811 PtrByteSize, LinkageSize, ParamAreaSize, 3812 NumBytes, AvailableFPRs, AvailableVRs, 3813 Subtarget.hasQPX())) 3814 HasParameterArea = true; 3815 } 3816 3817 // Add DAG nodes to load the arguments or copy them out of registers. On 3818 // entry to a function on PPC, the arguments start after the linkage area, 3819 // although the first ones are often in registers. 3820 3821 unsigned ArgOffset = LinkageSize; 3822 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; 3823 unsigned &QFPR_idx = FPR_idx; 3824 SmallVector<SDValue, 8> MemOps; 3825 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin(); 3826 unsigned CurArgIdx = 0; 3827 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { 3828 SDValue ArgVal; 3829 bool needsLoad = false; 3830 EVT ObjectVT = Ins[ArgNo].VT; 3831 EVT OrigVT = Ins[ArgNo].ArgVT; 3832 unsigned ObjSize = ObjectVT.getStoreSize(); 3833 unsigned ArgSize = ObjSize; 3834 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; 3835 if (Ins[ArgNo].isOrigArg()) { 3836 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx); 3837 CurArgIdx = Ins[ArgNo].getOrigArgIndex(); 3838 } 3839 // We re-align the argument offset for each argument, except when using the 3840 // fast calling convention, when we need to make sure we do that only when 3841 // we'll actually use a stack slot. 3842 unsigned CurArgOffset, Align; 3843 auto ComputeArgOffset = [&]() { 3844 /* Respect alignment of argument on the stack. 
*/ 3845 Align = CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize); 3846 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align; 3847 CurArgOffset = ArgOffset; 3848 }; 3849 3850 if (CallConv != CallingConv::Fast) { 3851 ComputeArgOffset(); 3852 3853 /* Compute GPR index associated with argument offset. */ 3854 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 3855 GPR_idx = std::min(GPR_idx, Num_GPR_Regs); 3856 } 3857 3858 // FIXME the codegen can be much improved in some cases. 3859 // We do not have to keep everything in memory. 3860 if (Flags.isByVal()) { 3861 assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit"); 3862 3863 if (CallConv == CallingConv::Fast) 3864 ComputeArgOffset(); 3865 3866 // ObjSize is the true size, ArgSize rounded up to multiple of registers. 3867 ObjSize = Flags.getByValSize(); 3868 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 3869 // Empty aggregate parameters do not take up registers. Examples: 3870 // struct { } a; 3871 // union { } b; 3872 // int c[0]; 3873 // etc. However, we have to provide a place-holder in InVals, so 3874 // pretend we have an 8-byte item at the current address for that 3875 // purpose. 3876 if (!ObjSize) { 3877 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true); 3878 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3879 InVals.push_back(FIN); 3880 continue; 3881 } 3882 3883 // Create a stack object covering all stack doublewords occupied 3884 // by the argument. If the argument is (fully or partially) on 3885 // the stack, or if the argument is fully in registers but the 3886 // caller has allocated the parameter save anyway, we can refer 3887 // directly to the caller's stack frame. Otherwise, create a 3888 // local copy in our own frame. 3889 int FI; 3890 if (HasParameterArea || 3891 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize) 3892 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true); 3893 else 3894 FI = MFI.CreateStackObject(ArgSize, Align, false); 3895 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 3896 3897 // Handle aggregates smaller than 8 bytes. 3898 if (ObjSize < PtrByteSize) { 3899 // The value of the object is its address, which differs from the 3900 // address of the enclosing doubleword on big-endian systems. 3901 SDValue Arg = FIN; 3902 if (!isLittleEndian) { 3903 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT); 3904 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff); 3905 } 3906 InVals.push_back(Arg); 3907 3908 if (GPR_idx != Num_GPR_Regs) { 3909 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3910 FuncInfo->addLiveInAttr(VReg, Flags); 3911 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3912 SDValue Store; 3913 3914 if (ObjSize==1 || ObjSize==2 || ObjSize==4) { 3915 EVT ObjType = (ObjSize == 1 ? MVT::i8 : 3916 (ObjSize == 2 ? MVT::i16 : MVT::i32)); 3917 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg, 3918 MachinePointerInfo(&*FuncArg), ObjType); 3919 } else { 3920 // For sizes that don't fit a truncating store (3, 5, 6, 7), 3921 // store the whole register as-is to the parameter save area 3922 // slot. 3923 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, 3924 MachinePointerInfo(&*FuncArg)); 3925 } 3926 3927 MemOps.push_back(Store); 3928 } 3929 // Whether we copied from a register or not, advance the offset 3930 // into the parameter save area by a full doubleword. 
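      // (For example, a 3-byte struct passed byval still consumes the full
      // 8-byte doubleword slot here.)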
3931 ArgOffset += PtrByteSize; 3932 continue; 3933 } 3934 3935 // The value of the object is its address, which is the address of 3936 // its first stack doubleword. 3937 InVals.push_back(FIN); 3938 3939 // Store whatever pieces of the object are in registers to memory. 3940 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { 3941 if (GPR_idx == Num_GPR_Regs) 3942 break; 3943 3944 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 3945 FuncInfo->addLiveInAttr(VReg, Flags); 3946 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 3947 SDValue Addr = FIN; 3948 if (j) { 3949 SDValue Off = DAG.getConstant(j, dl, PtrVT); 3950 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off); 3951 } 3952 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr, 3953 MachinePointerInfo(&*FuncArg, j)); 3954 MemOps.push_back(Store); 3955 ++GPR_idx; 3956 } 3957 ArgOffset += ArgSize; 3958 continue; 3959 } 3960 3961 switch (ObjectVT.getSimpleVT().SimpleTy) { 3962 default: llvm_unreachable("Unhandled argument type!"); 3963 case MVT::i1: 3964 case MVT::i32: 3965 case MVT::i64: 3966 if (Flags.isNest()) { 3967 // The 'nest' parameter, if any, is passed in R11. 3968 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass); 3969 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3970 3971 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3972 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3973 3974 break; 3975 } 3976 3977 // These can be scalar arguments or elements of an integer array type 3978 // passed directly. Clang may use those instead of "byval" aggregate 3979 // types to avoid forcing arguments to memory unnecessarily. 3980 if (GPR_idx != Num_GPR_Regs) { 3981 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 3982 FuncInfo->addLiveInAttr(VReg, Flags); 3983 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 3984 3985 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1) 3986 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote 3987 // value to MVT::i64 and then truncate to the correct register size. 3988 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); 3989 } else { 3990 if (CallConv == CallingConv::Fast) 3991 ComputeArgOffset(); 3992 3993 needsLoad = true; 3994 ArgSize = PtrByteSize; 3995 } 3996 if (CallConv != CallingConv::Fast || needsLoad) 3997 ArgOffset += 8; 3998 break; 3999 4000 case MVT::f32: 4001 case MVT::f64: 4002 // These can be scalar arguments or elements of a float array type 4003 // passed directly. The latter are used to implement ELFv2 homogenous 4004 // float aggregates. 4005 if (FPR_idx != Num_FPR_Regs) { 4006 unsigned VReg; 4007 4008 if (ObjectVT == MVT::f32) 4009 VReg = MF.addLiveIn(FPR[FPR_idx], 4010 Subtarget.hasP8Vector() 4011 ? &PPC::VSSRCRegClass 4012 : &PPC::F4RCRegClass); 4013 else 4014 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX() 4015 ? &PPC::VSFRCRegClass 4016 : &PPC::F8RCRegClass); 4017 4018 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4019 ++FPR_idx; 4020 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) { 4021 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8 4022 // once we support fp <-> gpr moves. 4023 4024 // This can only ever happen in the presence of f32 array types, 4025 // since otherwise we never run out of FPRs before running out 4026 // of GPRs. 
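        // For instance, a homogeneous aggregate of more than 13 floats uses
        // up F1-F13, and the remaining elements then arrive packed two per
        // GPR doubleword, which is why the shift below depends on where the
        // 4-byte value sits within its doubleword and on the endianness.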
4027 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass); 4028 FuncInfo->addLiveInAttr(VReg, Flags); 4029 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); 4030 4031 if (ObjectVT == MVT::f32) { 4032 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0)) 4033 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal, 4034 DAG.getConstant(32, dl, MVT::i32)); 4035 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); 4036 } 4037 4038 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal); 4039 } else { 4040 if (CallConv == CallingConv::Fast) 4041 ComputeArgOffset(); 4042 4043 needsLoad = true; 4044 } 4045 4046 // When passing an array of floats, the array occupies consecutive 4047 // space in the argument area; only round up to the next doubleword 4048 // at the end of the array. Otherwise, each float takes 8 bytes. 4049 if (CallConv != CallingConv::Fast || needsLoad) { 4050 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize; 4051 ArgOffset += ArgSize; 4052 if (Flags.isInConsecutiveRegsLast()) 4053 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; 4054 } 4055 break; 4056 case MVT::v4f32: 4057 case MVT::v4i32: 4058 case MVT::v8i16: 4059 case MVT::v16i8: 4060 case MVT::v2f64: 4061 case MVT::v2i64: 4062 case MVT::v1i128: 4063 case MVT::f128: 4064 if (!Subtarget.hasQPX()) { 4065 // These can be scalar arguments or elements of a vector array type 4066 // passed directly. The latter are used to implement ELFv2 homogenous 4067 // vector aggregates. 4068 if (VR_idx != Num_VR_Regs) { 4069 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); 4070 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4071 ++VR_idx; 4072 } else { 4073 if (CallConv == CallingConv::Fast) 4074 ComputeArgOffset(); 4075 needsLoad = true; 4076 } 4077 if (CallConv != CallingConv::Fast || needsLoad) 4078 ArgOffset += 16; 4079 break; 4080 } // not QPX 4081 4082 assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 && 4083 "Invalid QPX parameter type"); 4084 LLVM_FALLTHROUGH; 4085 4086 case MVT::v4f64: 4087 case MVT::v4i1: 4088 // QPX vectors are treated like their scalar floating-point subregisters 4089 // (except that they're larger). 4090 unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32; 4091 if (QFPR_idx != Num_QFPR_Regs) { 4092 const TargetRegisterClass *RC; 4093 switch (ObjectVT.getSimpleVT().SimpleTy) { 4094 case MVT::v4f64: RC = &PPC::QFRCRegClass; break; 4095 case MVT::v4f32: RC = &PPC::QSRCRegClass; break; 4096 default: RC = &PPC::QBRCRegClass; break; 4097 } 4098 4099 unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC); 4100 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); 4101 ++QFPR_idx; 4102 } else { 4103 if (CallConv == CallingConv::Fast) 4104 ComputeArgOffset(); 4105 needsLoad = true; 4106 } 4107 if (CallConv != CallingConv::Fast || needsLoad) 4108 ArgOffset += Sz; 4109 break; 4110 } 4111 4112 // We need to load the argument to a virtual register if we determined 4113 // above that we ran out of physical registers of the appropriate type. 4114 if (needsLoad) { 4115 if (ObjSize < ArgSize && !isLittleEndian) 4116 CurArgOffset += ArgSize - ObjSize; 4117 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable); 4118 SDValue FIN = DAG.getFrameIndex(FI, PtrVT); 4119 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo()); 4120 } 4121 4122 InVals.push_back(ArgVal); 4123 } 4124 4125 // Area that is at least reserved in the caller of this function. 
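  // (When a parameter save area exists this is at least the linkage area
  // plus the 8 * PtrByteSize == 64-byte parameter save area, e.g.
  // 48 + 64 == 112 bytes under ELFv1.)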
4126 unsigned MinReservedArea; 4127 if (HasParameterArea) 4128 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize); 4129 else 4130 MinReservedArea = LinkageSize; 4131 4132 // Set the size that is at least reserved in caller of this function. Tail 4133 // call optimized functions' reserved stack space needs to be aligned so that 4134 // taking the difference between two stack areas will result in an aligned 4135 // stack. 4136 MinReservedArea = 4137 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea); 4138 FuncInfo->setMinReservedArea(MinReservedArea); 4139 4140 // If the function takes variable number of arguments, make a frame index for 4141 // the start of the first vararg value... for expansion of llvm.va_start. 4142 if (isVarArg) { 4143 int Depth = ArgOffset; 4144 4145 FuncInfo->setVarArgsFrameIndex( 4146 MFI.CreateFixedObject(PtrByteSize, Depth, true)); 4147 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); 4148 4149 // If this function is vararg, store any remaining integer argument regs 4150 // to their spots on the stack so that they may be loaded by dereferencing 4151 // the result of va_next. 4152 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize; 4153 GPR_idx < Num_GPR_Regs; ++GPR_idx) { 4154 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); 4155 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); 4156 SDValue Store = 4157 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo()); 4158 MemOps.push_back(Store); 4159 // Increment the address by four for the next argument to store 4160 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT); 4161 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); 4162 } 4163 } 4164 4165 if (!MemOps.empty()) 4166 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); 4167 4168 return Chain; 4169 } 4170 4171 SDValue PPCTargetLowering::LowerFormalArguments_Darwin( 4172 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 4173 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, 4174 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 4175 // TODO: add description of PPC stack frame format, or at least some docs. 4176 // 4177 MachineFunction &MF = DAG.getMachineFunction(); 4178 MachineFrameInfo &MFI = MF.getFrameInfo(); 4179 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 4180 4181 EVT PtrVT = getPointerTy(MF.getDataLayout()); 4182 bool isPPC64 = PtrVT == MVT::i64; 4183 // Potential tail calls could cause overwriting of argument stack slots. 4184 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && 4185 (CallConv == CallingConv::Fast)); 4186 unsigned PtrByteSize = isPPC64 ? 8 : 4; 4187 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize(); 4188 unsigned ArgOffset = LinkageSize; 4189 // Area that is at least reserved in caller of this function. 4190 unsigned MinReservedArea = ArgOffset; 4191 4192 static const MCPhysReg GPR_32[] = { // 32-bit registers. 4193 PPC::R3, PPC::R4, PPC::R5, PPC::R6, 4194 PPC::R7, PPC::R8, PPC::R9, PPC::R10, 4195 }; 4196 static const MCPhysReg GPR_64[] = { // 64-bit registers. 4197 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 4198 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 4199 }; 4200 static const MCPhysReg VR[] = { 4201 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 4202 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 4203 }; 4204 4205 const unsigned Num_GPR_Regs = array_lengthof(GPR_32); 4206 const unsigned Num_FPR_Regs = useSoftFloat() ? 
0 : 13;
  const unsigned Num_VR_Regs = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors. We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples, but we still have to walk the argument list to
  // handle the pathological case: compute VecArgOffset as the start of the
  // vector parameter area. Computing VecArgOffset is the entire point of the
  // following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size, ArgSize rounded up to multiple of regs.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
            ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch (ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is. Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;

  // Add DAG nodes to load the arguments or copy them out of registers. On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a
      // multiple of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right-justified, everything else is
      // left-justified. This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory. ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(&*FuncArg, j));
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

          if (ObjectVT == MVT::i1)
            ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);

          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      LLVM_FALLTHROUGH;
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // value to MVT::i64 and then truncate to the correct register size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available
      // for argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the non-vectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI.CreateFixedObject(ObjSize,
                                     CurArgOffset + (ArgSize - ObjSize),
                                     isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in caller of this function. Tail
  // call optimized functions' reserved stack space needs to be aligned so
  // that taking the difference between two stack areas will result in an
  // aligned stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
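  // For example (a sketch; the callee signature is hypothetical): for a
  // variadic callee such as
  //   int sum(int n, ...);
  // the remaining parameter GPRs are spilled to the fixed-object area below,
  // so a subsequent va_arg expansion can simply walk a pointer through
  // memory; see the loop over GPR_idx in the block that follows.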
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                              Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {
  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}

static bool isFunctionGlobalAddress(SDValue Callee);

static bool
callsShareTOCBase(const Function *Caller, SDValue Callee,
                  const TargetMachine &TM) {
  // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
  // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
  // correctness.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  const GlobalValue *GV = G->getGlobal();
  // The medium and large code models are expected to provide a sufficiently
  // large TOC to provide all data addressing needs of a module with a
  // single TOC. Since each module will be addressed with a single TOC, we
  // only need to check that caller and callee don't cross dso boundaries.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return TM.shouldAssumeDSOLocal(*Caller->getParent(), GV);

  // Otherwise we need to ensure callee and caller are in the same section,
  // since the linker may allocate multiple TOCs, and we don't know which
  // sections will belong to the same TOC base.
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  // If the callee might be interposed, then we can't assume the ultimate call
  // target will be in the same section. Even in cases where we can assume that
  // interposition won't happen, in any case where the linker might insert a
  // stub to allow for interposition, we must generate code as though
  // interposition might occur. To understand why this matters, consider a
  // situation where: a -> b -> c where the arrows indicate calls. b and c are
  // in the same section, but a is in a different module (i.e. has a different
  // TOC base pointer). If the linker allows for interposition between b and c,
  // then it will generate a stub for the call edge between b and c which will
  // save the TOC pointer into the designated stack slot allocated by b. If we
  // return true here, and therefore allow a tail call between b and c, that
  // stack slot won't exist and the b -> c stub will end up saving b's TOC base
  // pointer into the stack slot allocated by a (where the a -> b stub saved
  // a's TOC base pointer). Similarly, if we're not considering a tail call but
  // rather whether a nop is needed after the call instruction in b: because
  // the linker will insert a stub, it might complain about a missing nop if we
  // omit one (although many linkers don't complain in this case).
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.is64BitELFABI());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg& Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool
hasSameArgumentList(const Function *CallerFn, ImmutableCallSite CS) {
  if (CS.arg_size() != CallerFn->arg_size())
    return false;

  ImmutableCallSite::arg_iterator CalleeArgIter = CS.arg_begin();
  ImmutableCallSite::arg_iterator CalleeArgEnd = CS.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value* CalleeArg = *CalleeArgIter;
    const Value* CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The 1st argument of the callee is undef and has the same type as the
    // caller's argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's calling
// conventions.
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {
  // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [] (CallingConv::ID CC){
      return CC == CallingConv::C || CC == CallingConv::Fast;
  };
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    return false;

  // We can safely tail call both fastcc and ccc callees from a c calling
  // convention caller. If the caller is fastcc, we may have less stack space
  // than a non-fastcc caller with the same signature so disable tail-calls in
  // that case.
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}

bool
PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for tco.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
    return false;

  // A caller with any byval parameter is not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // A callee with any byval parameter is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > the callee's stack size, we are still able to apply
  // sibling call optimization. For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
  //   struct test {
  //     long int a;
  //     char ary[56];
  //   } gTest;
  //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
  //     b->a = v.a;
  //     return 0;
  //   }
  //   void caller1(struct test a, struct test c, struct test *b) {
  //     callee(gTest, b); }
  //   void caller2(struct test *b) { callee(gTest, b); }
  if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
    return false;

  // If callee and caller use different calling conventions, we cannot pass
  // parameters on stack since offsets for the parameter area may be different.
  if (Caller.getCallingConv() != CalleeCC &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;

  // No TCO/SCO on indirect call because the caller has to restore its TOC.
  if (!isFunctionGlobalAddress(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // If the caller and callee potentially have different TOC bases then we
  // cannot tail call since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  if (!callsShareTOCBase(&Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list that the caller is using, we
  // can apply SCO in this case. If it does not, we need to check whether the
  // callee needs stack for passing arguments.
  if (!hasSameArgumentList(&Caller, CS) &&
      needStackSlotPassParameters(Subtarget, Outs)) {
    return false;
  }

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool
PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                     CallingConv::ID CalleeCC,
                                                     bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                                     SelectionDAG& DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing by val parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in same module, hidden
    // or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.
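  // Worked example (illustrative): Addr = 0x00400000 has its low 2 bits
  // clear and sign-extends from 26 bits to itself, so it is representable;
  // the returned immediate is Addr >> 2 (0x00100000), matching the word
  // granularity of the branch displacement field.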
  return DAG
      .getConstant(
          (int)C->getZExtValue() >> 2, SDLoc(Op),
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
      .getNode();
}

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx = 0;

  TailCallArgumentInfo() = default;
};

} // end anonymous namespace

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void StoreTailCallArgumentsToStackSlot(
    SelectionDAG &DAG, SDValue Chain,
    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
    SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to the frame pointer.
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, Arg, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }
}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
/// the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
                                             SDValue OldRetAddr, SDValue OldFP,
                                             int SPDiff, const SDLoc &dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    const PPCFrameLowering *FL = Subtarget.getFrameLowering();
    bool isPPC64 = Subtarget.isPPC64();
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
    int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
                                                         NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                         MachinePointerInfo::getFixedStack(MF, NewRetAddr));

    // When using the 32/64-bit SVR4 ABI there is no need to move the FP stack
    // slot as the FP is never overwritten.
    if (Subtarget.isDarwinABI()) {
      int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
      int NewFPIdx = MF.getFrameInfo().CreateFixedObject(SlotSize, NewFPLoc,
                                                         true);
      SDValue NewFramePtrIdx = DAG.getFrameIndex(NewFPIdx, VT);
      Chain = DAG.getStore(Chain, dl, OldFP, NewFramePtrIdx,
                           MachinePointerInfo::getFixedStack(
                               DAG.getMachineFunction(), NewFPIdx));
    }
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
/// the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                         SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and
/// return address stack slots. Returns the chain as result and the loaded
/// return address and frame pointer in LROpOut/FPOpOut. Used when tail
/// calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);

    // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
    // slot as the FP is never overwritten.
    if (Subtarget.isDarwinABI()) {
      FPOpOut = getFramePointerFrameIndex(DAG);
      FPOpOut = DAG.getLoad(VT, dl, Chain, FPOpOut, MachinePointerInfo());
      Chain = SDValue(FPOpOut.getNode(), 1);
    }
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       false, false, false, MachinePointerInfo(),
                       MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case
/// of tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
    // Calculate and remember argument location.
  } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                                  TailCallArguments);
}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);

  // Emit callseq_end just before tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this global address that of a function that can be called by name? (as
// opposed to something that must hold a descriptor for an indirect call).
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
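  // For example (a sketch of the SPE-only case handled below): an f64 return
  // value arrives as two i32 halves in consecutive return registers; the
  // loop reads both halves, swaps them on big-endian targets, and
  // reassembles the f64 with PPCISD::BUILD_SPE64.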
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap (Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {
  // PatchPoint calls are not indirect.
  if (isPatchPoint)
    return false;

  if (isFunctionGlobalAddress(Callee) || dyn_cast<ExternalSymbolSDNode>(Callee))
    return false;

  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs can not
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the
  // function pointer immediate points to the global entry point, while the
  // BLA would need to jump to the local entry point (see rL211174).
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
      isBLACompatibleAddress(Callee, DAG))
    return false;

  return true;
}

static unsigned getCallOpcode(bool isIndirectCall, bool isPatchPoint,
                              bool isTailCall, const Function &Caller,
                              const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM) {
  if (isTailCall)
    return PPCISD::TC_RETURN;

  // This is a call through a function pointer.
  if (isIndirectCall) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will
    // be inserted into the DAG as part of call lowering. The restore of the
    // TOC pointer is modeled by using a pseudo instruction for the call
    // opcode that represents the 2-instruction sequence of an indirect
    // branch-and-link, immediately followed by a load of the TOC pointer
    // from the stack save slot into gpr2.
    if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
      return PPCISD::BCTRL_LOAD_TOC;

    // An indirect call that does not need a TOC restore.
    return PPCISD::BCTRL;
  }

  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time if the linker determines the calls
  // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI-designated offset in the linkage area and the
  // linker will rewrite the nop to be a load of the TOC pointer from the
  // linkage area into gpr2.
  if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
    return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
                                                  : PPCISD::CALL_NOP;

  return PPCISD::CALL;
}

static bool isValidAIXExternalSymSDNode(StringRef SymName) {
  return StringSwitch<bool>(SymName)
      .Cases("__divdi3", "__fixunsdfdi", "__floatundidf", "__floatundisf",
             "__moddi3", "__udivdi3", "__umoddi3", true)
      .Cases("ceil", "floor", "memcpy", "memmove", "memset", "round", true)
      .Default(false);
}

static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
      return SDValue(Dest, 0);

  // Returns true if the callee is local, and false otherwise.
  auto isLocalCallee = [&]() {
    const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    const GlobalValue *GV = G ? G->getGlobal() : nullptr;

    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !dyn_cast_or_null<GlobalIFunc>(GV);
  };

  // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
  // a static relocation model causes some versions of GNU LD (2.17.50, at
  // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
  // built with secure-PLT.
  bool UsePlt =
      Subtarget.is32BitELFABI() && !isLocalCallee() &&
      Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;

  // On AIX, direct function calls reference the symbol for the function's
  // entry point, which is named by prepending a "." before the function's
  // C-linkage name.
  const auto getAIXFuncEntryPointSymbolSDNode =
      [&](StringRef FuncName, bool IsDeclaration,
          const XCOFF::StorageClass &SC) {
        auto &Context = DAG.getMachineFunction().getMMI().getContext();

        MCSymbolXCOFF *S = cast<MCSymbolXCOFF>(
            Context.getOrCreateSymbol(Twine(".") + Twine(FuncName)));

        if (IsDeclaration && !S->hasContainingCsect()) {
          // On AIX, an undefined symbol needs to be associated with a
          // MCSectionXCOFF to get the correct storage mapping class.
          // In this case, XCOFF::XMC_PR.
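          // For example (illustrative; the function name is hypothetical): a
          // call to an undeclared function `foo` references the entry-point
          // symbol `.foo`, which is placed in a csect with storage mapping
          // class XMC_PR (program code) and symbol type XTY_ER (external
          // reference), as created below.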
          MCSectionXCOFF *Sec = Context.getXCOFFSection(
              S->getName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
              SectionKind::getMetadata());
          S->setContainingCsect(Sec);
        }

        MVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        return DAG.getMCSymbol(S, PtrVT);
      };

  if (isFunctionGlobalAddress(Callee)) {
    const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
    const GlobalValue *GV = G->getGlobal();

    if (!Subtarget.isAIXABI())
      return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
                                        UsePlt ? PPCII::MO_PLT : 0);

    assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
    const GlobalObject *GO = cast<GlobalObject>(GV);
    const XCOFF::StorageClass SC =
        TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO);
    return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(),
                                            SC);
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *SymName = S->getSymbol();
    if (!Subtarget.isAIXABI())
      return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
                                         UsePlt ? PPCII::MO_PLT : 0);

    // If there exists a user-declared function whose name is the same as the
    // ExternalSymbol's, then we pick up the user-declared version.
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    if (const Function *F =
            dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
      const XCOFF::StorageClass SC =
          TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
      return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(),
                                              SC);
    }

    // TODO: Remove this when the support for ExternalSymbolSDNode is complete.
    if (isValidAIXExternalSymSDNode(SymName)) {
      return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT);
    }

    report_fatal_error("Unexpected ExternalSymbolSDNode: " + Twine(SymName));
  }

  // No transformation needed.
  assert(Callee.getNode() && "What no callee?");
  return Callee;
}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_STARTSDNode.");

  // The last operand is the chain, except when the node has glue. If the node
  // has glue, then the last operand is the glue, and the chain is the second
  // to last operand.
  SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
  if (LastValue.getValueType() != MVT::Glue)
    return LastValue;

  return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {
  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
  Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
                      makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
  // The glue is the second value produced.
  Glue = Chain.getValue(1);
}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          ImmutableCallSite CS, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {
  // Function pointers in the 64-bit SVR4 ABI do not point to the function
  // entry point, but to the function descriptor (the function entry point
  // address is part of the function descriptor though).
  // The function descriptor is a three-doubleword structure with the
  // following fields: function entry point, TOC base address and
  // environment pointer.
  // Thus for a call through a function pointer, the following actions need
  // to be performed:
  //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
  //   2. Load the address of the function entry point from the function
  //      descriptor.
  //   3. Load the TOC of the callee from the function descriptor into r2.
  //   4. Load the environment pointer from the function descriptor into
  //      r11.
  //   5. Branch to the function entry point address.
  //   6. On return of the callee, the TOC of the caller needs to be
  //      restored (this is done in FinishCall()).
  //
  // The loads are scheduled at the beginning of the call sequence, and the
  // register copies are flagged together to ensure that no other
  // operations can be scheduled in between. E.g. without flagging the
  // copies together, a TOC access in the caller could be scheduled between
  // the assignment of the callee TOC and the branch to the callee, which
  // leads to incorrect code.

  // Start by loading the function address from the descriptor.
  SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                      ? (MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant)
                      : MachineMemOperand::MONone;

  MachinePointerInfo MPI(CS ? CS.getCalledValue() : nullptr);

  // Registers used in building the DAG.
  const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
  const MCRegister TOCReg = Subtarget.getTOCPointerRegister();

  // Offsets of descriptor members.
  const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
  const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;

  // One load for the function's entry point address.
  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

  // One for loading the TOC anchor for the module that contains the called
  // function.
  SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
  SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
  SDValue TOCPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddTOC,
                  MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);

  // One for loading the environment pointer.
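  // (For reference, a sketch of the descriptor being dereferenced here, with
  // the offsets taken symbolically from the Subtarget queries above:
  //   descriptor + 0                : function entry point
  //   descriptor + TOCAnchorOffset  : TOC base, copied into the TOC register
  //   descriptor + EnvPtrOffset     : environment pointer, copied into the
  //                                   environment pointer register.)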
  SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
  SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
  SDValue LoadEnvPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddPtr,
                  MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);

  // Then copy the newly loaded TOC anchor to the TOC pointer.
  SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
  Chain = TOCVal.getValue(0);
  Glue = TOCVal.getValue(1);

  // If the function call has an explicit 'nest' parameter, it takes the
  // place of the environment pointer.
  assert((!hasNest || !Subtarget.isAIXABI()) &&
         "Nest parameter is not supported on AIX.");
  if (!hasNest) {
    SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
    Chain = EnvVal.getValue(0);
    Glue = EnvVal.getValue(1);
  }

  // The rest of the indirect call sequence is the same as the non-descriptor
  // DAG.
  prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
}

static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops, CallingConv::ID CallConv,
                  const SDLoc &dl, bool isTailCall, bool isVarArg,
                  bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                  SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
                  const PPCSubtarget &Subtarget, bool isIndirect) {
  const bool IsPPC64 = Subtarget.isPPC64();
  // MVT for a general purpose register.
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  // First operand is always the chain.
  Ops.push_back(Chain);

  // If it's a direct call pass the callee as the second operand.
  if (!isIndirect)
    Ops.push_back(Callee);
  else {
    assert(!isPatchPoint && "Patch point calls are not indirect.");

    // For the TOC-based ABIs, we have saved the TOC pointer to the linkage
    // area on the stack (this would have been done in `LowerCall_64SVR4` or
    // `LowerCall_AIX`). The call instruction is a pseudo instruction that
    // represents both the indirect branch and a load that restores the TOC
    // pointer from the linkage area. The operand for the TOC restore is an
    // add of the TOC save offset to the stack pointer. This must be the
    // second operand: after the chain input but before any other variadic
    // arguments.
    if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
      const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();

      SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
      Ops.push_back(AddTOC);
    }

    // Add the register used for the environment pointer.
    if (Subtarget.usesFunctionDescriptors() && !hasNest)
      Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
                                    RegVT));

    // Add CTR register as callee so a bctr can be emitted later.
    if (isTailCall)
      Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
  }

  // If this is a tail call add stack pointer delta.
  if (isTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known
  // live into the call.
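  // For a simple direct call the resulting operand list is roughly:
  //   { Chain, Callee, ArgReg0, ArgReg1, ..., [TOC reg], RegMask, [Glue] }
  // (an illustrative ordering; the indirect and tail-call cases above add or
  // substitute operands as described there).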
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
  // no way to mark dependencies as implicit here.
  // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
  if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) && !isPatchPoint)
    Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (isVarArg && Subtarget.is32BitELFABI())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If the glue is valid, it is the last operand.
  if (Glue.getNode())
    Ops.push_back(Glue);
}

SDValue PPCTargetLowering::FinishCall(
    CallingConv::ID CallConv, const SDLoc &dl, bool isTailCall, bool isVarArg,
    bool isPatchPoint, bool hasNest, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, ImmutableCallSite CS) const {

  if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI())
    setUsesTOCBasePtr(DAG);

  const bool isIndirect = isIndirectCall(Callee, DAG, Subtarget, isPatchPoint);
  unsigned CallOpc = getCallOpcode(isIndirect, isPatchPoint, isTailCall,
                                   DAG.getMachineFunction().getFunction(),
                                   Callee, Subtarget, DAG.getTarget());

  if (!isIndirect)
    Callee = transformCallee(Callee, DAG, dl, Subtarget);
  else if (Subtarget.usesFunctionDescriptors())
    prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CS,
                                  dl, hasNest, Subtarget);
  else
    prepareIndirectCall(DAG, Callee, Glue, Chain, dl);

  // Build the operand list for the call instruction.
  SmallVector<SDValue, 8> Ops;
  buildCallOperands(Ops, CallConv, dl, isTailCall, isVarArg, isPatchPoint,
                    hasNest, DAG, RegsToPass, Glue, Chain, Callee, SPDiff,
                    Subtarget, isIndirect);

  // Emit tail call.
  if (isTailCall) {
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee)) &&
           "Expecting a global address, external symbol, absolute value or "
           "register");
    assert(CallOpc == PPCISD::TC_RETURN &&
           "Unexpected call opcode for a tail call.");
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
  }

  std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
  Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
  Glue = Chain.getValue(1);

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops = (CallConv == CallingConv::Fast &&
                         getTargetMachine().Options.GuaranteedTailCallOpt)
                            ? NumBytes
                            : 0;

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             Glue, dl);
  Glue = Chain.getValue(1);

  return LowerCallResult(Chain, Glue, CallConv, isVarArg, Ins, dl, DAG, InVals);
}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;
  bool isPatchPoint = CLI.IsPatchPoint;
  ImmutableCallSite CS = CLI.CS;

  if (isTailCall) {
    if (Subtarget.useLongCalls() && !(CS && CS.isMustTailCall()))
      isTailCall = false;
    else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall =
          IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
                                                   isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      assert(isa<GlobalAddressSDNode>(Callee) &&
             "Callee should be an llvm::Function object.");
      LLVM_DEBUG(
          const GlobalValue *GV =
              cast<GlobalAddressSDNode>(Callee)->getGlobal();
          const unsigned Width =
              80 - strlen("TCO caller: ") - strlen(", callee linkage: 0, 0");
          dbgs() << "TCO caller: "
                 << left_justify(DAG.getMachineFunction().getName(), Width)
                 << ", callee linkage: " << GV->getVisibility() << ", "
                 << GV->getLinkage() << "\n");
    }
  }

  if (!isTailCall && CS && CS.isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via function pointer. If we have a function name, first translate it
  // into a pointer.
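  // Sketch (assuming the usual long-call semantics): instead of a direct
  // `bl foo`, the address of foo is materialized into a register and the
  // call is made through the count register (mtctr/bctrl), so the callee may
  // live anywhere in the address space.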
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, isPatchPoint, Outs, OutVals, Ins,
                            dl, DAG, InVals, CS);

  if (Subtarget.isSVR4ABI())
    return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
                            isTailCall, isPatchPoint, Outs, OutVals, Ins,
                            dl, DAG, InVals, CS);

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CallConv, isVarArg,
                         isTailCall, isPatchPoint, Outs, OutVals, Ins,
                         dl, DAG, InVals, CS);

  return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
                          isTailCall, isPatchPoint, Outs, OutVals, Ins,
                          dl, DAG, InVals, CS);
}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
    bool isTailCall, bool isPatchPoint,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    ImmutableCallSite CS) const {
  // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
  // of the 32-bit SVR4 ABI stack frame layout.

  assert((CallConv == CallingConv::C ||
          CallConv == CallingConv::Cold ||
          CallConv == CallingConv::Fast) && "Unknown calling convention!");

  unsigned PtrByteSize = 4;

  MachineFunction &MF = DAG.getMachineFunction();

  // Mark this function as potentially containing a function call that is tail
  // call optimized. As a consequence, the frame pointer will be used for
  // dynamic stack allocation and for restoring the caller's stack pointer in
  // this function's epilogue. This is done because the tail-called function
  // might overwrite the value in this function's (MF) stack pointer stack
  // slot 0(SP).
  if (getTargetMachine().Options.GuaranteedTailCallOpt &&
      CallConv == CallingConv::Fast)
    MF.getInfo<PPCFunctionInfo>()->setHasFastCall();

  // Count how many bytes are to be pushed on the stack, including the linkage
  // area, parameter list area and the part of the local variable space which
  // contains copies of aggregates which are passed by value.

  // Assign locations to all of the outgoing arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
                       PtrByteSize);
  if (useSoftFloat())
    CCInfo.PreAnalyzeCallOperands(Outs);

  if (isVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Fixed vector arguments go into registers as long as registers are
    // available. Variable vector arguments always go into memory.
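    // For example (a sketch; the callee signature is hypothetical): in a
    // call to
    //   void f(<4 x i32> a, ...);
    // the fixed argument `a` may be assigned a vector register by
    // CC_PPC32_SVR4, while a vector passed through the `...` is always given
    // a stack slot by CC_PPC32_SVR4_VarArg.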
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      bool Result;

      if (Outs[i].IsFixed) {
        Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
                               CCInfo);
      } else {
        Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
                                      ArgFlags, CCInfo);
      }

      if (Result) {
#ifndef NDEBUG
        errs() << "Call operand #" << i << " has unhandled type "
               << EVT(ArgVT).getEVTString() << "\n";
#endif
        llvm_unreachable(nullptr);
      }
    }
  } else {
    // All arguments are treated the same.
    CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
  }
  CCInfo.clearWasPPCF128();

  // Assign locations to all of the outgoing aggregate by value arguments.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, MF, ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);

  CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);

  // Size of the linkage area, parameter list area and the part of the local
  // variable space where copies of aggregates which are passed by value are
  // stored.
  unsigned NumBytes = CCByValInfo.getNextStackOffset();

  // Calculate by how many bytes the stack has to be adjusted in case of tail
  // call optimization.
  int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  SDValue CallSeqStart = Chain;

  // Load the return address and frame pointer so they can be moved somewhere
  // else later.
  SDValue LROp, FPOp;
  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  // Set up a copy of the stack pointer for use loading and storing any
  // arguments that may not fit in the registers available for argument
  // passing.
  SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
  SmallVector<SDValue, 8> MemOpChains;

  bool seenFloatArg = false;
  // Walk the register/memloc assignments, inserting copies/loads.
  // i - Tracks the index into the list of registers allocated for the call.
  // RealArgIdx - Tracks the index into the list of actual function arguments.
  // j - Tracks the index into the list of byval arguments.
  for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
       i != e;
       ++i, ++RealArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[RealArgIdx];
    ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;

    if (Flags.isByVal()) {
      // Argument is an aggregate which is passed by value, thus we need to
      // create a copy of it in the local variable space of the current stack
      // frame (which is the stack frame of the caller) and pass the address
      // of this copy to the callee.
5712 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5713 CCValAssign &ByValVA = ByValArgLocs[j++];
5714 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5715
5716 // Memory reserved in the local variable space of the caller's stack frame.
5717 unsigned LocMemOffset = ByValVA.getLocMemOffset();
5718
5719 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5720 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5721 StackPtr, PtrOff);
5722
5723 // Create a copy of the argument in the local area of the current
5724 // stack frame.
5725 SDValue MemcpyCall =
5726 CreateCopyOfByValArgument(Arg, PtrOff,
5727 CallSeqStart.getNode()->getOperand(0),
5728 Flags, DAG, dl);
5729
5730 // This must go outside the CALLSEQ_START..END.
5731 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5732 SDLoc(MemcpyCall));
5733 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5734 NewCallSeqStart.getNode());
5735 Chain = CallSeqStart = NewCallSeqStart;
5736
5737 // Pass the address of the aggregate copy on the stack either in a
5738 // physical register or in the parameter list area of the current stack
5739 // frame to the callee.
5740 Arg = PtrOff;
5741 }
5742
5743 // When useCRBits() is true, there can be i1 arguments.
5744 // This is because getRegisterType(MVT::i1) => MVT::i1,
5745 // while for other integer types getRegisterType() => MVT::i32.
5746 // Extend i1 values so the callee is guaranteed to receive an i32.
5747 if (Arg.getValueType() == MVT::i1)
5748 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5749 dl, MVT::i32, Arg);
5750
5751 if (VA.isRegLoc()) {
5752 seenFloatArg |= VA.getLocVT().isFloatingPoint();
5753 // Put argument in a physical register.
5754 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5755 bool IsLE = Subtarget.isLittleEndian();
5756 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5757 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5758 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5759 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5760 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5761 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5762 SVal.getValue(0)));
5763 } else
5764 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5765 } else {
5766 // Put argument in the parameter list area of the current stack frame.
5767 assert(VA.isMemLoc());
5768 unsigned LocMemOffset = VA.getLocMemOffset();
5769
5770 if (!isTailCall) {
5771 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5772 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5773 StackPtr, PtrOff);
5774
5775 MemOpChains.push_back(
5776 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5777 } else {
5778 // Calculate and remember argument location.
5779 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5780 TailCallArguments);
5781 }
5782 }
5783 }
5784
5785 if (!MemOpChains.empty())
5786 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5787
5788 // Build a sequence of copy-to-reg nodes chained together with token chain
5789 // and flag operands which copy the outgoing args into the appropriate regs.
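// Glueing each copy to the next (and ultimately to the call node via
// InFlag) keeps the copies immediately before the call in the final
// schedule, so the argument registers cannot be clobbered in between.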
5790 SDValue InFlag;
5791 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5792 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5793 RegsToPass[i].second, InFlag);
5794 InFlag = Chain.getValue(1);
5795 }
5796
5797 // Set CR bit 6 to true if this is a vararg call with floating args passed in
5798 // registers.
5799 if (isVarArg) {
5800 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5801 SDValue Ops[] = { Chain, InFlag };
5802
5803 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5804 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5805
5806 InFlag = Chain.getValue(1);
5807 }
5808
5809 if (isTailCall)
5810 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5811 TailCallArguments);
5812
5813 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5814 /* unused except on PPC64 ELFv1 */ false, DAG,
5815 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5816 NumBytes, Ins, InVals, CS);
5817 }
5818
5819 // Copy an argument into memory, being careful to do this outside the
5820 // call sequence for the call to which the argument belongs.
5821 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5822 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5823 SelectionDAG &DAG, const SDLoc &dl) const {
5824 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5825 CallSeqStart.getNode()->getOperand(0),
5826 Flags, DAG, dl);
5827 // The MEMCPY must go outside the CALLSEQ_START..END.
5828 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5829 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5830 SDLoc(MemcpyCall));
5831 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5832 NewCallSeqStart.getNode());
5833 return NewCallSeqStart;
5834 }
5835
5836 SDValue PPCTargetLowering::LowerCall_64SVR4(
5837 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
5838 bool isTailCall, bool isPatchPoint,
5839 const SmallVectorImpl<ISD::OutputArg> &Outs,
5840 const SmallVectorImpl<SDValue> &OutVals,
5841 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5842 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5843 ImmutableCallSite CS) const {
5844 bool isELFv2ABI = Subtarget.isELFv2ABI();
5845 bool isLittleEndian = Subtarget.isLittleEndian();
5846 unsigned NumOps = Outs.size();
5847 bool hasNest = false;
5848 bool IsSibCall = false;
5849
5850 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5851 unsigned PtrByteSize = 8;
5852
5853 MachineFunction &MF = DAG.getMachineFunction();
5854
5855 if (isTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5856 IsSibCall = true;
5857
5858 // Mark this function as potentially containing a function that contains a
5859 // tail call. As a consequence, the frame pointer will be used for dynamic
5860 // stack allocation and for restoring the caller's stack pointer in this
5861 // function's epilogue. This is done because a tail-called function might
5862 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5863 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5864 CallConv == CallingConv::Fast)
5865 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5866
5867 assert(!(CallConv == CallingConv::Fast && isVarArg) &&
5868 "fastcc not supported on varargs functions");
5869
5870 // Count how many bytes are to be pushed on the stack, including the linkage
5871 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5872 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5873 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
5874 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5875 unsigned NumBytes = LinkageSize;
5876 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5877 unsigned &QFPR_idx = FPR_idx;
5878
5879 static const MCPhysReg GPR[] = {
5880 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5881 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5882 };
5883 static const MCPhysReg VR[] = {
5884 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5885 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5886 };
5887
5888 const unsigned NumGPRs = array_lengthof(GPR);
5889 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5890 const unsigned NumVRs = array_lengthof(VR);
5891 const unsigned NumQFPRs = NumFPRs;
5892
5893 // On ELFv2, we can avoid allocating the parameter area if all the arguments
5894 // can be passed to the callee in registers.
5895 // For the fast calling convention, there is another check below.
5896 // Note: we should keep this consistent with LowerFormalArguments_64SVR4().
5897 bool HasParameterArea = !isELFv2ABI || isVarArg || CallConv == CallingConv::Fast;
5898 if (!HasParameterArea) {
5899 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5900 unsigned AvailableFPRs = NumFPRs;
5901 unsigned AvailableVRs = NumVRs;
5902 unsigned NumBytesTmp = NumBytes;
5903 for (unsigned i = 0; i != NumOps; ++i) {
5904 if (Outs[i].Flags.isNest()) continue;
5905 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5906 PtrByteSize, LinkageSize, ParamAreaSize,
5907 NumBytesTmp, AvailableFPRs, AvailableVRs,
5908 Subtarget.hasQPX()))
5909 HasParameterArea = true;
5910 }
5911 }
5912
5913 // When using the fast calling convention, we don't provide backing for
5914 // arguments that will be in registers.
5915 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5916
5917 // Avoid allocating parameter area for fastcc functions if all the arguments
5918 // can be passed in the registers.
5919 if (CallConv == CallingConv::Fast)
5920 HasParameterArea = false;
5921
5922 // Add up all the space actually used.
5923 for (unsigned i = 0; i != NumOps; ++i) {
5924 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5925 EVT ArgVT = Outs[i].VT;
5926 EVT OrigVT = Outs[i].ArgVT;
5927
5928 if (Flags.isNest())
5929 continue;
5930
5931 if (CallConv == CallingConv::Fast) {
5932 if (Flags.isByVal()) {
5933 NumGPRsUsed += (Flags.getByValSize()+7)/8;
5934 if (NumGPRsUsed > NumGPRs)
5935 HasParameterArea = true;
5936 } else {
5937 switch (ArgVT.getSimpleVT().SimpleTy) {
5938 default: llvm_unreachable("Unexpected ValueType for argument!");
5939 case MVT::i1:
5940 case MVT::i32:
5941 case MVT::i64:
5942 if (++NumGPRsUsed <= NumGPRs)
5943 continue;
5944 break;
5945 case MVT::v4i32:
5946 case MVT::v8i16:
5947 case MVT::v16i8:
5948 case MVT::v2f64:
5949 case MVT::v2i64:
5950 case MVT::v1i128:
5951 case MVT::f128:
5952 if (++NumVRsUsed <= NumVRs)
5953 continue;
5954 break;
5955 case MVT::v4f32:
5956 // When using QPX, this is handled like a FP register; otherwise, it
5957 // is an Altivec register.
5958 if (Subtarget.hasQPX()) {
5959 if (++NumFPRsUsed <= NumFPRs)
5960 continue;
5961 } else {
5962 if (++NumVRsUsed <= NumVRs)
5963 continue;
5964 }
5965 break;
5966 case MVT::f32:
5967 case MVT::f64:
5968 case MVT::v4f64: // QPX
5969 case MVT::v4i1: // QPX
5970 if (++NumFPRsUsed <= NumFPRs)
5971 continue;
5972 break;
5973 }
5974 HasParameterArea = true;
5975 }
5976 }
5977
5978 /* Respect alignment of argument on the stack. */
5979 unsigned Align =
5980 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5981 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5982
5983 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5984 if (Flags.isInConsecutiveRegsLast())
5985 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5986 }
5987
5988 unsigned NumBytesActuallyUsed = NumBytes;
5989
5990 // In the old ELFv1 ABI,
5991 // the prolog code of the callee may store up to 8 GPR argument registers to
5992 // the stack, allowing va_start to index over them in memory if it is varargs.
5993 // Because we cannot tell if this is needed on the caller side, we have to
5994 // conservatively assume that it is needed. As such, make sure we have at
5995 // least enough stack space for the caller to store the 8 GPRs.
5996 // In the ELFv2 ABI, we allocate the parameter area iff a callee
5997 // really requires memory operands, e.g. a vararg function.
5998 if (HasParameterArea)
5999 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6000 else
6001 NumBytes = LinkageSize;
6002
6003 // Tail call needs the stack to be aligned.
6004 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6005 CallConv == CallingConv::Fast)
6006 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6007
6008 int SPDiff = 0;
6009
6010 // Calculate by how many bytes the stack has to be adjusted in case of tail
6011 // call optimization.
6012 if (!IsSibCall)
6013 SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6014
6015 // To protect arguments on the stack from being clobbered in a tail call,
6016 // force all the loads to happen before doing any other lowering.
6017 if (isTailCall)
6018 Chain = DAG.getStackArgumentTokenFactor(Chain);
6019
6020 // Adjust the stack pointer for the new arguments...
6021 // These operations are automatically eliminated by the prolog/epilog pass
6022 if (!IsSibCall)
6023 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6024 SDValue CallSeqStart = Chain;
6025
6026 // Load the return address and frame pointer so it can be moved somewhere else
6027 // later.
6028 SDValue LROp, FPOp;
6029 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6030
6031 // Set up a copy of the stack pointer for use loading and storing any
6032 // arguments that may not fit in the registers available for argument
6033 // passing.
6034 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6035
6036 // Figure out which arguments are going to go in registers, and which in
6037 // memory. Also, if this is a vararg function, floating point operations
6038 // must be stored to our stack, and loaded into integer regs as well, if
6039 // any integer regs are available for argument passing.
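// Note that a single vararg value may consequently be materialized in up to
// three places: an FPR or VR, one or more shadow GPRs, and its slot in the
// parameter save area; the vararg paths below intentionally populate all of
// them.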
6040 unsigned ArgOffset = LinkageSize;
6041
6042 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6043 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6044
6045 SmallVector<SDValue, 8> MemOpChains;
6046 for (unsigned i = 0; i != NumOps; ++i) {
6047 SDValue Arg = OutVals[i];
6048 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6049 EVT ArgVT = Outs[i].VT;
6050 EVT OrigVT = Outs[i].ArgVT;
6051
6052 // PtrOff will be used to store the current argument to the stack if a
6053 // register cannot be found for it.
6054 SDValue PtrOff;
6055
6056 // We re-align the argument offset for each argument, except under the fast
6057 // calling convention, where we must be careful to re-align only when the
6058 // argument will actually use a stack slot.
6059 auto ComputePtrOff = [&]() {
6060 /* Respect alignment of argument on the stack. */
6061 unsigned Align =
6062 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6063 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
6064
6065 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6066
6067 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6068 };
6069
6070 if (CallConv != CallingConv::Fast) {
6071 ComputePtrOff();
6072
6073 /* Compute GPR index associated with argument offset. */
6074 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6075 GPR_idx = std::min(GPR_idx, NumGPRs);
6076 }
6077
6078 // Promote integers to 64-bit values.
6079 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6080 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6081 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6082 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6083 }
6084
6085 // FIXME memcpy is used way more than necessary. Correctness first.
6086 // Note: "by value" is code for passing a structure by value, not
6087 // basic types.
6088 if (Flags.isByVal()) {
6089 // Note: Size includes alignment padding, so
6090 // struct x { short a; char b; }
6091 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
6092 // These are the proper values we need for right-justifying the
6093 // aggregate in a parameter register.
6094 unsigned Size = Flags.getByValSize();
6095
6096 // An empty aggregate parameter takes up no storage and no
6097 // registers.
6098 if (Size == 0)
6099 continue;
6100
6101 if (CallConv == CallingConv::Fast)
6102 ComputePtrOff();
6103
6104 // All aggregates smaller than 8 bytes must be passed right-justified.
6105 if (Size==1 || Size==2 || Size==4) {
6106 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6107 if (GPR_idx != NumGPRs) {
6108 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6109 MachinePointerInfo(), VT);
6110 MemOpChains.push_back(Load.getValue(1));
6111 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6112
6113 ArgOffset += PtrByteSize;
6114 continue;
6115 }
6116 }
6117
6118 if (GPR_idx == NumGPRs && Size < 8) {
6119 SDValue AddPtr = PtrOff;
6120 if (!isLittleEndian) {
6121 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6122 PtrOff.getValueType());
6123 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6124 }
6125 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6126 CallSeqStart,
6127 Flags, DAG, dl);
6128 ArgOffset += PtrByteSize;
6129 continue;
6130 }
6131 // Copy entire object into memory. There are cases where gcc-generated
6132 // code assumes it is there, even if it could be put entirely into
6133 // registers. (This is not what the doc says.)
6134 6135 // FIXME: The above statement is likely due to a misunderstanding of the 6136 // documents. All arguments must be copied into the parameter area BY 6137 // THE CALLEE in the event that the callee takes the address of any 6138 // formal argument. That has not yet been implemented. However, it is 6139 // reasonable to use the stack area as a staging area for the register 6140 // load. 6141 6142 // Skip this for small aggregates, as we will use the same slot for a 6143 // right-justified copy, below. 6144 if (Size >= 8) 6145 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6146 CallSeqStart, 6147 Flags, DAG, dl); 6148 6149 // When a register is available, pass a small aggregate right-justified. 6150 if (Size < 8 && GPR_idx != NumGPRs) { 6151 // The easiest way to get this right-justified in a register 6152 // is to copy the structure into the rightmost portion of a 6153 // local variable slot, then load the whole slot into the 6154 // register. 6155 // FIXME: The memcpy seems to produce pretty awful code for 6156 // small aggregates, particularly for packed ones. 6157 // FIXME: It would be preferable to use the slot in the 6158 // parameter save area instead of a new local variable. 6159 SDValue AddPtr = PtrOff; 6160 if (!isLittleEndian) { 6161 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType()); 6162 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6163 } 6164 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6165 CallSeqStart, 6166 Flags, DAG, dl); 6167 6168 // Load the slot into the register. 6169 SDValue Load = 6170 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo()); 6171 MemOpChains.push_back(Load.getValue(1)); 6172 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6173 6174 // Done with this argument. 6175 ArgOffset += PtrByteSize; 6176 continue; 6177 } 6178 6179 // For aggregates larger than PtrByteSize, copy the pieces of the 6180 // object that fit into registers from the parameter save area. 6181 for (unsigned j=0; j<Size; j+=PtrByteSize) { 6182 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType()); 6183 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const); 6184 if (GPR_idx != NumGPRs) { 6185 SDValue Load = 6186 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo()); 6187 MemOpChains.push_back(Load.getValue(1)); 6188 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6189 ArgOffset += PtrByteSize; 6190 } else { 6191 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize; 6192 break; 6193 } 6194 } 6195 continue; 6196 } 6197 6198 switch (Arg.getSimpleValueType().SimpleTy) { 6199 default: llvm_unreachable("Unexpected ValueType for argument!"); 6200 case MVT::i1: 6201 case MVT::i32: 6202 case MVT::i64: 6203 if (Flags.isNest()) { 6204 // The 'nest' parameter, if any, is passed in R11. 6205 RegsToPass.push_back(std::make_pair(PPC::X11, Arg)); 6206 hasNest = true; 6207 break; 6208 } 6209 6210 // These can be scalar arguments or elements of an integer array type 6211 // passed directly. Clang may use those instead of "byval" aggregate 6212 // types to avoid forcing arguments to memory unnecessarily. 
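// For example, a homogeneous aggregate such as 'struct { long x, y; }' might
// arrive here as two independent i64 values rather than one byval argument;
// each element is then given its own GPR or parameter-area slot.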
6213 if (GPR_idx != NumGPRs) {
6214 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6215 } else {
6216 if (CallConv == CallingConv::Fast)
6217 ComputePtrOff();
6218
6219 assert(HasParameterArea &&
6220 "Parameter area must exist to pass an argument in memory.");
6221 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6222 true, isTailCall, false, MemOpChains,
6223 TailCallArguments, dl);
6224 if (CallConv == CallingConv::Fast)
6225 ArgOffset += PtrByteSize;
6226 }
6227 if (CallConv != CallingConv::Fast)
6228 ArgOffset += PtrByteSize;
6229 break;
6230 case MVT::f32:
6231 case MVT::f64: {
6232 // These can be scalar arguments or elements of a float array type
6233 // passed directly. The latter are used to implement ELFv2 homogeneous
6234 // float aggregates.
6235
6236 // Named arguments go into FPRs first, and once they overflow, the
6237 // remaining arguments go into GPRs and then the parameter save area.
6238 // Unnamed arguments for vararg functions always go to GPRs and
6239 // then the parameter save area. For now, put all arguments to vararg
6240 // routines always in both locations (FPR *and* GPR or stack slot).
6241 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
6242 bool NeededLoad = false;
6243
6244 // First load the argument into the next available FPR.
6245 if (FPR_idx != NumFPRs)
6246 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6247
6248 // Next, load the argument into GPR or stack slot if needed.
6249 if (!NeedGPROrStack)
6250 ;
6251 else if (GPR_idx != NumGPRs && CallConv != CallingConv::Fast) {
6252 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6253 // once we support fp <-> gpr moves.
6254
6255 // In the non-vararg case, this can only ever happen in the
6256 // presence of f32 array types, since otherwise we never run
6257 // out of FPRs before running out of GPRs.
6258 SDValue ArgVal;
6259
6260 // Double values are always passed in a single GPR.
6261 if (Arg.getValueType() != MVT::f32) {
6262 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6263
6264 // Non-array float values are extended and passed in a GPR.
6265 } else if (!Flags.isInConsecutiveRegs()) {
6266 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6267 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6268
6269 // If we have an array of floats, we collect every odd element
6270 // together with its predecessor into one GPR.
6271 } else if (ArgOffset % PtrByteSize != 0) {
6272 SDValue Lo, Hi;
6273 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6274 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6275 if (!isLittleEndian)
6276 std::swap(Lo, Hi);
6277 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6278
6279 // The final element, if even, goes into the first half of a GPR.
6280 } else if (Flags.isInConsecutiveRegsLast()) {
6281 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6282 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6283 if (!isLittleEndian)
6284 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6285 DAG.getConstant(32, dl, MVT::i32));
6286
6287 // Non-final even elements are skipped; they will be handled
6288 // together with the subsequent argument on the next go-around.
} else
6290 ArgVal = SDValue();
6291
6292 if (ArgVal.getNode())
6293 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6294 } else {
6295 if (CallConv == CallingConv::Fast)
6296 ComputePtrOff();
6297
6298 // Single-precision floating-point values are mapped to the
6299 // second (rightmost) word of the stack doubleword.
6300 if (Arg.getValueType() == MVT::f32 &&
6301 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6302 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6303 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6304 }
6305
6306 assert(HasParameterArea &&
6307 "Parameter area must exist to pass an argument in memory.");
6308 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6309 true, isTailCall, false, MemOpChains,
6310 TailCallArguments, dl);
6311
6312 NeededLoad = true;
6313 }
6314 // When passing an array of floats, the array occupies consecutive
6315 // space in the argument area; only round up to the next doubleword
6316 // at the end of the array. Otherwise, each float takes 8 bytes.
6317 if (CallConv != CallingConv::Fast || NeededLoad) {
6318 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6319 Flags.isInConsecutiveRegs()) ? 4 : 8;
6320 if (Flags.isInConsecutiveRegsLast())
6321 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6322 }
6323 break;
6324 }
6325 case MVT::v4f32:
6326 case MVT::v4i32:
6327 case MVT::v8i16:
6328 case MVT::v16i8:
6329 case MVT::v2f64:
6330 case MVT::v2i64:
6331 case MVT::v1i128:
6332 case MVT::f128:
6333 if (!Subtarget.hasQPX()) {
6334 // These can be scalar arguments or elements of a vector array type
6335 // passed directly. The latter are used to implement ELFv2 homogeneous
6336 // vector aggregates.
6337
6338 // For a varargs call, named arguments go into VRs or on the stack as
6339 // usual; unnamed arguments always go to the stack or the corresponding
6340 // GPRs when within range. For now, we always put the value in both
6341 // locations (or even all three).
6342 if (isVarArg) {
6343 assert(HasParameterArea &&
6344 "Parameter area must exist if we have a varargs call.");
6345 // We could elide this store in the case where the object fits
6346 // entirely in R registers. Maybe later.
6347 SDValue Store =
6348 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6349 MemOpChains.push_back(Store);
6350 if (VR_idx != NumVRs) {
6351 SDValue Load =
6352 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6353 MemOpChains.push_back(Load.getValue(1));
6354 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6355 }
6356 ArgOffset += 16;
6357 for (unsigned i=0; i<16; i+=PtrByteSize) {
6358 if (GPR_idx == NumGPRs)
6359 break;
6360 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6361 DAG.getConstant(i, dl, PtrVT));
6362 SDValue Load =
6363 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6364 MemOpChains.push_back(Load.getValue(1));
6365 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6366 }
6367 break;
6368 }
6369
6370 // Non-varargs Altivec params go into VRs or on the stack.
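// (Only V2-V13 are available for argument passing; a vector argument that
// must go to memory occupies a 16-byte, 16-byte-aligned parameter-area
// slot.)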
6371 if (VR_idx != NumVRs) { 6372 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg)); 6373 } else { 6374 if (CallConv == CallingConv::Fast) 6375 ComputePtrOff(); 6376 6377 assert(HasParameterArea && 6378 "Parameter area must exist to pass an argument in memory."); 6379 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6380 true, isTailCall, true, MemOpChains, 6381 TailCallArguments, dl); 6382 if (CallConv == CallingConv::Fast) 6383 ArgOffset += 16; 6384 } 6385 6386 if (CallConv != CallingConv::Fast) 6387 ArgOffset += 16; 6388 break; 6389 } // not QPX 6390 6391 assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 && 6392 "Invalid QPX parameter type"); 6393 6394 LLVM_FALLTHROUGH; 6395 case MVT::v4f64: 6396 case MVT::v4i1: { 6397 bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32; 6398 if (isVarArg) { 6399 assert(HasParameterArea && 6400 "Parameter area must exist if we have a varargs call."); 6401 // We could elide this store in the case where the object fits 6402 // entirely in R registers. Maybe later. 6403 SDValue Store = 6404 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()); 6405 MemOpChains.push_back(Store); 6406 if (QFPR_idx != NumQFPRs) { 6407 SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store, 6408 PtrOff, MachinePointerInfo()); 6409 MemOpChains.push_back(Load.getValue(1)); 6410 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load)); 6411 } 6412 ArgOffset += (IsF32 ? 16 : 32); 6413 for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) { 6414 if (GPR_idx == NumGPRs) 6415 break; 6416 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, 6417 DAG.getConstant(i, dl, PtrVT)); 6418 SDValue Load = 6419 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo()); 6420 MemOpChains.push_back(Load.getValue(1)); 6421 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6422 } 6423 break; 6424 } 6425 6426 // Non-varargs QPX params go into registers or on the stack. 6427 if (QFPR_idx != NumQFPRs) { 6428 RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg)); 6429 } else { 6430 if (CallConv == CallingConv::Fast) 6431 ComputePtrOff(); 6432 6433 assert(HasParameterArea && 6434 "Parameter area must exist to pass an argument in memory."); 6435 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset, 6436 true, isTailCall, true, MemOpChains, 6437 TailCallArguments, dl); 6438 if (CallConv == CallingConv::Fast) 6439 ArgOffset += (IsF32 ? 16 : 32); 6440 } 6441 6442 if (CallConv != CallingConv::Fast) 6443 ArgOffset += (IsF32 ? 16 : 32); 6444 break; 6445 } 6446 } 6447 } 6448 6449 assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) && 6450 "mismatch in size of parameter area"); 6451 (void)NumBytesActuallyUsed; 6452 6453 if (!MemOpChains.empty()) 6454 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); 6455 6456 // Check if this is an indirect call (MTCTR/BCTRL). 6457 // See prepareDescriptorIndirectCall and buildCallOperands for more 6458 // information about calls through function pointers in the 64-bit SVR4 ABI. 6459 if (!isTailCall && !isPatchPoint && 6460 !isFunctionGlobalAddress(Callee) && 6461 !isa<ExternalSymbolSDNode>(Callee)) { 6462 // Load r2 into a virtual register and store it to the TOC save area. 6463 setUsesTOCBasePtr(DAG); 6464 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); 6465 // TOC save area offset. 
6466 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6467 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6468 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6469 Chain = DAG.getStore(
6470 Val.getValue(1), dl, Val, AddPtr,
6471 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
6472 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6473 // This does not mean the MTCTR instruction must use R12; it's easier
6474 // to model this as an extra parameter, so do that.
6475 if (isELFv2ABI && !isPatchPoint)
6476 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6477 }
6478
6479 // Build a sequence of copy-to-reg nodes chained together with token chain
6480 // and flag operands which copy the outgoing args into the appropriate regs.
6481 SDValue InFlag;
6482 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6483 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6484 RegsToPass[i].second, InFlag);
6485 InFlag = Chain.getValue(1);
6486 }
6487
6488 if (isTailCall && !IsSibCall)
6489 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6490 TailCallArguments);
6491
6492 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
6493 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
6494 SPDiff, NumBytes, Ins, InVals, CS);
6495 }
6496
6497 SDValue PPCTargetLowering::LowerCall_Darwin(
6498 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
6499 bool isTailCall, bool isPatchPoint,
6500 const SmallVectorImpl<ISD::OutputArg> &Outs,
6501 const SmallVectorImpl<SDValue> &OutVals,
6502 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6503 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6504 ImmutableCallSite CS) const {
6505 unsigned NumOps = Outs.size();
6506
6507 EVT PtrVT = getPointerTy(DAG.getDataLayout());
6508 bool isPPC64 = PtrVT == MVT::i64;
6509 unsigned PtrByteSize = isPPC64 ? 8 : 4;
6510
6511 MachineFunction &MF = DAG.getMachineFunction();
6512
6513 // Mark this function as potentially containing a function that contains a
6514 // tail call. As a consequence, the frame pointer will be used for dynamic
6515 // stack allocation and for restoring the caller's stack pointer in this
6516 // function's epilogue. This is done because a tail-called function might
6517 // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
6518 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6519 CallConv == CallingConv::Fast)
6520 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6521
6522 // Count how many bytes are to be pushed on the stack, including the linkage
6523 // area, and parameter passing area. We start with 24/48 bytes, which is
6524 // pre-reserved space for [SP][CR][LR][3 x unused].
6525 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6526 unsigned NumBytes = LinkageSize;
6527
6528 // Add up all the space actually used.
6529 // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6530 // they all go in registers, but we must reserve stack space for them for
6531 // possible use by the caller. In varargs or 64-bit calls, parameters are
6532 // assigned stack space in order, with padding so Altivec parameters are
6533 // 16-byte aligned.
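// Example: for 'f(int a, vector int v, int b)' in a 32-bit non-varargs call,
// 'v' is counted in nAltivecParamsAtEnd and its 16-byte slot is reserved
// after the slots for 'a' and 'b', keeping the scalar portion of the
// parameter area densely packed.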
6534 unsigned nAltivecParamsAtEnd = 0;
6535 for (unsigned i = 0; i != NumOps; ++i) {
6536 ISD::ArgFlagsTy Flags = Outs[i].Flags;
6537 EVT ArgVT = Outs[i].VT;
6538 // Varargs Altivec parameters are padded to a 16-byte boundary.
6539 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6540 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6541 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6542 if (!isVarArg && !isPPC64) {
6543 // Non-varargs Altivec parameters go after all the non-Altivec
6544 // parameters; handle those later so we know how much padding we need.
6545 nAltivecParamsAtEnd++;
6546 continue;
6547 }
6548 // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
6549 NumBytes = ((NumBytes+15)/16)*16;
6550 }
6551 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6552 }
6553
6554 // Allow for Altivec parameters at the end, if needed.
6555 if (nAltivecParamsAtEnd) {
6556 NumBytes = ((NumBytes+15)/16)*16;
6557 NumBytes += 16*nAltivecParamsAtEnd;
6558 }
6559
6560 // The prolog code of the callee may store up to 8 GPR argument registers to
6561 // the stack, allowing va_start to index over them in memory if it is varargs.
6562 // Because we cannot tell if this is needed on the caller side, we have to
6563 // conservatively assume that it is needed. As such, make sure we have at
6564 // least enough stack space for the caller to store the 8 GPRs.
6565 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6566
6567 // Tail call needs the stack to be aligned.
6568 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6569 CallConv == CallingConv::Fast)
6570 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6571
6572 // Calculate by how many bytes the stack has to be adjusted in case of tail
6573 // call optimization.
6574 int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
6575
6576 // To protect arguments on the stack from being clobbered in a tail call,
6577 // force all the loads to happen before doing any other lowering.
6578 if (isTailCall)
6579 Chain = DAG.getStackArgumentTokenFactor(Chain);
6580
6581 // Adjust the stack pointer for the new arguments...
6582 // These operations are automatically eliminated by the prolog/epilog pass
6583 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6584 SDValue CallSeqStart = Chain;
6585
6586 // Load the return address and frame pointer so it can be moved somewhere else
6587 // later.
6588 SDValue LROp, FPOp;
6589 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6590
6591 // Set up a copy of the stack pointer for use loading and storing any
6592 // arguments that may not fit in the registers available for argument
6593 // passing.
6594 SDValue StackPtr;
6595 if (isPPC64)
6596 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6597 else
6598 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6599
6600 // Figure out which arguments are going to go in registers, and which in
6601 // memory. Also, if this is a vararg function, floating point operations
6602 // must be stored to our stack, and loaded into integer regs as well, if
6603 // any integer regs are available for argument passing.
6604 unsigned ArgOffset = LinkageSize;
6605 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6606
6607 static const MCPhysReg GPR_32[] = { // 32-bit registers.
6608 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6609 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6610 };
6611 static const MCPhysReg GPR_64[] = { // 64-bit registers.
6612 PPC::X3, PPC::X4, PPC::X5, PPC::X6, 6613 PPC::X7, PPC::X8, PPC::X9, PPC::X10, 6614 }; 6615 static const MCPhysReg VR[] = { 6616 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, 6617 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 6618 }; 6619 const unsigned NumGPRs = array_lengthof(GPR_32); 6620 const unsigned NumFPRs = 13; 6621 const unsigned NumVRs = array_lengthof(VR); 6622 6623 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32; 6624 6625 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 6626 SmallVector<TailCallArgumentInfo, 8> TailCallArguments; 6627 6628 SmallVector<SDValue, 8> MemOpChains; 6629 for (unsigned i = 0; i != NumOps; ++i) { 6630 SDValue Arg = OutVals[i]; 6631 ISD::ArgFlagsTy Flags = Outs[i].Flags; 6632 6633 // PtrOff will be used to store the current argument to the stack if a 6634 // register cannot be found for it. 6635 SDValue PtrOff; 6636 6637 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType()); 6638 6639 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 6640 6641 // On PPC64, promote integers to 64-bit values. 6642 if (isPPC64 && Arg.getValueType() == MVT::i32) { 6643 // FIXME: Should this use ANY_EXTEND if neither sext nor zext? 6644 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 6645 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); 6646 } 6647 6648 // FIXME memcpy is used way more than necessary. Correctness first. 6649 // Note: "by value" is code for passing a structure by value, not 6650 // basic types. 6651 if (Flags.isByVal()) { 6652 unsigned Size = Flags.getByValSize(); 6653 // Very small objects are passed right-justified. Everything else is 6654 // passed left-justified. 6655 if (Size==1 || Size==2) { 6656 EVT VT = (Size==1) ? MVT::i8 : MVT::i16; 6657 if (GPR_idx != NumGPRs) { 6658 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, 6659 MachinePointerInfo(), VT); 6660 MemOpChains.push_back(Load.getValue(1)); 6661 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); 6662 6663 ArgOffset += PtrByteSize; 6664 } else { 6665 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl, 6666 PtrOff.getValueType()); 6667 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); 6668 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, 6669 CallSeqStart, 6670 Flags, DAG, dl); 6671 ArgOffset += PtrByteSize; 6672 } 6673 continue; 6674 } 6675 // Copy entire object into memory. There are cases where gcc-generated 6676 // code assumes it is there, even if it could be put entirely into 6677 // registers. (This is not what the doc says.) 6678 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, 6679 CallSeqStart, 6680 Flags, DAG, dl); 6681 6682 // For small aggregates (Darwin only) and aggregates >= PtrByteSize, 6683 // copy the pieces of the object that fit into registers from the 6684 // parameter save area. 
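// E.g. a 12-byte aggregate on 32-bit Darwin spans three words: the loop
// below loads each word into the next free GPR, and once the GPRs run out
// the remaining words simply stay in the parameter-area copy made above.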
6685 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6686 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6687 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6688 if (GPR_idx != NumGPRs) {
6689 SDValue Load =
6690 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6691 MemOpChains.push_back(Load.getValue(1));
6692 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6693 ArgOffset += PtrByteSize;
6694 } else {
6695 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6696 break;
6697 }
6698 }
6699 continue;
6700 }
6701
6702 switch (Arg.getSimpleValueType().SimpleTy) {
6703 default: llvm_unreachable("Unexpected ValueType for argument!");
6704 case MVT::i1:
6705 case MVT::i32:
6706 case MVT::i64:
6707 if (GPR_idx != NumGPRs) {
6708 if (Arg.getValueType() == MVT::i1)
6709 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6710
6711 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6712 } else {
6713 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6714 isPPC64, isTailCall, false, MemOpChains,
6715 TailCallArguments, dl);
6716 }
6717 ArgOffset += PtrByteSize;
6718 break;
6719 case MVT::f32:
6720 case MVT::f64:
6721 if (FPR_idx != NumFPRs) {
6722 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6723
6724 if (isVarArg) {
6725 SDValue Store =
6726 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6727 MemOpChains.push_back(Store);
6728
6729 // Float varargs are always shadowed in available integer registers
6730 if (GPR_idx != NumGPRs) {
6731 SDValue Load =
6732 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6733 MemOpChains.push_back(Load.getValue(1));
6734 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6735 }
6736 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6737 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6738 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6739 SDValue Load =
6740 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6741 MemOpChains.push_back(Load.getValue(1));
6742 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6743 }
6744 } else {
6745 // If we have any FPRs remaining, we may also have GPRs remaining.
6746 // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6747 // GPRs.
6748 if (GPR_idx != NumGPRs)
6749 ++GPR_idx;
6750 if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6751 !isPPC64) // PPC64 has 64-bit GPRs obviously :)
6752 ++GPR_idx;
6753 }
6754 } else
6755 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6756 isPPC64, isTailCall, false, MemOpChains,
6757 TailCallArguments, dl);
6758 if (isPPC64)
6759 ArgOffset += 8;
6760 else
6761 ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6762 break;
6763 case MVT::v4f32:
6764 case MVT::v4i32:
6765 case MVT::v8i16:
6766 case MVT::v16i8:
6767 if (isVarArg) {
6768 // These go aligned on the stack, or in the corresponding R registers
6769 // when within range. The Darwin PPC ABI doc claims they also go in
6770 // V registers; in fact gcc does this only for arguments that are
6771 // prototyped, not for those that match the ... We do it for all
6772 // arguments; it seems to work.
6773 while (ArgOffset % 16 != 0) {
6774 ArgOffset += PtrByteSize;
6775 if (GPR_idx != NumGPRs)
6776 GPR_idx++;
6777 }
6778 // We could elide this store in the case where the object fits
6779 // entirely in R registers. Maybe later.
6780 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6781 DAG.getConstant(ArgOffset, dl, PtrVT));
6782 SDValue Store =
6783 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6784 MemOpChains.push_back(Store);
6785 if (VR_idx != NumVRs) {
6786 SDValue Load =
6787 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6788 MemOpChains.push_back(Load.getValue(1));
6789 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6790 }
6791 ArgOffset += 16;
6792 for (unsigned i=0; i<16; i+=PtrByteSize) {
6793 if (GPR_idx == NumGPRs)
6794 break;
6795 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6796 DAG.getConstant(i, dl, PtrVT));
6797 SDValue Load =
6798 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6799 MemOpChains.push_back(Load.getValue(1));
6800 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6801 }
6802 break;
6803 }
6804
6805 // Non-varargs Altivec params generally go in registers, but have
6806 // stack space allocated at the end.
6807 if (VR_idx != NumVRs) {
6808 // Doesn't have GPR space allocated.
6809 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6810 } else if (nAltivecParamsAtEnd==0) {
6811 // We are emitting Altivec params in order.
6812 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6813 isPPC64, isTailCall, true, MemOpChains,
6814 TailCallArguments, dl);
6815 ArgOffset += 16;
6816 }
6817 break;
6818 }
6819 }
6820 // If all Altivec parameters fit in registers, as they usually do,
6821 // they get stack space following the non-Altivec parameters. We
6822 // don't track this here because nobody below needs it.
6823 // If there are more Altivec parameters than fit in registers, emit
6824 // the stores here.
6825 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
6826 unsigned j = 0;
6827 // Offset is aligned; skip the first 12 params, which go in V registers.
6828 ArgOffset = ((ArgOffset+15)/16)*16;
6829 ArgOffset += 12*16;
6830 for (unsigned i = 0; i != NumOps; ++i) {
6831 SDValue Arg = OutVals[i];
6832 EVT ArgType = Outs[i].VT;
6833 if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
6834 ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
6835 if (++j > NumVRs) {
6836 SDValue PtrOff;
6837 // We are emitting Altivec params in order.
6838 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6839 isPPC64, isTailCall, true, MemOpChains,
6840 TailCallArguments, dl);
6841 ArgOffset += 16;
6842 }
6843 }
6844 }
6845 }
6846
6847 if (!MemOpChains.empty())
6848 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6849
6850 // On Darwin, R12 must contain the address of an indirect callee. This does
6851 // not mean the MTCTR instruction must use R12; it's easier to model this as
6852 // an extra parameter, so do that.
6853 if (!isTailCall &&
6854 !isFunctionGlobalAddress(Callee) &&
6855 !isa<ExternalSymbolSDNode>(Callee) &&
6856 !isBLACompatibleAddress(Callee, DAG))
6857 RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
6858 PPC::R12), Callee));
6859
6860 // Build a sequence of copy-to-reg nodes chained together with token chain
6861 // and flag operands which copy the outgoing args into the appropriate regs.
6862 SDValue InFlag;
6863 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6864 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6865 RegsToPass[i].second, InFlag);
6866 InFlag = Chain.getValue(1);
6867 }
6868
6869 if (isTailCall)
6870 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6871 TailCallArguments);
6872
6873 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6874 /* unused except on PPC64 ELFv1 */ false, DAG,
6875 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6876 NumBytes, Ins, InVals, CS);
6877 }
6878
6879 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6880 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6881 CCState &State) {
6882
6883 if (ValVT == MVT::f128)
6884 report_fatal_error("f128 is unimplemented on AIX.");
6885
6886 if (ArgFlags.isByVal())
6887 report_fatal_error("Passing structure by value is unimplemented.");
6888
6889 if (ArgFlags.isNest())
6890 report_fatal_error("Nest arguments are unimplemented.");
6891
6892 if (ValVT.isVector() || LocVT.isVector())
6893 report_fatal_error("Vector arguments are unimplemented on AIX.");
6894
6895 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6896 State.getMachineFunction().getSubtarget());
6897 const bool IsPPC64 = Subtarget.isPPC64();
6898 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6899
6900 static const MCPhysReg GPR_32[] = {// 32-bit registers.
6901 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6902 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6903 static const MCPhysReg GPR_64[] = {// 64-bit registers.
6904 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6905 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6906
6907 // Arguments always reserve the parameter save area.
6908 switch (ValVT.SimpleTy) {
6909 default:
6910 report_fatal_error("Unhandled value type for argument.");
6911 case MVT::i64:
6912 // i64 arguments should have been split to i32 for PPC32.
6913 assert(IsPPC64 && "PPC32 should have split i64 values.");
6914 LLVM_FALLTHROUGH;
6915 case MVT::i1:
6916 case MVT::i32:
6917 State.AllocateStack(PtrByteSize, PtrByteSize);
6918 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6919 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6920 // Promote integers if needed.
6921 if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
6922 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6923 : CCValAssign::LocInfo::ZExt;
6924 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6925 }
6926 else
6927 report_fatal_error("Handling of placing parameters on the stack is "
6928 "unimplemented!");
6929 return false;
6930
6931 case MVT::f32:
6932 case MVT::f64: {
6933 // The parameter save area (PSA) is reserved even if the float is passed
// in an FPR.
6934 const unsigned StoreSize = LocVT.getStoreSize();
6935 // Floats are always 4-byte aligned in the PSA on AIX.
6936 // This includes f64 in 64-bit mode for ABI compatibility.
6937 State.AllocateStack(IsPPC64 ? 8 : StoreSize, 4);
6938 if (unsigned Reg = State.AllocateReg(FPR))
6939 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
6940 else
6941 report_fatal_error("Handling of placing parameters on the stack is "
6942 "unimplemented!");
6943
6944 // AIX requires that GPRs are reserved for float arguments.
6945 // Successfully reserved GPRs are only initialized for vararg calls.
6946 MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6947 for (unsigned I = 0; I < StoreSize; I += PtrByteSize) {
6948 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6949 if (State.isVarArg()) {
6950 // Custom handling is required for:
6951 // f64 in PPC32, which needs to be split into 2 GPRs.
6952 // f32 in PPC64, which needs to occupy only the lower 32 bits of a
// 64-bit GPR.
6953 State.addLoc(
6954 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6955 }
6956 } else if (State.isVarArg()) {
6957 report_fatal_error("Handling of placing parameters on the stack is "
6958 "unimplemented!");
6959 }
6960 }
6961
6962 return false;
6963 }
6964 }
6965 return true;
6966 }
6967
6968 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6969 bool IsPPC64) {
6970 assert((IsPPC64 || SVT != MVT::i64) &&
6971 "i64 should have been split for 32-bit codegen.");
6972
6973 switch (SVT) {
6974 default:
6975 report_fatal_error("Unexpected value type for formal argument");
6976 case MVT::i1:
6977 case MVT::i32:
6978 case MVT::i64:
6979 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6980 case MVT::f32:
6981 return &PPC::F4RCRegClass;
6982 case MVT::f64:
6983 return &PPC::F8RCRegClass;
6984 }
6985 }
6986
6987 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6988 SelectionDAG &DAG, SDValue ArgValue,
6989 MVT LocVT, const SDLoc &dl) {
6990 assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6991 assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
6992
6993 if (Flags.isSExt())
6994 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6995 DAG.getValueType(ValVT));
6996 else if (Flags.isZExt())
6997 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6998 DAG.getValueType(ValVT));
6999
7000 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7001 }
7002
7003 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7004 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7005 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7006 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7007
7008 assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7009 CallConv == CallingConv::Fast) &&
7010 "Unexpected calling convention!");
7011
7012 if (isVarArg)
7013 report_fatal_error("This call type is unimplemented on AIX.");
7014
7015 if (getTargetMachine().Options.GuaranteedTailCallOpt)
7016 report_fatal_error("Tail call support is unimplemented on AIX.");
7017
7018 if (useSoftFloat())
7019 report_fatal_error("Soft float support is unimplemented on AIX.");
7020
7021 const PPCSubtarget &Subtarget =
7022 static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7023 if (Subtarget.hasQPX())
7024 report_fatal_error("QPX is not supported on AIX.");
7025
7026 const bool IsPPC64 = Subtarget.isPPC64();
7027 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7028
7029 // Assign locations to all of the incoming arguments.
7030 SmallVector<CCValAssign, 16> ArgLocs;
7031 MachineFunction &MF = DAG.getMachineFunction();
7032 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7033
7034 // Reserve space for the linkage area on the stack.
7035 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7036 // On AIX a minimum of 8 words is saved to the parameter save area.
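// With 8 words reserved, that minimum is 32 bytes in 32-bit mode and 64
// bytes in 64-bit mode (8 * PtrByteSize below).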
7037 const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7038 CCInfo.AllocateStack(LinkageSize + MinParameterSaveArea, PtrByteSize);
7039 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7040
7041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
7042 CCValAssign &VA = ArgLocs[i];
7043 SDValue ArgValue;
7044 ISD::ArgFlagsTy Flags = Ins[i].Flags;
7045 if (VA.isRegLoc()) {
7046 EVT ValVT = VA.getValVT();
7047 MVT LocVT = VA.getLocVT();
7048 MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7049 unsigned VReg =
7050 MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7051 ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7052 if (ValVT.isScalarInteger() &&
7053 (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7054 ArgValue =
7055 truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7056 }
7057 InVals.push_back(ArgValue);
7058 } else {
7059 report_fatal_error("Handling of formal arguments on the stack is "
7060 "unimplemented!");
7061 }
7062 }
7063
7064 // Area that is at least reserved in the caller of this function.
7065 unsigned MinReservedArea = CCInfo.getNextStackOffset();
7066
7067 // Set the size that is at least reserved in the caller of this function. A
7068 // tail-call-optimized function's reserved stack space needs to be aligned so
7069 // that taking the difference between two stack areas will result in an
7070 // aligned stack.
7071 MinReservedArea =
7072 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
7073 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7074 FuncInfo->setMinReservedArea(MinReservedArea);
7075
7076 return Chain;
7077 }
7078
7079 SDValue PPCTargetLowering::LowerCall_AIX(
7080 SDValue Chain, SDValue Callee, CallingConv::ID CallConv, bool isVarArg,
7081 bool isTailCall, bool isPatchPoint,
7082 const SmallVectorImpl<ISD::OutputArg> &Outs,
7083 const SmallVectorImpl<SDValue> &OutVals,
7084 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7085 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7086 ImmutableCallSite CS) const {
7087
7088 assert((CallConv == CallingConv::C ||
7089 CallConv == CallingConv::Cold ||
7090 CallConv == CallingConv::Fast) && "Unexpected calling convention!");
7091
7092 if (isPatchPoint)
7093 report_fatal_error("This call type is unimplemented on AIX.");
7094
7095 const PPCSubtarget& Subtarget =
7096 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7097 if (Subtarget.hasQPX())
7098 report_fatal_error("QPX is not supported on AIX.");
7099 if (Subtarget.hasAltivec())
7100 report_fatal_error("Altivec support is unimplemented on AIX.");
7101
7102 MachineFunction &MF = DAG.getMachineFunction();
7103 SmallVector<CCValAssign, 16> ArgLocs;
7104 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7105
7106 // Reserve space for the linkage save area (LSA) on the stack.
7107 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7108 // [SP][CR][LR][2 x reserved][TOC].
7109 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7110 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7111 const bool IsPPC64 = Subtarget.isPPC64();
7112 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7113 CCInfo.AllocateStack(LinkageSize, PtrByteSize);
7114 CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7115
7116 // The prolog code of the callee may store up to 8 GPR argument registers to
7117 // the stack, allowing va_start to index over them in memory if the callee
7118 // is variadic.
7119 // Because we cannot tell if this is needed on the caller side, we have to 7120 // conservatively assume that it is needed. As such, make sure we have at 7121 // least enough stack space for the caller to store the 8 GPRs. 7122 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize; 7123 const unsigned NumBytes = LinkageSize + MinParameterSaveAreaSize; 7124 7125 // Adjust the stack pointer for the new arguments... 7126 // These operations are automatically eliminated by the prolog/epilog pass. 7127 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl); 7128 SDValue CallSeqStart = Chain; 7129 7130 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; 7131 7132 for (unsigned I = 0, E = ArgLocs.size(); I != E;) { 7133 CCValAssign &VA = ArgLocs[I++]; 7134 7135 if (VA.isMemLoc()) 7136 report_fatal_error("Handling of placing parameters on the stack is " 7137 "unimplemented!"); 7138 if (!VA.isRegLoc()) 7139 report_fatal_error( 7140 "Unexpected non-register location for function call argument."); 7141 7142 SDValue Arg = OutVals[VA.getValNo()]; 7143 7144 if (!VA.needsCustom()) { 7145 switch (VA.getLocInfo()) { 7146 default: 7147 report_fatal_error("Unexpected argument extension type."); 7148 case CCValAssign::Full: 7149 break; 7150 case CCValAssign::ZExt: 7151 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 7152 break; 7153 case CCValAssign::SExt: 7154 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 7155 break; 7156 } 7157 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); 7158 7159 continue; 7160 } 7161 7162 // Custom handling is used for GPR initializations for vararg float 7163 // arguments. 7164 assert(isVarArg && VA.getValVT().isFloatingPoint() && 7165 VA.getLocVT().isInteger() && 7166 "Unexpected custom register handling for calling convention."); 7167 7168 SDValue ArgAsInt = 7169 DAG.getBitcast(MVT::getIntegerVT(VA.getValVT().getSizeInBits()), Arg); 7170 7171 if (Arg.getValueType().getStoreSize() == VA.getLocVT().getStoreSize()) 7172 // f32 in 32-bit GPR 7173 // f64 in 64-bit GPR 7174 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt)); 7175 else if (Arg.getValueType().getSizeInBits() < VA.getLocVT().getSizeInBits()) 7176 // f32 in 64-bit GPR. 7177 RegsToPass.push_back(std::make_pair( 7178 VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, VA.getLocVT()))); 7179 else { 7180 // f64 in two 32-bit GPRs 7181 // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs. 7182 assert(Arg.getValueType() == MVT::f64 && isVarArg && !IsPPC64 && 7183 "Unexpected custom register for argument!"); 7184 CCValAssign &GPR1 = VA; 7185 SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt, 7186 DAG.getConstant(32, dl, MVT::i8)); 7187 RegsToPass.push_back(std::make_pair( 7188 GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32))); 7189 assert(I != E && "A second custom GPR is expected!"); 7190 CCValAssign &GPR2 = ArgLocs[I++]; 7191 assert(GPR2.isRegLoc() && GPR2.getValNo() == GPR1.getValNo() && 7192 GPR2.needsCustom() && "A second custom GPR is expected!"); 7193 RegsToPass.push_back(std::make_pair( 7194 GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32))); 7195 } 7196 } 7197 7198 // For indirect calls, we need to save the TOC base to the stack for 7199 // restoration after the call. 
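// An indirect callee may live in a module with a different TOC, and the call
// sequence will clobber the TOC register; spilling our TOC pointer to its
// dedicated linkage-area slot lets it be reloaded after the call returns.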
7200 if (!isTailCall && !isPatchPoint && 7201 !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee)) { 7202 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister(); 7203 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister(); 7204 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32; 7205 const unsigned TOCSaveOffset = 7206 Subtarget.getFrameLowering()->getTOCSaveOffset(); 7207 7208 setUsesTOCBasePtr(DAG); 7209 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT); 7210 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl); 7211 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT); 7212 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); 7213 Chain = DAG.getStore( 7214 Val.getValue(1), dl, Val, AddPtr, 7215 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset)); 7216 } 7217 7218 // Build a sequence of copy-to-reg nodes chained together with token chain 7219 // and flag operands which copy the outgoing args into the appropriate regs. 7220 SDValue InFlag; 7221 for (auto Reg : RegsToPass) { 7222 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag); 7223 InFlag = Chain.getValue(1); 7224 } 7225 7226 const int SPDiff = 0; 7227 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, 7228 /* unused except on PPC64 ELFv1 */ false, DAG, RegsToPass, 7229 InFlag, Chain, CallSeqStart, Callee, SPDiff, NumBytes, Ins, 7230 InVals, CS); 7231 } 7232 7233 bool 7234 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv, 7235 MachineFunction &MF, bool isVarArg, 7236 const SmallVectorImpl<ISD::OutputArg> &Outs, 7237 LLVMContext &Context) const { 7238 SmallVector<CCValAssign, 16> RVLocs; 7239 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); 7240 return CCInfo.CheckReturn( 7241 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7242 ? RetCC_PPC_Cold 7243 : RetCC_PPC); 7244 } 7245 7246 SDValue 7247 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 7248 bool isVarArg, 7249 const SmallVectorImpl<ISD::OutputArg> &Outs, 7250 const SmallVectorImpl<SDValue> &OutVals, 7251 const SDLoc &dl, SelectionDAG &DAG) const { 7252 SmallVector<CCValAssign, 16> RVLocs; 7253 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, 7254 *DAG.getContext()); 7255 CCInfo.AnalyzeReturn(Outs, 7256 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold) 7257 ? RetCC_PPC_Cold 7258 : RetCC_PPC); 7259 7260 SDValue Flag; 7261 SmallVector<SDValue, 4> RetOps(1, Chain); 7262 7263 // Copy the result values into the output registers. 7264 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) { 7265 CCValAssign &VA = RVLocs[i]; 7266 assert(VA.isRegLoc() && "Can only return in registers!"); 7267 7268 SDValue Arg = OutVals[RealResIdx]; 7269 7270 switch (VA.getLocInfo()) { 7271 default: llvm_unreachable("Unknown loc info!"); 7272 case CCValAssign::Full: break; 7273 case CCValAssign::AExt: 7274 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); 7275 break; 7276 case CCValAssign::ZExt: 7277 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); 7278 break; 7279 case CCValAssign::SExt: 7280 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); 7281 break; 7282 } 7283 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) { 7284 bool isLittleEndian = Subtarget.isLittleEndian(); 7285 // Legalize ret f64 -> ret 2 x i32. 7286 SDValue SVal = 7287 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg, 7288 DAG.getIntPtrConstant(isLittleEndian ? 
                                             0 : 1, dl));
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
                         DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
      Flag = Chain.getValue(1);
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (PPC::G8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else if (PPC::F8RCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else if (PPC::CRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i1));
      else if (PPC::VRRCRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::Other));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
}

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc dl(Op);

  // Get the correct type for integers.
  EVT IntVT = Op.getValueType();

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNAREAOFFSET node.
  SDValue Ops[2] = {Chain, FPSIdx};
  SDVTList VTs = DAG.getVTList(IntVT);
  return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                             SelectionDAG &DAG) const {
  // When we pop the dynamic allocation we need to restore the SP link.
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  // Construct the stack pointer operand.
  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
  SDValue StackPtr = DAG.getRegister(SP, PtrVT);

  // Get the operands for the STACKRESTORE.
  SDValue Chain = Op.getOperand(0);
  SDValue SaveSP = Op.getOperand(1);

  // Load the old link SP.
  SDValue LoadLinkSP =
      DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());

  // Restore the stack pointer.
  Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);

  // Store the old link SP.
  return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current return address save index.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int RASI = FI->getReturnAddrSaveIndex();

  // If the return address save index hasn't been defined yet.
  if (!RASI) {
    // Find out the fixed offset of the return address save area.
    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
    // Allocate the frame index for the return address save area.
    RASI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, LROffset, false);
    // Save the result.
    FI->setReturnAddrSaveIndex(RASI);
  }
  return DAG.getFrameIndex(RASI, PtrVT);
}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Get the current frame pointer save index. The users of this index will be
  // primarily DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();

  // If the frame pointer save index hasn't been defined yet.
  if (!FPSI) {
    // Find out the fixed offset of the frame pointer save area.
    int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
    // Allocate the frame index for the frame pointer save area.
    FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
    // Save the result.
    FI->setFramePointerSaveIndex(FPSI);
  }
  return DAG.getFrameIndex(FPSI, PtrVT);
}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {
  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  // Get the correct type for pointers.
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Negate the size.
  SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
                                DAG.getConstant(0, dl, PtrVT), Size);
  // Construct a node for the frame pointer save index.
  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  // Build a DYNALLOC node.
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };
  SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
  return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
}

SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  bool isPPC64 = Subtarget.isPPC64();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
  return DAG.getFrameIndex(FI, PtrVT);
}

SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType().isVector())
    return LowerVectorLoad(Op, DAG);

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 loads");

  // First, load 8 bits into 32 bits, then truncate to 1 bit.
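  // That is, (i1 (load p)) becomes (truncate (extload p, i8)): the extending
  // load produces a pointer-width integer, and both the truncated value and
  // the new load's chain are returned via getMergeValues below.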
7474 7475 SDLoc dl(Op); 7476 LoadSDNode *LD = cast<LoadSDNode>(Op); 7477 7478 SDValue Chain = LD->getChain(); 7479 SDValue BasePtr = LD->getBasePtr(); 7480 MachineMemOperand *MMO = LD->getMemOperand(); 7481 7482 SDValue NewLD = 7483 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain, 7484 BasePtr, MVT::i8, MMO); 7485 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD); 7486 7487 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) }; 7488 return DAG.getMergeValues(Ops, dl); 7489 } 7490 7491 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { 7492 if (Op.getOperand(1).getValueType().isVector()) 7493 return LowerVectorStore(Op, DAG); 7494 7495 assert(Op.getOperand(1).getValueType() == MVT::i1 && 7496 "Custom lowering only for i1 stores"); 7497 7498 // First, zero extend to 32 bits, then use a truncating store to 8 bits. 7499 7500 SDLoc dl(Op); 7501 StoreSDNode *ST = cast<StoreSDNode>(Op); 7502 7503 SDValue Chain = ST->getChain(); 7504 SDValue BasePtr = ST->getBasePtr(); 7505 SDValue Value = ST->getValue(); 7506 MachineMemOperand *MMO = ST->getMemOperand(); 7507 7508 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()), 7509 Value); 7510 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO); 7511 } 7512 7513 // FIXME: Remove this once the ANDI glue bug is fixed: 7514 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { 7515 assert(Op.getValueType() == MVT::i1 && 7516 "Custom lowering only for i1 results"); 7517 7518 SDLoc DL(Op); 7519 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0)); 7520 } 7521 7522 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op, 7523 SelectionDAG &DAG) const { 7524 7525 // Implements a vector truncate that fits in a vector register as a shuffle. 7526 // We want to legalize vector truncates down to where the source fits in 7527 // a vector register (and target is therefore smaller than vector register 7528 // size). At that point legalization will try to custom lower the sub-legal 7529 // result and get here - where we can contain the truncate as a single target 7530 // operation. 7531 7532 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows: 7533 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2> 7534 // 7535 // We will implement it for big-endian ordering as this (where x denotes 7536 // undefined): 7537 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to 7538 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u> 7539 // 7540 // The same operation in little-endian ordering will be: 7541 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to 7542 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1> 7543 7544 assert(Op.getValueType().isVector() && "Vector type expected."); 7545 7546 SDLoc DL(Op); 7547 SDValue N1 = Op.getOperand(0); 7548 unsigned SrcSize = N1.getValueType().getSizeInBits(); 7549 assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector"); 7550 SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL); 7551 7552 EVT TrgVT = Op.getValueType(); 7553 unsigned TrgNumElts = TrgVT.getVectorNumElements(); 7554 EVT EltVT = TrgVT.getVectorElementType(); 7555 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 7556 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 7557 7558 // First list the elements we want to keep. 
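  // Worked example (assuming a v8i16 -> v8i8 truncate, so SizeMult == 2):
  // little-endian keeps byte indices 0, 2, ..., 14 (i * SizeMult), while
  // big-endian keeps 1, 3, ..., 15 (i * SizeMult - 1) -- in both cases the
  // least significant byte of each halfword.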
  unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
  SmallVector<int, 16> ShuffV;
  if (Subtarget.isLittleEndian())
    for (unsigned i = 0; i < TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult);
  else
    for (unsigned i = 1; i <= TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult - 1);

  // Populate the remaining elements with undefs.
  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(WideNumElts + 1);

  SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
  return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
}

/// LowerSELECT_CC - Lower floating point select_cc's into an fsel instruction
/// when possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  // Not FP? Not an fsel.
  if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
      !Op.getOperand(2).getValueType().isFloatingPoint())
    return Op;

  bool HasNoInfs = DAG.getTarget().Options.NoInfsFPMath;
  bool HasNoNaNs = DAG.getTarget().Options.NoNaNsFPMath;
  // We might be able to do better than this under some circumstances, but in
  // general, fsel-based lowering of select is a finite-math-only optimization.
  // For more information, see section F.3 of the 2.06 ISA specification.
  // With ISA 3.0, we have xsmaxcdp/xsmincdp which are OK to emit even in the
  // presence of infinities.
  if (!Subtarget.hasP9Vector() && (!HasNoInfs || !HasNoNaNs))
    return Op;
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();
  SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
  SDLoc dl(Op);

  if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
    switch (CC) {
    default:
      // Not a min/max but with finite math, we may still be able to use fsel.
      if (HasNoInfs && HasNoNaNs)
        break;
      return Op;
    case ISD::SETOGT:
    case ISD::SETGT:
      return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
    case ISD::SETOLT:
    case ISD::SETLT:
      return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
    }
  }

  // TODO: Propagate flags from the select rather than global settings.
  SDNodeFlags Flags;
  Flags.setNoInfs(true);
  Flags.setNoNaNs(true);

  // If the RHS of the comparison is a 0.0, we don't need to do the
  // subtraction at all.
  SDValue Sel1;
  if (isFloatingPointZero(RHS))
    switch (CC) {
    default: break; // SETUO etc aren't handled by fsel.
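    // Why no subtraction is needed here: fsel natively selects on
    // "operand >= 0.0", so with RHS == 0.0 the LHS (or its negation) can
    // feed fsel directly; e.g. "LHS >= 0.0 ? TV : FV" is a single fsel,
    // and the EQ/NE cases below nest two fsels on LHS and -LHS.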
    case ISD::SETNE:
      std::swap(TV, FV);
      LLVM_FALLTHROUGH;
    case ISD::SETEQ:
      if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
      if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
        Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
    case ISD::SETULT:
    case ISD::SETLT:
      std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
      LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE:
      if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
    case ISD::SETUGT:
    case ISD::SETGT:
      std::swap(TV, FV); // fsel is natively setge, swap operands for setgt
      LLVM_FALLTHROUGH;
    case ISD::SETOLE:
    case ISD::SETLE:
      if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
        LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
      return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                         DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
    }

  SDValue Cmp;
  switch (CC) {
  default: break; // SETUO etc aren't handled by fsel.
  case ISD::SETNE:
    std::swap(TV, FV);
    LLVM_FALLTHROUGH;
  case ISD::SETEQ:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
    if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
      Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT,
                       DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
  case ISD::SETULT:
  case ISD::SETLT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOGE:
  case ISD::SETGE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  case ISD::SETUGT:
  case ISD::SETGT:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
  case ISD::SETOLE:
  case ISD::SETLE:
    Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
    if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
      Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
    return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
  }
  return Op;
}

void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {
  assert(Op.getOperand(0).getValueType().isFloatingPoint());
  SDValue Src = Op.getOperand(0);
  if (Src.getValueType() ==
MVT::f32) 7710 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7711 7712 SDValue Tmp; 7713 switch (Op.getSimpleValueType().SimpleTy) { 7714 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7715 case MVT::i32: 7716 Tmp = DAG.getNode( 7717 Op.getOpcode() == ISD::FP_TO_SINT 7718 ? PPCISD::FCTIWZ 7719 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7720 dl, MVT::f64, Src); 7721 break; 7722 case MVT::i64: 7723 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7724 "i64 FP_TO_UINT is supported only with FPCVT"); 7725 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ : 7726 PPCISD::FCTIDUZ, 7727 dl, MVT::f64, Src); 7728 break; 7729 } 7730 7731 // Convert the FP value to an int value through memory. 7732 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() && 7733 (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()); 7734 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64); 7735 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex(); 7736 MachinePointerInfo MPI = 7737 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI); 7738 7739 // Emit a store to the stack slot. 7740 SDValue Chain; 7741 unsigned Alignment = DAG.getEVTAlignment(Tmp.getValueType()); 7742 if (i32Stack) { 7743 MachineFunction &MF = DAG.getMachineFunction(); 7744 Alignment = 4; 7745 MachineMemOperand *MMO = 7746 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment); 7747 SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr }; 7748 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl, 7749 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO); 7750 } else 7751 Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment); 7752 7753 // Result is a load from the stack slot. If loading 4 bytes, make sure to 7754 // add in a bias on big endian. 7755 if (Op.getValueType() == MVT::i32 && !i32Stack) { 7756 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr, 7757 DAG.getConstant(4, dl, FIPtr.getValueType())); 7758 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4); 7759 } 7760 7761 RLI.Chain = Chain; 7762 RLI.Ptr = FIPtr; 7763 RLI.MPI = MPI; 7764 RLI.Alignment = Alignment; 7765 } 7766 7767 /// Custom lowers floating point to integer conversions to use 7768 /// the direct move instructions available in ISA 2.07 to avoid the 7769 /// need for load/store combinations. 7770 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op, 7771 SelectionDAG &DAG, 7772 const SDLoc &dl) const { 7773 assert(Op.getOperand(0).getValueType().isFloatingPoint()); 7774 SDValue Src = Op.getOperand(0); 7775 7776 if (Src.getValueType() == MVT::f32) 7777 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src); 7778 7779 SDValue Tmp; 7780 switch (Op.getSimpleValueType().SimpleTy) { 7781 default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); 7782 case MVT::i32: 7783 Tmp = DAG.getNode( 7784 Op.getOpcode() == ISD::FP_TO_SINT 7785 ? PPCISD::FCTIWZ 7786 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ), 7787 dl, MVT::f64, Src); 7788 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp); 7789 break; 7790 case MVT::i64: 7791 assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) && 7792 "i64 FP_TO_UINT is supported only with FPCVT"); 7793 Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? 
PPCISD::FCTIDZ : 7794 PPCISD::FCTIDUZ, 7795 dl, MVT::f64, Src); 7796 Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp); 7797 break; 7798 } 7799 return Tmp; 7800 } 7801 7802 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, 7803 const SDLoc &dl) const { 7804 7805 // FP to INT conversions are legal for f128. 7806 if (EnableQuadPrecision && (Op->getOperand(0).getValueType() == MVT::f128)) 7807 return Op; 7808 7809 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on 7810 // PPC (the libcall is not available). 7811 if (Op.getOperand(0).getValueType() == MVT::ppcf128) { 7812 if (Op.getValueType() == MVT::i32) { 7813 if (Op.getOpcode() == ISD::FP_TO_SINT) { 7814 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7815 MVT::f64, Op.getOperand(0), 7816 DAG.getIntPtrConstant(0, dl)); 7817 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, 7818 MVT::f64, Op.getOperand(0), 7819 DAG.getIntPtrConstant(1, dl)); 7820 7821 // Add the two halves of the long double in round-to-zero mode. 7822 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi); 7823 7824 // Now use a smaller FP_TO_SINT. 7825 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res); 7826 } 7827 if (Op.getOpcode() == ISD::FP_TO_UINT) { 7828 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0}; 7829 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31)); 7830 SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128); 7831 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X 7832 // FIXME: generated code sucks. 7833 // TODO: Are there fast-math-flags to propagate to this FSUB? 7834 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, 7835 Op.getOperand(0), Tmp); 7836 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True); 7837 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, 7838 DAG.getConstant(0x80000000, dl, MVT::i32)); 7839 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, 7840 Op.getOperand(0)); 7841 return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False, 7842 ISD::SETGE); 7843 } 7844 } 7845 7846 return SDValue(); 7847 } 7848 7849 if (Subtarget.hasDirectMove() && Subtarget.isPPC64()) 7850 return LowerFP_TO_INTDirectMove(Op, DAG, dl); 7851 7852 ReuseLoadInfo RLI; 7853 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl); 7854 7855 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI, 7856 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 7857 } 7858 7859 // We're trying to insert a regular store, S, and then a load, L. If the 7860 // incoming value, O, is a load, we might just be able to have our load use the 7861 // address used by O. However, we don't know if anything else will store to 7862 // that address before we can load from it. To prevent this situation, we need 7863 // to insert our load, L, into the chain as a peer of O. To do this, we give L 7864 // the same chain operand as O, we create a token factor from the chain results 7865 // of O and L, and we replace all uses of O's chain result with that token 7866 // factor (see spliceIntoChain below for this last part). 
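// Pictorially, where the users of O's chain originally saw
//   O.chain -> (users)
// we end up with
//   O.chain ----+
//               +-> TokenFactor -> (users)
//   L.chain ----+
// so everything ordered after O is now also ordered after L.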
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
                                            ReuseLoadInfo &RLI,
                                            SelectionDAG &DAG,
                                            ISD::LoadExtType ET) const {
  SDLoc dl(Op);
  bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
                       (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
  if (ET == ISD::NON_EXTLOAD &&
      (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
      isOperationLegalOrCustom(Op.getOpcode(),
                               Op.getOperand(0).getValueType())) {

    LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
    return true;
  }

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
  if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
      LD->isNonTemporal())
    return false;
  if (LD->getMemoryVT() != MemVT)
    return false;

  RLI.Ptr = LD->getBasePtr();
  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
    assert(LD->getAddressingMode() == ISD::PRE_INC &&
           "Non-pre-inc AM on PPC?");
    RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
                          LD->getOffset());
  }

  RLI.Chain = LD->getChain();
  RLI.MPI = LD->getPointerInfo();
  RLI.IsDereferenceable = LD->isDereferenceable();
  RLI.IsInvariant = LD->isInvariant();
  RLI.Alignment = LD->getAlignment();
  RLI.AAInfo = LD->getAAInfo();
  RLI.Ranges = LD->getRanges();

  RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
  return true;
}

// Given the head of the old chain, ResChain, insert a token factor containing
// it and NewResChain, and make users of ResChain now be users of that token
// factor.
// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
                                        SDValue NewResChain,
                                        SelectionDAG &DAG) const {
  if (!ResChain)
    return;

  SDLoc dl(NewResChain);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                           NewResChain, DAG.getUNDEF(MVT::Other));
  assert(TF.getNode() != NewResChain.getNode() &&
         "A new TF really is required here");

  DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
  DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}

/// Analyze the profitability of a direct move: prefer a plain float load over
/// an integer load plus a direct move when the loaded integer value has no
/// integer uses.
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
  SDNode *Origin = Op.getOperand(0).getNode();
  if (Origin->getOpcode() != ISD::LOAD)
    return true;

  // If there is no LXSIBZX/LXSIHZX (e.g. on Power8), prefer the direct move
  // if the memory access is 1 or 2 bytes.
  MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
    return true;

  for (SDNode::use_iterator UI = Origin->use_begin(),
                            UE = Origin->use_end();
       UI != UE; ++UI) {

    // Only look at the users of the loaded value.
    if (UI.getUse().get().getResNo() != 0)
      continue;

    if (UI->getOpcode() != ISD::SINT_TO_FP &&
        UI->getOpcode() != ISD::UINT_TO_FP)
      return true;
  }

  return false;
}

/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
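/// For example (illustrative only; the code below builds the equivalent ISD
/// nodes rather than literal assembly), an i32 -> f64 sint_to_fp becomes
/// roughly:
///   mtvsrwa vsX, rY     ; sign-extending GPR-to-VSR direct move (MTVSRA)
///   xscvsxddp fZ, vsX   ; integer-to-FP convert (FCFID)
/// instead of a stack store followed by a floating-point reload.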
7964 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op, 7965 SelectionDAG &DAG, 7966 const SDLoc &dl) const { 7967 assert((Op.getValueType() == MVT::f32 || 7968 Op.getValueType() == MVT::f64) && 7969 "Invalid floating point type as target of conversion"); 7970 assert(Subtarget.hasFPCVT() && 7971 "Int to FP conversions with direct moves require FPCVT"); 7972 SDValue FP; 7973 SDValue Src = Op.getOperand(0); 7974 bool SinglePrec = Op.getValueType() == MVT::f32; 7975 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32; 7976 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP; 7977 unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) : 7978 (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU); 7979 7980 if (WordInt) { 7981 FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ, 7982 dl, MVT::f64, Src); 7983 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7984 } 7985 else { 7986 FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src); 7987 FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP); 7988 } 7989 7990 return FP; 7991 } 7992 7993 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) { 7994 7995 EVT VecVT = Vec.getValueType(); 7996 assert(VecVT.isVector() && "Expected a vector type."); 7997 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width."); 7998 7999 EVT EltVT = VecVT.getVectorElementType(); 8000 unsigned WideNumElts = 128 / EltVT.getSizeInBits(); 8001 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts); 8002 8003 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements(); 8004 SmallVector<SDValue, 16> Ops(NumConcat); 8005 Ops[0] = Vec; 8006 SDValue UndefVec = DAG.getUNDEF(VecVT); 8007 for (unsigned i = 1; i < NumConcat; ++i) 8008 Ops[i] = UndefVec; 8009 8010 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops); 8011 } 8012 8013 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG, 8014 const SDLoc &dl) const { 8015 8016 unsigned Opc = Op.getOpcode(); 8017 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) && 8018 "Unexpected conversion type"); 8019 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) && 8020 "Supports conversions to v2f64/v4f32 only."); 8021 8022 bool SignedConv = Opc == ISD::SINT_TO_FP; 8023 bool FourEltRes = Op.getValueType() == MVT::v4f32; 8024 8025 SDValue Wide = widenVec(DAG, Op.getOperand(0), dl); 8026 EVT WideVT = Wide.getValueType(); 8027 unsigned WideNumElts = WideVT.getVectorNumElements(); 8028 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64; 8029 8030 SmallVector<int, 16> ShuffV; 8031 for (unsigned i = 0; i < WideNumElts; ++i) 8032 ShuffV.push_back(i + WideNumElts); 8033 8034 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2; 8035 int SaveElts = FourEltRes ? 4 : 2; 8036 if (Subtarget.isLittleEndian()) 8037 for (int i = 0; i < SaveElts; i++) 8038 ShuffV[i * Stride] = i; 8039 else 8040 for (int i = 1; i <= SaveElts; i++) 8041 ShuffV[i * Stride - 1] = i - 1; 8042 8043 SDValue ShuffleSrc2 = 8044 SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT); 8045 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV); 8046 unsigned ExtendOp = 8047 SignedConv ? 
(unsigned)PPCISD::SExtVElems : (unsigned)ISD::BITCAST; 8048 8049 SDValue Extend; 8050 if (!Subtarget.hasP9Altivec() && SignedConv) { 8051 Arrange = DAG.getBitcast(IntermediateVT, Arrange); 8052 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange, 8053 DAG.getValueType(Op.getOperand(0).getValueType())); 8054 } else 8055 Extend = DAG.getNode(ExtendOp, dl, IntermediateVT, Arrange); 8056 8057 return DAG.getNode(Opc, dl, Op.getValueType(), Extend); 8058 } 8059 8060 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op, 8061 SelectionDAG &DAG) const { 8062 SDLoc dl(Op); 8063 8064 EVT InVT = Op.getOperand(0).getValueType(); 8065 EVT OutVT = Op.getValueType(); 8066 if (OutVT.isVector() && OutVT.isFloatingPoint() && 8067 isOperationCustom(Op.getOpcode(), InVT)) 8068 return LowerINT_TO_FPVector(Op, DAG, dl); 8069 8070 // Conversions to f128 are legal. 8071 if (EnableQuadPrecision && (Op.getValueType() == MVT::f128)) 8072 return Op; 8073 8074 if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) { 8075 if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64) 8076 return SDValue(); 8077 8078 SDValue Value = Op.getOperand(0); 8079 // The values are now known to be -1 (false) or 1 (true). To convert this 8080 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 8081 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 8082 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 8083 8084 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 8085 8086 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 8087 8088 if (Op.getValueType() != MVT::v4f64) 8089 Value = DAG.getNode(ISD::FP_ROUND, dl, 8090 Op.getValueType(), Value, 8091 DAG.getIntPtrConstant(1, dl)); 8092 return Value; 8093 } 8094 8095 // Don't handle ppc_fp128 here; let it be lowered to a libcall. 8096 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64) 8097 return SDValue(); 8098 8099 if (Op.getOperand(0).getValueType() == MVT::i1) 8100 return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0), 8101 DAG.getConstantFP(1.0, dl, Op.getValueType()), 8102 DAG.getConstantFP(0.0, dl, Op.getValueType())); 8103 8104 // If we have direct moves, we can do all the conversion, skip the store/load 8105 // however, without FPCVT we can't do most conversions. 8106 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) && 8107 Subtarget.isPPC64() && Subtarget.hasFPCVT()) 8108 return LowerINT_TO_FPDirectMove(Op, DAG, dl); 8109 8110 assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) && 8111 "UINT_TO_FP is supported only with FPCVT"); 8112 8113 // If we have FCFIDS, then use it when converting to single-precision. 8114 // Otherwise, convert to double-precision and then round. 8115 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 8116 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS 8117 : PPCISD::FCFIDS) 8118 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU 8119 : PPCISD::FCFID); 8120 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32) 8121 ? MVT::f32 8122 : MVT::f64; 8123 8124 if (Op.getOperand(0).getValueType() == MVT::i64) { 8125 SDValue SINT = Op.getOperand(0); 8126 // When converting to single-precision, we actually need to convert 8127 // to double-precision first and then round to single-precision. 8128 // To avoid double-rounding effects during that operation, we have 8129 // to prepare the input operand. 
Bits that might be truncated when 8130 // converting to double-precision are replaced by a bit that won't 8131 // be lost at this stage, but is below the single-precision rounding 8132 // position. 8133 // 8134 // However, if -enable-unsafe-fp-math is in effect, accept double 8135 // rounding to avoid the extra overhead. 8136 if (Op.getValueType() == MVT::f32 && 8137 !Subtarget.hasFPCVT() && 8138 !DAG.getTarget().Options.UnsafeFPMath) { 8139 8140 // Twiddle input to make sure the low 11 bits are zero. (If this 8141 // is the case, we are guaranteed the value will fit into the 53 bit 8142 // mantissa of an IEEE double-precision value without rounding.) 8143 // If any of those low 11 bits were not zero originally, make sure 8144 // bit 12 (value 2048) is set instead, so that the final rounding 8145 // to single-precision gets the correct result. 8146 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8147 SINT, DAG.getConstant(2047, dl, MVT::i64)); 8148 Round = DAG.getNode(ISD::ADD, dl, MVT::i64, 8149 Round, DAG.getConstant(2047, dl, MVT::i64)); 8150 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); 8151 Round = DAG.getNode(ISD::AND, dl, MVT::i64, 8152 Round, DAG.getConstant(-2048, dl, MVT::i64)); 8153 8154 // However, we cannot use that value unconditionally: if the magnitude 8155 // of the input value is small, the bit-twiddling we did above might 8156 // end up visibly changing the output. Fortunately, in that case, we 8157 // don't need to twiddle bits since the original input will convert 8158 // exactly to double-precision floating-point already. Therefore, 8159 // construct a conditional to use the original value if the top 11 8160 // bits are all sign-bit copies, and use the rounded value computed 8161 // above otherwise. 8162 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, 8163 SINT, DAG.getConstant(53, dl, MVT::i32)); 8164 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, 8165 Cond, DAG.getConstant(1, dl, MVT::i64)); 8166 Cond = DAG.getSetCC(dl, MVT::i32, 8167 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT); 8168 8169 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); 8170 } 8171 8172 ReuseLoadInfo RLI; 8173 SDValue Bits; 8174 8175 MachineFunction &MF = DAG.getMachineFunction(); 8176 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) { 8177 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI, 8178 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges); 8179 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8180 } else if (Subtarget.hasLFIWAX() && 8181 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) { 8182 MachineMemOperand *MMO = 8183 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8184 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8185 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8186 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl, 8187 DAG.getVTList(MVT::f64, MVT::Other), 8188 Ops, MVT::i32, MMO); 8189 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8190 } else if (Subtarget.hasFPCVT() && 8191 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) { 8192 MachineMemOperand *MMO = 8193 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8194 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8195 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8196 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl, 8197 DAG.getVTList(MVT::f64, MVT::Other), 8198 Ops, MVT::i32, MMO); 8199 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG); 8200 } else if (((Subtarget.hasLFIWAX() && 8201 SINT.getOpcode() == ISD::SIGN_EXTEND) || 8202 
(Subtarget.hasFPCVT() && 8203 SINT.getOpcode() == ISD::ZERO_EXTEND)) && 8204 SINT.getOperand(0).getValueType() == MVT::i32) { 8205 MachineFrameInfo &MFI = MF.getFrameInfo(); 8206 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8207 8208 int FrameIdx = MFI.CreateStackObject(4, 4, false); 8209 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8210 8211 SDValue Store = 8212 DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx, 8213 MachinePointerInfo::getFixedStack( 8214 DAG.getMachineFunction(), FrameIdx)); 8215 8216 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8217 "Expected an i32 store"); 8218 8219 RLI.Ptr = FIdx; 8220 RLI.Chain = Store; 8221 RLI.MPI = 8222 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8223 RLI.Alignment = 4; 8224 8225 MachineMemOperand *MMO = 8226 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8227 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8228 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8229 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ? 8230 PPCISD::LFIWZX : PPCISD::LFIWAX, 8231 dl, DAG.getVTList(MVT::f64, MVT::Other), 8232 Ops, MVT::i32, MMO); 8233 } else 8234 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); 8235 8236 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits); 8237 8238 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) 8239 FP = DAG.getNode(ISD::FP_ROUND, dl, 8240 MVT::f32, FP, DAG.getIntPtrConstant(0, dl)); 8241 return FP; 8242 } 8243 8244 assert(Op.getOperand(0).getValueType() == MVT::i32 && 8245 "Unhandled INT_TO_FP type in custom expander!"); 8246 // Since we only generate this in 64-bit mode, we can take advantage of 8247 // 64-bit registers. In particular, sign extend the input value into the 8248 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack 8249 // then lfd it and fcfid it. 8250 MachineFunction &MF = DAG.getMachineFunction(); 8251 MachineFrameInfo &MFI = MF.getFrameInfo(); 8252 EVT PtrVT = getPointerTy(MF.getDataLayout()); 8253 8254 SDValue Ld; 8255 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) { 8256 ReuseLoadInfo RLI; 8257 bool ReusingLoad; 8258 if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI, 8259 DAG))) { 8260 int FrameIdx = MFI.CreateStackObject(4, 4, false); 8261 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8262 8263 SDValue Store = 8264 DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 8265 MachinePointerInfo::getFixedStack( 8266 DAG.getMachineFunction(), FrameIdx)); 8267 8268 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 && 8269 "Expected an i32 store"); 8270 8271 RLI.Ptr = FIdx; 8272 RLI.Chain = Store; 8273 RLI.MPI = 8274 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8275 RLI.Alignment = 4; 8276 } 8277 8278 MachineMemOperand *MMO = 8279 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4, 8280 RLI.Alignment, RLI.AAInfo, RLI.Ranges); 8281 SDValue Ops[] = { RLI.Chain, RLI.Ptr }; 8282 Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ? 
                                 PPCISD::LFIWZX : PPCISD::LFIWAX,
                                 dl, DAG.getVTList(MVT::f64, MVT::Other),
                                 Ops, MVT::i32, MMO);
    if (ReusingLoad)
      spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");

    int FrameIdx = MFI.CreateStackObject(8, 8, false);
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
                                Op.getOperand(0));

    // STD the extended value into the stack slot.
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

   FLT_ROUNDS, on the other hand, expects the following:
     -1 Undefined
      0 Round to 0
      1 Round to nearest
      2 Round to +inf
      3 Round to -inf

   To perform the conversion, we do:
     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register.
  EVT NodeTys[] = {
    MVT::f64,   // return register
    MVT::Glue   // unused in this context
  };
  SDValue Chain = DAG.getNode(PPCISD::MFFS, dl, NodeTys, None);

  // Save FP register to stack slot.
  int SSFI = MF.getFrameInfo().CreateStackObject(8, 8, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Chain, StackSlot,
                               MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Store, Addr, MachinePointerInfo());

  // Transform as necessary.
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  return DAG.getNode((VT.getSizeInBits() < 16 ?
8377 ISD::TRUNCATE : ISD::ZERO_EXTEND), dl, VT, RetVal); 8378 } 8379 8380 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8381 EVT VT = Op.getValueType(); 8382 unsigned BitWidth = VT.getSizeInBits(); 8383 SDLoc dl(Op); 8384 assert(Op.getNumOperands() == 3 && 8385 VT == Op.getOperand(1).getValueType() && 8386 "Unexpected SHL!"); 8387 8388 // Expand into a bunch of logical ops. Note that these ops 8389 // depend on the PPC behavior for oversized shift amounts. 8390 SDValue Lo = Op.getOperand(0); 8391 SDValue Hi = Op.getOperand(1); 8392 SDValue Amt = Op.getOperand(2); 8393 EVT AmtVT = Amt.getValueType(); 8394 8395 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8396 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8397 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt); 8398 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1); 8399 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3); 8400 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8401 DAG.getConstant(-BitWidth, dl, AmtVT)); 8402 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5); 8403 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8404 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt); 8405 SDValue OutOps[] = { OutLo, OutHi }; 8406 return DAG.getMergeValues(OutOps, dl); 8407 } 8408 8409 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const { 8410 EVT VT = Op.getValueType(); 8411 SDLoc dl(Op); 8412 unsigned BitWidth = VT.getSizeInBits(); 8413 assert(Op.getNumOperands() == 3 && 8414 VT == Op.getOperand(1).getValueType() && 8415 "Unexpected SRL!"); 8416 8417 // Expand into a bunch of logical ops. Note that these ops 8418 // depend on the PPC behavior for oversized shift amounts. 8419 SDValue Lo = Op.getOperand(0); 8420 SDValue Hi = Op.getOperand(1); 8421 SDValue Amt = Op.getOperand(2); 8422 EVT AmtVT = Amt.getValueType(); 8423 8424 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8425 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8426 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8427 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8428 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8429 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8430 DAG.getConstant(-BitWidth, dl, AmtVT)); 8431 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5); 8432 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6); 8433 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt); 8434 SDValue OutOps[] = { OutLo, OutHi }; 8435 return DAG.getMergeValues(OutOps, dl); 8436 } 8437 8438 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const { 8439 SDLoc dl(Op); 8440 EVT VT = Op.getValueType(); 8441 unsigned BitWidth = VT.getSizeInBits(); 8442 assert(Op.getNumOperands() == 3 && 8443 VT == Op.getOperand(1).getValueType() && 8444 "Unexpected SRA!"); 8445 8446 // Expand into a bunch of logical ops, followed by a select_cc. 
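  // A note on the tail of this sequence: Tmp5 below computes Amt - BitWidth,
  // so the final select_cc picks the OR of the two partial shifts (Tmp4)
  // while Amt <= BitWidth, and the pure arithmetic shift of Hi (Tmp6) once
  // the shift amount spills past the low part.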
8447 SDValue Lo = Op.getOperand(0); 8448 SDValue Hi = Op.getOperand(1); 8449 SDValue Amt = Op.getOperand(2); 8450 EVT AmtVT = Amt.getValueType(); 8451 8452 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT, 8453 DAG.getConstant(BitWidth, dl, AmtVT), Amt); 8454 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt); 8455 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1); 8456 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3); 8457 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt, 8458 DAG.getConstant(-BitWidth, dl, AmtVT)); 8459 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5); 8460 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt); 8461 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT), 8462 Tmp4, Tmp6, ISD::SETLE); 8463 SDValue OutOps[] = { OutLo, OutHi }; 8464 return DAG.getMergeValues(OutOps, dl); 8465 } 8466 8467 //===----------------------------------------------------------------------===// 8468 // Vector related lowering. 8469 // 8470 8471 /// BuildSplatI - Build a canonical splati of Val with an element size of 8472 /// SplatSize. Cast the result to VT. 8473 static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, 8474 SelectionDAG &DAG, const SDLoc &dl) { 8475 static const MVT VTys[] = { // canonical VT to use for each size. 8476 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32 8477 }; 8478 8479 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1]; 8480 8481 // Force vspltis[hw] -1 to vspltisb -1 to canonicalize. 8482 if (Val == -1) 8483 SplatSize = 1; 8484 8485 EVT CanonicalVT = VTys[SplatSize-1]; 8486 8487 // Build a canonical splat for this value. 8488 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT)); 8489 } 8490 8491 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the 8492 /// specified intrinsic ID. 8493 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, 8494 const SDLoc &dl, EVT DestVT = MVT::Other) { 8495 if (DestVT == MVT::Other) DestVT = Op.getValueType(); 8496 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8497 DAG.getConstant(IID, dl, MVT::i32), Op); 8498 } 8499 8500 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the 8501 /// specified intrinsic ID. 8502 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS, 8503 SelectionDAG &DAG, const SDLoc &dl, 8504 EVT DestVT = MVT::Other) { 8505 if (DestVT == MVT::Other) DestVT = LHS.getValueType(); 8506 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8507 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS); 8508 } 8509 8510 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the 8511 /// specified intrinsic ID. 8512 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1, 8513 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl, 8514 EVT DestVT = MVT::Other) { 8515 if (DestVT == MVT::Other) DestVT = Op0.getValueType(); 8516 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, 8517 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2); 8518 } 8519 8520 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified 8521 /// amount. The result has the specified value type. 8522 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, 8523 SelectionDAG &DAG, const SDLoc &dl) { 8524 // Force LHS/RHS to be the right type. 
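  // (Recall vsldoi's semantics: the result is the 16 consecutive bytes
  // starting at byte Amt of the 32-byte concatenation LHS||RHS -- exactly
  // the shuffle mask of i + Amt built below.)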
8525 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS); 8526 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS); 8527 8528 int Ops[16]; 8529 for (unsigned i = 0; i != 16; ++i) 8530 Ops[i] = i + Amt; 8531 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); 8532 return DAG.getNode(ISD::BITCAST, dl, VT, T); 8533 } 8534 8535 /// Do we have an efficient pattern in a .td file for this node? 8536 /// 8537 /// \param V - pointer to the BuildVectorSDNode being matched 8538 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves? 8539 /// 8540 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR 8541 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where 8542 /// the opposite is true (expansion is beneficial) are: 8543 /// - The node builds a vector out of integers that are not 32 or 64-bits 8544 /// - The node builds a vector out of constants 8545 /// - The node is a "load-and-splat" 8546 /// In all other cases, we will choose to keep the BUILD_VECTOR. 8547 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, 8548 bool HasDirectMove, 8549 bool HasP8Vector) { 8550 EVT VecVT = V->getValueType(0); 8551 bool RightType = VecVT == MVT::v2f64 || 8552 (HasP8Vector && VecVT == MVT::v4f32) || 8553 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32)); 8554 if (!RightType) 8555 return false; 8556 8557 bool IsSplat = true; 8558 bool IsLoad = false; 8559 SDValue Op0 = V->getOperand(0); 8560 8561 // This function is called in a block that confirms the node is not a constant 8562 // splat. So a constant BUILD_VECTOR here means the vector is built out of 8563 // different constants. 8564 if (V->isConstant()) 8565 return false; 8566 for (int i = 0, e = V->getNumOperands(); i < e; ++i) { 8567 if (V->getOperand(i).isUndef()) 8568 return false; 8569 // We want to expand nodes that represent load-and-splat even if the 8570 // loaded value is a floating point truncation or conversion to int. 8571 if (V->getOperand(i).getOpcode() == ISD::LOAD || 8572 (V->getOperand(i).getOpcode() == ISD::FP_ROUND && 8573 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8574 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT && 8575 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) || 8576 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT && 8577 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD)) 8578 IsLoad = true; 8579 // If the operands are different or the input is not a load and has more 8580 // uses than just this BV node, then it isn't a splat. 8581 if (V->getOperand(i) != Op0 || 8582 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode()))) 8583 IsSplat = false; 8584 } 8585 return !(IsSplat && IsLoad); 8586 } 8587 8588 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128. 
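// In DAG terms, this matches
//   (f128 (bitcast (build_pair i64:lo, i64:hi))) -> (BUILD_FP128 lo, hi),
// and anything else falls through by returning SDValue(), which requests the
// default expansion.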
8589 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { 8590 8591 SDLoc dl(Op); 8592 SDValue Op0 = Op->getOperand(0); 8593 8594 if (!EnableQuadPrecision || 8595 (Op.getValueType() != MVT::f128 ) || 8596 (Op0.getOpcode() != ISD::BUILD_PAIR) || 8597 (Op0.getOperand(0).getValueType() != MVT::i64) || 8598 (Op0.getOperand(1).getValueType() != MVT::i64)) 8599 return SDValue(); 8600 8601 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0), 8602 Op0.getOperand(1)); 8603 } 8604 8605 static const SDValue *getNormalLoadInput(const SDValue &Op) { 8606 const SDValue *InputLoad = &Op; 8607 if (InputLoad->getOpcode() == ISD::BITCAST) 8608 InputLoad = &InputLoad->getOperand(0); 8609 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR) 8610 InputLoad = &InputLoad->getOperand(0); 8611 if (InputLoad->getOpcode() != ISD::LOAD) 8612 return nullptr; 8613 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 8614 return ISD::isNormalLoad(LD) ? InputLoad : nullptr; 8615 } 8616 8617 // If this is a case we can't handle, return null and let the default 8618 // expansion code take care of it. If we CAN select this case, and if it 8619 // selects to a single instruction, return Op. Otherwise, if we can codegen 8620 // this case more efficiently than a constant pool load, lower it to the 8621 // sequence of ops that should be used. 8622 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, 8623 SelectionDAG &DAG) const { 8624 SDLoc dl(Op); 8625 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); 8626 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR"); 8627 8628 if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) { 8629 // We first build an i32 vector, load it into a QPX register, 8630 // then convert it to a floating-point vector and compare it 8631 // to a zero vector to get the boolean result. 
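  // The non-constant path below goes through memory: each i32 operand is
  // stored into a 16-byte stack slot, the slot is reloaded as a vector
  // (qvlfiwz), converted to floating point (qvfcfidu), and compared against
  // zero; fully constant inputs instead take a constant-pool load (the
  // QVLFSb path).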
8632 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 8633 int FrameIdx = MFI.CreateStackObject(16, 16, false); 8634 MachinePointerInfo PtrInfo = 8635 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 8636 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 8637 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 8638 8639 assert(BVN->getNumOperands() == 4 && 8640 "BUILD_VECTOR for v4i1 does not have 4 operands"); 8641 8642 bool IsConst = true; 8643 for (unsigned i = 0; i < 4; ++i) { 8644 if (BVN->getOperand(i).isUndef()) continue; 8645 if (!isa<ConstantSDNode>(BVN->getOperand(i))) { 8646 IsConst = false; 8647 break; 8648 } 8649 } 8650 8651 if (IsConst) { 8652 Constant *One = 8653 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0); 8654 Constant *NegOne = 8655 ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0); 8656 8657 Constant *CV[4]; 8658 for (unsigned i = 0; i < 4; ++i) { 8659 if (BVN->getOperand(i).isUndef()) 8660 CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext())); 8661 else if (isNullConstant(BVN->getOperand(i))) 8662 CV[i] = NegOne; 8663 else 8664 CV[i] = One; 8665 } 8666 8667 Constant *CP = ConstantVector::get(CV); 8668 SDValue CPIdx = DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), 8669 16 /* alignment */); 8670 8671 SDValue Ops[] = {DAG.getEntryNode(), CPIdx}; 8672 SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other}); 8673 return DAG.getMemIntrinsicNode( 8674 PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32, 8675 MachinePointerInfo::getConstantPool(DAG.getMachineFunction())); 8676 } 8677 8678 SmallVector<SDValue, 4> Stores; 8679 for (unsigned i = 0; i < 4; ++i) { 8680 if (BVN->getOperand(i).isUndef()) continue; 8681 8682 unsigned Offset = 4*i; 8683 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 8684 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 8685 8686 unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize(); 8687 if (StoreSize > 4) { 8688 Stores.push_back( 8689 DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx, 8690 PtrInfo.getWithOffset(Offset), MVT::i32)); 8691 } else { 8692 SDValue StoreValue = BVN->getOperand(i); 8693 if (StoreSize < 4) 8694 StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue); 8695 8696 Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx, 8697 PtrInfo.getWithOffset(Offset))); 8698 } 8699 } 8700 8701 SDValue StoreChain; 8702 if (!Stores.empty()) 8703 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 8704 else 8705 StoreChain = DAG.getEntryNode(); 8706 8707 // Now load from v4i32 into the QPX register; this will extend it to 8708 // v4i64 but not yet convert it to a floating point. Nevertheless, this 8709 // is typed as v4f64 because the QPX register integer states are not 8710 // explicitly represented. 8711 8712 SDValue Ops[] = {StoreChain, 8713 DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32), 8714 FIdx}; 8715 SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other}); 8716 8717 SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, 8718 dl, VTs, Ops, MVT::v4i32, PtrInfo); 8719 LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 8720 DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32), 8721 LoadedVect); 8722 8723 SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64); 8724 8725 return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ); 8726 } 8727 8728 // All other QPX vectors are handled by generic code. 
8729   if (Subtarget.hasQPX())
8730     return SDValue();
8731
8732   // Check if this is a splat of a constant value.
8733   APInt APSplatBits, APSplatUndef;
8734   unsigned SplatBitSize;
8735   bool HasAnyUndefs;
8736   if (!BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8737                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
8738       SplatBitSize > 32) {
8739
8740     const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
8741     // Handle load-and-splat patterns as we have instructions that will do this
8742     // in one go.
8743     if (InputLoad && DAG.isSplatValue(Op, true)) {
8744       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8745
8746       // We have handling for 4 and 8 byte elements.
8747       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8748
8749       // To check for a single use of this load, we have to check for vector
8750       // width (128 bits) / ElementSize uses, since each operand of the
8751       // BUILD_VECTOR is a separate use of the value.
8752       if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
8753           ((Subtarget.hasVSX() && ElementSize == 64) ||
8754            (Subtarget.hasP9Vector() && ElementSize == 32))) {
8755         SDValue Ops[] = {
8756           LD->getChain(),    // Chain
8757           LD->getBasePtr(),  // Ptr
8758           DAG.getValueType(Op.getValueType())  // VT
8759         };
8760         return
8761           DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
8762                                   DAG.getVTList(Op.getValueType(), MVT::Other),
8763                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
8764       }
8765     }
8766
8767     // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
8768     // lowered to VSX instructions under certain conditions.
8769     // Without VSX, there is no pattern more efficient than expanding the node.
8770     if (Subtarget.hasVSX() &&
8771         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8772                                         Subtarget.hasP8Vector()))
8773       return Op;
8774     return SDValue();
8775   }
8776
8777   unsigned SplatBits = APSplatBits.getZExtValue();
8778   unsigned SplatUndef = APSplatUndef.getZExtValue();
8779   unsigned SplatSize = SplatBitSize / 8;
8780
8781   // First, handle single instruction cases.
8782
8783   // All zeros?
8784   if (SplatBits == 0) {
8785     // Canonicalize all zero vectors to be v4i32.
8786     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
8787       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
8788       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
8789     }
8790     return Op;
8791   }
8792
8793   // We have XXSPLTIB for constant splats one byte wide.
8794   // FIXME: SplatBits is an unsigned int being cast to an int while passing it
8795   // as an argument to BuildSplatI. Given SplatSize == 1 it is okay here.
8796   if (Subtarget.hasP9Vector() && SplatSize == 1)
8797     return BuildSplatI(SplatBits, SplatSize, Op.getValueType(), DAG, dl);
8798
8799   // If the sign-extended value is in the range [-16,15], use VSPLTI[bhw].
8800   int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
8801                      (32-SplatBitSize));
8802   if (SextVal >= -16 && SextVal <= 15)
8803     return BuildSplatI(SextVal, SplatSize, Op.getValueType(), DAG, dl);
8804
8805   // Two instruction sequences.
8806
8807   // If this value is in the range [-32,30] and is even, use:
8808   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
8809   // If this value is in the range [17,31] and is odd, use:
8810   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
8811   // If this value is in the range [-31,-17] and is odd, use:
8812   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
8813   // Note the last two are three-instruction sequences.
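  // For example, with halfword splats (illustrative only; the VADD_SPLAT
  // pseudo created below is what actually gets expanded into these forms):
  //   splat(30)  -> vspltish 15;  vadduhm               (15 + 15)
  //   splat(29)  -> vspltish 13;  vspltish -16; vsubuhm (13 - (-16))
  //   splat(-29) -> vspltish -13; vspltish -16; vadduhm (-13 + (-16))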
8814   if (SextVal >= -32 && SextVal <= 31) {
8815     // To avoid having these optimizations undone by constant folding,
8816     // we convert to a pseudo that will be expanded later into one of
8817     // the above forms.
8818     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
8819     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
8820               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
8821     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
8822     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
8823     if (VT == Op.getValueType())
8824       return RetVal;
8825     else
8826       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
8827   }
8828
8829   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
8830   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
8831   // for fneg/fabs.
8832   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
8833     // Make -1 and vspltisw -1:
8834     SDValue OnesV = BuildSplatI(-1, 4, MVT::v4i32, DAG, dl);
8835
8836     // Make the VSLW intrinsic, computing 0x8000_0000.
8837     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
8838                                    OnesV, DAG, dl);
8839
8840     // xor by OnesV to invert it.
8841     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
8842     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8843   }
8844
8845   // Check to see if this is a wide variety of vsplti*, binop self cases.
8846   static const signed char SplatCsts[] = {
8847     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
8848     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
8849   };
8850
8851   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
8852     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
8853     // ambiguous cases (e.g. formation of 0x8000_0000), as it appears first.
8854     int i = SplatCsts[idx];
8855
8856     // Figure out what shift amount will be used by altivec if shifted by i in
8857     // this splat size.
8858     unsigned TypeShiftAmt = i & (SplatBitSize-1);
8859
8860     // vsplti + shl self.
8861     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
8862       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8863       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8864         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
8865         Intrinsic::ppc_altivec_vslw
8866       };
8867       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8868       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8869     }
8870
8871     // vsplti + srl self.
8872     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
8873       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8874       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8875         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
8876         Intrinsic::ppc_altivec_vsrw
8877       };
8878       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8879       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8880     }
8881
8882     // vsplti + sra self (arithmetic shift, unlike the logical srl case above,
8883     // which this condition would otherwise just duplicate and never fire).
8884     if (SextVal == (int)(i >> TypeShiftAmt)) {
8885       SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl);
8886       static const unsigned IIDs[] = { // Intrinsic to use for each size.
8887         Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
8888         Intrinsic::ppc_altivec_vsraw
8889       };
8890       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
8891       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
8892     }
8893
     // vsplti + rol self.
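     // (The rol form matches when the splatted constant equals i rotated left
     // by TypeShiftAmt within the element; values that a plain shift already
     // produces are caught by the shl/srl checks above.)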
8894 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) | 8895 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) { 8896 SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); 8897 static const unsigned IIDs[] = { // Intrinsic to use for each size. 8898 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0, 8899 Intrinsic::ppc_altivec_vrlw 8900 }; 8901 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); 8902 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res); 8903 } 8904 8905 // t = vsplti c, result = vsldoi t, t, 1 8906 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { 8907 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8908 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1; 8909 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8910 } 8911 // t = vsplti c, result = vsldoi t, t, 2 8912 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { 8913 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8914 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2; 8915 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8916 } 8917 // t = vsplti c, result = vsldoi t, t, 3 8918 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { 8919 SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); 8920 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3; 8921 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl); 8922 } 8923 } 8924 8925 return SDValue(); 8926 } 8927 8928 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 8929 /// the specified operations to build the shuffle. 8930 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 8931 SDValue RHS, SelectionDAG &DAG, 8932 const SDLoc &dl) { 8933 unsigned OpNum = (PFEntry >> 26) & 0x0F; 8934 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1); 8935 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1); 8936 8937 enum { 8938 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 8939 OP_VMRGHW, 8940 OP_VMRGLW, 8941 OP_VSPLTISW0, 8942 OP_VSPLTISW1, 8943 OP_VSPLTISW2, 8944 OP_VSPLTISW3, 8945 OP_VSLDOI4, 8946 OP_VSLDOI8, 8947 OP_VSLDOI12 8948 }; 8949 8950 if (OpNum == OP_COPY) { 8951 if (LHSID == (1*9+2)*9+3) return LHS; 8952 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!"); 8953 return RHS; 8954 } 8955 8956 SDValue OpLHS, OpRHS; 8957 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 8958 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 8959 8960 int ShufIdxs[16]; 8961 switch (OpNum) { 8962 default: llvm_unreachable("Unknown i32 permute!"); 8963 case OP_VMRGHW: 8964 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3; 8965 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19; 8966 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7; 8967 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23; 8968 break; 8969 case OP_VMRGLW: 8970 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11; 8971 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27; 8972 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15; 8973 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31; 8974 break; 8975 case OP_VSPLTISW0: 8976 for (unsigned i = 0; i != 16; ++i) 8977 ShufIdxs[i] = (i&3)+0; 8978 break; 8979 case OP_VSPLTISW1: 8980 for (unsigned i = 0; i != 16; ++i) 8981 ShufIdxs[i] = (i&3)+4; 
8982       break;
8983     case OP_VSPLTISW2:
8984       for (unsigned i = 0; i != 16; ++i)
8985         ShufIdxs[i] = (i&3)+8;
8986       break;
8987     case OP_VSPLTISW3:
8988       for (unsigned i = 0; i != 16; ++i)
8989         ShufIdxs[i] = (i&3)+12;
8990       break;
8991     case OP_VSLDOI4:
8992       return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
8993     case OP_VSLDOI8:
8994       return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
8995     case OP_VSLDOI12:
8996       return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
8997   }
8998   EVT VT = OpLHS.getValueType();
8999   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9000   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9001   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9002   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9003 }
9004
9005 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9006 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9007 /// SDValue.
9008 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9009                                            SelectionDAG &DAG) const {
9010   const unsigned BytesInVector = 16;
9011   bool IsLE = Subtarget.isLittleEndian();
9012   SDLoc dl(N);
9013   SDValue V1 = N->getOperand(0);
9014   SDValue V2 = N->getOperand(1);
9015   unsigned ShiftElts = 0, InsertAtByte = 0;
9016   bool Swap = false;
9017
9018   // Shifts required to get the byte we want at element 7.
9019   unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9020                                    0, 15, 14, 13, 12, 11, 10, 9};
9021   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9022                                 1, 2, 3, 4, 5, 6, 7, 8};
9023
9024   ArrayRef<int> Mask = N->getMask();
9025   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9026
9027   // For each mask element, find out if we're just inserting something
9028   // from V2 into V1 or vice versa.
9029   // Possible permutations inserting an element from V2 into V1:
9030   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9031   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9032   //   ...
9033   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9034   // Inserting from V1 into V2 will be similar, except mask range will be
9035   // [16,31].
9036
9037   bool FoundCandidate = false;
9038   // If both vector operands for the shuffle are the same vector, the mask
9039   // will contain only elements from the first one and the second one will be
9040   // undef.
9041   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9042   // Go through the mask of bytes to find an element that's being moved
9043   // from one vector to the other.
9044   for (unsigned i = 0; i < BytesInVector; ++i) {
9045     unsigned CurrentElement = Mask[i];
9046     // If 2nd operand is undefined, we should only look for element 7 in the
9047     // Mask.
9048     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9049       continue;
9050
9051     bool OtherElementsInOrder = true;
9052     // Examine the other elements in the Mask to see if they're in original
9053     // order.
9054     for (unsigned j = 0; j < BytesInVector; ++j) {
9055       if (j == i)
9056         continue;
9057       // If CurrentElement is from V1 [0,15], then we expect the rest of the
9058       // Mask to be from V2 [16,31] and vice versa, unless the 2nd operand is
9059       // undefined, in which case we assume we're picking from the 1st operand.
9060       int MaskOffset =
9061           (!V2.isUndef() && CurrentElement < BytesInVector) ?
BytesInVector : 0; 9062 if (Mask[j] != OriginalOrder[j] + MaskOffset) { 9063 OtherElementsInOrder = false; 9064 break; 9065 } 9066 } 9067 // If other elements are in original order, we record the number of shifts 9068 // we need to get the element we want into element 7. Also record which byte 9069 // in the vector we should insert into. 9070 if (OtherElementsInOrder) { 9071 // If 2nd operand is undefined, we assume no shifts and no swapping. 9072 if (V2.isUndef()) { 9073 ShiftElts = 0; 9074 Swap = false; 9075 } else { 9076 // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4. 9077 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF] 9078 : BigEndianShifts[CurrentElement & 0xF]; 9079 Swap = CurrentElement < BytesInVector; 9080 } 9081 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i; 9082 FoundCandidate = true; 9083 break; 9084 } 9085 } 9086 9087 if (!FoundCandidate) 9088 return SDValue(); 9089 9090 // Candidate found, construct the proper SDAG sequence with VINSERTB, 9091 // optionally with VECSHL if shift is required. 9092 if (Swap) 9093 std::swap(V1, V2); 9094 if (V2.isUndef()) 9095 V2 = V1; 9096 if (ShiftElts) { 9097 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2, 9098 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9099 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl, 9100 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9101 } 9102 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2, 9103 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9104 } 9105 9106 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled 9107 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default 9108 /// SDValue. 9109 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N, 9110 SelectionDAG &DAG) const { 9111 const unsigned NumHalfWords = 8; 9112 const unsigned BytesInVector = NumHalfWords * 2; 9113 // Check that the shuffle is on half-words. 9114 if (!isNByteElemShuffleMask(N, 2, 1)) 9115 return SDValue(); 9116 9117 bool IsLE = Subtarget.isLittleEndian(); 9118 SDLoc dl(N); 9119 SDValue V1 = N->getOperand(0); 9120 SDValue V2 = N->getOperand(1); 9121 unsigned ShiftElts = 0, InsertAtByte = 0; 9122 bool Swap = false; 9123 9124 // Shifts required to get the half-word we want at element 3. 9125 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5}; 9126 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4}; 9127 9128 uint32_t Mask = 0; 9129 uint32_t OriginalOrderLow = 0x1234567; 9130 uint32_t OriginalOrderHigh = 0x89ABCDEF; 9131 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a 9132 // 32-bit space, only need 4-bit nibbles per element. 9133 for (unsigned i = 0; i < NumHalfWords; ++i) { 9134 unsigned MaskShift = (NumHalfWords - 1 - i) * 4; 9135 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift); 9136 } 9137 9138 // For each mask element, find out if we're just inserting something 9139 // from V2 into V1 or vice versa. Possible permutations inserting an element 9140 // from V2 into V1: 9141 // X, 1, 2, 3, 4, 5, 6, 7 9142 // 0, X, 2, 3, 4, 5, 6, 7 9143 // 0, 1, X, 3, 4, 5, 6, 7 9144 // 0, 1, 2, X, 4, 5, 6, 7 9145 // 0, 1, 2, 3, X, 5, 6, 7 9146 // 0, 1, 2, 3, 4, X, 6, 7 9147 // 0, 1, 2, 3, 4, 5, X, 7 9148 // 0, 1, 2, 3, 4, 5, 6, X 9149 // Inserting from V1 into V2 will be similar, except mask range will be [8,15]. 
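  // As a worked example of the packing above, the identity half-word mask
  // <0,1,2,3,4,5,6,7> packs to Mask == 0x01234567, which is exactly
  // OriginalOrderLow.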
9150
9151   bool FoundCandidate = false;
9152   // Go through the mask of half-words to find an element that's being moved
9153   // from one vector to the other.
9154   for (unsigned i = 0; i < NumHalfWords; ++i) {
9155     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9156     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9157     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9158     uint32_t TargetOrder = 0x0;
9159
9160     // If both vector operands for the shuffle are the same vector, the mask
9161     // will contain only elements from the first one and the second one will be
9162     // undef.
9163     if (V2.isUndef()) {
9164       ShiftElts = 0;
9165       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9166       TargetOrder = OriginalOrderLow;
9167       Swap = false;
9168       // Skip if this isn't the correct element or the mask of the other
9169       // elements doesn't match our expected order.
9170       if (MaskOneElt == VINSERTHSrcElem &&
9171           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9172         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9173         FoundCandidate = true;
9174         break;
9175       }
9176     } else { // If both operands are defined.
9177       // Target order is [8,15] if the current mask is between [0,7].
9178       TargetOrder =
9179           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9180       // Skip if the mask of the other elements doesn't match our expected order.
9181       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9182         // We only need the last 3 bits for the number of shifts.
9183         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9184                          : BigEndianShifts[MaskOneElt & 0x7];
9185         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9186         Swap = MaskOneElt < NumHalfWords;
9187         FoundCandidate = true;
9188         break;
9189       }
9190     }
9191   }
9192
9193   if (!FoundCandidate)
9194     return SDValue();
9195
9196   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9197   // optionally with VECSHL if shift is required.
9198   if (Swap)
9199     std::swap(V1, V2);
9200   if (V2.isUndef())
9201     V2 = V1;
9202   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9203   if (ShiftElts) {
9204     // Double ShiftElts because we're left shifting on v16i8 type.
9205     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9206                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9207     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9208     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9209                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9210     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9211   }
9212   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9213   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9214                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9215   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9216 }
9217
9218 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
9219 /// is a shuffle we can handle in a single instruction, return it. Otherwise,
9220 /// return the code it can be lowered into. Worst case, it can always be
9221 /// lowered into a vperm.
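///
/// For example, a splat of byte 0 is left as a VECTOR_SHUFFLE (the selector
/// matches it to a single vspltb), while an arbitrary two-input byte
/// permutation ends up as VPERM(V1, V2, mask) with the mask materialized
/// from the constant pool.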
9222 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 9223 SelectionDAG &DAG) const { 9224 SDLoc dl(Op); 9225 SDValue V1 = Op.getOperand(0); 9226 SDValue V2 = Op.getOperand(1); 9227 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); 9228 EVT VT = Op.getValueType(); 9229 bool isLittleEndian = Subtarget.isLittleEndian(); 9230 9231 unsigned ShiftElts, InsertAtByte; 9232 bool Swap = false; 9233 9234 // If this is a load-and-splat, we can do that with a single instruction 9235 // in some cases. However if the load has multiple uses, we don't want to 9236 // combine it because that will just produce multiple loads. 9237 const SDValue *InputLoad = getNormalLoadInput(V1); 9238 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() && 9239 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) && 9240 InputLoad->hasOneUse()) { 9241 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4); 9242 int SplatIdx = 9243 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG); 9244 9245 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad); 9246 // For 4-byte load-and-splat, we need Power9. 9247 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) { 9248 uint64_t Offset = 0; 9249 if (IsFourByte) 9250 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4; 9251 else 9252 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8; 9253 SDValue BasePtr = LD->getBasePtr(); 9254 if (Offset != 0) 9255 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), 9256 BasePtr, DAG.getIntPtrConstant(Offset, dl)); 9257 SDValue Ops[] = { 9258 LD->getChain(), // Chain 9259 BasePtr, // BasePtr 9260 DAG.getValueType(Op.getValueType()) // VT 9261 }; 9262 SDVTList VTL = 9263 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other); 9264 SDValue LdSplt = 9265 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL, 9266 Ops, LD->getMemoryVT(), LD->getMemOperand()); 9267 if (LdSplt.getValueType() != SVOp->getValueType(0)) 9268 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt); 9269 return LdSplt; 9270 } 9271 } 9272 if (Subtarget.hasP9Vector() && 9273 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap, 9274 isLittleEndian)) { 9275 if (Swap) 9276 std::swap(V1, V2); 9277 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9278 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2); 9279 if (ShiftElts) { 9280 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2, 9281 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9282 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl, 9283 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9284 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9285 } 9286 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2, 9287 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9288 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins); 9289 } 9290 9291 if (Subtarget.hasP9Altivec()) { 9292 SDValue NewISDNode; 9293 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG))) 9294 return NewISDNode; 9295 9296 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG))) 9297 return NewISDNode; 9298 } 9299 9300 if (Subtarget.hasVSX() && 9301 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9302 if (Swap) 9303 std::swap(V1, V2); 9304 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9305 SDValue Conv2 = 9306 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? 
V1 : V2); 9307 9308 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2, 9309 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9310 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl); 9311 } 9312 9313 if (Subtarget.hasVSX() && 9314 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) { 9315 if (Swap) 9316 std::swap(V1, V2); 9317 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9318 SDValue Conv2 = 9319 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2); 9320 9321 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2, 9322 DAG.getConstant(ShiftElts, dl, MVT::i32)); 9323 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI); 9324 } 9325 9326 if (Subtarget.hasP9Vector()) { 9327 if (PPC::isXXBRHShuffleMask(SVOp)) { 9328 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1); 9329 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv); 9330 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord); 9331 } else if (PPC::isXXBRWShuffleMask(SVOp)) { 9332 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9333 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv); 9334 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord); 9335 } else if (PPC::isXXBRDShuffleMask(SVOp)) { 9336 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1); 9337 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv); 9338 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord); 9339 } else if (PPC::isXXBRQShuffleMask(SVOp)) { 9340 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1); 9341 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv); 9342 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord); 9343 } 9344 } 9345 9346 if (Subtarget.hasVSX()) { 9347 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) { 9348 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG); 9349 9350 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1); 9351 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv, 9352 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9353 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat); 9354 } 9355 9356 // Left shifts of 8 bytes are actually swaps. Convert accordingly. 9357 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) { 9358 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1); 9359 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv); 9360 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap); 9361 } 9362 } 9363 9364 if (Subtarget.hasQPX()) { 9365 if (VT.getVectorNumElements() != 4) 9366 return SDValue(); 9367 9368 if (V2.isUndef()) V2 = V1; 9369 9370 int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp); 9371 if (AlignIdx != -1) { 9372 return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2, 9373 DAG.getConstant(AlignIdx, dl, MVT::i32)); 9374 } else if (SVOp->isSplat()) { 9375 int SplatIdx = SVOp->getSplatIndex(); 9376 if (SplatIdx >= 4) { 9377 std::swap(V1, V2); 9378 SplatIdx -= 4; 9379 } 9380 9381 return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1, 9382 DAG.getConstant(SplatIdx, dl, MVT::i32)); 9383 } 9384 9385 // Lower this into a qvgpci/qvfperm pair. 9386 9387 // Compute the qvgpci literal 9388 unsigned idx = 0; 9389 for (unsigned i = 0; i < 4; ++i) { 9390 int m = SVOp->getMaskElt(i); 9391 unsigned mm = m >= 0 ? 
(unsigned) m : i; 9392 idx |= mm << (3-i)*3; 9393 } 9394 9395 SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64, 9396 DAG.getConstant(idx, dl, MVT::i32)); 9397 return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3); 9398 } 9399 9400 // Cases that are handled by instructions that take permute immediates 9401 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be 9402 // selected by the instruction selector. 9403 if (V2.isUndef()) { 9404 if (PPC::isSplatShuffleMask(SVOp, 1) || 9405 PPC::isSplatShuffleMask(SVOp, 2) || 9406 PPC::isSplatShuffleMask(SVOp, 4) || 9407 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) || 9408 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) || 9409 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 || 9410 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) || 9411 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) || 9412 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) || 9413 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) || 9414 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) || 9415 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) || 9416 (Subtarget.hasP8Altivec() && ( 9417 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) || 9418 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) || 9419 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) { 9420 return Op; 9421 } 9422 } 9423 9424 // Altivec has a variety of "shuffle immediates" that take two vector inputs 9425 // and produce a fixed permutation. If any of these match, do not lower to 9426 // VPERM. 9427 unsigned int ShuffleKind = isLittleEndian ? 2 : 0; 9428 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) || 9429 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) || 9430 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 || 9431 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9432 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9433 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9434 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) || 9435 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) || 9436 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) || 9437 (Subtarget.hasP8Altivec() && ( 9438 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) || 9439 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) || 9440 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG)))) 9441 return Op; 9442 9443 // Check to see if this is a shuffle of 4-byte values. If so, we can use our 9444 // perfect shuffle table to emit an optimal matching sequence. 9445 ArrayRef<int> PermMask = SVOp->getMask(); 9446 9447 unsigned PFIndexes[4]; 9448 bool isFourElementShuffle = true; 9449 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number 9450 unsigned EltNo = 8; // Start out undef. 9451 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte. 9452 if (PermMask[i*4+j] < 0) 9453 continue; // Undef, ignore it. 9454 9455 unsigned ByteSource = PermMask[i*4+j]; 9456 if ((ByteSource & 3) != j) { 9457 isFourElementShuffle = false; 9458 break; 9459 } 9460 9461 if (EltNo == 8) { 9462 EltNo = ByteSource/4; 9463 } else if (EltNo != ByteSource/4) { 9464 isFourElementShuffle = false; 9465 break; 9466 } 9467 } 9468 PFIndexes[i] = EltNo; 9469 } 9470 9471 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the 9472 // perfect shuffle vector to determine if it is cost effective to do this as 9473 // discrete instructions, or whether we should use a vperm. 9474 // For now, we skip this for little endian until such time as we have a 9475 // little-endian perfect shuffle table. 
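  // As a worked example, the identity word mask <0,1,2,3> gives
  //   PFTableIndex = 0*729 + 1*81 + 2*9 + 3 = 102,
  // and each 32-bit table entry packs a cost (bits 31-30), an opcode
  // (bits 29-26) and two 13-bit operand IDs, exactly as decoded in
  // GeneratePerfectShuffle above.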
9476   if (isFourElementShuffle && !isLittleEndian) {
9477     // Compute the index in the perfect shuffle table.
9478     unsigned PFTableIndex =
9479       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9480
9481     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9482     unsigned Cost = (PFEntry >> 30);
9483
9484     // Determining when to avoid vperm is tricky. Many things affect the cost
9485     // of vperm, particularly how many times the perm mask needs to be computed.
9486     // For example, if the perm mask can be hoisted out of a loop or is already
9487     // used (perhaps because there are multiple permutes with the same shuffle
9488     // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
9489     // the loop requires an extra register.
9490     //
9491     // As a compromise, we only emit discrete instructions if the shuffle can be
9492     // generated in 3 or fewer operations. When we have loop information
9493     // available, if this block is within a loop, we should avoid using vperm
9494     // for 3-operation perms and use a constant pool load instead.
9495     if (Cost < 3)
9496       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9497   }
9498
9499   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9500   // vector that will get spilled to the constant pool.
9501   if (V2.isUndef()) V2 = V1;
9502
9503   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9504   // that it is in input element units, not in bytes. Convert now.
9505
9506   // For little endian, the order of the input vectors is reversed, and
9507   // the permutation mask is complemented with respect to 31. This is
9508   // necessary to produce proper semantics with the big-endian-biased vperm
9509   // instruction.
9510   EVT EltVT = V1.getValueType().getVectorElementType();
9511   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9512
9513   SmallVector<SDValue, 16> ResultMask;
9514   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9515     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9516
9517     for (unsigned j = 0; j != BytesPerElement; ++j)
9518       if (isLittleEndian)
9519         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9520                                              dl, MVT::i32));
9521       else
9522         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9523                                              MVT::i32));
9524   }
9525
9526   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9527   if (isLittleEndian)
9528     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9529                        V2, V1, VPermMask);
9530   else
9531     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9532                        V1, V2, VPermMask);
9533 }
9534
9535 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9536 /// vector comparison. If it is, return true and fill in CompareOpc/isDot with
9537 /// information about the intrinsic.
9538 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9539                                  bool &isDot, const PPCSubtarget &Subtarget) {
9540   unsigned IntrinsicID =
9541       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9542   CompareOpc = -1;
9543   isDot = false;
9544   switch (IntrinsicID) {
9545   default:
9546     return false;
9547   // Comparison predicates.
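  // The CompareOpc values below are the VC-form extended opcodes from the
  // Power ISA (e.g. 966 for vcmpbfp); the predicate forms reuse the same
  // values with the record bit set, which is what updates CR6.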
9548 case Intrinsic::ppc_altivec_vcmpbfp_p: 9549 CompareOpc = 966; 9550 isDot = true; 9551 break; 9552 case Intrinsic::ppc_altivec_vcmpeqfp_p: 9553 CompareOpc = 198; 9554 isDot = true; 9555 break; 9556 case Intrinsic::ppc_altivec_vcmpequb_p: 9557 CompareOpc = 6; 9558 isDot = true; 9559 break; 9560 case Intrinsic::ppc_altivec_vcmpequh_p: 9561 CompareOpc = 70; 9562 isDot = true; 9563 break; 9564 case Intrinsic::ppc_altivec_vcmpequw_p: 9565 CompareOpc = 134; 9566 isDot = true; 9567 break; 9568 case Intrinsic::ppc_altivec_vcmpequd_p: 9569 if (Subtarget.hasP8Altivec()) { 9570 CompareOpc = 199; 9571 isDot = true; 9572 } else 9573 return false; 9574 break; 9575 case Intrinsic::ppc_altivec_vcmpneb_p: 9576 case Intrinsic::ppc_altivec_vcmpneh_p: 9577 case Intrinsic::ppc_altivec_vcmpnew_p: 9578 case Intrinsic::ppc_altivec_vcmpnezb_p: 9579 case Intrinsic::ppc_altivec_vcmpnezh_p: 9580 case Intrinsic::ppc_altivec_vcmpnezw_p: 9581 if (Subtarget.hasP9Altivec()) { 9582 switch (IntrinsicID) { 9583 default: 9584 llvm_unreachable("Unknown comparison intrinsic."); 9585 case Intrinsic::ppc_altivec_vcmpneb_p: 9586 CompareOpc = 7; 9587 break; 9588 case Intrinsic::ppc_altivec_vcmpneh_p: 9589 CompareOpc = 71; 9590 break; 9591 case Intrinsic::ppc_altivec_vcmpnew_p: 9592 CompareOpc = 135; 9593 break; 9594 case Intrinsic::ppc_altivec_vcmpnezb_p: 9595 CompareOpc = 263; 9596 break; 9597 case Intrinsic::ppc_altivec_vcmpnezh_p: 9598 CompareOpc = 327; 9599 break; 9600 case Intrinsic::ppc_altivec_vcmpnezw_p: 9601 CompareOpc = 391; 9602 break; 9603 } 9604 isDot = true; 9605 } else 9606 return false; 9607 break; 9608 case Intrinsic::ppc_altivec_vcmpgefp_p: 9609 CompareOpc = 454; 9610 isDot = true; 9611 break; 9612 case Intrinsic::ppc_altivec_vcmpgtfp_p: 9613 CompareOpc = 710; 9614 isDot = true; 9615 break; 9616 case Intrinsic::ppc_altivec_vcmpgtsb_p: 9617 CompareOpc = 774; 9618 isDot = true; 9619 break; 9620 case Intrinsic::ppc_altivec_vcmpgtsh_p: 9621 CompareOpc = 838; 9622 isDot = true; 9623 break; 9624 case Intrinsic::ppc_altivec_vcmpgtsw_p: 9625 CompareOpc = 902; 9626 isDot = true; 9627 break; 9628 case Intrinsic::ppc_altivec_vcmpgtsd_p: 9629 if (Subtarget.hasP8Altivec()) { 9630 CompareOpc = 967; 9631 isDot = true; 9632 } else 9633 return false; 9634 break; 9635 case Intrinsic::ppc_altivec_vcmpgtub_p: 9636 CompareOpc = 518; 9637 isDot = true; 9638 break; 9639 case Intrinsic::ppc_altivec_vcmpgtuh_p: 9640 CompareOpc = 582; 9641 isDot = true; 9642 break; 9643 case Intrinsic::ppc_altivec_vcmpgtuw_p: 9644 CompareOpc = 646; 9645 isDot = true; 9646 break; 9647 case Intrinsic::ppc_altivec_vcmpgtud_p: 9648 if (Subtarget.hasP8Altivec()) { 9649 CompareOpc = 711; 9650 isDot = true; 9651 } else 9652 return false; 9653 break; 9654 9655 // VSX predicate comparisons use the same infrastructure 9656 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9657 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9658 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9659 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9660 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9661 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9662 if (Subtarget.hasVSX()) { 9663 switch (IntrinsicID) { 9664 case Intrinsic::ppc_vsx_xvcmpeqdp_p: 9665 CompareOpc = 99; 9666 break; 9667 case Intrinsic::ppc_vsx_xvcmpgedp_p: 9668 CompareOpc = 115; 9669 break; 9670 case Intrinsic::ppc_vsx_xvcmpgtdp_p: 9671 CompareOpc = 107; 9672 break; 9673 case Intrinsic::ppc_vsx_xvcmpeqsp_p: 9674 CompareOpc = 67; 9675 break; 9676 case Intrinsic::ppc_vsx_xvcmpgesp_p: 9677 CompareOpc = 83; 9678 break; 9679 case Intrinsic::ppc_vsx_xvcmpgtsp_p: 9680 CompareOpc = 75; 
9681 break; 9682 } 9683 isDot = true; 9684 } else 9685 return false; 9686 break; 9687 9688 // Normal Comparisons. 9689 case Intrinsic::ppc_altivec_vcmpbfp: 9690 CompareOpc = 966; 9691 break; 9692 case Intrinsic::ppc_altivec_vcmpeqfp: 9693 CompareOpc = 198; 9694 break; 9695 case Intrinsic::ppc_altivec_vcmpequb: 9696 CompareOpc = 6; 9697 break; 9698 case Intrinsic::ppc_altivec_vcmpequh: 9699 CompareOpc = 70; 9700 break; 9701 case Intrinsic::ppc_altivec_vcmpequw: 9702 CompareOpc = 134; 9703 break; 9704 case Intrinsic::ppc_altivec_vcmpequd: 9705 if (Subtarget.hasP8Altivec()) 9706 CompareOpc = 199; 9707 else 9708 return false; 9709 break; 9710 case Intrinsic::ppc_altivec_vcmpneb: 9711 case Intrinsic::ppc_altivec_vcmpneh: 9712 case Intrinsic::ppc_altivec_vcmpnew: 9713 case Intrinsic::ppc_altivec_vcmpnezb: 9714 case Intrinsic::ppc_altivec_vcmpnezh: 9715 case Intrinsic::ppc_altivec_vcmpnezw: 9716 if (Subtarget.hasP9Altivec()) 9717 switch (IntrinsicID) { 9718 default: 9719 llvm_unreachable("Unknown comparison intrinsic."); 9720 case Intrinsic::ppc_altivec_vcmpneb: 9721 CompareOpc = 7; 9722 break; 9723 case Intrinsic::ppc_altivec_vcmpneh: 9724 CompareOpc = 71; 9725 break; 9726 case Intrinsic::ppc_altivec_vcmpnew: 9727 CompareOpc = 135; 9728 break; 9729 case Intrinsic::ppc_altivec_vcmpnezb: 9730 CompareOpc = 263; 9731 break; 9732 case Intrinsic::ppc_altivec_vcmpnezh: 9733 CompareOpc = 327; 9734 break; 9735 case Intrinsic::ppc_altivec_vcmpnezw: 9736 CompareOpc = 391; 9737 break; 9738 } 9739 else 9740 return false; 9741 break; 9742 case Intrinsic::ppc_altivec_vcmpgefp: 9743 CompareOpc = 454; 9744 break; 9745 case Intrinsic::ppc_altivec_vcmpgtfp: 9746 CompareOpc = 710; 9747 break; 9748 case Intrinsic::ppc_altivec_vcmpgtsb: 9749 CompareOpc = 774; 9750 break; 9751 case Intrinsic::ppc_altivec_vcmpgtsh: 9752 CompareOpc = 838; 9753 break; 9754 case Intrinsic::ppc_altivec_vcmpgtsw: 9755 CompareOpc = 902; 9756 break; 9757 case Intrinsic::ppc_altivec_vcmpgtsd: 9758 if (Subtarget.hasP8Altivec()) 9759 CompareOpc = 967; 9760 else 9761 return false; 9762 break; 9763 case Intrinsic::ppc_altivec_vcmpgtub: 9764 CompareOpc = 518; 9765 break; 9766 case Intrinsic::ppc_altivec_vcmpgtuh: 9767 CompareOpc = 582; 9768 break; 9769 case Intrinsic::ppc_altivec_vcmpgtuw: 9770 CompareOpc = 646; 9771 break; 9772 case Intrinsic::ppc_altivec_vcmpgtud: 9773 if (Subtarget.hasP8Altivec()) 9774 CompareOpc = 711; 9775 else 9776 return false; 9777 break; 9778 } 9779 return true; 9780 } 9781 9782 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom 9783 /// lower, do it, otherwise return null. 9784 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 9785 SelectionDAG &DAG) const { 9786 unsigned IntrinsicID = 9787 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 9788 9789 SDLoc dl(Op); 9790 9791 if (IntrinsicID == Intrinsic::thread_pointer) { 9792 // Reads the thread pointer register, used for __builtin_thread_pointer. 9793 if (Subtarget.isPPC64()) 9794 return DAG.getRegister(PPC::X13, MVT::i64); 9795 return DAG.getRegister(PPC::R2, MVT::i32); 9796 } 9797 9798 // If this is a lowered altivec predicate compare, CompareOpc is set to the 9799 // opcode number of the comparison. 9800 int CompareOpc; 9801 bool isDot; 9802 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget)) 9803 return SDValue(); // Don't custom lower most intrinsics. 9804 9805 // If this is a non-dot comparison, make the VCMP node and we are done. 
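  // (For example, plain vcmpequw only produces the element mask as a vector
  // result; the vcmpequw_p predicate form handled further below also needs
  // CR6, which is read back with MFOCRF.)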
9806 if (!isDot) { 9807 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), 9808 Op.getOperand(1), Op.getOperand(2), 9809 DAG.getConstant(CompareOpc, dl, MVT::i32)); 9810 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp); 9811 } 9812 9813 // Create the PPCISD altivec 'dot' comparison node. 9814 SDValue Ops[] = { 9815 Op.getOperand(2), // LHS 9816 Op.getOperand(3), // RHS 9817 DAG.getConstant(CompareOpc, dl, MVT::i32) 9818 }; 9819 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue }; 9820 SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops); 9821 9822 // Now that we have the comparison, emit a copy from the CR to a GPR. 9823 // This is flagged to the above dot comparison. 9824 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32, 9825 DAG.getRegister(PPC::CR6, MVT::i32), 9826 CompNode.getValue(1)); 9827 9828 // Unpack the result based on how the target uses it. 9829 unsigned BitNo; // Bit # of CR6. 9830 bool InvertBit; // Invert result? 9831 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) { 9832 default: // Can't happen, don't crash on invalid number though. 9833 case 0: // Return the value of the EQ bit of CR6. 9834 BitNo = 0; InvertBit = false; 9835 break; 9836 case 1: // Return the inverted value of the EQ bit of CR6. 9837 BitNo = 0; InvertBit = true; 9838 break; 9839 case 2: // Return the value of the LT bit of CR6. 9840 BitNo = 2; InvertBit = false; 9841 break; 9842 case 3: // Return the inverted value of the LT bit of CR6. 9843 BitNo = 2; InvertBit = true; 9844 break; 9845 } 9846 9847 // Shift the bit into the low position. 9848 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags, 9849 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32)); 9850 // Isolate the bit. 9851 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags, 9852 DAG.getConstant(1, dl, MVT::i32)); 9853 9854 // If we are supposed to, toggle the bit. 9855 if (InvertBit) 9856 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags, 9857 DAG.getConstant(1, dl, MVT::i32)); 9858 return Flags; 9859 } 9860 9861 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 9862 SelectionDAG &DAG) const { 9863 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to 9864 // the beginning of the argument list. 9865 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; 9866 SDLoc DL(Op); 9867 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { 9868 case Intrinsic::ppc_cfence: { 9869 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument."); 9870 assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); 9871 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, 9872 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, 9873 Op.getOperand(ArgStart + 1)), 9874 Op.getOperand(0)), 9875 0); 9876 } 9877 default: 9878 break; 9879 } 9880 return SDValue(); 9881 } 9882 9883 SDValue PPCTargetLowering::LowerREM(SDValue Op, SelectionDAG &DAG) const { 9884 // Check for a DIV with the same operands as this REM. 9885 for (auto UI : Op.getOperand(1)->uses()) { 9886 if ((Op.getOpcode() == ISD::SREM && UI->getOpcode() == ISD::SDIV) || 9887 (Op.getOpcode() == ISD::UREM && UI->getOpcode() == ISD::UDIV)) 9888 if (UI->getOperand(0) == Op.getOperand(0) && 9889 UI->getOperand(1) == Op.getOperand(1)) 9890 return SDValue(); 9891 } 9892 return Op; 9893 } 9894 9895 // Lower scalar BSWAP64 to xxbrd. 
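// Roughly the following sequence (illustrative only; the exact instructions
// depend on the subtarget and on register allocation):
//   mtvsrdd vsX, rA, rA    # splat the GPR into both doublewords
//   xxbrd   vsX, vsX       # byte-reverse each doubleword
//   mfvsrd  rB, vsX        # move one doubleword back to a GPR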
9896 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const { 9897 SDLoc dl(Op); 9898 // MTVSRDD 9899 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0), 9900 Op.getOperand(0)); 9901 // XXBRD 9902 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op); 9903 // MFVSRD 9904 int VectorIndex = 0; 9905 if (Subtarget.isLittleEndian()) 9906 VectorIndex = 1; 9907 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op, 9908 DAG.getTargetConstant(VectorIndex, dl, MVT::i32)); 9909 return Op; 9910 } 9911 9912 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be 9913 // compared to a value that is atomically loaded (atomic loads zero-extend). 9914 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, 9915 SelectionDAG &DAG) const { 9916 assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP && 9917 "Expecting an atomic compare-and-swap here."); 9918 SDLoc dl(Op); 9919 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode()); 9920 EVT MemVT = AtomicNode->getMemoryVT(); 9921 if (MemVT.getSizeInBits() >= 32) 9922 return Op; 9923 9924 SDValue CmpOp = Op.getOperand(2); 9925 // If this is already correctly zero-extended, leave it alone. 9926 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits()); 9927 if (DAG.MaskedValueIsZero(CmpOp, HighBits)) 9928 return Op; 9929 9930 // Clear the high bits of the compare operand. 9931 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1; 9932 SDValue NewCmpOp = 9933 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp, 9934 DAG.getConstant(MaskVal, dl, MVT::i32)); 9935 9936 // Replace the existing compare operand with the properly zero-extended one. 9937 SmallVector<SDValue, 4> Ops; 9938 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++) 9939 Ops.push_back(AtomicNode->getOperand(i)); 9940 Ops[2] = NewCmpOp; 9941 MachineMemOperand *MMO = AtomicNode->getMemOperand(); 9942 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other); 9943 auto NodeTy = 9944 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16; 9945 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO); 9946 } 9947 9948 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, 9949 SelectionDAG &DAG) const { 9950 SDLoc dl(Op); 9951 // Create a stack slot that is 16-byte aligned. 9952 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 9953 int FrameIdx = MFI.CreateStackObject(16, 16, false); 9954 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 9955 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 9956 9957 // Store the input value into Value#0 of the stack slot. 9958 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx, 9959 MachinePointerInfo()); 9960 // Load it out. 9961 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo()); 9962 } 9963 9964 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 9965 SelectionDAG &DAG) const { 9966 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && 9967 "Should only be called for ISD::INSERT_VECTOR_ELT"); 9968 9969 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 9970 // We have legal lowering for constant indices but not for variable ones. 9971 if (!C) 9972 return SDValue(); 9973 9974 EVT VT = Op.getValueType(); 9975 SDLoc dl(Op); 9976 SDValue V1 = Op.getOperand(0); 9977 SDValue V2 = Op.getOperand(1); 9978 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types. 
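  // For example, inserting into element 2 of a v8i16 gives InsertAtByte == 4
  // on big-endian, and (16 - 2) - 4 == 10 on little-endian, per the byte
  // index flip below.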
9979 if (VT == MVT::v8i16 || VT == MVT::v16i8) { 9980 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2); 9981 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8; 9982 unsigned InsertAtElement = C->getZExtValue(); 9983 unsigned InsertAtByte = InsertAtElement * BytesInEachElement; 9984 if (Subtarget.isLittleEndian()) { 9985 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte; 9986 } 9987 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz, 9988 DAG.getConstant(InsertAtByte, dl, MVT::i32)); 9989 } 9990 return Op; 9991 } 9992 9993 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 9994 SelectionDAG &DAG) const { 9995 SDLoc dl(Op); 9996 SDNode *N = Op.getNode(); 9997 9998 assert(N->getOperand(0).getValueType() == MVT::v4i1 && 9999 "Unknown extract_vector_elt type"); 10000 10001 SDValue Value = N->getOperand(0); 10002 10003 // The first part of this is like the store lowering except that we don't 10004 // need to track the chain. 10005 10006 // The values are now known to be -1 (false) or 1 (true). To convert this 10007 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 10008 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 10009 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 10010 10011 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 10012 // understand how to form the extending load. 10013 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 10014 10015 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 10016 10017 // Now convert to an integer and store. 10018 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 10019 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 10020 Value); 10021 10022 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10023 int FrameIdx = MFI.CreateStackObject(16, 16, false); 10024 MachinePointerInfo PtrInfo = 10025 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 10026 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 10027 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 10028 10029 SDValue StoreChain = DAG.getEntryNode(); 10030 SDValue Ops[] = {StoreChain, 10031 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 10032 Value, FIdx}; 10033 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 10034 10035 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 10036 dl, VTs, Ops, MVT::v4i32, PtrInfo); 10037 10038 // Extract the value requested. 10039 unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 10040 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 10041 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 10042 10043 SDValue IntVal = 10044 DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset)); 10045 10046 if (!Subtarget.useCRBits()) 10047 return IntVal; 10048 10049 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal); 10050 } 10051 10052 /// Lowering for QPX v4i1 loads 10053 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op, 10054 SelectionDAG &DAG) const { 10055 SDLoc dl(Op); 10056 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode()); 10057 SDValue LoadChain = LN->getChain(); 10058 SDValue BasePtr = LN->getBasePtr(); 10059 10060 if (Op.getValueType() == MVT::v4f64 || 10061 Op.getValueType() == MVT::v4f32) { 10062 EVT MemVT = LN->getMemoryVT(); 10063 unsigned Alignment = LN->getAlignment(); 10064 10065 // If this load is properly aligned, then it is legal. 
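    // Otherwise the load is scalarized below: for example, a v4f64 load with
    // only 8-byte alignment becomes four f64 loads at offsets 0, 8, 16 and
    // 24, re-joined with a BUILD_VECTOR.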
10066 if (Alignment >= MemVT.getStoreSize()) 10067 return Op; 10068 10069 EVT ScalarVT = Op.getValueType().getScalarType(), 10070 ScalarMemVT = MemVT.getScalarType(); 10071 unsigned Stride = ScalarMemVT.getStoreSize(); 10072 10073 SDValue Vals[4], LoadChains[4]; 10074 for (unsigned Idx = 0; Idx < 4; ++Idx) { 10075 SDValue Load; 10076 if (ScalarVT != ScalarMemVT) 10077 Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain, 10078 BasePtr, 10079 LN->getPointerInfo().getWithOffset(Idx * Stride), 10080 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 10081 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10082 else 10083 Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr, 10084 LN->getPointerInfo().getWithOffset(Idx * Stride), 10085 MinAlign(Alignment, Idx * Stride), 10086 LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10087 10088 if (Idx == 0 && LN->isIndexed()) { 10089 assert(LN->getAddressingMode() == ISD::PRE_INC && 10090 "Unknown addressing mode on vector load"); 10091 Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(), 10092 LN->getAddressingMode()); 10093 } 10094 10095 Vals[Idx] = Load; 10096 LoadChains[Idx] = Load.getValue(1); 10097 10098 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10099 DAG.getConstant(Stride, dl, 10100 BasePtr.getValueType())); 10101 } 10102 10103 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10104 SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals); 10105 10106 if (LN->isIndexed()) { 10107 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF }; 10108 return DAG.getMergeValues(RetOps, dl); 10109 } 10110 10111 SDValue RetOps[] = { Value, TF }; 10112 return DAG.getMergeValues(RetOps, dl); 10113 } 10114 10115 assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower"); 10116 assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported"); 10117 10118 // To lower v4i1 from a byte array, we load the byte elements of the 10119 // vector and then reuse the BUILD_VECTOR logic. 10120 10121 SDValue VectElmts[4], VectElmtChains[4]; 10122 for (unsigned i = 0; i < 4; ++i) { 10123 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 10124 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 10125 10126 VectElmts[i] = DAG.getExtLoad( 10127 ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx, 10128 LN->getPointerInfo().getWithOffset(i), MVT::i8, 10129 /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo()); 10130 VectElmtChains[i] = VectElmts[i].getValue(1); 10131 } 10132 10133 LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains); 10134 SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts); 10135 10136 SDValue RVals[] = { Value, LoadChain }; 10137 return DAG.getMergeValues(RVals, dl); 10138 } 10139 10140 /// Lowering for QPX v4i1 stores 10141 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op, 10142 SelectionDAG &DAG) const { 10143 SDLoc dl(Op); 10144 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode()); 10145 SDValue StoreChain = SN->getChain(); 10146 SDValue BasePtr = SN->getBasePtr(); 10147 SDValue Value = SN->getValue(); 10148 10149 if (Value.getValueType() == MVT::v4f64 || 10150 Value.getValueType() == MVT::v4f32) { 10151 EVT MemVT = SN->getMemoryVT(); 10152 unsigned Alignment = SN->getAlignment(); 10153 10154 // If this store is properly aligned, then it is legal. 
10155 if (Alignment >= MemVT.getStoreSize()) 10156 return Op; 10157 10158 EVT ScalarVT = Value.getValueType().getScalarType(), 10159 ScalarMemVT = MemVT.getScalarType(); 10160 unsigned Stride = ScalarMemVT.getStoreSize(); 10161 10162 SDValue Stores[4]; 10163 for (unsigned Idx = 0; Idx < 4; ++Idx) { 10164 SDValue Ex = DAG.getNode( 10165 ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value, 10166 DAG.getConstant(Idx, dl, getVectorIdxTy(DAG.getDataLayout()))); 10167 SDValue Store; 10168 if (ScalarVT != ScalarMemVT) 10169 Store = 10170 DAG.getTruncStore(StoreChain, dl, Ex, BasePtr, 10171 SN->getPointerInfo().getWithOffset(Idx * Stride), 10172 ScalarMemVT, MinAlign(Alignment, Idx * Stride), 10173 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10174 else 10175 Store = DAG.getStore(StoreChain, dl, Ex, BasePtr, 10176 SN->getPointerInfo().getWithOffset(Idx * Stride), 10177 MinAlign(Alignment, Idx * Stride), 10178 SN->getMemOperand()->getFlags(), SN->getAAInfo()); 10179 10180 if (Idx == 0 && SN->isIndexed()) { 10181 assert(SN->getAddressingMode() == ISD::PRE_INC && 10182 "Unknown addressing mode on vector store"); 10183 Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(), 10184 SN->getAddressingMode()); 10185 } 10186 10187 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, 10188 DAG.getConstant(Stride, dl, 10189 BasePtr.getValueType())); 10190 Stores[Idx] = Store; 10191 } 10192 10193 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 10194 10195 if (SN->isIndexed()) { 10196 SDValue RetOps[] = { TF, Stores[0].getValue(1) }; 10197 return DAG.getMergeValues(RetOps, dl); 10198 } 10199 10200 return TF; 10201 } 10202 10203 assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported"); 10204 assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower"); 10205 10206 // The values are now known to be -1 (false) or 1 (true). To convert this 10207 // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5). 10208 // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5 10209 Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value); 10210 10211 // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to 10212 // understand how to form the extending load. 10213 SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64); 10214 10215 Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs); 10216 10217 // Now convert to an integer and store. 10218 Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64, 10219 DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32), 10220 Value); 10221 10222 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 10223 int FrameIdx = MFI.CreateStackObject(16, 16, false); 10224 MachinePointerInfo PtrInfo = 10225 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx); 10226 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 10227 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT); 10228 10229 SDValue Ops[] = {StoreChain, 10230 DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32), 10231 Value, FIdx}; 10232 SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other); 10233 10234 StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, 10235 dl, VTs, Ops, MVT::v4i32, PtrInfo); 10236 10237 // Move data into the byte array. 
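  // The qvstfiw above wrote four 32-bit words into the 16-byte stack slot;
  // read each word back and truncate it into one byte of the destination
  // array.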
10238 SDValue Loads[4], LoadChains[4]; 10239 for (unsigned i = 0; i < 4; ++i) { 10240 unsigned Offset = 4*i; 10241 SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType()); 10242 Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx); 10243 10244 Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx, 10245 PtrInfo.getWithOffset(Offset)); 10246 LoadChains[i] = Loads[i].getValue(1); 10247 } 10248 10249 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); 10250 10251 SDValue Stores[4]; 10252 for (unsigned i = 0; i < 4; ++i) { 10253 SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType()); 10254 Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx); 10255 10256 Stores[i] = DAG.getTruncStore( 10257 StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i), 10258 MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(), 10259 SN->getAAInfo()); 10260 } 10261 10262 StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 10263 10264 return StoreChain; 10265 } 10266 10267 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { 10268 SDLoc dl(Op); 10269 if (Op.getValueType() == MVT::v4i32) { 10270 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10271 10272 SDValue Zero = BuildSplatI( 0, 1, MVT::v4i32, DAG, dl); 10273 SDValue Neg16 = BuildSplatI(-16, 4, MVT::v4i32, DAG, dl);//+16 as shift amt. 10274 10275 SDValue RHSSwap = // = vrlw RHS, 16 10276 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); 10277 10278 // Shrinkify inputs to v8i16. 10279 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS); 10280 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS); 10281 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap); 10282 10283 // Low parts multiplied together, generating 32-bit results (we ignore the 10284 // top parts). 10285 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh, 10286 LHS, RHS, DAG, dl, MVT::v4i32); 10287 10288 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm, 10289 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32); 10290 // Shift the high parts up 16 bits. 10291 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd, 10292 Neg16, DAG, dl); 10293 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd); 10294 } else if (Op.getValueType() == MVT::v8i16) { 10295 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10296 10297 SDValue Zero = BuildSplatI(0, 1, MVT::v8i16, DAG, dl); 10298 10299 return BuildIntrinsicOp(Intrinsic::ppc_altivec_vmladduhm, 10300 LHS, RHS, Zero, DAG, dl); 10301 } else if (Op.getValueType() == MVT::v16i8) { 10302 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1); 10303 bool isLittleEndian = Subtarget.isLittleEndian(); 10304 10305 // Multiply the even 8-bit parts, producing 16-bit sums. 10306 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, 10307 LHS, RHS, DAG, dl, MVT::v8i16); 10308 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts); 10309 10310 // Multiply the odd 8-bit parts, producing 16-bit sums. 10311 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, 10312 LHS, RHS, DAG, dl, MVT::v8i16); 10313 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts); 10314 10315 // Merge the results together. Because vmuleub and vmuloub are 10316 // instructions with a big-endian bias, we must reverse the 10317 // element numbering and reverse the meaning of "odd" and "even" 10318 // when generating little endian code. 
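    // For example, with big-endian numbering the mask below is
    // <1, 17, 3, 19, ...>, picking the low (odd) byte of each 16-bit product
    // from EvenParts and OddParts; for little endian it is <0, 16, 2, 18, ...>
    // with the two operands swapped.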
10319     int Ops[16];
10320     for (unsigned i = 0; i != 8; ++i) {
10321       if (isLittleEndian) {
10322         Ops[i*2 ] = 2*i;
10323         Ops[i*2+1] = 2*i+16;
10324       } else {
10325         Ops[i*2 ] = 2*i+1;
10326         Ops[i*2+1] = 2*i+1+16;
10327       }
10328     }
10329     if (isLittleEndian)
10330       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10331     else
10332       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10333   } else {
10334     llvm_unreachable("Unknown mul to lower!");
10335   }
10336 }
10337
10338 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
10339
10340   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
10341
10342   EVT VT = Op.getValueType();
10343   assert(VT.isVector() &&
10344          "Only set vector abs as custom, scalar abs shouldn't reach here!");
10345   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
10346           VT == MVT::v16i8) &&
10347          "Unexpected vector element type!");
10348   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
10349          "Current subtarget doesn't support smax v2i64!");
10350
10351   // For vector abs, it can be lowered to:
10352   // abs x
10353   // ==>
10354   // y = -x
10355   // smax(x, y)
10356
10357   SDLoc dl(Op);
10358   SDValue X = Op.getOperand(0);
10359   SDValue Zero = DAG.getConstant(0, dl, VT);
10360   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
10361
10362   // The SMAX patch https://reviews.llvm.org/D47332
10363   // hasn't landed yet, so use the intrinsic first here.
10364   // TODO: Use SMAX directly once the SMAX patch has landed.
10365   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
10366   if (VT == MVT::v2i64)
10367     BifID = Intrinsic::ppc_altivec_vmaxsd;
10368   else if (VT == MVT::v8i16)
10369     BifID = Intrinsic::ppc_altivec_vmaxsh;
10370   else if (VT == MVT::v16i8)
10371     BifID = Intrinsic::ppc_altivec_vmaxsb;
10372
10373   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
10374 }
10375
10376 // Custom lowering for fpext v2f32 to v2f64
10377 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10378
10379   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10380          "Should only be called for ISD::FP_EXTEND");
10381
10382   // FIXME: handle extends from half precision float vectors on P9.
10383   // We only want to custom lower an extend from v2f32 to v2f64.
10384   if (Op.getValueType() != MVT::v2f64 ||
10385       Op.getOperand(0).getValueType() != MVT::v2f32)
10386     return SDValue();
10387
10388   SDLoc dl(Op);
10389   SDValue Op0 = Op.getOperand(0);
10390
10391   switch (Op0.getOpcode()) {
10392   default:
10393     return SDValue();
10394   case ISD::EXTRACT_SUBVECTOR: {
10395     assert(Op0.getNumOperands() == 2 &&
10396            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10397            "Node should have 2 operands with second one being a constant!");
10398
10399     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10400       return SDValue();
10401
10402     // Custom lowering is only done for the high or low doubleword.
10403     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10404     if (Idx % 2 != 0)
10405       return SDValue();
10406
10407     // Since the input is v4f32, at this point Idx is either 0 or 2.
10408     // Shift to get the doubleword position we want.
10409     int DWord = Idx >> 1;
10410
10411     // High and low word positions are different on little endian.
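    // (For instance, subvector index 0 names doubleword 0 of the v4f32 input
    // in big-endian element order, but that data lives in doubleword 1 of the
    // underlying register on little-endian targets, hence the flip below.)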
10412     if (Subtarget.isLittleEndian())
10413       DWord ^= 0x1;
10414
10415     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10416                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10417   }
10418   case ISD::FADD:
10419   case ISD::FMUL:
10420   case ISD::FSUB: {
10421     SDValue NewLoad[2];
10422     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10423       // Ensure both inputs are loads.
10424       SDValue LdOp = Op0.getOperand(i);
10425       if (LdOp.getOpcode() != ISD::LOAD)
10426         return SDValue();
10427       // Generate new load node.
10428       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10429       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10430       NewLoad[i] = DAG.getMemIntrinsicNode(
10431           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10432           LD->getMemoryVT(), LD->getMemOperand());
10433     }
10434     SDValue NewOp =
10435         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10436                     NewLoad[1], Op0.getNode()->getFlags());
10437     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10438                        DAG.getConstant(0, dl, MVT::i32));
10439   }
10440   case ISD::LOAD: {
10441     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10442     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10443     SDValue NewLd = DAG.getMemIntrinsicNode(
10444         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10445         LD->getMemoryVT(), LD->getMemOperand());
10446     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10447                        DAG.getConstant(0, dl, MVT::i32));
10448   }
10449   }
10450   llvm_unreachable("ERROR: Should return for all cases within switch.");
10451 }
10452
10453 /// LowerOperation - Provide custom lowering hooks for some operations.
10454 ///
10455 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10456   switch (Op.getOpcode()) {
10457   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10458   case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
10459   case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
10460   case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
10461   case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
10462   case ISD::JumpTable: return LowerJumpTable(Op, DAG);
10463   case ISD::SETCC: return LowerSETCC(Op, DAG);
10464   case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
10465   case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
10466
10467   // Variable argument lowering.
10468   case ISD::VASTART: return LowerVASTART(Op, DAG);
10469   case ISD::VAARG: return LowerVAARG(Op, DAG);
10470   case ISD::VACOPY: return LowerVACOPY(Op, DAG);
10471
10472   case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
10473   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10474   case ISD::GET_DYNAMIC_AREA_OFFSET:
10475     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10476
10477   // Exception handling lowering.
10478 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG); 10479 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); 10480 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); 10481 10482 case ISD::LOAD: return LowerLOAD(Op, DAG); 10483 case ISD::STORE: return LowerSTORE(Op, DAG); 10484 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG); 10485 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG); 10486 case ISD::FP_TO_UINT: 10487 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op)); 10488 case ISD::UINT_TO_FP: 10489 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG); 10490 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); 10491 10492 // Lower 64-bit shifts. 10493 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG); 10494 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG); 10495 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG); 10496 10497 // Vector-related lowering. 10498 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); 10499 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); 10500 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 10501 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); 10502 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); 10503 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); 10504 case ISD::MUL: return LowerMUL(Op, DAG); 10505 case ISD::ABS: return LowerABS(Op, DAG); 10506 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG); 10507 10508 // For counter-based loop handling. 10509 case ISD::INTRINSIC_W_CHAIN: return SDValue(); 10510 10511 case ISD::BITCAST: return LowerBITCAST(Op, DAG); 10512 10513 // Frame & Return address. 10514 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); 10515 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); 10516 10517 case ISD::INTRINSIC_VOID: 10518 return LowerINTRINSIC_VOID(Op, DAG); 10519 case ISD::SREM: 10520 case ISD::UREM: 10521 return LowerREM(Op, DAG); 10522 case ISD::BSWAP: 10523 return LowerBSWAP(Op, DAG); 10524 case ISD::ATOMIC_CMP_SWAP: 10525 return LowerATOMIC_CMP_SWAP(Op, DAG); 10526 } 10527 } 10528 10529 void PPCTargetLowering::ReplaceNodeResults(SDNode *N, 10530 SmallVectorImpl<SDValue>&Results, 10531 SelectionDAG &DAG) const { 10532 SDLoc dl(N); 10533 switch (N->getOpcode()) { 10534 default: 10535 llvm_unreachable("Do not know how to custom type legalize this operation!"); 10536 case ISD::READCYCLECOUNTER: { 10537 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 10538 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0)); 10539 10540 Results.push_back(RTB); 10541 Results.push_back(RTB.getValue(1)); 10542 Results.push_back(RTB.getValue(2)); 10543 break; 10544 } 10545 case ISD::INTRINSIC_W_CHAIN: { 10546 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 10547 Intrinsic::loop_decrement) 10548 break; 10549 10550 assert(N->getValueType(0) == MVT::i1 && 10551 "Unexpected result type for CTR decrement intrinsic"); 10552 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), 10553 N->getValueType(0)); 10554 SDVTList VTs = DAG.getVTList(SVT, MVT::Other); 10555 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0), 10556 N->getOperand(1)); 10557 10558 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt)); 10559 Results.push_back(NewInt.getValue(1)); 10560 break; 10561 } 10562 case ISD::VAARG: { 10563 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64()) 10564 return; 10565 10566 EVT VT = 
N->getValueType(0);
10567
10568     if (VT == MVT::i64) {
10569       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10570
10571       Results.push_back(NewNode);
10572       Results.push_back(NewNode.getValue(1));
10573     }
10574     return;
10575   }
10576   case ISD::FP_TO_SINT:
10577   case ISD::FP_TO_UINT:
10578     // LowerFP_TO_INT() can only handle f32 and f64.
10579     if (N->getOperand(0).getValueType() == MVT::ppcf128)
10580       return;
10581     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10582     return;
10583   case ISD::TRUNCATE: {
10584     EVT TrgVT = N->getValueType(0);
10585     EVT OpVT = N->getOperand(0).getValueType();
10586     if (TrgVT.isVector() &&
10587         isOperationCustom(N->getOpcode(), TrgVT) &&
10588         OpVT.getSizeInBits() <= 128 &&
10589         isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
10590       Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
10591     return;
10592   }
10593   case ISD::BITCAST:
10594     // Don't handle bitcast here.
10595     return;
10596   case ISD::FP_EXTEND:
10597     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10598     if (Lowered)
10599       Results.push_back(Lowered);
10600     return;
10601   }
10602 }
10603
10604 //===----------------------------------------------------------------------===//
10605 //  Other Lowering Code
10606 //===----------------------------------------------------------------------===//
10607
10608 static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10609   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10610   Function *Func = Intrinsic::getDeclaration(M, Id);
10611   return Builder.CreateCall(Func, {});
10612 }
10613
10614 // The mappings for emitLeading/TrailingFence are taken from
10615 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10616 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10617                                                  Instruction *Inst,
10618                                                  AtomicOrdering Ord) const {
10619   if (Ord == AtomicOrdering::SequentiallyConsistent)
10620     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10621   if (isReleaseOrStronger(Ord))
10622     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10623   return nullptr;
10624 }
10625
10626 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10627                                                   Instruction *Inst,
10628                                                   AtomicOrdering Ord) const {
10629   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10630     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10631     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10632     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10633     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10634       return Builder.CreateCall(
10635           Intrinsic::getDeclaration(
10636               Builder.GetInsertBlock()->getParent()->getParent(),
10637               Intrinsic::ppc_cfence, {Inst->getType()}),
10638           {Inst});
10639     // FIXME: Could use isync for RMW operations.
10640     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10641   }
10642   return nullptr;
10643 }
10644
10645 MachineBasicBlock *
10646 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10647                                     unsigned AtomicSize,
10648                                     unsigned BinOpcode,
10649                                     unsigned CmpOpcode,
10650                                     unsigned CmpPred) const {
10651   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
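  // (When BinOpcode == 0 no arithmetic instruction is emitted and the
  // incoming value is stored back directly, which is exactly swap semantics.)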
10652   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10653
10654   auto LoadMnemonic = PPC::LDARX;
10655   auto StoreMnemonic = PPC::STDCX;
10656   switch (AtomicSize) {
10657   default:
10658     llvm_unreachable("Unexpected size of atomic entity");
10659   case 1:
10660     LoadMnemonic = PPC::LBARX;
10661     StoreMnemonic = PPC::STBCX;
10662     assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for size < 4");
10663     break;
10664   case 2:
10665     LoadMnemonic = PPC::LHARX;
10666     StoreMnemonic = PPC::STHCX;
10667     assert(Subtarget.hasPartwordAtomics() && "Partword atomics required for size < 4");
10668     break;
10669   case 4:
10670     LoadMnemonic = PPC::LWARX;
10671     StoreMnemonic = PPC::STWCX;
10672     break;
10673   case 8:
10674     LoadMnemonic = PPC::LDARX;
10675     StoreMnemonic = PPC::STDCX;
10676     break;
10677   }
10678
10679   const BasicBlock *LLVM_BB = BB->getBasicBlock();
10680   MachineFunction *F = BB->getParent();
10681   MachineFunction::iterator It = ++BB->getIterator();
10682
10683   Register dest = MI.getOperand(0).getReg();
10684   Register ptrA = MI.getOperand(1).getReg();
10685   Register ptrB = MI.getOperand(2).getReg();
10686   Register incr = MI.getOperand(3).getReg();
10687   DebugLoc dl = MI.getDebugLoc();
10688
10689   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10690   MachineBasicBlock *loop2MBB =
10691       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10692   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10693   F->insert(It, loopMBB);
10694   if (CmpOpcode)
10695     F->insert(It, loop2MBB);
10696   F->insert(It, exitMBB);
10697   exitMBB->splice(exitMBB->begin(), BB,
10698                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
10699   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10700
10701   MachineRegisterInfo &RegInfo = F->getRegInfo();
10702   Register TmpReg = (!BinOpcode) ? incr :
10703     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10704                                            : &PPC::GPRCRegClass);
10705
10706   // thisMBB:
10707   //   ...
10708   //   fallthrough --> loopMBB
10709   BB->addSuccessor(loopMBB);
10710
10711   // loopMBB:
10712   //   l[wd]arx dest, ptr
10713   //   add r0, dest, incr
10714   //   st[wd]cx. r0, ptr
10715   //   bne- loopMBB
10716   //   fallthrough --> exitMBB
10717
10718   // For max/min...
10719   // loopMBB:
10720   //   l[wd]arx dest, ptr
10721   //   cmpl?[wd] incr, dest
10722   //   bgt exitMBB
10723   // loop2MBB:
10724   //   st[wd]cx. dest, ptr
10725   //   bne- loopMBB
10726   //   fallthrough --> exitMBB
10727
10728   BB = loopMBB;
10729   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10730     .addReg(ptrA).addReg(ptrB);
10731   if (BinOpcode)
10732     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10733   if (CmpOpcode) {
10734     // Signed comparisons of byte or halfword values must be sign-extended.
10735     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10736       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10737       BuildMI(BB, dl, TII->get(AtomicSize == 1 ?
PPC::EXTSB : PPC::EXTSH), 10738 ExtReg).addReg(dest); 10739 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10740 .addReg(incr).addReg(ExtReg); 10741 } else 10742 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10743 .addReg(incr).addReg(dest); 10744 10745 BuildMI(BB, dl, TII->get(PPC::BCC)) 10746 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB); 10747 BB->addSuccessor(loop2MBB); 10748 BB->addSuccessor(exitMBB); 10749 BB = loop2MBB; 10750 } 10751 BuildMI(BB, dl, TII->get(StoreMnemonic)) 10752 .addReg(TmpReg).addReg(ptrA).addReg(ptrB); 10753 BuildMI(BB, dl, TII->get(PPC::BCC)) 10754 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB); 10755 BB->addSuccessor(loopMBB); 10756 BB->addSuccessor(exitMBB); 10757 10758 // exitMBB: 10759 // ... 10760 BB = exitMBB; 10761 return BB; 10762 } 10763 10764 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary( 10765 MachineInstr &MI, MachineBasicBlock *BB, 10766 bool is8bit, // operation 10767 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const { 10768 // If we support part-word atomic mnemonics, just use them 10769 if (Subtarget.hasPartwordAtomics()) 10770 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode, 10771 CmpPred); 10772 10773 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0. 10774 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10775 // In 64 bit mode we have to use 64 bits for addresses, even though the 10776 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address 10777 // registers without caring whether they're 32 or 64, but here we're 10778 // doing actual arithmetic on the addresses. 10779 bool is64bit = Subtarget.isPPC64(); 10780 bool isLittleEndian = Subtarget.isLittleEndian(); 10781 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; 10782 10783 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 10784 MachineFunction *F = BB->getParent(); 10785 MachineFunction::iterator It = ++BB->getIterator(); 10786 10787 Register dest = MI.getOperand(0).getReg(); 10788 Register ptrA = MI.getOperand(1).getReg(); 10789 Register ptrB = MI.getOperand(2).getReg(); 10790 Register incr = MI.getOperand(3).getReg(); 10791 DebugLoc dl = MI.getDebugLoc(); 10792 10793 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); 10794 MachineBasicBlock *loop2MBB = 10795 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr; 10796 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB); 10797 F->insert(It, loopMBB); 10798 if (CmpOpcode) 10799 F->insert(It, loop2MBB); 10800 F->insert(It, exitMBB); 10801 exitMBB->splice(exitMBB->begin(), BB, 10802 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 10803 exitMBB->transferSuccessorsAndUpdatePHIs(BB); 10804 10805 MachineRegisterInfo &RegInfo = F->getRegInfo(); 10806 const TargetRegisterClass *RC = 10807 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 10808 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; 10809 10810 Register PtrReg = RegInfo.createVirtualRegister(RC); 10811 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); 10812 Register ShiftReg = 10813 isLittleEndian ? 
Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10814   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10815   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10816   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10817   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10818   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10819   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10820   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10821   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10822   Register Ptr1Reg;
10823   Register TmpReg =
10824       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
10825
10826   // thisMBB:
10827   //   ...
10828   //   fallthrough --> loopMBB
10829   BB->addSuccessor(loopMBB);
10830
10831   // The 4-byte load must be aligned, while a char or short may be
10832   // anywhere in the word. Hence all this nasty bookkeeping code.
10833   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
10834   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
10835   //   xori shift, shift1, 24 [16]
10836   //   rlwinm ptr, ptr1, 0, 0, 29
10837   //   slw incr2, incr, shift
10838   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
10839   //   slw mask, mask2, shift
10840   // loopMBB:
10841   //   lwarx tmpDest, ptr
10842   //   add tmp, tmpDest, incr2
10843   //   andc tmp2, tmpDest, mask
10844   //   and tmp3, tmp, mask
10845   //   or tmp4, tmp3, tmp2
10846   //   stwcx. tmp4, ptr
10847   //   bne- loopMBB
10848   //   fallthrough --> exitMBB
10849   //   srw dest, tmpDest, shift
10850   if (ptrA != ZeroReg) {
10851     Ptr1Reg = RegInfo.createVirtualRegister(RC);
10852     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
10853         .addReg(ptrA)
10854         .addReg(ptrB);
10855   } else {
10856     Ptr1Reg = ptrB;
10857   }
10858   // We need to use a 32-bit subregister here to avoid a register class
10859   // mismatch in 64-bit mode.
10860   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
10861       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
10862       .addImm(3)
10863       .addImm(27)
10864       .addImm(is8bit ? 28 : 27);
10865   if (!isLittleEndian)
10866     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
10867         .addReg(Shift1Reg)
10868         .addImm(is8bit ? 24 : 16);
10869   if (is64bit)
10870     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
10871         .addReg(Ptr1Reg)
10872         .addImm(0)
10873         .addImm(61);
10874   else
10875     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
10876         .addReg(Ptr1Reg)
10877         .addImm(0)
10878         .addImm(0)
10879         .addImm(29);
10880   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
10881   if (is8bit)
10882     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
10883   else {
10884     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
10885     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
10886         .addReg(Mask3Reg)
10887         .addImm(65535);
10888   }
10889   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
10890       .addReg(Mask2Reg)
10891       .addReg(ShiftReg);
10892
10893   BB = loopMBB;
10894   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
10895       .addReg(ZeroReg)
10896       .addReg(PtrReg);
10897   if (BinOpcode)
10898     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
10899         .addReg(Incr2Reg)
10900         .addReg(TmpDestReg);
10901   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
10902       .addReg(TmpDestReg)
10903       .addReg(MaskReg);
10904   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
10905   if (CmpOpcode) {
10906     // For unsigned comparisons, we can directly compare the shifted values.
10907     // For signed comparisons we shift and sign extend.
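    // (The AND with MaskReg below isolates the loaded lane in place with all
    // other bits zero, so the unsigned forms can compare it against the
    // shifted-up incr as-is; the signed forms first shift the lane back down
    // and sign-extend it, comparing against the original incr instead.)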
10908 Register SReg = RegInfo.createVirtualRegister(GPRC); 10909 BuildMI(BB, dl, TII->get(PPC::AND), SReg) 10910 .addReg(TmpDestReg) 10911 .addReg(MaskReg); 10912 unsigned ValueReg = SReg; 10913 unsigned CmpReg = Incr2Reg; 10914 if (CmpOpcode == PPC::CMPW) { 10915 ValueReg = RegInfo.createVirtualRegister(GPRC); 10916 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg) 10917 .addReg(SReg) 10918 .addReg(ShiftReg); 10919 Register ValueSReg = RegInfo.createVirtualRegister(GPRC); 10920 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg) 10921 .addReg(ValueReg); 10922 ValueReg = ValueSReg; 10923 CmpReg = incr; 10924 } 10925 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0) 10926 .addReg(CmpReg) 10927 .addReg(ValueReg); 10928 BuildMI(BB, dl, TII->get(PPC::BCC)) 10929 .addImm(CmpPred) 10930 .addReg(PPC::CR0) 10931 .addMBB(exitMBB); 10932 BB->addSuccessor(loop2MBB); 10933 BB->addSuccessor(exitMBB); 10934 BB = loop2MBB; 10935 } 10936 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg); 10937 BuildMI(BB, dl, TII->get(PPC::STWCX)) 10938 .addReg(Tmp4Reg) 10939 .addReg(ZeroReg) 10940 .addReg(PtrReg); 10941 BuildMI(BB, dl, TII->get(PPC::BCC)) 10942 .addImm(PPC::PRED_NE) 10943 .addReg(PPC::CR0) 10944 .addMBB(loopMBB); 10945 BB->addSuccessor(loopMBB); 10946 BB->addSuccessor(exitMBB); 10947 10948 // exitMBB: 10949 // ... 10950 BB = exitMBB; 10951 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 10952 .addReg(TmpDestReg) 10953 .addReg(ShiftReg); 10954 return BB; 10955 } 10956 10957 llvm::MachineBasicBlock * 10958 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, 10959 MachineBasicBlock *MBB) const { 10960 DebugLoc DL = MI.getDebugLoc(); 10961 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 10962 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 10963 10964 MachineFunction *MF = MBB->getParent(); 10965 MachineRegisterInfo &MRI = MF->getRegInfo(); 10966 10967 const BasicBlock *BB = MBB->getBasicBlock(); 10968 MachineFunction::iterator I = ++MBB->getIterator(); 10969 10970 Register DstReg = MI.getOperand(0).getReg(); 10971 const TargetRegisterClass *RC = MRI.getRegClass(DstReg); 10972 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!"); 10973 Register mainDstReg = MRI.createVirtualRegister(RC); 10974 Register restoreDstReg = MRI.createVirtualRegister(RC); 10975 10976 MVT PVT = getPointerTy(MF->getDataLayout()); 10977 assert((PVT == MVT::i64 || PVT == MVT::i32) && 10978 "Invalid Pointer Size!"); 10979 // For v = setjmp(buf), we generate 10980 // 10981 // thisMBB: 10982 // SjLjSetup mainMBB 10983 // bl mainMBB 10984 // v_restore = 1 10985 // b sinkMBB 10986 // 10987 // mainMBB: 10988 // buf[LabelOffset] = LR 10989 // v_main = 0 10990 // 10991 // sinkMBB: 10992 // v = phi(main, restore) 10993 // 10994 10995 MachineBasicBlock *thisMBB = MBB; 10996 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); 10997 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); 10998 MF->insert(I, mainMBB); 10999 MF->insert(I, sinkMBB); 11000 11001 MachineInstrBuilder MIB; 11002 11003 // Transfer the remainder of BB and its successor edges to sinkMBB. 11004 sinkMBB->splice(sinkMBB->begin(), MBB, 11005 std::next(MachineBasicBlock::iterator(MI)), MBB->end()); 11006 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); 11007 11008 // Note that the structure of the jmp_buf used here is not compatible 11009 // with that used by libc, and is not designed to be. 
Specifically, it
11010   // stores only those 'reserved' registers that LLVM does not otherwise
11011   // understand how to spill. Also, by convention, by the time this
11012   // intrinsic is called, Clang has already stored the frame address in the
11013   // first slot of the buffer and stack address in the third. Following the
11014   // X86 target code, we'll store the jump address in the second slot. We also
11015   // need to save the TOC pointer (R2) to handle jumps between shared
11016   // libraries, and that will be stored in the fourth slot. The thread
11017   // identifier (R13) is not affected.
11018
11019   // thisMBB:
11020   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11021   const int64_t TOCOffset = 3 * PVT.getStoreSize();
11022   const int64_t BPOffset = 4 * PVT.getStoreSize();
11023
11024   // Prepare the IP in a register.
11025   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11026   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11027   Register BufReg = MI.getOperand(1).getReg();
11028
11029   if (Subtarget.is64BitELFABI()) {
11030     setUsesTOCBasePtr(*MBB->getParent());
11031     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11032               .addReg(PPC::X2)
11033               .addImm(TOCOffset)
11034               .addReg(BufReg)
11035               .cloneMemRefs(MI);
11036   }
11037
11038   // Naked functions never have a base pointer, and so we use r1. For all
11039   // other functions, this decision must be deferred until PEI.
11040   unsigned BaseReg;
11041   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11042     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11043   else
11044     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11045
11046   MIB = BuildMI(*thisMBB, MI, DL,
11047                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11048             .addReg(BaseReg)
11049             .addImm(BPOffset)
11050             .addReg(BufReg)
11051             .cloneMemRefs(MI);
11052
11053   // Setup
11054   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11055   MIB.addRegMask(TRI->getNoPreservedMask());
11056
11057   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11058
11059   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11060             .addMBB(mainMBB);
11061   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11062
11063   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11064   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11065
11066   // mainMBB:
11067   //  mainDstReg = 0
11068   MIB =
11069       BuildMI(mainMBB, DL,
11070               TII->get(Subtarget.isPPC64() ?
PPC::MFLR8 : PPC::MFLR), LabelReg); 11071 11072 // Store IP 11073 if (Subtarget.isPPC64()) { 11074 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD)) 11075 .addReg(LabelReg) 11076 .addImm(LabelOffset) 11077 .addReg(BufReg); 11078 } else { 11079 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW)) 11080 .addReg(LabelReg) 11081 .addImm(LabelOffset) 11082 .addReg(BufReg); 11083 } 11084 MIB.cloneMemRefs(MI); 11085 11086 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0); 11087 mainMBB->addSuccessor(sinkMBB); 11088 11089 // sinkMBB: 11090 BuildMI(*sinkMBB, sinkMBB->begin(), DL, 11091 TII->get(PPC::PHI), DstReg) 11092 .addReg(mainDstReg).addMBB(mainMBB) 11093 .addReg(restoreDstReg).addMBB(thisMBB); 11094 11095 MI.eraseFromParent(); 11096 return sinkMBB; 11097 } 11098 11099 MachineBasicBlock * 11100 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, 11101 MachineBasicBlock *MBB) const { 11102 DebugLoc DL = MI.getDebugLoc(); 11103 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 11104 11105 MachineFunction *MF = MBB->getParent(); 11106 MachineRegisterInfo &MRI = MF->getRegInfo(); 11107 11108 MVT PVT = getPointerTy(MF->getDataLayout()); 11109 assert((PVT == MVT::i64 || PVT == MVT::i32) && 11110 "Invalid Pointer Size!"); 11111 11112 const TargetRegisterClass *RC = 11113 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; 11114 Register Tmp = MRI.createVirtualRegister(RC); 11115 // Since FP is only updated here but NOT referenced, it's treated as GPR. 11116 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31; 11117 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1; 11118 unsigned BP = 11119 (PVT == MVT::i64) 11120 ? PPC::X30 11121 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29 11122 : PPC::R30); 11123 11124 MachineInstrBuilder MIB; 11125 11126 const int64_t LabelOffset = 1 * PVT.getStoreSize(); 11127 const int64_t SPOffset = 2 * PVT.getStoreSize(); 11128 const int64_t TOCOffset = 3 * PVT.getStoreSize(); 11129 const int64_t BPOffset = 4 * PVT.getStoreSize(); 11130 11131 Register BufReg = MI.getOperand(0).getReg(); 11132 11133 // Reload FP (the jumped-to function may not have had a 11134 // frame pointer, and if so, then its r31 will be restored 11135 // as necessary). 
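  // The jmp_buf slots read back here are, in pointer-sized units: 0 = FP and
  // 2 = SP (both stored by the front end), 1 = IP, 3 = TOC, 4 = BP, matching
  // the layout written in emitEHSjLjSetJmp above.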
11136   if (PVT == MVT::i64) {
11137     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11138               .addImm(0)
11139               .addReg(BufReg);
11140   } else {
11141     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11142               .addImm(0)
11143               .addReg(BufReg);
11144   }
11145   MIB.cloneMemRefs(MI);
11146
11147   // Reload IP
11148   if (PVT == MVT::i64) {
11149     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11150               .addImm(LabelOffset)
11151               .addReg(BufReg);
11152   } else {
11153     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11154               .addImm(LabelOffset)
11155               .addReg(BufReg);
11156   }
11157   MIB.cloneMemRefs(MI);
11158
11159   // Reload SP
11160   if (PVT == MVT::i64) {
11161     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11162               .addImm(SPOffset)
11163               .addReg(BufReg);
11164   } else {
11165     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11166               .addImm(SPOffset)
11167               .addReg(BufReg);
11168   }
11169   MIB.cloneMemRefs(MI);
11170
11171   // Reload BP
11172   if (PVT == MVT::i64) {
11173     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11174               .addImm(BPOffset)
11175               .addReg(BufReg);
11176   } else {
11177     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11178               .addImm(BPOffset)
11179               .addReg(BufReg);
11180   }
11181   MIB.cloneMemRefs(MI);
11182
11183   // Reload TOC
11184   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11185     setUsesTOCBasePtr(*MBB->getParent());
11186     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11187               .addImm(TOCOffset)
11188               .addReg(BufReg)
11189               .cloneMemRefs(MI);
11190   }
11191
11192   // Jump
11193   BuildMI(*MBB, MI, DL,
11194           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11195   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11196
11197   MI.eraseFromParent();
11198   return MBB;
11199 }
11200
11201 MachineBasicBlock *
11202 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11203                                                MachineBasicBlock *BB) const {
11204   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11205       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11206     if (Subtarget.is64BitELFABI() &&
11207         MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11208       // Call lowering should have added an r2 operand to indicate a dependence
11209       // on the TOC base pointer value. It can't, however, because there is no
11210       // way to mark the dependence as implicit there, and so the stackmap code
11211       // will confuse it with a regular operand. Instead, add the dependence
11212       // here.
11213       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11214     }
11215
11216     return emitPatchPoint(MI, BB);
11217   }
11218
11219   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11220       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11221     return emitEHSjLjSetJmp(MI, BB);
11222   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11223              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11224     return emitEHSjLjLongJmp(MI, BB);
11225   }
11226
11227   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11228
11229   // To "insert" these instructions we actually have to insert their
11230   // control-flow patterns.
11231 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 11232 MachineFunction::iterator It = ++BB->getIterator(); 11233 11234 MachineFunction *F = BB->getParent(); 11235 11236 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11237 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 || 11238 MI.getOpcode() == PPC::SELECT_I8) { 11239 SmallVector<MachineOperand, 2> Cond; 11240 if (MI.getOpcode() == PPC::SELECT_CC_I4 || 11241 MI.getOpcode() == PPC::SELECT_CC_I8) 11242 Cond.push_back(MI.getOperand(4)); 11243 else 11244 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET)); 11245 Cond.push_back(MI.getOperand(1)); 11246 11247 DebugLoc dl = MI.getDebugLoc(); 11248 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond, 11249 MI.getOperand(2).getReg(), MI.getOperand(3).getReg()); 11250 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 || 11251 MI.getOpcode() == PPC::SELECT_CC_F8 || 11252 MI.getOpcode() == PPC::SELECT_CC_F16 || 11253 MI.getOpcode() == PPC::SELECT_CC_QFRC || 11254 MI.getOpcode() == PPC::SELECT_CC_QSRC || 11255 MI.getOpcode() == PPC::SELECT_CC_QBRC || 11256 MI.getOpcode() == PPC::SELECT_CC_VRRC || 11257 MI.getOpcode() == PPC::SELECT_CC_VSFRC || 11258 MI.getOpcode() == PPC::SELECT_CC_VSSRC || 11259 MI.getOpcode() == PPC::SELECT_CC_VSRC || 11260 MI.getOpcode() == PPC::SELECT_CC_SPE4 || 11261 MI.getOpcode() == PPC::SELECT_CC_SPE || 11262 MI.getOpcode() == PPC::SELECT_F4 || 11263 MI.getOpcode() == PPC::SELECT_F8 || 11264 MI.getOpcode() == PPC::SELECT_F16 || 11265 MI.getOpcode() == PPC::SELECT_QFRC || 11266 MI.getOpcode() == PPC::SELECT_QSRC || 11267 MI.getOpcode() == PPC::SELECT_QBRC || 11268 MI.getOpcode() == PPC::SELECT_SPE || 11269 MI.getOpcode() == PPC::SELECT_SPE4 || 11270 MI.getOpcode() == PPC::SELECT_VRRC || 11271 MI.getOpcode() == PPC::SELECT_VSFRC || 11272 MI.getOpcode() == PPC::SELECT_VSSRC || 11273 MI.getOpcode() == PPC::SELECT_VSRC) { 11274 // The incoming instruction knows the destination vreg to set, the 11275 // condition code register to branch on, the true/false values to 11276 // select between, and a branch opcode to use. 11277 11278 // thisMBB: 11279 // ... 11280 // TrueVal = ... 11281 // cmpTY ccX, r1, r2 11282 // bCC copy1MBB 11283 // fallthrough --> copy0MBB 11284 MachineBasicBlock *thisMBB = BB; 11285 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); 11286 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11287 DebugLoc dl = MI.getDebugLoc(); 11288 F->insert(It, copy0MBB); 11289 F->insert(It, sinkMBB); 11290 11291 // Transfer the remainder of BB and its successor edges to sinkMBB. 11292 sinkMBB->splice(sinkMBB->begin(), BB, 11293 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11294 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11295 11296 // Next, add the true and fallthrough blocks as its successors. 
11297 BB->addSuccessor(copy0MBB); 11298 BB->addSuccessor(sinkMBB); 11299 11300 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 || 11301 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 || 11302 MI.getOpcode() == PPC::SELECT_F16 || 11303 MI.getOpcode() == PPC::SELECT_SPE4 || 11304 MI.getOpcode() == PPC::SELECT_SPE || 11305 MI.getOpcode() == PPC::SELECT_QFRC || 11306 MI.getOpcode() == PPC::SELECT_QSRC || 11307 MI.getOpcode() == PPC::SELECT_QBRC || 11308 MI.getOpcode() == PPC::SELECT_VRRC || 11309 MI.getOpcode() == PPC::SELECT_VSFRC || 11310 MI.getOpcode() == PPC::SELECT_VSSRC || 11311 MI.getOpcode() == PPC::SELECT_VSRC) { 11312 BuildMI(BB, dl, TII->get(PPC::BC)) 11313 .addReg(MI.getOperand(1).getReg()) 11314 .addMBB(sinkMBB); 11315 } else { 11316 unsigned SelectPred = MI.getOperand(4).getImm(); 11317 BuildMI(BB, dl, TII->get(PPC::BCC)) 11318 .addImm(SelectPred) 11319 .addReg(MI.getOperand(1).getReg()) 11320 .addMBB(sinkMBB); 11321 } 11322 11323 // copy0MBB: 11324 // %FalseValue = ... 11325 // # fallthrough to sinkMBB 11326 BB = copy0MBB; 11327 11328 // Update machine-CFG edges 11329 BB->addSuccessor(sinkMBB); 11330 11331 // sinkMBB: 11332 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ] 11333 // ... 11334 BB = sinkMBB; 11335 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg()) 11336 .addReg(MI.getOperand(3).getReg()) 11337 .addMBB(copy0MBB) 11338 .addReg(MI.getOperand(2).getReg()) 11339 .addMBB(thisMBB); 11340 } else if (MI.getOpcode() == PPC::ReadTB) { 11341 // To read the 64-bit time-base register on a 32-bit target, we read the 11342 // two halves. Should the counter have wrapped while it was being read, we 11343 // need to try again. 11344 // ... 11345 // readLoop: 11346 // mfspr Rx,TBU # load from TBU 11347 // mfspr Ry,TB # load from TB 11348 // mfspr Rz,TBU # load from TBU 11349 // cmpw crX,Rx,Rz # check if 'old'='new' 11350 // bne readLoop # branch if they're not equal 11351 // ... 11352 11353 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB); 11354 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); 11355 DebugLoc dl = MI.getDebugLoc(); 11356 F->insert(It, readMBB); 11357 F->insert(It, sinkMBB); 11358 11359 // Transfer the remainder of BB and its successor edges to sinkMBB. 
11360 sinkMBB->splice(sinkMBB->begin(), BB, 11361 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 11362 sinkMBB->transferSuccessorsAndUpdatePHIs(BB); 11363 11364 BB->addSuccessor(readMBB); 11365 BB = readMBB; 11366 11367 MachineRegisterInfo &RegInfo = F->getRegInfo(); 11368 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); 11369 Register LoReg = MI.getOperand(0).getReg(); 11370 Register HiReg = MI.getOperand(1).getReg(); 11371 11372 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269); 11373 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268); 11374 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269); 11375 11376 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass); 11377 11378 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg) 11379 .addReg(HiReg) 11380 .addReg(ReadAgainReg); 11381 BuildMI(BB, dl, TII->get(PPC::BCC)) 11382 .addImm(PPC::PRED_NE) 11383 .addReg(CmpReg) 11384 .addMBB(readMBB); 11385 11386 BB->addSuccessor(readMBB); 11387 BB->addSuccessor(sinkMBB); 11388 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8) 11389 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); 11390 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16) 11391 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); 11392 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32) 11393 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); 11394 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64) 11395 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8); 11396 11397 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8) 11398 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND); 11399 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16) 11400 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND); 11401 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32) 11402 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND); 11403 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64) 11404 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8); 11405 11406 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8) 11407 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR); 11408 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16) 11409 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR); 11410 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32) 11411 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR); 11412 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64) 11413 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8); 11414 11415 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8) 11416 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR); 11417 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16) 11418 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR); 11419 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32) 11420 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR); 11421 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64) 11422 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8); 11423 11424 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8) 11425 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND); 11426 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16) 11427 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND); 11428 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32) 11429 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND); 11430 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64) 11431 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8); 11432 11433 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8) 11434 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF); 11435 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16) 11436 
BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF); 11437 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32) 11438 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF); 11439 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64) 11440 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8); 11441 11442 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8) 11443 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE); 11444 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16) 11445 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE); 11446 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32) 11447 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE); 11448 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64) 11449 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE); 11450 11451 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8) 11452 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE); 11453 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16) 11454 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE); 11455 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32) 11456 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE); 11457 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64) 11458 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE); 11459 11460 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8) 11461 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE); 11462 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16) 11463 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE); 11464 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32) 11465 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE); 11466 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64) 11467 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE); 11468 11469 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8) 11470 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE); 11471 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16) 11472 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE); 11473 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32) 11474 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE); 11475 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64) 11476 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE); 11477 11478 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8) 11479 BB = EmitPartwordAtomicBinary(MI, BB, true, 0); 11480 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16) 11481 BB = EmitPartwordAtomicBinary(MI, BB, false, 0); 11482 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32) 11483 BB = EmitAtomicBinary(MI, BB, 4, 0); 11484 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64) 11485 BB = EmitAtomicBinary(MI, BB, 8, 0); 11486 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 || 11487 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 || 11488 (Subtarget.hasPartwordAtomics() && 11489 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) || 11490 (Subtarget.hasPartwordAtomics() && 11491 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) { 11492 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64; 11493 11494 auto LoadMnemonic = PPC::LDARX; 11495 auto StoreMnemonic = PPC::STDCX; 11496 switch (MI.getOpcode()) { 11497 default: 11498 llvm_unreachable("Compare and swap of unknown size"); 11499 case PPC::ATOMIC_CMP_SWAP_I8: 11500 LoadMnemonic = PPC::LBARX; 11501 StoreMnemonic = PPC::STBCX; 11502 
assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
11503       break;
11504     case PPC::ATOMIC_CMP_SWAP_I16:
11505       LoadMnemonic = PPC::LHARX;
11506       StoreMnemonic = PPC::STHCX;
11507       assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
11508       break;
11509     case PPC::ATOMIC_CMP_SWAP_I32:
11510       LoadMnemonic = PPC::LWARX;
11511       StoreMnemonic = PPC::STWCX;
11512       break;
11513     case PPC::ATOMIC_CMP_SWAP_I64:
11514       LoadMnemonic = PPC::LDARX;
11515       StoreMnemonic = PPC::STDCX;
11516       break;
11517     }
11518     Register dest = MI.getOperand(0).getReg();
11519     Register ptrA = MI.getOperand(1).getReg();
11520     Register ptrB = MI.getOperand(2).getReg();
11521     Register oldval = MI.getOperand(3).getReg();
11522     Register newval = MI.getOperand(4).getReg();
11523     DebugLoc dl = MI.getDebugLoc();
11524
11525     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11526     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11527     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11528     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11529     F->insert(It, loop1MBB);
11530     F->insert(It, loop2MBB);
11531     F->insert(It, midMBB);
11532     F->insert(It, exitMBB);
11533     exitMBB->splice(exitMBB->begin(), BB,
11534                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11535     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11536
11537     // thisMBB:
11538     //   ...
11539     //   fallthrough --> loop1MBB
11540     BB->addSuccessor(loop1MBB);
11541
11542     // loop1MBB:
11543     //   l[bhwd]arx dest, ptr
11544     //   cmp[wd] dest, oldval
11545     //   bne- midMBB
11546     // loop2MBB:
11547     //   st[bhwd]cx. newval, ptr
11548     //   bne- loop1MBB
11549     //   b exitMBB
11550     // midMBB:
11551     //   st[bhwd]cx. dest, ptr
11552     // exitMBB:
11553     BB = loop1MBB;
11554     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11555     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11556         .addReg(oldval)
11557         .addReg(dest);
11558     BuildMI(BB, dl, TII->get(PPC::BCC))
11559         .addImm(PPC::PRED_NE)
11560         .addReg(PPC::CR0)
11561         .addMBB(midMBB);
11562     BB->addSuccessor(loop2MBB);
11563     BB->addSuccessor(midMBB);
11564
11565     BB = loop2MBB;
11566     BuildMI(BB, dl, TII->get(StoreMnemonic))
11567         .addReg(newval)
11568         .addReg(ptrA)
11569         .addReg(ptrB);
11570     BuildMI(BB, dl, TII->get(PPC::BCC))
11571         .addImm(PPC::PRED_NE)
11572         .addReg(PPC::CR0)
11573         .addMBB(loop1MBB);
11574     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
11575     BB->addSuccessor(loop1MBB);
11576     BB->addSuccessor(exitMBB);
11577
11578     BB = midMBB;
11579     BuildMI(BB, dl, TII->get(StoreMnemonic))
11580         .addReg(dest)
11581         .addReg(ptrA)
11582         .addReg(ptrB);
11583     BB->addSuccessor(exitMBB);
11584
11585     // exitMBB:
11586     //   ...
11587     BB = exitMBB;
11588   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
11589              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
11590     // We must use 64-bit registers for addresses when targeting 64-bit,
11591     // since we're actually doing arithmetic on them. Other registers
11592     // can be 32-bit.
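    // (The expansion mirrors EmitPartwordAtomicBinary: compute the aligned
    // word address, shift the old and new values up to the byte or halfword
    // lane, build a matching mask, and compare and splice only that lane of
    // the word loaded by lwarx.)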
11593     bool is64bit = Subtarget.isPPC64();
11594     bool isLittleEndian = Subtarget.isLittleEndian();
11595     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
11596
11597     Register dest = MI.getOperand(0).getReg();
11598     Register ptrA = MI.getOperand(1).getReg();
11599     Register ptrB = MI.getOperand(2).getReg();
11600     Register oldval = MI.getOperand(3).getReg();
11601     Register newval = MI.getOperand(4).getReg();
11602     DebugLoc dl = MI.getDebugLoc();
11603
11604     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11605     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11606     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11607     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11608     F->insert(It, loop1MBB);
11609     F->insert(It, loop2MBB);
11610     F->insert(It, midMBB);
11611     F->insert(It, exitMBB);
11612     exitMBB->splice(exitMBB->begin(), BB,
11613                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11614     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11615
11616     MachineRegisterInfo &RegInfo = F->getRegInfo();
11617     const TargetRegisterClass *RC =
11618         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11619     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11620
11621     Register PtrReg = RegInfo.createVirtualRegister(RC);
11622     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11623     Register ShiftReg =
11624         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11625     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11626     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11627     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11628     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11629     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11630     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11631     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11632     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11633     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11634     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11635     Register Ptr1Reg;
11636     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11637     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11638     // thisMBB:
11639     //   ...
11640     //   fallthrough --> loop1MBB
11641     BB->addSuccessor(loop1MBB);
11642
11643     // The 4-byte load must be aligned, while a char or short may be
11644     // anywhere in the word. Hence all this nasty bookkeeping code.
11645     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11646     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11647     //   xori shift, shift1, 24 [16]
11648     //   rlwinm ptr, ptr1, 0, 0, 29
11649     //   slw newval2, newval, shift
11650     //   slw oldval2, oldval, shift
11651     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11652     //   slw mask, mask2, shift
11653     //   and newval3, newval2, mask
11654     //   and oldval3, oldval2, mask
11655     // loop1MBB:
11656     //   lwarx tmpDest, ptr
11657     //   and tmp, tmpDest, mask
11658     //   cmpw tmp, oldval3
11659     //   bne- midMBB
11660     // loop2MBB:
11661     //   andc tmp2, tmpDest, mask
11662     //   or tmp4, tmp2, newval3
11663     //   stwcx. tmp4, ptr
11664     //   bne- loop1MBB
11665     //   b exitMBB
11666     // midMBB:
11667     //   stwcx. tmpDest, ptr
11668     // exitMBB:
11669     //   srw dest, tmpDest, shift
11670     if (ptrA != ZeroReg) {
11671       Ptr1Reg = RegInfo.createVirtualRegister(RC);
11672       BuildMI(BB, dl, TII->get(is64bit ?
PPC::ADD8 : PPC::ADD4), Ptr1Reg) 11673 .addReg(ptrA) 11674 .addReg(ptrB); 11675 } else { 11676 Ptr1Reg = ptrB; 11677 } 11678 11679 // We need use 32-bit subregister to avoid mismatch register class in 64-bit 11680 // mode. 11681 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg) 11682 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0) 11683 .addImm(3) 11684 .addImm(27) 11685 .addImm(is8bit ? 28 : 27); 11686 if (!isLittleEndian) 11687 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg) 11688 .addReg(Shift1Reg) 11689 .addImm(is8bit ? 24 : 16); 11690 if (is64bit) 11691 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg) 11692 .addReg(Ptr1Reg) 11693 .addImm(0) 11694 .addImm(61); 11695 else 11696 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg) 11697 .addReg(Ptr1Reg) 11698 .addImm(0) 11699 .addImm(0) 11700 .addImm(29); 11701 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg) 11702 .addReg(newval) 11703 .addReg(ShiftReg); 11704 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg) 11705 .addReg(oldval) 11706 .addReg(ShiftReg); 11707 if (is8bit) 11708 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255); 11709 else { 11710 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0); 11711 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg) 11712 .addReg(Mask3Reg) 11713 .addImm(65535); 11714 } 11715 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg) 11716 .addReg(Mask2Reg) 11717 .addReg(ShiftReg); 11718 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg) 11719 .addReg(NewVal2Reg) 11720 .addReg(MaskReg); 11721 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg) 11722 .addReg(OldVal2Reg) 11723 .addReg(MaskReg); 11724 11725 BB = loop1MBB; 11726 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg) 11727 .addReg(ZeroReg) 11728 .addReg(PtrReg); 11729 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg) 11730 .addReg(TmpDestReg) 11731 .addReg(MaskReg); 11732 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0) 11733 .addReg(TmpReg) 11734 .addReg(OldVal3Reg); 11735 BuildMI(BB, dl, TII->get(PPC::BCC)) 11736 .addImm(PPC::PRED_NE) 11737 .addReg(PPC::CR0) 11738 .addMBB(midMBB); 11739 BB->addSuccessor(loop2MBB); 11740 BB->addSuccessor(midMBB); 11741 11742 BB = loop2MBB; 11743 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg) 11744 .addReg(TmpDestReg) 11745 .addReg(MaskReg); 11746 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg) 11747 .addReg(Tmp2Reg) 11748 .addReg(NewVal3Reg); 11749 BuildMI(BB, dl, TII->get(PPC::STWCX)) 11750 .addReg(Tmp4Reg) 11751 .addReg(ZeroReg) 11752 .addReg(PtrReg); 11753 BuildMI(BB, dl, TII->get(PPC::BCC)) 11754 .addImm(PPC::PRED_NE) 11755 .addReg(PPC::CR0) 11756 .addMBB(loop1MBB); 11757 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB); 11758 BB->addSuccessor(loop1MBB); 11759 BB->addSuccessor(exitMBB); 11760 11761 BB = midMBB; 11762 BuildMI(BB, dl, TII->get(PPC::STWCX)) 11763 .addReg(TmpDestReg) 11764 .addReg(ZeroReg) 11765 .addReg(PtrReg); 11766 BB->addSuccessor(exitMBB); 11767 11768 // exitMBB: 11769 // ... 11770 BB = exitMBB; 11771 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest) 11772 .addReg(TmpReg) 11773 .addReg(ShiftReg); 11774 } else if (MI.getOpcode() == PPC::FADDrtz) { 11775 // This pseudo performs an FADD with rounding mode temporarily forced 11776 // to round-to-zero. We emit this via custom inserter since the FPSCR 11777 // is not modeled at the SelectionDAG level. 
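    // In pseudo-assembly, the sequence emitted below is roughly:
    //   mffs   f0          ; save FPSCR
    //   mtfsb1 31          ; } set rounding mode
    //   mtfsb0 30          ; } to round-to-zero (RN = 0b01)
    //   fadd   dst, a, b
    //   mtfsf  1, f0       ; restore FPSCR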
    Register Dest = MI.getOperand(0).getReg();
    Register Src1 = MI.getOperand(1).getReg();
    Register Src2 = MI.getOperand(2).getReg();
    DebugLoc dl = MI.getDebugLoc();

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);

    // Set rounding mode to round-to-zero.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);

    // Perform addition.
    BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);

    // Restore FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
  } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
             MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
             MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
    unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
                       MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
                          ? PPC::ANDI8_rec
                          : PPC::ANDI_rec;
    bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
                 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);

    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register Dest = RegInfo.createVirtualRegister(
        Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);

    DebugLoc Dl = MI.getDebugLoc();
    BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
        .addReg(MI.getOperand(1).getReg())
        .addImm(1);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
  } else if (MI.getOpcode() == PPC::TCHECK_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
    BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(CRReg);
  } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
    DebugLoc Dl = MI.getDebugLoc();
    unsigned Imm = MI.getOperand(1).getImm();
    BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
    BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
            MI.getOperand(0).getReg())
        .addReg(PPC::CR0EQ);
  } else if (MI.getOpcode() == PPC::SETRNDi) {
    DebugLoc dl = MI.getDebugLoc();
    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // The floating point rounding mode is in the bits 62:63 of FPSCR, and has
    // the following settings:
    //   00 Round to nearest
    //   01 Round to 0
    //   10 Round to +inf
    //   11 Round to -inf

    // When the operand is an immediate, use its two least significant bits to
    // set bits 62:63 of FPSCR.
    unsigned Mode = MI.getOperand(1).getImm();
    BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(31);

    BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
        .addImm(30);
  } else if (MI.getOpcode() == PPC::SETRND) {
    DebugLoc dl = MI.getDebugLoc();

    // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
    // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
    // If the target doesn't have DirectMove, we should use the stack to do
    // the conversion, because the target doesn't have instructions like
    // mtvsrd or mfvsrd to move between the register files directly.
    auto copyRegFromG8RCOrF8RC = [&](unsigned DestReg, unsigned SrcReg) {
      if (Subtarget.hasDirectMove()) {
        BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
            .addReg(SrcReg);
      } else {
        // Use the stack to do the register copy.
        unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
        MachineRegisterInfo &RegInfo = F->getRegInfo();
        const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
        if (RC == &PPC::F8RCRegClass) {
          // Copy register from F8RCRegClass to G8RCRegClass.
          assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
                 "Unsupported RegClass.");

          StoreOp = PPC::STFD;
          LoadOp = PPC::LD;
        } else {
          // Copy register from G8RCRegClass to F8RCRegClass.
          assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
                 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
                 "Unsupported RegClass.");
        }

        MachineFrameInfo &MFI = F->getFrameInfo();
        int FrameIdx = MFI.CreateStackObject(8, 8, false);

        MachineMemOperand *MMOStore = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlignment(FrameIdx));

        // Store the SrcReg into the stack.
        BuildMI(*BB, MI, dl, TII->get(StoreOp))
            .addReg(SrcReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOStore);

        MachineMemOperand *MMOLoad = F->getMachineMemOperand(
            MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
            MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
            MFI.getObjectAlignment(FrameIdx));

        // Load from the stack where SrcReg is stored, and save to DestReg,
        // so we have done the RegClass conversion from RegClass::SrcReg to
        // RegClass::DestReg.
        BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
            .addImm(0)
            .addFrameIndex(FrameIdx)
            .addMemOperand(MMOLoad);
      }
    };

    Register OldFPSCRReg = MI.getOperand(0).getReg();

    // Save FPSCR value.
    BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);

    // When the operand is a gprc register, use its two least significant bits
    // and the mtfsf instruction to set bits 62:63 of FPSCR.
    //
    //   copy OldFPSCRTmpReg, OldFPSCRReg
    //   (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
    //   rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
    //   copy NewFPSCRReg, NewFPSCRTmpReg
    //   mtfsf 255, NewFPSCRReg
    MachineOperand SrcOp = MI.getOperand(1);
    MachineRegisterInfo &RegInfo = F->getRegInfo();
    Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);

    Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);

    // The first operand of INSERT_SUBREG should be a register which has
    // subregisters; since we only care about its RegClass, we should use an
    // IMPLICIT_DEF register.
    BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
    BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
        .addReg(ImDefReg)
        .add(SrcOp)
        .addImm(1);

    Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
    BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
        .addReg(OldFPSCRTmpReg)
        .addReg(ExtSrcReg)
        .addImm(0)
        .addImm(62);

    Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
    copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);

    // The mask value of 255 means that bits 32:63 of NewFPSCRReg are copied
    // into bits 32:63 of FPSCR.
    BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
        .addImm(255)
        .addReg(NewFPSCRReg)
        .addImm(0)
        .addImm(0);
  } else {
    llvm_unreachable("Unexpected instr type to insert");
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
  // For the estimates, convergence is quadratic, so we essentially double the
  // number of digits correct after every iteration. For both FRE and FRSQRTE,
  // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
  // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
  int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
  if (VT.getScalarType() == MVT::f64)
    RefinementSteps++;
  return RefinementSteps;
}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
      (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);

    // The Newton-Raphson computation with a single constant does not provide
    // enough accuracy on some CPUs.
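    // As a reminder, a Newton-Raphson step for rsqrt refines an estimate X as
    // X * (1.5 - 0.5 * A * X * X); since convergence is quadratic, each step
    // roughly doubles the number of correct bits, which is what
    // getEstimateRefinementSteps() above relies on.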
    UseOneConstNR = !Subtarget.needsTwoConstNR();
    return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Operand.getValueType();
  if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
      (VT == MVT::f64 && Subtarget.hasFRE()) ||
      (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
      (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
      (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
      (VT == MVT::v4f64 && Subtarget.hasQPX())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
    return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
  }
  return SDValue();
}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
  // Note: This functionality is used only when unsafe-fp-math is enabled, and
  // on cores with reciprocal estimates (which are used when unsafe-fp-math is
  // enabled for division), this functionality is redundant with the default
  // combiner logic (once the division -> reciprocal/multiply transformation
  // has taken place). As a result, this matters more for older cores than for
  // newer ones.

  // Combine multiple FDIVs with the same divisor into multiple FMULs by the
  // reciprocal if there are two or more FDIVs (for embedded cores with only
  // one FP pipeline) or three or more FDIVs (for generic OOO cores).
  switch (Subtarget.getCPUDirective()) {
  default:
    return 3;
  case PPC::DIR_440:
  case PPC::DIR_A2:
  case PPC::DIR_E500:
  case PPC::DIR_E500mc:
  case PPC::DIR_E5500:
    return 2;
  }
}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  if (DAG.isBaseWithConstantOffset(Loc)) {
    Base = Loc.getOperand(0);
    Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();

    // The base might itself be a base plus an offset, and if so, accumulate
    // that as well.
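    // For example, for Loc = (add (add %x, 16), 8), the code above records
    // Base = (add %x, 16) and Offset = 8, and the recursive call below then
    // rewrites Base to %x and accumulates Offset to 24.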
    getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
  }
}

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {
  if (VT.getSizeInBits() / 8 != Bytes)
    return false;

  SDValue BaseLoc = Base->getBasePtr();
  if (Loc.getOpcode() == ISD::FrameIndex) {
    if (BaseLoc.getOpcode() != ISD::FrameIndex)
      return false;
    const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
    int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
    int FS = MFI.getObjectSize(FI);
    int BFS = MFI.getObjectSize(BFI);
    if (FS != BFS || FS != (int)Bytes) return false;
    return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist * Bytes);
  }

  SDValue Base1 = Loc, Base2 = BaseLoc;
  int64_t Offset1 = 0, Offset2 = 0;
  getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
  getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
  if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
    return true;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const GlobalValue *GV1 = nullptr;
  const GlobalValue *GV2 = nullptr;
  Offset1 = 0;
  Offset2 = 0;
  bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
  bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
  if (isGA1 && isGA2 && GV1 == GV2)
    return Offset1 == (Offset2 + Dist * Bytes);
  return false;
}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvlfd:
    case Intrinsic::ppc_qpx_qvlfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvlfs:
    case Intrinsic::ppc_qpx_qvlfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvlfcd:
    case Intrinsic::ppc_qpx_qvlfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvlfcs:
    case Intrinsic::ppc_qpx_qvlfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvlfiwa:
    case Intrinsic::ppc_qpx_qvlfiwz:
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    default: return false;
    case Intrinsic::ppc_qpx_qvstfd:
    case Intrinsic::ppc_qpx_qvstfda:
      VT = MVT::v4f64;
      break;
    case Intrinsic::ppc_qpx_qvstfs:
    case Intrinsic::ppc_qpx_qvstfsa:
      VT = MVT::v4f32;
      break;
    case Intrinsic::ppc_qpx_qvstfcd:
    case Intrinsic::ppc_qpx_qvstfcda:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_qpx_qvstfcs:
    case Intrinsic::ppc_qpx_qvstfcsa:
      VT = MVT::v2f32;
      break;
    case Intrinsic::ppc_qpx_qvstfiw:
    case Intrinsic::ppc_qpx_qvstfiwa:
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }

    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  SmallSet<SDNode *, 16> LoadRoots;
  SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
  SmallSet<SDNode *, 16> Visited;

  // First, search up the chain, branching to follow all token-factor operands.
  // If we find a consecutive load, then we're done; otherwise, record all
  // nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search down the chain, starting from the top-level nodes recorded
  // in the first phase. These top-level nodes are the nodes just above all
  // loads and token factors. Starting with their uses, recursively look
  // through all loads (just the chain uses) and token factors to find a
  // consecutive load.
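  // For instance, if LD and some other load L2 are both chained directly to
  // the entry token, phase one walks up from LD and records the entry token
  // as a load root, and phase two below walks that root's chain uses back
  // down to reach (and test) L2.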
  Visited.clear();
  Queue.clear();

  for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
       IE = LoadRoots.end(); I != IE; ++I) {
    Queue.push_back(*I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode::use_iterator UI = LoadRoot->use_begin(),
           UE = LoadRoot->use_end(); UI != UE; ++UI)
        if (((isa<MemSDNode>(*UI) &&
              cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
          Queue.push_back(*UI);
    }
  }

  return false;
}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function is
/// purely for codegen purposes and has some flags to guide the codegen
/// process.
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  // Zero extend the operands to the largest legal integer. Originally, they
  // must be of a strictly smaller size.
  auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
                         DAG.getConstant(Size, DL, MVT::i32));
  auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
                         DAG.getConstant(Size, DL, MVT::i32));

  // Swap the operands if needed, depending on the condition code.
  if (Swap)
    std::swap(Op0, Op1);

  // Subtract the extended integers.
  auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);

  // Move the sign bit to the least significant position and zero out the rest.
  // Now the least significant bit carries the result of the original
  // comparison.
  auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
                             DAG.getConstant(Size - 1, DL, MVT::i32));
  auto Final = Shifted;

  // Complement the result if needed, based on the condition code.
  if (Complement)
    Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
                        DAG.getConstant(1, DL, MVT::i64));

  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
}

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // The size of the integers being compared plays a critical role in the
  // following analysis, so we prefer to do this when all types are legal.
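  // As a concrete example, with a 64-bit largest legal type, (setult x, y)
  // becomes trunc(((zext64 x) - (zext64 y)) >> 63): the shifted-down sign bit
  // of the 64-bit subtraction is 1 exactly when x <u y.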
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // If all users of the SETCC extend its value to a legal integer type,
  // then we replace the SETCC with a subtraction.
  for (SDNode::use_iterator UI = N->use_begin(),
       UE = N->use_end(); UI != UE; ++UI) {
    if (UI->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
  }

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  auto OpSize = N->getOperand(0).getValueSizeInBits();

  unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();

  if (OpSize < Size) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
      return generateEquivalentSub(N, Size, false, false, DL, DAG);
    case ISD::SETULE:
      return generateEquivalentSub(N, Size, true, true, DL, DAG);
    case ISD::SETUGT:
      return generateEquivalentSub(N, Size, false, true, DL, DAG);
    case ISD::SETUGE:
      return generateEquivalentSub(N, Size, true, false, DL, DAG);
    }
  }

  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
  // If we're tracking CR bits, we need to be careful that we don't have:
  //   trunc(binary-ops(zext(x), zext(y)))
  // or
  //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
  // such that we're unnecessarily moving things into GPRs when it would be
  // better to keep them in CR bits.

  // Note that trunc here can be an actual i1 trunc, or can be the effective
  // truncation that comes from a setcc or select_cc.
  if (N->getOpcode() == ISD::TRUNCATE &&
      N->getValueType(0) != MVT::i1)
    return SDValue();

  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  if (N->getOpcode() == ISD::SETCC ||
      N->getOpcode() == ISD::SELECT_CC) {
    // If we're looking at a comparison, then we need to make sure that the
    // high bits (all except for the first) don't affect the result.
    ISD::CondCode CC =
      cast<CondCodeSDNode>(N->getOperand(
        N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
    unsigned OpBits = N->getOperand(0).getValueSizeInBits();

    if (ISD::isSignedIntSetCC(CC)) {
      if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
          DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
        return SDValue();
    } else if (ISD::isUnsignedIntSetCC(CC)) {
      if (!DAG.MaskedValueIsZero(N->getOperand(0),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)) ||
          !DAG.MaskedValueIsZero(N->getOperand(1),
                                 APInt::getHighBitsSet(OpBits, OpBits - 1)))
        return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
                                             : SDValue());
    } else {
      // This is neither a signed nor an unsigned comparison, just make sure
      // that the high bits are equal.
      KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
      KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));

      // We don't really care about what is known about the first bit (if
      // anything), so clear it in all masks prior to comparing them.
      Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
      Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);

      if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
        return SDValue();
    }
  }

  // We now know that the higher-order bits are irrelevant; we just need to
  // make sure that all of the intermediate operations are bit operations, and
  // all inputs are extensions.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR  &&
      N->getOperand(1).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::SELECT &&
      N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
      N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
      N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
      N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps, PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  for (unsigned i = 0; i < 2; ++i) {
    if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
          N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
         N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      Inputs.push_back(N->getOperand(i));
    else
      BinOps.push_back(N->getOperand(i));

    if (N->getOpcode() == ISD::TRUNCATE)
      break;
  }

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by extensions.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
            BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
           BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR  ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
                 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
                 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
                 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not an extension or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i] ||
            User->getOperand(1) == Inputs[i])
          return SDValue();
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // Make sure that we're not going to promote the non-output-value
      // operand(s) or SELECT or SELECT_CC.
      // FIXME: Although we could sometimes handle this, and it does occur in
      // practice that one of the condition inputs to the select is also one of
      // the outputs, we currently can't deal with this.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          return SDValue();
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i] ||
            User->getOperand(1) == PromOps[i])
          return SDValue();
      }
    }
  }

  // Replace all inputs with the extension operand.
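  // In effect, each zext/sext/aext collected in Inputs is replaced by its i1
  // source operand; the bit operations themselves are then rebuilt at type i1
  // by the loop that follows.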
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constants may have users outside the cluster of to-be-promoted nodes,
    // and so we need to replace those as we do the promotions.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (i1) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first. Any intermediate truncations or
  // extensions disappear.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    if (PromOp.getOpcode() == ISD::TRUNCATE ||
        PromOp.getOpcode() == ISD::SIGN_EXTEND ||
        PromOp.getOpcode() == ISD::ZERO_EXTEND ||
        PromOp.getOpcode() == ISD::ANY_EXTEND) {
      if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
          PromOp.getOperand(0).getValueType() != MVT::i1) {
        // The operand is not yet ready (see comment below).
        PromOpHandles.emplace_front(PromOp);
        continue;
      }

      SDValue RepValue = PromOp.getOperand(0);
      if (isa<ConstantSDNode>(RepValue))
        RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);

      DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
      continue;
    }

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C + 1)) &&
         PromOp.getOperand(C + 1).getValueType() != MVT::i1)) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If there are any constant inputs, make sure they're replaced now.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C + i]))
        Ops[C + i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C + i]);

    DAG.ReplaceAllUsesOfValueWith(PromOp,
        DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
  }

  // Now we're left with the initial truncation itself.
  if (N->getOpcode() == ISD::TRUNCATE)
    return N->getOperand(0);

  // Otherwise, this is a comparison. The operands to be compared have just
  // changed type (to i1), but everything else is the same.
  return SDValue(N, 0);
}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  // If we're tracking CR bits, we need to be careful that we don't have:
  //   zext(binary-ops(trunc(x), trunc(y)))
  // or
  //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
  // such that we're unnecessarily moving things into CR bits that can more
  // efficiently stay in GPRs. Note that if we're not certain that the high
  // bits are set as required by the final extension, we still may need to do
  // some masking to get the proper behavior.

  // This same functionality is important on PPC64 when dealing with
  // 32-to-64-bit extensions; these occur often when 32-bit values are used as
  // the return values of functions. Because it is so similar, it is handled
  // here as well.

  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();

  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR  &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(0).getOpcode() != ISD::SELECT &&
      N->getOperand(0).getOpcode() != ISD::SELECT_CC)
    return SDValue();

  SmallVector<SDValue, 4> Inputs;
  SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
  SmallPtrSet<SDNode *, 16> Visited;

  // Visit all inputs, collect all binary operations (and, or, xor and
  // select) that are all fed by truncations.
  while (!BinOps.empty()) {
    SDValue BinOp = BinOps.back();
    BinOps.pop_back();

    if (!Visited.insert(BinOp.getNode()).second)
      continue;

    PromOps.push_back(BinOp);

    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
      // The condition of the select is not promoted.
      if (BinOp.getOpcode() == ISD::SELECT && i == 0)
        continue;
      if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
        continue;

      if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
          isa<ConstantSDNode>(BinOp.getOperand(i))) {
        Inputs.push_back(BinOp.getOperand(i));
      } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
                 BinOp.getOperand(i).getOpcode() == ISD::OR  ||
                 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
                 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
        BinOps.push_back(BinOp.getOperand(i));
      } else {
        // We have an input that is not a truncation or another binary
        // operation; we'll abort this transformation.
        return SDValue();
      }
    }
  }

  // The operands of a select that must be truncated when the select is
  // promoted because the operand is actually part of the to-be-promoted set.
  DenseMap<SDNode *, EVT> SelectTruncOp[2];

  // Make sure that this is a self-contained cluster of operations (which
  // is not quite the same thing as saying that everything has only one
  // use).
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
         UE = Inputs[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == Inputs[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == Inputs[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
         UE = PromOps[i].getNode()->use_end();
         UI != UE; ++UI) {
      SDNode *User = *UI;
      if (User != N && !Visited.count(User))
        return SDValue();

      // If we're going to promote the non-output-value operand(s) or SELECT or
      // SELECT_CC, record them for truncation.
      if (User->getOpcode() == ISD::SELECT) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
      } else if (User->getOpcode() == ISD::SELECT_CC) {
        if (User->getOperand(0) == PromOps[i])
          SelectTruncOp[0].insert(std::make_pair(User,
                                  User->getOperand(0).getValueType()));
        if (User->getOperand(1) == PromOps[i])
          SelectTruncOp[1].insert(std::make_pair(User,
                                  User->getOperand(1).getValueType()));
      }
    }
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;
  if (N->getOpcode() != ISD::ANY_EXTEND) {
    // If all of the inputs are not already sign/zero extended, then
    // we'll still need to do that at the end.
    for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
      if (isa<ConstantSDNode>(Inputs[i]))
        continue;

      unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");

      if ((N->getOpcode() == ISD::ZERO_EXTEND &&
           !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                  APInt::getHighBitsSet(OpBits,
                                                        OpBits - PromBits))) ||
          (N->getOpcode() == ISD::SIGN_EXTEND &&
           DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits - (PromBits - 1)))) {
        ReallyNeedsExt = true;
        break;
      }
    }
  }

  // Replace all inputs, either with the truncation operand, or a
  // truncation or extension to the final output type.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    // Constant inputs need to be replaced with the to-be-promoted nodes that
    // use them because they might have users outside of the cluster of
    // promoted nodes.
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;

    SDValue InSrc = Inputs[i].getOperand(0);
    if (Inputs[i].getValueType() == N->getValueType(0))
      DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
    else if (N->getOpcode() == ISD::SIGN_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
    else
      DAG.ReplaceAllUsesOfValueWith(Inputs[i],
        DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
  }

  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  // Replace all operations (these are all the same, but have a different
  // (promoted) return type). DAG.getNode will validate that the types of
  // a binary operator match, so go through the list in reverse so that
  // we've likely promoted both operands first.
  while (!PromOpHandles.empty()) {
    SDValue PromOp = PromOpHandles.back().getValue();
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default: C = 0; break;
    case ISD::SELECT: C = 1; break;
    case ISD::SELECT_CC: C = 2; break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
        (!isa<ConstantSDNode>(PromOp.getOperand(C + 1)) &&
         PromOp.getOperand(C + 1).getValueType() != N->getValueType(0))) {
      // The to-be-promoted operands of this node have not yet been
      // promoted (this should be rare because we're going through the
      // list backward, but if one of the operands has several users in
      // this cluster of to-be-promoted nodes, it is possible).
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // For SELECT and SELECT_CC nodes, we do a similar check for any
    // to-be-promoted comparison inputs.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      if ((SelectTruncOp[0].count(PromOp.getNode()) &&
           PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
          (SelectTruncOp[1].count(PromOp.getNode()) &&
           PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
        PromOpHandles.emplace_front(PromOp);
        continue;
      }
    }

    SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
                                PromOp.getNode()->op_end());

    // If this node has constant inputs, then they'll need to be promoted here.
    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C + i]))
        continue;
      if (Ops[C + i].getValueType() == N->getValueType(0))
        continue;

      if (N->getOpcode() == ISD::SIGN_EXTEND)
        Ops[C + i] = DAG.getSExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
      else if (N->getOpcode() == ISD::ZERO_EXTEND)
        Ops[C + i] = DAG.getZExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
      else
        Ops[C + i] = DAG.getAnyExtOrTrunc(Ops[C + i], dl, N->getValueType(0));
    }

    // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
    // truncate them again to the original value type.
    if (PromOp.getOpcode() == ISD::SELECT ||
        PromOp.getOpcode() == ISD::SELECT_CC) {
      auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
      if (SI0 != SelectTruncOp[0].end())
        Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
      auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
      if (SI1 != SelectTruncOp[1].end())
        Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
    }

    DAG.ReplaceAllUsesOfValueWith(PromOp,
        DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
  }

  // Now we're left with the initial extension itself.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // To zero extend, just mask off everything except for the first bit (in the
  // i1 case).
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
                       DAG.getConstant(APInt::getLowBitsSet(
                                         N->getValueSizeInBits(0), PromBits),
                                       dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
         "Invalid extension type");
  EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
  SDValue ShiftCst =
      DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
      ShiftCst);
}

SDValue PPCTargetLowering::combineSetCC(SDNode *N,
                                        DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC &&
         "Should be called with a SETCC node");

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
    if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
        LHS.hasOneUse())
      std::swap(LHS, RHS);

    // x == 0-y --> x+y == 0
    // x != 0-y --> x+y != 0
    if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
        RHS.hasOneUse()) {
      SDLoc DL(N);
      SelectionDAG &DAG = DCI.DAG;
      EVT VT = N->getValueType(0);
      EVT OpVT = LHS.getValueType();
      SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
      return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
    }
  }

  return DAGCombineTruncBoolExt(N, DCI);
}

// Is this an extending load from an f32 to an f64?
static bool isFPExtLoad(SDValue Op) {
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
    return LD->getExtensionType() == ISD::EXTLOAD &&
           Op.getValueType() == MVT::f64;
  return false;
}

/// Reduces the number of fp-to-int conversions when building a vector.
///
/// If this vector is built out of floating-to-integer conversions,
/// transform it to a vector built out of floating point values followed by a
/// single floating-to-integer conversion of the vector.
/// Namely (build_vector (fptosi $A), (fptosi $B), ...)
12968 /// becomes (fptosi (build_vector ($A, $B, ...))) 12969 SDValue PPCTargetLowering:: 12970 combineElementTruncationToVectorTruncation(SDNode *N, 12971 DAGCombinerInfo &DCI) const { 12972 assert(N->getOpcode() == ISD::BUILD_VECTOR && 12973 "Should be called with a BUILD_VECTOR node"); 12974 12975 SelectionDAG &DAG = DCI.DAG; 12976 SDLoc dl(N); 12977 12978 SDValue FirstInput = N->getOperand(0); 12979 assert(FirstInput.getOpcode() == PPCISD::MFVSR && 12980 "The input operand must be an fp-to-int conversion."); 12981 12982 // This combine happens after legalization so the fp_to_[su]i nodes are 12983 // already converted to PPCSISD nodes. 12984 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode(); 12985 if (FirstConversion == PPCISD::FCTIDZ || 12986 FirstConversion == PPCISD::FCTIDUZ || 12987 FirstConversion == PPCISD::FCTIWZ || 12988 FirstConversion == PPCISD::FCTIWUZ) { 12989 bool IsSplat = true; 12990 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ || 12991 FirstConversion == PPCISD::FCTIWUZ; 12992 EVT SrcVT = FirstInput.getOperand(0).getValueType(); 12993 SmallVector<SDValue, 4> Ops; 12994 EVT TargetVT = N->getValueType(0); 12995 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 12996 SDValue NextOp = N->getOperand(i); 12997 if (NextOp.getOpcode() != PPCISD::MFVSR) 12998 return SDValue(); 12999 unsigned NextConversion = NextOp.getOperand(0).getOpcode(); 13000 if (NextConversion != FirstConversion) 13001 return SDValue(); 13002 // If we are converting to 32-bit integers, we need to add an FP_ROUND. 13003 // This is not valid if the input was originally double precision. It is 13004 // also not profitable to do unless this is an extending load in which 13005 // case doing this combine will allow us to combine consecutive loads. 13006 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0))) 13007 return SDValue(); 13008 if (N->getOperand(i) != FirstInput) 13009 IsSplat = false; 13010 } 13011 13012 // If this is a splat, we leave it as-is since there will be only a single 13013 // fp-to-int conversion followed by a splat of the integer. This is better 13014 // for 32-bit and smaller ints and neutral for 64-bit ints. 13015 if (IsSplat) 13016 return SDValue(); 13017 13018 // Now that we know we have the right type of node, get its operands 13019 for (int i = 0, e = N->getNumOperands(); i < e; ++i) { 13020 SDValue In = N->getOperand(i).getOperand(0); 13021 if (Is32Bit) { 13022 // For 32-bit values, we need to add an FP_ROUND node (if we made it 13023 // here, we know that all inputs are extending loads so this is safe). 13024 if (In.isUndef()) 13025 Ops.push_back(DAG.getUNDEF(SrcVT)); 13026 else { 13027 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl, 13028 MVT::f32, In.getOperand(0), 13029 DAG.getIntPtrConstant(1, dl)); 13030 Ops.push_back(Trunc); 13031 } 13032 } else 13033 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0)); 13034 } 13035 13036 unsigned Opcode; 13037 if (FirstConversion == PPCISD::FCTIDZ || 13038 FirstConversion == PPCISD::FCTIWZ) 13039 Opcode = ISD::FP_TO_SINT; 13040 else 13041 Opcode = ISD::FP_TO_UINT; 13042 13043 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32; 13044 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops); 13045 return DAG.getNode(Opcode, dl, TargetVT, BV); 13046 } 13047 return SDValue(); 13048 } 13049 13050 /// Reduce the number of loads when building a vector. 13051 /// 13052 /// Building a vector out of multiple loads can be converted to a load 13053 /// of the vector type if the loads are consecutive. 
If the loads are 13054 /// consecutive but in descending order, a shuffle is added at the end 13055 /// to reorder the vector. 13056 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) { 13057 assert(N->getOpcode() == ISD::BUILD_VECTOR && 13058 "Should be called with a BUILD_VECTOR node"); 13059 13060 SDLoc dl(N); 13061 13062 // Return early for non byte-sized type, as they can't be consecutive. 13063 if (!N->getValueType(0).getVectorElementType().isByteSized()) 13064 return SDValue(); 13065 13066 bool InputsAreConsecutiveLoads = true; 13067 bool InputsAreReverseConsecutive = true; 13068 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize(); 13069 SDValue FirstInput = N->getOperand(0); 13070 bool IsRoundOfExtLoad = false; 13071 13072 if (FirstInput.getOpcode() == ISD::FP_ROUND && 13073 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) { 13074 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0)); 13075 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD; 13076 } 13077 // Not a build vector of (possibly fp_rounded) loads. 13078 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) || 13079 N->getNumOperands() == 1) 13080 return SDValue(); 13081 13082 for (int i = 1, e = N->getNumOperands(); i < e; ++i) { 13083 // If any inputs are fp_round(extload), they all must be. 13084 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND) 13085 return SDValue(); 13086 13087 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) : 13088 N->getOperand(i); 13089 if (NextInput.getOpcode() != ISD::LOAD) 13090 return SDValue(); 13091 13092 SDValue PreviousInput = 13093 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1); 13094 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput); 13095 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput); 13096 13097 // If any inputs are fp_round(extload), they all must be. 13098 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD) 13099 return SDValue(); 13100 13101 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG)) 13102 InputsAreConsecutiveLoads = false; 13103 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG)) 13104 InputsAreReverseConsecutive = false; 13105 13106 // Exit early if the loads are neither consecutive nor reverse consecutive. 13107 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive) 13108 return SDValue(); 13109 } 13110 13111 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) && 13112 "The loads cannot be both consecutive and reverse consecutive."); 13113 13114 SDValue FirstLoadOp = 13115 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput; 13116 SDValue LastLoadOp = 13117 IsRoundOfExtLoad ? 
    IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
                       N->getOperand(N->getNumOperands()-1);

  LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
  LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
  if (InputsAreConsecutiveLoads) {
    assert(LD1 && "Input needs to be a LoadSDNode.");
    return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
                       LD1->getBasePtr(), LD1->getPointerInfo(),
                       LD1->getAlignment());
  }
  if (InputsAreReverseConsecutive) {
    assert(LDL && "Input needs to be a LoadSDNode.");
    SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
                               LDL->getBasePtr(), LDL->getPointerInfo(),
                               LDL->getAlignment());
    SmallVector<int, 16> Ops;
    for (int i = N->getNumOperands() - 1; i >= 0; i--)
      Ops.push_back(i);

    return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
                                DAG.getUNDEF(N->getValueType(0)), Ops);
  }
  return SDValue();
}

// This function adds the vector_shuffle needed to get the elements of the
// vector extract into the correct position, as specified by the CorrectElems
// encoding.
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
                                      SDValue Input, uint64_t Elems,
                                      uint64_t CorrectElems) {
  SDLoc dl(N);

  unsigned NumElems = Input.getValueType().getVectorNumElements();
  SmallVector<int, 16> ShuffleMask(NumElems, -1);

  // Knowing the element indices being extracted from the original
  // vector and the order in which they're being inserted, just put
  // them at element indices required for the instruction.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (DAG.getDataLayout().isLittleEndian())
      ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
    else
      ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
    CorrectElems = CorrectElems >> 8;
    Elems = Elems >> 8;
  }

  SDValue Shuffle =
    DAG.getVectorShuffle(Input.getValueType(), dl, Input,
                         DAG.getUNDEF(Input.getValueType()), ShuffleMask);

  EVT Ty = N->getValueType(0);
  SDValue BV = DAG.getNode(PPCISD::SExtVElems, dl, Ty, Shuffle);
  return BV;
}

// Look for build vector patterns where input operands come from sign-extended
// vector_extract elements of specific indices. If the correct indices aren't
// used, add a vector shuffle to fix up the indices and create a new
// PPCISD::SExtVElems node which selects the vector sign extend instructions
// during instruction selection.
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
  // This array encodes the indices that the vector sign extend instructions
  // extract from when extending from one type to another for both BE and LE.
  // The right nibble of each byte corresponds to the LE indices,
  // and the left nibble of each byte corresponds to the BE indices.
  // For example: 0x3074B8FC  byte->word
  //   For LE: the allowed indices are: 0x0,0x4,0x8,0xC
  //   For BE: the allowed indices are: 0x3,0x7,0xB,0xF
  // For example: 0x000070F8  byte->double word
  //   For LE: the allowed indices are: 0x0,0x8
  //   For BE: the allowed indices are: 0x7,0xF
  uint64_t TargetElems[] = {
    0x3074B8FC, // b->w
    0x000070F8, // b->d
    0x10325476, // h->w
    0x00003074, // h->d
    0x00001032, // w->d
  };

  uint64_t Elems = 0;
  int Index;
  SDValue Input;

  auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
    if (!Op)
      return false;
    if (Op.getOpcode() != ISD::SIGN_EXTEND &&
        Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
      return false;

    // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
    // of the right width.
    SDValue Extract = Op.getOperand(0);
    if (Extract.getOpcode() == ISD::ANY_EXTEND)
      Extract = Extract.getOperand(0);
    if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return false;

    ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
    if (!ExtOp)
      return false;

    Index = ExtOp->getZExtValue();
    if (Input && Input != Extract.getOperand(0))
      return false;

    if (!Input)
      Input = Extract.getOperand(0);

    Elems = Elems << 8;
    Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
    Elems |= Index;

    return true;
  };

  // If the build vector operands aren't sign-extended vector extracts
  // of the same input vector, then return.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (!isSExtOfVecExtract(N->getOperand(i))) {
      return SDValue();
    }
  }

  // If the vector extract indices are not correct, add the appropriate
  // vector_shuffle.
  int TgtElemArrayIdx;
  int InputSize = Input.getValueType().getScalarSizeInBits();
  int OutputSize = N->getValueType(0).getScalarSizeInBits();
  if (InputSize + OutputSize == 40)
    TgtElemArrayIdx = 0;
  else if (InputSize + OutputSize == 72)
    TgtElemArrayIdx = 1;
  else if (InputSize + OutputSize == 48)
    TgtElemArrayIdx = 2;
  else if (InputSize + OutputSize == 80)
    TgtElemArrayIdx = 3;
  else if (InputSize + OutputSize == 96)
    TgtElemArrayIdx = 4;
  else
    return SDValue();

  uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
  CorrectElems = DAG.getDataLayout().isLittleEndian()
                     ? CorrectElems & 0x0F0F0F0F0F0F0F0F
                     : CorrectElems & 0xF0F0F0F0F0F0F0F0;
  if (Elems != CorrectElems) {
    return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
  }

  // Regular lowering will catch cases where a shuffle is not needed.
  return SDValue();
}

SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);

  if (!Subtarget.hasVSX())
    return SDValue();

  // The target independent DAG combiner will leave a build_vector of
  // float-to-int conversions intact. We can generate MUCH better code for
  // a float-to-int conversion of a vector of floats.
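  // As an illustrative sketch (not taken from a real test case), a DAG like
  //   (v4i32 build_vector (MFVSR (FCTIWZ $A)), (MFVSR (FCTIWZ $B)), ...)
  // is rewritten (roughly) by the helper above as
  //   (v4i32 fp_to_sint (v4f32 build_vector $A, $B, ...))
  // so a single vector conversion replaces four scalar ones.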
  SDValue FirstInput = N->getOperand(0);
  if (FirstInput.getOpcode() == PPCISD::MFVSR) {
    SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
    if (Reduced)
      return Reduced;
  }

  // If we're building a vector out of consecutive loads, just load that
  // vector type.
  SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
  if (Reduced)
    return Reduced;

  // If we're building a vector out of extended elements from another vector
  // we have P9 vector integer extend instructions. The code assumes legal
  // input types (i.e. it can't handle things like v4i16) so do not run before
  // legalization.
  if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
    Reduced = combineBVOfVecSExt(N, DAG);
    if (Reduced)
      return Reduced;
  }

  if (N->getValueType(0) != MVT::v2f64)
    return SDValue();

  // Looking for:
  // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
  if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
      FirstInput.getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
      N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
    return SDValue();
  if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
    return SDValue();

  SDValue Ext1 = FirstInput.getOperand(0);
  SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();

  ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
  ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
  if (!Ext1Op || !Ext2Op)
    return SDValue();
  if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
      Ext1.getOperand(0) != Ext2.getOperand(0))
    return SDValue();

  int FirstElem = Ext1Op->getZExtValue();
  int SecondElem = Ext2Op->getZExtValue();
  int SubvecIdx;
  if (FirstElem == 0 && SecondElem == 1)
    SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
  else if (FirstElem == 2 && SecondElem == 3)
    SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
  else
    return SDValue();

  SDValue SrcVec = Ext1.getOperand(0);
  auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
    PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
  return DAG.getNode(NodeType, dl, MVT::v2f64,
                     SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  if (useSoftFloat() || !Subtarget.has64BitSupport())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Op(N, 0);

  // Don't handle ppc_fp128 here or conversions that are out-of-range capable
  // from the hardware.
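  // Roughly, the cases handled below are (assuming the required subtarget
  // features are present):
  //   (f64 (sint_to_fp (i16 (load $ptr))))   -> LXSIZX + VEXTS + FCFID
  //   (f64 (sint_to_fp (fp_to_sint f64:$A))) -> FCFID (FCTIDZ $A)
  // i.e. the value never has to leave the FP/vector register file.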
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
      Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
    return SDValue();

  SDValue FirstOperand(Op.getOperand(0));
  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
    (FirstOperand.getValueType() == MVT::i8 ||
     FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
    bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
    bool DstDouble = Op.getValueType() == MVT::f64;
    unsigned ConvOp = Signed ?
      (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
      (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
    SDValue WidthConst =
      DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
                            dl, false);
    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
    SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
    SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
                                         DAG.getVTList(MVT::f64, MVT::Other),
                                         Ops, MVT::i8, LDN->getMemOperand());

    // For signed conversion, we need to sign-extend the value in the VSR.
    if (Signed) {
      SDValue ExtOps[] = { Ld, WidthConst };
      SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    } else
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
  }

  // For i32 intermediate values, unfortunately, the conversion functions
  // leave the upper 32 bits of the value undefined. Within the set of
  // scalar instructions, we have no method for zero- or sign-extending the
  // value. Thus, we cannot handle i32 intermediate values here.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If we're converting from a float to an int and back to a float again,
  // then we don't need the store/load pair at all.
  if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
       Subtarget.hasFPCVT()) ||
      (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {
      // Make sure that we don't pick up a ppc_fp128 source value.
      return SDValue();
    }

    unsigned FCTOp =
      Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
                                                        PPCISD::FCTIDUZ;

    SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }

    return FP;
  }

  return SDValue();
}

// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
// builtins) into loads with swaps.
SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    // If the MMO suggests this isn't a load of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Similarly to the store case below, Intrin->getBasePtr() doesn't get
    // us what we want. Get operand 2 instead.
    Base = Intrin->getOperand(2);
    MMO = Intrin->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();

  // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(MVT::v2f64, MVT::Other),
                                         LoadOps, MVT::v2f64, MMO);

  DCI.AddToWorklist(Load.getNode());
  Chain = Load.getValue(1);
  SDValue Swap = DAG.getNode(
      PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
  DCI.AddToWorklist(Swap.getNode());

  // Add a bitcast if the resulting load type doesn't match v2f64.
  if (VecTy != MVT::v2f64) {
    SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
    DCI.AddToWorklist(N.getNode());
    // Package {bitcast value, swap's chain} to match Load's shape.
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
                       N, Swap.getValue(1));
  }

  return Swap;
}

// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
// builtins) into stores with swaps.
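// Illustrative shape, assuming a v4i32 store on a little-endian subtarget
// that needs swaps:
//   (store v4i32:$src, $ptr)
//     -> (PPCISD::STXVD2X (PPCISD::XXSWAPD (bitcast v2f64 $src)), $ptr)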
SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue Chain;
  SDValue Base;
  unsigned SrcOpnd;
  MachineMemOperand *MMO;

  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    SrcOpnd = 1;
    // If the MMO suggests this isn't a store of a full vector, leave
    // things alone. For a built-in, we have to make the change for
    // correctness, so if there is a size problem that will be a bug.
    if (MMO->getSize() < 16)
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID: {
    MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
    Chain = Intrin->getChain();
    // Intrin->getBasePtr() oddly does not get what we want.
    Base = Intrin->getOperand(3);
    MMO = Intrin->getMemOperand();
    SrcOpnd = 2;
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
  // aligned and the type is a vector with elements up to 4 bytes.
  if (Subtarget.needsSwapsForVSXMemOps() && !(MMO->getAlignment() % 16) &&
      VecTy.getScalarSizeInBits() <= 32) {
    return SDValue();
  }

  // All stores are done as v2f64 and possible bit cast.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
    DCI.AddToWorklist(Src.getNode());
  }

  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  DCI.AddToWorklist(Swap.getNode());
  Chain = Swap.getValue(1);
  SDValue StoreOps[] = { Chain, Swap, Base };
  SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
                                          DAG.getVTList(MVT::Other),
                                          StoreOps, VecTy, MMO);
  DCI.AddToWorklist(Store.getNode());
  return Store;
}

// Handle DAG combine for STORE (FP_TO_INT F).
SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  unsigned Opcode = N->getOperand(1).getOpcode();

  assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
         && "Not a FP_TO_INT Instruction!");

  SDValue Val = N->getOperand(1).getOperand(0);
  EVT Op1VT = N->getOperand(1).getValueType();
  EVT ResVT = Val.getValueType();

  // Floating point types smaller than 32 bits are not legal on Power.
  if (ResVT.getScalarSizeInBits() < 32)
    return SDValue();

  // Only perform combine for conversion to i64/i32 or power9 i16/i8.
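  // For instance (illustrative only), (store (fp_to_sint f64:$A), $ptr)
  // becomes (ST_VSR_SCAL_INT (FP_TO_SINT_IN_VSR $A), $ptr): the conversion
  // happens in a VSR and the result is stored directly from it, avoiding a
  // round trip through a GPR.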
  bool ValidTypeForStoreFltAsInt =
      (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
       (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

  if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
      cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
    return SDValue();

  // Extend f32 values to f64.
  if (ResVT.getScalarSizeInBits() == 32) {
    Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
    DCI.AddToWorklist(Val.getNode());
  }

  // Set signed or unsigned conversion opcode.
  unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
    PPCISD::FP_TO_SINT_IN_VSR :
    PPCISD::FP_TO_UINT_IN_VSR;

  Val = DAG.getNode(ConvOpcode,
                    dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
  DCI.AddToWorklist(Val.getNode());

  // Set number of bytes being converted.
  unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
  SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
                    DAG.getIntPtrConstant(ByteSize, dl, false),
                    DAG.getValueType(Op1VT) };

  Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
                                DAG.getVTList(MVT::Other), Ops,
                                cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());

  DCI.AddToWorklist(Val.getNode());
  return Val;
}

SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {
  assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
         "Not a reverse memop pattern!");

  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
    auto Mask = SVN->getMask();
    int i = 0;
    auto I = Mask.rbegin();
    auto E = Mask.rend();

    for (; I != E; ++I) {
      if (*I != i)
        return false;
      i++;
    }
    return true;
  };

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = SVN->getValueType(0);

  if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return SDValue();

  // Before P9, the PPCVSXSwapRemoval pass hacks the element order instead
  // (see the comment in PPCVSXSwapRemoval.cpp). This combine conflicts with
  // that optimization, so we only do it when P9 vector instructions are
  // available.
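  // Illustrative example of the pattern handled here (little endian):
  //   (vector_shuffle<3,2,1,0> (load v4i32:$ptr), undef)
  // becomes (PPCISD::LOAD_VEC_BE $ptr), a single load that produces the
  // elements directly in the reversed (big-endian) order.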
  if (!Subtarget.hasP9Vector())
    return SDValue();

  if (!IsElementReverse(SVN))
    return SDValue();

  if (LSBase->getOpcode() == ISD::LOAD) {
    SDLoc dl(SVN);
    SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  if (LSBase->getOpcode() == ISD::STORE) {
    SDLoc dl(LSBase);
    SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
                          LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  llvm_unreachable("Expected a load or store node here");
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:
    return combineADD(N, DCI);
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case ISD::MUL:
    return combineMUL(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
    return combineTRUNCATE(N, DCI);
  case ISD::SETCC:
    if (SDValue CSCC = combineSetCC(N, DCI))
      return CSCC;
    LLVM_FALLTHROUGH;
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
      return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
    }
    break;
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

    if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
      if (Val)
        return Val;
    }

    if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
      ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
      if (Val)
        return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
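    // e.g. (store (bswap i32:$val), $ptr) -> (PPCISD::STBRX $val, $ptr, i32),
    // which selects to a single stwbrx instead of byte-reversal code plus an
    // ordinary store.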
    if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      // STBRX can only handle simple types and it makes no sense to store
      // less than two bytes in byte-reversed order.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (mVT.isExtended() || mVT.getSizeInBits() < 16)
        break;

      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted to the right side before STBRX.
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
    // So it can increase the chance of CSE constant construction.
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
                                    MemVT.getSizeInBits());
      SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);

      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Op1VT.isSimple()) {
      MVT StoreVT = Op1VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
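    // Illustrative shape of the expansion done by expandVSXLoadForLE:
    //   (load v2f64:$ptr) -> (PPCISD::XXSWAPD (PPCISD::LXVD2X $ptr))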
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct
    // moves, just loading the two floating-point numbers is likely better.
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      // We're looking for a sequence like this:
      // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      //   t16: i64 = srl t13, Constant:i32<32>
      // t17: i32 = truncate t16
      // t18: f32 = bitcast t17
      // t19: i32 = truncate t13
      // t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.
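      // On success, the i64 load and the trunc/srl/bitcast chain are replaced
      // by two f32 loads, roughly:
      //   f32 = load [ptr]       (replaces Bitcast2)
      //   f32 = load [ptr + 4]   (replaces Bitcast)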

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                      LD->getOffset());
      }

      auto MMOFlags =
          LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlignment(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
        DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                    BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
                                    SDValue(FloatLoad2.getNode(), 1));
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
    Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
    unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() && (VT == MVT::v16i8 || VT == MVT::v8i16 ||
                                       VT == MVT::v4i32 || VT == MVT::v4f32)) ||
         (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
          LD->getAlignment() >= ScalarABIAlignment)) &&
        LD->getAlignment() < ABIAlignment) {
      // This is a type-legal unaligned Altivec or QPX load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations. The results of these permutations are the requested
      // loaded values. The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector. The point of this is to avoid a page fault if
      // the base address happened to be aligned.
      // This works because if the base address is aligned, then adding less
      // than a full vector length will cause the last vector in the sequence
      // to be (re)loaded. Otherwise, the next vector will be fetched as you
      // might suspect was necessary.

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      if (Subtarget.hasAltivec()) {
        Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
                                Intrinsic::ppc_altivec_lvsl;
        IntrLD = Intrinsic::ppc_altivec_lvx;
        IntrPerm = Intrinsic::ppc_altivec_vperm;
        PermCntlTy = MVT::v16i8;
        PermTy = MVT::v4i32;
        LDTy = MVT::v4i32;
      } else {
        Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
                                       Intrinsic::ppc_qpx_qvlpcls;
        IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
                                       Intrinsic::ppc_qpx_qvlfs;
        IntrPerm = Intrinsic::ppc_qpx_qvfperm;
        PermCntlTy = MVT::v4f64;
        PermTy = MVT::v4f64;
        LDTy = MemVT.getSimpleVT();
      }

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load. It is like the original MMO,
      // but represents an area in memory almost twice the vector size centered
      // on the original address. If the address is unaligned, we might start
      // reading up to (sizeof(vector)-1) bytes below the address of the
      // original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                -(long)MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
          DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the real
      // (aligned) offset (the alignment of the other load does not matter in
      // this case). If found, then do not use the offset reduction trick, as
      // that will prevent the loads from being later combined (as they would
      // otherwise be duplicates).
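      // For a 16-byte vector, for example: with no consecutive load found,
      // the extra load below uses Ptr+15 rather than Ptr+16, so an already
      // aligned base address never touches the next page.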
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
          DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code. We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec()
                   ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
                   : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm, // QPX
                                 DAG.getTargetConstant(1, dl, MVT::i64));
                                 // second argument is 1 because this rounding
                                 // is always exact.

      // The output of the permutation is our loaded result, the TokenFactor is
      // our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
  }
  break;
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if ((IID == Intr ||
         IID == Intrinsic::ppc_qpx_qvlpcld ||
         IID == Intrinsic::ppc_qpx_qvlpcls) &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
                 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;

      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.
            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
             UE = BasePtr->use_end(); UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
              (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                 VE = OtherAdd->use_end(); VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }

    // Combine vmaxsw/h/b(a, a's negation) to abs(a), which exposes the
    // vabsduw/h/b opportunity for downstream combines.
    if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
        (IID == Intrinsic::ppc_altivec_vmaxsw ||
         IID == Intrinsic::ppc_altivec_vmaxsh ||
         IID == Intrinsic::ppc_altivec_vmaxsb)) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if ((V1.getSimpleValueType() == MVT::v4i32 ||
           V1.getSimpleValueType() == MVT::v8i16 ||
           V1.getSimpleValueType() == MVT::v16i8) &&
          V1.getSimpleValueType() == V2.getSimpleValueType()) {
        // (0-a, a)
        if (V1.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
            V1.getOperand(1) == V2) {
          return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
        }
        // (a, 0-a)
        if (V2.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
            V2.getOperand(1) == V1) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
        // (x-y, y-x)
        if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
            V1.getOperand(0) == V2.getOperand(1) &&
            V1.getOperand(1) == V2.getOperand(0)) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
      }
    }
  }

  break;
  case ISD::INTRINSIC_W_CHAIN:
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  case ISD::INTRINSIC_VOID:
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
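    // e.g. (bswap (i32 (load $ptr))) -> (PPCISD::LBRX $ptr, i32), selected to
    // a single lwbrx; the now-dead original load is combined away below.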
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away. This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away; we give it a bogus result value but a
      // real chain result. The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP:
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value. If it has a
      // chain, this transformation is more complex. Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPoNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass-through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR_FUTURE: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment,
      // so that we can decrease cache misses and branch-prediction misses.
      // Actual alignment of the loop will depend on the hotness check and
      // other logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // VSX registers that just hold 64-bit integer data.
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "ww" && type->isFloatTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
14668 case 'd': 14669 case 'f': 14670 if (Subtarget.hasSPE()) { 14671 if (VT == MVT::f32 || VT == MVT::i32) 14672 return std::make_pair(0U, &PPC::GPRCRegClass); 14673 if (VT == MVT::f64 || VT == MVT::i64) 14674 return std::make_pair(0U, &PPC::SPERCRegClass); 14675 } else { 14676 if (VT == MVT::f32 || VT == MVT::i32) 14677 return std::make_pair(0U, &PPC::F4RCRegClass); 14678 if (VT == MVT::f64 || VT == MVT::i64) 14679 return std::make_pair(0U, &PPC::F8RCRegClass); 14680 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 14681 return std::make_pair(0U, &PPC::QFRCRegClass); 14682 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 14683 return std::make_pair(0U, &PPC::QSRCRegClass); 14684 } 14685 break; 14686 case 'v': 14687 if (VT == MVT::v4f64 && Subtarget.hasQPX()) 14688 return std::make_pair(0U, &PPC::QFRCRegClass); 14689 if (VT == MVT::v4f32 && Subtarget.hasQPX()) 14690 return std::make_pair(0U, &PPC::QSRCRegClass); 14691 if (Subtarget.hasAltivec()) 14692 return std::make_pair(0U, &PPC::VRRCRegClass); 14693 break; 14694 case 'y': // crrc 14695 return std::make_pair(0U, &PPC::CRRCRegClass); 14696 } 14697 } else if (Constraint == "wc" && Subtarget.useCRBits()) { 14698 // An individual CR bit. 14699 return std::make_pair(0U, &PPC::CRBITRCRegClass); 14700 } else if ((Constraint == "wa" || Constraint == "wd" || 14701 Constraint == "wf" || Constraint == "wi") && 14702 Subtarget.hasVSX()) { 14703 return std::make_pair(0U, &PPC::VSRCRegClass); 14704 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) { 14705 if (VT == MVT::f32 && Subtarget.hasP8Vector()) 14706 return std::make_pair(0U, &PPC::VSSRCRegClass); 14707 else 14708 return std::make_pair(0U, &PPC::VSFRCRegClass); 14709 } 14710 14711 // If we name a VSX register, we can't defer to the base class because it 14712 // will not recognize the correct register (their names will be VSL{0-31} 14713 // and V{0-31} so they won't match). So we match them here. 14714 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') { 14715 int VSNum = atoi(Constraint.data() + 3); 14716 assert(VSNum >= 0 && VSNum <= 63 && 14717 "Attempted to access a vsr out of range"); 14718 if (VSNum < 32) 14719 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass); 14720 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass); 14721 } 14722 std::pair<unsigned, const TargetRegisterClass *> R = 14723 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 14724 14725 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers 14726 // (which we call X[0-9]+). If a 64-bit value has been requested, and a 14727 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent 14728 // register. 14729 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use 14730 // the AsmName field from *RegisterInfo.td, then this would not be necessary. 14731 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() && 14732 PPC::GPRCRegClass.contains(R.first)) 14733 return std::make_pair(TRI->getMatchingSuperReg(R.first, 14734 PPC::sub_32, &PPC::G8RCRegClass), 14735 &PPC::G8RCRegClass); 14736 14737 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same. 14738 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) { 14739 R.first = PPC::CR0; 14740 R.second = &PPC::CRRCRegClass; 14741 } 14742 14743 return R; 14744 } 14745 14746 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops 14747 /// vector. If it is invalid, don't add anything to Ops. 
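/// Illustrative sketch (hypothetical operand values): for the PPC 'I'
/// constraint handled below,
///   __asm__("addi %0,%1,%2" : "=r"(Y) : "r"(X), "I"(42));
/// succeeds because 42 fits in a signed 16-bit immediate, whereas a value
/// such as 70000 produces no operand and the constraint fails to match.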
14748 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
14749                                                      std::string &Constraint,
14750                                                      std::vector<SDValue> &Ops,
14751                                                      SelectionDAG &DAG) const {
14752   SDValue Result;
14753
14754   // Only support length 1 constraints.
14755   if (Constraint.length() > 1) return;
14756
14757   char Letter = Constraint[0];
14758   switch (Letter) {
14759   default: break;
14760   case 'I':
14761   case 'J':
14762   case 'K':
14763   case 'L':
14764   case 'M':
14765   case 'N':
14766   case 'O':
14767   case 'P': {
14768     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
14769     if (!CST) return; // Must be an immediate to match.
14770     SDLoc dl(Op);
14771     int64_t Value = CST->getSExtValue();
14772     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
14773                          // numbers are printed as such.
14774     switch (Letter) {
14775     default: llvm_unreachable("Unknown constraint letter!");
14776     case 'I': // "I" is a signed 16-bit constant.
14777       if (isInt<16>(Value))
14778         Result = DAG.getTargetConstant(Value, dl, TCVT);
14779       break;
14780     case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
14781       if (isShiftedUInt<16, 16>(Value))
14782         Result = DAG.getTargetConstant(Value, dl, TCVT);
14783       break;
14784     case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
14785       if (isShiftedInt<16, 16>(Value))
14786         Result = DAG.getTargetConstant(Value, dl, TCVT);
14787       break;
14788     case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
14789       if (isUInt<16>(Value))
14790         Result = DAG.getTargetConstant(Value, dl, TCVT);
14791       break;
14792     case 'M': // "M" is a constant that is greater than 31.
14793       if (Value > 31)
14794         Result = DAG.getTargetConstant(Value, dl, TCVT);
14795       break;
14796     case 'N': // "N" is a positive constant that is an exact power of two.
14797       if (Value > 0 && isPowerOf2_64(Value))
14798         Result = DAG.getTargetConstant(Value, dl, TCVT);
14799       break;
14800     case 'O': // "O" is the constant zero.
14801       if (Value == 0)
14802         Result = DAG.getTargetConstant(Value, dl, TCVT);
14803       break;
14804     case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
14805       if (isInt<16>(-Value))
14806         Result = DAG.getTargetConstant(Value, dl, TCVT);
14807       break;
14808     }
14809     break;
14810   }
14811   }
14812
14813   if (Result.getNode()) {
14814     Ops.push_back(Result);
14815     return;
14816   }
14817
14818   // Handle standard constraint letters.
14819   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
14820 }
14821
14822 // isLegalAddressingMode - Return true if the addressing mode represented
14823 // by AM is legal for this target, for a load/store of the specified type.
14824 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
14825                                               const AddrMode &AM, Type *Ty,
14826                                               unsigned AS,
14827                                               Instruction *I) const {
14827   // PPC does not allow r+i addressing modes for vectors!
14828   if (Ty->isVectorTy() && AM.BaseOffs != 0)
14829     return false;
14830
14831   // PPC allows a sign-extended 16-bit immediate field.
14832   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
14833     return false;
14834
14835   // No global is ever allowed as a base.
14836   if (AM.BaseGV)
14837     return false;
14838
14839   // PPC only supports r+r addressing beyond that:
14840   switch (AM.Scale) {
14841   case 0: // "r+i" or just "i", depending on HasBaseReg.
14842     break;
14843   case 1:
14844     if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
14845       return false;
14846     // Otherwise we have r+r or r+i.
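    // Illustrative sketch of the two accepted forms, written as the
    // instructions they would eventually become:
    //   r+i: lwz r3, 8(r4)     (D-form, signed 16-bit displacement)
    //   r+r: lwzx r3, r4, r5   (X-form, base plus index register)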
14847 break; 14848 case 2: 14849 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed. 14850 return false; 14851 // Allow 2*r as r+r. 14852 break; 14853 default: 14854 // No other scales are supported. 14855 return false; 14856 } 14857 14858 return true; 14859 } 14860 14861 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op, 14862 SelectionDAG &DAG) const { 14863 MachineFunction &MF = DAG.getMachineFunction(); 14864 MachineFrameInfo &MFI = MF.getFrameInfo(); 14865 MFI.setReturnAddressIsTaken(true); 14866 14867 if (verifyReturnAddressArgumentIsConstant(Op, DAG)) 14868 return SDValue(); 14869 14870 SDLoc dl(Op); 14871 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 14872 14873 // Make sure the function does not optimize away the store of the RA to 14874 // the stack. 14875 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 14876 FuncInfo->setLRStoreRequired(); 14877 bool isPPC64 = Subtarget.isPPC64(); 14878 auto PtrVT = getPointerTy(MF.getDataLayout()); 14879 14880 if (Depth > 0) { 14881 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); 14882 SDValue Offset = 14883 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl, 14884 isPPC64 ? MVT::i64 : MVT::i32); 14885 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), 14886 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), 14887 MachinePointerInfo()); 14888 } 14889 14890 // Just load the return address off the stack. 14891 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG); 14892 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, 14893 MachinePointerInfo()); 14894 } 14895 14896 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, 14897 SelectionDAG &DAG) const { 14898 SDLoc dl(Op); 14899 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 14900 14901 MachineFunction &MF = DAG.getMachineFunction(); 14902 MachineFrameInfo &MFI = MF.getFrameInfo(); 14903 MFI.setFrameAddressIsTaken(true); 14904 14905 EVT PtrVT = getPointerTy(MF.getDataLayout()); 14906 bool isPPC64 = PtrVT == MVT::i64; 14907 14908 // Naked functions never have a frame pointer, and so we use r1. For all 14909 // other functions, this decision must be delayed until during PEI. 14910 unsigned FrameReg; 14911 if (MF.getFunction().hasFnAttribute(Attribute::Naked)) 14912 FrameReg = isPPC64 ? PPC::X1 : PPC::R1; 14913 else 14914 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP; 14915 14916 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, 14917 PtrVT); 14918 while (Depth--) 14919 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(), 14920 FrameAddr, MachinePointerInfo()); 14921 return FrameAddr; 14922 } 14923 14924 // FIXME? Maybe this could be a TableGen attribute on some registers and 14925 // this table could be generated automatically from RegInfo. 14926 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT, 14927 const MachineFunction &MF) const { 14928 bool isPPC64 = Subtarget.isPPC64(); 14929 bool IsDarwinABI = Subtarget.isDarwinABI(); 14930 14931 bool is64Bit = isPPC64 && VT == LLT::scalar(64); 14932 if (!is64Bit && VT != LLT::scalar(32)) 14933 report_fatal_error("Invalid register global variable type"); 14934 14935 Register Reg = StringSwitch<Register>(RegName) 14936 .Case("r1", is64Bit ? PPC::X1 : PPC::R1) 14937 .Case("r2", (IsDarwinABI || isPPC64) ? Register() : PPC::R2) 14938 .Case("r13", (!isPPC64 && IsDarwinABI) ? Register() : 14939 (is64Bit ? 
PPC::X13 : PPC::R13))
14940     .Default(Register());
14941
14942   if (Reg)
14943     return Reg;
14944   report_fatal_error("Invalid register name global variable");
14945 }
14946
14947 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
14948   // The 32-bit SVR4 ABI accesses everything as got-indirect.
14949   if (Subtarget.is32BitELFABI())
14950     return true;
14951
14952   // AIX accesses everything indirectly through the TOC, which is similar to
14953   // the GOT.
14954   if (Subtarget.isAIXABI())
14955     return true;
14956
14957   CodeModel::Model CModel = getTargetMachine().getCodeModel();
14958   // If it is small or large code model, module locals are accessed
14959   // indirectly by loading their address from .toc/.got.
14960   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
14961     return true;
14962
14963   // JumpTable and BlockAddress are accessed as got-indirect.
14964   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
14965     return true;
14966
14967   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
14968     return Subtarget.isGVIndirectSymbol(G->getGlobal());
14969
14970   return false;
14971 }
14972
14973 bool
14974 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
14975   // The PowerPC target isn't yet aware of offsets.
14976   return false;
14977 }
14978
14979 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
14980                                            const CallInst &I,
14981                                            MachineFunction &MF,
14982                                            unsigned Intrinsic) const {
14983   switch (Intrinsic) {
14984   case Intrinsic::ppc_qpx_qvlfd:
14985   case Intrinsic::ppc_qpx_qvlfs:
14986   case Intrinsic::ppc_qpx_qvlfcd:
14987   case Intrinsic::ppc_qpx_qvlfcs:
14988   case Intrinsic::ppc_qpx_qvlfiwa:
14989   case Intrinsic::ppc_qpx_qvlfiwz:
14990   case Intrinsic::ppc_altivec_lvx:
14991   case Intrinsic::ppc_altivec_lvxl:
14992   case Intrinsic::ppc_altivec_lvebx:
14993   case Intrinsic::ppc_altivec_lvehx:
14994   case Intrinsic::ppc_altivec_lvewx:
14995   case Intrinsic::ppc_vsx_lxvd2x:
14996   case Intrinsic::ppc_vsx_lxvw4x: {
14997     EVT VT;
14998     switch (Intrinsic) {
14999     case Intrinsic::ppc_altivec_lvebx:
15000       VT = MVT::i8;
15001       break;
15002     case Intrinsic::ppc_altivec_lvehx:
15003       VT = MVT::i16;
15004       break;
15005     case Intrinsic::ppc_altivec_lvewx:
15006       VT = MVT::i32;
15007       break;
15008     case Intrinsic::ppc_vsx_lxvd2x:
15009       VT = MVT::v2f64;
15010       break;
15011     case Intrinsic::ppc_qpx_qvlfd:
15012       VT = MVT::v4f64;
15013       break;
15014     case Intrinsic::ppc_qpx_qvlfs:
15015       VT = MVT::v4f32;
15016       break;
15017     case Intrinsic::ppc_qpx_qvlfcd:
15018       VT = MVT::v2f64;
15019       break;
15020     case Intrinsic::ppc_qpx_qvlfcs:
15021       VT = MVT::v2f32;
15022       break;
15023     default:
15024       VT = MVT::v4i32;
15025       break;
15026     }
15027
15028     Info.opc = ISD::INTRINSIC_W_CHAIN;
15029     Info.memVT = VT;
15030     Info.ptrVal = I.getArgOperand(0);
15031     Info.offset = -VT.getStoreSize() + 1;
15032     Info.size = 2 * VT.getStoreSize() - 1;
15033     Info.align = Align::None();
15034     Info.flags = MachineMemOperand::MOLoad;
15035     return true;
15036   }
15037   case Intrinsic::ppc_qpx_qvlfda:
15038   case Intrinsic::ppc_qpx_qvlfsa:
15039   case Intrinsic::ppc_qpx_qvlfcda:
15040   case Intrinsic::ppc_qpx_qvlfcsa:
15041   case Intrinsic::ppc_qpx_qvlfiwaa:
15042   case Intrinsic::ppc_qpx_qvlfiwza: {
15043     EVT VT;
15044     switch (Intrinsic) {
15045     case Intrinsic::ppc_qpx_qvlfda:
15046       VT = MVT::v4f64;
15047       break;
15048     case Intrinsic::ppc_qpx_qvlfsa:
15049       VT = MVT::v4f32;
15050       break;
15051     case Intrinsic::ppc_qpx_qvlfcda:
15052       VT = MVT::v2f64;
15053       break;
15054     case Intrinsic::ppc_qpx_qvlfcsa:
15055       VT = MVT::v2f32;
15056       break;
15057     default:
15058       VT = MVT::v4i32;
15059       break;
15060     }
15061
15062     Info.opc = ISD::INTRINSIC_W_CHAIN;
15063     Info.memVT = VT;
15064     Info.ptrVal = I.getArgOperand(0);
15065     Info.offset = 0;
15066     Info.size = VT.getStoreSize();
15067     Info.align = Align::None();
15068     Info.flags = MachineMemOperand::MOLoad;
15069     return true;
15070   }
15071   case Intrinsic::ppc_qpx_qvstfd:
15072   case Intrinsic::ppc_qpx_qvstfs:
15073   case Intrinsic::ppc_qpx_qvstfcd:
15074   case Intrinsic::ppc_qpx_qvstfcs:
15075   case Intrinsic::ppc_qpx_qvstfiw:
15076   case Intrinsic::ppc_altivec_stvx:
15077   case Intrinsic::ppc_altivec_stvxl:
15078   case Intrinsic::ppc_altivec_stvebx:
15079   case Intrinsic::ppc_altivec_stvehx:
15080   case Intrinsic::ppc_altivec_stvewx:
15081   case Intrinsic::ppc_vsx_stxvd2x:
15082   case Intrinsic::ppc_vsx_stxvw4x: {
15083     EVT VT;
15084     switch (Intrinsic) {
15085     case Intrinsic::ppc_altivec_stvebx:
15086       VT = MVT::i8;
15087       break;
15088     case Intrinsic::ppc_altivec_stvehx:
15089       VT = MVT::i16;
15090       break;
15091     case Intrinsic::ppc_altivec_stvewx:
15092       VT = MVT::i32;
15093       break;
15094     case Intrinsic::ppc_vsx_stxvd2x:
15095       VT = MVT::v2f64;
15096       break;
15097     case Intrinsic::ppc_qpx_qvstfd:
15098       VT = MVT::v4f64;
15099       break;
15100     case Intrinsic::ppc_qpx_qvstfs:
15101       VT = MVT::v4f32;
15102       break;
15103     case Intrinsic::ppc_qpx_qvstfcd:
15104       VT = MVT::v2f64;
15105       break;
15106     case Intrinsic::ppc_qpx_qvstfcs:
15107       VT = MVT::v2f32;
15108       break;
15109     default:
15110       VT = MVT::v4i32;
15111       break;
15112     }
15113
15114     Info.opc = ISD::INTRINSIC_VOID;
15115     Info.memVT = VT;
15116     Info.ptrVal = I.getArgOperand(1);
15117     Info.offset = -VT.getStoreSize() + 1;
15118     Info.size = 2 * VT.getStoreSize() - 1;
15119     Info.align = Align::None();
15120     Info.flags = MachineMemOperand::MOStore;
15121     return true;
15122   }
15123   case Intrinsic::ppc_qpx_qvstfda:
15124   case Intrinsic::ppc_qpx_qvstfsa:
15125   case Intrinsic::ppc_qpx_qvstfcda:
15126   case Intrinsic::ppc_qpx_qvstfcsa:
15127   case Intrinsic::ppc_qpx_qvstfiwa: {
15128     EVT VT;
15129     switch (Intrinsic) {
15130     case Intrinsic::ppc_qpx_qvstfda:
15131       VT = MVT::v4f64;
15132       break;
15133     case Intrinsic::ppc_qpx_qvstfsa:
15134       VT = MVT::v4f32;
15135       break;
15136     case Intrinsic::ppc_qpx_qvstfcda:
15137       VT = MVT::v2f64;
15138       break;
15139     case Intrinsic::ppc_qpx_qvstfcsa:
15140       VT = MVT::v2f32;
15141       break;
15142     default:
15143       VT = MVT::v4i32;
15144       break;
15145     }
15146
15147     Info.opc = ISD::INTRINSIC_VOID;
15148     Info.memVT = VT;
15149     Info.ptrVal = I.getArgOperand(1);
15150     Info.offset = 0;
15151     Info.size = VT.getStoreSize();
15152     Info.align = Align::None();
15153     Info.flags = MachineMemOperand::MOStore;
15154     return true;
15155   }
15156   default:
15157     break;
15158   }
15159
15160   return false;
15161 }
15162
15163 /// getOptimalMemOpType - Returns the target-specific optimal type for load
15164 /// and store operations as a result of memset, memcpy, and memmove
15165 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
15166 /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
15167 /// against the alignment requirement, probably because the source does not
15168 /// need to be loaded. If 'IsMemset' is
15169 /// true, this is expanding a memset. If 'ZeroMemset' is true, it is a
15170 /// memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
15171 /// source is constant so it does not need to be loaded.
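/// Illustrative sketch under the logic below: a 32-byte memcpy whose source
/// and destination are both 16-byte aligned on an Altivec-capable target
/// would report MVT::v4i32 here and likely be expanded as two vector
/// load/store pairs.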
15172 /// It returns EVT::Other if the type should be determined using generic 15173 /// target-independent logic. 15174 EVT PPCTargetLowering::getOptimalMemOpType( 15175 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, 15176 bool ZeroMemset, bool MemcpyStrSrc, 15177 const AttributeList &FuncAttributes) const { 15178 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) { 15179 // When expanding a memset, require at least two QPX instructions to cover 15180 // the cost of loading the value to be stored from the constant pool. 15181 if (Subtarget.hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) && 15182 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) && 15183 !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) { 15184 return MVT::v4f64; 15185 } 15186 15187 // We should use Altivec/VSX loads and stores when available. For unaligned 15188 // addresses, unaligned VSX loads are only fast starting with the P8. 15189 if (Subtarget.hasAltivec() && Size >= 16 && 15190 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) || 15191 ((IsMemset && Subtarget.hasVSX()) || Subtarget.hasP8Vector()))) 15192 return MVT::v4i32; 15193 } 15194 15195 if (Subtarget.isPPC64()) { 15196 return MVT::i64; 15197 } 15198 15199 return MVT::i32; 15200 } 15201 15202 /// Returns true if it is beneficial to convert a load of a constant 15203 /// to just the constant itself. 15204 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 15205 Type *Ty) const { 15206 assert(Ty->isIntegerTy()); 15207 15208 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 15209 return !(BitSize == 0 || BitSize > 64); 15210 } 15211 15212 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { 15213 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 15214 return false; 15215 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 15216 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 15217 return NumBits1 == 64 && NumBits2 == 32; 15218 } 15219 15220 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { 15221 if (!VT1.isInteger() || !VT2.isInteger()) 15222 return false; 15223 unsigned NumBits1 = VT1.getSizeInBits(); 15224 unsigned NumBits2 = VT2.getSizeInBits(); 15225 return NumBits1 == 64 && NumBits2 == 32; 15226 } 15227 15228 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 15229 // Generally speaking, zexts are not free, but they are free when they can be 15230 // folded with other operations. 15231 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) { 15232 EVT MemVT = LD->getMemoryVT(); 15233 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 || 15234 (Subtarget.isPPC64() && MemVT == MVT::i32)) && 15235 (LD->getExtensionType() == ISD::NON_EXTLOAD || 15236 LD->getExtensionType() == ISD::ZEXTLOAD)) 15237 return true; 15238 } 15239 15240 // FIXME: Add other cases... 15241 // - 32-bit shifts with a zext to i64 15242 // - zext after ctlz, bswap, etc. 15243 // - zext after and by a constant mask 15244 15245 return TargetLowering::isZExtFree(Val, VT2); 15246 } 15247 15248 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const { 15249 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() && 15250 "invalid fpext types"); 15251 // Extending to float128 is not free. 
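  // Illustrative note: an fpext from f32 to f64 is considered free because
  // PPC floating-point registers hold double-precision values anyway, so no
  // conversion instruction is needed.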
15252 if (DestVT == MVT::f128) 15253 return false; 15254 return true; 15255 } 15256 15257 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const { 15258 return isInt<16>(Imm) || isUInt<16>(Imm); 15259 } 15260 15261 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const { 15262 return isInt<16>(Imm) || isUInt<16>(Imm); 15263 } 15264 15265 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, 15266 unsigned, 15267 unsigned, 15268 MachineMemOperand::Flags, 15269 bool *Fast) const { 15270 if (DisablePPCUnaligned) 15271 return false; 15272 15273 // PowerPC supports unaligned memory access for simple non-vector types. 15274 // Although accessing unaligned addresses is not as efficient as accessing 15275 // aligned addresses, it is generally more efficient than manual expansion, 15276 // and generally only traps for software emulation when crossing page 15277 // boundaries. 15278 15279 if (!VT.isSimple()) 15280 return false; 15281 15282 if (VT.isFloatingPoint() && !VT.isVector() && 15283 !Subtarget.allowsUnalignedFPAccess()) 15284 return false; 15285 15286 if (VT.getSimpleVT().isVector()) { 15287 if (Subtarget.hasVSX()) { 15288 if (VT != MVT::v2f64 && VT != MVT::v2i64 && 15289 VT != MVT::v4f32 && VT != MVT::v4i32) 15290 return false; 15291 } else { 15292 return false; 15293 } 15294 } 15295 15296 if (VT == MVT::ppcf128) 15297 return false; 15298 15299 if (Fast) 15300 *Fast = true; 15301 15302 return true; 15303 } 15304 15305 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, 15306 EVT VT) const { 15307 VT = VT.getScalarType(); 15308 15309 if (!VT.isSimple()) 15310 return false; 15311 15312 switch (VT.getSimpleVT().SimpleTy) { 15313 case MVT::f32: 15314 case MVT::f64: 15315 return true; 15316 case MVT::f128: 15317 return (EnableQuadPrecision && Subtarget.hasP9Vector()); 15318 default: 15319 break; 15320 } 15321 15322 return false; 15323 } 15324 15325 const MCPhysReg * 15326 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const { 15327 // LR is a callee-save register, but we must treat it as clobbered by any call 15328 // site. Hence we include LR in the scratch registers, which are in turn added 15329 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies 15330 // to CTR, which is used by any indirect call. 15331 static const MCPhysReg ScratchRegs[] = { 15332 PPC::X12, PPC::LR8, PPC::CTR8, 0 15333 }; 15334 15335 return ScratchRegs; 15336 } 15337 15338 unsigned PPCTargetLowering::getExceptionPointerRegister( 15339 const Constant *PersonalityFn) const { 15340 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3; 15341 } 15342 15343 unsigned PPCTargetLowering::getExceptionSelectorRegister( 15344 const Constant *PersonalityFn) const { 15345 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4; 15346 } 15347 15348 bool 15349 PPCTargetLowering::shouldExpandBuildVectorWithShuffles( 15350 EVT VT , unsigned DefinedValues) const { 15351 if (VT == MVT::v2i64) 15352 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves 15353 15354 if (Subtarget.hasVSX() || Subtarget.hasQPX()) 15355 return true; 15356 15357 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues); 15358 } 15359 15360 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const { 15361 if (DisableILPPref || Subtarget.enableMachineScheduler()) 15362 return TargetLowering::getSchedulingPreference(N); 15363 15364 return Sched::ILP; 15365 } 15366 15367 // Create a fast isel object. 
15368 FastISel * 15369 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo, 15370 const TargetLibraryInfo *LibInfo) const { 15371 return PPC::createFastISel(FuncInfo, LibInfo); 15372 } 15373 15374 void PPCTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { 15375 if (Subtarget.isDarwinABI()) return; 15376 if (!Subtarget.isPPC64()) return; 15377 15378 // Update IsSplitCSR in PPCFunctionInfo 15379 PPCFunctionInfo *PFI = Entry->getParent()->getInfo<PPCFunctionInfo>(); 15380 PFI->setIsSplitCSR(true); 15381 } 15382 15383 void PPCTargetLowering::insertCopiesSplitCSR( 15384 MachineBasicBlock *Entry, 15385 const SmallVectorImpl<MachineBasicBlock *> &Exits) const { 15386 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo(); 15387 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); 15388 if (!IStart) 15389 return; 15390 15391 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 15392 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); 15393 MachineBasicBlock::iterator MBBI = Entry->begin(); 15394 for (const MCPhysReg *I = IStart; *I; ++I) { 15395 const TargetRegisterClass *RC = nullptr; 15396 if (PPC::G8RCRegClass.contains(*I)) 15397 RC = &PPC::G8RCRegClass; 15398 else if (PPC::F8RCRegClass.contains(*I)) 15399 RC = &PPC::F8RCRegClass; 15400 else if (PPC::CRRCRegClass.contains(*I)) 15401 RC = &PPC::CRRCRegClass; 15402 else if (PPC::VRRCRegClass.contains(*I)) 15403 RC = &PPC::VRRCRegClass; 15404 else 15405 llvm_unreachable("Unexpected register class in CSRsViaCopy!"); 15406 15407 Register NewVR = MRI->createVirtualRegister(RC); 15408 // Create copy from CSR to a virtual register. 15409 // FIXME: this currently does not emit CFI pseudo-instructions, it works 15410 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be 15411 // nounwind. If we want to generalize this later, we may need to emit 15412 // CFI pseudo-instructions. 15413 assert(Entry->getParent()->getFunction().hasFnAttribute( 15414 Attribute::NoUnwind) && 15415 "Function should be nounwind in insertCopiesSplitCSR!"); 15416 Entry->addLiveIn(*I); 15417 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) 15418 .addReg(*I); 15419 15420 // Insert the copy-back instructions right before the terminator. 15421 for (auto *Exit : Exits) 15422 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), 15423 TII->get(TargetOpcode::COPY), *I) 15424 .addReg(NewVR); 15425 } 15426 } 15427 15428 // Override to enable LOAD_STACK_GUARD lowering on Linux. 15429 bool PPCTargetLowering::useLoadStackGuardNode() const { 15430 if (!Subtarget.isTargetLinux()) 15431 return TargetLowering::useLoadStackGuardNode(); 15432 return true; 15433 } 15434 15435 // Override to disable global variable loading on Linux. 15436 void PPCTargetLowering::insertSSPDeclarations(Module &M) const { 15437 if (!Subtarget.isTargetLinux()) 15438 return TargetLowering::insertSSPDeclarations(M); 15439 } 15440 15441 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, 15442 bool ForCodeSize) const { 15443 if (!VT.isSimple() || !Subtarget.hasVSX()) 15444 return false; 15445 15446 switch(VT.getSimpleVT().SimpleTy) { 15447 default: 15448 // For FP types that are currently not supported by PPC backend, return 15449 // false. Examples: f16, f80. 
15450     return false;
15451   case MVT::f32:
15452   case MVT::f64:
15453   case MVT::ppcf128:
15454     return Imm.isPosZero();
15455   }
15456 }
15457
15458 // For vector shift operation op, fold
15459 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
15460 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
15461                                   SelectionDAG &DAG) {
15462   SDValue N0 = N->getOperand(0);
15463   SDValue N1 = N->getOperand(1);
15464   EVT VT = N0.getValueType();
15465   unsigned OpSizeInBits = VT.getScalarSizeInBits();
15466   unsigned Opcode = N->getOpcode();
15467   unsigned TargetOpcode;
15468
15469   switch (Opcode) {
15470   default:
15471     llvm_unreachable("Unexpected shift operation");
15472   case ISD::SHL:
15473     TargetOpcode = PPCISD::SHL;
15474     break;
15475   case ISD::SRL:
15476     TargetOpcode = PPCISD::SRL;
15477     break;
15478   case ISD::SRA:
15479     TargetOpcode = PPCISD::SRA;
15480     break;
15481   }
15482
15483   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
15484       N1->getOpcode() == ISD::AND)
15485     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
15486       if (Mask->getZExtValue() == OpSizeInBits - 1)
15487         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
15488
15489   return SDValue();
15490 }
15491
15492 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
15493   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
15494     return Value;
15495
15496   SDValue N0 = N->getOperand(0);
15497   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
15498   if (!Subtarget.isISA3_0() ||
15499       N0.getOpcode() != ISD::SIGN_EXTEND ||
15500       N0.getOperand(0).getValueType() != MVT::i32 ||
15501       CN1 == nullptr || N->getValueType(0) != MVT::i64)
15502     return SDValue();
15503
15504   // We can't save an operation here if the value is already extended, and
15505   // the existing shift is easier to combine.
15506   SDValue ExtsSrc = N0.getOperand(0);
15507   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
15508       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
15509     return SDValue();
15510
15511   SDLoc DL(N0);
15512   SDValue ShiftBy = SDValue(CN1, 0);
15513   // We want the shift amount to be i32 on the extswsli, but the shift could
15514   // have an i64.
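  // Illustrative sketch of the combine performed here:
  //   (shl (sext i32 %a to i64), 3)  -->  (PPCISD::EXTSWSLI %a, 3)
  // which can be selected to a single extswsli on ISA 3.0 parts.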
15515 if (ShiftBy.getValueType() == MVT::i64) 15516 ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32); 15517 15518 return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0), 15519 ShiftBy); 15520 } 15521 15522 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const { 15523 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 15524 return Value; 15525 15526 return SDValue(); 15527 } 15528 15529 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const { 15530 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG)) 15531 return Value; 15532 15533 return SDValue(); 15534 } 15535 15536 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1)) 15537 // Transform (add X, (zext(sete Z, C))) -> (addze X, (subfic (addi Z, -C), 0)) 15538 // When C is zero, the equation (addi Z, -C) can be simplified to Z 15539 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types 15540 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG, 15541 const PPCSubtarget &Subtarget) { 15542 if (!Subtarget.isPPC64()) 15543 return SDValue(); 15544 15545 SDValue LHS = N->getOperand(0); 15546 SDValue RHS = N->getOperand(1); 15547 15548 auto isZextOfCompareWithConstant = [](SDValue Op) { 15549 if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() || 15550 Op.getValueType() != MVT::i64) 15551 return false; 15552 15553 SDValue Cmp = Op.getOperand(0); 15554 if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() || 15555 Cmp.getOperand(0).getValueType() != MVT::i64) 15556 return false; 15557 15558 if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) { 15559 int64_t NegConstant = 0 - Constant->getSExtValue(); 15560 // Due to the limitations of the addi instruction, 15561 // -C is required to be [-32768, 32767]. 15562 return isInt<16>(NegConstant); 15563 } 15564 15565 return false; 15566 }; 15567 15568 bool LHSHasPattern = isZextOfCompareWithConstant(LHS); 15569 bool RHSHasPattern = isZextOfCompareWithConstant(RHS); 15570 15571 // If there is a pattern, canonicalize a zext operand to the RHS. 15572 if (LHSHasPattern && !RHSHasPattern) 15573 std::swap(LHS, RHS); 15574 else if (!LHSHasPattern && !RHSHasPattern) 15575 return SDValue(); 15576 15577 SDLoc DL(N); 15578 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue); 15579 SDValue Cmp = RHS.getOperand(0); 15580 SDValue Z = Cmp.getOperand(0); 15581 auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1)); 15582 15583 assert(Constant && "Constant Should not be a null pointer."); 15584 int64_t NegConstant = 0 - Constant->getSExtValue(); 15585 15586 switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) { 15587 default: break; 15588 case ISD::SETNE: { 15589 // when C == 0 15590 // --> addze X, (addic Z, -1).carry 15591 // / 15592 // add X, (zext(setne Z, C))-- 15593 // \ when -32768 <= -C <= 32767 && C != 0 15594 // --> addze X, (addic (addi Z, -C), -1).carry 15595 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z, 15596 DAG.getConstant(NegConstant, DL, MVT::i64)); 15597 SDValue AddOrZ = NegConstant != 0 ? 
Add : Z;
15598     SDValue Addc = DAG.getNode(ISD::ADDC, DL,
15599                                DAG.getVTList(MVT::i64, MVT::Glue),
15599                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
15600     return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
15601                        DAG.getConstant(0, DL, MVT::i64),
15601                        SDValue(Addc.getNode(), 1));
15602   }
15603   case ISD::SETEQ: {
15604     // when C == 0
15605     //   --> addze X, (subfic Z, 0).carry
15606     //  /
15607     // add X, (zext(sete Z, C))--
15608     //  \    when -32768 <= -C <= 32767 && C != 0
15609     //   --> addze X, (subfic (addi Z, -C), 0).carry
15610     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
15611                               DAG.getConstant(NegConstant, DL, MVT::i64));
15612     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
15613     SDValue Subc = DAG.getNode(ISD::SUBC, DL,
15614                                DAG.getVTList(MVT::i64, MVT::Glue),
15614                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
15615     return DAG.getNode(ISD::ADDE, DL, VTs, LHS,
15616                        DAG.getConstant(0, DL, MVT::i64),
15616                        SDValue(Subc.getNode(), 1));
15617   }
15618   }
15619
15620   return SDValue();
15621 }
15622
15623 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
15624   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
15625     return Value;
15626
15627   return SDValue();
15628 }
15629
15630 // Detect TRUNCATE operations on bitcasts of float128 values.
15631 // What we are looking for here is the situation where we extract a subset
15632 // of bits from a 128-bit float.
15633 // This can take one of two forms:
15634 // 1) BITCAST of f128 feeding TRUNCATE
15635 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
15636 // The reason this is required is that we do not have a legal i128 type,
15637 // so we want to prevent having to store the f128 and then reload part
15638 // of it.
15639 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
15640                                            DAGCombinerInfo &DCI) const {
15641   // If we are using CRBits then try that first.
15642   if (Subtarget.useCRBits()) {
15643     // Check if CRBits did anything and return that if it did.
15644     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
15645       return CRTruncValue;
15646   }
15647
15648   SDLoc dl(N);
15649   SDValue Op0 = N->getOperand(0);
15650
15651   // Looking for a truncate of i128 to i64.
15652   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
15653     return SDValue();
15654
15655   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
15656
15657   // SRL feeding TRUNCATE.
15658   if (Op0.getOpcode() == ISD::SRL) {
15659     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
15660     // The right shift has to be by 64 bits.
15661     if (!ConstNode || ConstNode->getZExtValue() != 64)
15662       return SDValue();
15663
15664     // Switch the element number to extract.
15665     EltToExtract = EltToExtract ? 0 : 1;
15666     // Update Op0 past the SRL.
15667     Op0 = Op0.getOperand(0);
15668   }
15669
15670   // BITCAST feeding a TRUNCATE possibly via SRL.
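  // Illustrative sketch of the pattern handled below (hypothetical IR):
  //   %i = bitcast fp128 %x to i128
  //   %s = lshr i128 %i, 64
  //   %t = trunc i128 %s to i64
  // becomes an EXTRACT_VECTOR_ELT of (v2i64 (bitcast %x)), so the f128 is
  // never stored and reloaded.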
15671   if (Op0.getOpcode() == ISD::BITCAST &&
15672       Op0.getValueType() == MVT::i128 &&
15673       Op0.getOperand(0).getValueType() == MVT::f128) {
15674     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
15675     return DCI.DAG.getNode(
15676         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
15677         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
15678   }
15679   return SDValue();
15680 }
15681
15682 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
15683   SelectionDAG &DAG = DCI.DAG;
15684
15685   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
15686   if (!ConstOpOrElement)
15687     return SDValue();
15688
15689   // An imul is usually smaller than the alternative sequence for legal types.
15690   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
15691       isOperationLegal(ISD::MUL, N->getValueType(0)))
15692     return SDValue();
15693
15694   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
15695     switch (this->Subtarget.getCPUDirective()) {
15696     default:
15697       // TODO: enhance the condition for subtargets before pwr8
15698       return false;
15699     case PPC::DIR_PWR8:
15700       // type        mul     add    shl
15701       // scalar        4      1      1
15702       // vector        7      2      2
15703       return true;
15704     case PPC::DIR_PWR9:
15705     case PPC::DIR_PWR_FUTURE:
15706       // type        mul     add    shl
15707       // scalar        5      2      2
15708       // vector        7      2      2
15709
15710       // The cycle ratios of the related operations are shown in the table
15711       // above. Because mul costs 5 (scalar) / 7 (vector) cycles while
15712       // add/sub/shl all cost 2 for both scalar and vector types, the
15713       // two-instruction patterns (add/sub + shl, total 4) are always
15714       // profitable; but the three-instruction pattern
15714       // (mul x, -(2^N + 1)) => -(add (shl x, N), x) costs 6
15715       // (sub + add + shl), so we should only do it for vector types.
15716       return IsAddOne && IsNeg ? VT.isVector() : true;
15717     }
15718   };
15719
15720   EVT VT = N->getValueType(0);
15721   SDLoc DL(N);
15722
15723   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
15724   bool IsNeg = MulAmt.isNegative();
15725   APInt MulAmtAbs = MulAmt.abs();
15726
15727   if ((MulAmtAbs - 1).isPowerOf2()) {
15728     // (mul x, 2^N + 1) => (add (shl x, N), x)
15729     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
15730
15731     if (!IsProfitable(IsNeg, true, VT))
15732       return SDValue();
15733
15734     SDValue Op0 = N->getOperand(0);
15735     SDValue Op1 =
15736         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15737                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
15738     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
15739
15740     if (!IsNeg)
15741       return Res;
15742
15743     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
15744   } else if ((MulAmtAbs + 1).isPowerOf2()) {
15745     // (mul x, 2^N - 1) => (sub (shl x, N), x)
15746     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
15747
15748     if (!IsProfitable(IsNeg, false, VT))
15749       return SDValue();
15750
15751     SDValue Op0 = N->getOperand(0);
15752     SDValue Op1 =
15753         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
15754                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
15755
15756     if (!IsNeg)
15757       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
15758     else
15759       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
15760
15761   } else {
15762     return SDValue();
15763   }
15764 }
15765
15766 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
15767   // Only duplicate to increase tail-calls for the 64-bit SysV ABIs.
15768   if (!Subtarget.is64BitELFABI())
15769     return false;
15770
15771   // If not a tail call then no need to proceed.
15772   if (!CI->isTailCall())
15773     return false;
15774
15775   // If sibling calls have been disabled and tail-calls aren't guaranteed
15776   // there is no reason to duplicate.
15777   auto &TM = getTargetMachine();
15778   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
15779     return false;
15780
15781   // Can't tail call a function called indirectly, or if it has variadic args.
15782   const Function *Callee = CI->getCalledFunction();
15783   if (!Callee || Callee->isVarArg())
15784     return false;
15785
15786   // Make sure the callee and caller calling conventions are eligible for TCO.
15787   const Function *Caller = CI->getParent()->getParent();
15788   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
15789                                            CI->getCallingConv()))
15790     return false;
15791
15792   // If the function is local then we have a good chance at tail-calling it.
15793   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
15794 }
15795
15796 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
15797   if (!Subtarget.hasVSX())
15798     return false;
15799   if (Subtarget.hasP9Vector() && VT == MVT::f128)
15800     return true;
15801   return VT == MVT::f32 || VT == MVT::f64 ||
15802          VT == MVT::v4f32 || VT == MVT::v2f64;
15803 }
15804
15805 bool PPCTargetLowering::
15806 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
15807   const Value *Mask = AndI.getOperand(1);
15808   // If the mask is suitable for andi. or andis. we should sink the and.
15809   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
15810     // Can't handle constants wider than 64 bits.
15811     if (CI->getBitWidth() > 64)
15812       return false;
15813     int64_t ConstVal = CI->getZExtValue();
15814     return isUInt<16>(ConstVal) ||
15815            (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
15816   }
15817
15818   // For non-constant masks, we can always use the record-form and.
15819   return true;
15820 }
15821
15822 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
15823 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
15824 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
15825 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
15826 // Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
15827 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
15828   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
15829   assert(Subtarget.hasP9Altivec() &&
15830          "Only combine this when P9 altivec supported!");
15831   EVT VT = N->getValueType(0);
15832   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
15833     return SDValue();
15834
15835   SelectionDAG &DAG = DCI.DAG;
15836   SDLoc dl(N);
15837   if (N->getOperand(0).getOpcode() == ISD::SUB) {
15838     // Even for signed integers, the SUB cannot overflow as a signed value
15839     // when both inputs are zero-extended.
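    // Illustrative sketch: if a and b are v8i16 values that were each
    // zero-extended from narrower elements, then
    //   (abs (sub a, b)) --> (VABSD a, b, 0)
    // which can be selected to vabsduh, the unsigned absolute-difference
    // instruction for halfwords.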
15840     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
15841     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
15842     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
15843          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
15844         (SubOpcd1 == ISD::ZERO_EXTEND ||
15845          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
15846       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
15847                          N->getOperand(0)->getOperand(0),
15848                          N->getOperand(0)->getOperand(1),
15849                          DAG.getTargetConstant(0, dl, MVT::i32));
15850     }
15851
15852     // For type v4i32, it can be optimized with xvnegsp + vabsduw
15853     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
15854         N->getOperand(0).hasOneUse()) {
15855       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
15856                          N->getOperand(0)->getOperand(0),
15857                          N->getOperand(0)->getOperand(1),
15858                          DAG.getTargetConstant(1, dl, MVT::i32));
15859     }
15860   }
15861
15862   return SDValue();
15863 }
15864
15865 // For type v4i32/v8i16/v16i8, transform
15866 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
15867 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
15868 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
15869 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
15870 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
15871                                           DAGCombinerInfo &DCI) const {
15872   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
15873   assert(Subtarget.hasP9Altivec() &&
15874          "Only combine this when P9 altivec supported!");
15875
15876   SelectionDAG &DAG = DCI.DAG;
15877   SDLoc dl(N);
15878   SDValue Cond = N->getOperand(0);
15879   SDValue TrueOpnd = N->getOperand(1);
15880   SDValue FalseOpnd = N->getOperand(2);
15881   EVT VT = N->getOperand(1).getValueType();
15882
15883   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
15884       FalseOpnd.getOpcode() != ISD::SUB)
15885     return SDValue();
15886
15887   // ABSD is only available for type v4i32/v8i16/v16i8.
15888   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
15889     return SDValue();
15890
15891   // Require at least one single-use operand so that the combine saves a
15892   // dependent computation.
15893   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
15894     return SDValue();
15895
15896   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
15897
15898   // Can only handle unsigned comparisons here.
15899   switch (CC) {
15900   default:
15901     return SDValue();
15902   case ISD::SETUGT:
15903   case ISD::SETUGE:
15904     break;
15905   case ISD::SETULT:
15906   case ISD::SETULE:
15907     std::swap(TrueOpnd, FalseOpnd);
15908     break;
15909   }
15910
15911   SDValue CmpOpnd1 = Cond.getOperand(0);
15912   SDValue CmpOpnd2 = Cond.getOperand(1);
15913
15914   // SETCC CmpOpnd1 CmpOpnd2 cond
15915   // TrueOpnd = CmpOpnd1 - CmpOpnd2
15916   // FalseOpnd = CmpOpnd2 - CmpOpnd1
15917   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
15918       TrueOpnd.getOperand(1) == CmpOpnd2 &&
15919       FalseOpnd.getOperand(0) == CmpOpnd2 &&
15920       FalseOpnd.getOperand(1) == CmpOpnd1) {
15921     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
15922                        CmpOpnd1, CmpOpnd2,
15923                        DAG.getTargetConstant(0, dl, MVT::i32));
15924   }
15925
15926   return SDValue();
15927 }