//===- MipsSEISelLowering.cpp - MipsSE DAG Lowering Interface -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Subclass of MipsTargetLowering specialized for mips32/64.
//
//===----------------------------------------------------------------------===//

#include "MipsSEISelLowering.h"
#include "MipsMachineFunction.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsMips.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "mips-isel"

// Off by default: tail calls are only emitted when explicitly requested.
static cl::opt<bool>
UseMipsTailCalls("mips-tail-calls", cl::Hidden,
                 cl::desc("MIPS: permit tail calls."), cl::init(false));

// When set, f64 loads/stores are custom-lowered (see the NoDPLoadStore uses
// below) instead of using ldc1/sdc1.
static cl::opt<bool> NoDPLoadStore("mno-ldc1-sdc1", cl::init(false),
                                   cl::desc("Expand double precision loads and "
                                            "stores to their single precision "
                                            "counterparts"));

// Configure register classes and per-operation legality for the mips32/64
// ("SE") lowering, driven by the subtarget's feature bits (GP64, DSP/DSPr2,
// MSA, FP64, MIPS32r2/r6, MIPS64r6, cnMIPS, soft-float).
MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM,
                                           const MipsSubtarget &STI)
    : MipsTargetLowering(TM, STI) {
  // Set up the register classes
  addRegisterClass(MVT::i32, &Mips::GPR32RegClass);

  if (Subtarget.isGP64bit())
    addRegisterClass(MVT::i64, &Mips::GPR64RegClass);

  if (Subtarget.hasDSP() || Subtarget.hasMSA()) {
    // Expand all truncating stores and extending loads.
    for (MVT VT0 : MVT::fixedlen_vector_valuetypes()) {
      for (MVT VT1 : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT0, VT1, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT0, VT1, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT0, VT1, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT0, VT1, Expand);
      }
    }
  }

  if (Subtarget.hasDSP()) {
    MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};

    for (const auto &VecTy : VecTys) {
      addRegisterClass(VecTy, &Mips::DSPRRegClass);

      // Expand all builtin opcodes.
      for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
        setOperationAction(Opc, VecTy, Expand);

      // Then re-legalize the handful of operations DSP supports natively.
      setOperationAction(ISD::ADD, VecTy, Legal);
      setOperationAction(ISD::SUB, VecTy, Legal);
      setOperationAction(ISD::LOAD, VecTy, Legal);
      setOperationAction(ISD::STORE, VecTy, Legal);
      setOperationAction(ISD::BITCAST, VecTy, Legal);
    }

    setTargetDAGCombine(
        {ISD::SHL, ISD::SRA, ISD::SRL, ISD::SETCC, ISD::VSELECT});

    if (Subtarget.hasMips32r2()) {
      setOperationAction(ISD::ADDC, MVT::i32, Legal);
      setOperationAction(ISD::ADDE, MVT::i32, Legal);
    }
  }

  if (Subtarget.hasDSPR2())
    setOperationAction(ISD::MUL, MVT::v2i16, Legal);

  if (Subtarget.hasMSA()) {
    addMSAIntType(MVT::v16i8, &Mips::MSA128BRegClass);
    addMSAIntType(MVT::v8i16, &Mips::MSA128HRegClass);
    addMSAIntType(MVT::v4i32, &Mips::MSA128WRegClass);
    addMSAIntType(MVT::v2i64, &Mips::MSA128DRegClass);
    addMSAFloatType(MVT::v8f16, &Mips::MSA128HRegClass);
    addMSAFloatType(MVT::v4f32, &Mips::MSA128WRegClass);
    addMSAFloatType(MVT::v2f64, &Mips::MSA128DRegClass);

    // f16 is a storage-only type, always promote it to f32.
    addRegisterClass(MVT::f16, &Mips::MSA128HRegClass);
    setOperationAction(ISD::SETCC, MVT::f16, Promote);
    setOperationAction(ISD::BR_CC, MVT::f16, Promote);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Promote);
    setOperationAction(ISD::SELECT, MVT::f16, Promote);
    setOperationAction(ISD::FADD, MVT::f16, Promote);
    setOperationAction(ISD::FSUB, MVT::f16, Promote);
    setOperationAction(ISD::FMUL, MVT::f16, Promote);
    setOperationAction(ISD::FDIV, MVT::f16, Promote);
    setOperationAction(ISD::FREM, MVT::f16, Promote);
    setOperationAction(ISD::FMA, MVT::f16, Promote);
    setOperationAction(ISD::FNEG, MVT::f16, Promote);
    setOperationAction(ISD::FABS, MVT::f16, Promote);
    setOperationAction(ISD::FCEIL, MVT::f16, Promote);
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FP_EXTEND, MVT::f16, Promote);
    setOperationAction(ISD::FFLOOR, MVT::f16, Promote);
    setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
    setOperationAction(ISD::FPOW, MVT::f16, Promote);
    setOperationAction(ISD::FPOWI, MVT::f16, Promote);
    setOperationAction(ISD::FRINT, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSQRT, MVT::f16, Promote);
    setOperationAction(ISD::FEXP, MVT::f16, Promote);
    setOperationAction(ISD::FEXP2, MVT::f16, Promote);
    setOperationAction(ISD::FLOG, MVT::f16, Promote);
    setOperationAction(ISD::FLOG2, MVT::f16, Promote);
    setOperationAction(ISD::FLOG10, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Promote);
    setOperationAction(ISD::FTRUNC, MVT::f16, Promote);
    setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
    setOperationAction(ISD::FMINIMUM, MVT::f16, Promote);
    setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote);

    setTargetDAGCombine({ISD::AND, ISD::OR, ISD::SRA, ISD::VSELECT, ISD::XOR});
  }

  if (!Subtarget.useSoftFloat()) {
    addRegisterClass(MVT::f32, &Mips::FGR32RegClass);

    // When dealing with single precision only, use libcalls
    if (!Subtarget.isSingleFloat()) {
      if (Subtarget.isFP64bit())
        addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
      else
        addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);
    }
  }

  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Custom);
  setOperationAction(ISD::MULHU, MVT::i32, Custom);

  if (Subtarget.hasCnMips())
    setOperationAction(ISD::MUL, MVT::i64, Legal);
  else if (Subtarget.isGP64bit())
    setOperationAction(ISD::MUL, MVT::i64, Custom);

  if (Subtarget.isGP64bit()) {
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Custom);
    setOperationAction(ISD::MULHS, MVT::i64, Custom);
    setOperationAction(ISD::MULHU, MVT::i64, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  }

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);

  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  setTargetDAGCombine(ISD::MUL);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  if (Subtarget.hasMips32r2() && !Subtarget.useSoftFloat() &&
      !Subtarget.hasMips64()) {
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
  }

  if (NoDPLoadStore) {
    setOperationAction(ISD::LOAD, MVT::f64, Custom);
    setOperationAction(ISD::STORE, MVT::f64, Custom);
  }

  if (Subtarget.hasMips32r6()) {
    // MIPS32r6 replaces the accumulator-based multiplies with a three register
    // instruction
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::MUL, MVT::i32, Legal);
    setOperationAction(ISD::MULHS, MVT::i32, Legal);
    setOperationAction(ISD::MULHU, MVT::i32, Legal);

    // MIPS32r6 replaces the accumulator-based division/remainder with separate
    // three register division and remainder instructions.
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::SDIV, MVT::i32, Legal);
    setOperationAction(ISD::UDIV, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);

    // MIPS32r6 replaces conditional moves with an equivalent that removes the
    // need for three GPR read ports.
    setOperationAction(ISD::SETCC, MVT::i32, Legal);
    setOperationAction(ISD::SELECT, MVT::i32, Legal);
    setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);

    setOperationAction(ISD::SETCC, MVT::f32, Legal);
    setOperationAction(ISD::SELECT, MVT::f32, Legal);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);

    assert(Subtarget.isFP64bit() && "FR=1 is required for MIPS32r6");
    setOperationAction(ISD::SETCC, MVT::f64, Legal);
    // f64 SELECT is custom: see lowerSELECT, which emits FSELECT via MTC1_D64.
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);

    setOperationAction(ISD::BRCOND, MVT::Other, Legal);

    // Floating point > and >= are supported via < and <=
    setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
    setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUGE, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);

    setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
    setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUGE, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  }

  if (Subtarget.hasMips64r6()) {
    // MIPS64r6 replaces the accumulator-based multiplies with a three register
    // instruction
    setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
    setOperationAction(ISD::MUL, MVT::i64, Legal);
    setOperationAction(ISD::MULHS, MVT::i64, Legal);
    setOperationAction(ISD::MULHU, MVT::i64, Legal);

    // MIPS32r6 replaces the accumulator-based division/remainder with separate
    // three register division and remainder instructions.
    setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
    setOperationAction(ISD::SDIV, MVT::i64, Legal);
    setOperationAction(ISD::UDIV, MVT::i64, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);

    // MIPS64r6 replaces conditional moves with an equivalent that removes the
    // need for three GPR read ports.
    setOperationAction(ISD::SETCC, MVT::i64, Legal);
    setOperationAction(ISD::SELECT, MVT::i64, Legal);
    setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  }

  computeRegisterProperties(Subtarget.getRegisterInfo());
}

// Factory used to construct the mips32/64-specialized lowering object.
const MipsTargetLowering *
llvm::createMipsSETargetLowering(const MipsTargetMachine &TM,
                                 const MipsSubtarget &STI) {
  return new MipsSETargetLowering(TM, STI);
}

// MVT::Untyped values are represented in the 64-bit accumulator register
// class (the DSP variant when the DSP ASE is available); everything else
// defers to the generic implementation.
const TargetRegisterClass *
MipsSETargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return Subtarget.hasDSP() ? &Mips::ACC64DSPRegClass : &Mips::ACC64RegClass;

  return TargetLowering::getRepRegClassFor(VT);
}

// Enable MSA support for the given integer type and Register class.
void MipsSETargetLowering::
addMSAIntType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
  addRegisterClass(Ty, RC);

  // Expand all builtin opcodes.
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, Ty, Expand);

  // Re-legalize the operations MSA supports on integer vectors.
  setOperationAction(ISD::BITCAST, Ty, Legal);
  setOperationAction(ISD::LOAD, Ty, Legal);
  setOperationAction(ISD::STORE, Ty, Legal);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);
  setOperationAction(ISD::UNDEF, Ty, Legal);

  setOperationAction(ISD::ADD, Ty, Legal);
  setOperationAction(ISD::AND, Ty, Legal);
  setOperationAction(ISD::CTLZ, Ty, Legal);
  setOperationAction(ISD::CTPOP, Ty, Legal);
  setOperationAction(ISD::MUL, Ty, Legal);
  setOperationAction(ISD::OR, Ty, Legal);
  setOperationAction(ISD::SDIV, Ty, Legal);
  setOperationAction(ISD::SREM, Ty, Legal);
  setOperationAction(ISD::SHL, Ty, Legal);
  setOperationAction(ISD::SRA, Ty, Legal);
  setOperationAction(ISD::SRL, Ty, Legal);
  setOperationAction(ISD::SUB, Ty, Legal);
  setOperationAction(ISD::SMAX, Ty, Legal);
  setOperationAction(ISD::SMIN, Ty, Legal);
  setOperationAction(ISD::UDIV, Ty, Legal);
  setOperationAction(ISD::UREM, Ty, Legal);
  setOperationAction(ISD::UMAX, Ty, Legal);
  setOperationAction(ISD::UMIN, Ty, Legal);
  setOperationAction(ISD::VECTOR_SHUFFLE, Ty, Custom);
  setOperationAction(ISD::VSELECT, Ty, Legal);
  setOperationAction(ISD::XOR, Ty, Legal);

  // Int<->FP conversions are only legal for the element sizes that have a
  // matching MSA float vector type (32- and 64-bit elements).
  if (Ty == MVT::v4i32 || Ty == MVT::v2i64) {
    setOperationAction(ISD::FP_TO_SINT, Ty, Legal);
    setOperationAction(ISD::FP_TO_UINT, Ty, Legal);
    setOperationAction(ISD::SINT_TO_FP, Ty, Legal);
    setOperationAction(ISD::UINT_TO_FP, Ty, Legal);
  }

  setOperationAction(ISD::SETCC, Ty, Legal);
  setCondCodeAction(ISD::SETNE, Ty, Expand);
  setCondCodeAction(ISD::SETGE, Ty, Expand);
  setCondCodeAction(ISD::SETGT, Ty, Expand);
  setCondCodeAction(ISD::SETUGE, Ty, Expand);
  setCondCodeAction(ISD::SETUGT, Ty, Expand);
}

// Enable MSA support for the given floating-point type and Register class.
void MipsSETargetLowering::
addMSAFloatType(MVT::SimpleValueType Ty, const TargetRegisterClass *RC) {
  addRegisterClass(Ty, RC);

  // Expand all builtin opcodes.
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, Ty, Expand);

  setOperationAction(ISD::LOAD, Ty, Legal);
  setOperationAction(ISD::STORE, Ty, Legal);
  setOperationAction(ISD::BITCAST, Ty, Legal);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, Ty, Legal);
  setOperationAction(ISD::INSERT_VECTOR_ELT, Ty, Legal);
  setOperationAction(ISD::BUILD_VECTOR, Ty, Custom);

  // v8f16 is storage-only; arithmetic stays expanded for it.
  if (Ty != MVT::v8f16) {
    setOperationAction(ISD::FABS, Ty, Legal);
    setOperationAction(ISD::FADD, Ty, Legal);
    setOperationAction(ISD::FDIV, Ty, Legal);
    setOperationAction(ISD::FEXP2, Ty, Legal);
    setOperationAction(ISD::FLOG2, Ty, Legal);
    setOperationAction(ISD::FMA, Ty, Legal);
    setOperationAction(ISD::FMUL, Ty, Legal);
    setOperationAction(ISD::FRINT, Ty, Legal);
    setOperationAction(ISD::FSQRT, Ty, Legal);
    setOperationAction(ISD::FSUB, Ty, Legal);
    setOperationAction(ISD::VSELECT, Ty, Legal);

    setOperationAction(ISD::SETCC, Ty, Legal);
    setCondCodeAction(ISD::SETOGE, Ty, Expand);
    setCondCodeAction(ISD::SETOGT, Ty, Expand);
    setCondCodeAction(ISD::SETUGE, Ty, Expand);
    setCondCodeAction(ISD::SETUGT, Ty, Expand);
    setCondCodeAction(ISD::SETGE, Ty, Expand);
    setCondCodeAction(ISD::SETGT, Ty, Expand);
  }
}

// Custom lowering of f64 SELECT for MIPS32r6 (see the constructor): emit an
// FSELECT whose condition has been moved into an FPR. Pre-r6 subtargets fall
// back to the generic lowering.
SDValue MipsSETargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  if(!Subtarget.hasMips32r6())
    return MipsTargetLowering::LowerOperation(Op, DAG);

  EVT ResTy = Op->getValueType(0);
  SDLoc DL(Op);

  // Although MTC1_D64 takes an i32 and writes an f64, the upper 32 bits of the
  // floating point register are undefined. Not really an issue as sel.d, which
  // is produced from an FSELECT node, only looks at bit 0.
  SDValue Tmp = DAG.getNode(MipsISD::MTC1_D64, DL, MVT::f64, Op->getOperand(0));
  return DAG.getNode(MipsISD::FSELECT, DL, ResTy, Tmp, Op->getOperand(1),
                     Op->getOperand(2));
}

// Misaligned accesses are allowed (and fast) when the system supports them
// (r6), and otherwise only for i32/i64 which have lwl/lwr-style expansions.
bool MipsSETargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *Fast) const {
  MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;

  if (Subtarget.systemSupportsUnalignedAccess()) {
    // MIPS32r6/MIPS64r6 is required to support unaligned access. It's
    // implementation defined whether this is handled by hardware, software, or
    // a hybrid of the two but it's expected that most implementations will
    // handle the majority of cases in hardware.
    if (Fast)
      *Fast = 1;
    return true;
  }

  switch (SVT) {
  case MVT::i64:
  case MVT::i32:
    if (Fast)
      *Fast = 1;
    return true;
  default:
    return false;
  }
}

// Dispatch the opcodes marked Custom in the constructor to their lowering
// helpers; anything else is handled by the base class.
SDValue MipsSETargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch(Op.getOpcode()) {
  case ISD::LOAD:  return lowerLOAD(Op, DAG);
  case ISD::STORE: return lowerSTORE(Op, DAG);
  case ISD::SMUL_LOHI: return lowerMulDiv(Op, MipsISD::Mult, true, true, DAG);
  case ISD::UMUL_LOHI: return lowerMulDiv(Op, MipsISD::Multu, true, true, DAG);
  case ISD::MULHS:     return lowerMulDiv(Op, MipsISD::Mult, false, true, DAG);
  case ISD::MULHU:     return lowerMulDiv(Op, MipsISD::Multu, false, true, DAG);
  case ISD::MUL:       return lowerMulDiv(Op, MipsISD::Mult, true, false, DAG);
  case ISD::SDIVREM:   return lowerMulDiv(Op, MipsISD::DivRem, true, true, DAG);
  case ISD::UDIVREM:   return lowerMulDiv(Op, MipsISD::DivRemU, true, true,
                                          DAG);
  case ISD::INTRINSIC_WO_CHAIN: return lowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:  return lowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:     return lowerINTRINSIC_VOID(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::BUILD_VECTOR:       return lowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SELECT:             return lowerSELECT(Op, DAG);
  case ISD::BITCAST:            return lowerBITCAST(Op, DAG);
  }

  return MipsTargetLowering::LowerOperation(Op, DAG);
}

// Fold zero extensions into MipsISD::VEXTRACT_[SZ]EXT_ELT
//
// Performs the following transformations:
// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to zero extension if its
//   sign/zero-extension is completely overwritten by the new one performed by
//   the ISD::AND.
// - Removes redundant zero extensions performed by an ISD::AND.
static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  if (!Subtarget.hasMSA())
    return SDValue();

  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  unsigned Op0Opcode = Op0->getOpcode();

  // (and (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d)
  // where $d + 1 == 2^n and n == 32
  // or    $d + 1 == 2^n and n <= 32 and ZExt
  // -> (MipsVExtractZExt $a, $b, $c)
  if (Op0Opcode == MipsISD::VEXTRACT_SEXT_ELT ||
      Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT) {
    ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(Op1);

    if (!Mask)
      return SDValue();

    int32_t Log2IfPositive = (Mask->getAPIntValue() + 1).exactLogBase2();

    if (Log2IfPositive <= 0)
      return SDValue(); // Mask+1 is not a power of 2

    SDValue Op0Op2 = Op0->getOperand(2);
    EVT ExtendTy = cast<VTSDNode>(Op0Op2)->getVT();
    unsigned ExtendTySize = ExtendTy.getSizeInBits();
    unsigned Log2 = Log2IfPositive;

    if ((Op0Opcode == MipsISD::VEXTRACT_ZEXT_ELT && Log2 >= ExtendTySize) ||
        Log2 == ExtendTySize) {
      SDValue Ops[] = { Op0->getOperand(0), Op0->getOperand(1), Op0Op2 };
      return DAG.getNode(MipsISD::VEXTRACT_ZEXT_ELT, SDLoc(Op0),
                         Op0->getVTList(),
                         ArrayRef(Ops, Op0->getNumOperands()));
    }
  }

  return SDValue();
}

// Determine if the specified node is a constant vector splat.
//
// Returns true and sets Imm if:
// * N is a ISD::BUILD_VECTOR representing a constant splat
//
// This function is quite similar to MipsSEDAGToDAGISel::selectVSplat. The
// differences are that it assumes the MSA has already been checked and the
// arbitrary requirement for a maximum of 32-bit integers isn't applied (and
// must not be in order for binsri.d to be selectable).
static bool isVSplat(SDValue N, APInt &Imm, bool IsLittleEndian) {
  BuildVectorSDNode *Node = dyn_cast<BuildVectorSDNode>(N.getNode());

  if (!Node)
    return false;

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  if (!Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs,
                             8, !IsLittleEndian))
    return false;

  Imm = SplatValue;

  return true;
}

// Test whether the given node is an all-ones build_vector.
static bool isVectorAllOnes(SDValue N) {
  // Look through bitcasts. Endianness doesn't matter because we are looking
  // for an all-ones value.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0);

  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N);

  if (!BVN)
    return false;

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;

  // Endianness doesn't matter in this context because we are looking for
  // an all-ones value.
  if (BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs))
    return SplatValue.isAllOnes();

  return false;
}

// Test whether N is the bitwise inverse of OfNode.
571 static bool isBitwiseInverse(SDValue N, SDValue OfNode) { 572 if (N->getOpcode() != ISD::XOR) 573 return false; 574 575 if (isVectorAllOnes(N->getOperand(0))) 576 return N->getOperand(1) == OfNode; 577 578 if (isVectorAllOnes(N->getOperand(1))) 579 return N->getOperand(0) == OfNode; 580 581 return false; 582 } 583 584 // Perform combines where ISD::OR is the root node. 585 // 586 // Performs the following transformations: 587 // - (or (and $a, $mask), (and $b, $inv_mask)) => (vselect $mask, $a, $b) 588 // where $inv_mask is the bitwise inverse of $mask and the 'or' has a 128-bit 589 // vector type. 590 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, 591 TargetLowering::DAGCombinerInfo &DCI, 592 const MipsSubtarget &Subtarget) { 593 if (!Subtarget.hasMSA()) 594 return SDValue(); 595 596 EVT Ty = N->getValueType(0); 597 598 if (!Ty.is128BitVector()) 599 return SDValue(); 600 601 SDValue Op0 = N->getOperand(0); 602 SDValue Op1 = N->getOperand(1); 603 604 if (Op0->getOpcode() == ISD::AND && Op1->getOpcode() == ISD::AND) { 605 SDValue Op0Op0 = Op0->getOperand(0); 606 SDValue Op0Op1 = Op0->getOperand(1); 607 SDValue Op1Op0 = Op1->getOperand(0); 608 SDValue Op1Op1 = Op1->getOperand(1); 609 bool IsLittleEndian = !Subtarget.isLittle(); 610 611 SDValue IfSet, IfClr, Cond; 612 bool IsConstantMask = false; 613 APInt Mask, InvMask; 614 615 // If Op0Op0 is an appropriate mask, try to find it's inverse in either 616 // Op1Op0, or Op1Op1. Keep track of the Cond, IfSet, and IfClr nodes, while 617 // looking. 618 // IfClr will be set if we find a valid match. 
619 if (isVSplat(Op0Op0, Mask, IsLittleEndian)) { 620 Cond = Op0Op0; 621 IfSet = Op0Op1; 622 623 if (isVSplat(Op1Op0, InvMask, IsLittleEndian) && 624 Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask) 625 IfClr = Op1Op1; 626 else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) && 627 Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask) 628 IfClr = Op1Op0; 629 630 IsConstantMask = true; 631 } 632 633 // If IfClr is not yet set, and Op0Op1 is an appropriate mask, try the same 634 // thing again using this mask. 635 // IfClr will be set if we find a valid match. 636 if (!IfClr.getNode() && isVSplat(Op0Op1, Mask, IsLittleEndian)) { 637 Cond = Op0Op1; 638 IfSet = Op0Op0; 639 640 if (isVSplat(Op1Op0, InvMask, IsLittleEndian) && 641 Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask) 642 IfClr = Op1Op1; 643 else if (isVSplat(Op1Op1, InvMask, IsLittleEndian) && 644 Mask.getBitWidth() == InvMask.getBitWidth() && Mask == ~InvMask) 645 IfClr = Op1Op0; 646 647 IsConstantMask = true; 648 } 649 650 // If IfClr is not yet set, try looking for a non-constant match. 651 // IfClr will be set if we find a valid match amongst the eight 652 // possibilities. 
653 if (!IfClr.getNode()) { 654 if (isBitwiseInverse(Op0Op0, Op1Op0)) { 655 Cond = Op1Op0; 656 IfSet = Op1Op1; 657 IfClr = Op0Op1; 658 } else if (isBitwiseInverse(Op0Op1, Op1Op0)) { 659 Cond = Op1Op0; 660 IfSet = Op1Op1; 661 IfClr = Op0Op0; 662 } else if (isBitwiseInverse(Op0Op0, Op1Op1)) { 663 Cond = Op1Op1; 664 IfSet = Op1Op0; 665 IfClr = Op0Op1; 666 } else if (isBitwiseInverse(Op0Op1, Op1Op1)) { 667 Cond = Op1Op1; 668 IfSet = Op1Op0; 669 IfClr = Op0Op0; 670 } else if (isBitwiseInverse(Op1Op0, Op0Op0)) { 671 Cond = Op0Op0; 672 IfSet = Op0Op1; 673 IfClr = Op1Op1; 674 } else if (isBitwiseInverse(Op1Op1, Op0Op0)) { 675 Cond = Op0Op0; 676 IfSet = Op0Op1; 677 IfClr = Op1Op0; 678 } else if (isBitwiseInverse(Op1Op0, Op0Op1)) { 679 Cond = Op0Op1; 680 IfSet = Op0Op0; 681 IfClr = Op1Op1; 682 } else if (isBitwiseInverse(Op1Op1, Op0Op1)) { 683 Cond = Op0Op1; 684 IfSet = Op0Op0; 685 IfClr = Op1Op0; 686 } 687 } 688 689 // At this point, IfClr will be set if we have a valid match. 690 if (!IfClr.getNode()) 691 return SDValue(); 692 693 assert(Cond.getNode() && IfSet.getNode()); 694 695 // Fold degenerate cases. 696 if (IsConstantMask) { 697 if (Mask.isAllOnes()) 698 return IfSet; 699 else if (Mask == 0) 700 return IfClr; 701 } 702 703 // Transform the DAG into an equivalent VSELECT. 704 return DAG.getNode(ISD::VSELECT, SDLoc(N), Ty, Cond, IfSet, IfClr); 705 } 706 707 return SDValue(); 708 } 709 710 static bool shouldTransformMulToShiftsAddsSubs(APInt C, EVT VT, 711 SelectionDAG &DAG, 712 const MipsSubtarget &Subtarget) { 713 // Estimate the number of operations the below transform will turn a 714 // constant multiply into. The number is approximately equal to the minimal 715 // number of powers of two that constant can be broken down to by adding 716 // or subtracting them. 717 // 718 // If we have taken more than 12[1] / 8[2] steps to attempt the 719 // optimization for a native sized value, it is more than likely that this 720 // optimization will make things worse. 
721 // 722 // [1] MIPS64 requires 6 instructions at most to materialize any constant, 723 // multiplication requires at least 4 cycles, but another cycle (or two) 724 // to retrieve the result from the HI/LO registers. 725 // 726 // [2] For MIPS32, more than 8 steps is expensive as the constant could be 727 // materialized in 2 instructions, multiplication requires at least 4 728 // cycles, but another cycle (or two) to retrieve the result from the 729 // HI/LO registers. 730 // 731 // TODO: 732 // - MaxSteps needs to consider the `VT` of the constant for the current 733 // target. 734 // - Consider to perform this optimization after type legalization. 735 // That allows to remove a workaround for types not supported natively. 736 // - Take in account `-Os, -Oz` flags because this optimization 737 // increases code size. 738 unsigned MaxSteps = Subtarget.isABI_O32() ? 8 : 12; 739 740 SmallVector<APInt, 16> WorkStack(1, C); 741 unsigned Steps = 0; 742 unsigned BitWidth = C.getBitWidth(); 743 744 while (!WorkStack.empty()) { 745 APInt Val = WorkStack.pop_back_val(); 746 747 if (Val == 0 || Val == 1) 748 continue; 749 750 if (Steps >= MaxSteps) 751 return false; 752 753 if (Val.isPowerOf2()) { 754 ++Steps; 755 continue; 756 } 757 758 APInt Floor = APInt(BitWidth, 1) << Val.logBase2(); 759 APInt Ceil = Val.isNegative() ? APInt(BitWidth, 0) 760 : APInt(BitWidth, 1) << C.ceilLogBase2(); 761 if ((Val - Floor).ule(Ceil - Val)) { 762 WorkStack.push_back(Floor); 763 WorkStack.push_back(Val - Floor); 764 } else { 765 WorkStack.push_back(Ceil); 766 WorkStack.push_back(Ceil - Val); 767 } 768 769 ++Steps; 770 } 771 772 // If the value being multiplied is not supported natively, we have to pay 773 // an additional legalization cost, conservatively assume an increase in the 774 // cost of 3 instructions per step. This values for this heuristic were 775 // determined experimentally. 
776 unsigned RegisterSize = DAG.getTargetLoweringInfo() 777 .getRegisterType(*DAG.getContext(), VT) 778 .getSizeInBits(); 779 Steps *= (VT.getSizeInBits() != RegisterSize) * 3; 780 if (Steps > 27) 781 return false; 782 783 return true; 784 } 785 786 static SDValue genConstMult(SDValue X, APInt C, const SDLoc &DL, EVT VT, 787 EVT ShiftTy, SelectionDAG &DAG) { 788 // Return 0. 789 if (C == 0) 790 return DAG.getConstant(0, DL, VT); 791 792 // Return x. 793 if (C == 1) 794 return X; 795 796 // If c is power of 2, return (shl x, log2(c)). 797 if (C.isPowerOf2()) 798 return DAG.getNode(ISD::SHL, DL, VT, X, 799 DAG.getConstant(C.logBase2(), DL, ShiftTy)); 800 801 unsigned BitWidth = C.getBitWidth(); 802 APInt Floor = APInt(BitWidth, 1) << C.logBase2(); 803 APInt Ceil = C.isNegative() ? APInt(BitWidth, 0) : 804 APInt(BitWidth, 1) << C.ceilLogBase2(); 805 806 // If |c - floor_c| <= |c - ceil_c|, 807 // where floor_c = pow(2, floor(log2(c))) and ceil_c = pow(2, ceil(log2(c))), 808 // return (add constMult(x, floor_c), constMult(x, c - floor_c)). 809 if ((C - Floor).ule(Ceil - C)) { 810 SDValue Op0 = genConstMult(X, Floor, DL, VT, ShiftTy, DAG); 811 SDValue Op1 = genConstMult(X, C - Floor, DL, VT, ShiftTy, DAG); 812 return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1); 813 } 814 815 // If |c - floor_c| > |c - ceil_c|, 816 // return (sub constMult(x, ceil_c), constMult(x, ceil_c - c)). 
817 SDValue Op0 = genConstMult(X, Ceil, DL, VT, ShiftTy, DAG); 818 SDValue Op1 = genConstMult(X, Ceil - C, DL, VT, ShiftTy, DAG); 819 return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1); 820 } 821 822 static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG, 823 const TargetLowering::DAGCombinerInfo &DCI, 824 const MipsSETargetLowering *TL, 825 const MipsSubtarget &Subtarget) { 826 EVT VT = N->getValueType(0); 827 828 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) 829 if (!VT.isVector() && shouldTransformMulToShiftsAddsSubs( 830 C->getAPIntValue(), VT, DAG, Subtarget)) 831 return genConstMult(N->getOperand(0), C->getAPIntValue(), SDLoc(N), VT, 832 TL->getScalarShiftAmountTy(DAG.getDataLayout(), VT), 833 DAG); 834 835 return SDValue(N, 0); 836 } 837 838 static SDValue performDSPShiftCombine(unsigned Opc, SDNode *N, EVT Ty, 839 SelectionDAG &DAG, 840 const MipsSubtarget &Subtarget) { 841 // See if this is a vector splat immediate node. 842 APInt SplatValue, SplatUndef; 843 unsigned SplatBitSize; 844 bool HasAnyUndefs; 845 unsigned EltSize = Ty.getScalarSizeInBits(); 846 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1)); 847 848 if (!Subtarget.hasDSP()) 849 return SDValue(); 850 851 if (!BV || 852 !BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, HasAnyUndefs, 853 EltSize, !Subtarget.isLittle()) || 854 (SplatBitSize != EltSize) || 855 (SplatValue.getZExtValue() >= EltSize)) 856 return SDValue(); 857 858 SDLoc DL(N); 859 return DAG.getNode(Opc, DL, Ty, N->getOperand(0), 860 DAG.getConstant(SplatValue.getZExtValue(), DL, MVT::i32)); 861 } 862 863 static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG, 864 TargetLowering::DAGCombinerInfo &DCI, 865 const MipsSubtarget &Subtarget) { 866 EVT Ty = N->getValueType(0); 867 868 if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8)) 869 return SDValue(); 870 871 return performDSPShiftCombine(MipsISD::SHLL_DSP, N, Ty, DAG, Subtarget); 872 } 873 874 // Fold sign-extensions into 
MipsISD::VEXTRACT_[SZ]EXT_ELT for MSA and fold
// constant splats into MipsISD::SHRA_DSP for DSPr2.
//
// Performs the following transformations:
// - Changes MipsISD::VEXTRACT_[SZ]EXT_ELT to sign extension if its
//   sign/zero-extension is completely overwritten by the new one performed by
//   the ISD::SRA and ISD::SHL nodes.
// - Removes redundant sign extensions performed by an ISD::SRA and ISD::SHL
//   sequence.
//
// See performDSPShiftCombine for more information about the transformation
// used for DSPr2.
static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget &Subtarget) {
  EVT Ty = N->getValueType(0);

  if (Subtarget.hasMSA()) {
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);

    // (sra (shl (MipsVExtract[SZ]Ext $a, $b, $c), imm:$d), imm:$d)
    // where $d + sizeof($c) == 32
    // or $d + sizeof($c) <= 32 and SExt
    // -> (MipsVExtractSExt $a, $b, $c)
    if (Op0->getOpcode() == ISD::SHL && Op1 == Op0->getOperand(1)) {
      SDValue Op0Op0 = Op0->getOperand(0);
      ConstantSDNode *ShAmount = dyn_cast<ConstantSDNode>(Op1);

      // Only a constant shift amount can be reasoned about.
      if (!ShAmount)
        return SDValue();

      if (Op0Op0->getOpcode() != MipsISD::VEXTRACT_SEXT_ELT &&
          Op0Op0->getOpcode() != MipsISD::VEXTRACT_ZEXT_ELT)
        return SDValue();

      // Width of the extension the original element-extract performed.
      EVT ExtendTy = cast<VTSDNode>(Op0Op0->getOperand(2))->getVT();
      unsigned TotalBits = ShAmount->getZExtValue() + ExtendTy.getSizeInBits();

      // The shl/sra pair sign-extends from the same (or a wider) bit, so the
      // whole sequence collapses to one sign-extending extract.
      if (TotalBits == 32 ||
          (Op0Op0->getOpcode() == MipsISD::VEXTRACT_SEXT_ELT &&
           TotalBits <= 32)) {
        SDValue Ops[] = { Op0Op0->getOperand(0), Op0Op0->getOperand(1),
                          Op0Op0->getOperand(2) };
        return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, SDLoc(Op0Op0),
                           Op0Op0->getVTList(),
                           ArrayRef(Ops, Op0Op0->getNumOperands()));
      }
    }
  }

  // The DSP path: v2i16 needs plain DSP, v4i8 additionally needs DSPr2.
  if ((Ty != MVT::v2i16) && ((Ty != MVT::v4i8) || !Subtarget.hasDSPR2()))
    return
SDValue(); 927 928 return performDSPShiftCombine(MipsISD::SHRA_DSP, N, Ty, DAG, Subtarget); 929 } 930 931 932 static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG, 933 TargetLowering::DAGCombinerInfo &DCI, 934 const MipsSubtarget &Subtarget) { 935 EVT Ty = N->getValueType(0); 936 937 if (((Ty != MVT::v2i16) || !Subtarget.hasDSPR2()) && (Ty != MVT::v4i8)) 938 return SDValue(); 939 940 return performDSPShiftCombine(MipsISD::SHRL_DSP, N, Ty, DAG, Subtarget); 941 } 942 943 static bool isLegalDSPCondCode(EVT Ty, ISD::CondCode CC) { 944 bool IsV216 = (Ty == MVT::v2i16); 945 946 switch (CC) { 947 case ISD::SETEQ: 948 case ISD::SETNE: return true; 949 case ISD::SETLT: 950 case ISD::SETLE: 951 case ISD::SETGT: 952 case ISD::SETGE: return IsV216; 953 case ISD::SETULT: 954 case ISD::SETULE: 955 case ISD::SETUGT: 956 case ISD::SETUGE: return !IsV216; 957 default: return false; 958 } 959 } 960 961 static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG) { 962 EVT Ty = N->getValueType(0); 963 964 if ((Ty != MVT::v2i16) && (Ty != MVT::v4i8)) 965 return SDValue(); 966 967 if (!isLegalDSPCondCode(Ty, cast<CondCodeSDNode>(N->getOperand(2))->get())) 968 return SDValue(); 969 970 return DAG.getNode(MipsISD::SETCC_DSP, SDLoc(N), Ty, N->getOperand(0), 971 N->getOperand(1), N->getOperand(2)); 972 } 973 974 static SDValue performVSELECTCombine(SDNode *N, SelectionDAG &DAG) { 975 EVT Ty = N->getValueType(0); 976 977 if (Ty == MVT::v2i16 || Ty == MVT::v4i8) { 978 SDValue SetCC = N->getOperand(0); 979 980 if (SetCC.getOpcode() != MipsISD::SETCC_DSP) 981 return SDValue(); 982 983 return DAG.getNode(MipsISD::SELECT_CC_DSP, SDLoc(N), Ty, 984 SetCC.getOperand(0), SetCC.getOperand(1), 985 N->getOperand(1), N->getOperand(2), SetCC.getOperand(2)); 986 } 987 988 return SDValue(); 989 } 990 991 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG, 992 const MipsSubtarget &Subtarget) { 993 EVT Ty = N->getValueType(0); 994 995 if (Subtarget.hasMSA() && Ty.is128BitVector() && 
      Ty.isInteger()) {
    // Try the following combines:
    // (xor (or $a, $b), (build_vector allones))
    // (xor (or $a, $b), (bitcast (build_vector allones)))
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);
    SDValue NotOp;

    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
      NotOp = Op1;
    else if (ISD::isBuildVectorAllOnes(Op1.getNode()))
      NotOp = Op0;
    else
      return SDValue();

    if (NotOp->getOpcode() == ISD::OR)
      return DAG.getNode(MipsISD::VNOR, SDLoc(N), Ty, NotOp->getOperand(0),
                         NotOp->getOperand(1));
  }

  return SDValue();
}

// Dispatch node-specific combines; anything not (successfully) handled here
// falls through to the generic MipsTargetLowering combines.
SDValue
MipsSETargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Val;

  switch (N->getOpcode()) {
  case ISD::AND:
    Val = performANDCombine(N, DAG, DCI, Subtarget);
    break;
  case ISD::OR:
    Val = performORCombine(N, DAG, DCI, Subtarget);
    break;
  case ISD::MUL:
    // These cases return directly and never fall back to the base class.
    return performMULCombine(N, DAG, DCI, this, Subtarget);
  case ISD::SHL:
    Val = performSHLCombine(N, DAG, DCI, Subtarget);
    break;
  case ISD::SRA:
    return performSRACombine(N, DAG, DCI, Subtarget);
  case ISD::SRL:
    return performSRLCombine(N, DAG, DCI, Subtarget);
  case ISD::VSELECT:
    return performVSELECTCombine(N, DAG);
  case ISD::XOR:
    Val = performXORCombine(N, DAG, Subtarget);
    break;
  case ISD::SETCC:
    Val = performSETCCCombine(N, DAG);
    break;
  }

  if (Val.getNode()) {
    LLVM_DEBUG(dbgs() << "\nMipsSE DAG Combine:\n";
               N->printrWithDepth(dbgs(), &DAG); dbgs() << "\n=> \n";
               Val.getNode()->printrWithDepth(dbgs(), &DAG); dbgs() << "\n");
    return Val;
  }

  return MipsTargetLowering::PerformDAGCombine(N, DCI);
}

// Expand the pseudo instructions that need custom MachineBasicBlock-level
// insertion; anything unknown is delegated to the base class.
MachineBasicBlock *
MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
  switch
(MI.getOpcode()) {
  default:
    return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case Mips::BPOSGE32_PSEUDO:
    return emitBPOSGE32(MI, BB);
  // MSA compare-branch pseudos, expanded around the matching BNZ/BZ
  // instruction for each element width (and _V for the whole vector).
  case Mips::SNZ_B_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_B);
  case Mips::SNZ_H_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_H);
  case Mips::SNZ_W_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_W);
  case Mips::SNZ_D_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_D);
  case Mips::SNZ_V_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BNZ_V);
  case Mips::SZ_B_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_B);
  case Mips::SZ_H_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_H);
  case Mips::SZ_W_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_W);
  case Mips::SZ_D_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_D);
  case Mips::SZ_V_PSEUDO:
    return emitMSACBranchPseudo(MI, BB, Mips::BZ_V);
  case Mips::COPY_FW_PSEUDO:
    return emitCOPY_FW(MI, BB);
  case Mips::COPY_FD_PSEUDO:
    return emitCOPY_FD(MI, BB);
  case Mips::INSERT_FW_PSEUDO:
    return emitINSERT_FW(MI, BB);
  case Mips::INSERT_FD_PSEUDO:
    return emitINSERT_FD(MI, BB);
  // Variable-index element inserts, parameterized by element size in bytes
  // and whether the element is floating point.
  case Mips::INSERT_B_VIDX_PSEUDO:
  case Mips::INSERT_B_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 1, false);
  case Mips::INSERT_H_VIDX_PSEUDO:
  case Mips::INSERT_H_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 2, false);
  case Mips::INSERT_W_VIDX_PSEUDO:
  case Mips::INSERT_W_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 4, false);
  case Mips::INSERT_D_VIDX_PSEUDO:
  case Mips::INSERT_D_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 8, false);
  case Mips::INSERT_FW_VIDX_PSEUDO:
  case Mips::INSERT_FW_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 4, true);
  case Mips::INSERT_FD_VIDX_PSEUDO:
  case Mips::INSERT_FD_VIDX64_PSEUDO:
    return emitINSERT_DF_VIDX(MI, BB, 8, true);
  case Mips::FILL_FW_PSEUDO:
    return emitFILL_FW(MI, BB);
  case Mips::FILL_FD_PSEUDO:
    return emitFILL_FD(MI, BB);
  case Mips::FEXP2_W_1_PSEUDO:
    return emitFEXP2_W_1(MI, BB);
  case Mips::FEXP2_D_1_PSEUDO:
    return emitFEXP2_D_1(MI, BB);
  case Mips::ST_F16:
    return emitST_F16_PSEUDO(MI, BB);
  case Mips::LD_F16:
    return emitLD_F16_PSEUDO(MI, BB);
  // f16 <-> f32/f64 conversion pseudos; the bool selects the double-width
  // (_D) variant.
  case Mips::MSA_FP_EXTEND_W_PSEUDO:
    return emitFPEXTEND_PSEUDO(MI, BB, false);
  case Mips::MSA_FP_ROUND_W_PSEUDO:
    return emitFPROUND_PSEUDO(MI, BB, false);
  case Mips::MSA_FP_EXTEND_D_PSEUDO:
    return emitFPEXTEND_PSEUDO(MI, BB, true);
  case Mips::MSA_FP_ROUND_D_PSEUDO:
    return emitFPROUND_PSEUDO(MI, BB, true);
  }
}

// Decide whether a call may be emitted as a tail call. Gated behind the
// -mips-tail-calls flag (off by default).
bool MipsSETargetLowering::isEligibleForTailCallOptimization(
    const CCState &CCInfo, unsigned NextStackOffset,
    const MipsFunctionInfo &FI) const {
  if (!UseMipsTailCalls)
    return false;

  // Exception has to be cleared with eret.
  if (FI.isISR())
    return false;

  // Return false if either the callee or caller has a byval argument.
  if (CCInfo.getInRegsParamsCount() > 0 || FI.hasByvalArg())
    return false;

  // Return true if the callee's argument area is no larger than the
  // caller's.
  return NextStackOffset <= FI.getIncomingArgSize();
}

// Prepend the callee to the operand list before delegating to the common
// Mips implementation.
void MipsSETargetLowering::
getOpndList(SmallVectorImpl<SDValue> &Ops,
            std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
            bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
            SDValue Chain) const {
  Ops.push_back(Callee);
  MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
                                  InternalLinkage, IsCallReloc, CLI, Callee,
                                  Chain);
}

// Custom-lower f64 loads when -mno-ldc1-sdc1 forbids the double-precision
// load instruction; everything else uses the default lowering.
SDValue MipsSETargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode &Nd = *cast<LoadSDNode>(Op);

  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
    return MipsTargetLowering::lowerLOAD(Op, DAG);

  // Replace a double precision load with two i32 loads and a buildpair64.
  SDLoc DL(Op);
  SDValue Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
  EVT PtrVT = Ptr.getValueType();

  // i32 load from lower address.
  SDValue Lo = DAG.getLoad(MVT::i32, DL, Chain, Ptr, MachinePointerInfo(),
                           Nd.getAlign(), Nd.getMemOperand()->getFlags());

  // i32 load from higher address.
  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
  SDValue Hi = DAG.getLoad(
      MVT::i32, DL, Lo.getValue(1), Ptr, MachinePointerInfo(),
      commonAlignment(Nd.getAlign(), 4), Nd.getMemOperand()->getFlags());

  // On big-endian targets the lower-addressed word holds the high half.
  if (!Subtarget.isLittle())
    std::swap(Lo, Hi);

  SDValue BP = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
  // Merge the f64 value with the chain of the second (last) load.
  SDValue Ops[2] = {BP, Hi.getValue(1)};
  return DAG.getMergeValues(Ops, DL);
}

// Custom-lower f64 stores when -mno-ldc1-sdc1 forbids the double-precision
// store instruction; everything else uses the default lowering.
SDValue MipsSETargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  StoreSDNode &Nd = *cast<StoreSDNode>(Op);

  if (Nd.getMemoryVT() != MVT::f64 || !NoDPLoadStore)
    return MipsTargetLowering::lowerSTORE(Op, DAG);

  // Replace a double precision store with two extractelement64s and i32 stores.
  SDLoc DL(Op);
  SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
  EVT PtrVT = Ptr.getValueType();
  SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                           Val, DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                           Val, DAG.getConstant(1, DL, MVT::i32));

  // On big-endian targets the high half goes to the lower address.
  if (!Subtarget.isLittle())
    std::swap(Lo, Hi);

  // i32 store to lower address.
  Chain = DAG.getStore(Chain, DL, Lo, Ptr, MachinePointerInfo(), Nd.getAlign(),
                       Nd.getMemOperand()->getFlags(), Nd.getAAInfo());

  // i32 store to higher address.
  Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
  return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
                      commonAlignment(Nd.getAlign(), 4),
                      Nd.getMemOperand()->getFlags(), Nd.getAAInfo());
}

// Lower i64 <-> f64 bitcasts by moving the two 32-bit halves individually.
SDValue MipsSETargetLowering::lowerBITCAST(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT Src = Op.getOperand(0).getValueType().getSimpleVT();
  MVT Dest = Op.getValueType().getSimpleVT();

  // Bitcast i64 to double.
  if (Src == MVT::i64 && Dest == MVT::f64) {
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
                             Op.getOperand(0), DAG.getIntPtrConstant(0, DL));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
                             Op.getOperand(0), DAG.getIntPtrConstant(1, DL));
    return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
  }

  // Bitcast double to i64.
  if (Src == MVT::f64 && Dest == MVT::i64) {
    SDValue Lo =
        DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                    DAG.getConstant(0, DL, MVT::i32));
    SDValue Hi =
        DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
                    DAG.getConstant(1, DL, MVT::i32));
    return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi);
  }

  // Skip other cases of bitcast and use default lowering.
  return SDValue();
}

// Lower a multiply/divide-style node into the accumulator-writing target
// node NewOpc, then read back the LO and/or HI halves the caller asked for.
SDValue MipsSETargetLowering::lowerMulDiv(SDValue Op, unsigned NewOpc,
                                          bool HasLo, bool HasHi,
                                          SelectionDAG &DAG) const {
  // MIPS32r6/MIPS64r6 removed accumulator based multiplies.
  assert(!Subtarget.hasMips32r6());

  EVT Ty = Op.getOperand(0).getValueType();
  SDLoc DL(Op);
  // The accumulator result has no EVT, hence MVT::Untyped.
  SDValue Mult = DAG.getNode(NewOpc, DL, MVT::Untyped,
                             Op.getOperand(0), Op.getOperand(1));
  SDValue Lo, Hi;

  if (HasLo)
    Lo = DAG.getNode(MipsISD::MFLO, DL, Ty, Mult);
  if (HasHi)
    Hi = DAG.getNode(MipsISD::MFHI, DL, Ty, Mult);

  // Only one half requested: return it directly.
  if (!HasLo || !HasHi)
    return HasLo ?
Lo : Hi; 1274 1275 SDValue Vals[] = { Lo, Hi }; 1276 return DAG.getMergeValues(Vals, DL); 1277 } 1278 1279 static SDValue initAccumulator(SDValue In, const SDLoc &DL, SelectionDAG &DAG) { 1280 SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, 1281 DAG.getConstant(0, DL, MVT::i32)); 1282 SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In, 1283 DAG.getConstant(1, DL, MVT::i32)); 1284 return DAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, InLo, InHi); 1285 } 1286 1287 static SDValue extractLOHI(SDValue Op, const SDLoc &DL, SelectionDAG &DAG) { 1288 SDValue Lo = DAG.getNode(MipsISD::MFLO, DL, MVT::i32, Op); 1289 SDValue Hi = DAG.getNode(MipsISD::MFHI, DL, MVT::i32, Op); 1290 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Lo, Hi); 1291 } 1292 1293 // This function expands mips intrinsic nodes which have 64-bit input operands 1294 // or output values. 1295 // 1296 // out64 = intrinsic-node in64 1297 // => 1298 // lo = copy (extract-element (in64, 0)) 1299 // hi = copy (extract-element (in64, 1)) 1300 // mips-specific-node 1301 // v0 = copy lo 1302 // v1 = copy hi 1303 // out64 = merge-values (v0, v1) 1304 // 1305 static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) { 1306 SDLoc DL(Op); 1307 bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other; 1308 SmallVector<SDValue, 3> Ops; 1309 unsigned OpNo = 0; 1310 1311 // See if Op has a chain input. 1312 if (HasChainIn) 1313 Ops.push_back(Op->getOperand(OpNo++)); 1314 1315 // The next operand is the intrinsic opcode. 1316 assert(Op->getOperand(OpNo).getOpcode() == ISD::TargetConstant); 1317 1318 // See if the next operand has type i64. 1319 SDValue Opnd = Op->getOperand(++OpNo), In64; 1320 1321 if (Opnd.getValueType() == MVT::i64) 1322 In64 = initAccumulator(Opnd, DL, DAG); 1323 else 1324 Ops.push_back(Opnd); 1325 1326 // Push the remaining operands. 
  for (++OpNo ; OpNo < Op->getNumOperands(); ++OpNo)
    Ops.push_back(Op->getOperand(OpNo));

  // Add In64 to the end of the list.
  if (In64.getNode())
    Ops.push_back(In64);

  // Scan output.
  SmallVector<EVT, 2> ResTys;

  // i64 results live in the accumulator, so they become MVT::Untyped here.
  for (EVT Ty : Op->values())
    ResTys.push_back((Ty == MVT::i64) ? MVT::Untyped : Ty);

  // Create node.
  SDValue Val = DAG.getNode(Opc, DL, ResTys, Ops);
  SDValue Out = (ResTys[0] == MVT::Untyped) ? extractLOHI(Val, DL, DAG) : Val;

  if (!HasChainIn)
    return Out;

  // Re-attach the chain output alongside the (possibly re-assembled) value.
  assert(Val->getValueType(1) == MVT::Other);
  SDValue Vals[] = { Out, SDValue(Val.getNode(), 1) };
  return DAG.getMergeValues(Vals, DL);
}

// Lower an MSA copy intrinsic into the specified SelectionDAG node
static SDValue lowerMSACopyIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) {
  SDLoc DL(Op);
  SDValue Vec = Op->getOperand(1);
  SDValue Idx = Op->getOperand(2);
  EVT ResTy = Op->getValueType(0);
  EVT EltTy = Vec->getValueType(0).getVectorElementType();

  // The element type is attached so the extension width is known downstream.
  SDValue Result = DAG.getNode(Opc, DL, ResTy, Vec, Idx,
                               DAG.getValueType(EltTy));

  return Result;
}

// Splat operand OpNr of Op across a vector of the result type, masking each
// lane to a single bit when the splat is built through a wider via-type.
static SDValue lowerMSASplatZExt(SDValue Op, unsigned OpNr, SelectionDAG &DAG) {
  EVT ResVecTy = Op->getValueType(0);
  EVT ViaVecTy = ResVecTy;
  bool BigEndian = !DAG.getSubtarget().getTargetTriple().isLittleEndian();
  SDLoc DL(Op);

  // When ResVecTy == MVT::v2i64, LaneA is the upper 32 bits of the lane and
  // LaneB is the lower 32-bits. Otherwise LaneA and LaneB are alternating
  // lanes.
  SDValue LaneA = Op->getOperand(OpNr);
  SDValue LaneB;

  if (ResVecTy == MVT::v2i64) {
    // In case of the index being passed as an immediate value, set the upper
    // lane to 0 so that the splati.d instruction can be matched.
    if (isa<ConstantSDNode>(LaneA))
      LaneB = DAG.getConstant(0, DL, MVT::i32);
    // Having the index passed in a register, set the upper lane to the same
    // value as the lower - this results in the BUILD_VECTOR node not being
    // expanded through stack. This way we are able to pattern match the set of
    // nodes created here to splat.d.
    else
      LaneB = LaneA;
    ViaVecTy = MVT::v4i32;
    if(BigEndian)
      std::swap(LaneA, LaneB);
  } else
    LaneB = LaneA;

  // Sized for the widest case (16 lanes); only the first
  // ViaVecTy.getVectorNumElements() entries are consumed below.
  SDValue Ops[16] = { LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB,
                      LaneA, LaneB, LaneA, LaneB, LaneA, LaneB, LaneA, LaneB };

  SDValue Result = DAG.getBuildVector(
      ViaVecTy, DL, ArrayRef(Ops, ViaVecTy.getVectorNumElements()));

  if (ViaVecTy != ResVecTy) {
    // AND each via-lane with 1 before casting back to the result type.
    SDValue One = DAG.getConstant(1, DL, ViaVecTy);
    Result = DAG.getNode(ISD::BITCAST, DL, ResVecTy,
                         DAG.getNode(ISD::AND, DL, ViaVecTy, Result, One));
  }

  return Result;
}

// Splat the constant operand ImmOp of Op across a vector of the result type,
// sign- or zero-extending it to the element width as requested.
static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG,
                                bool IsSigned = false) {
  auto *CImm = cast<ConstantSDNode>(Op->getOperand(ImmOp));
  return DAG.getConstant(
      APInt(Op->getValueType(0).getScalarType().getSizeInBits(),
            IsSigned ? CImm->getSExtValue() : CImm->getZExtValue(), IsSigned),
      SDLoc(Op), Op->getValueType(0));
}

// Build a BUILD_VECTOR splatting SplatValue over VecTy; v2i64 splats are
// assembled as v4i32 from the two 32-bit halves and bitcast back.
static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
                                   bool BigEndian, SelectionDAG &DAG) {
  EVT ViaVecTy = VecTy;
  SDValue SplatValueA = SplatValue;
  SDValue SplatValueB = SplatValue;
  SDLoc DL(SplatValue);

  if (VecTy == MVT::v2i64) {
    // v2i64 BUILD_VECTOR must be performed via v4i32 so split into i32's.
    ViaVecTy = MVT::v4i32;

    // A = low 32 bits, B = high 32 bits.
    SplatValueA = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValue);
    SplatValueB = DAG.getNode(ISD::SRL, DL, MVT::i64, SplatValue,
                              DAG.getConstant(32, DL, MVT::i32));
    SplatValueB = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValueB);
  }

  // We currently hold the parts in little endian order. Swap them if
  // necessary.
  if (BigEndian)
    std::swap(SplatValueA, SplatValueB);

  // Sized for the widest case (16 lanes); only the first
  // ViaVecTy.getVectorNumElements() entries are consumed below.
  SDValue Ops[16] = { SplatValueA, SplatValueB, SplatValueA, SplatValueB,
                      SplatValueA, SplatValueB, SplatValueA, SplatValueB,
                      SplatValueA, SplatValueB, SplatValueA, SplatValueB,
                      SplatValueA, SplatValueB, SplatValueA, SplatValueB };

  SDValue Result = DAG.getBuildVector(
      ViaVecTy, DL, ArrayRef(Ops, ViaVecTy.getVectorNumElements()));

  if (VecTy != ViaVecTy)
    Result = DAG.getNode(ISD::BITCAST, DL, VecTy, Result);

  return Result;
}

// Lower an MSA bit-immediate intrinsic: apply Opc to operand 1 and a vector
// whose every element is (1 << Imm).
static SDValue lowerMSABinaryBitImmIntr(SDValue Op, SelectionDAG &DAG,
                                        unsigned Opc, SDValue Imm,
                                        bool BigEndian) {
  EVT VecTy = Op->getValueType(0);
  SDValue Exp2Imm;
  SDLoc DL(Op);

  // The DAG Combiner can't constant fold bitcasted vectors yet so we must do it
  // here for now.
  if (VecTy == MVT::v2i64) {
    if (ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(Imm)) {
      APInt BitImm = APInt(64, 1) << CImm->getAPIntValue();

      SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), DL,
                                           MVT::i32);
      SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), DL, MVT::i32);

      if (BigEndian)
        std::swap(BitImmLoOp, BitImmHiOp);

      Exp2Imm = DAG.getNode(
          ISD::BITCAST, DL, MVT::v2i64,
          DAG.getBuildVector(MVT::v4i32, DL,
                             {BitImmLoOp, BitImmHiOp, BitImmLoOp, BitImmHiOp}));
    }
  }

  if (!Exp2Imm.getNode()) {
    // We couldnt constant fold, do a vector shift instead

    // Extend i32 to i64 if necessary.
Sign or zero extend doesn't matter since 1486 // only values 0-63 are valid. 1487 if (VecTy == MVT::v2i64) 1488 Imm = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Imm); 1489 1490 Exp2Imm = getBuildVectorSplat(VecTy, Imm, BigEndian, DAG); 1491 1492 Exp2Imm = DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, DL, VecTy), 1493 Exp2Imm); 1494 } 1495 1496 return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm); 1497 } 1498 1499 static SDValue truncateVecElts(SDValue Op, SelectionDAG &DAG) { 1500 SDLoc DL(Op); 1501 EVT ResTy = Op->getValueType(0); 1502 SDValue Vec = Op->getOperand(2); 1503 bool BigEndian = !DAG.getSubtarget().getTargetTriple().isLittleEndian(); 1504 MVT ResEltTy = ResTy == MVT::v2i64 ? MVT::i64 : MVT::i32; 1505 SDValue ConstValue = DAG.getConstant(Vec.getScalarValueSizeInBits() - 1, 1506 DL, ResEltTy); 1507 SDValue SplatVec = getBuildVectorSplat(ResTy, ConstValue, BigEndian, DAG); 1508 1509 return DAG.getNode(ISD::AND, DL, ResTy, Vec, SplatVec); 1510 } 1511 1512 static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) { 1513 EVT ResTy = Op->getValueType(0); 1514 SDLoc DL(Op); 1515 SDValue One = DAG.getConstant(1, DL, ResTy); 1516 SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, truncateVecElts(Op, DAG)); 1517 1518 return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), 1519 DAG.getNOT(DL, Bit, ResTy)); 1520 } 1521 1522 static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) { 1523 SDLoc DL(Op); 1524 EVT ResTy = Op->getValueType(0); 1525 APInt BitImm = APInt(ResTy.getScalarSizeInBits(), 1) 1526 << cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue(); 1527 SDValue BitMask = DAG.getConstant(~BitImm, DL, ResTy); 1528 1529 return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), BitMask); 1530 } 1531 1532 SDValue MipsSETargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op, 1533 SelectionDAG &DAG) const { 1534 SDLoc DL(Op); 1535 unsigned Intrinsic = cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue(); 1536 switch (Intrinsic) 
{ 1537 default: 1538 return SDValue(); 1539 case Intrinsic::mips_shilo: 1540 return lowerDSPIntr(Op, DAG, MipsISD::SHILO); 1541 case Intrinsic::mips_dpau_h_qbl: 1542 return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL); 1543 case Intrinsic::mips_dpau_h_qbr: 1544 return lowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR); 1545 case Intrinsic::mips_dpsu_h_qbl: 1546 return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL); 1547 case Intrinsic::mips_dpsu_h_qbr: 1548 return lowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR); 1549 case Intrinsic::mips_dpa_w_ph: 1550 return lowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH); 1551 case Intrinsic::mips_dps_w_ph: 1552 return lowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH); 1553 case Intrinsic::mips_dpax_w_ph: 1554 return lowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH); 1555 case Intrinsic::mips_dpsx_w_ph: 1556 return lowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH); 1557 case Intrinsic::mips_mulsa_w_ph: 1558 return lowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH); 1559 case Intrinsic::mips_mult: 1560 return lowerDSPIntr(Op, DAG, MipsISD::Mult); 1561 case Intrinsic::mips_multu: 1562 return lowerDSPIntr(Op, DAG, MipsISD::Multu); 1563 case Intrinsic::mips_madd: 1564 return lowerDSPIntr(Op, DAG, MipsISD::MAdd); 1565 case Intrinsic::mips_maddu: 1566 return lowerDSPIntr(Op, DAG, MipsISD::MAddu); 1567 case Intrinsic::mips_msub: 1568 return lowerDSPIntr(Op, DAG, MipsISD::MSub); 1569 case Intrinsic::mips_msubu: 1570 return lowerDSPIntr(Op, DAG, MipsISD::MSubu); 1571 case Intrinsic::mips_addv_b: 1572 case Intrinsic::mips_addv_h: 1573 case Intrinsic::mips_addv_w: 1574 case Intrinsic::mips_addv_d: 1575 return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1), 1576 Op->getOperand(2)); 1577 case Intrinsic::mips_addvi_b: 1578 case Intrinsic::mips_addvi_h: 1579 case Intrinsic::mips_addvi_w: 1580 case Intrinsic::mips_addvi_d: 1581 return DAG.getNode(ISD::ADD, DL, Op->getValueType(0), Op->getOperand(1), 1582 lowerMSASplatImm(Op, 2, DAG)); 1583 case Intrinsic::mips_and_v: 1584 return 
DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1), 1585 Op->getOperand(2)); 1586 case Intrinsic::mips_andi_b: 1587 return DAG.getNode(ISD::AND, DL, Op->getValueType(0), Op->getOperand(1), 1588 lowerMSASplatImm(Op, 2, DAG)); 1589 case Intrinsic::mips_bclr_b: 1590 case Intrinsic::mips_bclr_h: 1591 case Intrinsic::mips_bclr_w: 1592 case Intrinsic::mips_bclr_d: 1593 return lowerMSABitClear(Op, DAG); 1594 case Intrinsic::mips_bclri_b: 1595 case Intrinsic::mips_bclri_h: 1596 case Intrinsic::mips_bclri_w: 1597 case Intrinsic::mips_bclri_d: 1598 return lowerMSABitClearImm(Op, DAG); 1599 case Intrinsic::mips_binsli_b: 1600 case Intrinsic::mips_binsli_h: 1601 case Intrinsic::mips_binsli_w: 1602 case Intrinsic::mips_binsli_d: { 1603 // binsli_x(IfClear, IfSet, nbits) -> (vselect LBitsMask, IfSet, IfClear) 1604 EVT VecTy = Op->getValueType(0); 1605 EVT EltTy = VecTy.getVectorElementType(); 1606 if (Op->getConstantOperandVal(3) >= EltTy.getSizeInBits()) 1607 report_fatal_error("Immediate out of range"); 1608 APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(), 1609 Op->getConstantOperandVal(3) + 1); 1610 return DAG.getNode(ISD::VSELECT, DL, VecTy, 1611 DAG.getConstant(Mask, DL, VecTy, true), 1612 Op->getOperand(2), Op->getOperand(1)); 1613 } 1614 case Intrinsic::mips_binsri_b: 1615 case Intrinsic::mips_binsri_h: 1616 case Intrinsic::mips_binsri_w: 1617 case Intrinsic::mips_binsri_d: { 1618 // binsri_x(IfClear, IfSet, nbits) -> (vselect RBitsMask, IfSet, IfClear) 1619 EVT VecTy = Op->getValueType(0); 1620 EVT EltTy = VecTy.getVectorElementType(); 1621 if (Op->getConstantOperandVal(3) >= EltTy.getSizeInBits()) 1622 report_fatal_error("Immediate out of range"); 1623 APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(), 1624 Op->getConstantOperandVal(3) + 1); 1625 return DAG.getNode(ISD::VSELECT, DL, VecTy, 1626 DAG.getConstant(Mask, DL, VecTy, true), 1627 Op->getOperand(2), Op->getOperand(1)); 1628 } 1629 case Intrinsic::mips_bmnz_v: 1630 return 
DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3), 1631 Op->getOperand(2), Op->getOperand(1)); 1632 case Intrinsic::mips_bmnzi_b: 1633 return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), 1634 lowerMSASplatImm(Op, 3, DAG), Op->getOperand(2), 1635 Op->getOperand(1)); 1636 case Intrinsic::mips_bmz_v: 1637 return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3), 1638 Op->getOperand(1), Op->getOperand(2)); 1639 case Intrinsic::mips_bmzi_b: 1640 return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), 1641 lowerMSASplatImm(Op, 3, DAG), Op->getOperand(1), 1642 Op->getOperand(2)); 1643 case Intrinsic::mips_bneg_b: 1644 case Intrinsic::mips_bneg_h: 1645 case Intrinsic::mips_bneg_w: 1646 case Intrinsic::mips_bneg_d: { 1647 EVT VecTy = Op->getValueType(0); 1648 SDValue One = DAG.getConstant(1, DL, VecTy); 1649 1650 return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1), 1651 DAG.getNode(ISD::SHL, DL, VecTy, One, 1652 truncateVecElts(Op, DAG))); 1653 } 1654 case Intrinsic::mips_bnegi_b: 1655 case Intrinsic::mips_bnegi_h: 1656 case Intrinsic::mips_bnegi_w: 1657 case Intrinsic::mips_bnegi_d: 1658 return lowerMSABinaryBitImmIntr(Op, DAG, ISD::XOR, Op->getOperand(2), 1659 !Subtarget.isLittle()); 1660 case Intrinsic::mips_bnz_b: 1661 case Intrinsic::mips_bnz_h: 1662 case Intrinsic::mips_bnz_w: 1663 case Intrinsic::mips_bnz_d: 1664 return DAG.getNode(MipsISD::VALL_NONZERO, DL, Op->getValueType(0), 1665 Op->getOperand(1)); 1666 case Intrinsic::mips_bnz_v: 1667 return DAG.getNode(MipsISD::VANY_NONZERO, DL, Op->getValueType(0), 1668 Op->getOperand(1)); 1669 case Intrinsic::mips_bsel_v: 1670 // bsel_v(Mask, IfClear, IfSet) -> (vselect Mask, IfSet, IfClear) 1671 return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), 1672 Op->getOperand(1), Op->getOperand(3), 1673 Op->getOperand(2)); 1674 case Intrinsic::mips_bseli_b: 1675 // bseli_v(Mask, IfClear, IfSet) -> (vselect Mask, IfSet, IfClear) 1676 return DAG.getNode(ISD::VSELECT, DL, 
Op->getValueType(0), 1677 Op->getOperand(1), lowerMSASplatImm(Op, 3, DAG), 1678 Op->getOperand(2)); 1679 case Intrinsic::mips_bset_b: 1680 case Intrinsic::mips_bset_h: 1681 case Intrinsic::mips_bset_w: 1682 case Intrinsic::mips_bset_d: { 1683 EVT VecTy = Op->getValueType(0); 1684 SDValue One = DAG.getConstant(1, DL, VecTy); 1685 1686 return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1), 1687 DAG.getNode(ISD::SHL, DL, VecTy, One, 1688 truncateVecElts(Op, DAG))); 1689 } 1690 case Intrinsic::mips_bseti_b: 1691 case Intrinsic::mips_bseti_h: 1692 case Intrinsic::mips_bseti_w: 1693 case Intrinsic::mips_bseti_d: 1694 return lowerMSABinaryBitImmIntr(Op, DAG, ISD::OR, Op->getOperand(2), 1695 !Subtarget.isLittle()); 1696 case Intrinsic::mips_bz_b: 1697 case Intrinsic::mips_bz_h: 1698 case Intrinsic::mips_bz_w: 1699 case Intrinsic::mips_bz_d: 1700 return DAG.getNode(MipsISD::VALL_ZERO, DL, Op->getValueType(0), 1701 Op->getOperand(1)); 1702 case Intrinsic::mips_bz_v: 1703 return DAG.getNode(MipsISD::VANY_ZERO, DL, Op->getValueType(0), 1704 Op->getOperand(1)); 1705 case Intrinsic::mips_ceq_b: 1706 case Intrinsic::mips_ceq_h: 1707 case Intrinsic::mips_ceq_w: 1708 case Intrinsic::mips_ceq_d: 1709 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1710 Op->getOperand(2), ISD::SETEQ); 1711 case Intrinsic::mips_ceqi_b: 1712 case Intrinsic::mips_ceqi_h: 1713 case Intrinsic::mips_ceqi_w: 1714 case Intrinsic::mips_ceqi_d: 1715 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1716 lowerMSASplatImm(Op, 2, DAG, true), ISD::SETEQ); 1717 case Intrinsic::mips_cle_s_b: 1718 case Intrinsic::mips_cle_s_h: 1719 case Intrinsic::mips_cle_s_w: 1720 case Intrinsic::mips_cle_s_d: 1721 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1722 Op->getOperand(2), ISD::SETLE); 1723 case Intrinsic::mips_clei_s_b: 1724 case Intrinsic::mips_clei_s_h: 1725 case Intrinsic::mips_clei_s_w: 1726 case Intrinsic::mips_clei_s_d: 1727 return DAG.getSetCC(DL, 
Op->getValueType(0), Op->getOperand(1), 1728 lowerMSASplatImm(Op, 2, DAG, true), ISD::SETLE); 1729 case Intrinsic::mips_cle_u_b: 1730 case Intrinsic::mips_cle_u_h: 1731 case Intrinsic::mips_cle_u_w: 1732 case Intrinsic::mips_cle_u_d: 1733 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1734 Op->getOperand(2), ISD::SETULE); 1735 case Intrinsic::mips_clei_u_b: 1736 case Intrinsic::mips_clei_u_h: 1737 case Intrinsic::mips_clei_u_w: 1738 case Intrinsic::mips_clei_u_d: 1739 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1740 lowerMSASplatImm(Op, 2, DAG), ISD::SETULE); 1741 case Intrinsic::mips_clt_s_b: 1742 case Intrinsic::mips_clt_s_h: 1743 case Intrinsic::mips_clt_s_w: 1744 case Intrinsic::mips_clt_s_d: 1745 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1746 Op->getOperand(2), ISD::SETLT); 1747 case Intrinsic::mips_clti_s_b: 1748 case Intrinsic::mips_clti_s_h: 1749 case Intrinsic::mips_clti_s_w: 1750 case Intrinsic::mips_clti_s_d: 1751 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1752 lowerMSASplatImm(Op, 2, DAG, true), ISD::SETLT); 1753 case Intrinsic::mips_clt_u_b: 1754 case Intrinsic::mips_clt_u_h: 1755 case Intrinsic::mips_clt_u_w: 1756 case Intrinsic::mips_clt_u_d: 1757 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1758 Op->getOperand(2), ISD::SETULT); 1759 case Intrinsic::mips_clti_u_b: 1760 case Intrinsic::mips_clti_u_h: 1761 case Intrinsic::mips_clti_u_w: 1762 case Intrinsic::mips_clti_u_d: 1763 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1764 lowerMSASplatImm(Op, 2, DAG), ISD::SETULT); 1765 case Intrinsic::mips_copy_s_b: 1766 case Intrinsic::mips_copy_s_h: 1767 case Intrinsic::mips_copy_s_w: 1768 return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT); 1769 case Intrinsic::mips_copy_s_d: 1770 if (Subtarget.hasMips64()) 1771 // Lower directly into VEXTRACT_SEXT_ELT since i64 is legal on Mips64. 
1772 return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_SEXT_ELT); 1773 else { 1774 // Lower into the generic EXTRACT_VECTOR_ELT node and let the type 1775 // legalizer and EXTRACT_VECTOR_ELT lowering sort it out. 1776 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), 1777 Op->getValueType(0), Op->getOperand(1), 1778 Op->getOperand(2)); 1779 } 1780 case Intrinsic::mips_copy_u_b: 1781 case Intrinsic::mips_copy_u_h: 1782 case Intrinsic::mips_copy_u_w: 1783 return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT); 1784 case Intrinsic::mips_copy_u_d: 1785 if (Subtarget.hasMips64()) 1786 // Lower directly into VEXTRACT_ZEXT_ELT since i64 is legal on Mips64. 1787 return lowerMSACopyIntr(Op, DAG, MipsISD::VEXTRACT_ZEXT_ELT); 1788 else { 1789 // Lower into the generic EXTRACT_VECTOR_ELT node and let the type 1790 // legalizer and EXTRACT_VECTOR_ELT lowering sort it out. 1791 // Note: When i64 is illegal, this results in copy_s.w instructions 1792 // instead of copy_u.w instructions. This makes no difference to the 1793 // behaviour since i64 is only illegal when the register file is 32-bit. 1794 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op), 1795 Op->getValueType(0), Op->getOperand(1), 1796 Op->getOperand(2)); 1797 } 1798 case Intrinsic::mips_div_s_b: 1799 case Intrinsic::mips_div_s_h: 1800 case Intrinsic::mips_div_s_w: 1801 case Intrinsic::mips_div_s_d: 1802 return DAG.getNode(ISD::SDIV, DL, Op->getValueType(0), Op->getOperand(1), 1803 Op->getOperand(2)); 1804 case Intrinsic::mips_div_u_b: 1805 case Intrinsic::mips_div_u_h: 1806 case Intrinsic::mips_div_u_w: 1807 case Intrinsic::mips_div_u_d: 1808 return DAG.getNode(ISD::UDIV, DL, Op->getValueType(0), Op->getOperand(1), 1809 Op->getOperand(2)); 1810 case Intrinsic::mips_fadd_w: 1811 case Intrinsic::mips_fadd_d: 1812 // TODO: If intrinsics have fast-math-flags, propagate them. 
1813 return DAG.getNode(ISD::FADD, DL, Op->getValueType(0), Op->getOperand(1), 1814 Op->getOperand(2)); 1815 // Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away 1816 case Intrinsic::mips_fceq_w: 1817 case Intrinsic::mips_fceq_d: 1818 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1819 Op->getOperand(2), ISD::SETOEQ); 1820 case Intrinsic::mips_fcle_w: 1821 case Intrinsic::mips_fcle_d: 1822 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1823 Op->getOperand(2), ISD::SETOLE); 1824 case Intrinsic::mips_fclt_w: 1825 case Intrinsic::mips_fclt_d: 1826 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1827 Op->getOperand(2), ISD::SETOLT); 1828 case Intrinsic::mips_fcne_w: 1829 case Intrinsic::mips_fcne_d: 1830 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1831 Op->getOperand(2), ISD::SETONE); 1832 case Intrinsic::mips_fcor_w: 1833 case Intrinsic::mips_fcor_d: 1834 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1835 Op->getOperand(2), ISD::SETO); 1836 case Intrinsic::mips_fcueq_w: 1837 case Intrinsic::mips_fcueq_d: 1838 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1839 Op->getOperand(2), ISD::SETUEQ); 1840 case Intrinsic::mips_fcule_w: 1841 case Intrinsic::mips_fcule_d: 1842 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1843 Op->getOperand(2), ISD::SETULE); 1844 case Intrinsic::mips_fcult_w: 1845 case Intrinsic::mips_fcult_d: 1846 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1847 Op->getOperand(2), ISD::SETULT); 1848 case Intrinsic::mips_fcun_w: 1849 case Intrinsic::mips_fcun_d: 1850 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1851 Op->getOperand(2), ISD::SETUO); 1852 case Intrinsic::mips_fcune_w: 1853 case Intrinsic::mips_fcune_d: 1854 return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1), 1855 Op->getOperand(2), ISD::SETUNE); 1856 case Intrinsic::mips_fdiv_w: 1857 case 
Intrinsic::mips_fdiv_d: 1858 // TODO: If intrinsics have fast-math-flags, propagate them. 1859 return DAG.getNode(ISD::FDIV, DL, Op->getValueType(0), Op->getOperand(1), 1860 Op->getOperand(2)); 1861 case Intrinsic::mips_ffint_u_w: 1862 case Intrinsic::mips_ffint_u_d: 1863 return DAG.getNode(ISD::UINT_TO_FP, DL, Op->getValueType(0), 1864 Op->getOperand(1)); 1865 case Intrinsic::mips_ffint_s_w: 1866 case Intrinsic::mips_ffint_s_d: 1867 return DAG.getNode(ISD::SINT_TO_FP, DL, Op->getValueType(0), 1868 Op->getOperand(1)); 1869 case Intrinsic::mips_fill_b: 1870 case Intrinsic::mips_fill_h: 1871 case Intrinsic::mips_fill_w: 1872 case Intrinsic::mips_fill_d: { 1873 EVT ResTy = Op->getValueType(0); 1874 SmallVector<SDValue, 16> Ops(ResTy.getVectorNumElements(), 1875 Op->getOperand(1)); 1876 1877 // If ResTy is v2i64 then the type legalizer will break this node down into 1878 // an equivalent v4i32. 1879 return DAG.getBuildVector(ResTy, DL, Ops); 1880 } 1881 case Intrinsic::mips_fexp2_w: 1882 case Intrinsic::mips_fexp2_d: { 1883 // TODO: If intrinsics have fast-math-flags, propagate them. 1884 EVT ResTy = Op->getValueType(0); 1885 return DAG.getNode( 1886 ISD::FMUL, SDLoc(Op), ResTy, Op->getOperand(1), 1887 DAG.getNode(ISD::FEXP2, SDLoc(Op), ResTy, Op->getOperand(2))); 1888 } 1889 case Intrinsic::mips_flog2_w: 1890 case Intrinsic::mips_flog2_d: 1891 return DAG.getNode(ISD::FLOG2, DL, Op->getValueType(0), Op->getOperand(1)); 1892 case Intrinsic::mips_fmadd_w: 1893 case Intrinsic::mips_fmadd_d: 1894 return DAG.getNode(ISD::FMA, SDLoc(Op), Op->getValueType(0), 1895 Op->getOperand(1), Op->getOperand(2), Op->getOperand(3)); 1896 case Intrinsic::mips_fmul_w: 1897 case Intrinsic::mips_fmul_d: 1898 // TODO: If intrinsics have fast-math-flags, propagate them. 
1899 return DAG.getNode(ISD::FMUL, DL, Op->getValueType(0), Op->getOperand(1), 1900 Op->getOperand(2)); 1901 case Intrinsic::mips_fmsub_w: 1902 case Intrinsic::mips_fmsub_d: { 1903 // TODO: If intrinsics have fast-math-flags, propagate them. 1904 return DAG.getNode(MipsISD::FMS, SDLoc(Op), Op->getValueType(0), 1905 Op->getOperand(1), Op->getOperand(2), Op->getOperand(3)); 1906 } 1907 case Intrinsic::mips_frint_w: 1908 case Intrinsic::mips_frint_d: 1909 return DAG.getNode(ISD::FRINT, DL, Op->getValueType(0), Op->getOperand(1)); 1910 case Intrinsic::mips_fsqrt_w: 1911 case Intrinsic::mips_fsqrt_d: 1912 return DAG.getNode(ISD::FSQRT, DL, Op->getValueType(0), Op->getOperand(1)); 1913 case Intrinsic::mips_fsub_w: 1914 case Intrinsic::mips_fsub_d: 1915 // TODO: If intrinsics have fast-math-flags, propagate them. 1916 return DAG.getNode(ISD::FSUB, DL, Op->getValueType(0), Op->getOperand(1), 1917 Op->getOperand(2)); 1918 case Intrinsic::mips_ftrunc_u_w: 1919 case Intrinsic::mips_ftrunc_u_d: 1920 return DAG.getNode(ISD::FP_TO_UINT, DL, Op->getValueType(0), 1921 Op->getOperand(1)); 1922 case Intrinsic::mips_ftrunc_s_w: 1923 case Intrinsic::mips_ftrunc_s_d: 1924 return DAG.getNode(ISD::FP_TO_SINT, DL, Op->getValueType(0), 1925 Op->getOperand(1)); 1926 case Intrinsic::mips_ilvev_b: 1927 case Intrinsic::mips_ilvev_h: 1928 case Intrinsic::mips_ilvev_w: 1929 case Intrinsic::mips_ilvev_d: 1930 return DAG.getNode(MipsISD::ILVEV, DL, Op->getValueType(0), 1931 Op->getOperand(1), Op->getOperand(2)); 1932 case Intrinsic::mips_ilvl_b: 1933 case Intrinsic::mips_ilvl_h: 1934 case Intrinsic::mips_ilvl_w: 1935 case Intrinsic::mips_ilvl_d: 1936 return DAG.getNode(MipsISD::ILVL, DL, Op->getValueType(0), 1937 Op->getOperand(1), Op->getOperand(2)); 1938 case Intrinsic::mips_ilvod_b: 1939 case Intrinsic::mips_ilvod_h: 1940 case Intrinsic::mips_ilvod_w: 1941 case Intrinsic::mips_ilvod_d: 1942 return DAG.getNode(MipsISD::ILVOD, DL, Op->getValueType(0), 1943 Op->getOperand(1), Op->getOperand(2)); 
1944 case Intrinsic::mips_ilvr_b: 1945 case Intrinsic::mips_ilvr_h: 1946 case Intrinsic::mips_ilvr_w: 1947 case Intrinsic::mips_ilvr_d: 1948 return DAG.getNode(MipsISD::ILVR, DL, Op->getValueType(0), 1949 Op->getOperand(1), Op->getOperand(2)); 1950 case Intrinsic::mips_insert_b: 1951 case Intrinsic::mips_insert_h: 1952 case Intrinsic::mips_insert_w: 1953 case Intrinsic::mips_insert_d: 1954 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), Op->getValueType(0), 1955 Op->getOperand(1), Op->getOperand(3), Op->getOperand(2)); 1956 case Intrinsic::mips_insve_b: 1957 case Intrinsic::mips_insve_h: 1958 case Intrinsic::mips_insve_w: 1959 case Intrinsic::mips_insve_d: { 1960 // Report an error for out of range values. 1961 int64_t Max; 1962 switch (Intrinsic) { 1963 case Intrinsic::mips_insve_b: Max = 15; break; 1964 case Intrinsic::mips_insve_h: Max = 7; break; 1965 case Intrinsic::mips_insve_w: Max = 3; break; 1966 case Intrinsic::mips_insve_d: Max = 1; break; 1967 default: llvm_unreachable("Unmatched intrinsic"); 1968 } 1969 int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue(); 1970 if (Value < 0 || Value > Max) 1971 report_fatal_error("Immediate out of range"); 1972 return DAG.getNode(MipsISD::INSVE, DL, Op->getValueType(0), 1973 Op->getOperand(1), Op->getOperand(2), Op->getOperand(3), 1974 DAG.getConstant(0, DL, MVT::i32)); 1975 } 1976 case Intrinsic::mips_ldi_b: 1977 case Intrinsic::mips_ldi_h: 1978 case Intrinsic::mips_ldi_w: 1979 case Intrinsic::mips_ldi_d: 1980 return lowerMSASplatImm(Op, 1, DAG, true); 1981 case Intrinsic::mips_lsa: 1982 case Intrinsic::mips_dlsa: { 1983 EVT ResTy = Op->getValueType(0); 1984 return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1), 1985 DAG.getNode(ISD::SHL, SDLoc(Op), ResTy, 1986 Op->getOperand(2), Op->getOperand(3))); 1987 } 1988 case Intrinsic::mips_maddv_b: 1989 case Intrinsic::mips_maddv_h: 1990 case Intrinsic::mips_maddv_w: 1991 case Intrinsic::mips_maddv_d: { 1992 EVT ResTy = 
Op->getValueType(0); 1993 return DAG.getNode(ISD::ADD, SDLoc(Op), ResTy, Op->getOperand(1), 1994 DAG.getNode(ISD::MUL, SDLoc(Op), ResTy, 1995 Op->getOperand(2), Op->getOperand(3))); 1996 } 1997 case Intrinsic::mips_max_s_b: 1998 case Intrinsic::mips_max_s_h: 1999 case Intrinsic::mips_max_s_w: 2000 case Intrinsic::mips_max_s_d: 2001 return DAG.getNode(ISD::SMAX, DL, Op->getValueType(0), 2002 Op->getOperand(1), Op->getOperand(2)); 2003 case Intrinsic::mips_max_u_b: 2004 case Intrinsic::mips_max_u_h: 2005 case Intrinsic::mips_max_u_w: 2006 case Intrinsic::mips_max_u_d: 2007 return DAG.getNode(ISD::UMAX, DL, Op->getValueType(0), 2008 Op->getOperand(1), Op->getOperand(2)); 2009 case Intrinsic::mips_maxi_s_b: 2010 case Intrinsic::mips_maxi_s_h: 2011 case Intrinsic::mips_maxi_s_w: 2012 case Intrinsic::mips_maxi_s_d: 2013 return DAG.getNode(ISD::SMAX, DL, Op->getValueType(0), 2014 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG, true)); 2015 case Intrinsic::mips_maxi_u_b: 2016 case Intrinsic::mips_maxi_u_h: 2017 case Intrinsic::mips_maxi_u_w: 2018 case Intrinsic::mips_maxi_u_d: 2019 return DAG.getNode(ISD::UMAX, DL, Op->getValueType(0), 2020 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2021 case Intrinsic::mips_min_s_b: 2022 case Intrinsic::mips_min_s_h: 2023 case Intrinsic::mips_min_s_w: 2024 case Intrinsic::mips_min_s_d: 2025 return DAG.getNode(ISD::SMIN, DL, Op->getValueType(0), 2026 Op->getOperand(1), Op->getOperand(2)); 2027 case Intrinsic::mips_min_u_b: 2028 case Intrinsic::mips_min_u_h: 2029 case Intrinsic::mips_min_u_w: 2030 case Intrinsic::mips_min_u_d: 2031 return DAG.getNode(ISD::UMIN, DL, Op->getValueType(0), 2032 Op->getOperand(1), Op->getOperand(2)); 2033 case Intrinsic::mips_mini_s_b: 2034 case Intrinsic::mips_mini_s_h: 2035 case Intrinsic::mips_mini_s_w: 2036 case Intrinsic::mips_mini_s_d: 2037 return DAG.getNode(ISD::SMIN, DL, Op->getValueType(0), 2038 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG, true)); 2039 case Intrinsic::mips_mini_u_b: 2040 case 
Intrinsic::mips_mini_u_h: 2041 case Intrinsic::mips_mini_u_w: 2042 case Intrinsic::mips_mini_u_d: 2043 return DAG.getNode(ISD::UMIN, DL, Op->getValueType(0), 2044 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2045 case Intrinsic::mips_mod_s_b: 2046 case Intrinsic::mips_mod_s_h: 2047 case Intrinsic::mips_mod_s_w: 2048 case Intrinsic::mips_mod_s_d: 2049 return DAG.getNode(ISD::SREM, DL, Op->getValueType(0), Op->getOperand(1), 2050 Op->getOperand(2)); 2051 case Intrinsic::mips_mod_u_b: 2052 case Intrinsic::mips_mod_u_h: 2053 case Intrinsic::mips_mod_u_w: 2054 case Intrinsic::mips_mod_u_d: 2055 return DAG.getNode(ISD::UREM, DL, Op->getValueType(0), Op->getOperand(1), 2056 Op->getOperand(2)); 2057 case Intrinsic::mips_mulv_b: 2058 case Intrinsic::mips_mulv_h: 2059 case Intrinsic::mips_mulv_w: 2060 case Intrinsic::mips_mulv_d: 2061 return DAG.getNode(ISD::MUL, DL, Op->getValueType(0), Op->getOperand(1), 2062 Op->getOperand(2)); 2063 case Intrinsic::mips_msubv_b: 2064 case Intrinsic::mips_msubv_h: 2065 case Intrinsic::mips_msubv_w: 2066 case Intrinsic::mips_msubv_d: { 2067 EVT ResTy = Op->getValueType(0); 2068 return DAG.getNode(ISD::SUB, SDLoc(Op), ResTy, Op->getOperand(1), 2069 DAG.getNode(ISD::MUL, SDLoc(Op), ResTy, 2070 Op->getOperand(2), Op->getOperand(3))); 2071 } 2072 case Intrinsic::mips_nlzc_b: 2073 case Intrinsic::mips_nlzc_h: 2074 case Intrinsic::mips_nlzc_w: 2075 case Intrinsic::mips_nlzc_d: 2076 return DAG.getNode(ISD::CTLZ, DL, Op->getValueType(0), Op->getOperand(1)); 2077 case Intrinsic::mips_nor_v: { 2078 SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0), 2079 Op->getOperand(1), Op->getOperand(2)); 2080 return DAG.getNOT(DL, Res, Res->getValueType(0)); 2081 } 2082 case Intrinsic::mips_nori_b: { 2083 SDValue Res = DAG.getNode(ISD::OR, DL, Op->getValueType(0), 2084 Op->getOperand(1), 2085 lowerMSASplatImm(Op, 2, DAG)); 2086 return DAG.getNOT(DL, Res, Res->getValueType(0)); 2087 } 2088 case Intrinsic::mips_or_v: 2089 return DAG.getNode(ISD::OR, 
DL, Op->getValueType(0), Op->getOperand(1), 2090 Op->getOperand(2)); 2091 case Intrinsic::mips_ori_b: 2092 return DAG.getNode(ISD::OR, DL, Op->getValueType(0), 2093 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2094 case Intrinsic::mips_pckev_b: 2095 case Intrinsic::mips_pckev_h: 2096 case Intrinsic::mips_pckev_w: 2097 case Intrinsic::mips_pckev_d: 2098 return DAG.getNode(MipsISD::PCKEV, DL, Op->getValueType(0), 2099 Op->getOperand(1), Op->getOperand(2)); 2100 case Intrinsic::mips_pckod_b: 2101 case Intrinsic::mips_pckod_h: 2102 case Intrinsic::mips_pckod_w: 2103 case Intrinsic::mips_pckod_d: 2104 return DAG.getNode(MipsISD::PCKOD, DL, Op->getValueType(0), 2105 Op->getOperand(1), Op->getOperand(2)); 2106 case Intrinsic::mips_pcnt_b: 2107 case Intrinsic::mips_pcnt_h: 2108 case Intrinsic::mips_pcnt_w: 2109 case Intrinsic::mips_pcnt_d: 2110 return DAG.getNode(ISD::CTPOP, DL, Op->getValueType(0), Op->getOperand(1)); 2111 case Intrinsic::mips_sat_s_b: 2112 case Intrinsic::mips_sat_s_h: 2113 case Intrinsic::mips_sat_s_w: 2114 case Intrinsic::mips_sat_s_d: 2115 case Intrinsic::mips_sat_u_b: 2116 case Intrinsic::mips_sat_u_h: 2117 case Intrinsic::mips_sat_u_w: 2118 case Intrinsic::mips_sat_u_d: { 2119 // Report an error for out of range values. 
2120 int64_t Max; 2121 switch (Intrinsic) { 2122 case Intrinsic::mips_sat_s_b: 2123 case Intrinsic::mips_sat_u_b: Max = 7; break; 2124 case Intrinsic::mips_sat_s_h: 2125 case Intrinsic::mips_sat_u_h: Max = 15; break; 2126 case Intrinsic::mips_sat_s_w: 2127 case Intrinsic::mips_sat_u_w: Max = 31; break; 2128 case Intrinsic::mips_sat_s_d: 2129 case Intrinsic::mips_sat_u_d: Max = 63; break; 2130 default: llvm_unreachable("Unmatched intrinsic"); 2131 } 2132 int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue(); 2133 if (Value < 0 || Value > Max) 2134 report_fatal_error("Immediate out of range"); 2135 return SDValue(); 2136 } 2137 case Intrinsic::mips_shf_b: 2138 case Intrinsic::mips_shf_h: 2139 case Intrinsic::mips_shf_w: { 2140 int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue(); 2141 if (Value < 0 || Value > 255) 2142 report_fatal_error("Immediate out of range"); 2143 return DAG.getNode(MipsISD::SHF, DL, Op->getValueType(0), 2144 Op->getOperand(2), Op->getOperand(1)); 2145 } 2146 case Intrinsic::mips_sldi_b: 2147 case Intrinsic::mips_sldi_h: 2148 case Intrinsic::mips_sldi_w: 2149 case Intrinsic::mips_sldi_d: { 2150 // Report an error for out of range values. 
2151 int64_t Max; 2152 switch (Intrinsic) { 2153 case Intrinsic::mips_sldi_b: Max = 15; break; 2154 case Intrinsic::mips_sldi_h: Max = 7; break; 2155 case Intrinsic::mips_sldi_w: Max = 3; break; 2156 case Intrinsic::mips_sldi_d: Max = 1; break; 2157 default: llvm_unreachable("Unmatched intrinsic"); 2158 } 2159 int64_t Value = cast<ConstantSDNode>(Op->getOperand(3))->getSExtValue(); 2160 if (Value < 0 || Value > Max) 2161 report_fatal_error("Immediate out of range"); 2162 return SDValue(); 2163 } 2164 case Intrinsic::mips_sll_b: 2165 case Intrinsic::mips_sll_h: 2166 case Intrinsic::mips_sll_w: 2167 case Intrinsic::mips_sll_d: 2168 return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), Op->getOperand(1), 2169 truncateVecElts(Op, DAG)); 2170 case Intrinsic::mips_slli_b: 2171 case Intrinsic::mips_slli_h: 2172 case Intrinsic::mips_slli_w: 2173 case Intrinsic::mips_slli_d: 2174 return DAG.getNode(ISD::SHL, DL, Op->getValueType(0), 2175 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2176 case Intrinsic::mips_splat_b: 2177 case Intrinsic::mips_splat_h: 2178 case Intrinsic::mips_splat_w: 2179 case Intrinsic::mips_splat_d: 2180 // We can't lower via VECTOR_SHUFFLE because it requires constant shuffle 2181 // masks, nor can we lower via BUILD_VECTOR & EXTRACT_VECTOR_ELT because 2182 // EXTRACT_VECTOR_ELT can't extract i64's on MIPS32. 2183 // Instead we lower to MipsISD::VSHF and match from there. 
2184 return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0), 2185 lowerMSASplatZExt(Op, 2, DAG), Op->getOperand(1), 2186 Op->getOperand(1)); 2187 case Intrinsic::mips_splati_b: 2188 case Intrinsic::mips_splati_h: 2189 case Intrinsic::mips_splati_w: 2190 case Intrinsic::mips_splati_d: 2191 return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0), 2192 lowerMSASplatImm(Op, 2, DAG), Op->getOperand(1), 2193 Op->getOperand(1)); 2194 case Intrinsic::mips_sra_b: 2195 case Intrinsic::mips_sra_h: 2196 case Intrinsic::mips_sra_w: 2197 case Intrinsic::mips_sra_d: 2198 return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), Op->getOperand(1), 2199 truncateVecElts(Op, DAG)); 2200 case Intrinsic::mips_srai_b: 2201 case Intrinsic::mips_srai_h: 2202 case Intrinsic::mips_srai_w: 2203 case Intrinsic::mips_srai_d: 2204 return DAG.getNode(ISD::SRA, DL, Op->getValueType(0), 2205 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2206 case Intrinsic::mips_srari_b: 2207 case Intrinsic::mips_srari_h: 2208 case Intrinsic::mips_srari_w: 2209 case Intrinsic::mips_srari_d: { 2210 // Report an error for out of range values. 
2211 int64_t Max; 2212 switch (Intrinsic) { 2213 case Intrinsic::mips_srari_b: Max = 7; break; 2214 case Intrinsic::mips_srari_h: Max = 15; break; 2215 case Intrinsic::mips_srari_w: Max = 31; break; 2216 case Intrinsic::mips_srari_d: Max = 63; break; 2217 default: llvm_unreachable("Unmatched intrinsic"); 2218 } 2219 int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue(); 2220 if (Value < 0 || Value > Max) 2221 report_fatal_error("Immediate out of range"); 2222 return SDValue(); 2223 } 2224 case Intrinsic::mips_srl_b: 2225 case Intrinsic::mips_srl_h: 2226 case Intrinsic::mips_srl_w: 2227 case Intrinsic::mips_srl_d: 2228 return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), Op->getOperand(1), 2229 truncateVecElts(Op, DAG)); 2230 case Intrinsic::mips_srli_b: 2231 case Intrinsic::mips_srli_h: 2232 case Intrinsic::mips_srli_w: 2233 case Intrinsic::mips_srli_d: 2234 return DAG.getNode(ISD::SRL, DL, Op->getValueType(0), 2235 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2236 case Intrinsic::mips_srlri_b: 2237 case Intrinsic::mips_srlri_h: 2238 case Intrinsic::mips_srlri_w: 2239 case Intrinsic::mips_srlri_d: { 2240 // Report an error for out of range values. 
2241 int64_t Max; 2242 switch (Intrinsic) { 2243 case Intrinsic::mips_srlri_b: Max = 7; break; 2244 case Intrinsic::mips_srlri_h: Max = 15; break; 2245 case Intrinsic::mips_srlri_w: Max = 31; break; 2246 case Intrinsic::mips_srlri_d: Max = 63; break; 2247 default: llvm_unreachable("Unmatched intrinsic"); 2248 } 2249 int64_t Value = cast<ConstantSDNode>(Op->getOperand(2))->getSExtValue(); 2250 if (Value < 0 || Value > Max) 2251 report_fatal_error("Immediate out of range"); 2252 return SDValue(); 2253 } 2254 case Intrinsic::mips_subv_b: 2255 case Intrinsic::mips_subv_h: 2256 case Intrinsic::mips_subv_w: 2257 case Intrinsic::mips_subv_d: 2258 return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), Op->getOperand(1), 2259 Op->getOperand(2)); 2260 case Intrinsic::mips_subvi_b: 2261 case Intrinsic::mips_subvi_h: 2262 case Intrinsic::mips_subvi_w: 2263 case Intrinsic::mips_subvi_d: 2264 return DAG.getNode(ISD::SUB, DL, Op->getValueType(0), 2265 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2266 case Intrinsic::mips_vshf_b: 2267 case Intrinsic::mips_vshf_h: 2268 case Intrinsic::mips_vshf_w: 2269 case Intrinsic::mips_vshf_d: 2270 return DAG.getNode(MipsISD::VSHF, DL, Op->getValueType(0), 2271 Op->getOperand(1), Op->getOperand(2), Op->getOperand(3)); 2272 case Intrinsic::mips_xor_v: 2273 return DAG.getNode(ISD::XOR, DL, Op->getValueType(0), Op->getOperand(1), 2274 Op->getOperand(2)); 2275 case Intrinsic::mips_xori_b: 2276 return DAG.getNode(ISD::XOR, DL, Op->getValueType(0), 2277 Op->getOperand(1), lowerMSASplatImm(Op, 2, DAG)); 2278 case Intrinsic::thread_pointer: { 2279 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 2280 return DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT); 2281 } 2282 } 2283 } 2284 2285 static SDValue lowerMSALoadIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr, 2286 const MipsSubtarget &Subtarget) { 2287 SDLoc DL(Op); 2288 SDValue ChainIn = Op->getOperand(0); 2289 SDValue Address = Op->getOperand(2); 2290 SDValue Offset = Op->getOperand(3); 2291 EVT 
ResTy = Op->getValueType(0);
  EVT PtrTy = Address->getValueType(0);

  // For N64 addresses have the underlying type MVT::i64. This intrinsic
  // however takes an i32 signed constant offset. The actual type of the
  // intrinsic is a scaled signed i10.
  if (Subtarget.isABI_N64())
    Offset = DAG.getNode(ISD::SIGN_EXTEND, DL, PtrTy, Offset);

  // Fold the offset into the address and emit a plain 16-byte-aligned load.
  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);
  return DAG.getLoad(ResTy, DL, ChainIn, Address, MachinePointerInfo(),
                     Align(16));
}

// Lower chained intrinsics: DSP accumulator operations are forwarded to
// lowerDSPIntr and MSA vector loads to lowerMSALoadIntr; everything else is
// left for the default expansion.
SDValue MipsSETargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
  switch (Intr) {
  default:
    return SDValue();
  case Intrinsic::mips_extp:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTP);
  case Intrinsic::mips_extpdp:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTPDP);
  case Intrinsic::mips_extr_w:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_W);
  case Intrinsic::mips_extr_r_w:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W);
  case Intrinsic::mips_extr_rs_w:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W);
  case Intrinsic::mips_extr_s_h:
    return lowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H);
  case Intrinsic::mips_mthlip:
    return lowerDSPIntr(Op, DAG, MipsISD::MTHLIP);
  case Intrinsic::mips_mulsaq_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH);
  case Intrinsic::mips_maq_s_w_phl:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL);
  case Intrinsic::mips_maq_s_w_phr:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR);
  case Intrinsic::mips_maq_sa_w_phl:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL);
  case Intrinsic::mips_maq_sa_w_phr:
    return lowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR);
  case Intrinsic::mips_dpaq_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH);
  case Intrinsic::mips_dpsq_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH);
  case Intrinsic::mips_dpaq_sa_l_w:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W);
  case Intrinsic::mips_dpsq_sa_l_w:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W);
  case Intrinsic::mips_dpaqx_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH);
  case Intrinsic::mips_dpaqx_sa_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH);
  case Intrinsic::mips_dpsqx_s_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH);
  case Intrinsic::mips_dpsqx_sa_w_ph:
    return lowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH);
  case Intrinsic::mips_ld_b:
  case Intrinsic::mips_ld_h:
  case Intrinsic::mips_ld_w:
  case Intrinsic::mips_ld_d:
    return lowerMSALoadIntr(Op, DAG, Intr, Subtarget);
  }
}

// Lower an MSA st.[bhwd] intrinsic into an ordinary 16-byte-aligned store of
// the vector value at Address + Offset.
static SDValue lowerMSAStoreIntr(SDValue Op, SelectionDAG &DAG, unsigned Intr,
                                 const MipsSubtarget &Subtarget) {
  SDLoc DL(Op);
  SDValue ChainIn = Op->getOperand(0);
  SDValue Value = Op->getOperand(2);
  SDValue Address = Op->getOperand(3);
  SDValue Offset = Op->getOperand(4);
  EVT PtrTy = Address->getValueType(0);

  // For N64 addresses have the underlying type MVT::i64. This intrinsic
  // however takes an i32 signed constant offset. The actual type of the
  // intrinsic is a scaled signed i10.
  if (Subtarget.isABI_N64())
    Offset = DAG.getNode(ISD::SIGN_EXTEND, DL, PtrTy, Offset);

  Address = DAG.getNode(ISD::ADD, DL, PtrTy, Address, Offset);

  return DAG.getStore(ChainIn, DL, Value, Address, MachinePointerInfo(),
                      Align(16));
}

// Lower void intrinsics: only the MSA st.[bhwd] family is handled here.
SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op,
                                                  SelectionDAG &DAG) const {
  unsigned Intr = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();
  switch (Intr) {
  default:
    return SDValue();
  case Intrinsic::mips_st_b:
  case Intrinsic::mips_st_h:
  case Intrinsic::mips_st_w:
  case Intrinsic::mips_st_d:
    return lowerMSAStoreIntr(Op, DAG, Intr, Subtarget);
  }
}

// Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT.
//
// The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We
// choose to sign-extend but we could have equally chosen zero-extend. The
// DAGCombiner will fold any sign/zero extension of the ISD::EXTRACT_VECTOR_ELT
// result into this node later (possibly changing it to a zero-extend in the
// process).
2401 SDValue MipsSETargetLowering:: 2402 lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { 2403 SDLoc DL(Op); 2404 EVT ResTy = Op->getValueType(0); 2405 SDValue Op0 = Op->getOperand(0); 2406 EVT VecTy = Op0->getValueType(0); 2407 2408 if (!VecTy.is128BitVector()) 2409 return SDValue(); 2410 2411 if (ResTy.isInteger()) { 2412 SDValue Op1 = Op->getOperand(1); 2413 EVT EltTy = VecTy.getVectorElementType(); 2414 return DAG.getNode(MipsISD::VEXTRACT_SEXT_ELT, DL, ResTy, Op0, Op1, 2415 DAG.getValueType(EltTy)); 2416 } 2417 2418 return Op; 2419 } 2420 2421 static bool isConstantOrUndef(const SDValue Op) { 2422 if (Op->isUndef()) 2423 return true; 2424 if (isa<ConstantSDNode>(Op)) 2425 return true; 2426 if (isa<ConstantFPSDNode>(Op)) 2427 return true; 2428 return false; 2429 } 2430 2431 static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) { 2432 for (unsigned i = 0; i < Op->getNumOperands(); ++i) 2433 if (isConstantOrUndef(Op->getOperand(i))) 2434 return true; 2435 return false; 2436 } 2437 2438 // Lowers ISD::BUILD_VECTOR into appropriate SelectionDAG nodes for the 2439 // backend. 2440 // 2441 // Lowers according to the following rules: 2442 // - Constant splats are legal as-is as long as the SplatBitSize is a power of 2443 // 2 less than or equal to 64 and the value fits into a signed 10-bit 2444 // immediate 2445 // - Constant splats are lowered to bitconverted BUILD_VECTORs if SplatBitSize 2446 // is a power of 2 less than or equal to 64 and the value does not fit into a 2447 // signed 10-bit immediate 2448 // - Non-constant splats are legal as-is. 2449 // - Non-constant non-splats are lowered to sequences of INSERT_VECTOR_ELT. 2450 // - All others are illegal and must be expanded. 
2451 SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op, 2452 SelectionDAG &DAG) const { 2453 BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op); 2454 EVT ResTy = Op->getValueType(0); 2455 SDLoc DL(Op); 2456 APInt SplatValue, SplatUndef; 2457 unsigned SplatBitSize; 2458 bool HasAnyUndefs; 2459 2460 if (!Subtarget.hasMSA() || !ResTy.is128BitVector()) 2461 return SDValue(); 2462 2463 if (Node->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, 2464 HasAnyUndefs, 8, 2465 !Subtarget.isLittle()) && SplatBitSize <= 64) { 2466 // We can only cope with 8, 16, 32, or 64-bit elements 2467 if (SplatBitSize != 8 && SplatBitSize != 16 && SplatBitSize != 32 && 2468 SplatBitSize != 64) 2469 return SDValue(); 2470 2471 // If the value isn't an integer type we will have to bitcast 2472 // from an integer type first. Also, if there are any undefs, we must 2473 // lower them to defined values first. 2474 if (ResTy.isInteger() && !HasAnyUndefs) 2475 return Op; 2476 2477 EVT ViaVecTy; 2478 2479 switch (SplatBitSize) { 2480 default: 2481 return SDValue(); 2482 case 8: 2483 ViaVecTy = MVT::v16i8; 2484 break; 2485 case 16: 2486 ViaVecTy = MVT::v8i16; 2487 break; 2488 case 32: 2489 ViaVecTy = MVT::v4i32; 2490 break; 2491 case 64: 2492 // There's no fill.d to fall back on for 64-bit values 2493 return SDValue(); 2494 } 2495 2496 // SelectionDAG::getConstant will promote SplatValue appropriately. 2497 SDValue Result = DAG.getConstant(SplatValue, DL, ViaVecTy); 2498 2499 // Bitcast to the type we originally wanted 2500 if (ViaVecTy != ResTy) 2501 Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result); 2502 2503 return Result; 2504 } else if (DAG.isSplatValue(Op, /* AllowUndefs */ false)) 2505 return Op; 2506 else if (!isConstantOrUndefBUILD_VECTOR(Node)) { 2507 // Use INSERT_VECTOR_ELT operations rather than expand to stores. 
2508 // The resulting code is the same length as the expansion, but it doesn't 2509 // use memory operations 2510 EVT ResTy = Node->getValueType(0); 2511 2512 assert(ResTy.isVector()); 2513 2514 unsigned NumElts = ResTy.getVectorNumElements(); 2515 SDValue Vector = DAG.getUNDEF(ResTy); 2516 for (unsigned i = 0; i < NumElts; ++i) { 2517 Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector, 2518 Node->getOperand(i), 2519 DAG.getConstant(i, DL, MVT::i32)); 2520 } 2521 return Vector; 2522 } 2523 2524 return SDValue(); 2525 } 2526 2527 // Lower VECTOR_SHUFFLE into SHF (if possible). 2528 // 2529 // SHF splits the vector into blocks of four elements, then shuffles these 2530 // elements according to a <4 x i2> constant (encoded as an integer immediate). 2531 // 2532 // It is therefore possible to lower into SHF when the mask takes the form: 2533 // <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...> 2534 // When undef's appear they are treated as if they were whatever value is 2535 // necessary in order to fit the above forms. 
2536 // 2537 // For example: 2538 // %2 = shufflevector <8 x i16> %0, <8 x i16> undef, 2539 // <8 x i32> <i32 3, i32 2, i32 1, i32 0, 2540 // i32 7, i32 6, i32 5, i32 4> 2541 // is lowered to: 2542 // (SHF_H $w0, $w1, 27) 2543 // where the 27 comes from: 2544 // 3 + (2 << 2) + (1 << 4) + (0 << 6) 2545 static SDValue lowerVECTOR_SHUFFLE_SHF(SDValue Op, EVT ResTy, 2546 SmallVector<int, 16> Indices, 2547 SelectionDAG &DAG) { 2548 int SHFIndices[4] = { -1, -1, -1, -1 }; 2549 2550 if (Indices.size() < 4) 2551 return SDValue(); 2552 2553 for (unsigned i = 0; i < 4; ++i) { 2554 for (unsigned j = i; j < Indices.size(); j += 4) { 2555 int Idx = Indices[j]; 2556 2557 // Convert from vector index to 4-element subvector index 2558 // If an index refers to an element outside of the subvector then give up 2559 if (Idx != -1) { 2560 Idx -= 4 * (j / 4); 2561 if (Idx < 0 || Idx >= 4) 2562 return SDValue(); 2563 } 2564 2565 // If the mask has an undef, replace it with the current index. 2566 // Note that it might still be undef if the current index is also undef 2567 if (SHFIndices[i] == -1) 2568 SHFIndices[i] = Idx; 2569 2570 // Check that non-undef values are the same as in the mask. If they 2571 // aren't then give up 2572 if (!(Idx == -1 || Idx == SHFIndices[i])) 2573 return SDValue(); 2574 } 2575 } 2576 2577 // Calculate the immediate. Replace any remaining undefs with zero 2578 APInt Imm(32, 0); 2579 for (int i = 3; i >= 0; --i) { 2580 int Idx = SHFIndices[i]; 2581 2582 if (Idx == -1) 2583 Idx = 0; 2584 2585 Imm <<= 2; 2586 Imm |= Idx & 0x3; 2587 } 2588 2589 SDLoc DL(Op); 2590 return DAG.getNode(MipsISD::SHF, DL, ResTy, 2591 DAG.getTargetConstant(Imm, DL, MVT::i32), 2592 Op->getOperand(0)); 2593 } 2594 2595 /// Determine whether a range fits a regular pattern of values. 2596 /// This function accounts for the possibility of jumping over the End iterator. 
2597 template <typename ValType> 2598 static bool 2599 fitsRegularPattern(typename SmallVectorImpl<ValType>::const_iterator Begin, 2600 unsigned CheckStride, 2601 typename SmallVectorImpl<ValType>::const_iterator End, 2602 ValType ExpectedIndex, unsigned ExpectedIndexStride) { 2603 auto &I = Begin; 2604 2605 while (I != End) { 2606 if (*I != -1 && *I != ExpectedIndex) 2607 return false; 2608 ExpectedIndex += ExpectedIndexStride; 2609 2610 // Incrementing past End is undefined behaviour so we must increment one 2611 // step at a time and check for End at each step. 2612 for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I) 2613 ; // Empty loop body. 2614 } 2615 return true; 2616 } 2617 2618 // Determine whether VECTOR_SHUFFLE is a SPLATI. 2619 // 2620 // It is a SPLATI when the mask is: 2621 // <x, x, x, ...> 2622 // where x is any valid index. 2623 // 2624 // When undef's appear in the mask they are treated as if they were whatever 2625 // value is necessary in order to fit the above form. 2626 static bool isVECTOR_SHUFFLE_SPLATI(SDValue Op, EVT ResTy, 2627 SmallVector<int, 16> Indices, 2628 SelectionDAG &DAG) { 2629 assert((Indices.size() % 2) == 0); 2630 2631 int SplatIndex = -1; 2632 for (const auto &V : Indices) { 2633 if (V != -1) { 2634 SplatIndex = V; 2635 break; 2636 } 2637 } 2638 2639 return fitsRegularPattern<int>(Indices.begin(), 1, Indices.end(), SplatIndex, 2640 0); 2641 } 2642 2643 // Lower VECTOR_SHUFFLE into ILVEV (if possible). 2644 // 2645 // ILVEV interleaves the even elements from each vector. 2646 // 2647 // It is possible to lower into ILVEV when the mask consists of two of the 2648 // following forms interleaved: 2649 // <0, 2, 4, ...> 2650 // <n, n+2, n+4, ...> 2651 // where n is the number of elements in the vector. 
2652 // For example: 2653 // <0, 0, 2, 2, 4, 4, ...> 2654 // <0, n, 2, n+2, 4, n+4, ...> 2655 // 2656 // When undef's appear in the mask they are treated as if they were whatever 2657 // value is necessary in order to fit the above forms. 2658 static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy, 2659 SmallVector<int, 16> Indices, 2660 SelectionDAG &DAG) { 2661 assert((Indices.size() % 2) == 0); 2662 2663 SDValue Wt; 2664 SDValue Ws; 2665 const auto &Begin = Indices.begin(); 2666 const auto &End = Indices.end(); 2667 2668 // Check even elements are taken from the even elements of one half or the 2669 // other and pick an operand accordingly. 2670 if (fitsRegularPattern<int>(Begin, 2, End, 0, 2)) 2671 Wt = Op->getOperand(0); 2672 else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size(), 2)) 2673 Wt = Op->getOperand(1); 2674 else 2675 return SDValue(); 2676 2677 // Check odd elements are taken from the even elements of one half or the 2678 // other and pick an operand accordingly. 2679 if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2)) 2680 Ws = Op->getOperand(0); 2681 else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size(), 2)) 2682 Ws = Op->getOperand(1); 2683 else 2684 return SDValue(); 2685 2686 return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Ws, Wt); 2687 } 2688 2689 // Lower VECTOR_SHUFFLE into ILVOD (if possible). 2690 // 2691 // ILVOD interleaves the odd elements from each vector. 2692 // 2693 // It is possible to lower into ILVOD when the mask consists of two of the 2694 // following forms interleaved: 2695 // <1, 3, 5, ...> 2696 // <n+1, n+3, n+5, ...> 2697 // where n is the number of elements in the vector. 2698 // For example: 2699 // <1, 1, 3, 3, 5, 5, ...> 2700 // <1, n+1, 3, n+3, 5, n+5, ...> 2701 // 2702 // When undef's appear in the mask they are treated as if they were whatever 2703 // value is necessary in order to fit the above forms. 
2704 static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy, 2705 SmallVector<int, 16> Indices, 2706 SelectionDAG &DAG) { 2707 assert((Indices.size() % 2) == 0); 2708 2709 SDValue Wt; 2710 SDValue Ws; 2711 const auto &Begin = Indices.begin(); 2712 const auto &End = Indices.end(); 2713 2714 // Check even elements are taken from the odd elements of one half or the 2715 // other and pick an operand accordingly. 2716 if (fitsRegularPattern<int>(Begin, 2, End, 1, 2)) 2717 Wt = Op->getOperand(0); 2718 else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size() + 1, 2)) 2719 Wt = Op->getOperand(1); 2720 else 2721 return SDValue(); 2722 2723 // Check odd elements are taken from the odd elements of one half or the 2724 // other and pick an operand accordingly. 2725 if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2)) 2726 Ws = Op->getOperand(0); 2727 else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size() + 1, 2)) 2728 Ws = Op->getOperand(1); 2729 else 2730 return SDValue(); 2731 2732 return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Wt, Ws); 2733 } 2734 2735 // Lower VECTOR_SHUFFLE into ILVR (if possible). 2736 // 2737 // ILVR interleaves consecutive elements from the right (lowest-indexed) half of 2738 // each vector. 2739 // 2740 // It is possible to lower into ILVR when the mask consists of two of the 2741 // following forms interleaved: 2742 // <0, 1, 2, ...> 2743 // <n, n+1, n+2, ...> 2744 // where n is the number of elements in the vector. 2745 // For example: 2746 // <0, 0, 1, 1, 2, 2, ...> 2747 // <0, n, 1, n+1, 2, n+2, ...> 2748 // 2749 // When undef's appear in the mask they are treated as if they were whatever 2750 // value is necessary in order to fit the above forms. 
2751 static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy, 2752 SmallVector<int, 16> Indices, 2753 SelectionDAG &DAG) { 2754 assert((Indices.size() % 2) == 0); 2755 2756 SDValue Wt; 2757 SDValue Ws; 2758 const auto &Begin = Indices.begin(); 2759 const auto &End = Indices.end(); 2760 2761 // Check even elements are taken from the right (lowest-indexed) elements of 2762 // one half or the other and pick an operand accordingly. 2763 if (fitsRegularPattern<int>(Begin, 2, End, 0, 1)) 2764 Wt = Op->getOperand(0); 2765 else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size(), 1)) 2766 Wt = Op->getOperand(1); 2767 else 2768 return SDValue(); 2769 2770 // Check odd elements are taken from the right (lowest-indexed) elements of 2771 // one half or the other and pick an operand accordingly. 2772 if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1)) 2773 Ws = Op->getOperand(0); 2774 else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size(), 1)) 2775 Ws = Op->getOperand(1); 2776 else 2777 return SDValue(); 2778 2779 return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Ws, Wt); 2780 } 2781 2782 // Lower VECTOR_SHUFFLE into ILVL (if possible). 2783 // 2784 // ILVL interleaves consecutive elements from the left (highest-indexed) half 2785 // of each vector. 2786 // 2787 // It is possible to lower into ILVL when the mask consists of two of the 2788 // following forms interleaved: 2789 // <x, x+1, x+2, ...> 2790 // <n+x, n+x+1, n+x+2, ...> 2791 // where n is the number of elements in the vector and x is half n. 2792 // For example: 2793 // <x, x, x+1, x+1, x+2, x+2, ...> 2794 // <x, n+x, x+1, n+x+1, x+2, n+x+2, ...> 2795 // 2796 // When undef's appear in the mask they are treated as if they were whatever 2797 // value is necessary in order to fit the above forms. 
2798 static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy, 2799 SmallVector<int, 16> Indices, 2800 SelectionDAG &DAG) { 2801 assert((Indices.size() % 2) == 0); 2802 2803 unsigned HalfSize = Indices.size() / 2; 2804 SDValue Wt; 2805 SDValue Ws; 2806 const auto &Begin = Indices.begin(); 2807 const auto &End = Indices.end(); 2808 2809 // Check even elements are taken from the left (highest-indexed) elements of 2810 // one half or the other and pick an operand accordingly. 2811 if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1)) 2812 Wt = Op->getOperand(0); 2813 else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size() + HalfSize, 1)) 2814 Wt = Op->getOperand(1); 2815 else 2816 return SDValue(); 2817 2818 // Check odd elements are taken from the left (highest-indexed) elements of 2819 // one half or the other and pick an operand accordingly. 2820 if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1)) 2821 Ws = Op->getOperand(0); 2822 else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size() + HalfSize, 2823 1)) 2824 Ws = Op->getOperand(1); 2825 else 2826 return SDValue(); 2827 2828 return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Ws, Wt); 2829 } 2830 2831 // Lower VECTOR_SHUFFLE into PCKEV (if possible). 2832 // 2833 // PCKEV copies the even elements of each vector into the result vector. 2834 // 2835 // It is possible to lower into PCKEV when the mask consists of two of the 2836 // following forms concatenated: 2837 // <0, 2, 4, ...> 2838 // <n, n+2, n+4, ...> 2839 // where n is the number of elements in the vector. 2840 // For example: 2841 // <0, 2, 4, ..., 0, 2, 4, ...> 2842 // <0, 2, 4, ..., n, n+2, n+4, ...> 2843 // 2844 // When undef's appear in the mask they are treated as if they were whatever 2845 // value is necessary in order to fit the above forms. 
2846 static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy, 2847 SmallVector<int, 16> Indices, 2848 SelectionDAG &DAG) { 2849 assert((Indices.size() % 2) == 0); 2850 2851 SDValue Wt; 2852 SDValue Ws; 2853 const auto &Begin = Indices.begin(); 2854 const auto &Mid = Indices.begin() + Indices.size() / 2; 2855 const auto &End = Indices.end(); 2856 2857 if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2)) 2858 Wt = Op->getOperand(0); 2859 else if (fitsRegularPattern<int>(Begin, 1, Mid, Indices.size(), 2)) 2860 Wt = Op->getOperand(1); 2861 else 2862 return SDValue(); 2863 2864 if (fitsRegularPattern<int>(Mid, 1, End, 0, 2)) 2865 Ws = Op->getOperand(0); 2866 else if (fitsRegularPattern<int>(Mid, 1, End, Indices.size(), 2)) 2867 Ws = Op->getOperand(1); 2868 else 2869 return SDValue(); 2870 2871 return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Ws, Wt); 2872 } 2873 2874 // Lower VECTOR_SHUFFLE into PCKOD (if possible). 2875 // 2876 // PCKOD copies the odd elements of each vector into the result vector. 2877 // 2878 // It is possible to lower into PCKOD when the mask consists of two of the 2879 // following forms concatenated: 2880 // <1, 3, 5, ...> 2881 // <n+1, n+3, n+5, ...> 2882 // where n is the number of elements in the vector. 2883 // For example: 2884 // <1, 3, 5, ..., 1, 3, 5, ...> 2885 // <1, 3, 5, ..., n+1, n+3, n+5, ...> 2886 // 2887 // When undef's appear in the mask they are treated as if they were whatever 2888 // value is necessary in order to fit the above forms. 
2889 static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy, 2890 SmallVector<int, 16> Indices, 2891 SelectionDAG &DAG) { 2892 assert((Indices.size() % 2) == 0); 2893 2894 SDValue Wt; 2895 SDValue Ws; 2896 const auto &Begin = Indices.begin(); 2897 const auto &Mid = Indices.begin() + Indices.size() / 2; 2898 const auto &End = Indices.end(); 2899 2900 if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2)) 2901 Wt = Op->getOperand(0); 2902 else if (fitsRegularPattern<int>(Begin, 1, Mid, Indices.size() + 1, 2)) 2903 Wt = Op->getOperand(1); 2904 else 2905 return SDValue(); 2906 2907 if (fitsRegularPattern<int>(Mid, 1, End, 1, 2)) 2908 Ws = Op->getOperand(0); 2909 else if (fitsRegularPattern<int>(Mid, 1, End, Indices.size() + 1, 2)) 2910 Ws = Op->getOperand(1); 2911 else 2912 return SDValue(); 2913 2914 return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Ws, Wt); 2915 } 2916 2917 // Lower VECTOR_SHUFFLE into VSHF. 2918 // 2919 // This mostly consists of converting the shuffle indices in Indices into a 2920 // BUILD_VECTOR and adding it as an operand to the resulting VSHF. There is 2921 // also code to eliminate unused operands of the VECTOR_SHUFFLE. For example, 2922 // if the type is v8i16 and all the indices are less than 8 then the second 2923 // operand is unused and can be replaced with anything. We choose to replace it 2924 // with the used operand since this reduces the number of instructions overall. 
static SDValue lowerVECTOR_SHUFFLE_VSHF(SDValue Op, EVT ResTy,
                                        const SmallVector<int, 16> &Indices,
                                        SelectionDAG &DAG) {
  SmallVector<SDValue, 16> Ops;
  SDValue Op0;
  SDValue Op1;
  // The mask vector uses the integer type matching ResTy's element count.
  EVT MaskVecTy = ResTy.changeVectorElementTypeToInteger();
  EVT MaskEltTy = MaskVecTy.getVectorElementType();
  bool Using1stVec = false;
  bool Using2ndVec = false;
  SDLoc DL(Op);
  int ResTyNumElts = ResTy.getVectorNumElements();

  // Work out which of the two input vectors the mask actually references.
  for (int i = 0; i < ResTyNumElts; ++i) {
    // Idx == -1 means UNDEF
    int Idx = Indices[i];

    if (0 <= Idx && Idx < ResTyNumElts)
      Using1stVec = true;
    if (ResTyNumElts <= Idx && Idx < ResTyNumElts * 2)
      Using2ndVec = true;
  }

  // Materialize the shuffle mask as a constant BUILD_VECTOR operand.
  for (int Idx : Indices)
    Ops.push_back(DAG.getTargetConstant(Idx, DL, MaskEltTy));

  SDValue MaskVec = DAG.getBuildVector(MaskVecTy, DL, Ops);

  // If only one input vector is referenced, use it for both VSHF operands so
  // the unused operand doesn't keep an extra value alive.
  if (Using1stVec && Using2ndVec) {
    Op0 = Op->getOperand(0);
    Op1 = Op->getOperand(1);
  } else if (Using1stVec)
    Op0 = Op1 = Op->getOperand(0);
  else if (Using2ndVec)
    Op0 = Op1 = Op->getOperand(1);
  else
    llvm_unreachable("shuffle vector mask references neither vector operand?");

  // VECTOR_SHUFFLE concatenates the vectors in an vectorwise fashion.
  // <0b00, 0b01> + <0b10, 0b11> -> <0b00, 0b01, 0b10, 0b11>
  // VSHF concatenates the vectors in a bitwise fashion:
  // <0b00, 0b01> + <0b10, 0b11> ->
  // 0b0100 + 0b1110 -> 0b01001110
  // <0b10, 0b11, 0b00, 0b01>
  // We must therefore swap the operands to get the correct result.
  return DAG.getNode(MipsISD::VSHF, DL, ResTy, MaskVec, Op1, Op0);
}

// Lower VECTOR_SHUFFLE into one of a number of instructions depending on the
// indices in the shuffle.
2975 SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op, 2976 SelectionDAG &DAG) const { 2977 ShuffleVectorSDNode *Node = cast<ShuffleVectorSDNode>(Op); 2978 EVT ResTy = Op->getValueType(0); 2979 2980 if (!ResTy.is128BitVector()) 2981 return SDValue(); 2982 2983 int ResTyNumElts = ResTy.getVectorNumElements(); 2984 SmallVector<int, 16> Indices; 2985 2986 for (int i = 0; i < ResTyNumElts; ++i) 2987 Indices.push_back(Node->getMaskElt(i)); 2988 2989 // splati.[bhwd] is preferable to the others but is matched from 2990 // MipsISD::VSHF. 2991 if (isVECTOR_SHUFFLE_SPLATI(Op, ResTy, Indices, DAG)) 2992 return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG); 2993 SDValue Result; 2994 if ((Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG))) 2995 return Result; 2996 if ((Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG))) 2997 return Result; 2998 if ((Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG))) 2999 return Result; 3000 if ((Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG))) 3001 return Result; 3002 if ((Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG))) 3003 return Result; 3004 if ((Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG))) 3005 return Result; 3006 if ((Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG))) 3007 return Result; 3008 return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG); 3009 } 3010 3011 MachineBasicBlock * 3012 MipsSETargetLowering::emitBPOSGE32(MachineInstr &MI, 3013 MachineBasicBlock *BB) const { 3014 // $bb: 3015 // bposge32_pseudo $vr0 3016 // => 3017 // $bb: 3018 // bposge32 $tbb 3019 // $fbb: 3020 // li $vr2, 0 3021 // b $sink 3022 // $tbb: 3023 // li $vr1, 1 3024 // $sink: 3025 // $vr0 = phi($vr2, $fbb, $vr1, $tbb) 3026 3027 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3028 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3029 const TargetRegisterClass *RC = &Mips::GPR32RegClass; 3030 DebugLoc DL = MI.getDebugLoc(); 3031 const 
BasicBlock *LLVM_BB = BB->getBasicBlock(); 3032 MachineFunction::iterator It = std::next(MachineFunction::iterator(BB)); 3033 MachineFunction *F = BB->getParent(); 3034 MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB); 3035 MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB); 3036 MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB); 3037 F->insert(It, FBB); 3038 F->insert(It, TBB); 3039 F->insert(It, Sink); 3040 3041 // Transfer the remainder of BB and its successor edges to Sink. 3042 Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)), 3043 BB->end()); 3044 Sink->transferSuccessorsAndUpdatePHIs(BB); 3045 3046 // Add successors. 3047 BB->addSuccessor(FBB); 3048 BB->addSuccessor(TBB); 3049 FBB->addSuccessor(Sink); 3050 TBB->addSuccessor(Sink); 3051 3052 // Insert the real bposge32 instruction to $BB. 3053 BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB); 3054 // Insert the real bposge32c instruction to $BB. 3055 BuildMI(BB, DL, TII->get(Mips::BPOSGE32C_MMR3)).addMBB(TBB); 3056 3057 // Fill $FBB. 3058 Register VR2 = RegInfo.createVirtualRegister(RC); 3059 BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2) 3060 .addReg(Mips::ZERO).addImm(0); 3061 BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink); 3062 3063 // Fill $TBB. 3064 Register VR1 = RegInfo.createVirtualRegister(RC); 3065 BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1) 3066 .addReg(Mips::ZERO).addImm(1); 3067 3068 // Insert phi function to $Sink. 3069 BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI), 3070 MI.getOperand(0).getReg()) 3071 .addReg(VR2) 3072 .addMBB(FBB) 3073 .addReg(VR1) 3074 .addMBB(TBB); 3075 3076 MI.eraseFromParent(); // The pseudo instruction is gone now. 
3077 return Sink; 3078 } 3079 3080 MachineBasicBlock *MipsSETargetLowering::emitMSACBranchPseudo( 3081 MachineInstr &MI, MachineBasicBlock *BB, unsigned BranchOp) const { 3082 // $bb: 3083 // vany_nonzero $rd, $ws 3084 // => 3085 // $bb: 3086 // bnz.b $ws, $tbb 3087 // b $fbb 3088 // $fbb: 3089 // li $rd1, 0 3090 // b $sink 3091 // $tbb: 3092 // li $rd2, 1 3093 // $sink: 3094 // $rd = phi($rd1, $fbb, $rd2, $tbb) 3095 3096 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3097 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3098 const TargetRegisterClass *RC = &Mips::GPR32RegClass; 3099 DebugLoc DL = MI.getDebugLoc(); 3100 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 3101 MachineFunction::iterator It = std::next(MachineFunction::iterator(BB)); 3102 MachineFunction *F = BB->getParent(); 3103 MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB); 3104 MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB); 3105 MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB); 3106 F->insert(It, FBB); 3107 F->insert(It, TBB); 3108 F->insert(It, Sink); 3109 3110 // Transfer the remainder of BB and its successor edges to Sink. 3111 Sink->splice(Sink->begin(), BB, std::next(MachineBasicBlock::iterator(MI)), 3112 BB->end()); 3113 Sink->transferSuccessorsAndUpdatePHIs(BB); 3114 3115 // Add successors. 3116 BB->addSuccessor(FBB); 3117 BB->addSuccessor(TBB); 3118 FBB->addSuccessor(Sink); 3119 TBB->addSuccessor(Sink); 3120 3121 // Insert the real bnz.b instruction to $BB. 3122 BuildMI(BB, DL, TII->get(BranchOp)) 3123 .addReg(MI.getOperand(1).getReg()) 3124 .addMBB(TBB); 3125 3126 // Fill $FBB. 3127 Register RD1 = RegInfo.createVirtualRegister(RC); 3128 BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), RD1) 3129 .addReg(Mips::ZERO).addImm(0); 3130 BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink); 3131 3132 // Fill $TBB. 
3133 Register RD2 = RegInfo.createVirtualRegister(RC); 3134 BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), RD2) 3135 .addReg(Mips::ZERO).addImm(1); 3136 3137 // Insert phi function to $Sink. 3138 BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI), 3139 MI.getOperand(0).getReg()) 3140 .addReg(RD1) 3141 .addMBB(FBB) 3142 .addReg(RD2) 3143 .addMBB(TBB); 3144 3145 MI.eraseFromParent(); // The pseudo instruction is gone now. 3146 return Sink; 3147 } 3148 3149 // Emit the COPY_FW pseudo instruction. 3150 // 3151 // copy_fw_pseudo $fd, $ws, n 3152 // => 3153 // copy_u_w $rt, $ws, $n 3154 // mtc1 $rt, $fd 3155 // 3156 // When n is zero, the equivalent operation can be performed with (potentially) 3157 // zero instructions due to register overlaps. This optimization is never valid 3158 // for lane 1 because it would require FR=0 mode which isn't supported by MSA. 3159 MachineBasicBlock * 3160 MipsSETargetLowering::emitCOPY_FW(MachineInstr &MI, 3161 MachineBasicBlock *BB) const { 3162 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3163 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3164 DebugLoc DL = MI.getDebugLoc(); 3165 Register Fd = MI.getOperand(0).getReg(); 3166 Register Ws = MI.getOperand(1).getReg(); 3167 unsigned Lane = MI.getOperand(2).getImm(); 3168 3169 if (Lane == 0) { 3170 unsigned Wt = Ws; 3171 if (!Subtarget.useOddSPReg()) { 3172 // We must copy to an even-numbered MSA register so that the 3173 // single-precision sub-register is also guaranteed to be even-numbered. 3174 Wt = RegInfo.createVirtualRegister(&Mips::MSA128WEvensRegClass); 3175 3176 BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Wt).addReg(Ws); 3177 } 3178 3179 BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo); 3180 } else { 3181 Register Wt = RegInfo.createVirtualRegister( 3182 Subtarget.useOddSPReg() ? 
&Mips::MSA128WRegClass 3183 : &Mips::MSA128WEvensRegClass); 3184 3185 BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane); 3186 BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo); 3187 } 3188 3189 MI.eraseFromParent(); // The pseudo instruction is gone now. 3190 return BB; 3191 } 3192 3193 // Emit the COPY_FD pseudo instruction. 3194 // 3195 // copy_fd_pseudo $fd, $ws, n 3196 // => 3197 // splati.d $wt, $ws, $n 3198 // copy $fd, $wt:sub_64 3199 // 3200 // When n is zero, the equivalent operation can be performed with (potentially) 3201 // zero instructions due to register overlaps. This optimization is always 3202 // valid because FR=1 mode which is the only supported mode in MSA. 3203 MachineBasicBlock * 3204 MipsSETargetLowering::emitCOPY_FD(MachineInstr &MI, 3205 MachineBasicBlock *BB) const { 3206 assert(Subtarget.isFP64bit()); 3207 3208 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3209 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3210 Register Fd = MI.getOperand(0).getReg(); 3211 Register Ws = MI.getOperand(1).getReg(); 3212 unsigned Lane = MI.getOperand(2).getImm() * 2; 3213 DebugLoc DL = MI.getDebugLoc(); 3214 3215 if (Lane == 0) 3216 BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_64); 3217 else { 3218 Register Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); 3219 3220 BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wt).addReg(Ws).addImm(1); 3221 BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_64); 3222 } 3223 3224 MI.eraseFromParent(); // The pseudo instruction is gone now. 3225 return BB; 3226 } 3227 3228 // Emit the INSERT_FW pseudo instruction. 
3229 // 3230 // insert_fw_pseudo $wd, $wd_in, $n, $fs 3231 // => 3232 // subreg_to_reg $wt:sub_lo, $fs 3233 // insve_w $wd[$n], $wd_in, $wt[0] 3234 MachineBasicBlock * 3235 MipsSETargetLowering::emitINSERT_FW(MachineInstr &MI, 3236 MachineBasicBlock *BB) const { 3237 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3238 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3239 DebugLoc DL = MI.getDebugLoc(); 3240 Register Wd = MI.getOperand(0).getReg(); 3241 Register Wd_in = MI.getOperand(1).getReg(); 3242 unsigned Lane = MI.getOperand(2).getImm(); 3243 Register Fs = MI.getOperand(3).getReg(); 3244 Register Wt = RegInfo.createVirtualRegister( 3245 Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass 3246 : &Mips::MSA128WEvensRegClass); 3247 3248 BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt) 3249 .addImm(0) 3250 .addReg(Fs) 3251 .addImm(Mips::sub_lo); 3252 BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_W), Wd) 3253 .addReg(Wd_in) 3254 .addImm(Lane) 3255 .addReg(Wt) 3256 .addImm(0); 3257 3258 MI.eraseFromParent(); // The pseudo instruction is gone now. 3259 return BB; 3260 } 3261 3262 // Emit the INSERT_FD pseudo instruction. 
3263 // 3264 // insert_fd_pseudo $wd, $fs, n 3265 // => 3266 // subreg_to_reg $wt:sub_64, $fs 3267 // insve_d $wd[$n], $wd_in, $wt[0] 3268 MachineBasicBlock * 3269 MipsSETargetLowering::emitINSERT_FD(MachineInstr &MI, 3270 MachineBasicBlock *BB) const { 3271 assert(Subtarget.isFP64bit()); 3272 3273 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3274 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3275 DebugLoc DL = MI.getDebugLoc(); 3276 Register Wd = MI.getOperand(0).getReg(); 3277 Register Wd_in = MI.getOperand(1).getReg(); 3278 unsigned Lane = MI.getOperand(2).getImm(); 3279 Register Fs = MI.getOperand(3).getReg(); 3280 Register Wt = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); 3281 3282 BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt) 3283 .addImm(0) 3284 .addReg(Fs) 3285 .addImm(Mips::sub_64); 3286 BuildMI(*BB, MI, DL, TII->get(Mips::INSVE_D), Wd) 3287 .addReg(Wd_in) 3288 .addImm(Lane) 3289 .addReg(Wt) 3290 .addImm(0); 3291 3292 MI.eraseFromParent(); // The pseudo instruction is gone now. 3293 return BB; 3294 } 3295 3296 // Emit the INSERT_([BHWD]|F[WD])_VIDX pseudo instruction. 
//
// For integer:
// (INSERT_([BHWD]|F[WD])_PSEUDO $wd, $wd_in, $n, $rs)
// =>
// (SLL $lanetmp1, $lane, <log2size>)
// (SLD_B $wdtmp1, $wd_in, $wd_in, $lanetmp1)
// (INSERT_[BHWD], $wdtmp2, $wdtmp1, 0, $rs)
// (NEG $lanetmp2, $lanetmp1)
// (SLD_B $wd, $wdtmp2, $wdtmp2, $lanetmp2)
//
// For floating point:
// (INSERT_([BHWD]|F[WD])_PSEUDO $wd, $wd_in, $n, $fs)
// =>
// (SUBREG_TO_REG $wt, $fs, <subreg>)
// (SLL $lanetmp1, $lane, <log2size>)
// (SLD_B $wdtmp1, $wd_in, $wd_in, $lanetmp1)
// (INSVE_[WD], $wdtmp2, 0, $wdtmp1, 0)
// (NEG $lanetmp2, $lanetmp1)
// (SLD_B $wd, $wdtmp2, $wdtmp2, $lanetmp2)
MachineBasicBlock *MipsSETargetLowering::emitINSERT_DF_VIDX(
    MachineInstr &MI, MachineBasicBlock *BB, unsigned EltSizeInBytes,
    bool IsFP) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();
  Register Wd = MI.getOperand(0).getReg();
  Register SrcVecReg = MI.getOperand(1).getReg();
  Register LaneReg = MI.getOperand(2).getReg();
  Register SrcValReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *VecRC = nullptr;
  // FIXME: This should be true for N32 too.
  const TargetRegisterClass *GPRRC =
      Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
  // On N64 the lane value lives in a 64-bit GPR; SLD_B reads its 32-bit
  // sub-register via SubRegIdx.
  unsigned SubRegIdx = Subtarget.isABI_N64() ? Mips::sub_32 : 0;
  unsigned ShiftOp = Subtarget.isABI_N64() ? Mips::DSLL : Mips::SLL;
  unsigned EltLog2Size;
  unsigned InsertOp = 0;
  unsigned InsveOp = 0;
  // Select the per-element-size opcodes and register class.
  switch (EltSizeInBytes) {
  default:
    llvm_unreachable("Unexpected size");
  case 1:
    EltLog2Size = 0;
    InsertOp = Mips::INSERT_B;
    InsveOp = Mips::INSVE_B;
    VecRC = &Mips::MSA128BRegClass;
    break;
  case 2:
    EltLog2Size = 1;
    InsertOp = Mips::INSERT_H;
    InsveOp = Mips::INSVE_H;
    VecRC = &Mips::MSA128HRegClass;
    break;
  case 4:
    EltLog2Size = 2;
    InsertOp = Mips::INSERT_W;
    InsveOp = Mips::INSVE_W;
    VecRC = &Mips::MSA128WRegClass;
    break;
  case 8:
    EltLog2Size = 3;
    InsertOp = Mips::INSERT_D;
    InsveOp = Mips::INSVE_D;
    VecRC = &Mips::MSA128DRegClass;
    break;
  }

  if (IsFP) {
    // Move the FP value into an MSA register so insve.df can read it.
    Register Wt = RegInfo.createVirtualRegister(VecRC);
    BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
        .addImm(0)
        .addReg(SrcValReg)
        .addImm(EltSizeInBytes == 8 ? Mips::sub_64 : Mips::sub_lo);
    SrcValReg = Wt;
  }

  // Convert the lane index into a byte index
  if (EltSizeInBytes != 1) {
    Register LaneTmp1 = RegInfo.createVirtualRegister(GPRRC);
    BuildMI(*BB, MI, DL, TII->get(ShiftOp), LaneTmp1)
        .addReg(LaneReg)
        .addImm(EltLog2Size);
    LaneReg = LaneTmp1;
  }

  // Rotate bytes around so that the desired lane is element zero
  Register WdTmp1 = RegInfo.createVirtualRegister(VecRC);
  BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), WdTmp1)
      .addReg(SrcVecReg)
      .addReg(SrcVecReg)
      .addReg(LaneReg, 0, SubRegIdx);

  Register WdTmp2 = RegInfo.createVirtualRegister(VecRC);
  if (IsFP) {
    // Use insve.df to insert to element zero
    BuildMI(*BB, MI, DL, TII->get(InsveOp), WdTmp2)
        .addReg(WdTmp1)
        .addImm(0)
        .addReg(SrcValReg)
        .addImm(0);
  } else {
    // Use insert.df to insert to element zero
    BuildMI(*BB, MI, DL, TII->get(InsertOp), WdTmp2)
        .addReg(WdTmp1)
        .addReg(SrcValReg)
        .addImm(0);
  }

  // Rotate elements the rest of the way for a full rotation.
  // sld.df inteprets $rt modulo the number of columns so we only need to negate
  // the lane index to do this.
  Register LaneTmp2 = RegInfo.createVirtualRegister(GPRRC);
  BuildMI(*BB, MI, DL, TII->get(Subtarget.isABI_N64() ? Mips::DSUB : Mips::SUB),
          LaneTmp2)
      .addReg(Subtarget.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO)
      .addReg(LaneReg);
  BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), Wd)
      .addReg(WdTmp2)
      .addReg(WdTmp2)
      .addReg(LaneTmp2, 0, SubRegIdx);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

// Emit the FILL_FW pseudo instruction.
//
// fill_fw_pseudo $wd, $fs
// =>
// implicit_def $wt1
// insert_subreg $wt2:subreg_lo, $wt1, $fs
// splati.w $wd, $wt2[0]
MachineBasicBlock *
MipsSETargetLowering::emitFILL_FW(MachineInstr &MI,
                                  MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();
  Register Wd = MI.getOperand(0).getReg();
  Register Fs = MI.getOperand(1).getReg();
  // Without odd-numbered single-precision registers the temporaries must be
  // even-numbered so the f32 sub-register is addressable.
  Register Wt1 = RegInfo.createVirtualRegister(
      Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass
                              : &Mips::MSA128WEvensRegClass);
  Register Wt2 = RegInfo.createVirtualRegister(
      Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass
                              : &Mips::MSA128WEvensRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
      .addReg(Wt1)
      .addReg(Fs)
      .addImm(Mips::sub_lo);
  BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wd).addReg(Wt2).addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

// Emit the FILL_FD pseudo instruction.
//
// fill_fd_pseudo $wd, $fs
// =>
// implicit_def $wt1
// insert_subreg $wt2:subreg_64, $wt1, $fs
// splati.d $wd, $wt2[0]
MachineBasicBlock *
MipsSETargetLowering::emitFILL_FD(MachineInstr &MI,
                                  MachineBasicBlock *BB) const {
  // splati.d requires 64-bit FPU registers (FR=1).
  assert(Subtarget.isFP64bit());

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
  DebugLoc DL = MI.getDebugLoc();
  Register Wd = MI.getOperand(0).getReg();
  Register Fs = MI.getOperand(1).getReg();
  Register Wt1 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);
  Register Wt2 = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass);

  BuildMI(*BB, MI, DL, TII->get(Mips::IMPLICIT_DEF), Wt1);
  BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_SUBREG), Wt2)
      .addReg(Wt1)
      .addReg(Fs)
      .addImm(Mips::sub_64);
  BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_D), Wd).addReg(Wt2).addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

// Emit the ST_F16_PSEUDO instruction to store a f16 value from an MSA
// register.
//
// STF16 MSA128F16:$wd, mem_simm10:$addr
// =>
// copy_u.h $rtemp,$wd[0]
// sh $rtemp, $addr
//
// Safety: We can't use st.h & co as they would over write the memory after
// the destination. It would require half floats be allocated 16 bytes(!) of
// space.
3498 MachineBasicBlock * 3499 MipsSETargetLowering::emitST_F16_PSEUDO(MachineInstr &MI, 3500 MachineBasicBlock *BB) const { 3501 3502 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3503 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3504 DebugLoc DL = MI.getDebugLoc(); 3505 Register Ws = MI.getOperand(0).getReg(); 3506 Register Rt = MI.getOperand(1).getReg(); 3507 const MachineMemOperand &MMO = **MI.memoperands_begin(); 3508 unsigned Imm = MMO.getOffset(); 3509 3510 // Caution: A load via the GOT can expand to a GPR32 operand, a load via 3511 // spill and reload can expand as a GPR64 operand. Examine the 3512 // operand in detail and default to ABI. 3513 const TargetRegisterClass *RC = 3514 MI.getOperand(1).isReg() ? RegInfo.getRegClass(MI.getOperand(1).getReg()) 3515 : (Subtarget.isABI_O32() ? &Mips::GPR32RegClass 3516 : &Mips::GPR64RegClass); 3517 const bool UsingMips32 = RC == &Mips::GPR32RegClass; 3518 Register Rs = RegInfo.createVirtualRegister(&Mips::GPR32RegClass); 3519 3520 BuildMI(*BB, MI, DL, TII->get(Mips::COPY_U_H), Rs).addReg(Ws).addImm(0); 3521 if(!UsingMips32) { 3522 Register Tmp = RegInfo.createVirtualRegister(&Mips::GPR64RegClass); 3523 BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Tmp) 3524 .addImm(0) 3525 .addReg(Rs) 3526 .addImm(Mips::sub_32); 3527 Rs = Tmp; 3528 } 3529 BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? Mips::SH : Mips::SH64)) 3530 .addReg(Rs) 3531 .addReg(Rt) 3532 .addImm(Imm) 3533 .addMemOperand(BB->getParent()->getMachineMemOperand( 3534 &MMO, MMO.getOffset(), MMO.getSize())); 3535 3536 MI.eraseFromParent(); 3537 return BB; 3538 } 3539 3540 // Emit the LD_F16_PSEDUO instruction to load a f16 value into an MSA register. 3541 // 3542 // LD_F16 MSA128F16:$wd, mem_simm10:$addr 3543 // => 3544 // lh $rtemp, $addr 3545 // fill.h $wd, $rtemp 3546 // 3547 // Safety: We can't use ld.h & co as they over-read from the source. 
3548 // Additionally, if the address is not modulo 16, 2 cases can occur: 3549 // a) Segmentation fault as the load instruction reads from a memory page 3550 // memory it's not supposed to. 3551 // b) The load crosses an implementation specific boundary, requiring OS 3552 // intervention. 3553 MachineBasicBlock * 3554 MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI, 3555 MachineBasicBlock *BB) const { 3556 3557 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3558 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3559 DebugLoc DL = MI.getDebugLoc(); 3560 Register Wd = MI.getOperand(0).getReg(); 3561 3562 // Caution: A load via the GOT can expand to a GPR32 operand, a load via 3563 // spill and reload can expand as a GPR64 operand. Examine the 3564 // operand in detail and default to ABI. 3565 const TargetRegisterClass *RC = 3566 MI.getOperand(1).isReg() ? RegInfo.getRegClass(MI.getOperand(1).getReg()) 3567 : (Subtarget.isABI_O32() ? &Mips::GPR32RegClass 3568 : &Mips::GPR64RegClass); 3569 3570 const bool UsingMips32 = RC == &Mips::GPR32RegClass; 3571 Register Rt = RegInfo.createVirtualRegister(RC); 3572 3573 MachineInstrBuilder MIB = 3574 BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? Mips::LH : Mips::LH64), Rt); 3575 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) 3576 MIB.add(MO); 3577 3578 if(!UsingMips32) { 3579 Register Tmp = RegInfo.createVirtualRegister(&Mips::GPR32RegClass); 3580 BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Tmp).addReg(Rt, 0, Mips::sub_32); 3581 Rt = Tmp; 3582 } 3583 3584 BuildMI(*BB, MI, DL, TII->get(Mips::FILL_H), Wd).addReg(Rt); 3585 3586 MI.eraseFromParent(); 3587 return BB; 3588 } 3589 3590 // Emit the FPROUND_PSEUDO instruction. 3591 // 3592 // Round an FGR64Opnd, FGR32Opnd to an f16. 3593 // 3594 // Safety: Cycle the operand through the GPRs so the result always ends up 3595 // the correct MSA register. 3596 // 3597 // FIXME: This copying is strictly unnecessary. 
If we could tie FGR32Opnd:$Fs 3598 // / FGR64Opnd:$Fs and MSA128F16:$Wd to the same physical register 3599 // (which they can be, as the MSA registers are defined to alias the 3600 // FPU's 64 bit and 32 bit registers) the result can be accessed using 3601 // the correct register class. That requires operands be tie-able across 3602 // register classes which have a sub/super register class relationship. 3603 // 3604 // For FPG32Opnd: 3605 // 3606 // FPROUND MSA128F16:$wd, FGR32Opnd:$fs 3607 // => 3608 // mfc1 $rtemp, $fs 3609 // fill.w $rtemp, $wtemp 3610 // fexdo.w $wd, $wtemp, $wtemp 3611 // 3612 // For FPG64Opnd on mips32r2+: 3613 // 3614 // FPROUND MSA128F16:$wd, FGR64Opnd:$fs 3615 // => 3616 // mfc1 $rtemp, $fs 3617 // fill.w $rtemp, $wtemp 3618 // mfhc1 $rtemp2, $fs 3619 // insert.w $wtemp[1], $rtemp2 3620 // insert.w $wtemp[3], $rtemp2 3621 // fexdo.w $wtemp2, $wtemp, $wtemp 3622 // fexdo.h $wd, $temp2, $temp2 3623 // 3624 // For FGR64Opnd on mips64r2+: 3625 // 3626 // FPROUND MSA128F16:$wd, FGR64Opnd:$fs 3627 // => 3628 // dmfc1 $rtemp, $fs 3629 // fill.d $rtemp, $wtemp 3630 // fexdo.w $wtemp2, $wtemp, $wtemp 3631 // fexdo.h $wd, $wtemp2, $wtemp2 3632 // 3633 // Safety note: As $wtemp is UNDEF, we may provoke a spurious exception if the 3634 // undef bits are "just right" and the exception enable bits are 3635 // set. By using fill.w to replicate $fs into all elements over 3636 // insert.w for one element, we avoid that potiential case. If 3637 // fexdo.[hw] causes an exception in, the exception is valid and it 3638 // occurs for all elements. 3639 MachineBasicBlock * 3640 MipsSETargetLowering::emitFPROUND_PSEUDO(MachineInstr &MI, 3641 MachineBasicBlock *BB, 3642 bool IsFGR64) const { 3643 3644 // Strictly speaking, we need MIPS32R5 to support MSA. We'll be generous 3645 // here. It's technically doable to support MIPS32 here, but the ISA forbids 3646 // it. 
3647 assert(Subtarget.hasMSA() && Subtarget.hasMips32r2()); 3648 3649 bool IsFGR64onMips64 = Subtarget.hasMips64() && IsFGR64; 3650 bool IsFGR64onMips32 = !Subtarget.hasMips64() && IsFGR64; 3651 3652 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3653 DebugLoc DL = MI.getDebugLoc(); 3654 Register Wd = MI.getOperand(0).getReg(); 3655 Register Fs = MI.getOperand(1).getReg(); 3656 3657 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3658 Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); 3659 const TargetRegisterClass *GPRRC = 3660 IsFGR64onMips64 ? &Mips::GPR64RegClass : &Mips::GPR32RegClass; 3661 unsigned MFC1Opc = IsFGR64onMips64 3662 ? Mips::DMFC1 3663 : (IsFGR64onMips32 ? Mips::MFC1_D64 : Mips::MFC1); 3664 unsigned FILLOpc = IsFGR64onMips64 ? Mips::FILL_D : Mips::FILL_W; 3665 3666 // Perform the register class copy as mentioned above. 3667 Register Rtemp = RegInfo.createVirtualRegister(GPRRC); 3668 BuildMI(*BB, MI, DL, TII->get(MFC1Opc), Rtemp).addReg(Fs); 3669 BuildMI(*BB, MI, DL, TII->get(FILLOpc), Wtemp).addReg(Rtemp); 3670 unsigned WPHI = Wtemp; 3671 3672 if (IsFGR64onMips32) { 3673 Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC); 3674 BuildMI(*BB, MI, DL, TII->get(Mips::MFHC1_D64), Rtemp2).addReg(Fs); 3675 Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); 3676 Register Wtemp3 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); 3677 BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_W), Wtemp2) 3678 .addReg(Wtemp) 3679 .addReg(Rtemp2) 3680 .addImm(1); 3681 BuildMI(*BB, MI, DL, TII->get(Mips::INSERT_W), Wtemp3) 3682 .addReg(Wtemp2) 3683 .addReg(Rtemp2) 3684 .addImm(3); 3685 WPHI = Wtemp3; 3686 } 3687 3688 if (IsFGR64) { 3689 Register Wtemp2 = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); 3690 BuildMI(*BB, MI, DL, TII->get(Mips::FEXDO_W), Wtemp2) 3691 .addReg(WPHI) 3692 .addReg(WPHI); 3693 WPHI = Wtemp2; 3694 } 3695 3696 BuildMI(*BB, MI, DL, TII->get(Mips::FEXDO_H), 
Wd).addReg(WPHI).addReg(WPHI); 3697 3698 MI.eraseFromParent(); 3699 return BB; 3700 } 3701 3702 // Emit the FPEXTEND_PSEUDO instruction. 3703 // 3704 // Expand an f16 to either a FGR32Opnd or FGR64Opnd. 3705 // 3706 // Safety: Cycle the result through the GPRs so the result always ends up 3707 // the correct floating point register. 3708 // 3709 // FIXME: This copying is strictly unnecessary. If we could tie FGR32Opnd:$Fd 3710 // / FGR64Opnd:$Fd and MSA128F16:$Ws to the same physical register 3711 // (which they can be, as the MSA registers are defined to alias the 3712 // FPU's 64 bit and 32 bit registers) the result can be accessed using 3713 // the correct register class. That requires operands be tie-able across 3714 // register classes which have a sub/super register class relationship. I 3715 // haven't checked. 3716 // 3717 // For FGR32Opnd: 3718 // 3719 // FPEXTEND FGR32Opnd:$fd, MSA128F16:$ws 3720 // => 3721 // fexupr.w $wtemp, $ws 3722 // copy_s.w $rtemp, $ws[0] 3723 // mtc1 $rtemp, $fd 3724 // 3725 // For FGR64Opnd on Mips64: 3726 // 3727 // FPEXTEND FGR64Opnd:$fd, MSA128F16:$ws 3728 // => 3729 // fexupr.w $wtemp, $ws 3730 // fexupr.d $wtemp2, $wtemp 3731 // copy_s.d $rtemp, $wtemp2s[0] 3732 // dmtc1 $rtemp, $fd 3733 // 3734 // For FGR64Opnd on Mips32: 3735 // 3736 // FPEXTEND FGR64Opnd:$fd, MSA128F16:$ws 3737 // => 3738 // fexupr.w $wtemp, $ws 3739 // fexupr.d $wtemp2, $wtemp 3740 // copy_s.w $rtemp, $wtemp2[0] 3741 // mtc1 $rtemp, $ftemp 3742 // copy_s.w $rtemp2, $wtemp2[1] 3743 // $fd = mthc1 $rtemp2, $ftemp 3744 MachineBasicBlock * 3745 MipsSETargetLowering::emitFPEXTEND_PSEUDO(MachineInstr &MI, 3746 MachineBasicBlock *BB, 3747 bool IsFGR64) const { 3748 3749 // Strictly speaking, we need MIPS32R5 to support MSA. We'll be generous 3750 // here. It's technically doable to support MIPS32 here, but the ISA forbids 3751 // it. 
3752 assert(Subtarget.hasMSA() && Subtarget.hasMips32r2()); 3753 3754 bool IsFGR64onMips64 = Subtarget.hasMips64() && IsFGR64; 3755 bool IsFGR64onMips32 = !Subtarget.hasMips64() && IsFGR64; 3756 3757 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3758 DebugLoc DL = MI.getDebugLoc(); 3759 Register Fd = MI.getOperand(0).getReg(); 3760 Register Ws = MI.getOperand(1).getReg(); 3761 3762 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3763 const TargetRegisterClass *GPRRC = 3764 IsFGR64onMips64 ? &Mips::GPR64RegClass : &Mips::GPR32RegClass; 3765 unsigned MTC1Opc = IsFGR64onMips64 3766 ? Mips::DMTC1 3767 : (IsFGR64onMips32 ? Mips::MTC1_D64 : Mips::MTC1); 3768 Register COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W; 3769 3770 Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); 3771 Register WPHI = Wtemp; 3772 3773 BuildMI(*BB, MI, DL, TII->get(Mips::FEXUPR_W), Wtemp).addReg(Ws); 3774 if (IsFGR64) { 3775 WPHI = RegInfo.createVirtualRegister(&Mips::MSA128DRegClass); 3776 BuildMI(*BB, MI, DL, TII->get(Mips::FEXUPR_D), WPHI).addReg(Wtemp); 3777 } 3778 3779 // Perform the safety regclass copy mentioned above. 3780 Register Rtemp = RegInfo.createVirtualRegister(GPRRC); 3781 Register FPRPHI = IsFGR64onMips32 3782 ? RegInfo.createVirtualRegister(&Mips::FGR64RegClass) 3783 : Fd; 3784 BuildMI(*BB, MI, DL, TII->get(COPYOpc), Rtemp).addReg(WPHI).addImm(0); 3785 BuildMI(*BB, MI, DL, TII->get(MTC1Opc), FPRPHI).addReg(Rtemp); 3786 3787 if (IsFGR64onMips32) { 3788 Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC); 3789 BuildMI(*BB, MI, DL, TII->get(Mips::COPY_S_W), Rtemp2) 3790 .addReg(WPHI) 3791 .addImm(1); 3792 BuildMI(*BB, MI, DL, TII->get(Mips::MTHC1_D64), Fd) 3793 .addReg(FPRPHI) 3794 .addReg(Rtemp2); 3795 } 3796 3797 MI.eraseFromParent(); 3798 return BB; 3799 } 3800 3801 // Emit the FEXP2_W_1 pseudo instructions. 
3802 // 3803 // fexp2_w_1_pseudo $wd, $wt 3804 // => 3805 // ldi.w $ws, 1 3806 // fexp2.w $wd, $ws, $wt 3807 MachineBasicBlock * 3808 MipsSETargetLowering::emitFEXP2_W_1(MachineInstr &MI, 3809 MachineBasicBlock *BB) const { 3810 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3811 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3812 const TargetRegisterClass *RC = &Mips::MSA128WRegClass; 3813 Register Ws1 = RegInfo.createVirtualRegister(RC); 3814 Register Ws2 = RegInfo.createVirtualRegister(RC); 3815 DebugLoc DL = MI.getDebugLoc(); 3816 3817 // Splat 1.0 into a vector 3818 BuildMI(*BB, MI, DL, TII->get(Mips::LDI_W), Ws1).addImm(1); 3819 BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_W), Ws2).addReg(Ws1); 3820 3821 // Emit 1.0 * fexp2(Wt) 3822 BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_W), MI.getOperand(0).getReg()) 3823 .addReg(Ws2) 3824 .addReg(MI.getOperand(1).getReg()); 3825 3826 MI.eraseFromParent(); // The pseudo instruction is gone now. 3827 return BB; 3828 } 3829 3830 // Emit the FEXP2_D_1 pseudo instructions. 
3831 // 3832 // fexp2_d_1_pseudo $wd, $wt 3833 // => 3834 // ldi.d $ws, 1 3835 // fexp2.d $wd, $ws, $wt 3836 MachineBasicBlock * 3837 MipsSETargetLowering::emitFEXP2_D_1(MachineInstr &MI, 3838 MachineBasicBlock *BB) const { 3839 const TargetInstrInfo *TII = Subtarget.getInstrInfo(); 3840 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); 3841 const TargetRegisterClass *RC = &Mips::MSA128DRegClass; 3842 Register Ws1 = RegInfo.createVirtualRegister(RC); 3843 Register Ws2 = RegInfo.createVirtualRegister(RC); 3844 DebugLoc DL = MI.getDebugLoc(); 3845 3846 // Splat 1.0 into a vector 3847 BuildMI(*BB, MI, DL, TII->get(Mips::LDI_D), Ws1).addImm(1); 3848 BuildMI(*BB, MI, DL, TII->get(Mips::FFINT_U_D), Ws2).addReg(Ws1); 3849 3850 // Emit 1.0 * fexp2(Wt) 3851 BuildMI(*BB, MI, DL, TII->get(Mips::FEXP2_D), MI.getOperand(0).getReg()) 3852 .addReg(Ws2) 3853 .addReg(MI.getOperand(1).getReg()); 3854 3855 MI.eraseFromParent(); // The pseudo instruction is gone now. 3856 return BB; 3857 } 3858