1 //===-- llvm/CodeGen/GlobalISel/LegalizerHelper.cpp -----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file This file implements the LegalizerHelper class to legalize 10 /// individual instructions and the LegalizeMachineIR wrapper pass for the 11 /// primary legalization. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" 16 #include "llvm/CodeGen/GlobalISel/CallLowering.h" 17 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 18 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" 19 #include "llvm/CodeGen/MachineRegisterInfo.h" 20 #include "llvm/CodeGen/TargetInstrInfo.h" 21 #include "llvm/CodeGen/TargetLowering.h" 22 #include "llvm/CodeGen/TargetSubtargetInfo.h" 23 #include "llvm/Support/Debug.h" 24 #include "llvm/Support/MathExtras.h" 25 #include "llvm/Support/raw_ostream.h" 26 27 #define DEBUG_TYPE "legalizer" 28 29 using namespace llvm; 30 using namespace LegalizeActions; 31 32 /// Try to break down \p OrigTy into \p NarrowTy sized pieces. 33 /// 34 /// Returns the number of \p NarrowTy elements needed to reconstruct \p OrigTy, 35 /// with any leftover piece as type \p LeftoverTy 36 /// 37 /// Returns -1 in the first element of the pair if the breakdown is not 38 /// satisfiable. 39 static std::pair<int, int> 40 getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) { 41 assert(!LeftoverTy.isValid() && "this is an out argument"); 42 43 unsigned Size = OrigTy.getSizeInBits(); 44 unsigned NarrowSize = NarrowTy.getSizeInBits(); 45 unsigned NumParts = Size / NarrowSize; 46 unsigned LeftoverSize = Size - NumParts * NarrowSize; 47 assert(Size > NarrowSize); 48 49 if (LeftoverSize == 0) 50 return {NumParts, 0}; 51 52 if (NarrowTy.isVector()) { 53 unsigned EltSize = OrigTy.getScalarSizeInBits(); 54 if (LeftoverSize % EltSize != 0) 55 return {-1, -1}; 56 LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); 57 } else { 58 LeftoverTy = LLT::scalar(LeftoverSize); 59 } 60 61 int NumLeftover = LeftoverSize / LeftoverTy.getSizeInBits(); 62 return std::make_pair(NumParts, NumLeftover); 63 } 64 65 LegalizerHelper::LegalizerHelper(MachineFunction &MF, 66 GISelChangeObserver &Observer, 67 MachineIRBuilder &Builder) 68 : MIRBuilder(Builder), MRI(MF.getRegInfo()), 69 LI(*MF.getSubtarget().getLegalizerInfo()), Observer(Observer) { 70 MIRBuilder.setMF(MF); 71 MIRBuilder.setChangeObserver(Observer); 72 } 73 74 LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI, 75 GISelChangeObserver &Observer, 76 MachineIRBuilder &B) 77 : MIRBuilder(B), MRI(MF.getRegInfo()), LI(LI), Observer(Observer) { 78 MIRBuilder.setMF(MF); 79 MIRBuilder.setChangeObserver(Observer); 80 } 81 LegalizerHelper::LegalizeResult 82 LegalizerHelper::legalizeInstrStep(MachineInstr &MI) { 83 LLVM_DEBUG(dbgs() << "Legalizing: "; MI.print(dbgs())); 84 85 if (MI.getOpcode() == TargetOpcode::G_INTRINSIC || 86 MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) 87 return LI.legalizeIntrinsic(MI, MRI, MIRBuilder) ? Legalized 88 : UnableToLegalize; 89 auto Step = LI.getAction(MI, MRI); 90 switch (Step.Action) { 91 case Legal: 92 LLVM_DEBUG(dbgs() << ".. 
Already legal\n"); 93 return AlreadyLegal; 94 case Libcall: 95 LLVM_DEBUG(dbgs() << ".. Convert to libcall\n"); 96 return libcall(MI); 97 case NarrowScalar: 98 LLVM_DEBUG(dbgs() << ".. Narrow scalar\n"); 99 return narrowScalar(MI, Step.TypeIdx, Step.NewType); 100 case WidenScalar: 101 LLVM_DEBUG(dbgs() << ".. Widen scalar\n"); 102 return widenScalar(MI, Step.TypeIdx, Step.NewType); 103 case Lower: 104 LLVM_DEBUG(dbgs() << ".. Lower\n"); 105 return lower(MI, Step.TypeIdx, Step.NewType); 106 case FewerElements: 107 LLVM_DEBUG(dbgs() << ".. Reduce number of elements\n"); 108 return fewerElementsVector(MI, Step.TypeIdx, Step.NewType); 109 case MoreElements: 110 LLVM_DEBUG(dbgs() << ".. Increase number of elements\n"); 111 return moreElementsVector(MI, Step.TypeIdx, Step.NewType); 112 case Custom: 113 LLVM_DEBUG(dbgs() << ".. Custom legalization\n"); 114 return LI.legalizeCustom(MI, MRI, MIRBuilder, Observer) ? Legalized 115 : UnableToLegalize; 116 default: 117 LLVM_DEBUG(dbgs() << ".. Unable to legalize\n"); 118 return UnableToLegalize; 119 } 120 } 121 122 void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts, 123 SmallVectorImpl<Register> &VRegs) { 124 for (int i = 0; i < NumParts; ++i) 125 VRegs.push_back(MRI.createGenericVirtualRegister(Ty)); 126 MIRBuilder.buildUnmerge(VRegs, Reg); 127 } 128 129 bool LegalizerHelper::extractParts(Register Reg, LLT RegTy, 130 LLT MainTy, LLT &LeftoverTy, 131 SmallVectorImpl<Register> &VRegs, 132 SmallVectorImpl<Register> &LeftoverRegs) { 133 assert(!LeftoverTy.isValid() && "this is an out argument"); 134 135 unsigned RegSize = RegTy.getSizeInBits(); 136 unsigned MainSize = MainTy.getSizeInBits(); 137 unsigned NumParts = RegSize / MainSize; 138 unsigned LeftoverSize = RegSize - NumParts * MainSize; 139 140 // Use an unmerge when possible. 141 if (LeftoverSize == 0) { 142 for (unsigned I = 0; I < NumParts; ++I) 143 VRegs.push_back(MRI.createGenericVirtualRegister(MainTy)); 144 MIRBuilder.buildUnmerge(VRegs, Reg); 145 return true; 146 } 147 148 if (MainTy.isVector()) { 149 unsigned EltSize = MainTy.getScalarSizeInBits(); 150 if (LeftoverSize % EltSize != 0) 151 return false; 152 LeftoverTy = LLT::scalarOrVector(LeftoverSize / EltSize, EltSize); 153 } else { 154 LeftoverTy = LLT::scalar(LeftoverSize); 155 } 156 157 // For irregular sizes, extract the individual parts. 
158 for (unsigned I = 0; I != NumParts; ++I) { 159 Register NewReg = MRI.createGenericVirtualRegister(MainTy); 160 VRegs.push_back(NewReg); 161 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I); 162 } 163 164 for (unsigned Offset = MainSize * NumParts; Offset < RegSize; 165 Offset += LeftoverSize) { 166 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy); 167 LeftoverRegs.push_back(NewReg); 168 MIRBuilder.buildExtract(NewReg, Reg, Offset); 169 } 170 171 return true; 172 } 173 174 void LegalizerHelper::insertParts(Register DstReg, 175 LLT ResultTy, LLT PartTy, 176 ArrayRef<Register> PartRegs, 177 LLT LeftoverTy, 178 ArrayRef<Register> LeftoverRegs) { 179 if (!LeftoverTy.isValid()) { 180 assert(LeftoverRegs.empty()); 181 182 if (!ResultTy.isVector()) { 183 MIRBuilder.buildMerge(DstReg, PartRegs); 184 return; 185 } 186 187 if (PartTy.isVector()) 188 MIRBuilder.buildConcatVectors(DstReg, PartRegs); 189 else 190 MIRBuilder.buildBuildVector(DstReg, PartRegs); 191 return; 192 } 193 194 unsigned PartSize = PartTy.getSizeInBits(); 195 unsigned LeftoverPartSize = LeftoverTy.getSizeInBits(); 196 197 Register CurResultReg = MRI.createGenericVirtualRegister(ResultTy); 198 MIRBuilder.buildUndef(CurResultReg); 199 200 unsigned Offset = 0; 201 for (Register PartReg : PartRegs) { 202 Register NewResultReg = MRI.createGenericVirtualRegister(ResultTy); 203 MIRBuilder.buildInsert(NewResultReg, CurResultReg, PartReg, Offset); 204 CurResultReg = NewResultReg; 205 Offset += PartSize; 206 } 207 208 for (unsigned I = 0, E = LeftoverRegs.size(); I != E; ++I) { 209 // Use the original output register for the final insert to avoid a copy. 210 Register NewResultReg = (I + 1 == E) ? 211 DstReg : MRI.createGenericVirtualRegister(ResultTy); 212 213 MIRBuilder.buildInsert(NewResultReg, CurResultReg, LeftoverRegs[I], Offset); 214 CurResultReg = NewResultReg; 215 Offset += LeftoverPartSize; 216 } 217 } 218 219 static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) { 220 switch (Opcode) { 221 case TargetOpcode::G_SDIV: 222 assert((Size == 32 || Size == 64) && "Unsupported size"); 223 return Size == 64 ? RTLIB::SDIV_I64 : RTLIB::SDIV_I32; 224 case TargetOpcode::G_UDIV: 225 assert((Size == 32 || Size == 64) && "Unsupported size"); 226 return Size == 64 ? RTLIB::UDIV_I64 : RTLIB::UDIV_I32; 227 case TargetOpcode::G_SREM: 228 assert((Size == 32 || Size == 64) && "Unsupported size"); 229 return Size == 64 ? RTLIB::SREM_I64 : RTLIB::SREM_I32; 230 case TargetOpcode::G_UREM: 231 assert((Size == 32 || Size == 64) && "Unsupported size"); 232 return Size == 64 ? RTLIB::UREM_I64 : RTLIB::UREM_I32; 233 case TargetOpcode::G_CTLZ_ZERO_UNDEF: 234 assert(Size == 32 && "Unsupported size"); 235 return RTLIB::CTLZ_I32; 236 case TargetOpcode::G_FADD: 237 assert((Size == 32 || Size == 64) && "Unsupported size"); 238 return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32; 239 case TargetOpcode::G_FSUB: 240 assert((Size == 32 || Size == 64) && "Unsupported size"); 241 return Size == 64 ? RTLIB::SUB_F64 : RTLIB::SUB_F32; 242 case TargetOpcode::G_FMUL: 243 assert((Size == 32 || Size == 64) && "Unsupported size"); 244 return Size == 64 ? RTLIB::MUL_F64 : RTLIB::MUL_F32; 245 case TargetOpcode::G_FDIV: 246 assert((Size == 32 || Size == 64) && "Unsupported size"); 247 return Size == 64 ? RTLIB::DIV_F64 : RTLIB::DIV_F32; 248 case TargetOpcode::G_FEXP: 249 assert((Size == 32 || Size == 64) && "Unsupported size"); 250 return Size == 64 ? 
RTLIB::EXP_F64 : RTLIB::EXP_F32; 251 case TargetOpcode::G_FEXP2: 252 assert((Size == 32 || Size == 64) && "Unsupported size"); 253 return Size == 64 ? RTLIB::EXP2_F64 : RTLIB::EXP2_F32; 254 case TargetOpcode::G_FREM: 255 return Size == 64 ? RTLIB::REM_F64 : RTLIB::REM_F32; 256 case TargetOpcode::G_FPOW: 257 return Size == 64 ? RTLIB::POW_F64 : RTLIB::POW_F32; 258 case TargetOpcode::G_FMA: 259 assert((Size == 32 || Size == 64) && "Unsupported size"); 260 return Size == 64 ? RTLIB::FMA_F64 : RTLIB::FMA_F32; 261 case TargetOpcode::G_FSIN: 262 assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); 263 return Size == 128 ? RTLIB::SIN_F128 264 : Size == 64 ? RTLIB::SIN_F64 : RTLIB::SIN_F32; 265 case TargetOpcode::G_FCOS: 266 assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); 267 return Size == 128 ? RTLIB::COS_F128 268 : Size == 64 ? RTLIB::COS_F64 : RTLIB::COS_F32; 269 case TargetOpcode::G_FLOG10: 270 assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); 271 return Size == 128 ? RTLIB::LOG10_F128 272 : Size == 64 ? RTLIB::LOG10_F64 : RTLIB::LOG10_F32; 273 case TargetOpcode::G_FLOG: 274 assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); 275 return Size == 128 ? RTLIB::LOG_F128 276 : Size == 64 ? RTLIB::LOG_F64 : RTLIB::LOG_F32; 277 case TargetOpcode::G_FLOG2: 278 assert((Size == 32 || Size == 64 || Size == 128) && "Unsupported size"); 279 return Size == 128 ? RTLIB::LOG2_F128 280 : Size == 64 ? RTLIB::LOG2_F64 : RTLIB::LOG2_F32; 281 case TargetOpcode::G_FCEIL: 282 assert((Size == 32 || Size == 64) && "Unsupported size"); 283 return Size == 64 ? RTLIB::CEIL_F64 : RTLIB::CEIL_F32; 284 case TargetOpcode::G_FFLOOR: 285 assert((Size == 32 || Size == 64) && "Unsupported size"); 286 return Size == 64 ? RTLIB::FLOOR_F64 : RTLIB::FLOOR_F32; 287 } 288 llvm_unreachable("Unknown libcall function"); 289 } 290 291 LegalizerHelper::LegalizeResult 292 llvm::createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall, 293 const CallLowering::ArgInfo &Result, 294 ArrayRef<CallLowering::ArgInfo> Args) { 295 auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering(); 296 auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering(); 297 const char *Name = TLI.getLibcallName(Libcall); 298 299 MIRBuilder.getMF().getFrameInfo().setHasCalls(true); 300 if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall), 301 MachineOperand::CreateES(Name), Result, Args)) 302 return LegalizerHelper::UnableToLegalize; 303 304 return LegalizerHelper::Legalized; 305 } 306 307 // Useful for libcalls where all operands have the same type. 
308 static LegalizerHelper::LegalizeResult 309 simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size, 310 Type *OpType) { 311 auto Libcall = getRTLibDesc(MI.getOpcode(), Size); 312 313 SmallVector<CallLowering::ArgInfo, 3> Args; 314 for (unsigned i = 1; i < MI.getNumOperands(); i++) 315 Args.push_back({MI.getOperand(i).getReg(), OpType}); 316 return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), OpType}, 317 Args); 318 } 319 320 static RTLIB::Libcall getConvRTLibDesc(unsigned Opcode, Type *ToType, 321 Type *FromType) { 322 auto ToMVT = MVT::getVT(ToType); 323 auto FromMVT = MVT::getVT(FromType); 324 325 switch (Opcode) { 326 case TargetOpcode::G_FPEXT: 327 return RTLIB::getFPEXT(FromMVT, ToMVT); 328 case TargetOpcode::G_FPTRUNC: 329 return RTLIB::getFPROUND(FromMVT, ToMVT); 330 case TargetOpcode::G_FPTOSI: 331 return RTLIB::getFPTOSINT(FromMVT, ToMVT); 332 case TargetOpcode::G_FPTOUI: 333 return RTLIB::getFPTOUINT(FromMVT, ToMVT); 334 case TargetOpcode::G_SITOFP: 335 return RTLIB::getSINTTOFP(FromMVT, ToMVT); 336 case TargetOpcode::G_UITOFP: 337 return RTLIB::getUINTTOFP(FromMVT, ToMVT); 338 } 339 llvm_unreachable("Unsupported libcall function"); 340 } 341 342 static LegalizerHelper::LegalizeResult 343 conversionLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, Type *ToType, 344 Type *FromType) { 345 RTLIB::Libcall Libcall = getConvRTLibDesc(MI.getOpcode(), ToType, FromType); 346 return createLibcall(MIRBuilder, Libcall, {MI.getOperand(0).getReg(), ToType}, 347 {{MI.getOperand(1).getReg(), FromType}}); 348 } 349 350 LegalizerHelper::LegalizeResult 351 LegalizerHelper::libcall(MachineInstr &MI) { 352 LLT LLTy = MRI.getType(MI.getOperand(0).getReg()); 353 unsigned Size = LLTy.getSizeInBits(); 354 auto &Ctx = MIRBuilder.getMF().getFunction().getContext(); 355 356 MIRBuilder.setInstr(MI); 357 358 switch (MI.getOpcode()) { 359 default: 360 return UnableToLegalize; 361 case TargetOpcode::G_SDIV: 362 case TargetOpcode::G_UDIV: 363 case TargetOpcode::G_SREM: 364 case TargetOpcode::G_UREM: 365 case TargetOpcode::G_CTLZ_ZERO_UNDEF: { 366 Type *HLTy = IntegerType::get(Ctx, Size); 367 auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); 368 if (Status != Legalized) 369 return Status; 370 break; 371 } 372 case TargetOpcode::G_FADD: 373 case TargetOpcode::G_FSUB: 374 case TargetOpcode::G_FMUL: 375 case TargetOpcode::G_FDIV: 376 case TargetOpcode::G_FMA: 377 case TargetOpcode::G_FPOW: 378 case TargetOpcode::G_FREM: 379 case TargetOpcode::G_FCOS: 380 case TargetOpcode::G_FSIN: 381 case TargetOpcode::G_FLOG10: 382 case TargetOpcode::G_FLOG: 383 case TargetOpcode::G_FLOG2: 384 case TargetOpcode::G_FEXP: 385 case TargetOpcode::G_FEXP2: 386 case TargetOpcode::G_FCEIL: 387 case TargetOpcode::G_FFLOOR: { 388 if (Size > 64) { 389 LLVM_DEBUG(dbgs() << "Size " << Size << " too large to legalize.\n"); 390 return UnableToLegalize; 391 } 392 Type *HLTy = Size == 64 ? 
Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx); 393 auto Status = simpleLibcall(MI, MIRBuilder, Size, HLTy); 394 if (Status != Legalized) 395 return Status; 396 break; 397 } 398 case TargetOpcode::G_FPEXT: { 399 // FIXME: Support other floating point types (half, fp128 etc) 400 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); 401 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); 402 if (ToSize != 64 || FromSize != 32) 403 return UnableToLegalize; 404 LegalizeResult Status = conversionLibcall( 405 MI, MIRBuilder, Type::getDoubleTy(Ctx), Type::getFloatTy(Ctx)); 406 if (Status != Legalized) 407 return Status; 408 break; 409 } 410 case TargetOpcode::G_FPTRUNC: { 411 // FIXME: Support other floating point types (half, fp128 etc) 412 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); 413 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); 414 if (ToSize != 32 || FromSize != 64) 415 return UnableToLegalize; 416 LegalizeResult Status = conversionLibcall( 417 MI, MIRBuilder, Type::getFloatTy(Ctx), Type::getDoubleTy(Ctx)); 418 if (Status != Legalized) 419 return Status; 420 break; 421 } 422 case TargetOpcode::G_FPTOSI: 423 case TargetOpcode::G_FPTOUI: { 424 // FIXME: Support other types 425 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); 426 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); 427 if ((ToSize != 32 && ToSize != 64) || (FromSize != 32 && FromSize != 64)) 428 return UnableToLegalize; 429 LegalizeResult Status = conversionLibcall( 430 MI, MIRBuilder, 431 ToSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx), 432 FromSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx)); 433 if (Status != Legalized) 434 return Status; 435 break; 436 } 437 case TargetOpcode::G_SITOFP: 438 case TargetOpcode::G_UITOFP: { 439 // FIXME: Support other types 440 unsigned FromSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); 441 unsigned ToSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); 442 if ((FromSize != 32 && FromSize != 64) || (ToSize != 32 && ToSize != 64)) 443 return UnableToLegalize; 444 LegalizeResult Status = conversionLibcall( 445 MI, MIRBuilder, 446 ToSize == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx), 447 FromSize == 32 ? Type::getInt32Ty(Ctx) : Type::getInt64Ty(Ctx)); 448 if (Status != Legalized) 449 return Status; 450 break; 451 } 452 } 453 454 MI.eraseFromParent(); 455 return Legalized; 456 } 457 458 LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI, 459 unsigned TypeIdx, 460 LLT NarrowTy) { 461 MIRBuilder.setInstr(MI); 462 463 uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); 464 uint64_t NarrowSize = NarrowTy.getSizeInBits(); 465 466 switch (MI.getOpcode()) { 467 default: 468 return UnableToLegalize; 469 case TargetOpcode::G_IMPLICIT_DEF: { 470 // FIXME: add support for when SizeOp0 isn't an exact multiple of 471 // NarrowSize. 
472 if (SizeOp0 % NarrowSize != 0) 473 return UnableToLegalize; 474 int NumParts = SizeOp0 / NarrowSize; 475 476 SmallVector<Register, 2> DstRegs; 477 for (int i = 0; i < NumParts; ++i) 478 DstRegs.push_back( 479 MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg()); 480 481 Register DstReg = MI.getOperand(0).getReg(); 482 if(MRI.getType(DstReg).isVector()) 483 MIRBuilder.buildBuildVector(DstReg, DstRegs); 484 else 485 MIRBuilder.buildMerge(DstReg, DstRegs); 486 MI.eraseFromParent(); 487 return Legalized; 488 } 489 case TargetOpcode::G_CONSTANT: { 490 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 491 const APInt &Val = MI.getOperand(1).getCImm()->getValue(); 492 unsigned TotalSize = Ty.getSizeInBits(); 493 unsigned NarrowSize = NarrowTy.getSizeInBits(); 494 int NumParts = TotalSize / NarrowSize; 495 496 SmallVector<Register, 4> PartRegs; 497 for (int I = 0; I != NumParts; ++I) { 498 unsigned Offset = I * NarrowSize; 499 auto K = MIRBuilder.buildConstant(NarrowTy, 500 Val.lshr(Offset).trunc(NarrowSize)); 501 PartRegs.push_back(K.getReg(0)); 502 } 503 504 LLT LeftoverTy; 505 unsigned LeftoverBits = TotalSize - NumParts * NarrowSize; 506 SmallVector<Register, 1> LeftoverRegs; 507 if (LeftoverBits != 0) { 508 LeftoverTy = LLT::scalar(LeftoverBits); 509 auto K = MIRBuilder.buildConstant( 510 LeftoverTy, 511 Val.lshr(NumParts * NarrowSize).trunc(LeftoverBits)); 512 LeftoverRegs.push_back(K.getReg(0)); 513 } 514 515 insertParts(MI.getOperand(0).getReg(), 516 Ty, NarrowTy, PartRegs, LeftoverTy, LeftoverRegs); 517 518 MI.eraseFromParent(); 519 return Legalized; 520 } 521 case TargetOpcode::G_ADD: { 522 // FIXME: add support for when SizeOp0 isn't an exact multiple of 523 // NarrowSize. 524 if (SizeOp0 % NarrowSize != 0) 525 return UnableToLegalize; 526 // Expand in terms of carry-setting/consuming G_ADDE instructions. 527 int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); 528 529 SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; 530 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); 531 extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); 532 533 Register CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1)); 534 MIRBuilder.buildConstant(CarryIn, 0); 535 536 for (int i = 0; i < NumParts; ++i) { 537 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); 538 Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); 539 540 MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i], 541 Src2Regs[i], CarryIn); 542 543 DstRegs.push_back(DstReg); 544 CarryIn = CarryOut; 545 } 546 Register DstReg = MI.getOperand(0).getReg(); 547 if(MRI.getType(DstReg).isVector()) 548 MIRBuilder.buildBuildVector(DstReg, DstRegs); 549 else 550 MIRBuilder.buildMerge(DstReg, DstRegs); 551 MI.eraseFromParent(); 552 return Legalized; 553 } 554 case TargetOpcode::G_SUB: { 555 // FIXME: add support for when SizeOp0 isn't an exact multiple of 556 // NarrowSize. 
557 if (SizeOp0 % NarrowSize != 0) 558 return UnableToLegalize; 559 560 int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); 561 562 SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; 563 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); 564 extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); 565 566 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); 567 Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); 568 MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut}, 569 {Src1Regs[0], Src2Regs[0]}); 570 DstRegs.push_back(DstReg); 571 Register BorrowIn = BorrowOut; 572 for (int i = 1; i < NumParts; ++i) { 573 DstReg = MRI.createGenericVirtualRegister(NarrowTy); 574 BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1)); 575 576 MIRBuilder.buildInstr(TargetOpcode::G_USUBE, {DstReg, BorrowOut}, 577 {Src1Regs[i], Src2Regs[i], BorrowIn}); 578 579 DstRegs.push_back(DstReg); 580 BorrowIn = BorrowOut; 581 } 582 MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs); 583 MI.eraseFromParent(); 584 return Legalized; 585 } 586 case TargetOpcode::G_MUL: 587 case TargetOpcode::G_UMULH: 588 return narrowScalarMul(MI, NarrowTy); 589 case TargetOpcode::G_EXTRACT: 590 return narrowScalarExtract(MI, TypeIdx, NarrowTy); 591 case TargetOpcode::G_INSERT: 592 return narrowScalarInsert(MI, TypeIdx, NarrowTy); 593 case TargetOpcode::G_LOAD: { 594 const auto &MMO = **MI.memoperands_begin(); 595 Register DstReg = MI.getOperand(0).getReg(); 596 LLT DstTy = MRI.getType(DstReg); 597 if (DstTy.isVector()) 598 return UnableToLegalize; 599 600 if (8 * MMO.getSize() != DstTy.getSizeInBits()) { 601 Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); 602 auto &MMO = **MI.memoperands_begin(); 603 MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO); 604 MIRBuilder.buildAnyExt(DstReg, TmpReg); 605 MI.eraseFromParent(); 606 return Legalized; 607 } 608 609 return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); 610 } 611 case TargetOpcode::G_ZEXTLOAD: 612 case TargetOpcode::G_SEXTLOAD: { 613 bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD; 614 Register DstReg = MI.getOperand(0).getReg(); 615 Register PtrReg = MI.getOperand(1).getReg(); 616 617 Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); 618 auto &MMO = **MI.memoperands_begin(); 619 if (MMO.getSizeInBits() == NarrowSize) { 620 MIRBuilder.buildLoad(TmpReg, PtrReg, MMO); 621 } else { 622 unsigned ExtLoad = ZExt ? 
TargetOpcode::G_ZEXTLOAD 623 : TargetOpcode::G_SEXTLOAD; 624 MIRBuilder.buildInstr(ExtLoad) 625 .addDef(TmpReg) 626 .addUse(PtrReg) 627 .addMemOperand(&MMO); 628 } 629 630 if (ZExt) 631 MIRBuilder.buildZExt(DstReg, TmpReg); 632 else 633 MIRBuilder.buildSExt(DstReg, TmpReg); 634 635 MI.eraseFromParent(); 636 return Legalized; 637 } 638 case TargetOpcode::G_STORE: { 639 const auto &MMO = **MI.memoperands_begin(); 640 641 Register SrcReg = MI.getOperand(0).getReg(); 642 LLT SrcTy = MRI.getType(SrcReg); 643 if (SrcTy.isVector()) 644 return UnableToLegalize; 645 646 int NumParts = SizeOp0 / NarrowSize; 647 unsigned HandledSize = NumParts * NarrowTy.getSizeInBits(); 648 unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize; 649 if (SrcTy.isVector() && LeftoverBits != 0) 650 return UnableToLegalize; 651 652 if (8 * MMO.getSize() != SrcTy.getSizeInBits()) { 653 Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); 654 auto &MMO = **MI.memoperands_begin(); 655 MIRBuilder.buildTrunc(TmpReg, SrcReg); 656 MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO); 657 MI.eraseFromParent(); 658 return Legalized; 659 } 660 661 return reduceLoadStoreWidth(MI, 0, NarrowTy); 662 } 663 case TargetOpcode::G_SELECT: 664 return narrowScalarSelect(MI, TypeIdx, NarrowTy); 665 case TargetOpcode::G_AND: 666 case TargetOpcode::G_OR: 667 case TargetOpcode::G_XOR: { 668 // Legalize bitwise operation: 669 // A = BinOp<Ty> B, C 670 // into: 671 // B1, ..., BN = G_UNMERGE_VALUES B 672 // C1, ..., CN = G_UNMERGE_VALUES C 673 // A1 = BinOp<Ty/N> B1, C2 674 // ... 675 // AN = BinOp<Ty/N> BN, CN 676 // A = G_MERGE_VALUES A1, ..., AN 677 return narrowScalarBasic(MI, TypeIdx, NarrowTy); 678 } 679 case TargetOpcode::G_SHL: 680 case TargetOpcode::G_LSHR: 681 case TargetOpcode::G_ASHR: 682 return narrowScalarShift(MI, TypeIdx, NarrowTy); 683 case TargetOpcode::G_CTLZ: 684 case TargetOpcode::G_CTLZ_ZERO_UNDEF: 685 case TargetOpcode::G_CTTZ: 686 case TargetOpcode::G_CTTZ_ZERO_UNDEF: 687 case TargetOpcode::G_CTPOP: 688 if (TypeIdx != 0) 689 return UnableToLegalize; // TODO 690 691 Observer.changingInstr(MI); 692 narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); 693 Observer.changedInstr(MI); 694 return Legalized; 695 case TargetOpcode::G_INTTOPTR: 696 if (TypeIdx != 1) 697 return UnableToLegalize; 698 699 Observer.changingInstr(MI); 700 narrowScalarSrc(MI, NarrowTy, 1); 701 Observer.changedInstr(MI); 702 return Legalized; 703 case TargetOpcode::G_PTRTOINT: 704 if (TypeIdx != 0) 705 return UnableToLegalize; 706 707 Observer.changingInstr(MI); 708 narrowScalarDst(MI, NarrowTy, 0, TargetOpcode::G_ZEXT); 709 Observer.changedInstr(MI); 710 return Legalized; 711 case TargetOpcode::G_PHI: { 712 unsigned NumParts = SizeOp0 / NarrowSize; 713 SmallVector<Register, 2> DstRegs; 714 SmallVector<SmallVector<Register, 2>, 2> SrcRegs; 715 DstRegs.resize(NumParts); 716 SrcRegs.resize(MI.getNumOperands() / 2); 717 Observer.changingInstr(MI); 718 for (unsigned i = 1; i < MI.getNumOperands(); i += 2) { 719 MachineBasicBlock &OpMBB = *MI.getOperand(i + 1).getMBB(); 720 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); 721 extractParts(MI.getOperand(i).getReg(), NarrowTy, NumParts, 722 SrcRegs[i / 2]); 723 } 724 MachineBasicBlock &MBB = *MI.getParent(); 725 MIRBuilder.setInsertPt(MBB, MI); 726 for (unsigned i = 0; i < NumParts; ++i) { 727 DstRegs[i] = MRI.createGenericVirtualRegister(NarrowTy); 728 MachineInstrBuilder MIB = 729 MIRBuilder.buildInstr(TargetOpcode::G_PHI).addDef(DstRegs[i]); 730 for (unsigned j = 1; j < 
MI.getNumOperands(); j += 2) 731 MIB.addUse(SrcRegs[j / 2][i]).add(MI.getOperand(j + 1)); 732 } 733 MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); 734 MIRBuilder.buildMerge(MI.getOperand(0).getReg(), DstRegs); 735 Observer.changedInstr(MI); 736 MI.eraseFromParent(); 737 return Legalized; 738 } 739 case TargetOpcode::G_EXTRACT_VECTOR_ELT: 740 case TargetOpcode::G_INSERT_VECTOR_ELT: { 741 if (TypeIdx != 2) 742 return UnableToLegalize; 743 744 int OpIdx = MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT ? 2 : 3; 745 Observer.changingInstr(MI); 746 narrowScalarSrc(MI, NarrowTy, OpIdx); 747 Observer.changedInstr(MI); 748 return Legalized; 749 } 750 case TargetOpcode::G_ICMP: { 751 uint64_t SrcSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); 752 if (NarrowSize * 2 != SrcSize) 753 return UnableToLegalize; 754 755 Observer.changingInstr(MI); 756 Register LHSL = MRI.createGenericVirtualRegister(NarrowTy); 757 Register LHSH = MRI.createGenericVirtualRegister(NarrowTy); 758 MIRBuilder.buildUnmerge({LHSL, LHSH}, MI.getOperand(2).getReg()); 759 760 Register RHSL = MRI.createGenericVirtualRegister(NarrowTy); 761 Register RHSH = MRI.createGenericVirtualRegister(NarrowTy); 762 MIRBuilder.buildUnmerge({RHSL, RHSH}, MI.getOperand(3).getReg()); 763 764 CmpInst::Predicate Pred = 765 static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); 766 767 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) { 768 MachineInstrBuilder XorL = MIRBuilder.buildXor(NarrowTy, LHSL, RHSL); 769 MachineInstrBuilder XorH = MIRBuilder.buildXor(NarrowTy, LHSH, RHSH); 770 MachineInstrBuilder Or = MIRBuilder.buildOr(NarrowTy, XorL, XorH); 771 MachineInstrBuilder Zero = MIRBuilder.buildConstant(NarrowTy, 0); 772 MIRBuilder.buildICmp(Pred, MI.getOperand(0).getReg(), Or, Zero); 773 } else { 774 const LLT s1 = LLT::scalar(1); 775 MachineInstrBuilder CmpH = MIRBuilder.buildICmp(Pred, s1, LHSH, RHSH); 776 MachineInstrBuilder CmpHEQ = 777 MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, s1, LHSH, RHSH); 778 MachineInstrBuilder CmpLU = MIRBuilder.buildICmp( 779 ICmpInst::getUnsignedPredicate(Pred), s1, LHSL, RHSL); 780 MIRBuilder.buildSelect(MI.getOperand(0).getReg(), CmpHEQ, CmpLU, CmpH); 781 } 782 Observer.changedInstr(MI); 783 MI.eraseFromParent(); 784 return Legalized; 785 } 786 } 787 } 788 789 void LegalizerHelper::widenScalarSrc(MachineInstr &MI, LLT WideTy, 790 unsigned OpIdx, unsigned ExtOpcode) { 791 MachineOperand &MO = MI.getOperand(OpIdx); 792 auto ExtB = MIRBuilder.buildInstr(ExtOpcode, {WideTy}, {MO.getReg()}); 793 MO.setReg(ExtB->getOperand(0).getReg()); 794 } 795 796 void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, 797 unsigned OpIdx) { 798 MachineOperand &MO = MI.getOperand(OpIdx); 799 auto ExtB = MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {NarrowTy}, 800 {MO.getReg()}); 801 MO.setReg(ExtB->getOperand(0).getReg()); 802 } 803 804 void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy, 805 unsigned OpIdx, unsigned TruncOpcode) { 806 MachineOperand &MO = MI.getOperand(OpIdx); 807 Register DstExt = MRI.createGenericVirtualRegister(WideTy); 808 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); 809 MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt}); 810 MO.setReg(DstExt); 811 } 812 813 void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy, 814 unsigned OpIdx, unsigned ExtOpcode) { 815 MachineOperand &MO = MI.getOperand(OpIdx); 816 Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy); 817 
MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); 818 MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc}); 819 MO.setReg(DstTrunc); 820 } 821 822 void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy, 823 unsigned OpIdx) { 824 MachineOperand &MO = MI.getOperand(OpIdx); 825 Register DstExt = MRI.createGenericVirtualRegister(WideTy); 826 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); 827 MIRBuilder.buildExtract(MO.getReg(), DstExt, 0); 828 MO.setReg(DstExt); 829 } 830 831 void LegalizerHelper::moreElementsVectorSrc(MachineInstr &MI, LLT MoreTy, 832 unsigned OpIdx) { 833 MachineOperand &MO = MI.getOperand(OpIdx); 834 835 LLT OldTy = MRI.getType(MO.getReg()); 836 unsigned OldElts = OldTy.getNumElements(); 837 unsigned NewElts = MoreTy.getNumElements(); 838 839 unsigned NumParts = NewElts / OldElts; 840 841 // Use concat_vectors if the result is a multiple of the number of elements. 842 if (NumParts * OldElts == NewElts) { 843 SmallVector<Register, 8> Parts; 844 Parts.push_back(MO.getReg()); 845 846 Register ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0); 847 for (unsigned I = 1; I != NumParts; ++I) 848 Parts.push_back(ImpDef); 849 850 auto Concat = MIRBuilder.buildConcatVectors(MoreTy, Parts); 851 MO.setReg(Concat.getReg(0)); 852 return; 853 } 854 855 Register MoreReg = MRI.createGenericVirtualRegister(MoreTy); 856 Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0); 857 MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0); 858 MO.setReg(MoreReg); 859 } 860 861 LegalizerHelper::LegalizeResult 862 LegalizerHelper::widenScalarMergeValues(MachineInstr &MI, unsigned TypeIdx, 863 LLT WideTy) { 864 if (TypeIdx != 1) 865 return UnableToLegalize; 866 867 Register DstReg = MI.getOperand(0).getReg(); 868 LLT DstTy = MRI.getType(DstReg); 869 if (DstTy.isVector()) 870 return UnableToLegalize; 871 872 Register Src1 = MI.getOperand(1).getReg(); 873 LLT SrcTy = MRI.getType(Src1); 874 const int DstSize = DstTy.getSizeInBits(); 875 const int SrcSize = SrcTy.getSizeInBits(); 876 const int WideSize = WideTy.getSizeInBits(); 877 const int NumMerge = (DstSize + WideSize - 1) / WideSize; 878 879 unsigned NumOps = MI.getNumOperands(); 880 unsigned NumSrc = MI.getNumOperands() - 1; 881 unsigned PartSize = DstTy.getSizeInBits() / NumSrc; 882 883 if (WideSize >= DstSize) { 884 // Directly pack the bits in the target type. 885 Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1).getReg(0); 886 887 for (unsigned I = 2; I != NumOps; ++I) { 888 const unsigned Offset = (I - 1) * PartSize; 889 890 Register SrcReg = MI.getOperand(I).getReg(); 891 assert(MRI.getType(SrcReg) == LLT::scalar(PartSize)); 892 893 auto ZextInput = MIRBuilder.buildZExt(WideTy, SrcReg); 894 895 Register NextResult = I + 1 == NumOps && WideSize == DstSize ? DstReg : 896 MRI.createGenericVirtualRegister(WideTy); 897 898 auto ShiftAmt = MIRBuilder.buildConstant(WideTy, Offset); 899 auto Shl = MIRBuilder.buildShl(WideTy, ZextInput, ShiftAmt); 900 MIRBuilder.buildOr(NextResult, ResultReg, Shl); 901 ResultReg = NextResult; 902 } 903 904 if (WideSize > DstSize) 905 MIRBuilder.buildTrunc(DstReg, ResultReg); 906 907 MI.eraseFromParent(); 908 return Legalized; 909 } 910 911 // Unmerge the original values to the GCD type, and recombine to the next 912 // multiple greater than the original type. 
913 // 914 // %3:_(s12) = G_MERGE_VALUES %0:_(s4), %1:_(s4), %2:_(s4) -> s6 915 // %4:_(s2), %5:_(s2) = G_UNMERGE_VALUES %0 916 // %6:_(s2), %7:_(s2) = G_UNMERGE_VALUES %1 917 // %8:_(s2), %9:_(s2) = G_UNMERGE_VALUES %2 918 // %10:_(s6) = G_MERGE_VALUES %4, %5, %6 919 // %11:_(s6) = G_MERGE_VALUES %7, %8, %9 920 // %12:_(s12) = G_MERGE_VALUES %10, %11 921 // 922 // Padding with undef if necessary: 923 // 924 // %2:_(s8) = G_MERGE_VALUES %0:_(s4), %1:_(s4) -> s6 925 // %3:_(s2), %4:_(s2) = G_UNMERGE_VALUES %0 926 // %5:_(s2), %6:_(s2) = G_UNMERGE_VALUES %1 927 // %7:_(s2) = G_IMPLICIT_DEF 928 // %8:_(s6) = G_MERGE_VALUES %3, %4, %5 929 // %9:_(s6) = G_MERGE_VALUES %6, %7, %7 930 // %10:_(s12) = G_MERGE_VALUES %8, %9 931 932 const int GCD = greatestCommonDivisor(SrcSize, WideSize); 933 LLT GCDTy = LLT::scalar(GCD); 934 935 SmallVector<Register, 8> Parts; 936 SmallVector<Register, 8> NewMergeRegs; 937 SmallVector<Register, 8> Unmerges; 938 LLT WideDstTy = LLT::scalar(NumMerge * WideSize); 939 940 // Decompose the original operands if they don't evenly divide. 941 for (int I = 1, E = MI.getNumOperands(); I != E; ++I) { 942 Register SrcReg = MI.getOperand(I).getReg(); 943 if (GCD == SrcSize) { 944 Unmerges.push_back(SrcReg); 945 } else { 946 auto Unmerge = MIRBuilder.buildUnmerge(GCDTy, SrcReg); 947 for (int J = 0, JE = Unmerge->getNumOperands() - 1; J != JE; ++J) 948 Unmerges.push_back(Unmerge.getReg(J)); 949 } 950 } 951 952 // Pad with undef to the next size that is a multiple of the requested size. 953 if (static_cast<int>(Unmerges.size()) != NumMerge * WideSize) { 954 Register UndefReg = MIRBuilder.buildUndef(GCDTy).getReg(0); 955 for (int I = Unmerges.size(); I != NumMerge * WideSize; ++I) 956 Unmerges.push_back(UndefReg); 957 } 958 959 const int PartsPerGCD = WideSize / GCD; 960 961 // Build merges of each piece. 962 ArrayRef<Register> Slicer(Unmerges); 963 for (int I = 0; I != NumMerge; ++I, Slicer = Slicer.drop_front(PartsPerGCD)) { 964 auto Merge = MIRBuilder.buildMerge(WideTy, Slicer.take_front(PartsPerGCD)); 965 NewMergeRegs.push_back(Merge.getReg(0)); 966 } 967 968 // A truncate may be necessary if the requested type doesn't evenly divide the 969 // original result type. 
970 if (DstTy.getSizeInBits() == WideDstTy.getSizeInBits()) { 971 MIRBuilder.buildMerge(DstReg, NewMergeRegs); 972 } else { 973 auto FinalMerge = MIRBuilder.buildMerge(WideDstTy, NewMergeRegs); 974 MIRBuilder.buildTrunc(DstReg, FinalMerge.getReg(0)); 975 } 976 977 MI.eraseFromParent(); 978 return Legalized; 979 } 980 981 LegalizerHelper::LegalizeResult 982 LegalizerHelper::widenScalarUnmergeValues(MachineInstr &MI, unsigned TypeIdx, 983 LLT WideTy) { 984 if (TypeIdx != 0) 985 return UnableToLegalize; 986 987 unsigned NumDst = MI.getNumOperands() - 1; 988 Register SrcReg = MI.getOperand(NumDst).getReg(); 989 LLT SrcTy = MRI.getType(SrcReg); 990 if (!SrcTy.isScalar()) 991 return UnableToLegalize; 992 993 Register Dst0Reg = MI.getOperand(0).getReg(); 994 LLT DstTy = MRI.getType(Dst0Reg); 995 if (!DstTy.isScalar()) 996 return UnableToLegalize; 997 998 unsigned NewSrcSize = NumDst * WideTy.getSizeInBits(); 999 LLT NewSrcTy = LLT::scalar(NewSrcSize); 1000 unsigned SizeDiff = WideTy.getSizeInBits() - DstTy.getSizeInBits(); 1001 1002 auto WideSrc = MIRBuilder.buildZExt(NewSrcTy, SrcReg); 1003 1004 for (unsigned I = 1; I != NumDst; ++I) { 1005 auto ShiftAmt = MIRBuilder.buildConstant(NewSrcTy, SizeDiff * I); 1006 auto Shl = MIRBuilder.buildShl(NewSrcTy, WideSrc, ShiftAmt); 1007 WideSrc = MIRBuilder.buildOr(NewSrcTy, WideSrc, Shl); 1008 } 1009 1010 Observer.changingInstr(MI); 1011 1012 MI.getOperand(NumDst).setReg(WideSrc->getOperand(0).getReg()); 1013 for (unsigned I = 0; I != NumDst; ++I) 1014 widenScalarDst(MI, WideTy, I); 1015 1016 Observer.changedInstr(MI); 1017 1018 return Legalized; 1019 } 1020 1021 LegalizerHelper::LegalizeResult 1022 LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, 1023 LLT WideTy) { 1024 Register DstReg = MI.getOperand(0).getReg(); 1025 Register SrcReg = MI.getOperand(1).getReg(); 1026 LLT SrcTy = MRI.getType(SrcReg); 1027 1028 LLT DstTy = MRI.getType(DstReg); 1029 unsigned Offset = MI.getOperand(2).getImm(); 1030 1031 if (TypeIdx == 0) { 1032 if (SrcTy.isVector() || DstTy.isVector()) 1033 return UnableToLegalize; 1034 1035 SrcOp Src(SrcReg); 1036 if (SrcTy.isPointer()) { 1037 // Extracts from pointers can be handled only if they are really just 1038 // simple integers. 1039 const DataLayout &DL = MIRBuilder.getDataLayout(); 1040 if (DL.isNonIntegralAddressSpace(SrcTy.getAddressSpace())) 1041 return UnableToLegalize; 1042 1043 LLT SrcAsIntTy = LLT::scalar(SrcTy.getSizeInBits()); 1044 Src = MIRBuilder.buildPtrToInt(SrcAsIntTy, Src); 1045 SrcTy = SrcAsIntTy; 1046 } 1047 1048 if (DstTy.isPointer()) 1049 return UnableToLegalize; 1050 1051 if (Offset == 0) { 1052 // Avoid a shift in the degenerate case. 1053 MIRBuilder.buildTrunc(DstReg, 1054 MIRBuilder.buildAnyExtOrTrunc(WideTy, Src)); 1055 MI.eraseFromParent(); 1056 return Legalized; 1057 } 1058 1059 // Do a shift in the source type. 
1060 LLT ShiftTy = SrcTy; 1061 if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) { 1062 Src = MIRBuilder.buildAnyExt(WideTy, Src); 1063 ShiftTy = WideTy; 1064 } else if (WideTy.getSizeInBits() > SrcTy.getSizeInBits()) 1065 return UnableToLegalize; 1066 1067 auto LShr = MIRBuilder.buildLShr( 1068 ShiftTy, Src, MIRBuilder.buildConstant(ShiftTy, Offset)); 1069 MIRBuilder.buildTrunc(DstReg, LShr); 1070 MI.eraseFromParent(); 1071 return Legalized; 1072 } 1073 1074 if (SrcTy.isScalar()) { 1075 Observer.changingInstr(MI); 1076 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); 1077 Observer.changedInstr(MI); 1078 return Legalized; 1079 } 1080 1081 if (!SrcTy.isVector()) 1082 return UnableToLegalize; 1083 1084 if (DstTy != SrcTy.getElementType()) 1085 return UnableToLegalize; 1086 1087 if (Offset % SrcTy.getScalarSizeInBits() != 0) 1088 return UnableToLegalize; 1089 1090 Observer.changingInstr(MI); 1091 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); 1092 1093 MI.getOperand(2).setImm((WideTy.getSizeInBits() / SrcTy.getSizeInBits()) * 1094 Offset); 1095 widenScalarDst(MI, WideTy.getScalarType(), 0); 1096 Observer.changedInstr(MI); 1097 return Legalized; 1098 } 1099 1100 LegalizerHelper::LegalizeResult 1101 LegalizerHelper::widenScalarInsert(MachineInstr &MI, unsigned TypeIdx, 1102 LLT WideTy) { 1103 if (TypeIdx != 0) 1104 return UnableToLegalize; 1105 Observer.changingInstr(MI); 1106 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); 1107 widenScalarDst(MI, WideTy); 1108 Observer.changedInstr(MI); 1109 return Legalized; 1110 } 1111 1112 LegalizerHelper::LegalizeResult 1113 LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { 1114 MIRBuilder.setInstr(MI); 1115 1116 switch (MI.getOpcode()) { 1117 default: 1118 return UnableToLegalize; 1119 case TargetOpcode::G_EXTRACT: 1120 return widenScalarExtract(MI, TypeIdx, WideTy); 1121 case TargetOpcode::G_INSERT: 1122 return widenScalarInsert(MI, TypeIdx, WideTy); 1123 case TargetOpcode::G_MERGE_VALUES: 1124 return widenScalarMergeValues(MI, TypeIdx, WideTy); 1125 case TargetOpcode::G_UNMERGE_VALUES: 1126 return widenScalarUnmergeValues(MI, TypeIdx, WideTy); 1127 case TargetOpcode::G_UADDO: 1128 case TargetOpcode::G_USUBO: { 1129 if (TypeIdx == 1) 1130 return UnableToLegalize; // TODO 1131 auto LHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy}, 1132 {MI.getOperand(2).getReg()}); 1133 auto RHSZext = MIRBuilder.buildInstr(TargetOpcode::G_ZEXT, {WideTy}, 1134 {MI.getOperand(3).getReg()}); 1135 unsigned Opcode = MI.getOpcode() == TargetOpcode::G_UADDO 1136 ? TargetOpcode::G_ADD 1137 : TargetOpcode::G_SUB; 1138 // Do the arithmetic in the larger type. 1139 auto NewOp = MIRBuilder.buildInstr(Opcode, {WideTy}, {LHSZext, RHSZext}); 1140 LLT OrigTy = MRI.getType(MI.getOperand(0).getReg()); 1141 APInt Mask = APInt::getAllOnesValue(OrigTy.getSizeInBits()); 1142 auto AndOp = MIRBuilder.buildInstr( 1143 TargetOpcode::G_AND, {WideTy}, 1144 {NewOp, MIRBuilder.buildConstant(WideTy, Mask.getZExtValue())}); 1145 // There is no overflow if the AndOp is the same as NewOp. 1146 MIRBuilder.buildICmp(CmpInst::ICMP_NE, MI.getOperand(1).getReg(), NewOp, 1147 AndOp); 1148 // Now trunc the NewOp to the original result. 
1149 MIRBuilder.buildTrunc(MI.getOperand(0).getReg(), NewOp); 1150 MI.eraseFromParent(); 1151 return Legalized; 1152 } 1153 case TargetOpcode::G_CTTZ: 1154 case TargetOpcode::G_CTTZ_ZERO_UNDEF: 1155 case TargetOpcode::G_CTLZ: 1156 case TargetOpcode::G_CTLZ_ZERO_UNDEF: 1157 case TargetOpcode::G_CTPOP: { 1158 if (TypeIdx == 0) { 1159 Observer.changingInstr(MI); 1160 widenScalarDst(MI, WideTy, 0); 1161 Observer.changedInstr(MI); 1162 return Legalized; 1163 } 1164 1165 Register SrcReg = MI.getOperand(1).getReg(); 1166 1167 // First ZEXT the input. 1168 auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg); 1169 LLT CurTy = MRI.getType(SrcReg); 1170 if (MI.getOpcode() == TargetOpcode::G_CTTZ) { 1171 // The count is the same in the larger type except if the original 1172 // value was zero. This can be handled by setting the bit just off 1173 // the top of the original type. 1174 auto TopBit = 1175 APInt::getOneBitSet(WideTy.getSizeInBits(), CurTy.getSizeInBits()); 1176 MIBSrc = MIRBuilder.buildOr( 1177 WideTy, MIBSrc, MIRBuilder.buildConstant(WideTy, TopBit)); 1178 } 1179 1180 // Perform the operation at the larger size. 1181 auto MIBNewOp = MIRBuilder.buildInstr(MI.getOpcode(), {WideTy}, {MIBSrc}); 1182 // This is already the correct result for CTPOP and CTTZs 1183 if (MI.getOpcode() == TargetOpcode::G_CTLZ || 1184 MI.getOpcode() == TargetOpcode::G_CTLZ_ZERO_UNDEF) { 1185 // The correct result is NewOp - (Difference in widety and current ty). 1186 unsigned SizeDiff = WideTy.getSizeInBits() - CurTy.getSizeInBits(); 1187 MIBNewOp = MIRBuilder.buildInstr( 1188 TargetOpcode::G_SUB, {WideTy}, 1189 {MIBNewOp, MIRBuilder.buildConstant(WideTy, SizeDiff)}); 1190 } 1191 1192 MIRBuilder.buildZExtOrTrunc(MI.getOperand(0), MIBNewOp); 1193 MI.eraseFromParent(); 1194 return Legalized; 1195 } 1196 case TargetOpcode::G_BSWAP: { 1197 Observer.changingInstr(MI); 1198 Register DstReg = MI.getOperand(0).getReg(); 1199 1200 Register ShrReg = MRI.createGenericVirtualRegister(WideTy); 1201 Register DstExt = MRI.createGenericVirtualRegister(WideTy); 1202 Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy); 1203 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); 1204 1205 MI.getOperand(0).setReg(DstExt); 1206 1207 MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt()); 1208 1209 LLT Ty = MRI.getType(DstReg); 1210 unsigned DiffBits = WideTy.getScalarSizeInBits() - Ty.getScalarSizeInBits(); 1211 MIRBuilder.buildConstant(ShiftAmtReg, DiffBits); 1212 MIRBuilder.buildInstr(TargetOpcode::G_LSHR) 1213 .addDef(ShrReg) 1214 .addUse(DstExt) 1215 .addUse(ShiftAmtReg); 1216 1217 MIRBuilder.buildTrunc(DstReg, ShrReg); 1218 Observer.changedInstr(MI); 1219 return Legalized; 1220 } 1221 case TargetOpcode::G_ADD: 1222 case TargetOpcode::G_AND: 1223 case TargetOpcode::G_MUL: 1224 case TargetOpcode::G_OR: 1225 case TargetOpcode::G_XOR: 1226 case TargetOpcode::G_SUB: 1227 // Perform operation at larger width (any extension is fines here, high bits 1228 // don't affect the result) and then truncate the result back to the 1229 // original type. 
1230 Observer.changingInstr(MI); 1231 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); 1232 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); 1233 widenScalarDst(MI, WideTy); 1234 Observer.changedInstr(MI); 1235 return Legalized; 1236 1237 case TargetOpcode::G_SHL: 1238 Observer.changingInstr(MI); 1239 1240 if (TypeIdx == 0) { 1241 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT); 1242 widenScalarDst(MI, WideTy); 1243 } else { 1244 assert(TypeIdx == 1); 1245 // The "number of bits to shift" operand must preserve its value as an 1246 // unsigned integer: 1247 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); 1248 } 1249 1250 Observer.changedInstr(MI); 1251 return Legalized; 1252 1253 case TargetOpcode::G_SDIV: 1254 case TargetOpcode::G_SREM: 1255 case TargetOpcode::G_SMIN: 1256 case TargetOpcode::G_SMAX: 1257 Observer.changingInstr(MI); 1258 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT); 1259 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); 1260 widenScalarDst(MI, WideTy); 1261 Observer.changedInstr(MI); 1262 return Legalized; 1263 1264 case TargetOpcode::G_ASHR: 1265 case TargetOpcode::G_LSHR: 1266 Observer.changingInstr(MI); 1267 1268 if (TypeIdx == 0) { 1269 unsigned CvtOp = MI.getOpcode() == TargetOpcode::G_ASHR ? 1270 TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; 1271 1272 widenScalarSrc(MI, WideTy, 1, CvtOp); 1273 widenScalarDst(MI, WideTy); 1274 } else { 1275 assert(TypeIdx == 1); 1276 // The "number of bits to shift" operand must preserve its value as an 1277 // unsigned integer: 1278 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); 1279 } 1280 1281 Observer.changedInstr(MI); 1282 return Legalized; 1283 case TargetOpcode::G_UDIV: 1284 case TargetOpcode::G_UREM: 1285 case TargetOpcode::G_UMIN: 1286 case TargetOpcode::G_UMAX: 1287 Observer.changingInstr(MI); 1288 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); 1289 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT); 1290 widenScalarDst(MI, WideTy); 1291 Observer.changedInstr(MI); 1292 return Legalized; 1293 1294 case TargetOpcode::G_SELECT: 1295 Observer.changingInstr(MI); 1296 if (TypeIdx == 0) { 1297 // Perform operation at larger width (any extension is fine here, high 1298 // bits don't affect the result) and then truncate the result back to the 1299 // original type. 1300 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ANYEXT); 1301 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT); 1302 widenScalarDst(MI, WideTy); 1303 } else { 1304 bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector(); 1305 // Explicit extension is required here since high bits affect the result. 
1306 widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false)); 1307 } 1308 Observer.changedInstr(MI); 1309 return Legalized; 1310 1311 case TargetOpcode::G_FPTOSI: 1312 case TargetOpcode::G_FPTOUI: 1313 if (TypeIdx != 0) 1314 return UnableToLegalize; 1315 Observer.changingInstr(MI); 1316 widenScalarDst(MI, WideTy); 1317 Observer.changedInstr(MI); 1318 return Legalized; 1319 1320 case TargetOpcode::G_SITOFP: 1321 if (TypeIdx != 1) 1322 return UnableToLegalize; 1323 Observer.changingInstr(MI); 1324 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT); 1325 Observer.changedInstr(MI); 1326 return Legalized; 1327 1328 case TargetOpcode::G_UITOFP: 1329 if (TypeIdx != 1) 1330 return UnableToLegalize; 1331 Observer.changingInstr(MI); 1332 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); 1333 Observer.changedInstr(MI); 1334 return Legalized; 1335 1336 case TargetOpcode::G_LOAD: 1337 case TargetOpcode::G_SEXTLOAD: 1338 case TargetOpcode::G_ZEXTLOAD: 1339 Observer.changingInstr(MI); 1340 widenScalarDst(MI, WideTy); 1341 Observer.changedInstr(MI); 1342 return Legalized; 1343 1344 case TargetOpcode::G_STORE: { 1345 if (TypeIdx != 0) 1346 return UnableToLegalize; 1347 1348 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 1349 if (!isPowerOf2_32(Ty.getSizeInBits())) 1350 return UnableToLegalize; 1351 1352 Observer.changingInstr(MI); 1353 1354 unsigned ExtType = Ty.getScalarSizeInBits() == 1 ? 1355 TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT; 1356 widenScalarSrc(MI, WideTy, 0, ExtType); 1357 1358 Observer.changedInstr(MI); 1359 return Legalized; 1360 } 1361 case TargetOpcode::G_CONSTANT: { 1362 MachineOperand &SrcMO = MI.getOperand(1); 1363 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); 1364 const APInt &Val = SrcMO.getCImm()->getValue().sext(WideTy.getSizeInBits()); 1365 Observer.changingInstr(MI); 1366 SrcMO.setCImm(ConstantInt::get(Ctx, Val)); 1367 1368 widenScalarDst(MI, WideTy); 1369 Observer.changedInstr(MI); 1370 return Legalized; 1371 } 1372 case TargetOpcode::G_FCONSTANT: { 1373 MachineOperand &SrcMO = MI.getOperand(1); 1374 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); 1375 APFloat Val = SrcMO.getFPImm()->getValueAPF(); 1376 bool LosesInfo; 1377 switch (WideTy.getSizeInBits()) { 1378 case 32: 1379 Val.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, 1380 &LosesInfo); 1381 break; 1382 case 64: 1383 Val.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, 1384 &LosesInfo); 1385 break; 1386 default: 1387 return UnableToLegalize; 1388 } 1389 1390 assert(!LosesInfo && "extend should always be lossless"); 1391 1392 Observer.changingInstr(MI); 1393 SrcMO.setFPImm(ConstantFP::get(Ctx, Val)); 1394 1395 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); 1396 Observer.changedInstr(MI); 1397 return Legalized; 1398 } 1399 case TargetOpcode::G_IMPLICIT_DEF: { 1400 Observer.changingInstr(MI); 1401 widenScalarDst(MI, WideTy); 1402 Observer.changedInstr(MI); 1403 return Legalized; 1404 } 1405 case TargetOpcode::G_BRCOND: 1406 Observer.changingInstr(MI); 1407 widenScalarSrc(MI, WideTy, 0, MIRBuilder.getBoolExtOp(false, false)); 1408 Observer.changedInstr(MI); 1409 return Legalized; 1410 1411 case TargetOpcode::G_FCMP: 1412 Observer.changingInstr(MI); 1413 if (TypeIdx == 0) 1414 widenScalarDst(MI, WideTy); 1415 else { 1416 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_FPEXT); 1417 widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_FPEXT); 1418 } 1419 Observer.changedInstr(MI); 1420 return Legalized; 1421 1422 case TargetOpcode::G_ICMP: 
1423 Observer.changingInstr(MI); 1424 if (TypeIdx == 0) 1425 widenScalarDst(MI, WideTy); 1426 else { 1427 unsigned ExtOpcode = CmpInst::isSigned(static_cast<CmpInst::Predicate>( 1428 MI.getOperand(1).getPredicate())) 1429 ? TargetOpcode::G_SEXT 1430 : TargetOpcode::G_ZEXT; 1431 widenScalarSrc(MI, WideTy, 2, ExtOpcode); 1432 widenScalarSrc(MI, WideTy, 3, ExtOpcode); 1433 } 1434 Observer.changedInstr(MI); 1435 return Legalized; 1436 1437 case TargetOpcode::G_GEP: 1438 assert(TypeIdx == 1 && "unable to legalize pointer of GEP"); 1439 Observer.changingInstr(MI); 1440 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); 1441 Observer.changedInstr(MI); 1442 return Legalized; 1443 1444 case TargetOpcode::G_PHI: { 1445 assert(TypeIdx == 0 && "Expecting only Idx 0"); 1446 1447 Observer.changingInstr(MI); 1448 for (unsigned I = 1; I < MI.getNumOperands(); I += 2) { 1449 MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); 1450 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); 1451 widenScalarSrc(MI, WideTy, I, TargetOpcode::G_ANYEXT); 1452 } 1453 1454 MachineBasicBlock &MBB = *MI.getParent(); 1455 MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); 1456 widenScalarDst(MI, WideTy); 1457 Observer.changedInstr(MI); 1458 return Legalized; 1459 } 1460 case TargetOpcode::G_EXTRACT_VECTOR_ELT: { 1461 if (TypeIdx == 0) { 1462 Register VecReg = MI.getOperand(1).getReg(); 1463 LLT VecTy = MRI.getType(VecReg); 1464 Observer.changingInstr(MI); 1465 1466 widenScalarSrc(MI, LLT::vector(VecTy.getNumElements(), 1467 WideTy.getSizeInBits()), 1468 1, TargetOpcode::G_SEXT); 1469 1470 widenScalarDst(MI, WideTy, 0); 1471 Observer.changedInstr(MI); 1472 return Legalized; 1473 } 1474 1475 if (TypeIdx != 2) 1476 return UnableToLegalize; 1477 Observer.changingInstr(MI); 1478 widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_SEXT); 1479 Observer.changedInstr(MI); 1480 return Legalized; 1481 } 1482 case TargetOpcode::G_FADD: 1483 case TargetOpcode::G_FMUL: 1484 case TargetOpcode::G_FSUB: 1485 case TargetOpcode::G_FMA: 1486 case TargetOpcode::G_FNEG: 1487 case TargetOpcode::G_FABS: 1488 case TargetOpcode::G_FCANONICALIZE: 1489 case TargetOpcode::G_FMINNUM: 1490 case TargetOpcode::G_FMAXNUM: 1491 case TargetOpcode::G_FMINNUM_IEEE: 1492 case TargetOpcode::G_FMAXNUM_IEEE: 1493 case TargetOpcode::G_FMINIMUM: 1494 case TargetOpcode::G_FMAXIMUM: 1495 case TargetOpcode::G_FDIV: 1496 case TargetOpcode::G_FREM: 1497 case TargetOpcode::G_FCEIL: 1498 case TargetOpcode::G_FFLOOR: 1499 case TargetOpcode::G_FCOS: 1500 case TargetOpcode::G_FSIN: 1501 case TargetOpcode::G_FLOG10: 1502 case TargetOpcode::G_FLOG: 1503 case TargetOpcode::G_FLOG2: 1504 case TargetOpcode::G_FRINT: 1505 case TargetOpcode::G_FNEARBYINT: 1506 case TargetOpcode::G_FSQRT: 1507 case TargetOpcode::G_FEXP: 1508 case TargetOpcode::G_FEXP2: 1509 case TargetOpcode::G_FPOW: 1510 case TargetOpcode::G_INTRINSIC_TRUNC: 1511 case TargetOpcode::G_INTRINSIC_ROUND: 1512 assert(TypeIdx == 0); 1513 Observer.changingInstr(MI); 1514 1515 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) 1516 widenScalarSrc(MI, WideTy, I, TargetOpcode::G_FPEXT); 1517 1518 widenScalarDst(MI, WideTy, 0, TargetOpcode::G_FPTRUNC); 1519 Observer.changedInstr(MI); 1520 return Legalized; 1521 case TargetOpcode::G_INTTOPTR: 1522 if (TypeIdx != 1) 1523 return UnableToLegalize; 1524 1525 Observer.changingInstr(MI); 1526 widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); 1527 Observer.changedInstr(MI); 1528 return Legalized; 1529 case TargetOpcode::G_PTRTOINT: 1530 if (TypeIdx != 0) 1531 
return UnableToLegalize; 1532 1533 Observer.changingInstr(MI); 1534 widenScalarDst(MI, WideTy, 0); 1535 Observer.changedInstr(MI); 1536 return Legalized; 1537 case TargetOpcode::G_BUILD_VECTOR: { 1538 Observer.changingInstr(MI); 1539 1540 const LLT WideEltTy = TypeIdx == 1 ? WideTy : WideTy.getElementType(); 1541 for (int I = 1, E = MI.getNumOperands(); I != E; ++I) 1542 widenScalarSrc(MI, WideEltTy, I, TargetOpcode::G_ANYEXT); 1543 1544 // Avoid changing the result vector type if the source element type was 1545 // requested. 1546 if (TypeIdx == 1) { 1547 auto &TII = *MI.getMF()->getSubtarget().getInstrInfo(); 1548 MI.setDesc(TII.get(TargetOpcode::G_BUILD_VECTOR_TRUNC)); 1549 } else { 1550 widenScalarDst(MI, WideTy, 0); 1551 } 1552 1553 Observer.changedInstr(MI); 1554 return Legalized; 1555 } 1556 } 1557 } 1558 1559 LegalizerHelper::LegalizeResult 1560 LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) { 1561 using namespace TargetOpcode; 1562 MIRBuilder.setInstr(MI); 1563 1564 switch(MI.getOpcode()) { 1565 default: 1566 return UnableToLegalize; 1567 case TargetOpcode::G_SREM: 1568 case TargetOpcode::G_UREM: { 1569 Register QuotReg = MRI.createGenericVirtualRegister(Ty); 1570 MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV) 1571 .addDef(QuotReg) 1572 .addUse(MI.getOperand(1).getReg()) 1573 .addUse(MI.getOperand(2).getReg()); 1574 1575 Register ProdReg = MRI.createGenericVirtualRegister(Ty); 1576 MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg()); 1577 MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), 1578 ProdReg); 1579 MI.eraseFromParent(); 1580 return Legalized; 1581 } 1582 case TargetOpcode::G_SMULO: 1583 case TargetOpcode::G_UMULO: { 1584 // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the 1585 // result. 1586 Register Res = MI.getOperand(0).getReg(); 1587 Register Overflow = MI.getOperand(1).getReg(); 1588 Register LHS = MI.getOperand(2).getReg(); 1589 Register RHS = MI.getOperand(3).getReg(); 1590 1591 MIRBuilder.buildMul(Res, LHS, RHS); 1592 1593 unsigned Opcode = MI.getOpcode() == TargetOpcode::G_SMULO 1594 ? TargetOpcode::G_SMULH 1595 : TargetOpcode::G_UMULH; 1596 1597 Register HiPart = MRI.createGenericVirtualRegister(Ty); 1598 MIRBuilder.buildInstr(Opcode) 1599 .addDef(HiPart) 1600 .addUse(LHS) 1601 .addUse(RHS); 1602 1603 Register Zero = MRI.createGenericVirtualRegister(Ty); 1604 MIRBuilder.buildConstant(Zero, 0); 1605 1606 // For *signed* multiply, overflow is detected by checking: 1607 // (hi != (lo >> bitwidth-1)) 1608 if (Opcode == TargetOpcode::G_SMULH) { 1609 Register Shifted = MRI.createGenericVirtualRegister(Ty); 1610 Register ShiftAmt = MRI.createGenericVirtualRegister(Ty); 1611 MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1); 1612 MIRBuilder.buildInstr(TargetOpcode::G_ASHR) 1613 .addDef(Shifted) 1614 .addUse(Res) 1615 .addUse(ShiftAmt); 1616 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Shifted); 1617 } else { 1618 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Overflow, HiPart, Zero); 1619 } 1620 MI.eraseFromParent(); 1621 return Legalized; 1622 } 1623 case TargetOpcode::G_FNEG: { 1624 // TODO: Handle vector types once we are able to 1625 // represent them. 
1626 if (Ty.isVector()) 1627 return UnableToLegalize; 1628 Register Res = MI.getOperand(0).getReg(); 1629 Type *ZeroTy; 1630 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext(); 1631 switch (Ty.getSizeInBits()) { 1632 case 16: 1633 ZeroTy = Type::getHalfTy(Ctx); 1634 break; 1635 case 32: 1636 ZeroTy = Type::getFloatTy(Ctx); 1637 break; 1638 case 64: 1639 ZeroTy = Type::getDoubleTy(Ctx); 1640 break; 1641 case 128: 1642 ZeroTy = Type::getFP128Ty(Ctx); 1643 break; 1644 default: 1645 llvm_unreachable("unexpected floating-point type"); 1646 } 1647 ConstantFP &ZeroForNegation = 1648 *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy)); 1649 auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation); 1650 Register SubByReg = MI.getOperand(1).getReg(); 1651 Register ZeroReg = Zero->getOperand(0).getReg(); 1652 MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg}, 1653 MI.getFlags()); 1654 MI.eraseFromParent(); 1655 return Legalized; 1656 } 1657 case TargetOpcode::G_FSUB: { 1658 // Lower (G_FSUB LHS, RHS) to (G_FADD LHS, (G_FNEG RHS)). 1659 // First, check if G_FNEG is marked as Lower. If so, we may 1660 // end up with an infinite loop as G_FSUB is used to legalize G_FNEG. 1661 if (LI.getAction({G_FNEG, {Ty}}).Action == Lower) 1662 return UnableToLegalize; 1663 Register Res = MI.getOperand(0).getReg(); 1664 Register LHS = MI.getOperand(1).getReg(); 1665 Register RHS = MI.getOperand(2).getReg(); 1666 Register Neg = MRI.createGenericVirtualRegister(Ty); 1667 MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS); 1668 MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags()); 1669 MI.eraseFromParent(); 1670 return Legalized; 1671 } 1672 case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: { 1673 Register OldValRes = MI.getOperand(0).getReg(); 1674 Register SuccessRes = MI.getOperand(1).getReg(); 1675 Register Addr = MI.getOperand(2).getReg(); 1676 Register CmpVal = MI.getOperand(3).getReg(); 1677 Register NewVal = MI.getOperand(4).getReg(); 1678 MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal, 1679 **MI.memoperands_begin()); 1680 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal); 1681 MI.eraseFromParent(); 1682 return Legalized; 1683 } 1684 case TargetOpcode::G_LOAD: 1685 case TargetOpcode::G_SEXTLOAD: 1686 case TargetOpcode::G_ZEXTLOAD: { 1687 // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT 1688 Register DstReg = MI.getOperand(0).getReg(); 1689 Register PtrReg = MI.getOperand(1).getReg(); 1690 LLT DstTy = MRI.getType(DstReg); 1691 auto &MMO = **MI.memoperands_begin(); 1692 1693 if (DstTy.getSizeInBits() == MMO.getSize() /* in bytes */ * 8) { 1694 // In the case of G_LOAD, this was a non-extending load already and we're 1695 // about to lower to the same instruction. 
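    // e.g. an s32 G_LOAD with a 4-byte memory operand would just be replaced
    // by an identical s32 G_LOAD, so report failure rather than loop.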
1696 if (MI.getOpcode() == TargetOpcode::G_LOAD) 1697 return UnableToLegalize; 1698 MIRBuilder.buildLoad(DstReg, PtrReg, MMO); 1699 MI.eraseFromParent(); 1700 return Legalized; 1701 } 1702 1703 if (DstTy.isScalar()) { 1704 Register TmpReg = 1705 MRI.createGenericVirtualRegister(LLT::scalar(MMO.getSizeInBits())); 1706 MIRBuilder.buildLoad(TmpReg, PtrReg, MMO); 1707 switch (MI.getOpcode()) { 1708 default: 1709 llvm_unreachable("Unexpected opcode"); 1710 case TargetOpcode::G_LOAD: 1711 MIRBuilder.buildAnyExt(DstReg, TmpReg); 1712 break; 1713 case TargetOpcode::G_SEXTLOAD: 1714 MIRBuilder.buildSExt(DstReg, TmpReg); 1715 break; 1716 case TargetOpcode::G_ZEXTLOAD: 1717 MIRBuilder.buildZExt(DstReg, TmpReg); 1718 break; 1719 } 1720 MI.eraseFromParent(); 1721 return Legalized; 1722 } 1723 1724 return UnableToLegalize; 1725 } 1726 case TargetOpcode::G_CTLZ_ZERO_UNDEF: 1727 case TargetOpcode::G_CTTZ_ZERO_UNDEF: 1728 case TargetOpcode::G_CTLZ: 1729 case TargetOpcode::G_CTTZ: 1730 case TargetOpcode::G_CTPOP: 1731 return lowerBitCount(MI, TypeIdx, Ty); 1732 case G_UADDO: { 1733 Register Res = MI.getOperand(0).getReg(); 1734 Register CarryOut = MI.getOperand(1).getReg(); 1735 Register LHS = MI.getOperand(2).getReg(); 1736 Register RHS = MI.getOperand(3).getReg(); 1737 1738 MIRBuilder.buildAdd(Res, LHS, RHS); 1739 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS); 1740 1741 MI.eraseFromParent(); 1742 return Legalized; 1743 } 1744 case G_UADDE: { 1745 Register Res = MI.getOperand(0).getReg(); 1746 Register CarryOut = MI.getOperand(1).getReg(); 1747 Register LHS = MI.getOperand(2).getReg(); 1748 Register RHS = MI.getOperand(3).getReg(); 1749 Register CarryIn = MI.getOperand(4).getReg(); 1750 1751 Register TmpRes = MRI.createGenericVirtualRegister(Ty); 1752 Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty); 1753 1754 MIRBuilder.buildAdd(TmpRes, LHS, RHS); 1755 MIRBuilder.buildZExt(ZExtCarryIn, CarryIn); 1756 MIRBuilder.buildAdd(Res, TmpRes, ZExtCarryIn); 1757 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, LHS); 1758 1759 MI.eraseFromParent(); 1760 return Legalized; 1761 } 1762 case G_USUBO: { 1763 Register Res = MI.getOperand(0).getReg(); 1764 Register BorrowOut = MI.getOperand(1).getReg(); 1765 Register LHS = MI.getOperand(2).getReg(); 1766 Register RHS = MI.getOperand(3).getReg(); 1767 1768 MIRBuilder.buildSub(Res, LHS, RHS); 1769 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS); 1770 1771 MI.eraseFromParent(); 1772 return Legalized; 1773 } 1774 case G_USUBE: { 1775 Register Res = MI.getOperand(0).getReg(); 1776 Register BorrowOut = MI.getOperand(1).getReg(); 1777 Register LHS = MI.getOperand(2).getReg(); 1778 Register RHS = MI.getOperand(3).getReg(); 1779 Register BorrowIn = MI.getOperand(4).getReg(); 1780 1781 Register TmpRes = MRI.createGenericVirtualRegister(Ty); 1782 Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty); 1783 Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1)); 1784 Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1)); 1785 1786 MIRBuilder.buildSub(TmpRes, LHS, RHS); 1787 MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn); 1788 MIRBuilder.buildSub(Res, TmpRes, ZExtBorrowIn); 1789 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LHS_EQ_RHS, LHS, RHS); 1790 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, LHS_ULT_RHS, LHS, RHS); 1791 MIRBuilder.buildSelect(BorrowOut, LHS_EQ_RHS, BorrowIn, LHS_ULT_RHS); 1792 1793 MI.eraseFromParent(); 1794 return Legalized; 1795 } 1796 case G_UITOFP: 1797 return lowerUITOFP(MI, TypeIdx, Ty); 
1798 case G_SITOFP: 1799 return lowerSITOFP(MI, TypeIdx, Ty); 1800 case G_SMIN: 1801 case G_SMAX: 1802 case G_UMIN: 1803 case G_UMAX: 1804 return lowerMinMax(MI, TypeIdx, Ty); 1805 case G_FCOPYSIGN: 1806 return lowerFCopySign(MI, TypeIdx, Ty); 1807 case G_FMINNUM: 1808 case G_FMAXNUM: 1809 return lowerFMinNumMaxNum(MI); 1810 } 1811 } 1812 1813 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef( 1814 MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { 1815 SmallVector<Register, 2> DstRegs; 1816 1817 unsigned NarrowSize = NarrowTy.getSizeInBits(); 1818 Register DstReg = MI.getOperand(0).getReg(); 1819 unsigned Size = MRI.getType(DstReg).getSizeInBits(); 1820 int NumParts = Size / NarrowSize; 1821 // FIXME: Don't know how to handle the situation where the small vectors 1822 // aren't all the same size yet. 1823 if (Size % NarrowSize != 0) 1824 return UnableToLegalize; 1825 1826 for (int i = 0; i < NumParts; ++i) { 1827 Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy); 1828 MIRBuilder.buildUndef(TmpReg); 1829 DstRegs.push_back(TmpReg); 1830 } 1831 1832 if (NarrowTy.isVector()) 1833 MIRBuilder.buildConcatVectors(DstReg, DstRegs); 1834 else 1835 MIRBuilder.buildBuildVector(DstReg, DstRegs); 1836 1837 MI.eraseFromParent(); 1838 return Legalized; 1839 } 1840 1841 LegalizerHelper::LegalizeResult 1842 LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx, 1843 LLT NarrowTy) { 1844 const unsigned Opc = MI.getOpcode(); 1845 const unsigned NumOps = MI.getNumOperands() - 1; 1846 const unsigned NarrowSize = NarrowTy.getSizeInBits(); 1847 const Register DstReg = MI.getOperand(0).getReg(); 1848 const unsigned Flags = MI.getFlags(); 1849 const LLT DstTy = MRI.getType(DstReg); 1850 const unsigned Size = DstTy.getSizeInBits(); 1851 const int NumParts = Size / NarrowSize; 1852 const LLT EltTy = DstTy.getElementType(); 1853 const unsigned EltSize = EltTy.getSizeInBits(); 1854 const unsigned BitsForNumParts = NarrowSize * NumParts; 1855 1856 // Check if we have any leftovers. If we do, then only handle the case where 1857 // the leftover is one element. 1858 if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size) 1859 return UnableToLegalize; 1860 1861 if (BitsForNumParts != Size) { 1862 Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy); 1863 MIRBuilder.buildUndef(AccumDstReg); 1864 1865 // Handle the pieces which evenly divide into the requested type with 1866 // extract/op/insert sequence. 1867 for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) { 1868 SmallVector<SrcOp, 4> SrcOps; 1869 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { 1870 Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy); 1871 MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset); 1872 SrcOps.push_back(PartOpReg); 1873 } 1874 1875 Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy); 1876 MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags); 1877 1878 Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy); 1879 MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset); 1880 AccumDstReg = PartInsertReg; 1881 } 1882 1883 // Handle the remaining element sized leftover piece. 
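    // e.g. splitting a <3 x s32> operation with NarrowTy <2 x s32>: the loop
    // above handled the <2 x s32> piece at offset 0, and this tail performs
    // the scalar operation on the remaining s32 element at offset 64.
    // (Illustrative sizes.)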
1884 SmallVector<SrcOp, 4> SrcOps; 1885 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { 1886 Register PartOpReg = MRI.createGenericVirtualRegister(EltTy); 1887 MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), 1888 BitsForNumParts); 1889 SrcOps.push_back(PartOpReg); 1890 } 1891 1892 Register PartDstReg = MRI.createGenericVirtualRegister(EltTy); 1893 MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags); 1894 MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts); 1895 MI.eraseFromParent(); 1896 1897 return Legalized; 1898 } 1899 1900 SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs; 1901 1902 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs); 1903 1904 if (NumOps >= 2) 1905 extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src1Regs); 1906 1907 if (NumOps >= 3) 1908 extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs); 1909 1910 for (int i = 0; i < NumParts; ++i) { 1911 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); 1912 1913 if (NumOps == 1) 1914 MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags); 1915 else if (NumOps == 2) { 1916 MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i], Src1Regs[i]}, Flags); 1917 } else if (NumOps == 3) { 1918 MIRBuilder.buildInstr(Opc, {DstReg}, 1919 {Src0Regs[i], Src1Regs[i], Src2Regs[i]}, Flags); 1920 } 1921 1922 DstRegs.push_back(DstReg); 1923 } 1924 1925 if (NarrowTy.isVector()) 1926 MIRBuilder.buildConcatVectors(DstReg, DstRegs); 1927 else 1928 MIRBuilder.buildBuildVector(DstReg, DstRegs); 1929 1930 MI.eraseFromParent(); 1931 return Legalized; 1932 } 1933 1934 // Handle splitting vector operations which need to have the same number of 1935 // elements in each type index, but each type index may have a different element 1936 // type. 1937 // 1938 // e.g. <4 x s64> = G_SHL <4 x s64>, <4 x s32> -> 1939 // <2 x s64> = G_SHL <2 x s64>, <2 x s32> 1940 // <2 x s64> = G_SHL <2 x s64>, <2 x s32> 1941 // 1942 // Also handles some irregular breakdown cases, e.g. 1943 // e.g. <3 x s64> = G_SHL <3 x s64>, <3 x s32> -> 1944 // <2 x s64> = G_SHL <2 x s64>, <2 x s32> 1945 // s64 = G_SHL s64, s32 1946 LegalizerHelper::LegalizeResult 1947 LegalizerHelper::fewerElementsVectorMultiEltType( 1948 MachineInstr &MI, unsigned TypeIdx, LLT NarrowTyArg) { 1949 if (TypeIdx != 0) 1950 return UnableToLegalize; 1951 1952 const LLT NarrowTy0 = NarrowTyArg; 1953 const unsigned NewNumElts = 1954 NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1; 1955 1956 const Register DstReg = MI.getOperand(0).getReg(); 1957 LLT DstTy = MRI.getType(DstReg); 1958 LLT LeftoverTy0; 1959 1960 // All of the operands need to have the same number of elements, so if we can 1961 // determine a type breakdown for the result type, we can for all of the 1962 // source types. 1963 int NumParts = getNarrowTypeBreakDown(DstTy, NarrowTy0, LeftoverTy0).first; 1964 if (NumParts < 0) 1965 return UnableToLegalize; 1966 1967 SmallVector<MachineInstrBuilder, 4> NewInsts; 1968 1969 SmallVector<Register, 4> DstRegs, LeftoverDstRegs; 1970 SmallVector<Register, 4> PartRegs, LeftoverRegs; 1971 1972 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { 1973 LLT LeftoverTy; 1974 Register SrcReg = MI.getOperand(I).getReg(); 1975 LLT SrcTyI = MRI.getType(SrcReg); 1976 LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType()); 1977 LLT LeftoverTyI; 1978 1979 // Split this operand into the requested typed registers, and any leftover 1980 // required to reproduce the original type. 
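    // e.g. a <3 x s64> operand with NewNumElts == 2 is split into one
    // <2 x s64> part register plus an s64 leftover register.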
1981 if (!extractParts(SrcReg, SrcTyI, NarrowTyI, LeftoverTyI, PartRegs, 1982 LeftoverRegs)) 1983 return UnableToLegalize; 1984 1985 if (I == 1) { 1986 // For the first operand, create an instruction for each part and setup 1987 // the result. 1988 for (Register PartReg : PartRegs) { 1989 Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0); 1990 NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) 1991 .addDef(PartDstReg) 1992 .addUse(PartReg)); 1993 DstRegs.push_back(PartDstReg); 1994 } 1995 1996 for (Register LeftoverReg : LeftoverRegs) { 1997 Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0); 1998 NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode()) 1999 .addDef(PartDstReg) 2000 .addUse(LeftoverReg)); 2001 LeftoverDstRegs.push_back(PartDstReg); 2002 } 2003 } else { 2004 assert(NewInsts.size() == PartRegs.size() + LeftoverRegs.size()); 2005 2006 // Add the newly created operand splits to the existing instructions. The 2007 // odd-sized pieces are ordered after the requested NarrowTyArg sized 2008 // pieces. 2009 unsigned InstCount = 0; 2010 for (unsigned J = 0, JE = PartRegs.size(); J != JE; ++J) 2011 NewInsts[InstCount++].addUse(PartRegs[J]); 2012 for (unsigned J = 0, JE = LeftoverRegs.size(); J != JE; ++J) 2013 NewInsts[InstCount++].addUse(LeftoverRegs[J]); 2014 } 2015 2016 PartRegs.clear(); 2017 LeftoverRegs.clear(); 2018 } 2019 2020 // Insert the newly built operations and rebuild the result register. 2021 for (auto &MIB : NewInsts) 2022 MIRBuilder.insertInstr(MIB); 2023 2024 insertParts(DstReg, DstTy, NarrowTy0, DstRegs, LeftoverTy0, LeftoverDstRegs); 2025 2026 MI.eraseFromParent(); 2027 return Legalized; 2028 } 2029 2030 LegalizerHelper::LegalizeResult 2031 LegalizerHelper::fewerElementsVectorCasts(MachineInstr &MI, unsigned TypeIdx, 2032 LLT NarrowTy) { 2033 if (TypeIdx != 0) 2034 return UnableToLegalize; 2035 2036 Register DstReg = MI.getOperand(0).getReg(); 2037 Register SrcReg = MI.getOperand(1).getReg(); 2038 LLT DstTy = MRI.getType(DstReg); 2039 LLT SrcTy = MRI.getType(SrcReg); 2040 2041 LLT NarrowTy0 = NarrowTy; 2042 LLT NarrowTy1; 2043 unsigned NumParts; 2044 2045 if (NarrowTy.isVector()) { 2046 // Uneven breakdown not handled. 
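    // e.g. <4 x s64> = G_FPEXT <4 x s32> with NarrowTy <2 x s64> becomes two
    // G_FPEXTs of <2 x s32> pieces whose results are concatenated.
    // (Illustrative types.)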
2047 NumParts = DstTy.getNumElements() / NarrowTy.getNumElements(); 2048 if (NumParts * NarrowTy.getNumElements() != DstTy.getNumElements()) 2049 return UnableToLegalize; 2050 2051 NarrowTy1 = LLT::vector(NumParts, SrcTy.getElementType().getSizeInBits()); 2052 } else { 2053 NumParts = DstTy.getNumElements(); 2054 NarrowTy1 = SrcTy.getElementType(); 2055 } 2056 2057 SmallVector<Register, 4> SrcRegs, DstRegs; 2058 extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs); 2059 2060 for (unsigned I = 0; I < NumParts; ++I) { 2061 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); 2062 MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode()) 2063 .addDef(DstReg) 2064 .addUse(SrcRegs[I]); 2065 2066 NewInst->setFlags(MI.getFlags()); 2067 DstRegs.push_back(DstReg); 2068 } 2069 2070 if (NarrowTy.isVector()) 2071 MIRBuilder.buildConcatVectors(DstReg, DstRegs); 2072 else 2073 MIRBuilder.buildBuildVector(DstReg, DstRegs); 2074 2075 MI.eraseFromParent(); 2076 return Legalized; 2077 } 2078 2079 LegalizerHelper::LegalizeResult 2080 LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx, 2081 LLT NarrowTy) { 2082 Register DstReg = MI.getOperand(0).getReg(); 2083 Register Src0Reg = MI.getOperand(2).getReg(); 2084 LLT DstTy = MRI.getType(DstReg); 2085 LLT SrcTy = MRI.getType(Src0Reg); 2086 2087 unsigned NumParts; 2088 LLT NarrowTy0, NarrowTy1; 2089 2090 if (TypeIdx == 0) { 2091 unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; 2092 unsigned OldElts = DstTy.getNumElements(); 2093 2094 NarrowTy0 = NarrowTy; 2095 NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : DstTy.getNumElements(); 2096 NarrowTy1 = NarrowTy.isVector() ? 2097 LLT::vector(NarrowTy.getNumElements(), SrcTy.getScalarSizeInBits()) : 2098 SrcTy.getElementType(); 2099 2100 } else { 2101 unsigned NewElts = NarrowTy.isVector() ? NarrowTy.getNumElements() : 1; 2102 unsigned OldElts = SrcTy.getNumElements(); 2103 2104 NumParts = NarrowTy.isVector() ? (OldElts / NewElts) : 2105 NarrowTy.getNumElements(); 2106 NarrowTy0 = LLT::vector(NarrowTy.getNumElements(), 2107 DstTy.getScalarSizeInBits()); 2108 NarrowTy1 = NarrowTy; 2109 } 2110 2111 // FIXME: Don't know how to handle the situation where the small vectors 2112 // aren't all the same size yet. 
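  // e.g. a <3 x s1> = G_ICMP result broken into <2 x ...> pieces would leave
  // an odd element behind, which is rejected here. (Illustrative types.)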
2113 if (NarrowTy1.isVector() && 2114 NarrowTy1.getNumElements() * NumParts != DstTy.getNumElements()) 2115 return UnableToLegalize; 2116 2117 CmpInst::Predicate Pred 2118 = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); 2119 2120 SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs; 2121 extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs); 2122 extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs); 2123 2124 for (unsigned I = 0; I < NumParts; ++I) { 2125 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); 2126 DstRegs.push_back(DstReg); 2127 2128 if (MI.getOpcode() == TargetOpcode::G_ICMP) 2129 MIRBuilder.buildICmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); 2130 else { 2131 MachineInstr *NewCmp 2132 = MIRBuilder.buildFCmp(Pred, DstReg, Src1Regs[I], Src2Regs[I]); 2133 NewCmp->setFlags(MI.getFlags()); 2134 } 2135 } 2136 2137 if (NarrowTy1.isVector()) 2138 MIRBuilder.buildConcatVectors(DstReg, DstRegs); 2139 else 2140 MIRBuilder.buildBuildVector(DstReg, DstRegs); 2141 2142 MI.eraseFromParent(); 2143 return Legalized; 2144 } 2145 2146 LegalizerHelper::LegalizeResult 2147 LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, 2148 LLT NarrowTy) { 2149 Register DstReg = MI.getOperand(0).getReg(); 2150 Register CondReg = MI.getOperand(1).getReg(); 2151 2152 unsigned NumParts = 0; 2153 LLT NarrowTy0, NarrowTy1; 2154 2155 LLT DstTy = MRI.getType(DstReg); 2156 LLT CondTy = MRI.getType(CondReg); 2157 unsigned Size = DstTy.getSizeInBits(); 2158 2159 assert(TypeIdx == 0 || CondTy.isVector()); 2160 2161 if (TypeIdx == 0) { 2162 NarrowTy0 = NarrowTy; 2163 NarrowTy1 = CondTy; 2164 2165 unsigned NarrowSize = NarrowTy0.getSizeInBits(); 2166 // FIXME: Don't know how to handle the situation where the small vectors 2167 // aren't all the same size yet. 2168 if (Size % NarrowSize != 0) 2169 return UnableToLegalize; 2170 2171 NumParts = Size / NarrowSize; 2172 2173 // Need to break down the condition type 2174 if (CondTy.isVector()) { 2175 if (CondTy.getNumElements() == NumParts) 2176 NarrowTy1 = CondTy.getElementType(); 2177 else 2178 NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts, 2179 CondTy.getScalarSizeInBits()); 2180 } 2181 } else { 2182 NumParts = CondTy.getNumElements(); 2183 if (NarrowTy.isVector()) { 2184 // TODO: Handle uneven breakdown. 2185 if (NumParts * NarrowTy.getNumElements() != CondTy.getNumElements()) 2186 return UnableToLegalize; 2187 2188 return UnableToLegalize; 2189 } else { 2190 NarrowTy0 = DstTy.getElementType(); 2191 NarrowTy1 = NarrowTy; 2192 } 2193 } 2194 2195 SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs; 2196 if (CondTy.isVector()) 2197 extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs); 2198 2199 extractParts(MI.getOperand(2).getReg(), NarrowTy0, NumParts, Src1Regs); 2200 extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs); 2201 2202 for (unsigned i = 0; i < NumParts; ++i) { 2203 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); 2204 MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? 
Src0Regs[i] : CondReg, 2205 Src1Regs[i], Src2Regs[i]); 2206 DstRegs.push_back(DstReg); 2207 } 2208 2209 if (NarrowTy0.isVector()) 2210 MIRBuilder.buildConcatVectors(DstReg, DstRegs); 2211 else 2212 MIRBuilder.buildBuildVector(DstReg, DstRegs); 2213 2214 MI.eraseFromParent(); 2215 return Legalized; 2216 } 2217 2218 LegalizerHelper::LegalizeResult 2219 LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, 2220 LLT NarrowTy) { 2221 const Register DstReg = MI.getOperand(0).getReg(); 2222 LLT PhiTy = MRI.getType(DstReg); 2223 LLT LeftoverTy; 2224 2225 // All of the operands need to have the same number of elements, so if we can 2226 // determine a type breakdown for the result type, we can for all of the 2227 // source types. 2228 int NumParts, NumLeftover; 2229 std::tie(NumParts, NumLeftover) 2230 = getNarrowTypeBreakDown(PhiTy, NarrowTy, LeftoverTy); 2231 if (NumParts < 0) 2232 return UnableToLegalize; 2233 2234 SmallVector<Register, 4> DstRegs, LeftoverDstRegs; 2235 SmallVector<MachineInstrBuilder, 4> NewInsts; 2236 2237 const int TotalNumParts = NumParts + NumLeftover; 2238 2239 // Insert the new phis in the result block first. 2240 for (int I = 0; I != TotalNumParts; ++I) { 2241 LLT Ty = I < NumParts ? NarrowTy : LeftoverTy; 2242 Register PartDstReg = MRI.createGenericVirtualRegister(Ty); 2243 NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI) 2244 .addDef(PartDstReg)); 2245 if (I < NumParts) 2246 DstRegs.push_back(PartDstReg); 2247 else 2248 LeftoverDstRegs.push_back(PartDstReg); 2249 } 2250 2251 MachineBasicBlock *MBB = MI.getParent(); 2252 MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI()); 2253 insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs); 2254 2255 SmallVector<Register, 4> PartRegs, LeftoverRegs; 2256 2257 // Insert code to extract the incoming values in each predecessor block. 2258 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 2259 PartRegs.clear(); 2260 LeftoverRegs.clear(); 2261 2262 Register SrcReg = MI.getOperand(I).getReg(); 2263 MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); 2264 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); 2265 2266 LLT Unused; 2267 if (!extractParts(SrcReg, PhiTy, NarrowTy, Unused, PartRegs, 2268 LeftoverRegs)) 2269 return UnableToLegalize; 2270 2271 // Add the newly created operand splits to the existing instructions. The 2272 // odd-sized pieces are ordered after the requested NarrowTyArg sized 2273 // pieces. 2274 for (int J = 0; J != TotalNumParts; ++J) { 2275 MachineInstrBuilder MIB = NewInsts[J]; 2276 MIB.addUse(J < NumParts ? PartRegs[J] : LeftoverRegs[J - NumParts]); 2277 MIB.addMBB(&OpMBB); 2278 } 2279 } 2280 2281 MI.eraseFromParent(); 2282 return Legalized; 2283 } 2284 2285 LegalizerHelper::LegalizeResult 2286 LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, 2287 LLT NarrowTy) { 2288 // FIXME: Don't know how to handle secondary types yet. 2289 if (TypeIdx != 0) 2290 return UnableToLegalize; 2291 2292 MachineMemOperand *MMO = *MI.memoperands_begin(); 2293 2294 // This implementation doesn't work for atomics. Give up instead of doing 2295 // something invalid. 
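  // Splitting a single atomic access into several narrower accesses would not
  // be atomic, so require plain non-atomic orderings on both the success and
  // failure paths.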
2296   if (MMO->getOrdering() != AtomicOrdering::NotAtomic ||
2297       MMO->getFailureOrdering() != AtomicOrdering::NotAtomic)
2298     return UnableToLegalize;
2299
2300   bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
2301   Register ValReg = MI.getOperand(0).getReg();
2302   Register AddrReg = MI.getOperand(1).getReg();
2303   LLT ValTy = MRI.getType(ValReg);
2304
2305   int NumParts = -1;
2306   int NumLeftover = -1;
2307   LLT LeftoverTy;
2308   SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
2309   if (IsLoad) {
2310     std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
2311   } else {
2312     if (extractParts(ValReg, ValTy, NarrowTy, LeftoverTy, NarrowRegs,
2313                      NarrowLeftoverRegs)) {
2314       NumParts = NarrowRegs.size();
2315       NumLeftover = NarrowLeftoverRegs.size();
2316     }
2317   }
2318
2319   if (NumParts == -1)
2320     return UnableToLegalize;
2321
2322   const LLT OffsetTy = LLT::scalar(MRI.getType(AddrReg).getScalarSizeInBits());
2323
2324   unsigned TotalSize = ValTy.getSizeInBits();
2325
2326   // Split the load/store into PartTy sized pieces starting at Offset. If this
2327   // is a load, return the new registers in ValRegs. For a store, each element
2328   // of ValRegs should be PartTy. Returns the next offset that needs to be
2329   // handled.
2330   auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
2331                              unsigned Offset) -> unsigned {
2332     MachineFunction &MF = MIRBuilder.getMF();
2333     unsigned PartSize = PartTy.getSizeInBits();
2334     for (unsigned Idx = 0, E = NumParts; Idx != E && Offset < TotalSize;
2335          Offset += PartSize, ++Idx) {
2336       unsigned ByteSize = PartSize / 8;
2337       unsigned ByteOffset = Offset / 8;
2338       Register NewAddrReg;
2339
2340       MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
2341
2342       MachineMemOperand *NewMMO =
2343           MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
2344
2345       if (IsLoad) {
2346         Register Dst = MRI.createGenericVirtualRegister(PartTy);
2347         ValRegs.push_back(Dst);
2348         MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
2349       } else {
2350         MIRBuilder.buildStore(ValRegs[Idx], NewAddrReg, *NewMMO);
2351       }
2352     }
2353
2354     return Offset;
2355   };
2356
2357   unsigned HandledOffset = splitTypePieces(NarrowTy, NarrowRegs, 0);
2358
2359   // Handle the rest of the register if this isn't an even type breakdown.
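  // e.g. an s96 access split with NarrowTy s32 has no leftover, while s96
  // split with NarrowTy s64 leaves an s32 leftover piece starting at bit
  // offset 64. (Illustrative sizes.)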
2360 if (LeftoverTy.isValid()) 2361 splitTypePieces(LeftoverTy, NarrowLeftoverRegs, HandledOffset); 2362 2363 if (IsLoad) { 2364 insertParts(ValReg, ValTy, NarrowTy, NarrowRegs, 2365 LeftoverTy, NarrowLeftoverRegs); 2366 } 2367 2368 MI.eraseFromParent(); 2369 return Legalized; 2370 } 2371 2372 LegalizerHelper::LegalizeResult 2373 LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx, 2374 LLT NarrowTy) { 2375 using namespace TargetOpcode; 2376 2377 MIRBuilder.setInstr(MI); 2378 switch (MI.getOpcode()) { 2379 case G_IMPLICIT_DEF: 2380 return fewerElementsVectorImplicitDef(MI, TypeIdx, NarrowTy); 2381 case G_AND: 2382 case G_OR: 2383 case G_XOR: 2384 case G_ADD: 2385 case G_SUB: 2386 case G_MUL: 2387 case G_SMULH: 2388 case G_UMULH: 2389 case G_FADD: 2390 case G_FMUL: 2391 case G_FSUB: 2392 case G_FNEG: 2393 case G_FABS: 2394 case G_FCANONICALIZE: 2395 case G_FDIV: 2396 case G_FREM: 2397 case G_FMA: 2398 case G_FPOW: 2399 case G_FEXP: 2400 case G_FEXP2: 2401 case G_FLOG: 2402 case G_FLOG2: 2403 case G_FLOG10: 2404 case G_FNEARBYINT: 2405 case G_FCEIL: 2406 case G_FFLOOR: 2407 case G_FRINT: 2408 case G_INTRINSIC_ROUND: 2409 case G_INTRINSIC_TRUNC: 2410 case G_FCOS: 2411 case G_FSIN: 2412 case G_FSQRT: 2413 case G_BSWAP: 2414 case G_SDIV: 2415 case G_SMIN: 2416 case G_SMAX: 2417 case G_UMIN: 2418 case G_UMAX: 2419 case G_FMINNUM: 2420 case G_FMAXNUM: 2421 case G_FMINNUM_IEEE: 2422 case G_FMAXNUM_IEEE: 2423 case G_FMINIMUM: 2424 case G_FMAXIMUM: 2425 return fewerElementsVectorBasic(MI, TypeIdx, NarrowTy); 2426 case G_SHL: 2427 case G_LSHR: 2428 case G_ASHR: 2429 case G_CTLZ: 2430 case G_CTLZ_ZERO_UNDEF: 2431 case G_CTTZ: 2432 case G_CTTZ_ZERO_UNDEF: 2433 case G_CTPOP: 2434 case G_FCOPYSIGN: 2435 return fewerElementsVectorMultiEltType(MI, TypeIdx, NarrowTy); 2436 case G_ZEXT: 2437 case G_SEXT: 2438 case G_ANYEXT: 2439 case G_FPEXT: 2440 case G_FPTRUNC: 2441 case G_SITOFP: 2442 case G_UITOFP: 2443 case G_FPTOSI: 2444 case G_FPTOUI: 2445 case G_INTTOPTR: 2446 case G_PTRTOINT: 2447 case G_ADDRSPACE_CAST: 2448 return fewerElementsVectorCasts(MI, TypeIdx, NarrowTy); 2449 case G_ICMP: 2450 case G_FCMP: 2451 return fewerElementsVectorCmp(MI, TypeIdx, NarrowTy); 2452 case G_SELECT: 2453 return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy); 2454 case G_PHI: 2455 return fewerElementsVectorPhi(MI, TypeIdx, NarrowTy); 2456 case G_LOAD: 2457 case G_STORE: 2458 return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy); 2459 default: 2460 return UnableToLegalize; 2461 } 2462 } 2463 2464 LegalizerHelper::LegalizeResult 2465 LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt, 2466 const LLT HalfTy, const LLT AmtTy) { 2467 2468 Register InL = MRI.createGenericVirtualRegister(HalfTy); 2469 Register InH = MRI.createGenericVirtualRegister(HalfTy); 2470 MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg()); 2471 2472 if (Amt.isNullValue()) { 2473 MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH}); 2474 MI.eraseFromParent(); 2475 return Legalized; 2476 } 2477 2478 LLT NVT = HalfTy; 2479 unsigned NVTBits = HalfTy.getSizeInBits(); 2480 unsigned VTBits = 2 * NVTBits; 2481 2482 SrcOp Lo(Register(0)), Hi(Register(0)); 2483 if (MI.getOpcode() == TargetOpcode::G_SHL) { 2484 if (Amt.ugt(VTBits)) { 2485 Lo = Hi = MIRBuilder.buildConstant(NVT, 0); 2486 } else if (Amt.ugt(NVTBits)) { 2487 Lo = MIRBuilder.buildConstant(NVT, 0); 2488 Hi = MIRBuilder.buildShl(NVT, InL, 2489 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); 2490 } else if (Amt == NVTBits) { 2491 Lo = 
MIRBuilder.buildConstant(NVT, 0); 2492 Hi = InL; 2493 } else { 2494 Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt)); 2495 auto OrLHS = 2496 MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt)); 2497 auto OrRHS = MIRBuilder.buildLShr( 2498 NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); 2499 Hi = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); 2500 } 2501 } else if (MI.getOpcode() == TargetOpcode::G_LSHR) { 2502 if (Amt.ugt(VTBits)) { 2503 Lo = Hi = MIRBuilder.buildConstant(NVT, 0); 2504 } else if (Amt.ugt(NVTBits)) { 2505 Lo = MIRBuilder.buildLShr(NVT, InH, 2506 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); 2507 Hi = MIRBuilder.buildConstant(NVT, 0); 2508 } else if (Amt == NVTBits) { 2509 Lo = InH; 2510 Hi = MIRBuilder.buildConstant(NVT, 0); 2511 } else { 2512 auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); 2513 2514 auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); 2515 auto OrRHS = MIRBuilder.buildShl( 2516 NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); 2517 2518 Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); 2519 Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst); 2520 } 2521 } else { 2522 if (Amt.ugt(VTBits)) { 2523 Hi = Lo = MIRBuilder.buildAShr( 2524 NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); 2525 } else if (Amt.ugt(NVTBits)) { 2526 Lo = MIRBuilder.buildAShr(NVT, InH, 2527 MIRBuilder.buildConstant(AmtTy, Amt - NVTBits)); 2528 Hi = MIRBuilder.buildAShr(NVT, InH, 2529 MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); 2530 } else if (Amt == NVTBits) { 2531 Lo = InH; 2532 Hi = MIRBuilder.buildAShr(NVT, InH, 2533 MIRBuilder.buildConstant(AmtTy, NVTBits - 1)); 2534 } else { 2535 auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt); 2536 2537 auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst); 2538 auto OrRHS = MIRBuilder.buildShl( 2539 NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)); 2540 2541 Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS); 2542 Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst); 2543 } 2544 } 2545 2546 MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()}); 2547 MI.eraseFromParent(); 2548 2549 return Legalized; 2550 } 2551 2552 // TODO: Optimize if constant shift amount. 2553 LegalizerHelper::LegalizeResult 2554 LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, 2555 LLT RequestedTy) { 2556 if (TypeIdx == 1) { 2557 Observer.changingInstr(MI); 2558 narrowScalarSrc(MI, RequestedTy, 2); 2559 Observer.changedInstr(MI); 2560 return Legalized; 2561 } 2562 2563 Register DstReg = MI.getOperand(0).getReg(); 2564 LLT DstTy = MRI.getType(DstReg); 2565 if (DstTy.isVector()) 2566 return UnableToLegalize; 2567 2568 Register Amt = MI.getOperand(2).getReg(); 2569 LLT ShiftAmtTy = MRI.getType(Amt); 2570 const unsigned DstEltSize = DstTy.getScalarSizeInBits(); 2571 if (DstEltSize % 2 != 0) 2572 return UnableToLegalize; 2573 2574 // Ignore the input type. We can only go to exactly half the size of the 2575 // input. If that isn't small enough, the resulting pieces will be further 2576 // legalized. 2577 const unsigned NewBitSize = DstEltSize / 2; 2578 const LLT HalfTy = LLT::scalar(NewBitSize); 2579 const LLT CondTy = LLT::scalar(1); 2580 2581 if (const MachineInstr *KShiftAmt = 2582 getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) { 2583 return narrowScalarShiftByConstant( 2584 MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy); 2585 } 2586 2587 // TODO: Expand with known bits. 
2588
2589   // Handle the fully general expansion by an unknown amount.
2590   auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);
2591
2592   Register InL = MRI.createGenericVirtualRegister(HalfTy);
2593   Register InH = MRI.createGenericVirtualRegister(HalfTy);
2594   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
2595
2596   auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
2597   auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
2598
2599   auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0);
2600   auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
2601   auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
2602
2603   Register ResultRegs[2];
2604   switch (MI.getOpcode()) {
2605   case TargetOpcode::G_SHL: {
2606     // Short: ShAmt < NewBitSize
2607     auto LoS = MIRBuilder.buildShl(HalfTy, InL, Amt);
2608
2609     auto OrLHS = MIRBuilder.buildShl(HalfTy, InH, Amt);
2610     auto OrRHS = MIRBuilder.buildLShr(HalfTy, InL, AmtLack);
2611     auto HiS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
2612
2613     // Long: ShAmt >= NewBitSize
2614     auto LoL = MIRBuilder.buildConstant(HalfTy, 0);         // Lo part is zero.
2615     auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part.
2616
2617     auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL);
2618     auto Hi = MIRBuilder.buildSelect(
2619         HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL));
2620
2621     ResultRegs[0] = Lo.getReg(0);
2622     ResultRegs[1] = Hi.getReg(0);
2623     break;
2624   }
2625   case TargetOpcode::G_LSHR: {
2626     // Short: ShAmt < NewBitSize
2627     auto HiS = MIRBuilder.buildLShr(HalfTy, InH, Amt);
2628
2629     auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
2630     auto OrRHS = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
2631     auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
2632
2633     // Long: ShAmt >= NewBitSize
2634     auto HiL = MIRBuilder.buildConstant(HalfTy, 0);          // Hi part is zero.
2635     auto LoL = MIRBuilder.buildLShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
2636
2637     auto Lo = MIRBuilder.buildSelect(
2638         HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
2639     auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
2640
2641     ResultRegs[0] = Lo.getReg(0);
2642     ResultRegs[1] = Hi.getReg(0);
2643     break;
2644   }
2645   case TargetOpcode::G_ASHR: {
2646     // Short: ShAmt < NewBitSize
2647     auto HiS = MIRBuilder.buildAShr(HalfTy, InH, Amt);
2648
2649     auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
2650     auto OrRHS = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
2651     auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
2652
2653     // Long: ShAmt >= NewBitSize
2654
2655     // Sign of Hi part.
2656     auto HiL = MIRBuilder.buildAShr(
2657         HalfTy, InH, MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1));
2658
2659     auto LoL = MIRBuilder.buildAShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
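    // Select the final halves. The IsZero select keeps InL unchanged when the
    // amount is zero, since AmtLack would then equal the half width and the
    // shift by AmtLack above would be out of range.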
2660 2661 auto Lo = MIRBuilder.buildSelect( 2662 HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL)); 2663 2664 auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL); 2665 2666 ResultRegs[0] = Lo.getReg(0); 2667 ResultRegs[1] = Hi.getReg(0); 2668 break; 2669 } 2670 default: 2671 llvm_unreachable("not a shift"); 2672 } 2673 2674 MIRBuilder.buildMerge(DstReg, ResultRegs); 2675 MI.eraseFromParent(); 2676 return Legalized; 2677 } 2678 2679 LegalizerHelper::LegalizeResult 2680 LegalizerHelper::moreElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx, 2681 LLT MoreTy) { 2682 assert(TypeIdx == 0 && "Expecting only Idx 0"); 2683 2684 Observer.changingInstr(MI); 2685 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 2686 MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB(); 2687 MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator()); 2688 moreElementsVectorSrc(MI, MoreTy, I); 2689 } 2690 2691 MachineBasicBlock &MBB = *MI.getParent(); 2692 MIRBuilder.setInsertPt(MBB, --MBB.getFirstNonPHI()); 2693 moreElementsVectorDst(MI, MoreTy, 0); 2694 Observer.changedInstr(MI); 2695 return Legalized; 2696 } 2697 2698 LegalizerHelper::LegalizeResult 2699 LegalizerHelper::moreElementsVector(MachineInstr &MI, unsigned TypeIdx, 2700 LLT MoreTy) { 2701 MIRBuilder.setInstr(MI); 2702 unsigned Opc = MI.getOpcode(); 2703 switch (Opc) { 2704 case TargetOpcode::G_IMPLICIT_DEF: { 2705 Observer.changingInstr(MI); 2706 moreElementsVectorDst(MI, MoreTy, 0); 2707 Observer.changedInstr(MI); 2708 return Legalized; 2709 } 2710 case TargetOpcode::G_AND: 2711 case TargetOpcode::G_OR: 2712 case TargetOpcode::G_XOR: 2713 case TargetOpcode::G_SMIN: 2714 case TargetOpcode::G_SMAX: 2715 case TargetOpcode::G_UMIN: 2716 case TargetOpcode::G_UMAX: { 2717 Observer.changingInstr(MI); 2718 moreElementsVectorSrc(MI, MoreTy, 1); 2719 moreElementsVectorSrc(MI, MoreTy, 2); 2720 moreElementsVectorDst(MI, MoreTy, 0); 2721 Observer.changedInstr(MI); 2722 return Legalized; 2723 } 2724 case TargetOpcode::G_EXTRACT: 2725 if (TypeIdx != 1) 2726 return UnableToLegalize; 2727 Observer.changingInstr(MI); 2728 moreElementsVectorSrc(MI, MoreTy, 1); 2729 Observer.changedInstr(MI); 2730 return Legalized; 2731 case TargetOpcode::G_INSERT: 2732 if (TypeIdx != 0) 2733 return UnableToLegalize; 2734 Observer.changingInstr(MI); 2735 moreElementsVectorSrc(MI, MoreTy, 1); 2736 moreElementsVectorDst(MI, MoreTy, 0); 2737 Observer.changedInstr(MI); 2738 return Legalized; 2739 case TargetOpcode::G_SELECT: 2740 if (TypeIdx != 0) 2741 return UnableToLegalize; 2742 if (MRI.getType(MI.getOperand(1).getReg()).isVector()) 2743 return UnableToLegalize; 2744 2745 Observer.changingInstr(MI); 2746 moreElementsVectorSrc(MI, MoreTy, 2); 2747 moreElementsVectorSrc(MI, MoreTy, 3); 2748 moreElementsVectorDst(MI, MoreTy, 0); 2749 Observer.changedInstr(MI); 2750 return Legalized; 2751 case TargetOpcode::G_PHI: 2752 return moreElementsVectorPhi(MI, TypeIdx, MoreTy); 2753 default: 2754 return UnableToLegalize; 2755 } 2756 } 2757 2758 void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs, 2759 ArrayRef<Register> Src1Regs, 2760 ArrayRef<Register> Src2Regs, 2761 LLT NarrowTy) { 2762 MachineIRBuilder &B = MIRBuilder; 2763 unsigned SrcParts = Src1Regs.size(); 2764 unsigned DstParts = DstRegs.size(); 2765 2766 unsigned DstIdx = 0; // Low bits of the result. 
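  // The parts are combined schoolbook style: after the low part below, each
  // higher part DstRegs[DstIdx] sums the low halves of
  // Src1Regs[DstIdx - i] * Src2Regs[i], the high halves of the products from
  // the previous column, and the carries accumulated so far. e.g. with two
  // parts (writing ai/bi for the i-th parts of the two sources),
  // Dst[0] = lo(a0*b0) and Dst[1] = lo(a1*b0) + lo(a0*b1) + hi(a0*b0);
  // carries only need to be tracked while a higher part still follows.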
2767   Register FactorSum =
2768       B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
2769   DstRegs[DstIdx] = FactorSum;
2770
2771   unsigned CarrySumPrevDstIdx;
2772   SmallVector<Register, 4> Factors;
2773
2774   for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
2775     // Collect low parts of muls for DstIdx.
2776     for (unsigned i = DstIdx + 1 < SrcParts ? 0 : DstIdx - SrcParts + 1;
2777          i <= std::min(DstIdx, SrcParts - 1); ++i) {
2778       MachineInstrBuilder Mul =
2779           B.buildMul(NarrowTy, Src1Regs[DstIdx - i], Src2Regs[i]);
2780       Factors.push_back(Mul.getReg(0));
2781     }
2782     // Collect high parts of muls from previous DstIdx.
2783     for (unsigned i = DstIdx < SrcParts ? 0 : DstIdx - SrcParts;
2784          i <= std::min(DstIdx - 1, SrcParts - 1); ++i) {
2785       MachineInstrBuilder Umulh =
2786           B.buildUMulH(NarrowTy, Src1Regs[DstIdx - 1 - i], Src2Regs[i]);
2787       Factors.push_back(Umulh.getReg(0));
2788     }
2789     // Add CarrySum from additions calculated for previous DstIdx.
2790     if (DstIdx != 1) {
2791       Factors.push_back(CarrySumPrevDstIdx);
2792     }
2793
2794     Register CarrySum;
2795     // Add all factors and accumulate all carries into CarrySum.
2796     if (DstIdx != DstParts - 1) {
2797       MachineInstrBuilder Uaddo =
2798           B.buildUAddo(NarrowTy, LLT::scalar(1), Factors[0], Factors[1]);
2799       FactorSum = Uaddo.getReg(0);
2800       CarrySum = B.buildZExt(NarrowTy, Uaddo.getReg(1)).getReg(0);
2801       for (unsigned i = 2; i < Factors.size(); ++i) {
2802         MachineInstrBuilder Uaddo =
2803             B.buildUAddo(NarrowTy, LLT::scalar(1), FactorSum, Factors[i]);
2804         FactorSum = Uaddo.getReg(0);
2805         MachineInstrBuilder Carry = B.buildZExt(NarrowTy, Uaddo.getReg(1));
2806         CarrySum = B.buildAdd(NarrowTy, CarrySum, Carry).getReg(0);
2807       }
2808     } else {
2809       // Since the value for the next index is not calculated, neither is CarrySum.
2810       FactorSum = B.buildAdd(NarrowTy, Factors[0], Factors[1]).getReg(0);
2811       for (unsigned i = 2; i < Factors.size(); ++i)
2812         FactorSum = B.buildAdd(NarrowTy, FactorSum, Factors[i]).getReg(0);
2813     }
2814
2815     CarrySumPrevDstIdx = CarrySum;
2816     DstRegs[DstIdx] = FactorSum;
2817     Factors.clear();
2818   }
2819 }
2820
2821 LegalizerHelper::LegalizeResult
2822 LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
2823   Register DstReg = MI.getOperand(0).getReg();
2824   Register Src1 = MI.getOperand(1).getReg();
2825   Register Src2 = MI.getOperand(2).getReg();
2826
2827   LLT Ty = MRI.getType(DstReg);
2828   if (Ty.isVector())
2829     return UnableToLegalize;
2830
2831   unsigned SrcSize = MRI.getType(Src1).getSizeInBits();
2832   unsigned DstSize = Ty.getSizeInBits();
2833   unsigned NarrowSize = NarrowTy.getSizeInBits();
2834   if (DstSize % NarrowSize != 0 || SrcSize % NarrowSize != 0)
2835     return UnableToLegalize;
2836
2837   unsigned NumDstParts = DstSize / NarrowSize;
2838   unsigned NumSrcParts = SrcSize / NarrowSize;
2839   bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
2840   unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
2841
2842   SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
2843   extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
2844   extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
2845   DstTmpRegs.resize(DstTmpParts);
2846   multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
2847
2848   // Take only high half of registers if this is high mul.
2849   ArrayRef<Register> DstRegs(
2850       IsMulHigh ?
&DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts); 2851 MIRBuilder.buildMerge(DstReg, DstRegs); 2852 MI.eraseFromParent(); 2853 return Legalized; 2854 } 2855 2856 LegalizerHelper::LegalizeResult 2857 LegalizerHelper::narrowScalarExtract(MachineInstr &MI, unsigned TypeIdx, 2858 LLT NarrowTy) { 2859 if (TypeIdx != 1) 2860 return UnableToLegalize; 2861 2862 uint64_t NarrowSize = NarrowTy.getSizeInBits(); 2863 2864 int64_t SizeOp1 = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits(); 2865 // FIXME: add support for when SizeOp1 isn't an exact multiple of 2866 // NarrowSize. 2867 if (SizeOp1 % NarrowSize != 0) 2868 return UnableToLegalize; 2869 int NumParts = SizeOp1 / NarrowSize; 2870 2871 SmallVector<Register, 2> SrcRegs, DstRegs; 2872 SmallVector<uint64_t, 2> Indexes; 2873 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); 2874 2875 Register OpReg = MI.getOperand(0).getReg(); 2876 uint64_t OpStart = MI.getOperand(2).getImm(); 2877 uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); 2878 for (int i = 0; i < NumParts; ++i) { 2879 unsigned SrcStart = i * NarrowSize; 2880 2881 if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) { 2882 // No part of the extract uses this subregister, ignore it. 2883 continue; 2884 } else if (SrcStart == OpStart && NarrowTy == MRI.getType(OpReg)) { 2885 // The entire subregister is extracted, forward the value. 2886 DstRegs.push_back(SrcRegs[i]); 2887 continue; 2888 } 2889 2890 // OpSegStart is where this destination segment would start in OpReg if it 2891 // extended infinitely in both directions. 2892 int64_t ExtractOffset; 2893 uint64_t SegSize; 2894 if (OpStart < SrcStart) { 2895 ExtractOffset = 0; 2896 SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart); 2897 } else { 2898 ExtractOffset = OpStart - SrcStart; 2899 SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize); 2900 } 2901 2902 Register SegReg = SrcRegs[i]; 2903 if (ExtractOffset != 0 || SegSize != NarrowSize) { 2904 // A genuine extract is needed. 2905 SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); 2906 MIRBuilder.buildExtract(SegReg, SrcRegs[i], ExtractOffset); 2907 } 2908 2909 DstRegs.push_back(SegReg); 2910 } 2911 2912 Register DstReg = MI.getOperand(0).getReg(); 2913 if(MRI.getType(DstReg).isVector()) 2914 MIRBuilder.buildBuildVector(DstReg, DstRegs); 2915 else 2916 MIRBuilder.buildMerge(DstReg, DstRegs); 2917 MI.eraseFromParent(); 2918 return Legalized; 2919 } 2920 2921 LegalizerHelper::LegalizeResult 2922 LegalizerHelper::narrowScalarInsert(MachineInstr &MI, unsigned TypeIdx, 2923 LLT NarrowTy) { 2924 // FIXME: Don't know how to handle secondary types yet. 2925 if (TypeIdx != 0) 2926 return UnableToLegalize; 2927 2928 uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); 2929 uint64_t NarrowSize = NarrowTy.getSizeInBits(); 2930 2931 // FIXME: add support for when SizeOp0 isn't an exact multiple of 2932 // NarrowSize. 
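  // e.g. a G_INSERT into an s96 destination can be narrowed with s32 pieces
  // (three parts), but not with s64 here. (Illustrative sizes.)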
2933 if (SizeOp0 % NarrowSize != 0) 2934 return UnableToLegalize; 2935 2936 int NumParts = SizeOp0 / NarrowSize; 2937 2938 SmallVector<Register, 2> SrcRegs, DstRegs; 2939 SmallVector<uint64_t, 2> Indexes; 2940 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); 2941 2942 Register OpReg = MI.getOperand(2).getReg(); 2943 uint64_t OpStart = MI.getOperand(3).getImm(); 2944 uint64_t OpSize = MRI.getType(OpReg).getSizeInBits(); 2945 for (int i = 0; i < NumParts; ++i) { 2946 unsigned DstStart = i * NarrowSize; 2947 2948 if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) { 2949 // No part of the insert affects this subregister, forward the original. 2950 DstRegs.push_back(SrcRegs[i]); 2951 continue; 2952 } else if (DstStart == OpStart && NarrowTy == MRI.getType(OpReg)) { 2953 // The entire subregister is defined by this insert, forward the new 2954 // value. 2955 DstRegs.push_back(OpReg); 2956 continue; 2957 } 2958 2959 // OpSegStart is where this destination segment would start in OpReg if it 2960 // extended infinitely in both directions. 2961 int64_t ExtractOffset, InsertOffset; 2962 uint64_t SegSize; 2963 if (OpStart < DstStart) { 2964 InsertOffset = 0; 2965 ExtractOffset = DstStart - OpStart; 2966 SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart); 2967 } else { 2968 InsertOffset = OpStart - DstStart; 2969 ExtractOffset = 0; 2970 SegSize = 2971 std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart); 2972 } 2973 2974 Register SegReg = OpReg; 2975 if (ExtractOffset != 0 || SegSize != OpSize) { 2976 // A genuine extract is needed. 2977 SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize)); 2978 MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset); 2979 } 2980 2981 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy); 2982 MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset); 2983 DstRegs.push_back(DstReg); 2984 } 2985 2986 assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered"); 2987 Register DstReg = MI.getOperand(0).getReg(); 2988 if(MRI.getType(DstReg).isVector()) 2989 MIRBuilder.buildBuildVector(DstReg, DstRegs); 2990 else 2991 MIRBuilder.buildMerge(DstReg, DstRegs); 2992 MI.eraseFromParent(); 2993 return Legalized; 2994 } 2995 2996 LegalizerHelper::LegalizeResult 2997 LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx, 2998 LLT NarrowTy) { 2999 Register DstReg = MI.getOperand(0).getReg(); 3000 LLT DstTy = MRI.getType(DstReg); 3001 3002 assert(MI.getNumOperands() == 3 && TypeIdx == 0); 3003 3004 SmallVector<Register, 4> DstRegs, DstLeftoverRegs; 3005 SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs; 3006 SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; 3007 LLT LeftoverTy; 3008 if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy, 3009 Src0Regs, Src0LeftoverRegs)) 3010 return UnableToLegalize; 3011 3012 LLT Unused; 3013 if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, Unused, 3014 Src1Regs, Src1LeftoverRegs)) 3015 llvm_unreachable("inconsistent extractParts result"); 3016 3017 for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) { 3018 auto Inst = MIRBuilder.buildInstr(MI.getOpcode(), {NarrowTy}, 3019 {Src0Regs[I], Src1Regs[I]}); 3020 DstRegs.push_back(Inst->getOperand(0).getReg()); 3021 } 3022 3023 for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) { 3024 auto Inst = MIRBuilder.buildInstr( 3025 MI.getOpcode(), 3026 {LeftoverTy}, {Src0LeftoverRegs[I], Src1LeftoverRegs[I]}); 3027 
DstLeftoverRegs.push_back(Inst->getOperand(0).getReg()); 3028 } 3029 3030 insertParts(DstReg, DstTy, NarrowTy, DstRegs, 3031 LeftoverTy, DstLeftoverRegs); 3032 3033 MI.eraseFromParent(); 3034 return Legalized; 3035 } 3036 3037 LegalizerHelper::LegalizeResult 3038 LegalizerHelper::narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, 3039 LLT NarrowTy) { 3040 if (TypeIdx != 0) 3041 return UnableToLegalize; 3042 3043 Register CondReg = MI.getOperand(1).getReg(); 3044 LLT CondTy = MRI.getType(CondReg); 3045 if (CondTy.isVector()) // TODO: Handle vselect 3046 return UnableToLegalize; 3047 3048 Register DstReg = MI.getOperand(0).getReg(); 3049 LLT DstTy = MRI.getType(DstReg); 3050 3051 SmallVector<Register, 4> DstRegs, DstLeftoverRegs; 3052 SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs; 3053 SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs; 3054 LLT LeftoverTy; 3055 if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy, 3056 Src1Regs, Src1LeftoverRegs)) 3057 return UnableToLegalize; 3058 3059 LLT Unused; 3060 if (!extractParts(MI.getOperand(3).getReg(), DstTy, NarrowTy, Unused, 3061 Src2Regs, Src2LeftoverRegs)) 3062 llvm_unreachable("inconsistent extractParts result"); 3063 3064 for (unsigned I = 0, E = Src1Regs.size(); I != E; ++I) { 3065 auto Select = MIRBuilder.buildSelect(NarrowTy, 3066 CondReg, Src1Regs[I], Src2Regs[I]); 3067 DstRegs.push_back(Select->getOperand(0).getReg()); 3068 } 3069 3070 for (unsigned I = 0, E = Src1LeftoverRegs.size(); I != E; ++I) { 3071 auto Select = MIRBuilder.buildSelect( 3072 LeftoverTy, CondReg, Src1LeftoverRegs[I], Src2LeftoverRegs[I]); 3073 DstLeftoverRegs.push_back(Select->getOperand(0).getReg()); 3074 } 3075 3076 insertParts(DstReg, DstTy, NarrowTy, DstRegs, 3077 LeftoverTy, DstLeftoverRegs); 3078 3079 MI.eraseFromParent(); 3080 return Legalized; 3081 } 3082 3083 LegalizerHelper::LegalizeResult 3084 LegalizerHelper::lowerBitCount(MachineInstr &MI, unsigned TypeIdx, LLT Ty) { 3085 unsigned Opc = MI.getOpcode(); 3086 auto &TII = *MI.getMF()->getSubtarget().getInstrInfo(); 3087 auto isSupported = [this](const LegalityQuery &Q) { 3088 auto QAction = LI.getAction(Q).Action; 3089 return QAction == Legal || QAction == Libcall || QAction == Custom; 3090 }; 3091 switch (Opc) { 3092 default: 3093 return UnableToLegalize; 3094 case TargetOpcode::G_CTLZ_ZERO_UNDEF: { 3095 // This trivially expands to CTLZ. 3096 Observer.changingInstr(MI); 3097 MI.setDesc(TII.get(TargetOpcode::G_CTLZ)); 3098 Observer.changedInstr(MI); 3099 return Legalized; 3100 } 3101 case TargetOpcode::G_CTLZ: { 3102 Register SrcReg = MI.getOperand(1).getReg(); 3103 unsigned Len = Ty.getSizeInBits(); 3104 if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) { 3105 // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero. 3106 auto MIBCtlzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTLZ_ZERO_UNDEF, 3107 {Ty}, {SrcReg}); 3108 auto MIBZero = MIRBuilder.buildConstant(Ty, 0); 3109 auto MIBLen = MIRBuilder.buildConstant(Ty, Len); 3110 auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1), 3111 SrcReg, MIBZero); 3112 MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen, 3113 MIBCtlzZU); 3114 MI.eraseFromParent(); 3115 return Legalized; 3116 } 3117 // for now, we do this: 3118 // NewLen = NextPowerOf2(Len); 3119 // x = x | (x >> 1); 3120 // x = x | (x >> 2); 3121 // ... 
3122     // x = x | (x >>16);
3123     // x = x | (x >>32); // for 64-bit input
3124     // Up to NewLen/2
3125     // return Len - popcount(x);
3126     //
3127     // Ref: "Hacker's Delight" by Henry Warren
3128     Register Op = SrcReg;
3129     unsigned NewLen = PowerOf2Ceil(Len);
3130     for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
3131       auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
3132       auto MIBOp = MIRBuilder.buildInstr(
3133           TargetOpcode::G_OR, {Ty},
3134           {Op, MIRBuilder.buildInstr(TargetOpcode::G_LSHR, {Ty},
3135                                      {Op, MIBShiftAmt})});
3136       Op = MIBOp->getOperand(0).getReg();
3137     }
3138     auto MIBPop = MIRBuilder.buildInstr(TargetOpcode::G_CTPOP, {Ty}, {Op});
3139     MIRBuilder.buildInstr(TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
3140                           {MIRBuilder.buildConstant(Ty, Len), MIBPop});
3141     MI.eraseFromParent();
3142     return Legalized;
3143   }
3144   case TargetOpcode::G_CTTZ_ZERO_UNDEF: {
3145     // This trivially expands to CTTZ.
3146     Observer.changingInstr(MI);
3147     MI.setDesc(TII.get(TargetOpcode::G_CTTZ));
3148     Observer.changedInstr(MI);
3149     return Legalized;
3150   }
3151   case TargetOpcode::G_CTTZ: {
3152     Register SrcReg = MI.getOperand(1).getReg();
3153     unsigned Len = Ty.getSizeInBits();
3154     if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
3155       // If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
3156       // zero.
3157       auto MIBCttzZU = MIRBuilder.buildInstr(TargetOpcode::G_CTTZ_ZERO_UNDEF,
3158                                              {Ty}, {SrcReg});
3159       auto MIBZero = MIRBuilder.buildConstant(Ty, 0);
3160       auto MIBLen = MIRBuilder.buildConstant(Ty, Len);
3161       auto MIBICmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, LLT::scalar(1),
3162                                           SrcReg, MIBZero);
3163       MIRBuilder.buildSelect(MI.getOperand(0).getReg(), MIBICmp, MIBLen,
3164                              MIBCttzZU);
3165       MI.eraseFromParent();
3166       return Legalized;
3167     }
3168     // for now, we use: { return popcount(~x & (x - 1)); }
3169     // unless the target has ctlz but not ctpop, in which case we use:
3170     // { return 32 - nlz(~x & (x-1)); }
3171     // Ref: "Hacker's Delight" by Henry Warren
3172     auto MIBCstNeg1 = MIRBuilder.buildConstant(Ty, -1);
3173     auto MIBNot =
3174         MIRBuilder.buildInstr(TargetOpcode::G_XOR, {Ty}, {SrcReg, MIBCstNeg1});
3175     auto MIBTmp = MIRBuilder.buildInstr(
3176         TargetOpcode::G_AND, {Ty},
3177         {MIBNot, MIRBuilder.buildInstr(TargetOpcode::G_ADD, {Ty},
3178                                        {SrcReg, MIBCstNeg1})});
3179     if (!isSupported({TargetOpcode::G_CTPOP, {Ty, Ty}}) &&
3180         isSupported({TargetOpcode::G_CTLZ, {Ty, Ty}})) {
3181       auto MIBCstLen = MIRBuilder.buildConstant(Ty, Len);
3182       MIRBuilder.buildInstr(
3183           TargetOpcode::G_SUB, {MI.getOperand(0).getReg()},
3184           {MIBCstLen,
3185            MIRBuilder.buildInstr(TargetOpcode::G_CTLZ, {Ty}, {MIBTmp})});
3186       MI.eraseFromParent();
3187       return Legalized;
3188     }
3189     MI.setDesc(TII.get(TargetOpcode::G_CTPOP));
3190     MI.getOperand(1).setReg(MIBTmp->getOperand(0).getReg());
3191     return Legalized;
3192   }
3193   }
3194 }
3195
3196 // Expand s32 = G_UITOFP s64 using bit operations to an IEEE float
3197 // representation.
3198 LegalizerHelper::LegalizeResult
3199 LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
3200   Register Dst = MI.getOperand(0).getReg();
3201   Register Src = MI.getOperand(1).getReg();
3202   const LLT S64 = LLT::scalar(64);
3203   const LLT S32 = LLT::scalar(32);
3204   const LLT S1 = LLT::scalar(1);
3205
3206   assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S32);
3207
3208   // unsigned cul2f(ulong u) {
3209   //   uint lz = clz(u);
3210   //   uint e = (u != 0) ? 127U + 63U - lz : 0;
3211   //   u = (u << lz) & 0x7fffffffffffffffUL;
3212   //   ulong t = u & 0xffffffffffUL;
3213   //   uint v = (e << 23) | (uint)(u >> 40);
3214   //   uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
3215   //   return as_float(v + r);
3216   // }
3217
3218   auto Zero32 = MIRBuilder.buildConstant(S32, 0);
3219   auto Zero64 = MIRBuilder.buildConstant(S64, 0);
3220
3221   auto LZ = MIRBuilder.buildCTLZ_ZERO_UNDEF(S32, Src);
3222
3223   auto K = MIRBuilder.buildConstant(S32, 127U + 63U);
3224   auto Sub = MIRBuilder.buildSub(S32, K, LZ);
3225
3226   auto NotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, Src, Zero64);
3227   auto E = MIRBuilder.buildSelect(S32, NotZero, Sub, Zero32);
3228
3229   auto Mask0 = MIRBuilder.buildConstant(S64, (-1ULL) >> 1);
3230   auto ShlLZ = MIRBuilder.buildShl(S64, Src, LZ);
3231
3232   auto U = MIRBuilder.buildAnd(S64, ShlLZ, Mask0);
3233
3234   auto Mask1 = MIRBuilder.buildConstant(S64, 0xffffffffffULL);
3235   auto T = MIRBuilder.buildAnd(S64, U, Mask1);
3236
3237   auto UShl = MIRBuilder.buildLShr(S64, U, MIRBuilder.buildConstant(S64, 40));
3238   auto ShlE = MIRBuilder.buildShl(S32, E, MIRBuilder.buildConstant(S32, 23));
3239   auto V = MIRBuilder.buildOr(S32, ShlE, MIRBuilder.buildTrunc(S32, UShl));
3240
3241   auto C = MIRBuilder.buildConstant(S64, 0x8000000000ULL);
3242   auto RCmp = MIRBuilder.buildICmp(CmpInst::ICMP_UGT, S1, T, C);
3243   auto TCmp = MIRBuilder.buildICmp(CmpInst::ICMP_EQ, S1, T, C);
3244   auto One = MIRBuilder.buildConstant(S32, 1);
3245
3246   auto VTrunc1 = MIRBuilder.buildAnd(S32, V, One);
3247   auto Select0 = MIRBuilder.buildSelect(S32, TCmp, VTrunc1, Zero32);
3248   auto R = MIRBuilder.buildSelect(S32, RCmp, One, Select0);
3249   MIRBuilder.buildAdd(Dst, V, R);
3250   MI.eraseFromParent();
3251   return Legalized;
3252 }
3253
3254 LegalizerHelper::LegalizeResult
3255 LegalizerHelper::lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3256   Register Dst = MI.getOperand(0).getReg();
3257   Register Src = MI.getOperand(1).getReg();
3258   LLT DstTy = MRI.getType(Dst);
3259   LLT SrcTy = MRI.getType(Src);
3260
3261   if (SrcTy != LLT::scalar(64))
3262     return UnableToLegalize;
3263
3264   if (DstTy == LLT::scalar(32)) {
3265     // TODO: SelectionDAG has several alternative expansions to port which may
3266     // be more reasonable depending on the available instructions. If a target
3267     // has sitofp, does not have CTLZ, or can efficiently use f64 as an
3268     // intermediate type, this is probably worse.
3269     return lowerU64ToF32BitOps(MI);
3270   }
3271
3272   return UnableToLegalize;
3273 }
3274
3275 LegalizerHelper::LegalizeResult
3276 LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
3277   Register Dst = MI.getOperand(0).getReg();
3278   Register Src = MI.getOperand(1).getReg();
3279   LLT DstTy = MRI.getType(Dst);
3280   LLT SrcTy = MRI.getType(Src);
3281
3282   const LLT S64 = LLT::scalar(64);
3283   const LLT S32 = LLT::scalar(32);
3284   const LLT S1 = LLT::scalar(1);
3285
3286   if (SrcTy != S64)
3287     return UnableToLegalize;
3288
3289   if (DstTy == S32) {
3290     // signed cl2f(long l) {
3291     //   long s = l >> 63;
3292     //   float r = cul2f((l + s) ^ s);
3293     //   return s ? -r : r;
    Register L = Src;
    auto SignBit = MIRBuilder.buildConstant(S64, 63);
    auto S = MIRBuilder.buildAShr(S64, L, SignBit);

    auto LPlusS = MIRBuilder.buildAdd(S64, L, S);
    auto Xor = MIRBuilder.buildXor(S64, LPlusS, S);
    auto R = MIRBuilder.buildUITOFP(S32, Xor);

    auto RNeg = MIRBuilder.buildFNeg(S32, R);
    auto SignNotZero = MIRBuilder.buildICmp(CmpInst::ICMP_NE, S1, S,
                                            MIRBuilder.buildConstant(S64, 0));
    MIRBuilder.buildSelect(Dst, SignNotZero, RNeg, R);
    MI.eraseFromParent();
    return Legalized;
  }

  return UnableToLegalize;
}

static CmpInst::Predicate minMaxToCompare(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_SMIN:
    return CmpInst::ICMP_SLT;
  case TargetOpcode::G_SMAX:
    return CmpInst::ICMP_SGT;
  case TargetOpcode::G_UMIN:
    return CmpInst::ICMP_ULT;
  case TargetOpcode::G_UMAX:
    return CmpInst::ICMP_UGT;
  default:
    llvm_unreachable("not in integer min/max");
  }
}

LegalizerHelper::LegalizeResult
LegalizerHelper::lowerMinMax(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();

  const CmpInst::Predicate Pred = minMaxToCompare(MI.getOpcode());
  LLT CmpType = MRI.getType(Dst).changeElementSize(1);

  // min/max(x, y) lowers to: select (icmp pred x, y), x, y.
  auto Cmp = MIRBuilder.buildICmp(Pred, CmpType, Src0, Src1);
  MIRBuilder.buildSelect(Dst, Cmp, Src0, Src1);

  MI.eraseFromParent();
  return Legalized;
}

LegalizerHelper::LegalizeResult
LegalizerHelper::lowerFCopySign(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();

  const LLT Src0Ty = MRI.getType(Src0);
  const LLT Src1Ty = MRI.getType(Src1);

  const int Src0Size = Src0Ty.getScalarSizeInBits();
  const int Src1Size = Src1Ty.getScalarSizeInBits();

  auto SignBitMask = MIRBuilder.buildConstant(
      Src0Ty, APInt::getSignMask(Src0Size));

  auto NotSignBitMask = MIRBuilder.buildConstant(
      Src0Ty, APInt::getLowBitsSet(Src0Size, Src0Size - 1));

  auto And0 = MIRBuilder.buildAnd(Src0Ty, Src0, NotSignBitMask);
  MachineInstr *Or;

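  // Each case below ORs the cleared magnitude of Src0 with the isolated sign
  // bit of Src1, first moving that bit into Src0's sign position when the
  // operand widths differ. For instance, for f64 = G_FCOPYSIGN f64, f32 the
  // f32 sign bit at position 31 is zero-extended and shifted left by
  // 64 - 32 = 32 to reach bit 63; for f32 = G_FCOPYSIGN f32, f64 the f64 sign
  // bit at position 63 is shifted right by 32 and truncated so it lands on
  // bit 31.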
  if (Src0Ty == Src1Ty) {
    auto And1 = MIRBuilder.buildAnd(Src1Ty, Src1, SignBitMask);
    Or = MIRBuilder.buildOr(Dst, And0, And1);
  } else if (Src0Size > Src1Size) {
    auto ShiftAmt = MIRBuilder.buildConstant(Src0Ty, Src0Size - Src1Size);
    auto Zext = MIRBuilder.buildZExt(Src0Ty, Src1);
    auto Shift = MIRBuilder.buildShl(Src0Ty, Zext, ShiftAmt);
    auto And1 = MIRBuilder.buildAnd(Src0Ty, Shift, SignBitMask);
    Or = MIRBuilder.buildOr(Dst, And0, And1);
  } else {
    auto ShiftAmt = MIRBuilder.buildConstant(Src1Ty, Src1Size - Src0Size);
    auto Shift = MIRBuilder.buildLShr(Src1Ty, Src1, ShiftAmt);
    auto Trunc = MIRBuilder.buildTrunc(Src0Ty, Shift);
    auto And1 = MIRBuilder.buildAnd(Src0Ty, Trunc, SignBitMask);
    Or = MIRBuilder.buildOr(Dst, And0, And1);
  }

  // Be careful about setting nsz/nnan/ninf on every instruction, since the
  // constants are a nan and -0.0, but the final result should preserve
  // everything.
  if (unsigned Flags = MI.getFlags())
    Or->setFlags(Flags);

  MI.eraseFromParent();
  return Legalized;
}

LegalizerHelper::LegalizeResult
LegalizerHelper::lowerFMinNumMaxNum(MachineInstr &MI) {
  unsigned NewOp = MI.getOpcode() == TargetOpcode::G_FMINNUM ?
    TargetOpcode::G_FMINNUM_IEEE : TargetOpcode::G_FMAXNUM_IEEE;

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  LLT Ty = MRI.getType(Dst);

  if (!MI.getFlag(MachineInstr::FmNoNans)) {
    // Insert canonicalizes if we may need to quiet a signaling NaN to get
    // correct sNaN behavior.

    // Note this must be done here, and not as an optimization combine,
    // because in the absence of a dedicated quiet-sNaN instruction we are
    // using the general-purpose G_FCANONICALIZE.
    if (!isKnownNeverSNaN(Src0, MRI))
      Src0 = MIRBuilder.buildFCanonicalize(Ty, Src0, MI.getFlags()).getReg(0);

    if (!isKnownNeverSNaN(Src1, MRI))
      Src1 = MIRBuilder.buildFCanonicalize(Ty, Src1, MI.getFlags()).getReg(0);
  }

  // Once any signaling NaNs have been quieted (or NaNs are known to be
  // absent), it is safe to simply replace this with the IEEE variant.
  MIRBuilder.buildInstr(NewOp, {Dst}, {Src0, Src1}, MI.getFlags());
  MI.eraseFromParent();
  return Legalized;
}
