//===- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
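//
// For example, given IR such as "%sum = add i32 %x, 42", fast-isel will
// typically fold the constant into the instruction's immediate field when the
// target's instruction descriptions allow it, instead of materializing 42 in
// a register first.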
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fall back to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization"
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
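      // For example, 4.0 converts exactly to the integer 4, so it can be
      // rematialized as a sint_to_fp of that integer constant whenever the
      // conversion below reports an exact result.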
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

Register FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
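  // For example, an i32 index on a 64-bit target is sign-extended to i64,
  // while an index wider than the pointer is truncated.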
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
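      // A variable index follows: first fold any pending constant offset into
      // N, then add the scaled index to the updated base.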
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      Register IdxN = getRegForGEPIndex(Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
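
  // When ForceRetVoidTy is set (the AnyReg patchpoint case), the callee is
  // lowered as if it returned void; selectPatchpoint adds the explicit result
  // register itself.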
  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
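  // The callee may be a constant address (an inttoptr instruction or constant
  // expression), a symbolic GlobalValue, or null; other forms are unsupported.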
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(),
                            AttributeList::ReturnIndex, Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
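  // Each return value is split into one or more legal, register-sized pieces;
  // e.g. an i64 return on a 32-bit target becomes two i32 InputArgs.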
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, the alignment should come from the frontend. The backend
      // will guess if this info is not there, but there are cases it cannot
      // get right.
      if (!MemAlign)
        MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && MF->getFunction()
                        .getFnAttribute("disable-tail-calls")
                        .getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  diagnoseDontCall(*CI);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its
  // operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (!hasDebugInfo)\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (bad/undef address)\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (Register Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      auto Builder =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
                  DI->getVariable(), DI->getExpression());

      // If using instruction referencing, mutate this into a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
      // the expression, since we don't have an "indirect" flag in
      // DBG_INSTR_REF.
      if (UseInstrRefDebugInfo && Op->isReg()) {
        Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
        Builder->getOperand(1).ChangeToImmediate(0);
        auto *NewExpr = DIExpression::prepend(DI->getExpression(),
                                              DIExpression::DerefBefore);
        Builder->getOperand(3).setMetadata(NewExpr);
      }
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (no materialized reg for address)\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
      // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
      // undef DBG_VALUE to terminate any prior location.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      // See if there's an expression to constant-fold.
      DIExpression *Expr = DI->getExpression();
      if (Expr)
        std::tie(Expr, CI) = Expr->constantFold(CI);
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(Expr);
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(Expr);
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (Register Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      auto Builder =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect,
                  Reg, DI->getVariable(), DI->getExpression());

      // If using instruction referencing, mutate this into a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs.
      if (UseInstrRefDebugInfo) {
        Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
        Builder->getOperand(1).ChangeToImmediate(0);
      }
    } else {
      // We don't know how to handle other cases, so we drop.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the bitcast doesn't change the type, just use the operand value.
  if (SrcVT == DstVT) {
    updateValueMap(I, Op0);
    return true;
  }

  // Otherwise, select a BITCAST opcode.
  Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions: the
    // successor is next in layout order and this block has other non-debug
    // instructions. (If the branch would be the block's only non-debug
    // instruction, we emit it anyway, for more accurate line information.)
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block appear
  // twice in the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
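  // Otherwise, fall back below to flipping the sign bit with an integer XOR.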
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
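  // For example, extracting element 1 of {i32, i64} skips the register(s)
  // holding the leading i32, so (assuming each element needs one register on
  // the target) the result is the base register plus one.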
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, normal call lowering currently uses the DAG-ISEL path, so that
    // the callee of the direct function call instruction will be mapped to the
    // symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    // But FastISel still has the ability to do selection for intrinsics.
    if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::Freeze:
    return selectFreeze(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper around fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
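  // mul x, 8 -> shl x, 3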
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // udiv x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed): check to make sure shift amounts are
  // in range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if the immediate type is legal. If not, we can't use the ri
  // form.
  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg)
    return ResultReg;
  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

Register FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                            unsigned OpNum) {
  if (Op.isVirtual()) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      Register NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   unsigned Op1) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1);
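    // The instruction has no explicit definitions; copy the result out of its
    // first implicit def.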
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, unsigned Op2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              uint32_t Idx) {
  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, 0, Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
}

/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBBs for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead PHIs.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Use the location of the operand if
      // there is one; otherwise use no location, and flushLocalValueMap will
      // fix it.
      DbgLoc = DebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      Register Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan the use chain until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. The target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  Register LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  // If the register has fixups, there may be additional uses through a
  // different alias of the register.
  if (FuncInfo.RegsWithFixups.contains(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  MaybeAlign Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlign();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlign();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo = I->getAAMetadata();

  if (!Alignment) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlign(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           *Alignment, AAInfo, Ranges);
}

CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
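  // With identical operands most predicates have a constant result; the
  // remaining FP predicates reduce to an ordered/unordered check. For example,
  //   fcmp oeq x, x  ->  fcmp ord x, x   (true unless x is a NaN)
  //   icmp ult x, x  ->  false
  // FCMP_TRUE and FCMP_FALSE below stand in for the always-true/always-false
  // results of the integer predicates.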
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}