//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
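//
// For example, given the IR "%r = add i32 %x, 42", the target-independent
// fast path can fold the constant operand into the instruction's immediate
// field and emit a single add-immediate, rather than first materializing 42
// into a register of its own. (A sketch of the idea; the exact instruction
// chosen is target-specific.)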
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
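  // (For example, on function entry the block may already hold ABI copies
  // for incoming arguments; local value materializations must be inserted
  // after them.)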
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return
/// Register().
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization"
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
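      // E.g. the double 4.0 converts to the integer 4 exactly, so it can be
      // rebuilt by materializing 4 and emitting a SINT_TO_FP, avoiding a
      // constant-pool load.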
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return Register();
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

Register FastISel::getRegForGEPIndex(MVT PtrVT, const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
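  // E.g. an i32 GEP index on a 64-bit target is sign-extended to i64 before
  // it participates in pointer arithmetic.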
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
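    // The "exact" flag guarantees the division has no remainder, so an
    // arithmetic right shift by log2 of the divisor yields the same value.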
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getValueType(DL, I->getType()).getSimpleVT();

  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += GTI.getSequentialElementStride(DL) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = GTI.getSequentialElementStride(DL);
      Register IdxN = getRegForGEPIndex(VT, Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
Halt "fast" selection and bail. 595 return false; 596 } 597 N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN); 598 if (!N) // Unhandled operand. Halt "fast" selection and bail. 599 return false; 600 } 601 } 602 if (TotalOffs) { 603 N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT); 604 if (!N) // Unhandled operand. Halt "fast" selection and bail. 605 return false; 606 } 607 608 // We successfully emitted code for the given LLVM Instruction. 609 updateValueMap(I, N); 610 return true; 611 } 612 613 bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops, 614 const CallInst *CI, unsigned StartIdx) { 615 for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) { 616 Value *Val = CI->getArgOperand(i); 617 // Check for constants and encode them with a StackMaps::ConstantOp prefix. 618 if (const auto *C = dyn_cast<ConstantInt>(Val)) { 619 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); 620 Ops.push_back(MachineOperand::CreateImm(C->getSExtValue())); 621 } else if (isa<ConstantPointerNull>(Val)) { 622 Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp)); 623 Ops.push_back(MachineOperand::CreateImm(0)); 624 } else if (auto *AI = dyn_cast<AllocaInst>(Val)) { 625 // Values coming from a stack location also require a special encoding, 626 // but that is added later on by the target specific frame index 627 // elimination implementation. 628 auto SI = FuncInfo.StaticAllocaMap.find(AI); 629 if (SI != FuncInfo.StaticAllocaMap.end()) 630 Ops.push_back(MachineOperand::CreateFI(SI->second)); 631 else 632 return false; 633 } else { 634 Register Reg = getRegForValue(Val); 635 if (!Reg) 636 return false; 637 Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false)); 638 } 639 } 640 return true; 641 } 642 643 bool FastISel::selectStackmap(const CallInst *I) { 644 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>, 645 // [live variables...]) 646 assert(I->getCalledFunction()->getReturnType()->isVoidTy() && 647 "Stackmap cannot return a value."); 648 649 // The stackmap intrinsic only records the live variables (the arguments 650 // passed to it) and emits NOPS (if requested). Unlike the patchpoint 651 // intrinsic, this won't be lowered to a function call. This means we don't 652 // have to worry about calling conventions and target-specific lowering code. 653 // Instead we perform the call lowering right here. 654 // 655 // CALLSEQ_START(0, 0...) 656 // STACKMAP(id, nbytes, ...) 657 // CALLSEQ_END(0, 0) 658 // 659 SmallVector<MachineOperand, 32> Ops; 660 661 // Add the <id> and <numBytes> constants. 662 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) && 663 "Expected a constant integer."); 664 const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)); 665 Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue())); 666 667 assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) && 668 "Expected a constant integer."); 669 const auto *NumBytes = 670 cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)); 671 Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue())); 672 673 // Push live variables for the stack map (skipping the first two arguments 674 // <id> and <numBytes>). 675 if (!addStackMapLiveVars(Ops, I, 2)) 676 return false; 677 678 // We are not adding any register mask info here, because the stackmap doesn't 679 // clobber anything. 680 681 // Add scratch registers as implicit def and early clobber. 
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
  //                                         i32 <numBytes>,
  //                                         i8* <target>,
  //                                         i32 <numArgs>,
  //                                         [Args...],
  //                                         [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Check if we can lower the return type when using anyregcc.
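  // (Under anyregcc the result comes back in an allocator-chosen register,
  // so the return type must map to a simple value type that has a register
  // class.)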
  MVT ValueType;
  if (IsAnyRegCC && HasDef) {
    ValueType =
        TLI.getSimpleValueType(DL, I->getType(), /*AllowUnknown=*/true);
    if (ValueType == MVT::Other)
      return false;
  }

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    assert(ValueType.isValid());
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(ValueType));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
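  // (For anyregcc the call arguments deliberately skipped the
  // calling-convention lowering above; their vregs are attached to the
  // PATCHPOINT directly.)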
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(),
                            AttributeList::ReturnIndex, Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext(),
      CLI.RetTy);

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (EVT VT : RetTys) {
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
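  // (Each IR-level argument attribute is translated below into the
  // ISD::ArgFlagsTy bits that the calling-convention lowering callbacks
  // consume.)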
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = TLI.getByValTypeAlignment(Arg.IndirectType, DL);
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && !CI->isMustTailCall() &&
      MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  diagnoseDontCall(*CI);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().data());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

void FastISel::handleDbgInfo(const Instruction *II) {
  if (!II->hasDbgRecords())
    return;

  // Clear any metadata.
  MIMD = MIMetadata();

  // Reverse order of debug records, because fast-isel walks through backwards.
  for (DbgRecord &DR : llvm::reverse(II->getDbgRecordRange())) {
    flushLocalValueMap();
    recomputeInsertPt();

    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      assert(DLR->getLabel() && "Missing label");
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DLR->getDebugLoc(),
              TII.get(TargetOpcode::DBG_LABEL))
          .addMetadata(DLR->getLabel());
      continue;
    }

    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);

    Value *V = nullptr;
    if (!DVR.hasArgList())
      V = DVR.getVariableLocationOp(0);

    bool Res = false;
    if (DVR.getType() == DbgVariableRecord::LocationType::Value ||
        DVR.getType() == DbgVariableRecord::LocationType::Assign) {
      Res = lowerDbgValue(V, DVR.getExpression(), DVR.getVariable(),
                          DVR.getDebugLoc());
    } else {
      assert(DVR.getType() == DbgVariableRecord::LocationType::Declare);
      if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
        continue;
      Res = lowerDbgDeclare(V, DVR.getExpression(), DVR.getVariable(),
                            DVR.getDebugLoc());
    }

    if (!Res)
      LLVM_DEBUG(dbgs() << "Dropping debug-info for " << DVR << "\n");
  }
}

bool FastISel::lowerDbgValue(const Value *V, DIExpression *Expr,
                             DILocalVariable *Var, const DebugLoc &DL) {
  // This form of DBG_VALUE is target-independent.
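  // (A DBG_VALUE can carry an immediate, a floating-point immediate, a frame
  // index, or a register operand; the cases below pick the appropriate form
  // for the value being described.)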
  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
  if (!V || isa<UndefValue>(V)) {
    // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
    // undef DBG_VALUE to terminate any prior location.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false, 0U, Var, Expr);
    return true;
  }
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    // See if there's an expression to constant-fold.
    if (Expr)
      std::tie(Expr, CI) = Expr->constantFold(CI);
    if (CI->getBitWidth() > 64)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI)
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    else
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue())
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    return true;
  }
  if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF)
        .addImm(0U)
        .addMetadata(Var)
        .addMetadata(Expr);
    return true;
  }
  if (const auto *Arg = dyn_cast<Argument>(V);
      Arg && Expr && Expr->isEntryValue()) {
    // As per the Verifier, this case is only valid for swift async Args.
    assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));

    Register Reg = getRegForValue(Arg);
    for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
      if (Reg == VirtReg || Reg == PhysReg) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false /*IsIndirect*/,
                PhysReg, Var, Expr);
        return true;
      }

    LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
                         "couldn't find a physical register\n");
    return false;
  }
  if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
      SI != FuncInfo.StaticAllocaMap.end()) {
    MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
    bool IsIndirect = false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, FrameIndexOp,
            Var, Expr);
    return true;
  }
  if (Register Reg = lookUpRegForValue(V)) {
    // FIXME: This does not handle register-indirect values at offset 0.
    if (!FuncInfo.MF->useDebugInstrRef()) {
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, Reg, Var,
              Expr);
      return true;
    }
    // If using instruction referencing, produce this as a DBG_INSTR_REF,
    // to be later patched up by finalizeDebugInstrRefs.
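    // (The DW_OP_LLVM_arg 0 prefix added below makes the expression take the
    // first machine operand, i.e. the register, as its input.)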
    SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
        /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
        /* isKill */ false, /* isDead */ false,
        /* isUndef */ false, /* isEarlyClobber */ false,
        /* SubReg */ 0, /* isDebug */ true)});
    SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
    auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
            Var, NewExpr);
    return true;
  }
  return false;
}

bool FastISel::lowerDbgDeclare(const Value *Address, DIExpression *Expr,
                               DILocalVariable *Var, const DebugLoc &DL) {
  if (!Address || isa<UndefValue>(Address)) {
    LLVM_DEBUG(dbgs() << "Dropping debug info (bad/undef address)\n");
    return false;
  }

  std::optional<MachineOperand> Op;
  if (Register Reg = lookUpRegForValue(Address))
    Op = MachineOperand::CreateReg(Reg, false);

  // If we have a VLA that has a "use" in a metadata node that's then used
  // here but it has no other uses, then we have a problem. E.g.,
  //
  //   int foo (const int *x) {
  //     char a[*x];
  //     return 0;
  //   }
  //
  // If we assign 'a' a vreg and fast isel later on has to use the selection
  // DAG isel, it will want to copy the value to the vreg. However, there are
  // no uses, which goes counter to what selection DAG isel expects.
  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
      (!isa<AllocaInst>(Address) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
    Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                   false);

  if (Op) {
    assert(Var->isValidLocationForIntrinsic(DL) &&
           "Expected inlined-at fields to agree");
    if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
      // If using instruction referencing, produce this as a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
      // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
      SmallVector<uint64_t, 3> Ops(
          {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
      auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
              Var, NewExpr);
      return true;
    }

    // A dbg.declare describes the address of a source variable, so lower it
    // into an indirect DBG_VALUE.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op, Var,
            Expr);
    return true;
  }

  // We can't yet handle anything else here because it would require
  // generating code, thus altering codegen because of debug info.
  LLVM_DEBUG(
      dbgs() << "Dropping debug info (no materialized reg for address)\n");
  return false;
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its
  // operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check: {
    Register ResultReg = getRegForValue(ConstantInt::getTrue(II->getType()));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::fake_use:
    // At -O0, we don't need fake use, so just ignore it.
    return true;
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the bitcast doesn't change the type, just use the operand value.
  if (SrcVT == DstVT) {
    updateValueMap(I, Op0);
    return true;
  }

  // Otherwise, select a BITCAST opcode.
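  // (E.g. "bitcast i64 %x to double" typically becomes a GPR-to-FPR register
  // move on targets where the two types live in different register classes.)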
  Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  MIMD = MIMetadata(*I);

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
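    // (The "trap-func-name" attribute turns the trap into a call to a named
    // function, which is left to the SelectionDAG path to lower.)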
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      MIMD = {};
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    MIMD = {};
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  MIMD = {};
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  const BasicBlock *BB = FuncInfo.MBB->getBasicBlock();
  bool BlockHasMultipleInstrs = &BB->front() != &BB->back();
  if (BlockHasMultipleInstrs && FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // This is the unconditional fall-through case, which needs no
    // instructions. (If the branch were the only non-debug instruction in
    // the block, we would emit it anyway, for more accurate line
    // information.)
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block appear
  // twice in the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, MIMD.getDL());
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
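  // (If not, we fall back below to flipping the sign bit by hand; for f64
  // that is an xor with 0x8000000000000000.)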
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  Register ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
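  // For example, for the aggregate {i32, {i32, i32}}, the indices {1, 0}
  // linearize to index 1, so (assuming one register per i32) the result lives
  // one register past the base register.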
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg = ResultReg.id() +
                TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.getMBB(LLVMSucc);
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable: {
    auto UI = cast<UnreachableInst>(I);
    if (!UI->shouldLowerToTrap(TM.Options.TrapUnreachable,
                               TM.Options.NoTrapAfterNoreturn))
      return true;

    return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
  }

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, normal call lowering currently uses the DAG-ISEL path, so that
    // the callee of a direct function call instruction is mapped to the
    // symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    // FastISel can still select intrinsic calls, however.
    if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::Freeze:
    return selectFreeze(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

Register FastISel::fastEmit_(MVT, MVT, unsigned) { return Register(); }

Register FastISel::fastEmit_r(MVT, MVT, unsigned, Register /*Op0*/) {
  return Register();
}

Register FastISel::fastEmit_rr(MVT, MVT, unsigned, Register /*Op0*/,
                               Register /*Op1*/) {
  return Register();
}

Register FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return Register();
}

Register FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return Register();
}

Register FastISel::fastEmit_ri(MVT, MVT, unsigned, Register /*Op0*/,
                               uint64_t /*Imm*/) {
  return Register();
}

/// This method is a wrapper around fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, Register Op0,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
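  // For example, (mul x, 16) becomes (shl x, 4), and (udiv x, 8) becomes
  // (srl x, 3).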
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return Register();

  // First check if immediate type is legal. If not, we can't use the ri form.
  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg)
    return ResultReg;
  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return Register();
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

Register FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                            unsigned OpNum) {
  if (Op.isVirtual()) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      Register NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
  return ResultReg;
}

Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, Register Op0) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, Register Op0,
                                   Register Op1) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1);
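    // The instruction has no explicit defs, so the result is produced in an
    // implicitly-defined physical register; copy it into the virtual result
    // register.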
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, Register Op0,
                                    Register Op1, Register Op2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, Register Op0,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, Register Op0,
                                    uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, Register Op0,
                                    Register Op1, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, Register Op0,
                                              uint32_t Idx) {
  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Op0.isVirtual() && "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, 0, Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
Register FastISel::fastEmitZExtFromI1(MVT VT, Register Op0) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBBs for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (const BasicBlock *SuccBB : successors(LLVMBB)) {
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.getMBB(SuccBB);

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead PHIs.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Use the location of the operand if
      // there is one; otherwise no location, flushLocalValueMap will fix it.
      MIMD = {};
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        MIMD = MIMetadata(*Inst);

      Register Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.emplace_back(&*MBBI++, Reg);
      MIMD = {};
    }
  }

  return true;
}

bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  Register LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  // If the register has fixups, there may be additional uses through a
  // different alias of the register.
  if (FuncInfo.RegsWithFixups.contains(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause
  // generation of other random instructions (like sign extends) for addressing
  // modes; make sure they get inserted in a logical place before the new
  // instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.getMBB(cast<Instruction>(Add)->getParent()) != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  MaybeAlign Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlign();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlign();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo = I->getAAMetadata();

  if (!Alignment) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlign(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           *Alignment, AAInfo, Ranges);
}

CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
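  // For example, with identical operands, fcmp oeq is true iff neither
  // operand is NaN, so it becomes fcmp ord; icmp eq is always true, so it
  // becomes FCMP_TRUE.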
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}