//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
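//
// As an illustrative example of the constant folding mentioned above, IR such
// as
//
//   %sum = add i32 %x, 42
//
// can typically be selected directly as a single add-with-immediate
// instruction, rather than 42 first being materialized into its own virtual
// register.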
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

// FIXME: Remove this after the feature has proven reliable.
static cl::opt<bool> SinkLocalValues("fast-isel-sink-local-values",
                                     cl::init(true), cl::Hidden,
                                     cl::desc("Sink local values in FastISel"));

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

/// Flush the local CSE map and sink anything we can.
void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fall back to SDISel argument lowering code to deal with the sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findSinkableLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return 0;
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't try to sink it.
      return Register();
    }
  }
  return RegDef;
}

void FastISel::flushLocalValueMap() {
  // Try to sink local values down to their first use so that we can give them
  // a better debug location. This has the side effect of shrinking local value
  // live ranges, which helps out fast regalloc.
  if (SinkLocalValues && LastLocalValue != EmitStartPt) {
    // Sink local value materialization instructions between EmitStartPt and
    // LastLocalValue. Visit them bottom-up, starting from LastLocalValue, to
    // avoid inserting into the range that we're iterating over.
    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);

    InstOrderMap OrderMap;
    for (; RI != RE;) {
      MachineInstr &LocalMI = *RI;
      ++RI;
      bool Store = true;
      if (!LocalMI.isSafeToMove(nullptr, Store))
        continue;
      Register DefReg = findSinkableLocalRegDef(LocalMI);
      if (DefReg == 0)
        continue;

      sinkLocalValueMaterialization(LocalMI, DefReg, OrderMap);
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
  LastFlushPoint = FuncInfo.InsertPt;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

static bool isTerminatingEHLabel(MachineBasicBlock *MBB, MachineInstr &MI) {
  // Ignore non-EH labels.
  if (!MI.isEHLabel())
    return false;

  // Any EH label outside a landing pad must be for an invoke. Consider it a
  // terminator.
  if (!MBB->isEHPad())
    return true;

  // If this is a landingpad, the first non-phi instruction will be an EH_LABEL.
  // Don't consider that label to be a terminator.
  return MI.getIterator() != MBB->getFirstNonPHI();
}

/// Build a map of instruction orders. Return the first terminator and its
/// order. Consider EH_LABEL instructions to be terminators as well, since local
/// values for phis after invokes must be materialized before the call.
void FastISel::InstOrderMap::initialize(
    MachineBasicBlock *MBB, MachineBasicBlock::iterator LastFlushPoint) {
  unsigned Order = 0;
  for (MachineInstr &I : *MBB) {
    if (!FirstTerminator &&
        (I.isTerminator() || isTerminatingEHLabel(MBB, I))) {
      FirstTerminator = &I;
      FirstTerminatorOrder = Order;
    }
    Orders[&I] = Order++;

    // We don't need to order instructions past the last flush point.
    if (I.getIterator() == LastFlushPoint)
      break;
  }
}

void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
                                             Register DefReg,
                                             InstOrderMap &OrderMap) {
  // If this register is used by a register fixup, MRI will not contain all
  // the uses until after register fixups, so don't attempt to sink or DCE
  // this instruction. Register fixups typically come from no-op cast
  // instructions, which replace the cast instruction vreg with the local
  // value vreg.
  if (FuncInfo.RegsWithFixups.count(DefReg))
    return;

  // We can DCE this instruction if there are no uses and it wasn't
  // materialized for a successor PHI node.
  bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
  if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
    if (EmitStartPt == &LocalMI)
      EmitStartPt = EmitStartPt->getPrevNode();
    LLVM_DEBUG(dbgs() << "removing dead local value materialization "
                      << LocalMI);
    OrderMap.Orders.erase(&LocalMI);
    LocalMI.eraseFromParent();
    return;
  }

  // Number the instructions if we haven't yet so we can efficiently find the
  // earliest use.
  if (OrderMap.Orders.empty())
    OrderMap.initialize(FuncInfo.MBB, LastFlushPoint);

  // Find the first user in the BB.
  MachineInstr *FirstUser = nullptr;
  unsigned FirstOrder = std::numeric_limits<unsigned>::max();
  for (MachineInstr &UseInst : MRI.use_nodbg_instructions(DefReg)) {
    auto I = OrderMap.Orders.find(&UseInst);
    assert(I != OrderMap.Orders.end() &&
           "local value used by instruction outside local region");
    unsigned UseOrder = I->second;
    if (UseOrder < FirstOrder) {
      FirstOrder = UseOrder;
      FirstUser = &UseInst;
    }
  }

  // The insertion point will be the first terminator or the first user,
  // whichever came first. If there was no terminator, this must be a
  // fallthrough block and the insertion point is the end of the block.
  MachineBasicBlock::instr_iterator SinkPos;
  if (UsedByPHI && OrderMap.FirstTerminatorOrder < FirstOrder) {
    FirstOrder = OrderMap.FirstTerminatorOrder;
    SinkPos = OrderMap.FirstTerminator->getIterator();
  } else if (FirstUser) {
    SinkPos = FirstUser->getIterator();
  } else {
    assert(UsedByPHI && "must be used by a phi if there are no other users");
    SinkPos = FuncInfo.MBB->instr_end();
  }

  // Collect all DBG_VALUEs before the new insertion position so that we can
  // sink them.
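  //
  // Illustrative sketch of the overall effect (pseudo-MIR, x86-flavored
  // mnemonic for illustration only): the materialization sinks to its first
  // use and carries its DBG_VALUE along, shrinking the live range of %0.
  //
  //   before:  %0 = MOV32ri 42 ; DBG_VALUE %0 ; <other insts> ; use of %0
  //   after:   <other insts> ; %0 = MOV32ri 42 ; DBG_VALUE %0 ; use of %0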
  SmallVector<MachineInstr *, 1> DbgValues;
  for (MachineInstr &DbgVal : MRI.use_instructions(DefReg)) {
    if (!DbgVal.isDebugValue())
      continue;
    unsigned UseOrder = OrderMap.Orders[&DbgVal];
    if (UseOrder < FirstOrder)
      DbgValues.push_back(&DbgVal);
  }

  // Sink LocalMI before SinkPos and assign it the same DebugLoc.
  LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
  FuncInfo.MBB->remove(&LocalMI);
  FuncInfo.MBB->insert(SinkPos, &LocalMI);
  if (SinkPos != FuncInfo.MBB->end())
    LocalMI.setDebugLoc(SinkPos->getDebugLoc());

  // Sink any debug values that we've collected.
  for (MachineInstr *DI : DbgValues) {
    FuncInfo.MBB->remove(DI);
    FuncInfo.MBB->insert(SinkPos, DI);
  }
}

bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and now there is more
  // than one use at the Machine Instruction level.
  Register Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
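  // (Illustrative: instructions emitted here land in the local value area at
  // the top of the block, so a constant such as i32 7 is materialized once;
  // later uses in the same block find the cached vreg through LocalValueMap
  // instead of emitting another copy.)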
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
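  // (Illustrative: an Instruction's vreg cached in FuncInfo.ValueMap stays
  // valid in later blocks because SSA guarantees the def dominates every use,
  // whereas a vreg in LocalValueMap is forgotten at the next
  // flushLocalValueMap() and the value is re-materialized per block.)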
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

std::pair<Register, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<Register, bool>(Register(), false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<Register, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (LastFlushPoint == I)
      LastFlushPoint = E;
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
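  // (Illustrative example: for a GEP such as
  //    %p = getelementptr i32, i32* %base, i64 3
  //  on a target where i32 is 4 bytes, the constant index contributes
  //  3 * 4 = 12 to TotalOffs, so one ADD of 12 is emitted rather than one ADD
  //  per index.)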
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<Register, bool> Pair = getRegForGEPIndex(Idx);
      Register IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
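      // (Illustrative: a static alloca is emitted here as a frame-index
      //  operand; prologue/epilogue insertion later rewrites it into a
      //  concrete stack-pointer-relative location for the stack map.)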
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
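  // (Illustrative: for @llvm.experimental.patchpoint.i64(i64 5, i32 12, ...)
  //  this pushes the immediates 5 and 12, matching the <id> and <numBytes>
  //  fields of the intrinsic signature shown above.)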
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
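  // (The PATCHPOINT built above carries the call target and all of its
  //  operands, so the call emitted by the target lowering is now redundant.)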
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
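  // (Illustrative, hypothetical usage: a target typically reaches these
  //  overloads through something like lowerCallTo(Call, "memcpy", 3) when
  //  turning a memory intrinsic into a libcall; the symbol-name overload
  //  above handles the name mangling.)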
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling in
      // the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize =
          DL.getTypeAllocSize(Arg.ByValType ? Arg.ByValType : ElementTy);

      // For ByVal, alignment should come from the frontend. The backend will
      // guess if this info is not there, but there are cases it cannot get
      // right.
      MaybeAlign FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = Align(TLI.getByValTypeAlignment(ElementTy, DL));
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(*FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && MF->getFunction()
                        .getFnAttribute("disable-tail-calls")
                        .getValueAsString() == "true")
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across it by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
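    // (Illustrative: an asm with no constraints, e.g.
    //    call void asm sideeffect "nop", ""()
    //  can be handled here; anything with a constraint string such as
    //  "=r,r" falls back to SelectionDAG.)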
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually it does not make sense to initialize a value, make an unrelated
  // function call, and then use the value, because the value tends to get
  // spilled to the stack. So we move the pointer to the last local value to
  // the beginning of the block, so that all of the values which have already
  // been materialized appear after the call. It also makes sense to skip
  // intrinsics, since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (!hasDebugInfo)\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (bad/undef address)\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (Register Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
              *Op, DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (no materialized reg for address)\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V || isa<UndefValue>(V)) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (Register Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getVariable(), DI->getExpression());
    } else {
      // We don't know how to handle other cases, so we drop.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1572 return false; 1573 bool Op0IsKill = hasTrivialKill(I->getOperand(0)); 1574 1575 // First, try to perform the bitcast by inserting a reg-reg copy. 1576 Register ResultReg; 1577 if (SrcVT == DstVT) { 1578 const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT); 1579 const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT); 1580 // Don't attempt a cross-class copy. It will likely fail. 1581 if (SrcClass == DstClass) { 1582 ResultReg = createResultReg(DstClass); 1583 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1584 TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0); 1585 } 1586 } 1587 1588 // If the reg-reg copy failed, select a BITCAST opcode. 1589 if (!ResultReg) 1590 ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill); 1591 1592 if (!ResultReg) 1593 return false; 1594 1595 updateValueMap(I, ResultReg); 1596 return true; 1597 } 1598 1599 bool FastISel::selectFreeze(const User *I) { 1600 Register Reg = getRegForValue(I->getOperand(0)); 1601 if (!Reg) 1602 // Unhandled operand. 1603 return false; 1604 1605 EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType()); 1606 if (ETy == MVT::Other || !TLI.isTypeLegal(ETy)) 1607 // Unhandled type, bail out. 1608 return false; 1609 1610 MVT Ty = ETy.getSimpleVT(); 1611 const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty); 1612 Register ResultReg = createResultReg(TyRegClass); 1613 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 1614 TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg); 1615 1616 updateValueMap(I, ResultReg); 1617 return true; 1618 } 1619 1620 // Remove local value instructions starting from the instruction after 1621 // SavedLastLocalValue to the current function insert point. 1622 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) 1623 { 1624 MachineInstr *CurLastLocalValue = getLastLocalValue(); 1625 if (CurLastLocalValue != SavedLastLocalValue) { 1626 // Find the first local value instruction to be deleted. 1627 // This is the instruction after SavedLastLocalValue if it is non-NULL. 1628 // Otherwise it's the first instruction in the block. 1629 MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue); 1630 if (SavedLastLocalValue) 1631 ++FirstDeadInst; 1632 else 1633 FirstDeadInst = FuncInfo.MBB->getFirstNonPHI(); 1634 setLastLocalValue(SavedLastLocalValue); 1635 removeDeadCode(FirstDeadInst, FuncInfo.InsertPt); 1636 } 1637 } 1638 1639 bool FastISel::selectInstruction(const Instruction *I) { 1640 MachineInstr *SavedLastLocalValue = getLastLocalValue(); 1641 // Just before the terminator instruction, insert instructions to 1642 // feed PHI nodes in successor blocks. 1643 if (I->isTerminator()) { 1644 if (!handlePHINodesInSuccessorBlocks(I->getParent())) { 1645 // PHI node handling may have generated local value instructions, 1646 // even though it failed to handle all PHI nodes. 1647 // We remove these instructions because SelectionDAGISel will generate 1648 // them again. 1649 removeDeadLocalValueCode(SavedLastLocalValue); 1650 return false; 1651 } 1652 } 1653 1654 // FastISel does not handle any operand bundles except OB_funclet. 
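// For example, a call site carrying a deoptimization bundle, such as
//   call void @f() [ "deopt"(i32 0) ]
// is rejected here and left for SelectionDAG to lower.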
1655 if (auto *Call = dyn_cast<CallBase>(I)) 1656 for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i) 1657 if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet) 1658 return false; 1659
1660 DbgLoc = I->getDebugLoc(); 1661
1662 SavedInsertPt = FuncInfo.InsertPt; 1663
1664 if (const auto *Call = dyn_cast<CallInst>(I)) { 1665 const Function *F = Call->getCalledFunction(); 1666 LibFunc Func; 1667
1668 // As a special case, don't handle calls to builtin library functions that 1669 // may be translated directly to target instructions. 1670 if (F && !F->hasLocalLinkage() && F->hasName() && 1671 LibInfo->getLibFunc(F->getName(), Func) && 1672 LibInfo->hasOptimizedCodeGen(Func)) 1673 return false; 1674
1675 // Don't handle Intrinsic::trap if a trap function is specified. 1676 if (F && F->getIntrinsicID() == Intrinsic::trap && 1677 Call->hasFnAttr("trap-func-name")) 1678 return false; 1679 } 1680
1681 // First, try doing target-independent selection. 1682 if (!SkipTargetIndependentISel) { 1683 if (selectOperator(I, I->getOpcode())) { 1684 ++NumFastIselSuccessIndependent; 1685 DbgLoc = DebugLoc(); 1686 return true; 1687 } 1688 // Remove dead code. 1689 recomputeInsertPt(); 1690 if (SavedInsertPt != FuncInfo.InsertPt) 1691 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt); 1692 SavedInsertPt = FuncInfo.InsertPt; 1693 }
1694 // Next, let the target-specific selector try to handle the instruction. 1695 if (fastSelectInstruction(I)) { 1696 ++NumFastIselSuccessTarget; 1697 DbgLoc = DebugLoc(); 1698 return true; 1699 } 1700 // Remove dead code. 1701 recomputeInsertPt(); 1702 if (SavedInsertPt != FuncInfo.InsertPt) 1703 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt); 1704
1705 DbgLoc = DebugLoc(); 1706 // Undo phi node updates, because they will be added again by SelectionDAG. 1707 if (I->isTerminator()) { 1708 // PHI node handling may have generated local value instructions. 1709 // We remove them because SelectionDAGISel will generate them again. 1710 removeDeadLocalValueCode(SavedLastLocalValue); 1711 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); 1712 } 1713 return false; 1714 } 1715
1716 /// Emit an unconditional branch to the given block, unless it is the immediate 1717 /// (fall-through) successor, and update the CFG. 1718 void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, 1719 const DebugLoc &DbgLoc) { 1720 if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 && 1721 FuncInfo.MBB->isLayoutSuccessor(MSucc)) { 1722 // MSucc is the layout successor and the block contains other non-debug 1723 // instructions, so this is the unconditional fall-through case, which needs 1724 // no instructions. (If the branch were the only non-debug instruction in the block, we would emit it anyway for more accurate line information.) 1725 } else { 1726 // The unconditional branch case. 1727 TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr, 1728 SmallVector<MachineOperand, 0>(), DbgLoc); 1729 } 1730 if (FuncInfo.BPI) { 1731 auto BranchProbability = FuncInfo.BPI->getEdgeProbability( 1732 FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock()); 1733 FuncInfo.MBB->addSuccessor(MSucc, BranchProbability); 1734 } else 1735 FuncInfo.MBB->addSuccessorWithoutProb(MSucc); 1736 } 1737
1738 void FastISel::finishCondBranch(const BasicBlock *BranchBB, 1739 MachineBasicBlock *TrueMBB, 1740 MachineBasicBlock *FalseMBB) { 1741 // Add TrueMBB as a successor unless it is equal to FalseMBB: this can 1742 // happen in degenerate IR, and MachineIR forbids having a block appear twice 1743 // in the successor/predecessor lists.
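// (Degenerate example: "br i1 %c, label %bb, label %bb" reaches here with
// TrueMBB == FalseMBB, in which case only the edge added for the false
// branch by fastEmitBranch below is recorded.)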
1744 if (TrueMBB != FalseMBB) { 1745 if (FuncInfo.BPI) { 1746 auto BranchProbability = 1747 FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock()); 1748 FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability); 1749 } else 1750 FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB); 1751 } 1752 1753 fastEmitBranch(FalseMBB, DbgLoc); 1754 } 1755 1756 /// Emit an FNeg operation. 1757 bool FastISel::selectFNeg(const User *I, const Value *In) { 1758 Register OpReg = getRegForValue(In); 1759 if (!OpReg) 1760 return false; 1761 bool OpRegIsKill = hasTrivialKill(In); 1762 1763 // If the target has ISD::FNEG, use it. 1764 EVT VT = TLI.getValueType(DL, I->getType()); 1765 Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG, 1766 OpReg, OpRegIsKill); 1767 if (ResultReg) { 1768 updateValueMap(I, ResultReg); 1769 return true; 1770 } 1771 1772 // Bitcast the value to integer, twiddle the sign bit with xor, 1773 // and then bitcast it back to floating-point. 1774 if (VT.getSizeInBits() > 64) 1775 return false; 1776 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits()); 1777 if (!TLI.isTypeLegal(IntVT)) 1778 return false; 1779 1780 Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(), 1781 ISD::BITCAST, OpReg, OpRegIsKill); 1782 if (!IntReg) 1783 return false; 1784 1785 Register IntResultReg = fastEmit_ri_( 1786 IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true, 1787 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT()); 1788 if (!IntResultReg) 1789 return false; 1790 1791 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST, 1792 IntResultReg, /*IsKill=*/true); 1793 if (!ResultReg) 1794 return false; 1795 1796 updateValueMap(I, ResultReg); 1797 return true; 1798 } 1799 1800 bool FastISel::selectExtractValue(const User *U) { 1801 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U); 1802 if (!EVI) 1803 return false; 1804 1805 // Make sure we only try to handle extracts with a legal result. But also 1806 // allow i1 because it's easy. 1807 EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true); 1808 if (!RealVT.isSimple()) 1809 return false; 1810 MVT VT = RealVT.getSimpleVT(); 1811 if (!TLI.isTypeLegal(VT) && VT != MVT::i1) 1812 return false; 1813 1814 const Value *Op0 = EVI->getOperand(0); 1815 Type *AggTy = Op0->getType(); 1816 1817 // Get the base result register. 1818 unsigned ResultReg; 1819 DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0); 1820 if (I != FuncInfo.ValueMap.end()) 1821 ResultReg = I->second; 1822 else if (isa<Instruction>(Op0)) 1823 ResultReg = FuncInfo.InitializeRegForValue(Op0); 1824 else 1825 return false; // fast-isel can't handle aggregate constants at the moment 1826 1827 // Get the actual result register, which is an offset from the base register. 
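// For example, given AggTy == {i32, {i64, float}} and indices [1, 1], the
// linear index computed below is 2, and ResultReg is advanced past the
// registers occupied by the i32 and the i64 to reach the float's register.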
1828 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices()); 1829
1830 SmallVector<EVT, 4> AggValueVTs; 1831 ComputeValueVTs(TLI, DL, AggTy, AggValueVTs); 1832
1833 for (unsigned i = 0; i < VTIndex; i++) 1834 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]); 1835
1836 updateValueMap(EVI, ResultReg); 1837 return true; 1838 } 1839
1840 bool FastISel::selectOperator(const User *I, unsigned Opcode) { 1841 switch (Opcode) { 1842 case Instruction::Add: 1843 return selectBinaryOp(I, ISD::ADD); 1844 case Instruction::FAdd: 1845 return selectBinaryOp(I, ISD::FADD); 1846 case Instruction::Sub: 1847 return selectBinaryOp(I, ISD::SUB); 1848 case Instruction::FSub: { 1849 // FNeg is currently represented in LLVM IR as a special case of FSub. 1850 Value *X; 1851 if (match(I, m_FNeg(m_Value(X)))) 1852 return selectFNeg(I, X); 1853 return selectBinaryOp(I, ISD::FSUB); 1854 } 1855 case Instruction::Mul: 1856 return selectBinaryOp(I, ISD::MUL); 1857 case Instruction::FMul: 1858 return selectBinaryOp(I, ISD::FMUL); 1859 case Instruction::SDiv: 1860 return selectBinaryOp(I, ISD::SDIV); 1861 case Instruction::UDiv: 1862 return selectBinaryOp(I, ISD::UDIV); 1863 case Instruction::FDiv: 1864 return selectBinaryOp(I, ISD::FDIV); 1865 case Instruction::SRem: 1866 return selectBinaryOp(I, ISD::SREM); 1867 case Instruction::URem: 1868 return selectBinaryOp(I, ISD::UREM); 1869 case Instruction::FRem: 1870 return selectBinaryOp(I, ISD::FREM); 1871 case Instruction::Shl: 1872 return selectBinaryOp(I, ISD::SHL); 1873 case Instruction::LShr: 1874 return selectBinaryOp(I, ISD::SRL); 1875 case Instruction::AShr: 1876 return selectBinaryOp(I, ISD::SRA); 1877 case Instruction::And: 1878 return selectBinaryOp(I, ISD::AND); 1879 case Instruction::Or: 1880 return selectBinaryOp(I, ISD::OR); 1881 case Instruction::Xor: 1882 return selectBinaryOp(I, ISD::XOR); 1883
1884 case Instruction::FNeg: 1885 return selectFNeg(I, I->getOperand(0)); 1886
1887 case Instruction::GetElementPtr: 1888 return selectGetElementPtr(I); 1889
1890 case Instruction::Br: { 1891 const BranchInst *BI = cast<BranchInst>(I); 1892
1893 if (BI->isUnconditional()) { 1894 const BasicBlock *LLVMSucc = BI->getSuccessor(0); 1895 MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc]; 1896 fastEmitBranch(MSucc, BI->getDebugLoc()); 1897 return true; 1898 } 1899
1900 // Conditional branches are not handled yet. 1901 // Halt "fast" selection and bail. 1902 return false; 1903 } 1904
1905 case Instruction::Unreachable: 1906 if (TM.Options.TrapUnreachable) 1907 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0; 1908 else 1909 return true; 1910
1911 case Instruction::Alloca: 1912 // FunctionLowering has the static-sized case covered. 1913 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I))) 1914 return true; 1915
1916 // Dynamic-sized alloca is not handled yet. 1917 return false; 1918
1919 case Instruction::Call: 1920 // On AIX, call lowering uses the DAG-ISEL path currently so that the 1921 // callee of the direct function call instruction will be mapped to the 1922 // symbol for the function's entry point, which is distinct from the 1923 // function descriptor symbol. The latter is the symbol whose XCOFF symbol 1924 // name is the C-linkage name of the source level function.
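// (For example, a direct call to a function foo on AIX must reference the
// entry-point symbol ".foo" rather than the descriptor symbol "foo";
// FastISel does not model this distinction, so bail out below.)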
1925 if (TM.getTargetTriple().isOSAIX()) 1926 return false; 1927 return selectCall(I); 1928
1929 case Instruction::BitCast: 1930 return selectBitCast(I); 1931
1932 case Instruction::FPToSI: 1933 return selectCast(I, ISD::FP_TO_SINT); 1934 case Instruction::ZExt: 1935 return selectCast(I, ISD::ZERO_EXTEND); 1936 case Instruction::SExt: 1937 return selectCast(I, ISD::SIGN_EXTEND); 1938 case Instruction::Trunc: 1939 return selectCast(I, ISD::TRUNCATE); 1940 case Instruction::SIToFP: 1941 return selectCast(I, ISD::SINT_TO_FP); 1942
1943 case Instruction::IntToPtr: // Deliberate fall-through. 1944 case Instruction::PtrToInt: { 1945 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType()); 1946 EVT DstVT = TLI.getValueType(DL, I->getType()); 1947 if (DstVT.bitsGT(SrcVT)) 1948 return selectCast(I, ISD::ZERO_EXTEND); 1949 if (DstVT.bitsLT(SrcVT)) 1950 return selectCast(I, ISD::TRUNCATE); 1951 Register Reg = getRegForValue(I->getOperand(0)); 1952 if (!Reg) 1953 return false; 1954 updateValueMap(I, Reg); 1955 return true; 1956 } 1957
1958 case Instruction::ExtractValue: 1959 return selectExtractValue(I); 1960
1961 case Instruction::Freeze: 1962 return selectFreeze(I); 1963
1964 case Instruction::PHI: 1965 llvm_unreachable("FastISel shouldn't visit PHI nodes!"); 1966
1967 default: 1968 // Unhandled instruction. Halt "fast" selection and bail. 1969 return false; 1970 } 1971 } 1972
1973 FastISel::FastISel(FunctionLoweringInfo &FuncInfo, 1974 const TargetLibraryInfo *LibInfo, 1975 bool SkipTargetIndependentISel) 1976 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()), 1977 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()), 1978 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()), 1979 TII(*MF->getSubtarget().getInstrInfo()), 1980 TLI(*MF->getSubtarget().getTargetLowering()), 1981 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo), 1982 SkipTargetIndependentISel(SkipTargetIndependentISel), 1983 LastLocalValue(nullptr), EmitStartPt(nullptr) {} 1984
1985 FastISel::~FastISel() = default; 1986
1987 bool FastISel::fastLowerArguments() { return false; } 1988
1989 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; } 1990
1991 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) { 1992 return false; 1993 } 1994
1995 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; } 1996
1997 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/, 1998 bool /*Op0IsKill*/) { 1999 return 0; 2000 } 2001
2002 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/, 2003 bool /*Op0IsKill*/, unsigned /*Op1*/, 2004 bool /*Op1IsKill*/) { 2005 return 0; 2006 } 2007
2008 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) { 2009 return 0; 2010 } 2011
2012 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned, 2013 const ConstantFP * /*FPImm*/) { 2014 return 0; 2015 } 2016
2017 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/, 2018 bool /*Op0IsKill*/, uint64_t /*Imm*/) { 2019 return 0; 2020 } 2021
2022 /// This method is a wrapper around fastEmit_ri. It first tries to emit an 2023 /// instruction with an immediate operand using fastEmit_ri. 2024 /// If that fails, it materializes the immediate into a register and tries 2025 /// fastEmit_rr instead. 2026 Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, 2027 bool Op0IsKill, uint64_t Imm, MVT ImmType) { 2028 // If this is a multiply by a power of two, emit this as a shift left.
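// For example, "mul i32 %x, 8" arrives here with Imm == 8; since
// Log2_64(8) == 3, it is rewritten to "shl i32 %x, 3". Similarly,
// "udiv i32 %x, 8" becomes "srl i32 %x, 3" just below.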
2029 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) { 2030 Opcode = ISD::SHL; 2031 Imm = Log2_64(Imm); 2032 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) { 2033 // div x, 8 -> srl x, 3 2034 Opcode = ISD::SRL; 2035 Imm = Log2_64(Imm); 2036 } 2037
2038 // Horrible hack (to be removed): check that shift amounts are 2039 // in range. 2040 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) && 2041 Imm >= VT.getSizeInBits()) 2042 return 0; 2043
2044 // First, try the ri form; fastEmit_ri fails if the target cannot use the immediate directly. 2045 Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm); 2046 if (ResultReg) 2047 return ResultReg; 2048 Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm); 2049 bool IsImmKill = true; 2050 if (!MaterialReg) { 2051 // This is a bit ugly/slow, but failing here means falling out of 2052 // fast-isel, which would be very slow. 2053 IntegerType *ITy = 2054 IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits()); 2055 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm)); 2056 if (!MaterialReg) 2057 return 0; 2058 // FIXME: If the materialized register here has no uses yet then this 2059 // will be the first use and we should be able to mark it as killed. 2060 // However, the local value area for materializing constant expressions 2061 // grows down, not up, which means that any constant expressions we generate 2062 // later which also use 'Imm' could be after this instruction and therefore 2063 // after this kill. 2064 IsImmKill = false; 2065 } 2066 return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill); 2067 } 2068
2069 Register FastISel::createResultReg(const TargetRegisterClass *RC) { 2070 return MRI.createVirtualRegister(RC); 2071 } 2072
2073 Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op, 2074 unsigned OpNum) { 2075 if (Op.isVirtual()) { 2076 const TargetRegisterClass *RegClass = 2077 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF); 2078 if (!MRI.constrainRegClass(Op, RegClass)) { 2079 // If it's not legal to COPY between the register classes, something 2080 // has gone very wrong before we got here.
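// Rather than give up, copy the value into a fresh register of the
// required class so selection can proceed.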
2081 Register NewOp = createResultReg(RegClass); 2082 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2083 TII.get(TargetOpcode::COPY), NewOp).addReg(Op); 2084 return NewOp; 2085 } 2086 } 2087 return Op; 2088 } 2089 2090 Register FastISel::fastEmitInst_(unsigned MachineInstOpcode, 2091 const TargetRegisterClass *RC) { 2092 Register ResultReg = createResultReg(RC); 2093 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2094 2095 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg); 2096 return ResultReg; 2097 } 2098 2099 Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode, 2100 const TargetRegisterClass *RC, unsigned Op0, 2101 bool Op0IsKill) { 2102 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2103 2104 Register ResultReg = createResultReg(RC); 2105 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 2106 2107 if (II.getNumDefs() >= 1) 2108 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2109 .addReg(Op0, getKillRegState(Op0IsKill)); 2110 else { 2111 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 2112 .addReg(Op0, getKillRegState(Op0IsKill)); 2113 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2114 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2115 } 2116 2117 return ResultReg; 2118 } 2119 2120 Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode, 2121 const TargetRegisterClass *RC, unsigned Op0, 2122 bool Op0IsKill, unsigned Op1, 2123 bool Op1IsKill) { 2124 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2125 2126 Register ResultReg = createResultReg(RC); 2127 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 2128 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); 2129 2130 if (II.getNumDefs() >= 1) 2131 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2132 .addReg(Op0, getKillRegState(Op0IsKill)) 2133 .addReg(Op1, getKillRegState(Op1IsKill)); 2134 else { 2135 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 2136 .addReg(Op0, getKillRegState(Op0IsKill)) 2137 .addReg(Op1, getKillRegState(Op1IsKill)); 2138 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2139 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2140 } 2141 return ResultReg; 2142 } 2143 2144 Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode, 2145 const TargetRegisterClass *RC, unsigned Op0, 2146 bool Op0IsKill, unsigned Op1, 2147 bool Op1IsKill, unsigned Op2, 2148 bool Op2IsKill) { 2149 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2150 2151 Register ResultReg = createResultReg(RC); 2152 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 2153 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); 2154 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2); 2155 2156 if (II.getNumDefs() >= 1) 2157 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2158 .addReg(Op0, getKillRegState(Op0IsKill)) 2159 .addReg(Op1, getKillRegState(Op1IsKill)) 2160 .addReg(Op2, getKillRegState(Op2IsKill)); 2161 else { 2162 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 2163 .addReg(Op0, getKillRegState(Op0IsKill)) 2164 .addReg(Op1, getKillRegState(Op1IsKill)) 2165 .addReg(Op2, getKillRegState(Op2IsKill)); 2166 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2167 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2168 } 2169 return ResultReg; 2170 } 2171 2172 Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode, 2173 const TargetRegisterClass *RC, unsigned Op0, 2174 bool Op0IsKill, uint64_t Imm) { 2175 const 
MCInstrDesc &II = TII.get(MachineInstOpcode); 2176 2177 Register ResultReg = createResultReg(RC); 2178 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 2179 2180 if (II.getNumDefs() >= 1) 2181 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2182 .addReg(Op0, getKillRegState(Op0IsKill)) 2183 .addImm(Imm); 2184 else { 2185 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 2186 .addReg(Op0, getKillRegState(Op0IsKill)) 2187 .addImm(Imm); 2188 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2189 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2190 } 2191 return ResultReg; 2192 } 2193 2194 Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode, 2195 const TargetRegisterClass *RC, unsigned Op0, 2196 bool Op0IsKill, uint64_t Imm1, 2197 uint64_t Imm2) { 2198 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2199 2200 Register ResultReg = createResultReg(RC); 2201 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 2202 2203 if (II.getNumDefs() >= 1) 2204 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2205 .addReg(Op0, getKillRegState(Op0IsKill)) 2206 .addImm(Imm1) 2207 .addImm(Imm2); 2208 else { 2209 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 2210 .addReg(Op0, getKillRegState(Op0IsKill)) 2211 .addImm(Imm1) 2212 .addImm(Imm2); 2213 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2214 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2215 } 2216 return ResultReg; 2217 } 2218 2219 Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode, 2220 const TargetRegisterClass *RC, 2221 const ConstantFP *FPImm) { 2222 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2223 2224 Register ResultReg = createResultReg(RC); 2225 2226 if (II.getNumDefs() >= 1) 2227 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2228 .addFPImm(FPImm); 2229 else { 2230 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 2231 .addFPImm(FPImm); 2232 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2233 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2234 } 2235 return ResultReg; 2236 } 2237 2238 Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode, 2239 const TargetRegisterClass *RC, unsigned Op0, 2240 bool Op0IsKill, unsigned Op1, 2241 bool Op1IsKill, uint64_t Imm) { 2242 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2243 2244 Register ResultReg = createResultReg(RC); 2245 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs()); 2246 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1); 2247 2248 if (II.getNumDefs() >= 1) 2249 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2250 .addReg(Op0, getKillRegState(Op0IsKill)) 2251 .addReg(Op1, getKillRegState(Op1IsKill)) 2252 .addImm(Imm); 2253 else { 2254 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II) 2255 .addReg(Op0, getKillRegState(Op0IsKill)) 2256 .addReg(Op1, getKillRegState(Op1IsKill)) 2257 .addImm(Imm); 2258 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2259 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2260 } 2261 return ResultReg; 2262 } 2263 2264 Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode, 2265 const TargetRegisterClass *RC, uint64_t Imm) { 2266 Register ResultReg = createResultReg(RC); 2267 const MCInstrDesc &II = TII.get(MachineInstOpcode); 2268 2269 if (II.getNumDefs() >= 1) 2270 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg) 2271 .addImm(Imm); 2272 else { 2273 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 
II).addImm(Imm); 2274 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, 2275 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]); 2276 } 2277 return ResultReg; 2278 } 2279
2280 Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, 2281 bool Op0IsKill, uint32_t Idx) { 2282 Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT)); 2283 assert(Register::isVirtualRegister(Op0) && 2284 "Cannot yet extract from physregs"); 2285 const TargetRegisterClass *RC = MRI.getRegClass(Op0); 2286 MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx)); 2287 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY), 2288 ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx); 2289 return ResultReg; 2290 } 2291
2292 /// Emit MachineInstrs to compute the value of Op with all but the least 2293 /// significant bit set to zero. 2294 Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) { 2295 return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1); 2296 } 2297
2298 /// Handle PHI nodes in successor blocks. 2299 /// Emit code to ensure constants are copied into registers when needed. 2300 /// Remember the virtual registers that need to be added to the Machine PHI 2301 /// nodes as input. We cannot just directly add them, because expansion 2302 /// might result in multiple MBB's for one BB. As such, the start of the 2303 /// BB might correspond to a different MBB than the end. 2304 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { 2305 const Instruction *TI = LLVMBB->getTerminator(); 2306
2307 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled; 2308 FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size(); 2309
2310 // Check successor nodes' PHI nodes that expect a constant to be available 2311 // from this block. 2312 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) { 2313 const BasicBlock *SuccBB = TI->getSuccessor(succ); 2314 if (!isa<PHINode>(SuccBB->begin())) 2315 continue; 2316 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB]; 2317
2318 // If this terminator has multiple identical successors (common for 2319 // switches), only handle each succ once. 2320 if (!SuccsHandled.insert(SuccMBB).second) 2321 continue; 2322
2323 MachineBasicBlock::iterator MBBI = SuccMBB->begin(); 2324
2325 // At this point we know that there is a 1-1 correspondence between LLVM PHI 2326 // nodes and Machine PHI nodes, but the incoming operands have not been 2327 // emitted yet. 2328 for (const PHINode &PN : SuccBB->phis()) { 2329 // Ignore dead PHIs. 2330 if (PN.use_empty()) 2331 continue; 2332
2333 // Only handle legal types. Two interesting things to note here. First, 2334 // by bailing out early, we may leave behind some dead instructions, 2335 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its 2336 // own moves. Second, this check is necessary because FastISel doesn't 2337 // use CreateRegs to create registers, so it always creates 2338 // exactly one register for each non-void instruction. 2339 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true); 2340 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) { 2341 // Handle integer promotions, though, because they're common and easy.
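// For example, an i1 PHI fed by a compare is illegal on most targets, but
// its promoted form is trivial, so it is still accepted below.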
2342 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) { 2343 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); 2344 return false; 2345 } 2346 } 2347 2348 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB); 2349 2350 // Set the DebugLoc for the copy. Prefer the location of the operand 2351 // if there is one; use the location of the PHI otherwise. 2352 DbgLoc = PN.getDebugLoc(); 2353 if (const auto *Inst = dyn_cast<Instruction>(PHIOp)) 2354 DbgLoc = Inst->getDebugLoc(); 2355 2356 Register Reg = getRegForValue(PHIOp); 2357 if (!Reg) { 2358 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate); 2359 return false; 2360 } 2361 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg)); 2362 DbgLoc = DebugLoc(); 2363 } 2364 } 2365 2366 return true; 2367 } 2368 2369 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) { 2370 assert(LI->hasOneUse() && 2371 "tryToFoldLoad expected a LoadInst with a single use"); 2372 // We know that the load has a single use, but don't know what it is. If it 2373 // isn't one of the folded instructions, then we can't succeed here. Handle 2374 // this by scanning the single-use users of the load until we get to FoldInst. 2375 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs. 2376 2377 const Instruction *TheUser = LI->user_back(); 2378 while (TheUser != FoldInst && // Scan up until we find FoldInst. 2379 // Stay in the right block. 2380 TheUser->getParent() == FoldInst->getParent() && 2381 --MaxUsers) { // Don't scan too far. 2382 // If there are multiple or no uses of this instruction, then bail out. 2383 if (!TheUser->hasOneUse()) 2384 return false; 2385 2386 TheUser = TheUser->user_back(); 2387 } 2388 2389 // If we didn't find the fold instruction, then we failed to collapse the 2390 // sequence. 2391 if (TheUser != FoldInst) 2392 return false; 2393 2394 // Don't try to fold volatile loads. Target has to deal with alignment 2395 // constraints. 2396 if (LI->isVolatile()) 2397 return false; 2398 2399 // Figure out which vreg this is going into. If there is no assigned vreg yet 2400 // then there actually was no reference to it. Perhaps the load is referenced 2401 // by a dead instruction. 2402 Register LoadReg = getRegForValue(LI); 2403 if (!LoadReg) 2404 return false; 2405 2406 // We can't fold if this vreg has no uses or more than one use. Multiple uses 2407 // may mean that the instruction got lowered to multiple MIs, or the use of 2408 // the loaded value ended up being multiple operands of the result. 2409 if (!MRI.hasOneUse(LoadReg)) 2410 return false; 2411 2412 MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg); 2413 MachineInstr *User = RI->getParent(); 2414 2415 // Set the insertion point properly. Folding the load can cause generation of 2416 // other random instructions (like sign extends) for addressing modes; make 2417 // sure they get inserted in a logical place before the new instruction. 2418 FuncInfo.InsertPt = User; 2419 FuncInfo.MBB = User->getParent(); 2420 2421 // Ask the target to try folding the load. 2422 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI); 2423 } 2424 2425 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) { 2426 // Must be an add. 2427 if (!isa<AddOperator>(Add)) 2428 return false; 2429 // Type size needs to match. 2430 if (DL.getTypeSizeInBits(GEP->getType()) != 2431 DL.getTypeSizeInBits(Add->getType())) 2432 return false; 2433 // Must be in the same basic block. 
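// (That is, an add such as "add i64 %i, 16" can only be folded into the
// GEP's addressing computation when it lives in the block currently being
// selected.)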
2434 if (isa<Instruction>(Add) && 2435 FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB) 2436 return false; 2437 // Must have a constant operand. 2438 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1)); 2439 } 2440 2441 MachineMemOperand * 2442 FastISel::createMachineMemOperandFor(const Instruction *I) const { 2443 const Value *Ptr; 2444 Type *ValTy; 2445 MaybeAlign Alignment; 2446 MachineMemOperand::Flags Flags; 2447 bool IsVolatile; 2448 2449 if (const auto *LI = dyn_cast<LoadInst>(I)) { 2450 Alignment = LI->getAlign(); 2451 IsVolatile = LI->isVolatile(); 2452 Flags = MachineMemOperand::MOLoad; 2453 Ptr = LI->getPointerOperand(); 2454 ValTy = LI->getType(); 2455 } else if (const auto *SI = dyn_cast<StoreInst>(I)) { 2456 Alignment = SI->getAlign(); 2457 IsVolatile = SI->isVolatile(); 2458 Flags = MachineMemOperand::MOStore; 2459 Ptr = SI->getPointerOperand(); 2460 ValTy = SI->getValueOperand()->getType(); 2461 } else 2462 return nullptr; 2463 2464 bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal); 2465 bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load); 2466 bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable); 2467 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range); 2468 2469 AAMDNodes AAInfo; 2470 I->getAAMetadata(AAInfo); 2471 2472 if (!Alignment) // Ensure that codegen never sees alignment 0. 2473 Alignment = DL.getABITypeAlign(ValTy); 2474 2475 unsigned Size = DL.getTypeStoreSize(ValTy); 2476 2477 if (IsVolatile) 2478 Flags |= MachineMemOperand::MOVolatile; 2479 if (IsNonTemporal) 2480 Flags |= MachineMemOperand::MONonTemporal; 2481 if (IsDereferenceable) 2482 Flags |= MachineMemOperand::MODereferenceable; 2483 if (IsInvariant) 2484 Flags |= MachineMemOperand::MOInvariant; 2485 2486 return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size, 2487 *Alignment, AAInfo, Ranges); 2488 } 2489 2490 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const { 2491 // If both operands are the same, then try to optimize or fold the cmp. 
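// For example, "fcmp oeq %x, %x" is true exactly when %x is not a NaN, so
// it folds to FCMP_ORD below, while "icmp ult %x, %x" is always false and
// folds to FCMP_FALSE.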
2492 CmpInst::Predicate Predicate = CI->getPredicate(); 2493 if (CI->getOperand(0) != CI->getOperand(1)) 2494 return Predicate; 2495 2496 switch (Predicate) { 2497 default: llvm_unreachable("Invalid predicate!"); 2498 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break; 2499 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break; 2500 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break; 2501 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break; 2502 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break; 2503 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break; 2504 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break; 2505 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break; 2506 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break; 2507 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break; 2508 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break; 2509 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; 2510 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break; 2511 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; 2512 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break; 2513 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break; 2514 2515 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break; 2516 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break; 2517 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break; 2518 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break; 2519 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break; 2520 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break; 2521 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break; 2522 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break; 2523 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break; 2524 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break; 2525 } 2526 2527 return Predicate; 2528 } 2529
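// A target's FastISel subclass would typically consult optimizeCmpPredicate
// before lowering a compare; an illustrative sketch (not part of this file's
// interface):
//
//   CmpInst::Predicate Pred = optimizeCmpPredicate(CI);
//   if (Pred == CmpInst::FCMP_FALSE || Pred == CmpInst::FCMP_TRUE) {
//     // The compare folded to a constant; materialize 0 or 1 directly
//     // instead of emitting a compare instruction.
//   }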