//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

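/// Complete the current block by flushing the local value map, so local value
/// instructions are committed before selection moves on.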
void FastISel::finishBasicBlock() { flushLocalValueMap(); }

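/// Lower the function's formal arguments. Tries the target's
/// fastLowerArguments hook first and, on success, records the argument
/// registers in ValueMap so uses in non-entry blocks can find them.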
bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

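/// Clear the local CSE map, first erasing any local value instructions that
/// ended up with no uses (e.g. after a FastISel bail-out), and reset the
/// insertion point for the next selection.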
void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (MachineInstr &LocalMI :
         llvm::make_early_inc_range(llvm::make_range(RI, RE))) {
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization "
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

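/// Return the virtual register holding \p V, creating or materializing it if
/// necessary. Returns an invalid register if the value's type cannot be
/// handled by FastISel.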
Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

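/// Target-independent constant materialization: handles integer, alloca,
/// null-pointer, floating-point, and undef constants, and falls back to
/// selecting the defining operator for constant expressions.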
Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return Register();
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

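/// Record that \p I is available in \p Reg (and the following NumRegs - 1
/// registers for multi-register values). If the instruction was already
/// assigned a register, schedule a fixup so earlier uses of the old register
/// get rewritten to the new one.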
void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

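/// Return a register holding the given GEP index, sign-extended or truncated
/// to the pointer width \p PtrVT as needed.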
Register FastISel::getRegForGEPIndex(MVT PtrVT, const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

Register FastISel::getRegForGEPIndex(const Value *Idx) {
  return getRegForGEPIndex(TLI.getPointerTy(DL), Idx);
}

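/// Reset the insertion point to just after the last local value instruction,
/// or to the start of the block (after any PHIs) if there are no local
/// values.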
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();
}

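/// Erase the machine instructions in the half-open range [I, E), keeping
/// SavedInsertPt, EmitStartPt, and LastLocalValue consistent if any of them
/// points into the erased range.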
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

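/// Move the insert point into the local value area (just after the last
/// local value instruction) and return the previous insert point so that
/// leaveLocalValueArea can restore it.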
FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

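/// Select a binary operator \p I with ISD opcode \p ISDOpcode, trying the
/// register-immediate forms (including a couple of strength reductions)
/// before falling back to the register-register form.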
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && ISD::isBitwiseLogicOp(ISDOpcode))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

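/// Select a getelementptr by folding constant offsets into a running total
/// and emitting ADD/MUL sequences for the variable indices.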
bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getValueType(DL, I->getType()).getSimpleVT();

  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += GTI.getSequentialElementStride(DL) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = GTI.getSequentialElementStride(DL);
      Register IdxN = getRegForGEPIndex(VT, Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

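/// Append the call's live-variable operands (starting at argument
/// \p StartIdx) to \p Ops in stackmap encoding: constants get a
/// StackMaps::ConstantOp prefix, static allocas become frame indices, and
/// everything else is passed in a register.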
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->arg_size(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

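/// Lower a call to llvm.experimental.stackmap by emitting a
/// CALLSEQ_START / STACKMAP / CALLSEQ_END sequence directly; no real call is
/// generated.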
bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

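/// Lower a call to llvm.experimental.patchpoint: lower the real call (unless
/// anyregcc defers argument lowering), then replace the generated call with a
/// PATCHPOINT instruction carrying the meta operands and live variables.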
bool FastISel::selectPatchpoint(const CallInst *I) {
  // <ty> @llvm.experimental.patchpoint.<ty>(i64 <id>,
  //                                         i32 <numBytes>,
  //                                         i8* <target>,
  //                                         i32 <numArgs>,
  //                                         [Args...],
  //                                         [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Check if we can lower the return type when using anyregcc.
  MVT ValueType;
  if (IsAnyRegCC && HasDef) {
    ValueType = TLI.getSimpleValueType(DL, I->getType(), /*AllowUnknown=*/true);
    if (ValueType == MVT::Other)
      return false;
  }

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    assert(ValueType.isValid());
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(ValueType));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, MIMD,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  // XRay custom events are only handled here on AArch64 and x86-64; skip the
  // instruction on other targets.
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  // As with custom events, only AArch64 and x86-64 are handled; skip the
  // instruction elsewhere.
  if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

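/// Lower a call to the external symbol \p SymName, passing the first
/// \p NumArgs operands of \p CI as arguments. The symbol name is mangled for
/// the target before lookup.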
bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

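/// Common call lowering: compute the incoming return-value registers and the
/// outgoing argument flags, then hand the call to the target's fastLowerCall
/// hook.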
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (EVT VT : RetTys) {
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

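/// Lower an ordinary call instruction: build the argument list, apply the
/// target-independent tail-call checks, and defer to lowerCallTo.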
bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && !CI->isMustTailCall() &&
      MF->getFunction().getFnAttribute("disable-tail-calls").getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  diagnoseDontCall(*CI);

  return lowerCallTo(CLI);
}

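/// Select a call: handle constraint-free inline asm directly, dispatch
/// intrinsics to selectIntrinsicCall, and lower everything else as a normal
/// call.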
bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

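/// Lower the debug records attached to \p II. Records are visited in reverse
/// order because FastISel selects blocks bottom-up and inserts in front of
/// previously emitted instructions.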
void FastISel::handleDbgInfo(const Instruction *II) {
  if (!II->hasDbgRecords())
    return;

  // Clear any metadata.
  MIMD = MIMetadata();

  // Reverse order of debug records, because fast-isel walks through backwards.
  for (DbgRecord &DR : llvm::reverse(II->getDbgRecordRange())) {
    flushLocalValueMap();
    recomputeInsertPt();

    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      assert(DLR->getLabel() && "Missing label");
      if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
        LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DLR << "\n");
        continue;
      }

      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DLR->getDebugLoc(),
              TII.get(TargetOpcode::DBG_LABEL))
          .addMetadata(DLR->getLabel());
      continue;
    }

    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);

    Value *V = nullptr;
    if (!DVR.hasArgList())
      V = DVR.getVariableLocationOp(0);

    bool Res = false;
    if (DVR.getType() == DbgVariableRecord::LocationType::Value ||
        DVR.getType() == DbgVariableRecord::LocationType::Assign) {
      Res = lowerDbgValue(V, DVR.getExpression(), DVR.getVariable(),
                          DVR.getDebugLoc());
    } else {
      assert(DVR.getType() == DbgVariableRecord::LocationType::Declare);
      if (FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
        continue;
      Res = lowerDbgDeclare(V, DVR.getExpression(), DVR.getVariable(),
                            DVR.getDebugLoc());
    }

    if (!Res)
      LLVM_DEBUG(dbgs() << "Dropping debug-info for " << DVR << "\n";);
  }
}

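/// Emit a DBG_VALUE (or DBG_INSTR_REF when instruction referencing is
/// enabled) describing \p V for variable \p Var. Returns false if no
/// location could be produced.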
bool FastISel::lowerDbgValue(const Value *V, DIExpression *Expr,
                             DILocalVariable *Var, const DebugLoc &DL) {
  // This form of DBG_VALUE is target-independent.
  const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
  if (!V || isa<UndefValue>(V)) {
    // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
    // undef DBG_VALUE to terminate any prior location.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false, 0U, Var, Expr);
    return true;
  }
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    // See if there's an expression to constant-fold.
    if (Expr)
      std::tie(Expr, CI) = Expr->constantFold(CI);
    if (CI->getBitWidth() > 64)
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addCImm(CI)
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    else
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
          .addImm(CI->getZExtValue())
          .addImm(0U)
          .addMetadata(Var)
          .addMetadata(Expr);
    return true;
  }
  if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
        .addFPImm(CF)
        .addImm(0U)
        .addMetadata(Var)
        .addMetadata(Expr);
    return true;
  }
  if (const auto *Arg = dyn_cast<Argument>(V);
      Arg && Expr && Expr->isEntryValue()) {
    // As per the Verifier, this case is only valid for swift async Args.
    assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));

    Register Reg = getRegForValue(Arg);
    for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
      if (Reg == VirtReg || Reg == PhysReg) {
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, false /*IsIndirect*/,
                PhysReg, Var, Expr);
        return true;
      }

    LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
                         "couldn't find a physical register\n");
    return false;
  }
  if (auto SI = FuncInfo.StaticAllocaMap.find(dyn_cast<AllocaInst>(V));
      SI != FuncInfo.StaticAllocaMap.end()) {
    MachineOperand FrameIndexOp = MachineOperand::CreateFI(SI->second);
    bool IsIndirect = false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, FrameIndexOp,
            Var, Expr);
    return true;
  }
  if (Register Reg = lookUpRegForValue(V)) {
    // FIXME: This does not handle register-indirect values at offset 0.
    if (!FuncInfo.MF->useDebugInstrRef()) {
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, IsIndirect, Reg, Var,
              Expr);
      return true;
    }
    // If using instruction referencing, produce this as a DBG_INSTR_REF,
    // to be later patched up by finalizeDebugInstrRefs.
    SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
        /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
        /* isKill */ false, /* isDead */ false,
        /* isUndef */ false, /* isEarlyClobber */ false,
        /* SubReg */ 0, /* isDebug */ true)});
    SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
    auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, MOs,
            Var, NewExpr);
    return true;
  }
  return false;
}

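/// Emit an indirect DBG_VALUE (or a DBG_INSTR_REF with a deref) describing
/// the variable whose address is \p Address. Returns false if the address
/// cannot be described without generating code.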
bool FastISel::lowerDbgDeclare(const Value *Address, DIExpression *Expr,
                               DILocalVariable *Var, const DebugLoc &DL) {
  if (!Address || isa<UndefValue>(Address)) {
    LLVM_DEBUG(dbgs() << "Dropping debug info (bad/undef address)\n");
    return false;
  }

  std::optional<MachineOperand> Op;
  if (Register Reg = lookUpRegForValue(Address))
    Op = MachineOperand::CreateReg(Reg, false);

  // If we have a VLA that has a "use" in a metadata node that's then used
  // here but it has no other uses, then we have a problem. E.g.,
  //
  //   int foo (const int *x) {
  //     char a[*x];
  //     return 0;
  //   }
  //
  // If we assign 'a' a vreg and fast isel later on has to use the selection
  // DAG isel, it will want to copy the value to the vreg. However, there are
  // no uses, which goes counter to what selection DAG isel expects.
  if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
      (!isa<AllocaInst>(Address) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
    Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                   false);

  if (Op) {
    assert(Var->isValidLocationForIntrinsic(DL) &&
           "Expected inlined-at fields to agree");
    if (FuncInfo.MF->useDebugInstrRef() && Op->isReg()) {
      // If using instruction referencing, produce this as a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
      // the expression, we don't have an "indirect" flag in DBG_INSTR_REF.
      SmallVector<uint64_t, 3> Ops(
          {dwarf::DW_OP_LLVM_arg, 0, dwarf::DW_OP_deref});
      auto *NewExpr = DIExpression::prependOpcodes(Expr, Ops);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::DBG_INSTR_REF), /*IsIndirect*/ false, *Op,
              Var, NewExpr);
      return true;
    }

    // A dbg.declare describes the address of a source variable, so lower it
    // into an indirect DBG_VALUE.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op, Var,
            Expr);
    return true;
  }

  // We can't yet handle anything else here because it would require
  // generating code, thus altering codegen because of debug info.
  LLVM_DEBUG(
      dbgs() << "Dropping debug info (no materialized reg for address)\n");
  return false;
}

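/// Select the intrinsics FastISel handles target-independently (debug info,
/// stackmaps, patchpoints, XRay events, and a few no-ops), forwarding
/// everything else to the target's fastLowerIntrinsicCall hook.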
selectIntrinsicCall(const IntrinsicInst * II)1390 bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (!hasDebugInfo)\n");
      return true;
    }

    if (FuncInfo.PreprocessedDbgDeclares.contains(DI))
      return true;

    const Value *Address = DI->getAddress();
    if (!lowerDbgDeclare(Address, DI->getExpression(), DI->getVariable(),
                         MIMD.getDL()))
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI);

    return true;
  }
  case Intrinsic::dbg_assign:
    // A dbg.assign is a dbg.value with more information, typically produced
    // during optimisation. If one reaches fastisel then something odd has
    // happened (such as an optimised function being always-inlined into an
    // optnone function). We will not be using the extra information in the
    // dbg.assign in that case, just use its dbg.value fields.
    [[fallthrough]];
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const Value *V = DI->getValue();
    DIExpression *Expr = DI->getExpression();
    DILocalVariable *Var = DI->getVariable();
    if (DI->hasArgList())
      // Signal that we don't have a location for this.
      V = nullptr;

    assert(Var->isValidLocationForIntrinsic(MIMD.getDL()) &&
           "Expected inlined-at fields to agree");

    if (!lowerDbgValue(V, Expr, Var, MIMD.getDL()))
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");

    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check: {
    Register ResultReg = getRegForValue(ConstantInt::getTrue(II->getType()));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

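/// Select a cast instruction (zext, sext, trunc, and similar) by emitting the
/// given ISD cast opcode between two legal, simple value types.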
bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

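/// Select a bitcast. If the source and destination types map to the same MVT,
/// the operand's register is reused directly; otherwise an ISD::BITCAST is
/// emitted.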
bool FastISel::selectBitCast(const User *I) {
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // If the bitcast doesn't change the type, just use the operand value.
  if (SrcVT == DstVT) {
    updateValueMap(I, Op0);
    return true;
  }

  // Otherwise, select a BITCAST opcode.
  Register ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

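/// Select a freeze instruction as a plain COPY of its already-materialized
/// operand register.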
bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

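/// Select the given IR instruction: try target-independent selection first,
/// then the target hook, cleaning up any dead code emitted by a failed
/// attempt so SelectionDAG can take over from a consistent state.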
bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  MIMD = MIMetadata(*I);

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      MIMD = {};
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    MIMD = {};
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  MIMD = {};
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // This is the unconditional fall-through case, which needs no
    // instructions. (If the branch were the only non-debug instruction in
    // the block, we would emit it anyway for more accurate line information.)
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

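/// Finish lowering a conditional branch: record TrueMBB as a successor (with
/// its edge probability when available) and emit the branch or fall-through
/// to FalseMBB.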
void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block appear
  // twice in the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, MIMD.getDL());
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

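/// Select an extractvalue instruction by computing which of the aggregate's
/// registers holds the requested member and mapping the result to it; no new
/// machine instructions are needed.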
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

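/// Target-independent dispatch on the IR opcode: route each instruction to
/// the matching select* helper or ISD opcode, returning false for anything
/// FastISel cannot handle.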
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, normal call lowering uses the DAG-ISEL path currently so that
    // the callee of the direct function call instruction will be mapped to
    // the symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    // But fast isel still has the ability to do selection for intrinsics.
    if (TM.getTargetTriple().isOSAIX() && !isa<IntrinsicInst>(I))
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::Freeze:
    return selectFreeze(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() = default;

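// Default implementations of the fastLower* and fastEmit_* hooks below simply
// report failure; targets override the ones they support.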
bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
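    // mul x, 8 -> shl x, 3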
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg)
    return ResultReg;
  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

Register FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

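/// Constrain Op to the register class required by operand OpNum of II. If the
/// constraint cannot be applied in place, copy the value into a fresh
/// register of the required class instead.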
Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                            unsigned OpNum) {
  if (Op.isVirtual()) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      Register NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

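// The fastEmitInst_* helpers below build a MachineInstr with the given opcode
// and operands. If the instruction has no explicit definitions, the result is
// copied out of its first implicit def into a fresh virtual register.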
Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg);
  return ResultReg;
}

Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   unsigned Op1) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, unsigned Op2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
            ResultReg)
        .addReg(II.implicit_defs()[0]);
  }
  return ResultReg;
}

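/// Emit a subregister extract as a COPY reading subregister Idx of Op0.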
Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              uint32_t Idx) {
  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, 0, Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (const BasicBlock *SuccBB : successors(LLVMBB)) {
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not
    // been emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead phi's.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Use the location of the operand if
      // there is one; otherwise no location, flushLocalValueMap will fix it.
      MIMD = {};
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        MIMD = MIMetadata(*Inst);

      Register Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      MIMD = {};
    }
  }

  return true;
}

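/// Try to fold a single-use load into FoldInst, the instruction that
/// (possibly through a chain of single-use instructions) consumes it, so the
/// target can form a folded memory operand instead of a separate load.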
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  Register LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  // If the register has fixups, there may be additional uses through a
  // different alias of the register.
  if (FuncInfo.RegsWithFixups.contains(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

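/// Check whether an add can be folded into GEP address computation: it must
/// be an add with matching bit width, in the current block, with a constant
/// right-hand operand.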
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

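/// Build a MachineMemOperand describing the given load or store, carrying its
/// alignment, volatility, and relevant metadata (nontemporal, invariant,
/// dereferenceable, range, and AA info).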
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  MaybeAlign Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlign();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlign();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo = I->getAAMetadata();

  if (!Alignment) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlign(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           *Alignment, AAInfo, Ranges);
}

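/// When both operands of a compare are identical, the predicate no longer
/// depends on the operand values: e.g. "fcmp oeq x, x" reduces to an ordered
/// check, and "icmp eq x, x" is always true. Fold the predicate accordingly.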
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}
