1 //===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the X86-specific support for the FastISel class. Much
10 // of the target-specific code is generated by tablegen in the file
11 // X86GenFastISel.inc, which is #included here.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "X86.h"
16 #include "X86CallingConv.h"
17 #include "X86InstrBuilder.h"
18 #include "X86InstrInfo.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86RegisterInfo.h"
21 #include "X86Subtarget.h"
22 #include "X86TargetMachine.h"
23 #include "llvm/Analysis/BranchProbabilityInfo.h"
24 #include "llvm/CodeGen/FastISel.h"
25 #include "llvm/CodeGen/FunctionLoweringInfo.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/IR/CallingConv.h"
30 #include "llvm/IR/DebugInfo.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/GetElementPtrTypeIterator.h"
33 #include "llvm/IR/GlobalAlias.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/IntrinsicsX86.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/MC/MCAsmInfo.h"
40 #include "llvm/MC/MCSymbol.h"
41 #include "llvm/Support/ErrorHandling.h"
42 #include "llvm/Target/TargetOptions.h"
43 using namespace llvm;
44
45 namespace {
46
47 class X86FastISel final : public FastISel {
48 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
49 /// make the right decision when generating code for different targets.
50 const X86Subtarget *Subtarget;
51
52 public:
53 explicit X86FastISel(FunctionLoweringInfo &funcInfo,
54 const TargetLibraryInfo *libInfo)
55 : FastISel(funcInfo, libInfo) {
56 Subtarget = &funcInfo.MF->getSubtarget<X86Subtarget>();
57 }
58
59 bool fastSelectInstruction(const Instruction *I) override;
60
61 /// The specified machine instr operand is a vreg, and that
62 /// vreg is being provided by the specified load instruction. If possible,
63 /// try to fold the load as an operand to the instruction, returning true
64 /// on success.
65 bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
66 const LoadInst *LI) override;
67
68 bool fastLowerArguments() override;
69 bool fastLowerCall(CallLoweringInfo &CLI) override;
70 bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
71
72 #include "X86GenFastISel.inc"
73
74 private:
75 bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
76 const DebugLoc &DL);
77
78 bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
79 unsigned &ResultReg, unsigned Alignment = 1);
80
81 bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
82 MachineMemOperand *MMO = nullptr, bool Aligned = false);
83 bool X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
84 MachineMemOperand *MMO = nullptr, bool Aligned = false);
85
86 bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
87 unsigned &ResultReg);
88
89 bool X86SelectAddress(const Value *V, X86AddressMode &AM);
90 bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
91
92 bool X86SelectLoad(const Instruction *I);
93
94 bool X86SelectStore(const Instruction *I);
95
96 bool X86SelectRet(const Instruction *I);
97
98 bool X86SelectCmp(const Instruction *I);
99
100 bool X86SelectZExt(const Instruction *I);
101
102 bool X86SelectSExt(const Instruction *I);
103
104 bool X86SelectBranch(const Instruction *I);
105
106 bool X86SelectShift(const Instruction *I);
107
108 bool X86SelectDivRem(const Instruction *I);
109
110 bool X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I);
111
112 bool X86FastEmitSSESelect(MVT RetVT, const Instruction *I);
113
114 bool X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I);
115
116 bool X86SelectSelect(const Instruction *I);
117
118 bool X86SelectTrunc(const Instruction *I);
119
120 bool X86SelectFPExtOrFPTrunc(const Instruction *I, unsigned Opc,
121 const TargetRegisterClass *RC);
122
123 bool X86SelectFPExt(const Instruction *I);
124 bool X86SelectFPTrunc(const Instruction *I);
125 bool X86SelectSIToFP(const Instruction *I);
126 bool X86SelectUIToFP(const Instruction *I);
127 bool X86SelectIntToFP(const Instruction *I, bool IsSigned);
128
129 const X86InstrInfo *getInstrInfo() const {
130 return Subtarget->getInstrInfo();
131 }
132 const X86TargetMachine *getTargetMachine() const {
133 return static_cast<const X86TargetMachine *>(&TM);
134 }
135
136 bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
137
138 unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
139 unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
140 unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
141 unsigned fastMaterializeConstant(const Constant *C) override;
142
143 unsigned fastMaterializeAlloca(const AllocaInst *C) override;
144
145 unsigned fastMaterializeFloatZero(const ConstantFP *CF) override;
146
147 /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
148 /// computed in an SSE register, not on the X87 floating point stack.
149 bool isScalarFPTypeInSSEReg(EVT VT) const {
150 return (VT == MVT::f64 && Subtarget->hasSSE2()) ||
151 (VT == MVT::f32 && Subtarget->hasSSE1()) || VT == MVT::f16;
152 }
153
154 bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);
155
156 bool IsMemcpySmall(uint64_t Len);
157
158 bool TryEmitSmallMemcpy(X86AddressMode DestAM,
159 X86AddressMode SrcAM, uint64_t Len);
160
161 bool foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
162 const Value *Cond);
163
164 const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
165 X86AddressMode &AM);
166
167 unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
168 const TargetRegisterClass *RC, unsigned Op0,
169 unsigned Op1, unsigned Op2, unsigned Op3);
170 };
171
172 } // end anonymous namespace.
173
174 static std::pair<unsigned, bool>
175 getX86SSEConditionCode(CmpInst::Predicate Predicate) {
176 unsigned CC;
177 bool NeedSwap = false;
178
179 // SSE Condition code mapping:
180 // 0 - EQ
181 // 1 - LT
182 // 2 - LE
183 // 3 - UNORD
184 // 4 - NEQ
185 // 5 - NLT
186 // 6 - NLE
187 // 7 - ORD
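// The switch below additionally uses immediates 8 (EQ_UQ) and 12 (NEQ_OQ);
// assuming the VCMP encoding, those values are only available with AVX,
// while legacy SSE CMPSS/CMPSD encode just 0-7.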
188 switch (Predicate) {
189 default: llvm_unreachable("Unexpected predicate");
190 case CmpInst::FCMP_OEQ: CC = 0; break;
191 case CmpInst::FCMP_OGT: NeedSwap = true; [[fallthrough]];
192 case CmpInst::FCMP_OLT: CC = 1; break;
193 case CmpInst::FCMP_OGE: NeedSwap = true; [[fallthrough]];
194 case CmpInst::FCMP_OLE: CC = 2; break;
195 case CmpInst::FCMP_UNO: CC = 3; break;
196 case CmpInst::FCMP_UNE: CC = 4; break;
197 case CmpInst::FCMP_ULE: NeedSwap = true; [[fallthrough]];
198 case CmpInst::FCMP_UGE: CC = 5; break;
199 case CmpInst::FCMP_ULT: NeedSwap = true; [[fallthrough]];
200 case CmpInst::FCMP_UGT: CC = 6; break;
201 case CmpInst::FCMP_ORD: CC = 7; break;
202 case CmpInst::FCMP_UEQ: CC = 8; break;
203 case CmpInst::FCMP_ONE: CC = 12; break;
204 }
205
206 return std::make_pair(CC, NeedSwap);
207 }
208
209 /// Adds a complex addressing mode to the given machine instr builder.
210 /// Note, this will constrain the index register. If it's not possible to
211 /// constrain the given index register, then a new one will be created. The
212 /// IndexReg field of the addressing mode will be updated to match in this case.
213 const MachineInstrBuilder &
214 X86FastISel::addFullAddress(const MachineInstrBuilder &MIB,
215 X86AddressMode &AM) {
216 // First constrain the index register. It needs to be a GR64_NOSP.
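// (RSP/ESP can never be encoded as the index register of an x86 memory
// operand, hence the NOSP class.)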
217 AM.IndexReg = constrainOperandRegClass(MIB->getDesc(), AM.IndexReg,
218 MIB->getNumOperands() +
219 X86::AddrIndexReg);
220 return ::addFullAddress(MIB, AM);
221 }
222
223 /// Check if it is possible to fold the condition from the XALU intrinsic
224 /// into the user. The condition code will only be updated on success.
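///
/// For example (illustrative IR), given
///   %s   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
///   %ovf = extractvalue { i32, i1 } %s, 1
///   br i1 %ovf, label %overflow, label %cont
/// the branch can reuse the EFLAGS set by the ADD and test X86::COND_O
/// instead of materializing %ovf into a register first.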
225 bool X86FastISel::foldX86XALUIntrinsic(X86::CondCode &CC, const Instruction *I,
226 const Value *Cond) {
227 if (!isa<ExtractValueInst>(Cond))
228 return false;
229
230 const auto *EV = cast<ExtractValueInst>(Cond);
231 if (!isa<IntrinsicInst>(EV->getAggregateOperand()))
232 return false;
233
234 const auto *II = cast<IntrinsicInst>(EV->getAggregateOperand());
235 MVT RetVT;
236 const Function *Callee = II->getCalledFunction();
237 Type *RetTy =
238 cast<StructType>(Callee->getReturnType())->getTypeAtIndex(0U);
239 if (!isTypeLegal(RetTy, RetVT))
240 return false;
241
242 if (RetVT != MVT::i32 && RetVT != MVT::i64)
243 return false;
244
245 X86::CondCode TmpCC;
246 switch (II->getIntrinsicID()) {
247 default: return false;
248 case Intrinsic::sadd_with_overflow:
249 case Intrinsic::ssub_with_overflow:
250 case Intrinsic::smul_with_overflow:
251 case Intrinsic::umul_with_overflow: TmpCC = X86::COND_O; break;
252 case Intrinsic::uadd_with_overflow:
253 case Intrinsic::usub_with_overflow: TmpCC = X86::COND_B; break;
254 }
255
256 // Check if both instructions are in the same basic block.
257 if (II->getParent() != I->getParent())
258 return false;
259
260 // Make sure nothing is in the way
261 BasicBlock::const_iterator Start(I);
262 BasicBlock::const_iterator End(II);
263 for (auto Itr = std::prev(Start); Itr != End; --Itr) {
264 // We only expect extractvalue instructions between the intrinsic and the
265 // instruction to be selected.
266 if (!isa<ExtractValueInst>(Itr))
267 return false;
268
269 // Check that the extractvalue operand comes from the intrinsic.
270 const auto *EVI = cast<ExtractValueInst>(Itr);
271 if (EVI->getAggregateOperand() != II)
272 return false;
273 }
274
275 // Make sure no potentially eflags clobbering phi moves can be inserted in
276 // between.
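// (PHI copies are emitted at the end of this block and may be lowered to
// flag-clobbering instructions, e.g. an XOR used to materialize zero.)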
277 auto HasPhis = [](const BasicBlock *Succ) { return !Succ->phis().empty(); };
278 if (I->isTerminator() && llvm::any_of(successors(I), HasPhis))
279 return false;
280
281 // Make sure there are no potentially eflags clobbering constant
282 // materializations in between.
283 if (llvm::any_of(I->operands(), [](Value *V) { return isa<Constant>(V); }))
284 return false;
285
286 CC = TmpCC;
287 return true;
288 }
289
290 bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
291 EVT evt = TLI.getValueType(DL, Ty, /*AllowUnknown=*/true);
292 if (evt == MVT::Other || !evt.isSimple())
293 // Unhandled type. Halt "fast" selection and bail.
294 return false;
295
296 VT = evt.getSimpleVT();
297 // For now, require SSE/SSE2 for performing floating-point operations,
298 // since x87 requires additional work.
299 if (VT == MVT::f64 && !Subtarget->hasSSE2())
300 return false;
301 if (VT == MVT::f32 && !Subtarget->hasSSE1())
302 return false;
303 // Similarly, no f80 support yet.
304 if (VT == MVT::f80)
305 return false;
306 // We only handle legal types. For example, on x86-32 the instruction
307 // selector contains all of the 64-bit instructions from x86-64,
308 // under the assumption that i64 won't be used if the target doesn't
309 // support it.
310 return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
311 }
312
313 /// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
314 /// The address is given by AM, which may also reference a GlobalAddress.
315 /// Return true and the result register by reference if it is possible.
316 bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
317 MachineMemOperand *MMO, unsigned &ResultReg,
318 unsigned Alignment) {
319 bool HasSSE1 = Subtarget->hasSSE1();
320 bool HasSSE2 = Subtarget->hasSSE2();
321 bool HasSSE41 = Subtarget->hasSSE41();
322 bool HasAVX = Subtarget->hasAVX();
323 bool HasAVX2 = Subtarget->hasAVX2();
324 bool HasAVX512 = Subtarget->hasAVX512();
325 bool HasVLX = Subtarget->hasVLX();
326 bool IsNonTemporal = MMO && MMO->isNonTemporal();
327
328 // Treat i1 loads the same as i8 loads. Masking will be done when storing.
329 if (VT == MVT::i1)
330 VT = MVT::i8;
331
332 // Get opcode and regclass of the output for the given load instruction.
333 unsigned Opc = 0;
334 switch (VT.SimpleTy) {
335 default: return false;
336 case MVT::i8:
337 Opc = X86::MOV8rm;
338 break;
339 case MVT::i16:
340 Opc = X86::MOV16rm;
341 break;
342 case MVT::i32:
343 Opc = X86::MOV32rm;
344 break;
345 case MVT::i64:
346 // Must be in x86-64 mode.
347 Opc = X86::MOV64rm;
348 break;
349 case MVT::f32:
350 Opc = HasAVX512 ? X86::VMOVSSZrm_alt
351 : HasAVX ? X86::VMOVSSrm_alt
352 : HasSSE1 ? X86::MOVSSrm_alt
353 : X86::LD_Fp32m;
354 break;
355 case MVT::f64:
356 Opc = HasAVX512 ? X86::VMOVSDZrm_alt
357 : HasAVX ? X86::VMOVSDrm_alt
358 : HasSSE2 ? X86::MOVSDrm_alt
359 : X86::LD_Fp64m;
360 break;
361 case MVT::f80:
362 // No f80 support yet.
363 return false;
364 case MVT::v4f32:
365 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
366 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
367 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
368 else if (Alignment >= 16)
369 Opc = HasVLX ? X86::VMOVAPSZ128rm :
370 HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm;
371 else
372 Opc = HasVLX ? X86::VMOVUPSZ128rm :
373 HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm;
374 break;
375 case MVT::v2f64:
376 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
377 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
378 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
379 else if (Alignment >= 16)
380 Opc = HasVLX ? X86::VMOVAPDZ128rm :
381 HasAVX ? X86::VMOVAPDrm : X86::MOVAPDrm;
382 else
383 Opc = HasVLX ? X86::VMOVUPDZ128rm :
384 HasAVX ? X86::VMOVUPDrm : X86::MOVUPDrm;
385 break;
386 case MVT::v4i32:
387 case MVT::v2i64:
388 case MVT::v8i16:
389 case MVT::v16i8:
390 if (IsNonTemporal && Alignment >= 16 && HasSSE41)
391 Opc = HasVLX ? X86::VMOVNTDQAZ128rm :
392 HasAVX ? X86::VMOVNTDQArm : X86::MOVNTDQArm;
393 else if (Alignment >= 16)
394 Opc = HasVLX ? X86::VMOVDQA64Z128rm :
395 HasAVX ? X86::VMOVDQArm : X86::MOVDQArm;
396 else
397 Opc = HasVLX ? X86::VMOVDQU64Z128rm :
398 HasAVX ? X86::VMOVDQUrm : X86::MOVDQUrm;
399 break;
400 case MVT::v8f32:
401 assert(HasAVX);
402 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
403 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
404 else if (IsNonTemporal && Alignment >= 16)
405 return false; // Force split for X86::VMOVNTDQArm
406 else if (Alignment >= 32)
407 Opc = HasVLX ? X86::VMOVAPSZ256rm : X86::VMOVAPSYrm;
408 else
409 Opc = HasVLX ? X86::VMOVUPSZ256rm : X86::VMOVUPSYrm;
410 break;
411 case MVT::v4f64:
412 assert(HasAVX);
413 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
414 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
415 else if (IsNonTemporal && Alignment >= 16)
416 return false; // Force split for X86::VMOVNTDQArm
417 else if (Alignment >= 32)
418 Opc = HasVLX ? X86::VMOVAPDZ256rm : X86::VMOVAPDYrm;
419 else
420 Opc = HasVLX ? X86::VMOVUPDZ256rm : X86::VMOVUPDYrm;
421 break;
422 case MVT::v8i32:
423 case MVT::v4i64:
424 case MVT::v16i16:
425 case MVT::v32i8:
426 assert(HasAVX);
427 if (IsNonTemporal && Alignment >= 32 && HasAVX2)
428 Opc = HasVLX ? X86::VMOVNTDQAZ256rm : X86::VMOVNTDQAYrm;
429 else if (IsNonTemporal && Alignment >= 16)
430 return false; // Force split for X86::VMOVNTDQArm
431 else if (Alignment >= 32)
432 Opc = HasVLX ? X86::VMOVDQA64Z256rm : X86::VMOVDQAYrm;
433 else
434 Opc = HasVLX ? X86::VMOVDQU64Z256rm : X86::VMOVDQUYrm;
435 break;
436 case MVT::v16f32:
437 assert(HasAVX512);
438 if (IsNonTemporal && Alignment >= 64)
439 Opc = X86::VMOVNTDQAZrm;
440 else
441 Opc = (Alignment >= 64) ? X86::VMOVAPSZrm : X86::VMOVUPSZrm;
442 break;
443 case MVT::v8f64:
444 assert(HasAVX512);
445 if (IsNonTemporal && Alignment >= 64)
446 Opc = X86::VMOVNTDQAZrm;
447 else
448 Opc = (Alignment >= 64) ? X86::VMOVAPDZrm : X86::VMOVUPDZrm;
449 break;
450 case MVT::v8i64:
451 case MVT::v16i32:
452 case MVT::v32i16:
453 case MVT::v64i8:
454 assert(HasAVX512);
455 // Note: There are a lot more choices based on type with AVX-512, but
456 // there's really no advantage when the load isn't masked.
457 if (IsNonTemporal && Alignment >= 64)
458 Opc = X86::VMOVNTDQAZrm;
459 else
460 Opc = (Alignment >= 64) ? X86::VMOVDQA64Zrm : X86::VMOVDQU64Zrm;
461 break;
462 }
463
464 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
465
466 ResultReg = createResultReg(RC);
467 MachineInstrBuilder MIB =
468 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
469 addFullAddress(MIB, AM);
470 if (MMO)
471 MIB->addMemOperand(*FuncInfo.MF, MMO);
472 return true;
473 }
474
475 /// X86FastEmitStore - Emit a machine instruction to store a value Val of
476 /// type VT. The address is either pre-computed, consisting of a base pointer
477 /// and a displacement offset, or a GlobalAddress.
478 /// Return true if it is possible.
479 bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
480 MachineMemOperand *MMO, bool Aligned) {
481 bool HasSSE1 = Subtarget->hasSSE1();
482 bool HasSSE2 = Subtarget->hasSSE2();
483 bool HasSSE4A = Subtarget->hasSSE4A();
484 bool HasAVX = Subtarget->hasAVX();
485 bool HasAVX512 = Subtarget->hasAVX512();
486 bool HasVLX = Subtarget->hasVLX();
487 bool IsNonTemporal = MMO && MMO->isNonTemporal();
488
489 // Get opcode and regclass of the output for the given store instruction.
490 unsigned Opc = 0;
491 switch (VT.getSimpleVT().SimpleTy) {
492 case MVT::f80: // No f80 support yet.
493 default: return false;
494 case MVT::i1: {
495 // Mask out all but lowest bit.
496 Register AndResult = createResultReg(&X86::GR8RegClass);
497 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
498 TII.get(X86::AND8ri), AndResult)
499 .addReg(ValReg).addImm(1);
500 ValReg = AndResult;
501 [[fallthrough]]; // handle i1 as i8.
502 }
503 case MVT::i8: Opc = X86::MOV8mr; break;
504 case MVT::i16: Opc = X86::MOV16mr; break;
505 case MVT::i32:
506 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTImr : X86::MOV32mr;
507 break;
508 case MVT::i64:
509 // Must be in x86-64 mode.
510 Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
511 break;
512 case MVT::f32:
513 if (HasSSE1) {
514 if (IsNonTemporal && HasSSE4A)
515 Opc = X86::MOVNTSS;
516 else
517 Opc = HasAVX512 ? X86::VMOVSSZmr :
518 HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
519 } else
520 Opc = X86::ST_Fp32m;
521 break;
522 case MVT::f64:
523 if (HasSSE2) {
524 if (IsNonTemporal && HasSSE4A)
525 Opc = X86::MOVNTSD;
526 else
527 Opc = HasAVX512 ? X86::VMOVSDZmr :
528 HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
529 } else
530 Opc = X86::ST_Fp64m;
531 break;
532 case MVT::x86mmx:
533 Opc = (IsNonTemporal && HasSSE1) ? X86::MMX_MOVNTQmr : X86::MMX_MOVQ64mr;
534 break;
535 case MVT::v4f32:
536 if (Aligned) {
537 if (IsNonTemporal)
538 Opc = HasVLX ? X86::VMOVNTPSZ128mr :
539 HasAVX ? X86::VMOVNTPSmr : X86::MOVNTPSmr;
540 else
541 Opc = HasVLX ? X86::VMOVAPSZ128mr :
542 HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr;
543 } else
544 Opc = HasVLX ? X86::VMOVUPSZ128mr :
545 HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr;
546 break;
547 case MVT::v2f64:
548 if (Aligned) {
549 if (IsNonTemporal)
550 Opc = HasVLX ? X86::VMOVNTPDZ128mr :
551 HasAVX ? X86::VMOVNTPDmr : X86::MOVNTPDmr;
552 else
553 Opc = HasVLX ? X86::VMOVAPDZ128mr :
554 HasAVX ? X86::VMOVAPDmr : X86::MOVAPDmr;
555 } else
556 Opc = HasVLX ? X86::VMOVUPDZ128mr :
557 HasAVX ? X86::VMOVUPDmr : X86::MOVUPDmr;
558 break;
559 case MVT::v4i32:
560 case MVT::v2i64:
561 case MVT::v8i16:
562 case MVT::v16i8:
563 if (Aligned) {
564 if (IsNonTemporal)
565 Opc = HasVLX ? X86::VMOVNTDQZ128mr :
566 HasAVX ? X86::VMOVNTDQmr : X86::MOVNTDQmr;
567 else
568 Opc = HasVLX ? X86::VMOVDQA64Z128mr :
569 HasAVX ? X86::VMOVDQAmr : X86::MOVDQAmr;
570 } else
571 Opc = HasVLX ? X86::VMOVDQU64Z128mr :
572 HasAVX ? X86::VMOVDQUmr : X86::MOVDQUmr;
573 break;
574 case MVT::v8f32:
575 assert(HasAVX);
576 if (Aligned) {
577 if (IsNonTemporal)
578 Opc = HasVLX ? X86::VMOVNTPSZ256mr : X86::VMOVNTPSYmr;
579 else
580 Opc = HasVLX ? X86::VMOVAPSZ256mr : X86::VMOVAPSYmr;
581 } else
582 Opc = HasVLX ? X86::VMOVUPSZ256mr : X86::VMOVUPSYmr;
583 break;
584 case MVT::v4f64:
585 assert(HasAVX);
586 if (Aligned) {
587 if (IsNonTemporal)
588 Opc = HasVLX ? X86::VMOVNTPDZ256mr : X86::VMOVNTPDYmr;
589 else
590 Opc = HasVLX ? X86::VMOVAPDZ256mr : X86::VMOVAPDYmr;
591 } else
592 Opc = HasVLX ? X86::VMOVUPDZ256mr : X86::VMOVUPDYmr;
593 break;
594 case MVT::v8i32:
595 case MVT::v4i64:
596 case MVT::v16i16:
597 case MVT::v32i8:
598 assert(HasAVX);
599 if (Aligned) {
600 if (IsNonTemporal)
601 Opc = HasVLX ? X86::VMOVNTDQZ256mr : X86::VMOVNTDQYmr;
602 else
603 Opc = HasVLX ? X86::VMOVDQA64Z256mr : X86::VMOVDQAYmr;
604 } else
605 Opc = HasVLX ? X86::VMOVDQU64Z256mr : X86::VMOVDQUYmr;
606 break;
607 case MVT::v16f32:
608 assert(HasAVX512);
609 if (Aligned)
610 Opc = IsNonTemporal ? X86::VMOVNTPSZmr : X86::VMOVAPSZmr;
611 else
612 Opc = X86::VMOVUPSZmr;
613 break;
614 case MVT::v8f64:
615 assert(HasAVX512);
616 if (Aligned) {
617 Opc = IsNonTemporal ? X86::VMOVNTPDZmr : X86::VMOVAPDZmr;
618 } else
619 Opc = X86::VMOVUPDZmr;
620 break;
621 case MVT::v8i64:
622 case MVT::v16i32:
623 case MVT::v32i16:
624 case MVT::v64i8:
625 assert(HasAVX512);
626 // Note: There are a lot more choices based on type with AVX-512, but
627 // there's really no advantage when the store isn't masked.
628 if (Aligned)
629 Opc = IsNonTemporal ? X86::VMOVNTDQZmr : X86::VMOVDQA64Zmr;
630 else
631 Opc = X86::VMOVDQU64Zmr;
632 break;
633 }
634
635 const MCInstrDesc &Desc = TII.get(Opc);
636 // Some of the instructions in the previous switch use FR128 instead
637 // of FR32 for ValReg. Make sure the register we feed the instruction
638 // matches its register class constraints.
639 // Note: It is fine to do a copy from FR32 to FR128; these are the same
640 // registers behind the scenes, which is why this did not trigger
641 // any bugs before.
642 ValReg = constrainOperandRegClass(Desc, ValReg, Desc.getNumOperands() - 1);
643 MachineInstrBuilder MIB =
644 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, Desc);
645 addFullAddress(MIB, AM).addReg(ValReg);
646 if (MMO)
647 MIB->addMemOperand(*FuncInfo.MF, MMO);
648
649 return true;
650 }
651
652 bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
653 X86AddressMode &AM,
654 MachineMemOperand *MMO, bool Aligned) {
655 // Handle 'null' like i32/i64 0.
656 if (isa<ConstantPointerNull>(Val))
657 Val = Constant::getNullValue(DL.getIntPtrType(Val->getContext()));
658
659 // If this is a store of a simple constant, fold the constant into the store.
660 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
661 unsigned Opc = 0;
662 bool Signed = true;
663 switch (VT.getSimpleVT().SimpleTy) {
664 default: break;
665 case MVT::i1:
666 Signed = false;
667 [[fallthrough]]; // Handle as i8.
668 case MVT::i8: Opc = X86::MOV8mi; break;
669 case MVT::i16: Opc = X86::MOV16mi; break;
670 case MVT::i32: Opc = X86::MOV32mi; break;
671 case MVT::i64:
672 // Must be a 32-bit sign extended value.
673 if (isInt<32>(CI->getSExtValue()))
674 Opc = X86::MOV64mi32;
675 break;
676 }
677
678 if (Opc) {
679 MachineInstrBuilder MIB =
680 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc));
681 addFullAddress(MIB, AM).addImm(Signed ? (uint64_t) CI->getSExtValue()
682 : CI->getZExtValue());
683 if (MMO)
684 MIB->addMemOperand(*FuncInfo.MF, MMO);
685 return true;
686 }
687 }
688
689 Register ValReg = getRegForValue(Val);
690 if (ValReg == 0)
691 return false;
692
693 return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned);
694 }
695
696 /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
697 /// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
698 /// ISD::SIGN_EXTEND).
699 bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
700 unsigned Src, EVT SrcVT,
701 unsigned &ResultReg) {
702 unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
703 if (RR == 0)
704 return false;
705
706 ResultReg = RR;
707 return true;
708 }
709
710 bool X86FastISel::handleConstantAddresses(const Value *V, X86AddressMode &AM) {
711 // Handle constant address.
712 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
713 // Can't handle alternate code models yet.
714 if (TM.getCodeModel() != CodeModel::Small &&
715 TM.getCodeModel() != CodeModel::Medium)
716 return false;
717
718 // Can't handle large objects yet.
719 if (TM.isLargeGlobalValue(GV))
720 return false;
721
722 // Can't handle TLS yet.
723 if (GV->isThreadLocal())
724 return false;
725
726 // Can't handle !absolute_symbol references yet.
727 if (GV->isAbsoluteSymbolRef())
728 return false;
729
730 // RIP-relative addresses can't have additional register operands, so if
731 // we've already folded stuff into the addressing mode, just force the
732 // global value into its own register, which we can use as the basereg.
733 if (!Subtarget->isPICStyleRIPRel() ||
734 (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
735 // Okay, we've committed to selecting this global. Set up the address.
736 AM.GV = GV;
737
738 // Allow the subtarget to classify the global.
739 unsigned char GVFlags = Subtarget->classifyGlobalReference(GV);
740
741 // If this reference is relative to the pic base, set it now.
742 if (isGlobalRelativeToPICBase(GVFlags)) {
743 // FIXME: How do we know Base.Reg is free??
744 AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
745 }
746
747 // Unless the ABI requires an extra load, return a direct reference to
748 // the global.
749 if (!isGlobalStubReference(GVFlags)) {
750 if (Subtarget->isPICStyleRIPRel()) {
751 // Use rip-relative addressing if we can. Above we verified that the
752 // base and index registers are unused.
753 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
754 AM.Base.Reg = X86::RIP;
755 }
756 AM.GVOpFlags = GVFlags;
757 return true;
758 }
759
760 // Ok, we need to do a load from a stub. If we've already loaded from
761 // this stub, reuse the loaded pointer, otherwise emit the load now.
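// For example, on x86-64 PIC this is typically a GOT load such as
//   movq foo@GOTPCREL(%rip), %reg
// after which %reg becomes the base register of the final address.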
762 DenseMap<const Value *, Register>::iterator I = LocalValueMap.find(V);
763 Register LoadReg;
764 if (I != LocalValueMap.end() && I->second) {
765 LoadReg = I->second;
766 } else {
767 // Issue load from stub.
768 unsigned Opc = 0;
769 const TargetRegisterClass *RC = nullptr;
770 X86AddressMode StubAM;
771 StubAM.Base.Reg = AM.Base.Reg;
772 StubAM.GV = GV;
773 StubAM.GVOpFlags = GVFlags;
774
775 // Prepare for inserting code in the local-value area.
776 SavePoint SaveInsertPt = enterLocalValueArea();
777
778 if (TLI.getPointerTy(DL) == MVT::i64) {
779 Opc = X86::MOV64rm;
780 RC = &X86::GR64RegClass;
781 } else {
782 Opc = X86::MOV32rm;
783 RC = &X86::GR32RegClass;
784 }
785
786 if (Subtarget->isPICStyleRIPRel() || GVFlags == X86II::MO_GOTPCREL ||
787 GVFlags == X86II::MO_GOTPCREL_NORELAX)
788 StubAM.Base.Reg = X86::RIP;
789
790 LoadReg = createResultReg(RC);
791 MachineInstrBuilder LoadMI =
792 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), LoadReg);
793 addFullAddress(LoadMI, StubAM);
794
795 // Ok, back to normal mode.
796 leaveLocalValueArea(SaveInsertPt);
797
798 // Prevent loading the GV stub multiple times in the same MBB.
799 LocalValueMap[V] = LoadReg;
800 }
801
802 // Now construct the final address. Note that the Disp, Scale,
803 // and Index values may already be set here.
804 AM.Base.Reg = LoadReg;
805 AM.GV = nullptr;
806 return true;
807 }
808 }
809
810 // If all else fails, try to materialize the value in a register.
811 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
812 if (AM.Base.Reg == 0) {
813 AM.Base.Reg = getRegForValue(V);
814 return AM.Base.Reg != 0;
815 }
816 if (AM.IndexReg == 0) {
817 assert(AM.Scale == 1 && "Scale with no index!");
818 AM.IndexReg = getRegForValue(V);
819 return AM.IndexReg != 0;
820 }
821 }
822
823 return false;
824 }
825
826 /// X86SelectAddress - Attempt to fill in an address from the given value.
827 ///
828 bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
829 SmallVector<const Value *, 32> GEPs;
830 redo_gep:
831 const User *U = nullptr;
832 unsigned Opcode = Instruction::UserOp1;
833 if (const Instruction *I = dyn_cast<Instruction>(V)) {
834 // Don't walk into other basic blocks; it's possible we haven't
835 // visited them yet, so the instructions may not yet be assigned
836 // virtual registers.
837 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
838 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
839 Opcode = I->getOpcode();
840 U = I;
841 }
842 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
843 Opcode = C->getOpcode();
844 U = C;
845 }
846
847 if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
848 if (Ty->getAddressSpace() > 255)
849 // Fast instruction selection doesn't support the special
850 // address spaces.
851 return false;
852
853 switch (Opcode) {
854 default: break;
855 case Instruction::BitCast:
856 // Look past bitcasts.
857 return X86SelectAddress(U->getOperand(0), AM);
858
859 case Instruction::IntToPtr:
860 // Look past no-op inttoptrs.
861 if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
862 TLI.getPointerTy(DL))
863 return X86SelectAddress(U->getOperand(0), AM);
864 break;
865
866 case Instruction::PtrToInt:
867 // Look past no-op ptrtoints.
868 if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
869 return X86SelectAddress(U->getOperand(0), AM);
870 break;
871
872 case Instruction::Alloca: {
873 // Do static allocas.
874 const AllocaInst *A = cast<AllocaInst>(V);
875 DenseMap<const AllocaInst *, int>::iterator SI =
876 FuncInfo.StaticAllocaMap.find(A);
877 if (SI != FuncInfo.StaticAllocaMap.end()) {
878 AM.BaseType = X86AddressMode::FrameIndexBase;
879 AM.Base.FrameIndex = SI->second;
880 return true;
881 }
882 break;
883 }
884
885 case Instruction::Add: {
886 // Adds of constants are common and easy enough.
887 if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
888 uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
889 // They have to fit in the 32-bit signed displacement field though.
890 if (isInt<32>(Disp)) {
891 AM.Disp = (uint32_t)Disp;
892 return X86SelectAddress(U->getOperand(0), AM);
893 }
894 }
895 break;
896 }
897
898 case Instruction::GetElementPtr: {
899 X86AddressMode SavedAM = AM;
900
901 // Pattern-match simple GEPs.
902 uint64_t Disp = (int32_t)AM.Disp;
903 unsigned IndexReg = AM.IndexReg;
904 unsigned Scale = AM.Scale;
905 MVT PtrVT = TLI.getValueType(DL, U->getType()).getSimpleVT();
906
907 gep_type_iterator GTI = gep_type_begin(U);
908 // Iterate through the indices, folding what we can. Constants can be
909 // folded, and one dynamic index can be handled, if the scale is supported.
910 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
911 i != e; ++i, ++GTI) {
912 const Value *Op = *i;
913 if (StructType *STy = GTI.getStructTypeOrNull()) {
914 const StructLayout *SL = DL.getStructLayout(STy);
915 Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
916 continue;
917 }
918
919 // An array/variable index is always of the form i*S where S is the
920 // constant scale size. See if we can push the scale into immediates.
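// E.g. for "getelementptr i32, ptr %p, i64 %i" the stride S is 4, which can
// become base = %p, index = %i, scale = 4 in the addressing mode.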
921 uint64_t S = GTI.getSequentialElementStride(DL);
922 for (;;) {
923 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
924 // Constant-offset addressing.
925 Disp += CI->getSExtValue() * S;
926 break;
927 }
928 if (canFoldAddIntoGEP(U, Op)) {
929 // A compatible add with a constant operand. Fold the constant.
930 ConstantInt *CI =
931 cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
932 Disp += CI->getSExtValue() * S;
933 // Iterate on the other operand.
934 Op = cast<AddOperator>(Op)->getOperand(0);
935 continue;
936 }
937 if (IndexReg == 0 &&
938 (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
939 (S == 1 || S == 2 || S == 4 || S == 8)) {
940 // Scaled-index addressing.
941 Scale = S;
942 IndexReg = getRegForGEPIndex(PtrVT, Op);
943 if (IndexReg == 0)
944 return false;
945 break;
946 }
947 // Unsupported.
948 goto unsupported_gep;
949 }
950 }
951
952 // Check for displacement overflow.
953 if (!isInt<32>(Disp))
954 break;
955
956 AM.IndexReg = IndexReg;
957 AM.Scale = Scale;
958 AM.Disp = (uint32_t)Disp;
959 GEPs.push_back(V);
960
961 if (const GetElementPtrInst *GEP =
962 dyn_cast<GetElementPtrInst>(U->getOperand(0))) {
963 // Ok, the GEP indices were covered by constant-offset and scaled-index
964 // addressing. Update the address state and move on to examining the base.
965 V = GEP;
966 goto redo_gep;
967 } else if (X86SelectAddress(U->getOperand(0), AM)) {
968 return true;
969 }
970
971 // If we couldn't merge the gep value into this addr mode, revert to
972 // our address and just match the value instead of completely failing.
973 AM = SavedAM;
974
975 for (const Value *I : reverse(GEPs))
976 if (handleConstantAddresses(I, AM))
977 return true;
978
979 return false;
980 unsupported_gep:
981 // Ok, the GEP indices weren't all covered.
982 break;
983 }
984 }
985
986 return handleConstantAddresses(V, AM);
987 }
988
989 /// X86SelectCallAddress - Attempt to fill in an address from the given value.
990 ///
991 bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
992 const User *U = nullptr;
993 unsigned Opcode = Instruction::UserOp1;
994 const Instruction *I = dyn_cast<Instruction>(V);
995 // Record if the value is defined in the same basic block.
996 //
997 // This information is crucial for knowing whether or not folding an
998 // operand is valid.
999 // Indeed, FastISel generates or reuses a virtual register for all
1000 // operands of all instructions it selects. Obviously, the definition and
1001 // its uses must use the same virtual register; otherwise the produced
1002 // code is incorrect.
1003 // Before instruction selection, FunctionLoweringInfo::set sets the virtual
1004 // registers for values that are alive across basic blocks. This ensures
1005 // that the values are set consistently across basic blocks, even
1006 // if different instruction selection mechanisms are used (e.g., a mix of
1007 // SDISel and FastISel).
1008 // For values local to a basic block, the instruction selection process
1009 // generates these virtual registers with whatever method is appropriate
1010 // for its needs. In particular, FastISel and SDISel do not share the way
1011 // local virtual registers are set.
1012 // Therefore, it is impossible (or at least unsafe) to share values
1013 // between basic blocks unless they use the same instruction selection
1014 // method, which is not guaranteed for X86.
1015 // Moreover, things like hasOneUse could not be used accurately if we
1016 // allowed references to values across basic blocks when they are not
1017 // alive across basic blocks initially.
1018 bool InMBB = true;
1019 if (I) {
1020 Opcode = I->getOpcode();
1021 U = I;
1022 InMBB = I->getParent() == FuncInfo.MBB->getBasicBlock();
1023 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
1024 Opcode = C->getOpcode();
1025 U = C;
1026 }
1027
1028 switch (Opcode) {
1029 default: break;
1030 case Instruction::BitCast:
1031 // Look past bitcasts if its operand is in the same BB.
1032 if (InMBB)
1033 return X86SelectCallAddress(U->getOperand(0), AM);
1034 break;
1035
1036 case Instruction::IntToPtr:
1037 // Look past no-op inttoptrs if its operand is in the same BB.
1038 if (InMBB &&
1039 TLI.getValueType(DL, U->getOperand(0)->getType()) ==
1040 TLI.getPointerTy(DL))
1041 return X86SelectCallAddress(U->getOperand(0), AM);
1042 break;
1043
1044 case Instruction::PtrToInt:
1045 // Look past no-op ptrtoints if its operand is in the same BB.
1046 if (InMBB && TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
1047 return X86SelectCallAddress(U->getOperand(0), AM);
1048 break;
1049 }
1050
1051 // Handle constant address.
1052 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1053 // Can't handle alternate code models yet.
1054 if (TM.getCodeModel() != CodeModel::Small &&
1055 TM.getCodeModel() != CodeModel::Medium)
1056 return false;
1057
1058 // RIP-relative addresses can't have additional register operands.
1059 if (Subtarget->isPICStyleRIPRel() &&
1060 (AM.Base.Reg != 0 || AM.IndexReg != 0))
1061 return false;
1062
1063 // Can't handle TLS.
1064 if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
1065 if (GVar->isThreadLocal())
1066 return false;
1067
1068 // Okay, we've committed to selecting this global. Set up the basic address.
1069 AM.GV = GV;
1070
1071 // Return a direct reference to the global. Fastisel can handle calls to
1072 // functions that require loads, such as dllimport and nonlazybind
1073 // functions.
1074 if (Subtarget->isPICStyleRIPRel()) {
1075 // Use rip-relative addressing if we can. Above we verified that the
1076 // base and index registers are unused.
1077 assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
1078 AM.Base.Reg = X86::RIP;
1079 } else {
1080 AM.GVOpFlags = Subtarget->classifyLocalReference(nullptr);
1081 }
1082
1083 return true;
1084 }
1085
1086 // If all else fails, try to materialize the value in a register.
1087 if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
1088 auto GetCallRegForValue = [this](const Value *V) {
1089 Register Reg = getRegForValue(V);
1090
1091 // In 64-bit mode, we need a 64-bit register even if pointers are 32 bits.
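// (This is the ILP32/x32 case: pointers are 32-bit values, but an indirect
// call still needs a 64-bit register, so zero-extend via SUBREG_TO_REG.)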
1092 if (Reg && Subtarget->isTarget64BitILP32()) {
1093 Register CopyReg = createResultReg(&X86::GR32RegClass);
1094 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32rr),
1095 CopyReg)
1096 .addReg(Reg);
1097
1098 Register ExtReg = createResultReg(&X86::GR64RegClass);
1099 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1100 TII.get(TargetOpcode::SUBREG_TO_REG), ExtReg)
1101 .addImm(0)
1102 .addReg(CopyReg)
1103 .addImm(X86::sub_32bit);
1104 Reg = ExtReg;
1105 }
1106
1107 return Reg;
1108 };
1109
1110 if (AM.Base.Reg == 0) {
1111 AM.Base.Reg = GetCallRegForValue(V);
1112 return AM.Base.Reg != 0;
1113 }
1114 if (AM.IndexReg == 0) {
1115 assert(AM.Scale == 1 && "Scale with no index!");
1116 AM.IndexReg = GetCallRegForValue(V);
1117 return AM.IndexReg != 0;
1118 }
1119 }
1120
1121 return false;
1122 }
1123
1124
1125 /// X86SelectStore - Select and emit code to implement store instructions.
1126 bool X86FastISel::X86SelectStore(const Instruction *I) {
1127 // Atomic stores need special handling.
1128 const StoreInst *S = cast<StoreInst>(I);
1129
1130 if (S->isAtomic())
1131 return false;
1132
1133 const Value *PtrV = I->getOperand(1);
1134 if (TLI.supportSwiftError()) {
1135 // Swifterror values can come from either a function parameter with
1136 // swifterror attribute or an alloca with swifterror attribute.
1137 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
1138 if (Arg->hasSwiftErrorAttr())
1139 return false;
1140 }
1141
1142 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
1143 if (Alloca->isSwiftError())
1144 return false;
1145 }
1146 }
1147
1148 const Value *Val = S->getValueOperand();
1149 const Value *Ptr = S->getPointerOperand();
1150
1151 MVT VT;
1152 if (!isTypeLegal(Val->getType(), VT, /*AllowI1=*/true))
1153 return false;
1154
1155 Align Alignment = S->getAlign();
1156 Align ABIAlignment = DL.getABITypeAlign(Val->getType());
1157 bool Aligned = Alignment >= ABIAlignment;
1158
1159 X86AddressMode AM;
1160 if (!X86SelectAddress(Ptr, AM))
1161 return false;
1162
1163 return X86FastEmitStore(VT, Val, AM, createMachineMemOperandFor(I), Aligned);
1164 }
1165
1166 /// X86SelectRet - Select and emit code to implement ret instructions.
1167 bool X86FastISel::X86SelectRet(const Instruction *I) {
1168 const ReturnInst *Ret = cast<ReturnInst>(I);
1169 const Function &F = *I->getParent()->getParent();
1170 const X86MachineFunctionInfo *X86MFInfo =
1171 FuncInfo.MF->getInfo<X86MachineFunctionInfo>();
1172
1173 if (!FuncInfo.CanLowerReturn)
1174 return false;
1175
1176 if (TLI.supportSwiftError() &&
1177 F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
1178 return false;
1179
1180 if (TLI.supportSplitCSR(FuncInfo.MF))
1181 return false;
1182
1183 CallingConv::ID CC = F.getCallingConv();
1184 if (CC != CallingConv::C &&
1185 CC != CallingConv::Fast &&
1186 CC != CallingConv::Tail &&
1187 CC != CallingConv::SwiftTail &&
1188 CC != CallingConv::X86_FastCall &&
1189 CC != CallingConv::X86_StdCall &&
1190 CC != CallingConv::X86_ThisCall &&
1191 CC != CallingConv::X86_64_SysV &&
1192 CC != CallingConv::Win64)
1193 return false;
1194
1195 // Don't handle popping bytes if they don't fit the ret's immediate.
1196 if (!isUInt<16>(X86MFInfo->getBytesToPopOnReturn()))
1197 return false;
1198
1199 // fastcc with -tailcallopt is intended to provide a guaranteed
1200 // tail call optimization. Fastisel doesn't know how to do that.
1201 if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
1202 CC == CallingConv::Tail || CC == CallingConv::SwiftTail)
1203 return false;
1204
1205 // Let SDISel handle vararg functions.
1206 if (F.isVarArg())
1207 return false;
1208
1209 // Build a list of return value registers.
1210 SmallVector<unsigned, 4> RetRegs;
1211
1212 if (Ret->getNumOperands() > 0) {
1213 SmallVector<ISD::OutputArg, 4> Outs;
1214 GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1215
1216 // Analyze operands of the call, assigning locations to each operand.
1217 SmallVector<CCValAssign, 16> ValLocs;
1218 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
1219 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
1220
1221 const Value *RV = Ret->getOperand(0);
1222 Register Reg = getRegForValue(RV);
1223 if (Reg == 0)
1224 return false;
1225
1226 // Only handle a single return value for now.
1227 if (ValLocs.size() != 1)
1228 return false;
1229
1230 CCValAssign &VA = ValLocs[0];
1231
1232 // Don't bother handling odd stuff for now.
1233 if (VA.getLocInfo() != CCValAssign::Full)
1234 return false;
1235 // Only handle register returns for now.
1236 if (!VA.isRegLoc())
1237 return false;
1238
1239 // The calling-convention tables for x87 returns don't tell
1240 // the whole story.
1241 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
1242 return false;
1243
1244 unsigned SrcReg = Reg + VA.getValNo();
1245 EVT SrcVT = TLI.getValueType(DL, RV->getType());
1246 EVT DstVT = VA.getValVT();
1247 // Special handling for extended integers.
1248 if (SrcVT != DstVT) {
1249 if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
1250 return false;
1251
1252 if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
1253 return false;
1254
1255 if (SrcVT == MVT::i1) {
1256 if (Outs[0].Flags.isSExt())
1257 return false;
1258 SrcReg = fastEmitZExtFromI1(MVT::i8, SrcReg);
1259 SrcVT = MVT::i8;
1260 }
1261 if (SrcVT != DstVT) {
1262 unsigned Op =
1263 Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
1264 SrcReg =
1265 fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op, SrcReg);
1266 }
1267 }
1268
1269 // Make the copy.
1270 Register DstReg = VA.getLocReg();
1271 const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
1272 // Avoid a cross-class copy. This is very unlikely.
1273 if (!SrcRC->contains(DstReg))
1274 return false;
1275 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1276 TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
1277
1278 // Add register to return instruction.
1279 RetRegs.push_back(VA.getLocReg());
1280 }
1281
1282 // Swift calling convention does not require we copy the sret argument
1283 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
1284
1285 // All x86 ABIs require that for returning structs by value we copy
1286 // the sret argument into %rax/%eax (depending on ABI) for the return.
1287 // We saved the argument into a virtual register in the entry block,
1288 // so now we copy the value out and into %rax/%eax.
1289 if (F.hasStructRetAttr() && CC != CallingConv::Swift &&
1290 CC != CallingConv::SwiftTail) {
1291 Register Reg = X86MFInfo->getSRetReturnReg();
1292 assert(Reg &&
1293 "SRetReturnReg should have been set in LowerFormalArguments()!");
1294 unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
1295 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1296 TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
1297 RetRegs.push_back(RetReg);
1298 }
1299
1300 // Now emit the RET.
1301 MachineInstrBuilder MIB;
1302 if (X86MFInfo->getBytesToPopOnReturn()) {
1303 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1304 TII.get(Subtarget->is64Bit() ? X86::RETI64 : X86::RETI32))
1305 .addImm(X86MFInfo->getBytesToPopOnReturn());
1306 } else {
1307 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1308 TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32));
1309 }
1310 for (unsigned Reg : RetRegs)
1311 MIB.addReg(Reg, RegState::Implicit);
1312 return true;
1313 }
1314
1315 /// X86SelectLoad - Select and emit code to implement load instructions.
1316 ///
1317 bool X86FastISel::X86SelectLoad(const Instruction *I) {
1318 const LoadInst *LI = cast<LoadInst>(I);
1319
1320 // Atomic loads need special handling.
1321 if (LI->isAtomic())
1322 return false;
1323
1324 const Value *SV = I->getOperand(0);
1325 if (TLI.supportSwiftError()) {
1326 // Swifterror values can come from either a function parameter with
1327 // swifterror attribute or an alloca with swifterror attribute.
1328 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
1329 if (Arg->hasSwiftErrorAttr())
1330 return false;
1331 }
1332
1333 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
1334 if (Alloca->isSwiftError())
1335 return false;
1336 }
1337 }
1338
1339 MVT VT;
1340 if (!isTypeLegal(LI->getType(), VT, /*AllowI1=*/true))
1341 return false;
1342
1343 const Value *Ptr = LI->getPointerOperand();
1344
1345 X86AddressMode AM;
1346 if (!X86SelectAddress(Ptr, AM))
1347 return false;
1348
1349 unsigned ResultReg = 0;
1350 if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
1351 LI->getAlign().value()))
1352 return false;
1353
1354 updateValueMap(I, ResultReg);
1355 return true;
1356 }
1357
1358 static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
1359 bool HasAVX512 = Subtarget->hasAVX512();
1360 bool HasAVX = Subtarget->hasAVX();
1361 bool HasSSE1 = Subtarget->hasSSE1();
1362 bool HasSSE2 = Subtarget->hasSSE2();
1363
1364 switch (VT.getSimpleVT().SimpleTy) {
1365 default: return 0;
1366 case MVT::i8: return X86::CMP8rr;
1367 case MVT::i16: return X86::CMP16rr;
1368 case MVT::i32: return X86::CMP32rr;
1369 case MVT::i64: return X86::CMP64rr;
1370 case MVT::f32:
1371 return HasAVX512 ? X86::VUCOMISSZrr
1372 : HasAVX ? X86::VUCOMISSrr
1373 : HasSSE1 ? X86::UCOMISSrr
1374 : 0;
1375 case MVT::f64:
1376 return HasAVX512 ? X86::VUCOMISDZrr
1377 : HasAVX ? X86::VUCOMISDrr
1378 : HasSSE2 ? X86::UCOMISDrr
1379 : 0;
1380 }
1381 }
1382
1383 /// If the RHS of the comparison is the constant RHSC, return an opcode that
1384 /// can fold it into the compare (e.g. CMP32ri); otherwise return 0.
1385 static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
1386 switch (VT.getSimpleVT().SimpleTy) {
1387 // Otherwise, we can't fold the immediate into this comparison.
1388 default:
1389 return 0;
1390 case MVT::i8:
1391 return X86::CMP8ri;
1392 case MVT::i16:
1393 return X86::CMP16ri;
1394 case MVT::i32:
1395 return X86::CMP32ri;
1396 case MVT::i64:
1397 // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
1398 // field.
1399 return isInt<32>(RHSC->getSExtValue()) ? X86::CMP64ri32 : 0;
1400 }
1401 }
1402
1403 bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
1404 const DebugLoc &CurMIMD) {
1405 Register Op0Reg = getRegForValue(Op0);
1406 if (Op0Reg == 0) return false;
1407
1408 // Handle 'null' like i32/i64 0.
1409 if (isa<ConstantPointerNull>(Op1))
1410 Op1 = Constant::getNullValue(DL.getIntPtrType(Op0->getContext()));
1411
1412 // We have two options: compare with register or immediate. If the RHS of
1413 // the compare is an immediate that we can fold into this compare, use
1414 // CMPri, otherwise use CMPrr.
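// E.g. "icmp eq i32 %x, 42" becomes a single "cmp $42, %reg" rather than
// materializing 42 into a second register first.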
1415 if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1416 if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
1417 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareImmOpc))
1418 .addReg(Op0Reg)
1419 .addImm(Op1C->getSExtValue());
1420 return true;
1421 }
1422 }
1423
1424 unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
1425 if (CompareOpc == 0) return false;
1426
1427 Register Op1Reg = getRegForValue(Op1);
1428 if (Op1Reg == 0) return false;
1429 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareOpc))
1430 .addReg(Op0Reg)
1431 .addReg(Op1Reg);
1432
1433 return true;
1434 }
1435
1436 bool X86FastISel::X86SelectCmp(const Instruction *I) {
1437 const CmpInst *CI = cast<CmpInst>(I);
1438
1439 MVT VT;
1440 if (!isTypeLegal(I->getOperand(0)->getType(), VT))
1441 return false;
1442
1443 // The code below only works for scalars.
1444 if (VT.isVector())
1445 return false;
1446
1447 // Try to optimize or fold the cmp.
1448 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1449 unsigned ResultReg = 0;
1450 switch (Predicate) {
1451 default: break;
1452 case CmpInst::FCMP_FALSE: {
1453 ResultReg = createResultReg(&X86::GR32RegClass);
1454 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV32r0),
1455 ResultReg);
1456 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultReg, X86::sub_8bit);
1457 if (!ResultReg)
1458 return false;
1459 break;
1460 }
1461 case CmpInst::FCMP_TRUE: {
1462 ResultReg = createResultReg(&X86::GR8RegClass);
1463 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
1464 ResultReg).addImm(1);
1465 break;
1466 }
1467 }
1468
1469 if (ResultReg) {
1470 updateValueMap(I, ResultReg);
1471 return true;
1472 }
1473
1474 const Value *LHS = CI->getOperand(0);
1475 const Value *RHS = CI->getOperand(1);
1476
1477 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
1478 // We don't have to materialize a zero constant for this case and can just use
1479 // %x again on the RHS.
1480 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1481 const auto *RHSC = dyn_cast<ConstantFP>(RHS);
1482 if (RHSC && RHSC->isNullValue())
1483 RHS = LHS;
1484 }
1485
1486 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
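// After a UCOMIS, OEQ needs ZF=1 and PF=0 (SETE and SETNP combined with AND),
// while UNE needs ZF=0 or PF=1 (SETNE and SETP combined with OR); that is
// what the table below encodes.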
1487 static const uint16_t SETFOpcTable[2][3] = {
1488 { X86::COND_E, X86::COND_NP, X86::AND8rr },
1489 { X86::COND_NE, X86::COND_P, X86::OR8rr }
1490 };
1491 const uint16_t *SETFOpc = nullptr;
1492 switch (Predicate) {
1493 default: break;
1494 case CmpInst::FCMP_OEQ: SETFOpc = &SETFOpcTable[0][0]; break;
1495 case CmpInst::FCMP_UNE: SETFOpc = &SETFOpcTable[1][0]; break;
1496 }
1497
1498 ResultReg = createResultReg(&X86::GR8RegClass);
1499 if (SETFOpc) {
1500 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1501 return false;
1502
1503 Register FlagReg1 = createResultReg(&X86::GR8RegClass);
1504 Register FlagReg2 = createResultReg(&X86::GR8RegClass);
1505 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
1506 FlagReg1).addImm(SETFOpc[0]);
1507 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
1508 FlagReg2).addImm(SETFOpc[1]);
1509 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(SETFOpc[2]),
1510 ResultReg).addReg(FlagReg1).addReg(FlagReg2);
1511 updateValueMap(I, ResultReg);
1512 return true;
1513 }
1514
1515 X86::CondCode CC;
1516 bool SwapArgs;
1517 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1518 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1519
1520 if (SwapArgs)
1521 std::swap(LHS, RHS);
1522
1523 // Emit a compare of LHS/RHS.
1524 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
1525 return false;
1526
1527 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
1528 ResultReg).addImm(CC);
1529 updateValueMap(I, ResultReg);
1530 return true;
1531 }
1532
1533 bool X86FastISel::X86SelectZExt(const Instruction *I) {
1534 EVT DstVT = TLI.getValueType(DL, I->getType());
1535 if (!TLI.isTypeLegal(DstVT))
1536 return false;
1537
1538 Register ResultReg = getRegForValue(I->getOperand(0));
1539 if (ResultReg == 0)
1540 return false;
1541
1542 // Handle zero-extension from i1 to i8, which is common.
1543 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
1544 if (SrcVT == MVT::i1) {
1545 // Set the high bits to zero.
1546 ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
1547 SrcVT = MVT::i8;
1548
1549 if (ResultReg == 0)
1550 return false;
1551 }
1552
1553 if (DstVT == MVT::i64) {
1554 // Handle extension to 64-bits via sub-register shenanigans.
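// Writing a 32-bit register implicitly zeroes bits 63:32, so a 32-bit
// MOVZX/MOV followed by SUBREG_TO_REG is enough to form the 64-bit zext.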
1555 unsigned MovInst;
1556
1557 switch (SrcVT.SimpleTy) {
1558 case MVT::i8: MovInst = X86::MOVZX32rr8; break;
1559 case MVT::i16: MovInst = X86::MOVZX32rr16; break;
1560 case MVT::i32: MovInst = X86::MOV32rr; break;
1561 default: llvm_unreachable("Unexpected zext to i64 source type");
1562 }
1563
1564 Register Result32 = createResultReg(&X86::GR32RegClass);
1565 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovInst), Result32)
1566 .addReg(ResultReg);
1567
1568 ResultReg = createResultReg(&X86::GR64RegClass);
1569 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::SUBREG_TO_REG),
1570 ResultReg)
1571 .addImm(0).addReg(Result32).addImm(X86::sub_32bit);
1572 } else if (DstVT == MVT::i16) {
1573 // i8->i16 doesn't exist in the autogenerated isel table. Need to zero
1574 // extend to 32-bits and then extract down to 16-bits.
1575 Register Result32 = createResultReg(&X86::GR32RegClass);
1576 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVZX32rr8),
1577 Result32).addReg(ResultReg);
1578
1579 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
1580 } else if (DstVT != MVT::i8) {
1581 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
1582 ResultReg);
1583 if (ResultReg == 0)
1584 return false;
1585 }
1586
1587 updateValueMap(I, ResultReg);
1588 return true;
1589 }
1590
1591 bool X86FastISel::X86SelectSExt(const Instruction *I) {
1592 EVT DstVT = TLI.getValueType(DL, I->getType());
1593 if (!TLI.isTypeLegal(DstVT))
1594 return false;
1595
1596 Register ResultReg = getRegForValue(I->getOperand(0));
1597 if (ResultReg == 0)
1598 return false;
1599
1600 // Handle sign-extension from i1 to i8.
1601 MVT SrcVT = TLI.getSimpleValueType(DL, I->getOperand(0)->getType());
1602 if (SrcVT == MVT::i1) {
1603 // Set the high bits to zero.
1604 Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
1605 if (ZExtReg == 0)
1606 return false;
1607
1608 // Negate the result to make an 8-bit sign extended value.
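// E.g. an i1 value of 1 is zero-extended to 0x01 and negated to 0xFF (-1),
// while 0 stays 0x00, which is the desired i8 sign extension.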
1609 ResultReg = createResultReg(&X86::GR8RegClass);
1610 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::NEG8r),
1611 ResultReg).addReg(ZExtReg);
1612
1613 SrcVT = MVT::i8;
1614 }
1615
1616 if (DstVT == MVT::i16) {
1617 // i8->i16 doesn't exist in the autogenerated isel table. Need to sign
1618 // extend to 32-bits and then extract down to 16-bits.
1619 Register Result32 = createResultReg(&X86::GR32RegClass);
1620 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOVSX32rr8),
1621 Result32).addReg(ResultReg);
1622
1623 ResultReg = fastEmitInst_extractsubreg(MVT::i16, Result32, X86::sub_16bit);
1624 } else if (DstVT != MVT::i8) {
1625 ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
1626 ResultReg);
1627 if (ResultReg == 0)
1628 return false;
1629 }
1630
1631 updateValueMap(I, ResultReg);
1632 return true;
1633 }
1634
1635 bool X86FastISel::X86SelectBranch(const Instruction *I) {
1636 // Unconditional branches are selected by tablegen-generated code.
1637 // Handle a conditional branch.
1638 const BranchInst *BI = cast<BranchInst>(I);
1639 MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
1640 MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
1641
1642 // Fold the common case of a conditional branch with a comparison
1643 // in the same block (values defined in other blocks may not have
1644 // initialized registers).
1645 X86::CondCode CC;
1646 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
1647 if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
1648 EVT VT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1649
1650 // Try to optimize or fold the cmp.
1651 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1652 switch (Predicate) {
1653 default: break;
1654 case CmpInst::FCMP_FALSE: fastEmitBranch(FalseMBB, MIMD.getDL()); return true;
1655 case CmpInst::FCMP_TRUE: fastEmitBranch(TrueMBB, MIMD.getDL()); return true;
1656 }
1657
1658 const Value *CmpLHS = CI->getOperand(0);
1659 const Value *CmpRHS = CI->getOperand(1);
1660
1661 // The optimizer might have replaced fcmp oeq %x, %x with
1662 // fcmp ord %x, 0.0.
1663 // We don't have to materialize a zero constant for this case and can just
1664 // use %x again on the RHS.
1665 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
1666 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
1667 if (CmpRHSC && CmpRHSC->isNullValue())
1668 CmpRHS = CmpLHS;
1669 }
1670
1671 // Try to take advantage of fallthrough opportunities.
1672 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1673 std::swap(TrueMBB, FalseMBB);
1674 Predicate = CmpInst::getInversePredicate(Predicate);
1675 }
1676
1677 // FCMP_OEQ and FCMP_UNE cannot be expressed with a single flag/condition
1678 // code check. Instead two branch instructions are required to check all
1679 // the flags. First we change the predicate to a supported condition code,
1680 // which will be the first branch. Later on we will emit the second
1681 // branch.
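// For example, with a ucomiss/ucomisd-based compare, UNE holds when ZF == 0
// (JNE) or PF == 1 (JP), so we emit a JNE followed by a JP to the same
// target; OEQ reuses this by swapping the successors first and branching
// away on the same two conditions.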
1682 bool NeedExtraBranch = false;
1683 switch (Predicate) {
1684 default: break;
1685 case CmpInst::FCMP_OEQ:
1686 std::swap(TrueMBB, FalseMBB);
1687 [[fallthrough]];
1688 case CmpInst::FCMP_UNE:
1689 NeedExtraBranch = true;
1690 Predicate = CmpInst::FCMP_ONE;
1691 break;
1692 }
1693
1694 bool SwapArgs;
1695 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
1696 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1697
1698 if (SwapArgs)
1699 std::swap(CmpLHS, CmpRHS);
1700
1701 // Emit a compare of the LHS and RHS, setting the flags.
1702 if (!X86FastEmitCompare(CmpLHS, CmpRHS, VT, CI->getDebugLoc()))
1703 return false;
1704
1705 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1706 .addMBB(TrueMBB).addImm(CC);
1707
1708 // X86 requires a second branch to handle UNE (and OEQ, which is mapped
1709 // to UNE above).
1710 if (NeedExtraBranch) {
1711 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1712 .addMBB(TrueMBB).addImm(X86::COND_P);
1713 }
1714
1715 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1716 return true;
1717 }
1718 } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
1719 // Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
1720 // typically happen for _Bool and C++ bools.
1721 MVT SourceVT;
1722 if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
1723 isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
1724 unsigned TestOpc = 0;
1725 switch (SourceVT.SimpleTy) {
1726 default: break;
1727 case MVT::i8: TestOpc = X86::TEST8ri; break;
1728 case MVT::i16: TestOpc = X86::TEST16ri; break;
1729 case MVT::i32: TestOpc = X86::TEST32ri; break;
1730 case MVT::i64: TestOpc = X86::TEST64ri32; break;
1731 }
1732 if (TestOpc) {
1733 Register OpReg = getRegForValue(TI->getOperand(0));
1734 if (OpReg == 0) return false;
1735
1736 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TestOpc))
1737 .addReg(OpReg).addImm(1);
1738
1739 unsigned JmpCond = X86::COND_NE;
1740 if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
1741 std::swap(TrueMBB, FalseMBB);
1742 JmpCond = X86::COND_E;
1743 }
1744
1745 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1746 .addMBB(TrueMBB).addImm(JmpCond);
1747
1748 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1749 return true;
1750 }
1751 }
1752 } else if (foldX86XALUIntrinsic(CC, BI, BI->getCondition())) {
1753 // Fake request the condition, otherwise the intrinsic might be completely
1754 // optimized away.
1755 Register TmpReg = getRegForValue(BI->getCondition());
1756 if (TmpReg == 0)
1757 return false;
1758
1759 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1760 .addMBB(TrueMBB).addImm(CC);
1761 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1762 return true;
1763 }
1764
1765 // Otherwise do a clumsy setcc and re-test it.
1766 // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
1767 // in an explicit cast, so make sure to handle that correctly.
1768 Register OpReg = getRegForValue(BI->getCondition());
1769 if (OpReg == 0) return false;
1770
1771 // In case OpReg is a K register, COPY to a GPR
1772 if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
1773 unsigned KOpReg = OpReg;
1774 OpReg = createResultReg(&X86::GR32RegClass);
1775 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1776 TII.get(TargetOpcode::COPY), OpReg)
1777 .addReg(KOpReg);
1778 OpReg = fastEmitInst_extractsubreg(MVT::i8, OpReg, X86::sub_8bit);
1779 }
1780 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
1781 .addReg(OpReg)
1782 .addImm(1);
1783 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
1784 .addMBB(TrueMBB).addImm(X86::COND_NE);
1785 finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
1786 return true;
1787 }
1788
1789 bool X86FastISel::X86SelectShift(const Instruction *I) {
1790 unsigned CReg = 0, OpReg = 0;
1791 const TargetRegisterClass *RC = nullptr;
1792 if (I->getType()->isIntegerTy(8)) {
1793 CReg = X86::CL;
1794 RC = &X86::GR8RegClass;
1795 switch (I->getOpcode()) {
1796 case Instruction::LShr: OpReg = X86::SHR8rCL; break;
1797 case Instruction::AShr: OpReg = X86::SAR8rCL; break;
1798 case Instruction::Shl: OpReg = X86::SHL8rCL; break;
1799 default: return false;
1800 }
1801 } else if (I->getType()->isIntegerTy(16)) {
1802 CReg = X86::CX;
1803 RC = &X86::GR16RegClass;
1804 switch (I->getOpcode()) {
1805 default: llvm_unreachable("Unexpected shift opcode");
1806 case Instruction::LShr: OpReg = X86::SHR16rCL; break;
1807 case Instruction::AShr: OpReg = X86::SAR16rCL; break;
1808 case Instruction::Shl: OpReg = X86::SHL16rCL; break;
1809 }
1810 } else if (I->getType()->isIntegerTy(32)) {
1811 CReg = X86::ECX;
1812 RC = &X86::GR32RegClass;
1813 switch (I->getOpcode()) {
1814 default: llvm_unreachable("Unexpected shift opcode");
1815 case Instruction::LShr: OpReg = X86::SHR32rCL; break;
1816 case Instruction::AShr: OpReg = X86::SAR32rCL; break;
1817 case Instruction::Shl: OpReg = X86::SHL32rCL; break;
1818 }
1819 } else if (I->getType()->isIntegerTy(64)) {
1820 CReg = X86::RCX;
1821 RC = &X86::GR64RegClass;
1822 switch (I->getOpcode()) {
1823 default: llvm_unreachable("Unexpected shift opcode");
1824 case Instruction::LShr: OpReg = X86::SHR64rCL; break;
1825 case Instruction::AShr: OpReg = X86::SAR64rCL; break;
1826 case Instruction::Shl: OpReg = X86::SHL64rCL; break;
1827 }
1828 } else {
1829 return false;
1830 }
1831
1832 MVT VT;
1833 if (!isTypeLegal(I->getType(), VT))
1834 return false;
1835
1836 Register Op0Reg = getRegForValue(I->getOperand(0));
1837 if (Op0Reg == 0) return false;
1838
1839 Register Op1Reg = getRegForValue(I->getOperand(1));
1840 if (Op1Reg == 0) return false;
1841 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
1842 CReg).addReg(Op1Reg);
1843
1844 // The shift instruction uses X86::CL. If we defined a super-register
1845 // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
1846 if (CReg != X86::CL)
1847 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1848 TII.get(TargetOpcode::KILL), X86::CL)
1849 .addReg(CReg, RegState::Kill);
1850
1851 Register ResultReg = createResultReg(RC);
1852 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(OpReg), ResultReg)
1853 .addReg(Op0Reg);
1854 updateValueMap(I, ResultReg);
1855 return true;
1856 }
1857
1858 bool X86FastISel::X86SelectDivRem(const Instruction *I) {
1859 const static unsigned NumTypes = 4; // i8, i16, i32, i64
1860 const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
1861 const static bool S = true; // IsSigned
1862 const static bool U = false; // !IsSigned
1863 const static unsigned Copy = TargetOpcode::COPY;
1864 // For the X86 DIV/IDIV instruction, in most cases the dividend
1865 // (numerator) must be in a specific register pair highreg:lowreg,
1866 // producing the quotient in lowreg and the remainder in highreg.
1867 // For most data types, to set up the instruction, the dividend is
1868 // copied into lowreg, and lowreg is sign-extended or zero-extended
1869 // into highreg. The exception is i8, where the dividend is defined
1870 // as a single register rather than a register pair, and we
1871 // therefore directly sign-extend or zero-extend the dividend into
1872 // lowreg, instead of copying, and ignore the highreg.
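// For example, a 32-bit sdiv is emitted roughly as: copy the dividend into
// EAX, CDQ to sign-extend it into EDX, IDIV32r with the divisor, then copy
// EAX (the quotient) into the result vreg; srem copies EDX (the remainder)
// instead.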
1873 const static struct DivRemEntry {
1874 // The following portion depends only on the data type.
1875 const TargetRegisterClass *RC;
1876 unsigned LowInReg; // low part of the register pair
1877 unsigned HighInReg; // high part of the register pair
1878 // The following portion depends on both the data type and the operation.
1879 struct DivRemResult {
1880 unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
1881 unsigned OpSignExtend; // Opcode for sign-extending lowreg into
1882 // highreg, or copying a zero into highreg.
1883 unsigned OpCopy; // Opcode for copying dividend into lowreg, or
1884 // zero/sign-extending into lowreg for i8.
1885 unsigned DivRemResultReg; // Register containing the desired result.
1886 bool IsOpSigned; // Whether to use signed or unsigned form.
1887 } ResultTable[NumOps];
1888 } OpTable[NumTypes] = {
1889 { &X86::GR8RegClass, X86::AX, 0, {
1890 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
1891 { X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
1892 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
1893 { X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
1894 }
1895 }, // i8
1896 { &X86::GR16RegClass, X86::AX, X86::DX, {
1897 { X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
1898 { X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
1899 { X86::DIV16r, X86::MOV32r0, Copy, X86::AX, U }, // UDiv
1900 { X86::DIV16r, X86::MOV32r0, Copy, X86::DX, U }, // URem
1901 }
1902 }, // i16
1903 { &X86::GR32RegClass, X86::EAX, X86::EDX, {
1904 { X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
1905 { X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
1906 { X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
1907 { X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
1908 }
1909 }, // i32
1910 { &X86::GR64RegClass, X86::RAX, X86::RDX, {
1911 { X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
1912 { X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
1913 { X86::DIV64r, X86::MOV32r0, Copy, X86::RAX, U }, // UDiv
1914 { X86::DIV64r, X86::MOV32r0, Copy, X86::RDX, U }, // URem
1915 }
1916 }, // i64
1917 };
1918
1919 MVT VT;
1920 if (!isTypeLegal(I->getType(), VT))
1921 return false;
1922
1923 unsigned TypeIndex, OpIndex;
1924 switch (VT.SimpleTy) {
1925 default: return false;
1926 case MVT::i8: TypeIndex = 0; break;
1927 case MVT::i16: TypeIndex = 1; break;
1928 case MVT::i32: TypeIndex = 2; break;
1929 case MVT::i64: TypeIndex = 3;
1930 if (!Subtarget->is64Bit())
1931 return false;
1932 break;
1933 }
1934
1935 switch (I->getOpcode()) {
1936 default: llvm_unreachable("Unexpected div/rem opcode");
1937 case Instruction::SDiv: OpIndex = 0; break;
1938 case Instruction::SRem: OpIndex = 1; break;
1939 case Instruction::UDiv: OpIndex = 2; break;
1940 case Instruction::URem: OpIndex = 3; break;
1941 }
1942
1943 const DivRemEntry &TypeEntry = OpTable[TypeIndex];
1944 const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
1945 Register Op0Reg = getRegForValue(I->getOperand(0));
1946 if (Op0Reg == 0)
1947 return false;
1948 Register Op1Reg = getRegForValue(I->getOperand(1));
1949 if (Op1Reg == 0)
1950 return false;
1951
1952 // Move op0 into low-order input register.
1953 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1954 TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
1955 // Zero-extend or sign-extend into high-order input register.
1956 if (OpEntry.OpSignExtend) {
1957 if (OpEntry.IsOpSigned)
1958 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1959 TII.get(OpEntry.OpSignExtend));
1960 else {
1961 Register Zero32 = createResultReg(&X86::GR32RegClass);
1962 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1963 TII.get(X86::MOV32r0), Zero32);
1964
1965 // Copy the zero into the appropriate sub/super/identical physical
1966 // register. Unfortunately the operations needed are not uniform enough
1967 // to fit neatly into the table above.
1968 if (VT == MVT::i16) {
1969 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1970 TII.get(Copy), TypeEntry.HighInReg)
1971 .addReg(Zero32, 0, X86::sub_16bit);
1972 } else if (VT == MVT::i32) {
1973 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1974 TII.get(Copy), TypeEntry.HighInReg)
1975 .addReg(Zero32);
1976 } else if (VT == MVT::i64) {
1977 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1978 TII.get(TargetOpcode::SUBREG_TO_REG), TypeEntry.HighInReg)
1979 .addImm(0).addReg(Zero32).addImm(X86::sub_32bit);
1980 }
1981 }
1982 }
1983 // Generate the DIV/IDIV instruction.
1984 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
1985 TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
1986 // For i8 remainder, we can't reference ah directly, as we'll end
1987 // up with bogus copies like %r9b = COPY %ah. Reference ax
1988 // instead to prevent ah references in a rex instruction.
1989 //
1990 // The current assumption of the fast register allocator is that isel
1991 // won't generate explicit references to the GR8_NOREX registers. If
1992 // the allocator and/or the backend get enhanced to be more robust in
1993 // that regard, this can be, and should be, removed.
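// Concretely: copy AX into a GR16 vreg, shift it right by 8 with SHR16ri,
// and extract the sub_8bit subreg, so the remainder is read without ever
// naming AH.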
1994 unsigned ResultReg = 0;
1995 if ((I->getOpcode() == Instruction::SRem ||
1996 I->getOpcode() == Instruction::URem) &&
1997 OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
1998 Register SourceSuperReg = createResultReg(&X86::GR16RegClass);
1999 Register ResultSuperReg = createResultReg(&X86::GR16RegClass);
2000 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2001 TII.get(Copy), SourceSuperReg).addReg(X86::AX);
2002
2003 // Shift AX right by 8 bits instead of using AH.
2004 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SHR16ri),
2005 ResultSuperReg).addReg(SourceSuperReg).addImm(8);
2006
2007 // Now reference the 8-bit subreg of the result.
2008 ResultReg = fastEmitInst_extractsubreg(MVT::i8, ResultSuperReg,
2009 X86::sub_8bit);
2010 }
2011 // Copy the result out of the physreg if we haven't already.
2012 if (!ResultReg) {
2013 ResultReg = createResultReg(TypeEntry.RC);
2014 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Copy), ResultReg)
2015 .addReg(OpEntry.DivRemResultReg);
2016 }
2017 updateValueMap(I, ResultReg);
2018
2019 return true;
2020 }
2021
2022 /// Emit a conditional move instruction (if they are supported) to lower
2023 /// the select.
2024 bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
2025 // Check if the subtarget supports these instructions.
2026 if (!Subtarget->canUseCMOV())
2027 return false;
2028
2029 // FIXME: Add support for i8.
2030 if (RetVT < MVT::i16 || RetVT > MVT::i64)
2031 return false;
2032
2033 const Value *Cond = I->getOperand(0);
2034 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2035 bool NeedTest = true;
2036 X86::CondCode CC = X86::COND_NE;
2037
2038 // Optimize conditions coming from a compare if both instructions are in the
2039 // same basic block (values defined in other basic blocks may not have
2040 // initialized registers).
2041 const auto *CI = dyn_cast<CmpInst>(Cond);
2042 if (CI && (CI->getParent() == I->getParent())) {
2043 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2044
2045 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
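// For example, OEQ is computed below as (SETE & SETNP) != 0 via TEST8rr,
// and UNE as (SETNE | SETP) != 0 via OR8rr; the CMOV emitted later then
// tests COND_NE on the resulting flags.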
2046 static const uint16_t SETFOpcTable[2][3] = {
2047 { X86::COND_NP, X86::COND_E, X86::TEST8rr },
2048 { X86::COND_P, X86::COND_NE, X86::OR8rr }
2049 };
2050 const uint16_t *SETFOpc = nullptr;
2051 switch (Predicate) {
2052 default: break;
2053 case CmpInst::FCMP_OEQ:
2054 SETFOpc = &SETFOpcTable[0][0];
2055 Predicate = CmpInst::ICMP_NE;
2056 break;
2057 case CmpInst::FCMP_UNE:
2058 SETFOpc = &SETFOpcTable[1][0];
2059 Predicate = CmpInst::ICMP_NE;
2060 break;
2061 }
2062
2063 bool NeedSwap;
2064 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(Predicate);
2065 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
2066
2067 const Value *CmpLHS = CI->getOperand(0);
2068 const Value *CmpRHS = CI->getOperand(1);
2069 if (NeedSwap)
2070 std::swap(CmpLHS, CmpRHS);
2071
2072 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2073 // Emit a compare of the LHS and RHS, setting the flags.
2074 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2075 return false;
2076
2077 if (SETFOpc) {
2078 Register FlagReg1 = createResultReg(&X86::GR8RegClass);
2079 Register FlagReg2 = createResultReg(&X86::GR8RegClass);
2080 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
2081 FlagReg1).addImm(SETFOpc[0]);
2082 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
2083 FlagReg2).addImm(SETFOpc[1]);
2084 auto const &II = TII.get(SETFOpc[2]);
2085 if (II.getNumDefs()) {
2086 Register TmpReg = createResultReg(&X86::GR8RegClass);
2087 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, TmpReg)
2088 .addReg(FlagReg2).addReg(FlagReg1);
2089 } else {
2090 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
2091 .addReg(FlagReg2).addReg(FlagReg1);
2092 }
2093 }
2094 NeedTest = false;
2095 } else if (foldX86XALUIntrinsic(CC, I, Cond)) {
2096 // Fake request the condition, otherwise the intrinsic might be completely
2097 // optimized away.
2098 Register TmpReg = getRegForValue(Cond);
2099 if (TmpReg == 0)
2100 return false;
2101
2102 NeedTest = false;
2103 }
2104
2105 if (NeedTest) {
2106 // Selects operate on i1, however, CondReg is 8 bits wide and may contain
2107 // garbage. Indeed, only the least significant bit is supposed to be
2108 // accurate. If we read more than the lsb, we may see non-zero values
2109 // where the lsb is zero. Therefore, we have to truncate CondReg to i1 for
2110 // the select. This is achieved by performing TEST against 1.
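// (TEST8ri reg, 1 computes reg & 1 and sets ZF from just bit 0, so the CMOV
// emitted below with COND_NE in this path selects on the lsb only.)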
2111 Register CondReg = getRegForValue(Cond);
2112 if (CondReg == 0)
2113 return false;
2114
2115 // In case OpReg is a K register, COPY to a GPR
2116 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2117 unsigned KCondReg = CondReg;
2118 CondReg = createResultReg(&X86::GR32RegClass);
2119 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2120 TII.get(TargetOpcode::COPY), CondReg)
2121 .addReg(KCondReg);
2122 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
2123 }
2124 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
2125 .addReg(CondReg)
2126 .addImm(1);
2127 }
2128
2129 const Value *LHS = I->getOperand(1);
2130 const Value *RHS = I->getOperand(2);
2131
2132 Register RHSReg = getRegForValue(RHS);
2133 Register LHSReg = getRegForValue(LHS);
2134 if (!LHSReg || !RHSReg)
2135 return false;
2136
2137 const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
2138 unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC) / 8, false,
2139 Subtarget->hasNDD());
2140 Register ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
2141 updateValueMap(I, ResultReg);
2142 return true;
2143 }
2144
2145 /// Emit SSE or AVX instructions to lower the select.
2146 ///
2147 /// Try to use SSE1/SSE2 instructions to simulate a select without branches.
2148 /// This lowers fp selects into a CMP/AND/ANDN/OR sequence when the necessary
2149 /// SSE instructions are available. If AVX is available, try to use a VBLENDV.
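/// A minimal sketch of the non-AVX path, for a true value T and a false
/// value F: CMPSS/CMPSD produces an all-ones or all-zeros mask M, and the
/// result is (M & T) | (~M & F) via ANDPS/ANDNPS/ORPS (or the PD forms).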
2150 bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
2151 // Optimize conditions coming from a compare if both instructions are in the
2152 // same basic block (values defined in other basic blocks may not have
2153 // initialized registers).
2154 const auto *CI = dyn_cast<FCmpInst>(I->getOperand(0));
2155 if (!CI || (CI->getParent() != I->getParent()))
2156 return false;
2157
2158 if (I->getType() != CI->getOperand(0)->getType() ||
2159 !((Subtarget->hasSSE1() && RetVT == MVT::f32) ||
2160 (Subtarget->hasSSE2() && RetVT == MVT::f64)))
2161 return false;
2162
2163 const Value *CmpLHS = CI->getOperand(0);
2164 const Value *CmpRHS = CI->getOperand(1);
2165 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2166
2167 // The optimizer might have replaced fcmp oeq %x, %x with fcmp ord %x, 0.0.
2168 // We don't have to materialize a zero constant for this case and can just use
2169 // %x again on the RHS.
2170 if (Predicate == CmpInst::FCMP_ORD || Predicate == CmpInst::FCMP_UNO) {
2171 const auto *CmpRHSC = dyn_cast<ConstantFP>(CmpRHS);
2172 if (CmpRHSC && CmpRHSC->isNullValue())
2173 CmpRHS = CmpLHS;
2174 }
2175
2176 unsigned CC;
2177 bool NeedSwap;
2178 std::tie(CC, NeedSwap) = getX86SSEConditionCode(Predicate);
2179 if (CC > 7 && !Subtarget->hasAVX())
2180 return false;
2181
2182 if (NeedSwap)
2183 std::swap(CmpLHS, CmpRHS);
2184
2185 const Value *LHS = I->getOperand(1);
2186 const Value *RHS = I->getOperand(2);
2187
2188 Register LHSReg = getRegForValue(LHS);
2189 Register RHSReg = getRegForValue(RHS);
2190 Register CmpLHSReg = getRegForValue(CmpLHS);
2191 Register CmpRHSReg = getRegForValue(CmpRHS);
2192 if (!LHSReg || !RHSReg || !CmpLHSReg || !CmpRHSReg)
2193 return false;
2194
2195 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2196 unsigned ResultReg;
2197
2198 if (Subtarget->hasAVX512()) {
2199 // If we have AVX512 we can use a mask compare and masked movss/sd.
2200 const TargetRegisterClass *VR128X = &X86::VR128XRegClass;
2201 const TargetRegisterClass *VK1 = &X86::VK1RegClass;
2202
2203 unsigned CmpOpcode =
2204 (RetVT == MVT::f32) ? X86::VCMPSSZrri : X86::VCMPSDZrri;
2205 Register CmpReg = fastEmitInst_rri(CmpOpcode, VK1, CmpLHSReg, CmpRHSReg,
2206 CC);
2207
2208 // Need an IMPLICIT_DEF for the input that is used to generate the upper
2209 // bits of the result register since it's not based on any of the inputs.
2210 Register ImplicitDefReg = createResultReg(VR128X);
2211 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2212 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2213
2214 // Place RHSReg in the passthru of the masked movss/sd operation and put
2215 // LHS in the input. The mask input comes from the compare.
2216 unsigned MovOpcode =
2217 (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
2218 unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
2219 ImplicitDefReg, LHSReg);
2220
2221 ResultReg = createResultReg(RC);
2222 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2223 TII.get(TargetOpcode::COPY), ResultReg).addReg(MovReg);
2224
2225 } else if (Subtarget->hasAVX()) {
2226 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2227
2228 // If we have AVX, create 1 blendv instead of 3 logic instructions.
2229 // Blendv was introduced with SSE 4.1, but the 2 register form implicitly
2230 // uses XMM0 as the selection register. That may need just as many
2231 // instructions as the AND/ANDN/OR sequence due to register moves, so
2232 // don't bother.
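// VBLENDVPS/PD selects from the second source where the mask's sign bit is
// set, so the false value (RHSReg) goes in the first source, the true value
// (LHSReg) in the second, and the compare result acts as the mask.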
2233 unsigned CmpOpcode =
2234 (RetVT == MVT::f32) ? X86::VCMPSSrri : X86::VCMPSDrri;
2235 unsigned BlendOpcode =
2236 (RetVT == MVT::f32) ? X86::VBLENDVPSrrr : X86::VBLENDVPDrrr;
2237
2238 Register CmpReg = fastEmitInst_rri(CmpOpcode, RC, CmpLHSReg, CmpRHSReg,
2239 CC);
2240 Register VBlendReg = fastEmitInst_rrr(BlendOpcode, VR128, RHSReg, LHSReg,
2241 CmpReg);
2242 ResultReg = createResultReg(RC);
2243 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2244 TII.get(TargetOpcode::COPY), ResultReg).addReg(VBlendReg);
2245 } else {
2246 // Choose the SSE instruction sequence based on data type (float or double).
2247 static const uint16_t OpcTable[2][4] = {
2248 { X86::CMPSSrri, X86::ANDPSrr, X86::ANDNPSrr, X86::ORPSrr },
2249 { X86::CMPSDrri, X86::ANDPDrr, X86::ANDNPDrr, X86::ORPDrr }
2250 };
2251
2252 const uint16_t *Opc = nullptr;
2253 switch (RetVT.SimpleTy) {
2254 default: return false;
2255 case MVT::f32: Opc = &OpcTable[0][0]; break;
2256 case MVT::f64: Opc = &OpcTable[1][0]; break;
2257 }
2258
2259 const TargetRegisterClass *VR128 = &X86::VR128RegClass;
2260 Register CmpReg = fastEmitInst_rri(Opc[0], RC, CmpLHSReg, CmpRHSReg, CC);
2261 Register AndReg = fastEmitInst_rr(Opc[1], VR128, CmpReg, LHSReg);
2262 Register AndNReg = fastEmitInst_rr(Opc[2], VR128, CmpReg, RHSReg);
2263 Register OrReg = fastEmitInst_rr(Opc[3], VR128, AndNReg, AndReg);
2264 ResultReg = createResultReg(RC);
2265 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2266 TII.get(TargetOpcode::COPY), ResultReg).addReg(OrReg);
2267 }
2268 updateValueMap(I, ResultReg);
2269 return true;
2270 }
2271
2272 bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
2273 // These are pseudo CMOV instructions and will be later expanded into control-
2274 // flow.
2275 unsigned Opc;
2276 switch (RetVT.SimpleTy) {
2277 default: return false;
2278 case MVT::i8: Opc = X86::CMOV_GR8; break;
2279 case MVT::i16: Opc = X86::CMOV_GR16; break;
2280 case MVT::i32: Opc = X86::CMOV_GR32; break;
2281 case MVT::f16:
2282 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR16X : X86::CMOV_FR16; break;
2283 case MVT::f32:
2284 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR32X : X86::CMOV_FR32; break;
2285 case MVT::f64:
2286 Opc = Subtarget->hasAVX512() ? X86::CMOV_FR64X : X86::CMOV_FR64; break;
2287 }
2288
2289 const Value *Cond = I->getOperand(0);
2290 X86::CondCode CC = X86::COND_NE;
2291
2292 // Optimize conditions coming from a compare if both instructions are in the
2293 // same basic block (values defined in other basic blocks may not have
2294 // initialized registers).
2295 const auto *CI = dyn_cast<CmpInst>(Cond);
2296 if (CI && (CI->getParent() == I->getParent())) {
2297 bool NeedSwap;
2298 std::tie(CC, NeedSwap) = X86::getX86ConditionCode(CI->getPredicate());
2299 if (CC > X86::LAST_VALID_COND)
2300 return false;
2301
2302 const Value *CmpLHS = CI->getOperand(0);
2303 const Value *CmpRHS = CI->getOperand(1);
2304
2305 if (NeedSwap)
2306 std::swap(CmpLHS, CmpRHS);
2307
2308 EVT CmpVT = TLI.getValueType(DL, CmpLHS->getType());
2309 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT, CI->getDebugLoc()))
2310 return false;
2311 } else {
2312 Register CondReg = getRegForValue(Cond);
2313 if (CondReg == 0)
2314 return false;
2315
2316 // In case OpReg is a K register, COPY to a GPR
2317 if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
2318 unsigned KCondReg = CondReg;
2319 CondReg = createResultReg(&X86::GR32RegClass);
2320 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2321 TII.get(TargetOpcode::COPY), CondReg)
2322 .addReg(KCondReg);
2323 CondReg = fastEmitInst_extractsubreg(MVT::i8, CondReg, X86::sub_8bit);
2324 }
2325 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TEST8ri))
2326 .addReg(CondReg)
2327 .addImm(1);
2328 }
2329
2330 const Value *LHS = I->getOperand(1);
2331 const Value *RHS = I->getOperand(2);
2332
2333 Register LHSReg = getRegForValue(LHS);
2334 Register RHSReg = getRegForValue(RHS);
2335 if (!LHSReg || !RHSReg)
2336 return false;
2337
2338 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2339
2340 Register ResultReg =
2341 fastEmitInst_rri(Opc, RC, RHSReg, LHSReg, CC);
2342 updateValueMap(I, ResultReg);
2343 return true;
2344 }
2345
2346 bool X86FastISel::X86SelectSelect(const Instruction *I) {
2347 MVT RetVT;
2348 if (!isTypeLegal(I->getType(), RetVT))
2349 return false;
2350
2351 // Check if we can fold the select.
2352 if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
2353 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
2354 const Value *Opnd = nullptr;
2355 switch (Predicate) {
2356 default: break;
2357 case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
2358 case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
2359 }
2360 // No need for a select anymore - this is an unconditional move.
2361 if (Opnd) {
2362 Register OpReg = getRegForValue(Opnd);
2363 if (OpReg == 0)
2364 return false;
2365 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
2366 Register ResultReg = createResultReg(RC);
2367 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2368 TII.get(TargetOpcode::COPY), ResultReg)
2369 .addReg(OpReg);
2370 updateValueMap(I, ResultReg);
2371 return true;
2372 }
2373 }
2374
2375 // First try to use real conditional move instructions.
2376 if (X86FastEmitCMoveSelect(RetVT, I))
2377 return true;
2378
2379 // Try to use a sequence of SSE instructions to simulate a conditional move.
2380 if (X86FastEmitSSESelect(RetVT, I))
2381 return true;
2382
2383 // Fall-back to pseudo conditional move instructions, which will be later
2384 // converted to control-flow.
2385 if (X86FastEmitPseudoSelect(RetVT, I))
2386 return true;
2387
2388 return false;
2389 }
2390
2391 // Common code for X86SelectSIToFP and X86SelectUIToFP.
2392 bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {
2393 // The target-independent selection algorithm in FastISel already knows how
2394 // to select a SINT_TO_FP if the target is SSE but not AVX.
2395 // Early exit if the subtarget doesn't have AVX.
2396 // Unsigned conversion requires avx512.
2397 bool HasAVX512 = Subtarget->hasAVX512();
2398 if (!Subtarget->hasAVX() || (!IsSigned && !HasAVX512))
2399 return false;
2400
2401 // TODO: We could sign extend narrower types.
2402 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
2403 if (SrcVT != MVT::i32 && SrcVT != MVT::i64)
2404 return false;
2405
2406 // Select integer to float/double conversion.
2407 Register OpReg = getRegForValue(I->getOperand(0));
2408 if (OpReg == 0)
2409 return false;
2410
2411 unsigned Opcode;
2412
2413 static const uint16_t SCvtOpc[2][2][2] = {
2414 { { X86::VCVTSI2SSrr, X86::VCVTSI642SSrr },
2415 { X86::VCVTSI2SDrr, X86::VCVTSI642SDrr } },
2416 { { X86::VCVTSI2SSZrr, X86::VCVTSI642SSZrr },
2417 { X86::VCVTSI2SDZrr, X86::VCVTSI642SDZrr } },
2418 };
2419 static const uint16_t UCvtOpc[2][2] = {
2420 { X86::VCVTUSI2SSZrr, X86::VCVTUSI642SSZrr },
2421 { X86::VCVTUSI2SDZrr, X86::VCVTUSI642SDZrr },
2422 };
2423 bool Is64Bit = SrcVT == MVT::i64;
2424
2425 if (I->getType()->isDoubleTy()) {
2426 // s/uitofp int -> double
2427 Opcode = IsSigned ? SCvtOpc[HasAVX512][1][Is64Bit] : UCvtOpc[1][Is64Bit];
2428 } else if (I->getType()->isFloatTy()) {
2429 // s/uitofp int -> float
2430 Opcode = IsSigned ? SCvtOpc[HasAVX512][0][Is64Bit] : UCvtOpc[0][Is64Bit];
2431 } else
2432 return false;
2433
2434 MVT DstVT = TLI.getValueType(DL, I->getType()).getSimpleVT();
2435 const TargetRegisterClass *RC = TLI.getRegClassFor(DstVT);
2436 Register ImplicitDefReg = createResultReg(RC);
2437 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2438 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2439 Register ResultReg = fastEmitInst_rr(Opcode, RC, ImplicitDefReg, OpReg);
2440 updateValueMap(I, ResultReg);
2441 return true;
2442 }
2443
2444 bool X86FastISel::X86SelectSIToFP(const Instruction *I) {
2445 return X86SelectIntToFP(I, /*IsSigned*/true);
2446 }
2447
2448 bool X86FastISel::X86SelectUIToFP(const Instruction *I) {
2449 return X86SelectIntToFP(I, /*IsSigned*/false);
2450 }
2451
2452 // Helper method used by X86SelectFPExt and X86SelectFPTrunc.
2453 bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
2454 unsigned TargetOpc,
2455 const TargetRegisterClass *RC) {
2456 assert((I->getOpcode() == Instruction::FPExt ||
2457 I->getOpcode() == Instruction::FPTrunc) &&
2458 "Instruction must be an FPExt or FPTrunc!");
2459 bool HasAVX = Subtarget->hasAVX();
2460
2461 Register OpReg = getRegForValue(I->getOperand(0));
2462 if (OpReg == 0)
2463 return false;
2464
2465 unsigned ImplicitDefReg;
2466 if (HasAVX) {
2467 ImplicitDefReg = createResultReg(RC);
2468 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2469 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2470
2471 }
2472
2473 Register ResultReg = createResultReg(RC);
2474 MachineInstrBuilder MIB;
2475 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpc),
2476 ResultReg);
2477
2478 if (HasAVX)
2479 MIB.addReg(ImplicitDefReg);
2480
2481 MIB.addReg(OpReg);
2482 updateValueMap(I, ResultReg);
2483 return true;
2484 }
2485
2486 bool X86FastISel::X86SelectFPExt(const Instruction *I) {
2487 if (Subtarget->hasSSE2() && I->getType()->isDoubleTy() &&
2488 I->getOperand(0)->getType()->isFloatTy()) {
2489 bool HasAVX512 = Subtarget->hasAVX512();
2490 // fpext from float to double.
2491 unsigned Opc =
2492 HasAVX512 ? X86::VCVTSS2SDZrr
2493 : Subtarget->hasAVX() ? X86::VCVTSS2SDrr : X86::CVTSS2SDrr;
2494 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f64));
2495 }
2496
2497 return false;
2498 }
2499
2500 bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
2501 if (Subtarget->hasSSE2() && I->getType()->isFloatTy() &&
2502 I->getOperand(0)->getType()->isDoubleTy()) {
2503 bool HasAVX512 = Subtarget->hasAVX512();
2504 // fptrunc from double to float.
2505 unsigned Opc =
2506 HasAVX512 ? X86::VCVTSD2SSZrr
2507 : Subtarget->hasAVX() ? X86::VCVTSD2SSrr : X86::CVTSD2SSrr;
2508 return X86SelectFPExtOrFPTrunc(I, Opc, TLI.getRegClassFor(MVT::f32));
2509 }
2510
2511 return false;
2512 }
2513
2514 bool X86FastISel::X86SelectTrunc(const Instruction *I) {
2515 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
2516 EVT DstVT = TLI.getValueType(DL, I->getType());
2517
2518 // This code only handles truncation to byte.
2519 if (DstVT != MVT::i8 && DstVT != MVT::i1)
2520 return false;
2521 if (!TLI.isTypeLegal(SrcVT))
2522 return false;
2523
2524 Register InputReg = getRegForValue(I->getOperand(0));
2525 if (!InputReg)
2526 // Unhandled operand. Halt "fast" selection and bail.
2527 return false;
2528
2529 if (SrcVT == MVT::i8) {
2530 // Truncate from i8 to i1; no code needed.
2531 updateValueMap(I, InputReg);
2532 return true;
2533 }
2534
2535 // Issue an extract_subreg.
2536 Register ResultReg = fastEmitInst_extractsubreg(MVT::i8, InputReg,
2537 X86::sub_8bit);
2538 if (!ResultReg)
2539 return false;
2540
2541 updateValueMap(I, ResultReg);
2542 return true;
2543 }
2544
2545 bool X86FastISel::IsMemcpySmall(uint64_t Len) {
2546 return Len <= (Subtarget->is64Bit() ? 32 : 16);
2547 }
2548
2549 bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
2550 X86AddressMode SrcAM, uint64_t Len) {
2551
2552 // Make sure we don't bloat code by inlining very large memcpy's.
2553 if (!IsMemcpySmall(Len))
2554 return false;
2555
2556 bool i64Legal = Subtarget->is64Bit();
2557
2558 // We don't care about alignment here since we just emit integer accesses.
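// For example, a 13-byte copy on a 64-bit target becomes one i64, one i32
// and one i8 load/store pair.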
2559 while (Len) {
2560 MVT VT;
2561 if (Len >= 8 && i64Legal)
2562 VT = MVT::i64;
2563 else if (Len >= 4)
2564 VT = MVT::i32;
2565 else if (Len >= 2)
2566 VT = MVT::i16;
2567 else
2568 VT = MVT::i8;
2569
2570 unsigned Reg;
2571 bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
2572 RV &= X86FastEmitStore(VT, Reg, DestAM);
2573 assert(RV && "Failed to emit load or store??");
2574 (void)RV;
2575
2576 unsigned Size = VT.getSizeInBits()/8;
2577 Len -= Size;
2578 DestAM.Disp += Size;
2579 SrcAM.Disp += Size;
2580 }
2581
2582 return true;
2583 }
2584
2585 bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
2586 // FIXME: Handle more intrinsics.
2587 switch (II->getIntrinsicID()) {
2588 default: return false;
2589 case Intrinsic::convert_from_fp16:
2590 case Intrinsic::convert_to_fp16: {
2591 if (Subtarget->useSoftFloat() || !Subtarget->hasF16C())
2592 return false;
2593
2594 const Value *Op = II->getArgOperand(0);
2595 Register InputReg = getRegForValue(Op);
2596 if (InputReg == 0)
2597 return false;
2598
2599 // F16C only allows converting from float to half and from half to float.
2600 bool IsFloatToHalf = II->getIntrinsicID() == Intrinsic::convert_to_fp16;
2601 if (IsFloatToHalf) {
2602 if (!Op->getType()->isFloatTy())
2603 return false;
2604 } else {
2605 if (!II->getType()->isFloatTy())
2606 return false;
2607 }
2608
2609 unsigned ResultReg = 0;
2610 const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16);
2611 if (IsFloatToHalf) {
2612 // 'InputReg' is implicitly promoted from register class FR32 to
2613 // register class VR128 by method 'constrainOperandRegClass' which is
2614 // directly called by 'fastEmitInst_ri'.
2615 // Instruction VCVTPS2PHrr takes an extra immediate operand which is
2616 // used to provide rounding control: use MXCSR.RC, encoded as 0b100.
2617 // It's consistent with the other FP instructions, which are usually
2618 // controlled by MXCSR.
2619 unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPS2PHZ128rr
2620 : X86::VCVTPS2PHrr;
2621 InputReg = fastEmitInst_ri(Opc, RC, InputReg, 4);
2622
2623 // Move the lower 32 bits of InputReg (the converted vector) into a GR32.
2624 Opc = Subtarget->hasAVX512() ? X86::VMOVPDI2DIZrr
2625 : X86::VMOVPDI2DIrr;
2626 ResultReg = createResultReg(&X86::GR32RegClass);
2627 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
2628 .addReg(InputReg, RegState::Kill);
2629
2630 // The result value is in the lower 16-bits of ResultReg.
2631 unsigned RegIdx = X86::sub_16bit;
2632 ResultReg = fastEmitInst_extractsubreg(MVT::i16, ResultReg, RegIdx);
2633 } else {
2634 assert(Op->getType()->isIntegerTy(16) && "Expected a 16-bit integer!");
2635 // Explicitly zero-extend the input to 32-bit.
2636 InputReg = fastEmit_r(MVT::i16, MVT::i32, ISD::ZERO_EXTEND, InputReg);
2637
2638 // The following SCALAR_TO_VECTOR will be expanded into a VMOVDI2PDIrr.
2639 InputReg = fastEmit_r(MVT::i32, MVT::v4i32, ISD::SCALAR_TO_VECTOR,
2640 InputReg);
2641
2642 unsigned Opc = Subtarget->hasVLX() ? X86::VCVTPH2PSZ128rr
2643 : X86::VCVTPH2PSrr;
2644 InputReg = fastEmitInst_r(Opc, RC, InputReg);
2645
2646 // The result value is in the lower 32-bits of InputReg.
2647 // Emit an explicit copy from register class VR128 to register class FR32.
2648 ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
2649 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2650 TII.get(TargetOpcode::COPY), ResultReg)
2651 .addReg(InputReg, RegState::Kill);
2652 }
2653
2654 updateValueMap(II, ResultReg);
2655 return true;
2656 }
2657 case Intrinsic::frameaddress: {
2658 MachineFunction *MF = FuncInfo.MF;
2659 if (MF->getTarget().getMCAsmInfo()->usesWindowsCFI())
2660 return false;
2661
2662 Type *RetTy = II->getCalledFunction()->getReturnType();
2663
2664 MVT VT;
2665 if (!isTypeLegal(RetTy, VT))
2666 return false;
2667
2668 unsigned Opc;
2669 const TargetRegisterClass *RC = nullptr;
2670
2671 switch (VT.SimpleTy) {
2672 default: llvm_unreachable("Invalid result type for frameaddress.");
2673 case MVT::i32: Opc = X86::MOV32rm; RC = &X86::GR32RegClass; break;
2674 case MVT::i64: Opc = X86::MOV64rm; RC = &X86::GR64RegClass; break;
2675 }
2676
2677 // This needs to be set before we call getPtrSizedFrameRegister, otherwise
2678 // we get the wrong frame register.
2679 MachineFrameInfo &MFI = MF->getFrameInfo();
2680 MFI.setFrameAddressIsTaken(true);
2681
2682 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2683 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(*MF);
2684 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
2685 (FrameReg == X86::EBP && VT == MVT::i32)) &&
2686 "Invalid Frame Register!");
2687
2688 // Always make a copy of the frame register to a vreg first, so that we
2689 // never directly reference the frame register (the TwoAddressInstruction-
2690 // Pass doesn't like that).
2691 Register SrcReg = createResultReg(RC);
2692 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2693 TII.get(TargetOpcode::COPY), SrcReg).addReg(FrameReg);
2694
2695 // Now recursively load from the frame address.
2696 // movq (%rbp), %rax
2697 // movq (%rax), %rax
2698 // movq (%rax), %rax
2699 // ...
2700 unsigned Depth = cast<ConstantInt>(II->getOperand(0))->getZExtValue();
2701 while (Depth--) {
2702 Register DestReg = createResultReg(RC);
2703 addDirectMem(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2704 TII.get(Opc), DestReg), SrcReg);
2705 SrcReg = DestReg;
2706 }
2707
2708 updateValueMap(II, SrcReg);
2709 return true;
2710 }
2711 case Intrinsic::memcpy: {
2712 const MemCpyInst *MCI = cast<MemCpyInst>(II);
2713 // Don't handle volatile or variable length memcpys.
2714 if (MCI->isVolatile())
2715 return false;
2716
2717 if (isa<ConstantInt>(MCI->getLength())) {
2718 // Small memcpy's are common enough that we want to do them
2719 // without a call if possible.
2720 uint64_t Len = cast<ConstantInt>(MCI->getLength())->getZExtValue();
2721 if (IsMemcpySmall(Len)) {
2722 X86AddressMode DestAM, SrcAM;
2723 if (!X86SelectAddress(MCI->getRawDest(), DestAM) ||
2724 !X86SelectAddress(MCI->getRawSource(), SrcAM))
2725 return false;
2726 TryEmitSmallMemcpy(DestAM, SrcAM, Len);
2727 return true;
2728 }
2729 }
2730
2731 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2732 if (!MCI->getLength()->getType()->isIntegerTy(SizeWidth))
2733 return false;
2734
2735 if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
2736 return false;
2737
2738 return lowerCallTo(II, "memcpy", II->arg_size() - 1);
2739 }
2740 case Intrinsic::memset: {
2741 const MemSetInst *MSI = cast<MemSetInst>(II);
2742
2743 if (MSI->isVolatile())
2744 return false;
2745
2746 unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
2747 if (!MSI->getLength()->getType()->isIntegerTy(SizeWidth))
2748 return false;
2749
2750 if (MSI->getDestAddressSpace() > 255)
2751 return false;
2752
2753 return lowerCallTo(II, "memset", II->arg_size() - 1);
2754 }
2755 case Intrinsic::stackprotector: {
2756 // Emit code to store the stack guard onto the stack.
2757 EVT PtrTy = TLI.getPointerTy(DL);
2758
2759 const Value *Op1 = II->getArgOperand(0); // The guard's value.
2760 const AllocaInst *Slot = cast<AllocaInst>(II->getArgOperand(1));
2761
2762 MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
2763
2764 // Grab the frame index.
2765 X86AddressMode AM;
2766 if (!X86SelectAddress(Slot, AM)) return false;
2767 if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
2768 return true;
2769 }
2770 case Intrinsic::dbg_declare: {
2771 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
2772 X86AddressMode AM;
2773 assert(DI->getAddress() && "Null address should be checked earlier!");
2774 if (!X86SelectAddress(DI->getAddress(), AM))
2775 return false;
2776 const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
2777 assert(DI->getVariable()->isValidLocationForIntrinsic(MIMD.getDL()) &&
2778 "Expected inlined-at fields to agree");
2779 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II), AM)
2780 .addImm(0)
2781 .addMetadata(DI->getVariable())
2782 .addMetadata(DI->getExpression());
2783 return true;
2784 }
2785 case Intrinsic::trap: {
2786 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::TRAP));
2787 return true;
2788 }
2789 case Intrinsic::sqrt: {
2790 if (!Subtarget->hasSSE1())
2791 return false;
2792
2793 Type *RetTy = II->getCalledFunction()->getReturnType();
2794
2795 MVT VT;
2796 if (!isTypeLegal(RetTy, VT))
2797 return false;
2798
2799 // Unfortunately we can't use fastEmit_r, because the AVX version of FSQRT
2800 // is not generated by FastISel yet.
2801 // FIXME: Update this code once tablegen can handle it.
2802 static const uint16_t SqrtOpc[3][2] = {
2803 { X86::SQRTSSr, X86::SQRTSDr },
2804 { X86::VSQRTSSr, X86::VSQRTSDr },
2805 { X86::VSQRTSSZr, X86::VSQRTSDZr },
2806 };
2807 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
2808 Subtarget->hasAVX() ? 1 :
2809 0;
2810 unsigned Opc;
2811 switch (VT.SimpleTy) {
2812 default: return false;
2813 case MVT::f32: Opc = SqrtOpc[AVXLevel][0]; break;
2814 case MVT::f64: Opc = SqrtOpc[AVXLevel][1]; break;
2815 }
2816
2817 const Value *SrcVal = II->getArgOperand(0);
2818 Register SrcReg = getRegForValue(SrcVal);
2819
2820 if (SrcReg == 0)
2821 return false;
2822
2823 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
2824 unsigned ImplicitDefReg = 0;
2825 if (AVXLevel > 0) {
2826 ImplicitDefReg = createResultReg(RC);
2827 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2828 TII.get(TargetOpcode::IMPLICIT_DEF), ImplicitDefReg);
2829 }
2830
2831 Register ResultReg = createResultReg(RC);
2832 MachineInstrBuilder MIB;
2833 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
2834 ResultReg);
2835
2836 if (ImplicitDefReg)
2837 MIB.addReg(ImplicitDefReg);
2838
2839 MIB.addReg(SrcReg);
2840
2841 updateValueMap(II, ResultReg);
2842 return true;
2843 }
2844 case Intrinsic::sadd_with_overflow:
2845 case Intrinsic::uadd_with_overflow:
2846 case Intrinsic::ssub_with_overflow:
2847 case Intrinsic::usub_with_overflow:
2848 case Intrinsic::smul_with_overflow:
2849 case Intrinsic::umul_with_overflow: {
2850 // This implements the basic lowering of the xalu with overflow intrinsics
2851 // into add/sub/mul followed by either seto or setb.
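// For example, llvm.uadd.with.overflow.i32 becomes an ADD32 followed by a
// SETCC on COND_B (carry), while the signed add/sub variants check COND_O
// (overflow) instead.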
2852 const Function *Callee = II->getCalledFunction();
2853 auto *Ty = cast<StructType>(Callee->getReturnType());
2854 Type *RetTy = Ty->getTypeAtIndex(0U);
2855 assert(Ty->getTypeAtIndex(1)->isIntegerTy() &&
2856 Ty->getTypeAtIndex(1)->getScalarSizeInBits() == 1 &&
2857 "Overflow value expected to be an i1");
2858
2859 MVT VT;
2860 if (!isTypeLegal(RetTy, VT))
2861 return false;
2862
2863 if (VT < MVT::i8 || VT > MVT::i64)
2864 return false;
2865
2866 const Value *LHS = II->getArgOperand(0);
2867 const Value *RHS = II->getArgOperand(1);
2868
2869 // Canonicalize immediate to the RHS.
2870 if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS) && II->isCommutative())
2871 std::swap(LHS, RHS);
2872
2873 unsigned BaseOpc, CondCode;
2874 switch (II->getIntrinsicID()) {
2875 default: llvm_unreachable("Unexpected intrinsic!");
2876 case Intrinsic::sadd_with_overflow:
2877 BaseOpc = ISD::ADD; CondCode = X86::COND_O; break;
2878 case Intrinsic::uadd_with_overflow:
2879 BaseOpc = ISD::ADD; CondCode = X86::COND_B; break;
2880 case Intrinsic::ssub_with_overflow:
2881 BaseOpc = ISD::SUB; CondCode = X86::COND_O; break;
2882 case Intrinsic::usub_with_overflow:
2883 BaseOpc = ISD::SUB; CondCode = X86::COND_B; break;
2884 case Intrinsic::smul_with_overflow:
2885 BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break;
2886 case Intrinsic::umul_with_overflow:
2887 BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;
2888 }
2889
2890 Register LHSReg = getRegForValue(LHS);
2891 if (LHSReg == 0)
2892 return false;
2893
2894 unsigned ResultReg = 0;
2895 // Check if we have an immediate version.
2896 if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
2897 static const uint16_t Opc[2][4] = {
2898 { X86::INC8r, X86::INC16r, X86::INC32r, X86::INC64r },
2899 { X86::DEC8r, X86::DEC16r, X86::DEC32r, X86::DEC64r }
2900 };
2901
2902 if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
2903 CondCode == X86::COND_O) {
2904 // We can use INC/DEC.
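// (Signed forms only: INC/DEC update OF but leave CF untouched, so they
// cannot be used when the overflow check is COND_B.)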
2905 ResultReg = createResultReg(TLI.getRegClassFor(VT));
2906 bool IsDec = BaseOpc == ISD::SUB;
2907 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2908 TII.get(Opc[IsDec][VT.SimpleTy-MVT::i8]), ResultReg)
2909 .addReg(LHSReg);
2910 } else
2911 ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue());
2912 }
2913
2914 unsigned RHSReg;
2915 if (!ResultReg) {
2916 RHSReg = getRegForValue(RHS);
2917 if (RHSReg == 0)
2918 return false;
2919 ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg);
2920 }
2921
2922 // FastISel doesn't have a pattern for all X86::MUL*r and X86::IMUL*r. Emit
2923 // it manually.
2924 if (BaseOpc == X86ISD::UMUL && !ResultReg) {
2925 static const uint16_t MULOpc[] =
2926 { X86::MUL8r, X86::MUL16r, X86::MUL32r, X86::MUL64r };
2927 static const MCPhysReg Reg[] = { X86::AL, X86::AX, X86::EAX, X86::RAX };
2928 // First copy the first operand into AL/AX/EAX/RAX, which is the implicit
2929 // input to the X86::MUL*r instruction.
2930 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2931 TII.get(TargetOpcode::COPY), Reg[VT.SimpleTy-MVT::i8])
2932 .addReg(LHSReg);
2933 ResultReg = fastEmitInst_r(MULOpc[VT.SimpleTy-MVT::i8],
2934 TLI.getRegClassFor(VT), RHSReg);
2935 } else if (BaseOpc == X86ISD::SMUL && !ResultReg) {
2936 static const uint16_t MULOpc[] =
2937 { X86::IMUL8r, X86::IMUL16rr, X86::IMUL32rr, X86::IMUL64rr };
2938 if (VT == MVT::i8) {
2939 // Copy the first operand into AL, which is an implicit input to the
2940 // X86::IMUL8r instruction.
2941 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
2942 TII.get(TargetOpcode::COPY), X86::AL)
2943 .addReg(LHSReg);
2944 ResultReg = fastEmitInst_r(MULOpc[0], TLI.getRegClassFor(VT), RHSReg);
2945 } else
2946 ResultReg = fastEmitInst_rr(MULOpc[VT.SimpleTy-MVT::i8],
2947 TLI.getRegClassFor(VT), LHSReg, RHSReg);
2948 }
2949
2950 if (!ResultReg)
2951 return false;
2952
2953 // Assign to a GPR since the overflow return value is lowered to a SETcc.
2954 Register ResultReg2 = createResultReg(&X86::GR8RegClass);
2955 assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
2956 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::SETCCr),
2957 ResultReg2).addImm(CondCode);
2958
2959 updateValueMap(II, ResultReg, 2);
2960 return true;
2961 }
2962 case Intrinsic::x86_sse_cvttss2si:
2963 case Intrinsic::x86_sse_cvttss2si64:
2964 case Intrinsic::x86_sse2_cvttsd2si:
2965 case Intrinsic::x86_sse2_cvttsd2si64: {
2966 bool IsInputDouble;
2967 switch (II->getIntrinsicID()) {
2968 default: llvm_unreachable("Unexpected intrinsic.");
2969 case Intrinsic::x86_sse_cvttss2si:
2970 case Intrinsic::x86_sse_cvttss2si64:
2971 if (!Subtarget->hasSSE1())
2972 return false;
2973 IsInputDouble = false;
2974 break;
2975 case Intrinsic::x86_sse2_cvttsd2si:
2976 case Intrinsic::x86_sse2_cvttsd2si64:
2977 if (!Subtarget->hasSSE2())
2978 return false;
2979 IsInputDouble = true;
2980 break;
2981 }
2982
2983 Type *RetTy = II->getCalledFunction()->getReturnType();
2984 MVT VT;
2985 if (!isTypeLegal(RetTy, VT))
2986 return false;
2987
2988 static const uint16_t CvtOpc[3][2][2] = {
2989 { { X86::CVTTSS2SIrr, X86::CVTTSS2SI64rr },
2990 { X86::CVTTSD2SIrr, X86::CVTTSD2SI64rr } },
2991 { { X86::VCVTTSS2SIrr, X86::VCVTTSS2SI64rr },
2992 { X86::VCVTTSD2SIrr, X86::VCVTTSD2SI64rr } },
2993 { { X86::VCVTTSS2SIZrr, X86::VCVTTSS2SI64Zrr },
2994 { X86::VCVTTSD2SIZrr, X86::VCVTTSD2SI64Zrr } },
2995 };
2996 unsigned AVXLevel = Subtarget->hasAVX512() ? 2 :
2997 Subtarget->hasAVX() ? 1 :
2998 0;
2999 unsigned Opc;
3000 switch (VT.SimpleTy) {
3001 default: llvm_unreachable("Unexpected result type.");
3002 case MVT::i32: Opc = CvtOpc[AVXLevel][IsInputDouble][0]; break;
3003 case MVT::i64: Opc = CvtOpc[AVXLevel][IsInputDouble][1]; break;
3004 }
3005
3006 // Check if we can fold insertelement instructions into the convert.
3007 const Value *Op = II->getArgOperand(0);
3008 while (auto *IE = dyn_cast<InsertElementInst>(Op)) {
3009 const Value *Index = IE->getOperand(2);
3010 if (!isa<ConstantInt>(Index))
3011 break;
3012 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
3013
3014 if (Idx == 0) {
3015 Op = IE->getOperand(1);
3016 break;
3017 }
3018 Op = IE->getOperand(0);
3019 }
3020
3021 Register Reg = getRegForValue(Op);
3022 if (Reg == 0)
3023 return false;
3024
3025 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3026 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg)
3027 .addReg(Reg);
3028
3029 updateValueMap(II, ResultReg);
3030 return true;
3031 }
3032 case Intrinsic::x86_sse42_crc32_32_8:
3033 case Intrinsic::x86_sse42_crc32_32_16:
3034 case Intrinsic::x86_sse42_crc32_32_32:
3035 case Intrinsic::x86_sse42_crc32_64_64: {
3036 if (!Subtarget->hasCRC32())
3037 return false;
3038
3039 Type *RetTy = II->getCalledFunction()->getReturnType();
3040
3041 MVT VT;
3042 if (!isTypeLegal(RetTy, VT))
3043 return false;
3044
3045 unsigned Opc;
3046 const TargetRegisterClass *RC = nullptr;
3047
3048 switch (II->getIntrinsicID()) {
3049 default:
3050 llvm_unreachable("Unexpected intrinsic.");
3051 #define GET_EGPR_IF_ENABLED(OPC) Subtarget->hasEGPR() ? OPC##_EVEX : OPC
3052 case Intrinsic::x86_sse42_crc32_32_8:
3053 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r8);
3054 RC = &X86::GR32RegClass;
3055 break;
3056 case Intrinsic::x86_sse42_crc32_32_16:
3057 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r16);
3058 RC = &X86::GR32RegClass;
3059 break;
3060 case Intrinsic::x86_sse42_crc32_32_32:
3061 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r32r32);
3062 RC = &X86::GR32RegClass;
3063 break;
3064 case Intrinsic::x86_sse42_crc32_64_64:
3065 Opc = GET_EGPR_IF_ENABLED(X86::CRC32r64r64);
3066 RC = &X86::GR64RegClass;
3067 break;
3068 #undef GET_EGPR_IF_ENABLED
3069 }
3070
3071 const Value *LHS = II->getArgOperand(0);
3072 const Value *RHS = II->getArgOperand(1);
3073
3074 Register LHSReg = getRegForValue(LHS);
3075 Register RHSReg = getRegForValue(RHS);
3076 if (!LHSReg || !RHSReg)
3077 return false;
3078
3079 Register ResultReg = fastEmitInst_rr(Opc, RC, LHSReg, RHSReg);
3080 if (!ResultReg)
3081 return false;
3082
3083 updateValueMap(II, ResultReg);
3084 return true;
3085 }
3086 }
3087 }
3088
3089 bool X86FastISel::fastLowerArguments() {
3090 if (!FuncInfo.CanLowerReturn)
3091 return false;
3092
3093 const Function *F = FuncInfo.Fn;
3094 if (F->isVarArg())
3095 return false;
3096
3097 CallingConv::ID CC = F->getCallingConv();
3098 if (CC != CallingConv::C)
3099 return false;
3100
3101 if (Subtarget->isCallingConvWin64(CC))
3102 return false;
3103
3104 if (!Subtarget->is64Bit())
3105 return false;
3106
3107 if (Subtarget->useSoftFloat())
3108 return false;
3109
3110 // Only handle simple cases, i.e. up to 6 i32/i64 GPR arguments and up to 8 f32/f64 XMM arguments.
3111 unsigned GPRCnt = 0;
3112 unsigned FPRCnt = 0;
3113 for (auto const &Arg : F->args()) {
3114 if (Arg.hasAttribute(Attribute::ByVal) ||
3115 Arg.hasAttribute(Attribute::InReg) ||
3116 Arg.hasAttribute(Attribute::StructRet) ||
3117 Arg.hasAttribute(Attribute::SwiftSelf) ||
3118 Arg.hasAttribute(Attribute::SwiftAsync) ||
3119 Arg.hasAttribute(Attribute::SwiftError) ||
3120 Arg.hasAttribute(Attribute::Nest))
3121 return false;
3122
3123 Type *ArgTy = Arg.getType();
3124 if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
3125 return false;
3126
3127 EVT ArgVT = TLI.getValueType(DL, ArgTy);
3128 if (!ArgVT.isSimple()) return false;
3129 switch (ArgVT.getSimpleVT().SimpleTy) {
3130 default: return false;
3131 case MVT::i32:
3132 case MVT::i64:
3133 ++GPRCnt;
3134 break;
3135 case MVT::f32:
3136 case MVT::f64:
3137 if (!Subtarget->hasSSE1())
3138 return false;
3139 ++FPRCnt;
3140 break;
3141 }
3142
3143 if (GPRCnt > 6)
3144 return false;
3145
3146 if (FPRCnt > 8)
3147 return false;
3148 }
3149
3150 static const MCPhysReg GPR32ArgRegs[] = {
3151 X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
3152 };
3153 static const MCPhysReg GPR64ArgRegs[] = {
3154 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
3155 };
3156 static const MCPhysReg XMMArgRegs[] = {
3157 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3158 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3159 };
3160
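  // Walk the arguments a second time, binding each formal argument to its
  // incoming physical register and copying it into a fresh virtual register.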
3161 unsigned GPRIdx = 0;
3162 unsigned FPRIdx = 0;
3163 for (auto const &Arg : F->args()) {
3164 MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
3165 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
3166 unsigned SrcReg;
3167 switch (VT.SimpleTy) {
3168 default: llvm_unreachable("Unexpected value type.");
3169 case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
3170 case MVT::i64: SrcReg = GPR64ArgRegs[GPRIdx++]; break;
3171 case MVT::f32: [[fallthrough]];
3172 case MVT::f64: SrcReg = XMMArgRegs[FPRIdx++]; break;
3173 }
3174 Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
3175 // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
3176 // Without this, EmitLiveInCopies may eliminate the livein if its only
3177 // use is a bitcast (which isn't turned into an instruction).
3178 Register ResultReg = createResultReg(RC);
3179 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3180 TII.get(TargetOpcode::COPY), ResultReg)
3181 .addReg(DstReg, getKillRegState(true));
3182 updateValueMap(&Arg, ResultReg);
3183 }
3184 return true;
3185 }
3186
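// On 32-bit x86 the callee normally pops the hidden sret pointer (4 bytes).
// Nothing is popped for MSVCRT or MCU targets, fastcc-like conventions, or
// when the sret argument is passed in a register.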
3187 static unsigned computeBytesPoppedByCalleeForSRet(const X86Subtarget *Subtarget,
3188 CallingConv::ID CC,
3189 const CallBase *CB) {
3190 if (Subtarget->is64Bit())
3191 return 0;
3192 if (Subtarget->getTargetTriple().isOSMSVCRT())
3193 return 0;
3194 if (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3195 CC == CallingConv::HiPE || CC == CallingConv::Tail ||
3196 CC == CallingConv::SwiftTail)
3197 return 0;
3198
3199 if (CB)
3200 if (CB->arg_empty() || !CB->paramHasAttr(0, Attribute::StructRet) ||
3201 CB->paramHasAttr(0, Attribute::InReg) || Subtarget->isTargetMCU())
3202 return 0;
3203
3204 return 4;
3205 }
3206
3207 bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
3208 auto &OutVals = CLI.OutVals;
3209 auto &OutFlags = CLI.OutFlags;
3210 auto &OutRegs = CLI.OutRegs;
3211 auto &Ins = CLI.Ins;
3212 auto &InRegs = CLI.InRegs;
3213 CallingConv::ID CC = CLI.CallConv;
3214 bool &IsTailCall = CLI.IsTailCall;
3215 bool IsVarArg = CLI.IsVarArg;
3216 const Value *Callee = CLI.Callee;
3217 MCSymbol *Symbol = CLI.Symbol;
3218 const auto *CB = CLI.CB;
3219
3220 bool Is64Bit = Subtarget->is64Bit();
3221 bool IsWin64 = Subtarget->isCallingConvWin64(CC);
3222
3223 // Call / invoke instructions with NoCfCheck attribute require special
3224 // handling.
3225 if (CB && CB->doesNoCfCheck())
3226 return false;
3227
3228 // Functions with no_caller_saved_registers need special handling.
3229 if ((CB && isa<CallInst>(CB) && CB->hasFnAttr("no_caller_saved_registers")))
3230 return false;
3231
3232 // Functions with no_callee_saved_registers need special handling.
3233 if ((CB && CB->hasFnAttr("no_callee_saved_registers")))
3234 return false;
3235
3236 // Indirect calls with CFI checks need special handling.
3237 if (CB && CB->isIndirectCall() && CB->getOperandBundle(LLVMContext::OB_kcfi))
3238 return false;
3239
3240 // Functions using thunks for indirect calls need to use SDISel.
3241 if (Subtarget->useIndirectThunkCalls())
3242 return false;
3243
3244 // Handle only C and fastcc calling conventions for now.
3245 switch (CC) {
3246 default: return false;
3247 case CallingConv::C:
3248 case CallingConv::Fast:
3249 case CallingConv::Tail:
3250 case CallingConv::Swift:
3251 case CallingConv::SwiftTail:
3252 case CallingConv::X86_FastCall:
3253 case CallingConv::X86_StdCall:
3254 case CallingConv::X86_ThisCall:
3255 case CallingConv::Win64:
3256 case CallingConv::X86_64_SysV:
3257 case CallingConv::CFGuard_Check:
3258 break;
3259 }
3260
3261 // Allow SelectionDAG isel to handle tail calls.
3262 if (IsTailCall)
3263 return false;
3264
3265 // fastcc with -tailcallopt is intended to provide a guaranteed
3266 // tail call optimization. FastISel doesn't know how to do that.
3267 if ((CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt) ||
3268 CC == CallingConv::Tail || CC == CallingConv::SwiftTail)
3269 return false;
3270
3271 // Don't know how to handle Win64 varargs yet. Nothing special is needed for
3272 // x86-32, and the special handling for x86-64 (SysV) is implemented below.
3273 if (IsVarArg && IsWin64)
3274 return false;
3275
3276 // Don't know about inalloca yet.
3277 if (CLI.CB && CLI.CB->hasInAllocaArgument())
3278 return false;
3279
3280 for (auto Flag : CLI.OutFlags)
3281 if (Flag.isSwiftError() || Flag.isPreallocated())
3282 return false;
3283
3284 SmallVector<MVT, 16> OutVTs;
3285 SmallVector<unsigned, 16> ArgRegs;
3286
3287 // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
3288 // instruction. This is safe because it is common to all FastISel supported
3289 // calling conventions on x86.
3290 for (int i = 0, e = OutVals.size(); i != e; ++i) {
3291 Value *&Val = OutVals[i];
3292 ISD::ArgFlagsTy Flags = OutFlags[i];
3293 if (auto *CI = dyn_cast<ConstantInt>(Val)) {
3294 if (CI->getBitWidth() < 32) {
3295 if (Flags.isSExt())
3296 Val = ConstantInt::get(CI->getContext(), CI->getValue().sext(32));
3297 else
3298 Val = ConstantInt::get(CI->getContext(), CI->getValue().zext(32));
3299 }
3300 }
3301
3302 // Passing bools around ends up doing a trunc to i1 and passing it.
3303 // Codegen this as an argument + "and 1".
3304 MVT VT;
3305 auto *TI = dyn_cast<TruncInst>(Val);
3306 unsigned ResultReg;
3307 if (TI && TI->getType()->isIntegerTy(1) && CLI.CB &&
3308 (TI->getParent() == CLI.CB->getParent()) && TI->hasOneUse()) {
3309 Value *PrevVal = TI->getOperand(0);
3310 ResultReg = getRegForValue(PrevVal);
3311
3312 if (!ResultReg)
3313 return false;
3314
3315 if (!isTypeLegal(PrevVal->getType(), VT))
3316 return false;
3317
3318 ResultReg = fastEmit_ri(VT, VT, ISD::AND, ResultReg, 1);
3319 } else {
3320 if (!isTypeLegal(Val->getType(), VT) ||
3321 (VT.isVector() && VT.getVectorElementType() == MVT::i1))
3322 return false;
3323 ResultReg = getRegForValue(Val);
3324 }
3325
3326 if (!ResultReg)
3327 return false;
3328
3329 ArgRegs.push_back(ResultReg);
3330 OutVTs.push_back(VT);
3331 }
3332
3333 // Analyze operands of the call, assigning locations to each operand.
3334 SmallVector<CCValAssign, 16> ArgLocs;
3335 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());
3336
3337 // Allocate shadow area for Win64
3338 if (IsWin64)
3339 CCInfo.AllocateStack(32, Align(8));
3340
3341 CCInfo.AnalyzeCallOperands(OutVTs, OutFlags, CC_X86);
3342
3343 // Get a count of how many bytes are to be pushed on the stack.
3344 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3345
3346 // Issue CALLSEQ_START
3347 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
3348 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackDown))
3349 .addImm(NumBytes).addImm(0).addImm(0);
3350
3351 // Walk the register/memloc assignments, inserting copies/loads.
3352 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3353 for (const CCValAssign &VA : ArgLocs) {
3354 const Value *ArgVal = OutVals[VA.getValNo()];
3355 MVT ArgVT = OutVTs[VA.getValNo()];
3356
3357 if (ArgVT == MVT::x86mmx)
3358 return false;
3359
3360 unsigned ArgReg = ArgRegs[VA.getValNo()];
3361
3362 // Promote the value if needed.
3363 switch (VA.getLocInfo()) {
3364 case CCValAssign::Full: break;
3365 case CCValAssign::SExt: {
3366 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3367 "Unexpected extend");
3368
3369 if (ArgVT == MVT::i1)
3370 return false;
3371
3372 bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
3373 ArgVT, ArgReg);
3374 assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
3375 ArgVT = VA.getLocVT();
3376 break;
3377 }
3378 case CCValAssign::ZExt: {
3379 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3380 "Unexpected extend");
3381
3382 // Handle zero-extension from i1 to i8, which is common.
3383 if (ArgVT == MVT::i1) {
3384 // Set the high bits to zero.
3385 ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg);
3386 ArgVT = MVT::i8;
3387
3388 if (ArgReg == 0)
3389 return false;
3390 }
3391
3392 bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
3393 ArgVT, ArgReg);
3394 assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
3395 ArgVT = VA.getLocVT();
3396 break;
3397 }
3398 case CCValAssign::AExt: {
3399 assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
3400 "Unexpected extend");
3401 bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(), ArgReg,
3402 ArgVT, ArgReg);
3403 if (!Emitted)
3404 Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(), ArgReg,
3405 ArgVT, ArgReg);
3406 if (!Emitted)
3407 Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), ArgReg,
3408 ArgVT, ArgReg);
3409
3410 assert(Emitted && "Failed to emit an aext!"); (void)Emitted;
3411 ArgVT = VA.getLocVT();
3412 break;
3413 }
3414 case CCValAssign::BCvt: {
3415 ArgReg = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, ArgReg);
3416 assert(ArgReg && "Failed to emit a bitcast!");
3417 ArgVT = VA.getLocVT();
3418 break;
3419 }
3420 case CCValAssign::VExt:
3421 // VExt has not been implemented, so this should be impossible to reach
3422 // for now. However, fall back to SelectionDAG isel once it is implemented.
3423 return false;
3424 case CCValAssign::AExtUpper:
3425 case CCValAssign::SExtUpper:
3426 case CCValAssign::ZExtUpper:
3427 case CCValAssign::FPExt:
3428 case CCValAssign::Trunc:
3429 llvm_unreachable("Unexpected loc info!");
3430 case CCValAssign::Indirect:
3431 // FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
3432 // support this.
3433 return false;
3434 }
3435
3436 if (VA.isRegLoc()) {
3437 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3438 TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
3439 OutRegs.push_back(VA.getLocReg());
3440 } else {
3441 assert(VA.isMemLoc() && "Unknown value location!");
3442
3443 // Don't emit stores for undef values.
3444 if (isa<UndefValue>(ArgVal))
3445 continue;
3446
3447 unsigned LocMemOffset = VA.getLocMemOffset();
3448 X86AddressMode AM;
3449 AM.Base.Reg = RegInfo->getStackRegister();
3450 AM.Disp = LocMemOffset;
3451 ISD::ArgFlagsTy Flags = OutFlags[VA.getValNo()];
3452 Align Alignment = DL.getABITypeAlign(ArgVal->getType());
3453 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
3454 MachinePointerInfo::getStack(*FuncInfo.MF, LocMemOffset),
3455 MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
3456 if (Flags.isByVal()) {
3457 X86AddressMode SrcAM;
3458 SrcAM.Base.Reg = ArgReg;
3459 if (!TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize()))
3460 return false;
3461 } else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
3462 // If this is a really simple value, emit this with the Value* version
3463 // of X86FastEmitStore. If it isn't simple, we don't want to do this,
3464 // as it can cause us to reevaluate the argument.
3465 if (!X86FastEmitStore(ArgVT, ArgVal, AM, MMO))
3466 return false;
3467 } else {
3468 if (!X86FastEmitStore(ArgVT, ArgReg, AM, MMO))
3469 return false;
3470 }
3471 }
3472 }
3473
3474 // ELF / PIC requires the GOT pointer to be in EBX before making function
3475 // calls via the PLT.
3476 if (Subtarget->isPICStyleGOT()) {
3477 unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3478 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3479 TII.get(TargetOpcode::COPY), X86::EBX).addReg(Base);
3480 }
3481
3482 if (Is64Bit && IsVarArg && !IsWin64) {
3483 // From AMD64 ABI document:
3484 // For calls that may call functions that use varargs or stdargs
3485 // (prototype-less calls or calls to functions containing ellipsis (...) in
3486 // the declaration), %al is used as a hidden argument to specify the number
3487 // of SSE registers used. The contents of %al do not need to match exactly
3488 // the number of registers, but must be an upper bound on the number of SSE
3489 // registers used and must be in the range 0 - 8 inclusive.
3490
3491 // Count the number of XMM registers allocated.
3492 static const MCPhysReg XMMArgRegs[] = {
3493 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3494 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3495 };
3496 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3497 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3498 && "SSE registers cannot be used when SSE is disabled");
3499 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV8ri),
3500 X86::AL).addImm(NumXMMRegs);
3501 }
3502
3503 // Materialize callee address in a register. FIXME: GV address can be
3504 // handled with a CALLpcrel32 instead.
3505 X86AddressMode CalleeAM;
3506 if (!X86SelectCallAddress(Callee, CalleeAM))
3507 return false;
3508
3509 unsigned CalleeOp = 0;
3510 const GlobalValue *GV = nullptr;
3511 if (CalleeAM.GV != nullptr) {
3512 GV = CalleeAM.GV;
3513 } else if (CalleeAM.Base.Reg != 0) {
3514 CalleeOp = CalleeAM.Base.Reg;
3515 } else
3516 return false;
3517
3518 // Issue the call.
3519 MachineInstrBuilder MIB;
3520 if (CalleeOp) {
3521 // Register-indirect call.
3522 unsigned CallOpc = Is64Bit ? X86::CALL64r : X86::CALL32r;
3523 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc))
3524 .addReg(CalleeOp);
3525 } else {
3526 // Direct call.
3527 assert(GV && "Not a direct call");
3528 // See if we need any target-specific flags on the GV operand.
3529 unsigned char OpFlags = Subtarget->classifyGlobalFunctionReference(GV);
3530 if (OpFlags == X86II::MO_PLT && !Is64Bit &&
3531 TM.getRelocationModel() == Reloc::Static && isa<Function>(GV) &&
3532 cast<Function>(GV)->isIntrinsic())
3533 OpFlags = X86II::MO_NO_FLAG;
3534
3535 // This will be a direct call, or an indirect call through memory for
3536 // NonLazyBind calls or dllimport calls.
3537 bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT ||
3538 OpFlags == X86II::MO_GOTPCREL ||
3539 OpFlags == X86II::MO_GOTPCREL_NORELAX ||
3540 OpFlags == X86II::MO_COFFSTUB;
3541 unsigned CallOpc = NeedLoad
3542 ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
3543 : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);
3544
3545 MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(CallOpc));
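    // For memory-indirect calls the operands added below form a
    // base/scale/index/disp/segment memory reference: RIP-relative in 64-bit
    // mode, an absolute address otherwise.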
3546 if (NeedLoad)
3547 MIB.addReg(Is64Bit ? X86::RIP : 0).addImm(1).addReg(0);
3548 if (Symbol)
3549 MIB.addSym(Symbol, OpFlags);
3550 else
3551 MIB.addGlobalAddress(GV, 0, OpFlags);
3552 if (NeedLoad)
3553 MIB.addReg(0);
3554 }
3555
3556 // Add a register mask operand representing the call-preserved registers.
3557 // Proper defs for return values will be added by setPhysRegsDeadExcept().
3558 MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
3559
3560 // Add an implicit use of the GOT pointer in EBX.
3561 if (Subtarget->isPICStyleGOT())
3562 MIB.addReg(X86::EBX, RegState::Implicit);
3563
3564 if (Is64Bit && IsVarArg && !IsWin64)
3565 MIB.addReg(X86::AL, RegState::Implicit);
3566
3567 // Add implicit physical register uses to the call.
3568 for (auto Reg : OutRegs)
3569 MIB.addReg(Reg, RegState::Implicit);
3570
3571 // Issue CALLSEQ_END
3572 unsigned NumBytesForCalleeToPop =
3573 X86::isCalleePop(CC, Subtarget->is64Bit(), IsVarArg,
3574 TM.Options.GuaranteedTailCallOpt)
3575 ? NumBytes // Callee pops everything.
3576 : computeBytesPoppedByCalleeForSRet(Subtarget, CC, CLI.CB);
3577 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
3578 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(AdjStackUp))
3579 .addImm(NumBytes).addImm(NumBytesForCalleeToPop);
3580
3581 // Now handle call return values.
3582 SmallVector<CCValAssign, 16> RVLocs;
3583 CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
3584 CLI.RetTy->getContext());
3585 CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
3586
3587 // Copy all of the result registers out of their specified physreg.
3588 Register ResultReg = FuncInfo.CreateRegs(CLI.RetTy);
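  // CreateRegs allocates consecutive virtual registers, one per legal part of
  // the return type, so ResultReg + i below is the register for the i-th
  // result value.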
3589 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3590 CCValAssign &VA = RVLocs[i];
3591 EVT CopyVT = VA.getValVT();
3592 unsigned CopyReg = ResultReg + i;
3593 Register SrcReg = VA.getLocReg();
3594
3595 // If this is x86-64 (or a 32-bit inreg return) and SSE is disabled, we can't return FP values.
3596 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
3597 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
3598 report_fatal_error("SSE register return with SSE disabled");
3599 }
3600
3601 // If we prefer to use the value in xmm registers, copy it out as f80 and
3602 // use a truncate to move it from fp stack reg to xmm reg.
3603 if ((SrcReg == X86::FP0 || SrcReg == X86::FP1) &&
3604 isScalarFPTypeInSSEReg(VA.getValVT())) {
3605 CopyVT = MVT::f80;
3606 CopyReg = createResultReg(&X86::RFP80RegClass);
3607 }
3608
3609 // Copy out the result.
3610 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3611 TII.get(TargetOpcode::COPY), CopyReg).addReg(SrcReg);
3612 InRegs.push_back(VA.getLocReg());
3613
3614 // Round the f80 to the right size, which also moves it to the appropriate
3615 // xmm register. This is accomplished by storing the f80 value in memory
3616 // and then loading it back.
3617 if (CopyVT != VA.getValVT()) {
3618 EVT ResVT = VA.getValVT();
3619 unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
3620 unsigned MemSize = ResVT.getSizeInBits()/8;
3621 int FI = MFI.CreateStackObject(MemSize, Align(MemSize), false);
3622 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3623 TII.get(Opc)), FI)
3624 .addReg(CopyReg);
3625 Opc = ResVT == MVT::f32 ? X86::MOVSSrm_alt : X86::MOVSDrm_alt;
3626 addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3627 TII.get(Opc), ResultReg + i), FI);
3628 }
3629 }
3630
3631 CLI.ResultReg = ResultReg;
3632 CLI.NumResultRegs = RVLocs.size();
3633 CLI.Call = MIB;
3634
3635 return true;
3636 }
3637
3638 bool
3639 X86FastISel::fastSelectInstruction(const Instruction *I) {
3640 switch (I->getOpcode()) {
3641 default: break;
3642 case Instruction::Load:
3643 return X86SelectLoad(I);
3644 case Instruction::Store:
3645 return X86SelectStore(I);
3646 case Instruction::Ret:
3647 return X86SelectRet(I);
3648 case Instruction::ICmp:
3649 case Instruction::FCmp:
3650 return X86SelectCmp(I);
3651 case Instruction::ZExt:
3652 return X86SelectZExt(I);
3653 case Instruction::SExt:
3654 return X86SelectSExt(I);
3655 case Instruction::Br:
3656 return X86SelectBranch(I);
3657 case Instruction::LShr:
3658 case Instruction::AShr:
3659 case Instruction::Shl:
3660 return X86SelectShift(I);
3661 case Instruction::SDiv:
3662 case Instruction::UDiv:
3663 case Instruction::SRem:
3664 case Instruction::URem:
3665 return X86SelectDivRem(I);
3666 case Instruction::Select:
3667 return X86SelectSelect(I);
3668 case Instruction::Trunc:
3669 return X86SelectTrunc(I);
3670 case Instruction::FPExt:
3671 return X86SelectFPExt(I);
3672 case Instruction::FPTrunc:
3673 return X86SelectFPTrunc(I);
3674 case Instruction::SIToFP:
3675 return X86SelectSIToFP(I);
3676 case Instruction::UIToFP:
3677 return X86SelectUIToFP(I);
3678 case Instruction::IntToPtr: // Deliberate fall-through.
3679 case Instruction::PtrToInt: {
3680 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
3681 EVT DstVT = TLI.getValueType(DL, I->getType());
3682 if (DstVT.bitsGT(SrcVT))
3683 return X86SelectZExt(I);
3684 if (DstVT.bitsLT(SrcVT))
3685 return X86SelectTrunc(I);
3686 Register Reg = getRegForValue(I->getOperand(0));
3687 if (Reg == 0) return false;
3688 updateValueMap(I, Reg);
3689 return true;
3690 }
3691 case Instruction::BitCast: {
3692 // Select SSE2/AVX bitcasts between 128/256/512 bit vector types.
3693 if (!Subtarget->hasSSE2())
3694 return false;
3695
3696 MVT SrcVT, DstVT;
3697 if (!isTypeLegal(I->getOperand(0)->getType(), SrcVT) ||
3698 !isTypeLegal(I->getType(), DstVT))
3699 return false;
3700
3701 // Only allow vectors that use xmm/ymm/zmm.
3702 if (!SrcVT.isVector() || !DstVT.isVector() ||
3703 SrcVT.getVectorElementType() == MVT::i1 ||
3704 DstVT.getVectorElementType() == MVT::i1)
3705 return false;
3706
3707 Register Reg = getRegForValue(I->getOperand(0));
3708 if (!Reg)
3709 return false;
3710
3711 // Emit a reg-reg copy so we don't propagate cached known bits information
3712 // with the wrong VT if we fall out of fast isel after selecting this.
3713 const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
3714 Register ResultReg = createResultReg(DstClass);
3715 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3716 TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);
3717
3718 updateValueMap(I, ResultReg);
3719 return true;
3720 }
3721 }
3722
3723 return false;
3724 }
3725
3726 unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
3727 if (VT > MVT::i64)
3728 return 0;
3729
3730 uint64_t Imm = CI->getZExtValue();
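  // Materialize zero with MOV32r0 (a pseudo that expands to a 32-bit xor) and
  // then adjust to the requested width: narrow via subregister extraction, or
  // widen to i64 with SUBREG_TO_REG since a 32-bit def zeroes the upper bits.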
3731 if (Imm == 0) {
3732 Register SrcReg = fastEmitInst_(X86::MOV32r0, &X86::GR32RegClass);
3733 switch (VT.SimpleTy) {
3734 default: llvm_unreachable("Unexpected value type");
3735 case MVT::i1:
3736 case MVT::i8:
3737 return fastEmitInst_extractsubreg(MVT::i8, SrcReg, X86::sub_8bit);
3738 case MVT::i16:
3739 return fastEmitInst_extractsubreg(MVT::i16, SrcReg, X86::sub_16bit);
3740 case MVT::i32:
3741 return SrcReg;
3742 case MVT::i64: {
3743 Register ResultReg = createResultReg(&X86::GR64RegClass);
3744 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3745 TII.get(TargetOpcode::SUBREG_TO_REG), ResultReg)
3746 .addImm(0).addReg(SrcReg).addImm(X86::sub_32bit);
3747 return ResultReg;
3748 }
3749 }
3750 }
3751
3752 unsigned Opc = 0;
3753 switch (VT.SimpleTy) {
3754 default: llvm_unreachable("Unexpected value type");
3755 case MVT::i1:
3756 VT = MVT::i8;
3757 [[fallthrough]];
3758 case MVT::i8: Opc = X86::MOV8ri; break;
3759 case MVT::i16: Opc = X86::MOV16ri; break;
3760 case MVT::i32: Opc = X86::MOV32ri; break;
3761 case MVT::i64: {
3762 if (isUInt<32>(Imm))
3763 Opc = X86::MOV32ri64;
3764 else if (isInt<32>(Imm))
3765 Opc = X86::MOV64ri32;
3766 else
3767 Opc = X86::MOV64ri;
3768 break;
3769 }
3770 }
3771 return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
3772 }
3773
3774 unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
3775 if (CFP->isNullValue())
3776 return fastMaterializeFloatZero(CFP);
3777
3778 // Only the small, medium, and large code models are handled yet.
3779 CodeModel::Model CM = TM.getCodeModel();
3780 if (CM != CodeModel::Small && CM != CodeModel::Medium &&
3781 CM != CodeModel::Large)
3782 return 0;
3783
3784 // Get opcode and regclass of the output for the given load instruction.
3785 unsigned Opc = 0;
3786 bool HasSSE1 = Subtarget->hasSSE1();
3787 bool HasSSE2 = Subtarget->hasSSE2();
3788 bool HasAVX = Subtarget->hasAVX();
3789 bool HasAVX512 = Subtarget->hasAVX512();
3790 switch (VT.SimpleTy) {
3791 default: return 0;
3792 case MVT::f32:
3793 Opc = HasAVX512 ? X86::VMOVSSZrm_alt
3794 : HasAVX ? X86::VMOVSSrm_alt
3795 : HasSSE1 ? X86::MOVSSrm_alt
3796 : X86::LD_Fp32m;
3797 break;
3798 case MVT::f64:
3799 Opc = HasAVX512 ? X86::VMOVSDZrm_alt
3800 : HasAVX ? X86::VMOVSDrm_alt
3801 : HasSSE2 ? X86::MOVSDrm_alt
3802 : X86::LD_Fp64m;
3803 break;
3804 case MVT::f80:
3805 // No f80 support yet.
3806 return 0;
3807 }
3808
3809 // MachineConstantPool wants an explicit alignment.
3810 Align Alignment = DL.getPrefTypeAlign(CFP->getType());
3811
3812 // x86-32 PIC requires a PIC base register for constant pools.
3813 unsigned PICBase = 0;
3814 unsigned char OpFlag = Subtarget->classifyLocalReference(nullptr);
3815 if (OpFlag == X86II::MO_PIC_BASE_OFFSET)
3816 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3817 else if (OpFlag == X86II::MO_GOTOFF)
3818 PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
3819 else if (Subtarget->is64Bit() && TM.getCodeModel() != CodeModel::Large)
3820 PICBase = X86::RIP;
3821
3822 // Create the load from the constant pool.
3823 unsigned CPI = MCP.getConstantPoolIndex(CFP, Alignment);
3824 Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
3825
3826 // Large code model only applies to 64-bit mode.
3827 if (Subtarget->is64Bit() && CM == CodeModel::Large) {
3828 Register AddrReg = createResultReg(&X86::GR64RegClass);
3829 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
3830 AddrReg)
3831 .addConstantPoolIndex(CPI, 0, OpFlag);
3832 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3833 TII.get(Opc), ResultReg);
3834 addRegReg(MIB, AddrReg, false, PICBase, false);
3835 MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
3836 MachinePointerInfo::getConstantPool(*FuncInfo.MF),
3837 MachineMemOperand::MOLoad, DL.getPointerSize(), Alignment);
3838 MIB->addMemOperand(*FuncInfo.MF, MMO);
3839 return ResultReg;
3840 }
3841
3842 addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3843 TII.get(Opc), ResultReg),
3844 CPI, PICBase, OpFlag);
3845 return ResultReg;
3846 }
3847
3848 unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
3849 // Can't handle large GlobalValues yet.
3850 if (TM.getCodeModel() != CodeModel::Small &&
3851 TM.getCodeModel() != CodeModel::Medium)
3852 return 0;
3853 if (TM.isLargeGlobalValue(GV))
3854 return 0;
3855
3856 // Materialize addresses with LEA/MOV instructions.
3857 X86AddressMode AM;
3858 if (X86SelectAddress(GV, AM)) {
3859 // If the expression is just a base register, we're done; otherwise we need
3860 // to emit an LEA.
3861 if (AM.BaseType == X86AddressMode::RegBase &&
3862 AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == nullptr)
3863 return AM.Base.Reg;
3864
3865 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3866 if (TM.getRelocationModel() == Reloc::Static &&
3867 TLI.getPointerTy(DL) == MVT::i64) {
3868 // The displacement to the code could be more than 32 bits away, so we need
3869 // to use an instruction with a 64-bit immediate.
3870 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::MOV64ri),
3871 ResultReg)
3872 .addGlobalAddress(GV);
3873 } else {
3874 unsigned Opc =
3875 TLI.getPointerTy(DL) == MVT::i32
3876 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3877 : X86::LEA64r;
3878 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3879 TII.get(Opc), ResultReg), AM);
3880 }
3881 return ResultReg;
3882 }
3883 return 0;
3884 }
3885
3886 unsigned X86FastISel::fastMaterializeConstant(const Constant *C) {
3887 EVT CEVT = TLI.getValueType(DL, C->getType(), true);
3888
3889 // Only handle simple types.
3890 if (!CEVT.isSimple())
3891 return 0;
3892 MVT VT = CEVT.getSimpleVT();
3893
3894 if (const auto *CI = dyn_cast<ConstantInt>(C))
3895 return X86MaterializeInt(CI, VT);
3896 if (const auto *CFP = dyn_cast<ConstantFP>(C))
3897 return X86MaterializeFP(CFP, VT);
3898 if (const auto *GV = dyn_cast<GlobalValue>(C))
3899 return X86MaterializeGV(GV, VT);
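  // For undef x87 values, load +0.0 so the value actually lives on the FP
  // register stack; otherwise return 0 and let the generic FastISel code
  // handle the undef.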
3900 if (isa<UndefValue>(C)) {
3901 unsigned Opc = 0;
3902 switch (VT.SimpleTy) {
3903 default:
3904 break;
3905 case MVT::f32:
3906 if (!Subtarget->hasSSE1())
3907 Opc = X86::LD_Fp032;
3908 break;
3909 case MVT::f64:
3910 if (!Subtarget->hasSSE2())
3911 Opc = X86::LD_Fp064;
3912 break;
3913 case MVT::f80:
3914 Opc = X86::LD_Fp080;
3915 break;
3916 }
3917
3918 if (Opc) {
3919 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3920 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
3921 ResultReg);
3922 return ResultReg;
3923 }
3924 }
3925
3926 return 0;
3927 }
3928
3929 unsigned X86FastISel::fastMaterializeAlloca(const AllocaInst *C) {
3930 // Fail on dynamic allocas. At this point, getRegForValue has already
3931 // checked its CSE maps, so if we're here trying to handle a dynamic
3932 // alloca, we're not going to succeed. X86SelectAddress has a
3933 // check for dynamic allocas, because it's called directly from
3934 // various places, but targetMaterializeAlloca also needs a check
3935 // in order to avoid recursion between getRegForValue,
3936 // X86SelectAddress, and targetMaterializeAlloca.
3937 if (!FuncInfo.StaticAllocaMap.count(C))
3938 return 0;
3939 assert(C->isStaticAlloca() && "dynamic alloca in the static alloca map?");
3940
3941 X86AddressMode AM;
3942 if (!X86SelectAddress(C, AM))
3943 return 0;
3944 unsigned Opc =
3945 TLI.getPointerTy(DL) == MVT::i32
3946 ? (Subtarget->isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r)
3947 : X86::LEA64r;
3948 const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
3949 Register ResultReg = createResultReg(RC);
3950 addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
3951 TII.get(Opc), ResultReg), AM);
3952 return ResultReg;
3953 }
3954
3955 unsigned X86FastISel::fastMaterializeFloatZero(const ConstantFP *CF) {
3956 MVT VT;
3957 if (!isTypeLegal(CF->getType(), VT))
3958 return 0;
3959
3960 // Get opcode and regclass for the given zero.
3961 bool HasSSE1 = Subtarget->hasSSE1();
3962 bool HasSSE2 = Subtarget->hasSSE2();
3963 bool HasAVX512 = Subtarget->hasAVX512();
3964 unsigned Opc = 0;
3965 switch (VT.SimpleTy) {
3966 default: return 0;
3967 case MVT::f16:
3968 Opc = HasAVX512 ? X86::AVX512_FsFLD0SH : X86::FsFLD0SH;
3969 break;
3970 case MVT::f32:
3971 Opc = HasAVX512 ? X86::AVX512_FsFLD0SS
3972 : HasSSE1 ? X86::FsFLD0SS
3973 : X86::LD_Fp032;
3974 break;
3975 case MVT::f64:
3976 Opc = HasAVX512 ? X86::AVX512_FsFLD0SD
3977 : HasSSE2 ? X86::FsFLD0SD
3978 : X86::LD_Fp064;
3979 break;
3980 case MVT::f80:
3981 // No f80 support yet.
3982 return 0;
3983 }
3984
3985 Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
3986 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), ResultReg);
3987 return ResultReg;
3988 }
3989
3990
3991 bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
3992 const LoadInst *LI) {
3993 const Value *Ptr = LI->getPointerOperand();
3994 X86AddressMode AM;
3995 if (!X86SelectAddress(Ptr, AM))
3996 return false;
3997
3998 const X86InstrInfo &XII = (const X86InstrInfo &)TII;
3999
4000 unsigned Size = DL.getTypeAllocSize(LI->getType());
4001
4002 SmallVector<MachineOperand, 8> AddrOps;
4003 AM.getFullAddress(AddrOps);
4004
4005 MachineInstr *Result = XII.foldMemoryOperandImpl(
4006 *FuncInfo.MF, *MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, LI->getAlign(),
4007 /*AllowCommute=*/true);
4008 if (!Result)
4009 return false;
4010
4011 // The index register could be in the wrong register class. Unfortunately,
4012 // foldMemoryOperandImpl could have commuted the instruction, so it's not enough
4013 // to just look at OpNo + the offset to the index reg. We actually need to
4014 // scan the instruction to find the index reg and see if it's the correct reg
4015 // class.
4016 unsigned OperandNo = 0;
4017 for (MachineInstr::mop_iterator I = Result->operands_begin(),
4018 E = Result->operands_end(); I != E; ++I, ++OperandNo) {
4019 MachineOperand &MO = *I;
4020 if (!MO.isReg() || MO.isDef() || MO.getReg() != AM.IndexReg)
4021 continue;
4022 // Found the index reg, now try to rewrite it.
4023 Register IndexReg = constrainOperandRegClass(Result->getDesc(),
4024 MO.getReg(), OperandNo);
4025 if (IndexReg == MO.getReg())
4026 continue;
4027 MO.setReg(IndexReg);
4028 }
4029
4030 Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
4031 Result->cloneInstrSymbols(*FuncInfo.MF, *MI);
4032 MachineBasicBlock::iterator I(MI);
4033 removeDeadCode(I, std::next(I));
4034 return true;
4035 }
4036
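// Emit an instruction that takes four register operands, constraining each
// operand to the register class the instruction expects. If the instruction
// has no explicit definition, copy the result out of its first implicit def.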
4037 unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
4038 const TargetRegisterClass *RC,
4039 unsigned Op0, unsigned Op1,
4040 unsigned Op2, unsigned Op3) {
4041 const MCInstrDesc &II = TII.get(MachineInstOpcode);
4042
4043 Register ResultReg = createResultReg(RC);
4044 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
4045 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
4046 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
4047 Op3 = constrainOperandRegClass(II, Op3, II.getNumDefs() + 3);
4048
4049 if (II.getNumDefs() >= 1)
4050 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
4051 .addReg(Op0)
4052 .addReg(Op1)
4053 .addReg(Op2)
4054 .addReg(Op3);
4055 else {
4056 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
4057 .addReg(Op0)
4058 .addReg(Op1)
4059 .addReg(Op2)
4060 .addReg(Op3);
4061 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
4062 ResultReg)
4063 .addReg(II.implicit_defs()[0]);
4064 }
4065 return ResultReg;
4066 }
4067
4068
4069 namespace llvm {
4070 FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
4071 const TargetLibraryInfo *libInfo) {
4072 return new X86FastISel(funcInfo, libInfo);
4073 }
4074 }
4075