//===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
///
//===----------------------------------------------------------------------===//
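//
// As a rough illustration (operand flags and register numbers are target
// dependent, so this is only a sketch): an IR fragment such as
//
//   %res = call i32 asm "add $0, $1, $2", "=r,r,r"(i32 %a, i32 %b)
//
// becomes a single INLINEASM machine instruction that carries the asm string,
// an extra-info immediate, and one group of (flag immediate, register/imm
// operands) per constraint, followed by ordinary COPY instructions that move
// the defined virtual registers into the vregs of the call's result.
//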

#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Module.h"

#define DEBUG_TYPE "inline-asm-lowering"

using namespace llvm;

void InlineAsmLowering::anchor() {}

namespace {

/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  SmallVector<Register, 1> Regs;

  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};

using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(const CallBase &CB) {
    const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CB.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
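    // getDialect() is 0 (AT&T) or 1 (Intel), so the multiplication sets the
    // Extra_AsmDialect bit exactly when the Intel dialect is requested.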
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints.  However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it.  Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // namespace

/// Assign virtual/physical registers for the specified register operand.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);
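  // E.g. an i64 operand on a target with 32-bit registers reports NumRegs == 2.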

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}

/// Return an integer indicating how general CT is.
static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
  switch (CT) {
  case TargetLowering::C_Immediate:
  case TargetLowering::C_Other:
  case TargetLowering::C_Unknown:
    return 0;
  case TargetLowering::C_Register:
    return 1;
  case TargetLowering::C_RegisterClass:
    return 2;
  case TargetLowering::C_Memory:
  case TargetLowering::C_Address:
    return 3;
  }
  llvm_unreachable("Invalid constraint type");
}
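
// Illustrative example: with the ranking above, a multi-alternative constraint
// such as "rm" resolves to its memory alternative, since C_Memory (3) outranks
// C_RegisterClass (2).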

static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
                             const TargetLowering *TLI) {
  assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
  unsigned BestIdx = 0;
  TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
  int BestGenerality = -1;

  // Loop over the options, keeping track of the most general one.
  for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
    TargetLowering::ConstraintType CType =
        TLI->getConstraintType(OpInfo.Codes[i]);

    // Indirect 'other' or 'immediate' constraints are not allowed.
    if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
                               CType == TargetLowering::C_Register ||
                               CType == TargetLowering::C_RegisterClass))
      continue;

    // If this is an 'other' or 'immediate' constraint, see if the operand is
    // valid for it. For example, on X86 we might have an 'rI' constraint. If
    // the operand is an integer in the range [0..31] we want to use I (saving a
    // load of a register), otherwise we must use 'r'.
    if (CType == TargetLowering::C_Other ||
        CType == TargetLowering::C_Immediate) {
      assert(OpInfo.Codes[i].size() == 1 &&
             "Unhandled multi-letter 'other' constraint");
      // FIXME: prefer immediate constraints if the target allows it
    }

    // Things with matching constraints can only be registers, per gcc
    // documentation.  This mainly affects "g" constraints.
    if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
      continue;

    // This constraint letter is more general than the previous one, use it.
    int Generality = getConstraintGenerality(CType);
    if (Generality > BestGenerality) {
      BestType = CType;
      BestIdx = i;
      BestGenerality = Generality;
    }
  }

  OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
  OpInfo.ConstraintType = BestType;
}

static void computeConstraintToUse(const TargetLowering *TLI,
                                   TargetLowering::AsmOperandInfo &OpInfo) {
  assert(!OpInfo.Codes.empty() && "Must have at least one constraint");

  // Single-letter constraints ('r') are very common.
  if (OpInfo.Codes.size() == 1) {
    OpInfo.ConstraintCode = OpInfo.Codes[0];
    OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
  } else {
    chooseConstraint(OpInfo, TLI);
  }

  // 'X' matches anything.
  if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
    // Labels and constants are handled elsewhere ('X' is the only thing
    // that matches labels).  For Functions, the type here is the type of
    // the result, which is not what we want to look at; leave them alone.
    Value *Val = OpInfo.CallOperandVal;
    if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
      return;

    // Otherwise, try to resolve it to something we know about by looking at
    // the actual operand type.
    if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
      OpInfo.ConstraintCode = Repl;
      OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
    }
  }
}

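/// Return the number of register operands covered by the operand-flag
/// immediate at operand index \p OpIdx of \p I.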
static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
  unsigned Flag = I.getOperand(OpIdx).getImm();
  return InlineAsm::getNumOperandRegisters(Flag);
}

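/// Copy \p Src into \p Dst, any-extending a scalar source that is narrower
/// than the destination register. Returns false if no such copy can be built.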
static bool buildAnyextOrCopy(Register Dst, Register Src,
                              MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI =
      MIRBuilder.getMF().getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  auto SrcTy = MRI->getType(Src);
  if (!SrcTy.isValid()) {
    LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
    return false;
  }
  unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
  unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);

  if (DstSize < SrcSize) {
    LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
    return false;
  }

  // Attempt to anyext small scalar sources.
  if (DstSize > SrcSize) {
    if (!SrcTy.isScalar()) {
      LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of "
                           "destination register class\n");
      return false;
    }
    Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
  }

  MIRBuilder.buildCopy(Dst, Src);
  return true;
}

bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.hasArg()) {
      OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo));

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        OpTy = Call.getParamElementType(ArgNo);
        assert(OpTy && "Indirect operand must have elementtype attribute");
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();
      ++ArgNo;
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      assert(OpInfo.Type != InlineAsm::isLabel &&
             "GlobalISel currently doesn't support callbr");
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new side effects.
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet, since lowering the input
  // operands below may need to emit instructions (e.g. copies) ahead of it.
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().c_str())
                  .addImm(ExtraInfo.get());

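  // The instruction built so far looks roughly like this (illustrative only;
  // the encoding of the flag immediates is defined in llvm/IR/InlineAsm.h):
  //
  //   INLINEASM &"<asm string>", <extra-info imm>,
  //             <flag imm>, <reg(s)/imm(s)...>,   ; one group per constraint
  //             ...
  //
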
  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly, for C_Register /
        // C_RegisterClass / C_Other).
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
               OpInfo.ConstraintType == TargetLowering::C_Other);

        // Find a register that we can use.
        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        unsigned Flag = InlineAsm::getFlagWord(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput:
    case InlineAsm::isLabel: {
      if (OpInfo.isMatchingInputConstraint()) {
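        // A matching input constraint (e.g. the "0" in "=r,0") must end up in
        // the same register as the output it refers to, so locate that
        // output's flag/register operands and tie the input operand to them.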
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm();
        if (InlineAsm::isMemKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!InlineAsm::isRegDefKind(MatchedOperandFlag) &&
            !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
        unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
        Inst.addImm(Flag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        unsigned OpFlags =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag =
            InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      LLT ResTy = MRI->getType(ResRegs[i]);
      if (ResTy.isScalar() && ResTy.getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Then truncate the copied value down to the result type
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else if (ResTy.getSizeInBits() == SrcSize) {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      } else {
        LLVM_DEBUG(dbgs() << "Unhandled output operand with "
                             "mismatched register size\n");
        return false;
      }

      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Address:
      break; // Silence warning.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}

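/// Lower \p Val for a simple immediate constraint ('i' or 'n') into an
/// immediate MachineOperand appended to \p Ops; returns false for any other
/// constraint letter, which is left for targets to handle.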
bool InlineAsmLowering::lowerAsmOperandForConstraint(
    Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
    MachineIRBuilder &MIRBuilder) const {
  if (Constraint.size() > 1)
    return false;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    return false;
  case 'i': // Simple Integer or Relocatable Constant
  case 'n': // immediate integer with a known value.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
      assert(CI->getBitWidth() <= 64 &&
             "expected immediate to fit into 64-bits");
      // Boolean constants should be zero-extended, others are sign-extended
      bool IsBool = CI->getBitWidth() == 1;
      int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
      Ops.push_back(MachineOperand::CreateImm(ExtVal));
      return true;
    }
    return false;
  }
}