xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp (revision 29332c0dcee1e80c9fb871e06c3160bd5deb1b44)
1 //===-- lib/CodeGen/GlobalISel/InlineAsmLowering.cpp ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the lowering from LLVM IR inline asm to MIR INLINEASM
11 ///
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
15 #include "llvm/CodeGen/Analysis.h"
16 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
17 #include "llvm/CodeGen/GlobalISel/Utils.h"
18 #include "llvm/CodeGen/MachineOperand.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/Module.h"
25 
26 #define DEBUG_TYPE "inline-asm-lowering"
27 
28 using namespace llvm;
29 
// Out-of-line "anchor" method: defining it in this translation unit pins the
// class's vtable (and type info) here instead of emitting copies in every
// user of the header. It intentionally does nothing.
void InlineAsmLowering::anchor() {}
31 
32 namespace {
33 
/// GISelAsmOperandInfo - This contains information for each constraint that we
/// are lowering.
class GISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// Regs - If this is a register or register class operand, this
  /// contains the set of assigned registers corresponding to the operand.
  /// Populated by getRegistersForValue(); empty for memory operands and for
  /// constraints that could not be mapped to a register class.
  SmallVector<Register, 1> Regs;

  /// Copy the target-independent constraint info; Regs starts out empty and
  /// is filled in during register assignment.
  explicit GISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &Info)
      : TargetLowering::AsmOperandInfo(Info) {}
};
45 
46 using GISelAsmOperandInfoVector = SmallVector<GISelAsmOperandInfo, 16>;
47 
48 class ExtraFlags {
49   unsigned Flags = 0;
50 
51 public:
52   explicit ExtraFlags(const CallBase &CB) {
53     const InlineAsm *IA = cast<InlineAsm>(CB.getCalledOperand());
54     if (IA->hasSideEffects())
55       Flags |= InlineAsm::Extra_HasSideEffects;
56     if (IA->isAlignStack())
57       Flags |= InlineAsm::Extra_IsAlignStack;
58     if (CB.isConvergent())
59       Flags |= InlineAsm::Extra_IsConvergent;
60     Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
61   }
62 
63   void update(const TargetLowering::AsmOperandInfo &OpInfo) {
64     // Ideally, we would only check against memory constraints.  However, the
65     // meaning of an Other constraint can be target-specific and we can't easily
66     // reason about it.  Therefore, be conservative and set MayLoad/MayStore
67     // for Other constraints as well.
68     if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
69         OpInfo.ConstraintType == TargetLowering::C_Other) {
70       if (OpInfo.Type == InlineAsm::isInput)
71         Flags |= InlineAsm::Extra_MayLoad;
72       else if (OpInfo.Type == InlineAsm::isOutput)
73         Flags |= InlineAsm::Extra_MayStore;
74       else if (OpInfo.Type == InlineAsm::isClobber)
75         Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
76     }
77   }
78 
79   unsigned get() const { return Flags; }
80 };
81 
82 } // namespace
83 
/// Assign virtual/physical registers for the specified register operand.
///
/// On success the chosen registers are appended to OpInfo.Regs. For a
/// constraint that names a specific physical register, consecutive members
/// of the register class starting at that register are used; otherwise fresh
/// virtual registers of the class are created. RefOpInfo supplies the
/// constraint code/VT to query (it differs from OpInfo for matching input
/// constraints). Memory constraints, unmapped constraints, and matching
/// inputs leave OpInfo.Regs untouched.
static void getRegistersForValue(MachineFunction &MF,
                                 MachineIRBuilder &MIRBuilder,
                                 GISelAsmOperandInfo &OpInfo,
                                 GISelAsmOperandInfo &RefOpInfo) {

  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  Register AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  // Initialize NumRegs.
  // Number of registers needed to carry the constraint's value type; 1 when
  // no type information is available (MVT::Other).
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs =
        TLI.getNumRegisters(MF.getFunction().getContext(), OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, but the type of
  // the operand requires more than one register to be passed, we allocate the
  // required amount of physical registers, starting from the selected physical
  // register.
  // For this, first retrieve a register iterator for the given register class
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Advance the iterator to the assigned register (if set)
  // NOTE(review): *I is dereferenced before the end() check, and the assert
  // only runs after ++I; this relies on AssignedReg actually being a member
  // of RC (guaranteed by getRegForInlineAsmConstraint on success).
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be a member of provided RC");
  }

  // Finally, assign the registers. If the AssignedReg isn't set, create virtual
  // registers with the provided register class
  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    OpInfo.Regs.push_back(R);
  }
}
140 
141 /// Return an integer indicating how general CT is.
142 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) {
143   switch (CT) {
144   case TargetLowering::C_Immediate:
145   case TargetLowering::C_Other:
146   case TargetLowering::C_Unknown:
147     return 0;
148   case TargetLowering::C_Register:
149     return 1;
150   case TargetLowering::C_RegisterClass:
151     return 2;
152   case TargetLowering::C_Memory:
153     return 3;
154   }
155   llvm_unreachable("Invalid constraint type");
156 }
157 
158 static void chooseConstraint(TargetLowering::AsmOperandInfo &OpInfo,
159                              const TargetLowering *TLI) {
160   assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options");
161   unsigned BestIdx = 0;
162   TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown;
163   int BestGenerality = -1;
164 
165   // Loop over the options, keeping track of the most general one.
166   for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) {
167     TargetLowering::ConstraintType CType =
168         TLI->getConstraintType(OpInfo.Codes[i]);
169 
170     // Indirect 'other' or 'immediate' constraints are not allowed.
171     if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory ||
172                                CType == TargetLowering::C_Register ||
173                                CType == TargetLowering::C_RegisterClass))
174       continue;
175 
176     // If this is an 'other' or 'immediate' constraint, see if the operand is
177     // valid for it. For example, on X86 we might have an 'rI' constraint. If
178     // the operand is an integer in the range [0..31] we want to use I (saving a
179     // load of a register), otherwise we must use 'r'.
180     if (CType == TargetLowering::C_Other ||
181         CType == TargetLowering::C_Immediate) {
182       assert(OpInfo.Codes[i].size() == 1 &&
183              "Unhandled multi-letter 'other' constraint");
184       // FIXME: prefer immediate constraints if the target allows it
185     }
186 
187     // Things with matching constraints can only be registers, per gcc
188     // documentation.  This mainly affects "g" constraints.
189     if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput())
190       continue;
191 
192     // This constraint letter is more general than the previous one, use it.
193     int Generality = getConstraintGenerality(CType);
194     if (Generality > BestGenerality) {
195       BestType = CType;
196       BestIdx = i;
197       BestGenerality = Generality;
198     }
199   }
200 
201   OpInfo.ConstraintCode = OpInfo.Codes[BestIdx];
202   OpInfo.ConstraintType = BestType;
203 }
204 
205 static void computeConstraintToUse(const TargetLowering *TLI,
206                                    TargetLowering::AsmOperandInfo &OpInfo) {
207   assert(!OpInfo.Codes.empty() && "Must have at least one constraint");
208 
209   // Single-letter constraints ('r') are very common.
210   if (OpInfo.Codes.size() == 1) {
211     OpInfo.ConstraintCode = OpInfo.Codes[0];
212     OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
213   } else {
214     chooseConstraint(OpInfo, TLI);
215   }
216 
217   // 'X' matches anything.
218   if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) {
219     // Labels and constants are handled elsewhere ('X' is the only thing
220     // that matches labels).  For Functions, the type here is the type of
221     // the result, which is not what we want to look at; leave them alone.
222     Value *Val = OpInfo.CallOperandVal;
223     if (isa<BasicBlock>(Val) || isa<ConstantInt>(Val) || isa<Function>(Val))
224       return;
225 
226     // Otherwise, try to resolve it to something we know about by looking at
227     // the actual operand type.
228     if (const char *Repl = TLI->LowerXConstraint(OpInfo.ConstraintVT)) {
229       OpInfo.ConstraintCode = Repl;
230       OpInfo.ConstraintType = TLI->getConstraintType(OpInfo.ConstraintCode);
231     }
232   }
233 }
234 
235 static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) {
236   unsigned Flag = I.getOperand(OpIdx).getImm();
237   return InlineAsm::getNumOperandRegisters(Flag);
238 }
239 
240 static bool buildAnyextOrCopy(Register Dst, Register Src,
241                               MachineIRBuilder &MIRBuilder) {
242   const TargetRegisterInfo *TRI =
243       MIRBuilder.getMF().getSubtarget().getRegisterInfo();
244   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
245 
246   auto SrcTy = MRI->getType(Src);
247   if (!SrcTy.isValid()) {
248     LLVM_DEBUG(dbgs() << "Source type for copy is not valid\n");
249     return false;
250   }
251   unsigned SrcSize = TRI->getRegSizeInBits(Src, *MRI);
252   unsigned DstSize = TRI->getRegSizeInBits(Dst, *MRI);
253 
254   if (DstSize < SrcSize) {
255     LLVM_DEBUG(dbgs() << "Input can't fit in destination reg class\n");
256     return false;
257   }
258 
259   // Attempt to anyext small scalar sources.
260   if (DstSize > SrcSize) {
261     if (!SrcTy.isScalar()) {
262       LLVM_DEBUG(dbgs() << "Can't extend non-scalar input to size of"
263                            "destination register class\n");
264       return false;
265     }
266     Src = MIRBuilder.buildAnyExt(LLT::scalar(DstSize), Src).getReg(0);
267   }
268 
269   MIRBuilder.buildCopy(Dst, Src);
270   return true;
271 }
272 
// Lower an IR inline-asm call site into a MIR INLINEASM instruction.
//
// The lowering runs in three phases:
//   1. Parse and resolve every constraint: compute each operand's value type,
//      pick a concrete constraint code/type, and accumulate the extra-info
//      flag word.
//   2. Build the INLINEASM instruction operand-by-operand (a flag word
//      followed by its registers/immediates per constraint), materializing
//      any input copies before the instruction is inserted.
//   3. Insert the instruction and copy defined output registers into the
//      vregs of the call's results.
//
// Returns false (triggering GlobalISel fallback) for any construct this
// lowering does not support yet.
bool InlineAsmLowering::lowerInlineAsm(
    MachineIRBuilder &MIRBuilder, const CallBase &Call,
    std::function<ArrayRef<Register>(const Value &Val)> GetOrCreateVRegs)
    const {
  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());

  /// ConstraintOperands - Information about all of the constraints.
  GISelAsmOperandInfoVector ConstraintOperands;

  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI->ParseConstraints(DL, TRI, Call);

  ExtraFlags ExtraInfo(Call);
  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(GISelAsmOperandInfo(T));
    GISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.Type == InlineAsm::isInput ||
        (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {

      OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo++));

      if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
        return false;
      }

      Type *OpTy = OpInfo.CallOperandVal->getType();

      // If this is an indirect operand, the operand is a pointer to the
      // accessed type.
      if (OpInfo.isIndirect) {
        PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
        if (!PtrTy)
          report_fatal_error("Indirect operand for inline asm not a pointer!");
        OpTy = PtrTy->getElementType();
      }

      // FIXME: Support aggregate input operands
      if (!OpTy->isSingleValueType()) {
        LLVM_DEBUG(
            dbgs() << "Aggregate input operands are not supported yet\n");
        return false;
      }

      OpInfo.ConstraintVT =
          TLI->getAsmOperandValueType(DL, OpTy, true).getSimpleVT();

    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      // Direct output: the value type comes from the call's result type
      // (per-element for multi-result asm returning a struct).
      assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(Call.getType())) {
        OpInfo.ConstraintVT =
            TLI->getSimpleValueType(DL, STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI->getAsmOperandValueType(DL, Call.getType()).getSimpleVT();
      }
      ++ResNo;
    } else {
      // Clobbers (and indirect-output bookkeeping) carry no value type.
      OpInfo.ConstraintVT = MVT::Other;
    }

    // i64x8 operand values are not supported by this lowering; fall back.
    if (OpInfo.ConstraintVT == MVT::i64x8)
      return false;

    // Compute the constraint code and ConstraintType to use.
    computeConstraintToUse(TLI, OpInfo);

    // The selected constraint type might expose new sideeffects
    ExtraInfo.update(OpInfo);
  }

  // At this point, all operand types are decided.
  // Create the MachineInstr, but don't insert it yet since input
  // operands still need to insert instructions before this one
  auto Inst = MIRBuilder.buildInstrNoInsert(TargetOpcode::INLINEASM)
                  .addExternalSymbol(IA->getAsmString().c_str())
                  .addImm(ExtraInfo.get());

  // Starting from this operand: flag followed by register(s) will be added as
  // operands to Inst for each constraint. Used for matching input constraints.
  unsigned StartIdx = Inst->getNumOperands();

  // Collects the output operands for later processing
  GISelAsmOperandInfoVector OutputOperands;

  for (auto &OpInfo : ConstraintOperands) {
    // For a matching input, query registers via the constraint it matches.
    GISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;

    // Assign registers for register operands
    getRegistersForValue(MF, MIRBuilder, OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM instruction to know about this
        // output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory output to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
      } else {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass. Find a register that we can use.
        assert(OpInfo.ConstraintType == TargetLowering::C_Register ||
               OpInfo.ConstraintType == TargetLowering::C_RegisterClass);

        if (OpInfo.Regs.empty()) {
          LLVM_DEBUG(dbgs()
                     << "Couldn't allocate output register for constraint\n");
          return false;
        }

        // Add information to the INLINEASM instruction to know that this
        // register is set.
        unsigned Flag = InlineAsm::getFlagWord(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            OpInfo.Regs.size());
        if (OpInfo.Regs.front().isVirtual()) {
          // Put the register class of the virtual registers in the flag word.
          // That way, later passes can recompute register class constraints for
          // inline assembly as well as normal instructions. Don't do this for
          // tied operands that can use the regclass information from the def.
          const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
          Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
        }

        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg,
                      RegState::Define | getImplRegState(Reg.isPhysical()) |
                          (OpInfo.isEarlyClobber ? RegState::EarlyClobber : 0));
        }

        // Remember this output operand for later processing
        OutputOperands.push_back(OpInfo);
      }

      break;
    case InlineAsm::isInput: {
      if (OpInfo.isMatchingInputConstraint()) {
        unsigned DefIdx = OpInfo.getMatchedOperand();
        // Find operand with register def that corresponds to DefIdx.
        // Walk the already-emitted flag words, skipping each one plus the
        // registers it covers.
        unsigned InstFlagIdx = StartIdx;
        for (unsigned i = 0; i < DefIdx; ++i)
          InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1;
        assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag");

        unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm();
        if (InlineAsm::isMemKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
                               "supported. This should be target specific.\n");
          return false;
        }
        if (!InlineAsm::isRegDefKind(MatchedOperandFlag) &&
            !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) {
          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
          return false;
        }

        // We want to tie input to register in next operand.
        unsigned DefRegIdx = InstFlagIdx + 1;
        Register Def = Inst->getOperand(DefRegIdx).getReg();

        ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(SrcRegs.size() == 1 && "Single register is expected here");

        // When Def is physreg: use given input.
        Register In = SrcRegs[0];
        // When Def is vreg: copy input to new vreg with same reg class as Def.
        if (Def.isVirtual()) {
          In = MRI->createVirtualRegister(MRI->getRegClass(Def));
          if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
            return false;
        }

        // Add Flag and input register operand (In) to Inst. Tie In to Def.
        unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
        unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
        Inst.addImm(Flag);
        Inst.addReg(In);
        Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
          OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
                             "not supported yet\n");
        return false;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {

        std::vector<MachineOperand> Ops;
        if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                          OpInfo.ConstraintCode, Ops,
                                          MIRBuilder)) {
          LLVM_DEBUG(dbgs() << "Don't support constraint: "
                            << OpInfo.ConstraintCode << " yet\n");
          return false;
        }

        assert(Ops.size() > 0 &&
               "Expected constraint to be lowered to at least one operand");

        // Add information to the INLINEASM node to know about this input.
        unsigned OpFlags =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        Inst.addImm(OpFlags);
        Inst.add(Ops);
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {

        if (!OpInfo.isIndirect) {
          LLVM_DEBUG(dbgs()
                     << "Cannot indirectify memory input operands yet\n");
          return false;
        }

        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");

        unsigned ConstraintID =
            TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        Inst.addImm(OpFlags);
        ArrayRef<Register> SourceRegs =
            GetOrCreateVRegs(*OpInfo.CallOperandVal);
        assert(
            SourceRegs.size() == 1 &&
            "Expected the memory input to fit into a single virtual register");
        Inst.addReg(SourceRegs[0]);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register) &&
             "Unknown constraint type!");

      if (OpInfo.isIndirect) {
        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
                             "for constraint '"
                          << OpInfo.ConstraintCode << "'\n");
        return false;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.Regs.empty()) {
        LLVM_DEBUG(
            dbgs()
            << "Couldn't allocate input register for register constraint\n");
        return false;
      }

      unsigned NumRegs = OpInfo.Regs.size();
      ArrayRef<Register> SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
      assert(NumRegs == SourceRegs.size() &&
             "Expected the number of input registers to match the number of "
             "source registers");

      if (NumRegs > 1) {
        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
                             "not supported yet\n");
        return false;
      }

      unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, NumRegs);
      if (OpInfo.Regs.front().isVirtual()) {
        // Put the register class of the virtual registers in the flag word.
        const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front());
        Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
      }
      Inst.addImm(Flag);
      if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder))
        return false;
      Inst.addReg(OpInfo.Regs[0]);
      break;
    }

    case InlineAsm::isClobber: {

      unsigned NumRegs = OpInfo.Regs.size();
      if (NumRegs > 0) {
        unsigned Flag =
            InlineAsm::getFlagWord(InlineAsm::Kind_Clobber, NumRegs);
        Inst.addImm(Flag);

        for (Register Reg : OpInfo.Regs) {
          Inst.addReg(Reg, RegState::Define | RegState::EarlyClobber |
                               getImplRegState(Reg.isPhysical()));
        }
      }
      break;
    }
    }
  }

  // Preserve source-location metadata for asm diagnostics.
  if (const MDNode *SrcLoc = Call.getMetadata("srcloc"))
    Inst.addMetadata(SrcLoc);

  // All inputs are handled, insert the instruction now
  MIRBuilder.insertInstr(Inst);

  // Finally, copy the output operands into the output registers
  ArrayRef<Register> ResRegs = GetOrCreateVRegs(Call);
  if (ResRegs.size() != OutputOperands.size()) {
    LLVM_DEBUG(dbgs() << "Expected the number of output registers to match the "
                         "number of destination registers\n");
    return false;
  }
  for (unsigned int i = 0, e = ResRegs.size(); i < e; i++) {
    GISelAsmOperandInfo &OpInfo = OutputOperands[i];

    if (OpInfo.Regs.empty())
      continue;

    switch (OpInfo.ConstraintType) {
    case TargetLowering::C_Register:
    case TargetLowering::C_RegisterClass: {
      if (OpInfo.Regs.size() > 1) {
        LLVM_DEBUG(dbgs() << "Output operands with multiple defining "
                             "registers are not supported yet\n");
        return false;
      }

      Register SrcReg = OpInfo.Regs[0];
      unsigned SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
      if (MRI->getType(ResRegs[i]).getSizeInBits() < SrcSize) {
        // First copy the non-typed virtual register into a generic virtual
        // register
        Register Tmp1Reg =
            MRI->createGenericVirtualRegister(LLT::scalar(SrcSize));
        MIRBuilder.buildCopy(Tmp1Reg, SrcReg);
        // Need to truncate the result of the register
        MIRBuilder.buildTrunc(ResRegs[i], Tmp1Reg);
      } else {
        MIRBuilder.buildCopy(ResRegs[i], SrcReg);
      }
      break;
    }
    case TargetLowering::C_Immediate:
    case TargetLowering::C_Other:
      LLVM_DEBUG(
          dbgs() << "Cannot lower target specific output constraints yet\n");
      return false;
    case TargetLowering::C_Memory:
      break; // Already handled.
    case TargetLowering::C_Unknown:
      LLVM_DEBUG(dbgs() << "Unexpected unknown constraint\n");
      return false;
    }
  }

  return true;
}
658 
659 bool InlineAsmLowering::lowerAsmOperandForConstraint(
660     Value *Val, StringRef Constraint, std::vector<MachineOperand> &Ops,
661     MachineIRBuilder &MIRBuilder) const {
662   if (Constraint.size() > 1)
663     return false;
664 
665   char ConstraintLetter = Constraint[0];
666   switch (ConstraintLetter) {
667   default:
668     return false;
669   case 'i': // Simple Integer or Relocatable Constant
670   case 'n': // immediate integer with a known value.
671     if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
672       assert(CI->getBitWidth() <= 64 &&
673              "expected immediate to fit into 64-bits");
674       // Boolean constants should be zero-extended, others are sign-extended
675       bool IsBool = CI->getBitWidth() == 1;
676       int64_t ExtVal = IsBool ? CI->getZExtValue() : CI->getSExtValue();
677       Ops.push_back(MachineOperand::CreateImm(ExtVal));
678       return true;
679     }
680     return false;
681   }
682 }
683