//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation  ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that BPF uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "BPFISelLowering.h"
#include "BPF.h"
#include "BPFSubtarget.h"
#include "BPFTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "bpf-lower"

static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
  cl::Hidden, cl::init(false),
  cl::desc("Expand memcpy into load/store pairs in order"));
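// Example (illustrative, not part of the original source): this is a hidden
// llc option, so an invocation that keeps memcpy load/store pairs in order
// would look roughly like:
//
//   llc -march=bpf -bpf-expand-memcpy-in-order input.ll -o out.s
//
// The exact driver spelling depends on how the toolchain is invoked.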

static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
                 SDValue Val) {
  MachineFunction &MF = DAG.getMachineFunction();
  std::string Str;
  raw_string_ostream OS(Str);
  OS << Msg;
  Val->print(OS);
  OS.flush();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc()));
}
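// Both fail() overloads report through the LLVMContext diagnostic handler
// instead of aborting, so front ends such as clang surface the message as an
// ordinary error tied to the enclosing function. The second overload also
// appends a printed SDValue; for example (from LowerCall below):
//
//   fail(CLI.DL, DAG, "too many args to ", Callee);
//
// emits the message followed by the callee node.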

BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
                                     const BPFSubtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(BPF::R11);

  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::SHL_PARTS, VT, Expand);
    setOperationAction(ISD::SRL_PARTS, VT, Expand);
    setOperationAction(ISD::SRA_PARTS, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Custom);
  }

  if (STI.getHasAlu32()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Promote);
    setOperationAction(ISD::BR_CC, MVT::i32,
                       STI.getHasJmp32() ? Custom : Promote);
  }

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);

  // Extended load operations for i1 types must be promoted
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
  }
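  // For example (illustrative): BPF at this ISA level has no sign-extending
  // load, so IR such as
  //
  //   %v = load i8, i8* %p
  //   %w = sext i8 %v to i64
  //
  // is legalized into a plain extending load followed by an explicit
  // sign-extension; since SIGN_EXTEND_INREG is also expanded above, that
  // extension ends up as a shift-left/arithmetic-shift-right pair.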

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments
  setMinFunctionAlignment(Align(8));
  setPrefFunctionAlignment(Align(8));

  if (BPFExpandMemcpyInOrder) {
    // LLVM generic code will try to expand memcpy into load/store pairs at
    // this stage, which is before quite a few IR optimization passes. The
    // loads and stores could therefore be moved apart from each other, which
    // causes trouble for the memcpy pattern matcher inside kernel eBPF JIT
    // compilers.
    //
    // When -bpf-expand-memcpy-in-order is specified, we want to defer the
    // expansion of memcpy to a later stage in the IR optimization pipeline so
    // those load/store pairs won't be touched and can be kept in order. Hence,
    // we set MaxStoresPerMem* to zero to disable the generic
    // getMemcpyLoadsAndStores code path, and ask LLVM to use the target
    // expander EmitTargetCodeForMemcpy.
    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 0;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 0;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 0;
  } else {
    // inline memcpy() for kernel to see explicit copy
    unsigned CommonMaxStores =
      STI.getSelectionDAGInfo()->getCommonMaxStoresPerMemFunc();

    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores;
  }

  // CPU/Feature control
  HasAlu32 = STI.getHasAlu32();
  HasJmp32 = STI.getHasJmp32();
  HasJmpExt = STI.getHasJmpExt();
}

bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  return false;
}

std::pair<unsigned, const TargetRegisterClass *>
BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1)
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'r': // GENERAL_REGS
      return std::make_pair(0U, &BPF::GPRRegClass);
    default:
      break;
    }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

// Calling Convention Implementation
#include "BPFGenCallingConv.inc"
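// The generated include above provides CC_BPF64/CC_BPF32 and
// RetCC_BPF64/RetCC_BPF32, which are used below to assign call arguments to
// registers R1-R5 and return values to R0.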

SDValue BPFTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  for (auto &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
      switch (SimpleTy) {
      default: {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << RegVT.getEVTString() << '\n';
        llvm_unreachable(0);
      }
      case MVT::i32:
      case MVT::i64:
        Register VReg = RegInfo.createVirtualRegister(
            SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);

        // If this is a value that has been promoted to a wider type, insert an
        // assert[sz]ext to capture this, then truncate to the right size.
        if (VA.getLocInfo() == CCValAssign::SExt)
          ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));

        if (VA.getLocInfo() != CCValAssign::Full)
          ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);

        InVals.push_back(ArgValue);

        break;
      }
    } else {
      fail(DL, DAG, "defined with too many args");
      InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
    }
  }

  if (IsVarArg || MF.getFunction().hasStructRetAttr()) {
    fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
  }

  return Chain;
}

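// BPF passes call arguments in registers R1-R5 only; there is no stack
// argument passing, so anything beyond five arguments is diagnosed (see the
// "too many args" checks in LowerFormalArguments and LowerCall). For example
// (illustrative), a C declaration such as
//
//   long f(long a, long b, long c, long d, long e, long f);
//
// cannot be lowered through this path.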
const unsigned BPFTargetLowering::MaxArgs = 5;

SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (Outs.size() > MaxArgs)
    fail(CLI.DL, DAG, "too many args to ", Callee);

  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;

    fail(CLI.DL, DAG, "pass by value not supported ", Callee);
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;

  // Walk arg assignments
  for (unsigned i = 0,
                e = std::min(static_cast<unsigned>(ArgLocs.size()), MaxArgs);
       i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into RegsToPass vector
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    else
      llvm_unreachable("call arg pass bug");
  }

  SDValue InFlag;

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers.  The InFlag
  // is necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
    fail(CLI.DL, DAG, Twine("A call to built-in function '"
                            + StringRef(E->getSymbol())
                            + "' is not supported."));
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(
      Chain, DAG.getConstant(NumBytes, CLI.DL, PtrVT, true),
      DAG.getConstant(0, CLI.DL, PtrVT, true), InFlag, CLI.DL);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}

SDValue
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_FLAG;

  // CCValAssign - represent the assignment of the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (MF.getFunction().getReturnType()->isAggregateType()) {
    fail(DL, DAG, "only integer returns supported");
    return DAG.getNode(Opc, DL, MVT::Other, Chain);
  }

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with glue,
    // so that they are not scheduled apart.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (Ins.size() >= 2) {
    fail(DL, DAG, "only small returns supported");
    for (unsigned i = 0, e = Ins.size(); i != e; ++i)
      InVals.push_back(DAG.getConstant(0, DL, Ins[i].VT));
    return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InFlag).getValue(1);
  }

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physreg.
  for (auto &Val : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

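// Without the jump-extension feature (getHasJmpExt), BPF only provides the
// "greater than" flavours of conditional jumps (JGT/JGE/JSGT/JSGE), so
// less-than style conditions are rewritten by swapping the operands and the
// condition code, e.g. (a < b) becomes (b > a).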
static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETLT:
  case ISD::SETLE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
                     DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
}

SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};

  return DAG.getNode(BPFISD::SELECT_CC, DL, VTs, Ops);
}

const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((BPFISD::NodeType)Opcode) {
  case BPFISD::FIRST_NUMBER:
    break;
  case BPFISD::RET_FLAG:
    return "BPFISD::RET_FLAG";
  case BPFISD::CALL:
    return "BPFISD::CALL";
  case BPFISD::SELECT_CC:
    return "BPFISD::SELECT_CC";
  case BPFISD::BR_CC:
    return "BPFISD::BR_CC";
  case BPFISD::Wrapper:
    return "BPFISD::Wrapper";
  case BPFISD::MEMCPY:
    return "BPFISD::MEMCPY";
  }
  return nullptr;
}

SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto N = cast<GlobalAddressSDNode>(Op);
  assert(N->getOffset() == 0 && "Invalid offset for global address");

  SDLoc DL(Op);
  const GlobalValue *GV = N->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i64);

  return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
}

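// Promote a 32-bit value held in a subregister to a full 64-bit register and
// sign- or zero-extend it, because without 32-bit jump support the comparison
// has to be performed on 64-bit registers. The emitted sequence is roughly:
//
//   rT = (u64)rS   // MOV_32_64
//   rT <<= 32      // SLL_ri
//   rT >>= 32      // SRL_ri for unsigned, or arithmetic SRA_ri for signed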
unsigned
BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Reg, bool isSigned) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
  MachineFunction *F = BB->getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineRegisterInfo &RegInfo = F->getRegInfo();
  Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
  BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
  BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
    .addReg(PromotedReg0).addImm(32);
  BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
    .addReg(PromotedReg1).addImm(32);

  return PromotedReg2;
}

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
                                                     MachineBasicBlock *BB)
                                                     const {
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);
  unsigned ScratchReg;

  // This function does custom insertion while lowering BPFISD::MEMCPY, which
  // only has two register operands from the memcpy semantics: the copy source
  // address and the copy destination address.
  //
  // Because we will expand BPFISD::MEMCPY into load/store pairs, we need a
  // third scratch register to serve as the destination register of the loads
  // and the source register of the stores.
  //
  // The scratch register here carries the Define | Dead | EarlyClobber flags.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction. The Define flag is
  // needed to convince the machine verifier that an Undef value isn't a
  // problem, as we are loading memory into it anyway. The Dead flag is needed
  // because the value in the scratch register isn't supposed to be used by
  // any other instruction.
  ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
  MIB.addReg(ScratchReg,
             RegState::Define | RegState::Dead | RegState::EarlyClobber);

  return BB;
}

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  bool isSelectRROp = (Opc == BPF::Select ||
                       Opc == BPF::Select_64_32 ||
                       Opc == BPF::Select_32 ||
                       Opc == BPF::Select_32_64);

  bool isMemcpyOp = Opc == BPF::MEMCPY;

#ifndef NDEBUG
  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
                       Opc == BPF::Select_Ri_64_32 ||
                       Opc == BPF::Select_Ri_32 ||
                       Opc == BPF::Select_Ri_32_64);

  assert((isSelectRROp || isSelectRIOp || isMemcpyOp) &&
         "Unexpected instr type to insert");
#endif

  if (isMemcpyOp)
    return EmitInstrWithCustomInserterMemcpy(MI, BB);

  bool is32BitCmp = (Opc == BPF::Select_32 ||
                     Opc == BPF::Select_32_64 ||
                     Opc == BPF::Select_Ri_32 ||
                     Opc == BPF::Select_Ri_32_64);

  // To "insert" a SELECT instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  // ThisMBB:
  // ...
  //  TrueVal = ...
  //  jmp_XX r1, r2 goto Copy1MBB
  //  fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, Copy0MBB);
  F->insert(I, Copy1MBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  Copy1MBB->splice(Copy1MBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(Copy0MBB);
  BB->addSuccessor(Copy1MBB);

  // Insert Branch if Flag
  int CC = MI.getOperand(3).getImm();
  int NewCC;
  switch (CC) {
#define SET_NEWCC(X, Y) \
  case ISD::X: \
    if (is32BitCmp && HasJmp32) \
      NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
    else \
      NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
    break
  SET_NEWCC(SETGT, JSGT);
  SET_NEWCC(SETUGT, JUGT);
  SET_NEWCC(SETGE, JSGE);
  SET_NEWCC(SETUGE, JUGE);
  SET_NEWCC(SETEQ, JEQ);
  SET_NEWCC(SETNE, JNE);
  SET_NEWCC(SETLT, JSLT);
  SET_NEWCC(SETULT, JULT);
  SET_NEWCC(SETLE, JSLE);
  SET_NEWCC(SETULE, JULE);
  default:
    report_fatal_error("unimplemented select CondCode " + Twine(CC));
  }

  Register LHS = MI.getOperand(1).getReg();
  bool isSignedCmp = (CC == ISD::SETGT ||
                      CC == ISD::SETGE ||
                      CC == ISD::SETLT ||
                      CC == ISD::SETLE);

  // eBPF at the moment only has 64-bit comparisons, so any 32-bit comparison
  // needs to be promoted. However, if the 32-bit comparison operands are
  // destination registers, they are already implicitly zero-extended and no
  // explicit zero-extend sequence is needed for them.
  //
  // We simply do the extension for all situations in this method, and try to
  // remove the unnecessary ones in the BPFMIPeephole pass.
  if (is32BitCmp && !HasJmp32)
    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

  if (isSelectRROp) {
    Register RHS = MI.getOperand(2).getReg();

    if (is32BitCmp && !HasJmp32)
      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
  } else {
    int64_t imm32 = MI.getOperand(2).getImm();
    // sanity check before we build J*_ri instruction.
    assert(isInt<32>(imm32));
    BuildMI(BB, DL, TII.get(NewCC))
        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
  }

  // Copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to Copy1MBB
  BB = Copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(Copy1MBB);

  // Copy1MBB:
  //  %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  // ...
  BB = Copy1MBB;
  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

EVT BPFTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                          EVT VT) const {
  return getHasAlu32() ? MVT::i32 : MVT::i64;
}

MVT BPFTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                              EVT VT) const {
  return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
}
}