xref: /freebsd/contrib/llvm-project/llvm/lib/Target/Xtensa/XtensaISelLowering.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===- XtensaISelLowering.cpp - Xtensa DAG Lowering Implementation --------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interfaces that Xtensa uses to lower LLVM code into a
10 // selection DAG.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "XtensaISelLowering.h"
15 #include "XtensaConstantPoolValue.h"
16 #include "XtensaSubtarget.h"
17 #include "XtensaTargetMachine.h"
18 #include "llvm/CodeGen/CallingConvLower.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineJumpTableInfo.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include "llvm/Support/MathExtras.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include <deque>
29 
30 using namespace llvm;
31 
32 #define DEBUG_TYPE "xtensa-lower"
33 
// Return true if we must use long (in fact, indirect) function call.
// This is a simplified version; a production implementation must
// resolve functions in ROM (usually glibc functions).
static bool isLongCall(const char *str) {
  // For now every callee is routed through the long (indirect) form,
  // regardless of the symbol name.
  return true;
}
41 
// Set up Xtensa target lowering: register classes, special registers,
// scheduling/alignment policy, and the per-opcode legalization actions
// that drive the custom Lower* routines below.
XtensaTargetLowering::XtensaTargetLowering(const TargetMachine &TM,
                                           const XtensaSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  MVT PtrVT = MVT::i32;
  // Set up the register classes.
  addRegisterClass(MVT::i32, &Xtensa::ARRegClass);

  // Set up special registers.
  setStackPointerRegisterToSaveRestore(Xtensa::SP);

  setSchedulingPreference(Sched::RegPressure);

  setMinFunctionAlignment(Align(4));

  // i32 constants may need a literal-pool load (see LowerImmediate);
  // i64 constants are split up by the legalizer.
  setOperationAction(ISD::Constant, MVT::i32, Custom);
  setOperationAction(ISD::Constant, MVT::i64, Expand);

  setBooleanContents(ZeroOrOneBooleanContent);

  // No native sign-extend-in-register forms for these narrow types.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);

  // Bit casts and int<->fp conversions are expanded rather than handled
  // natively.
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);

  // No sign extend instructions for i1
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
  }

  // Addresses are materialized through constant-pool entries wrapped for
  // PC-relative access (see the Lower* routines below).
  setOperationAction(ISD::ConstantPool, PtrVT, Custom);
  setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
  setOperationAction(ISD::BlockAddress, PtrVT, Custom);
  setOperationAction(ISD::JumpTable, PtrVT, Custom);

  // Expand jump table branches as address arithmetic followed by an
  // indirect jump.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::BR_CC, MVT::i32, Legal);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);

  setOperationAction(ISD::SELECT, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Expand);

  // GT/LE-style comparisons (signed and unsigned) are expanded in favor
  // of the remaining condition codes.
  setCondCodeAction(ISD::SETGT, MVT::i32, Expand);
  setCondCodeAction(ISD::SETLE, MVT::i32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::i32, Expand);
  setCondCodeAction(ISD::SETULE, MVT::i32, Expand);

  // Implement custom stack allocations
  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
  // Implement custom stack save and restore
  setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);

  // Compute derived properties from the register classes
  computeRegisterProperties(STI.getRegisterInfo());
}
110 
isOffsetFoldingLegal(const GlobalAddressSDNode * GA) const111 bool XtensaTargetLowering::isOffsetFoldingLegal(
112     const GlobalAddressSDNode *GA) const {
113   // The Xtensa target isn't yet aware of offsets.
114   return false;
115 }
116 
117 //===----------------------------------------------------------------------===//
118 // Calling conventions
119 //===----------------------------------------------------------------------===//
120 
121 #include "XtensaGenCallingConv.inc"
122 
// Custom argument-assignment routine for the Xtensa calling convention:
// word-sized arguments travel in A2..A7, the rest on the stack;
// i64/f64 pieces must start in an even register (A2/A4/A6).
static bool CC_Xtensa_Custom(unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg IntRegs[] = {Xtensa::A2, Xtensa::A3, Xtensa::A4,
                                      Xtensa::A5, Xtensa::A6, Xtensa::A7};

  if (ArgFlags.isByVal()) {
    // By-value aggregates always go on the stack, padded to at least
    // word size and word alignment.
    Align ByValAlign = ArgFlags.getNonZeroByValAlign();
    unsigned ByValSize = ArgFlags.getByValSize();
    if (ByValSize < 4) {
      ByValSize = 4;
    }
    if (ByValAlign < Align(4)) {
      ByValAlign = Align(4);
    }
    unsigned Offset = State.AllocateStack(ByValSize, ByValAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    // Mark all unused registers as allocated to avoid misuse
    // of such registers.
    while (State.AllocateReg(IntRegs))
      ;
    return false;
  }

  // Promote i8 and i16 to i32, recording which extension the consumer
  // may rely on.
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  unsigned Register;

  Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
  bool needs64BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(8));
  bool needs128BitAlign = (ValVT == MVT::i32 && OrigAlign == Align(16));

  if (ValVT == MVT::i32) {
    Register = State.AllocateReg(IntRegs);
    // If this is the first part of an i64 arg,
    // the allocated register must be either A2, A4 or A6.
    if (needs64BitAlign && (Register == Xtensa::A3 || Register == Xtensa::A5 ||
                            Register == Xtensa::A7))
      Register = State.AllocateReg(IntRegs);
    // arguments with 16byte alignment must be passed in the first register or
    // passed via stack
    if (needs128BitAlign && (Register != Xtensa::A2))
      // Drain the remaining registers so the value falls through to the
      // stack path below.
      while ((Register = State.AllocateReg(IntRegs)))
        ;
    LocVT = MVT::i32;
  } else if (ValVT == MVT::f64) {
    // Allocate int register and shadow next int register.
    Register = State.AllocateReg(IntRegs);
    if (Register == Xtensa::A3 || Register == Xtensa::A5 ||
        Register == Xtensa::A7)
      Register = State.AllocateReg(IntRegs);
    State.AllocateReg(IntRegs);
    LocVT = MVT::i32;
  } else {
    report_fatal_error("Cannot handle this ValVT.");
  }

  if (!Register) {
    // No register left (or alignment rules forced a stack slot).
    unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  } else {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Register, LocVT, LocInfo));
  }

  return false;
}
198 
CCAssignFnForCall(CallingConv::ID CC,bool IsVarArg) const199 CCAssignFn *XtensaTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
200                                                     bool IsVarArg) const {
201   return CC_Xtensa_Custom;
202 }
203 
// Lower the incoming (formal) arguments described by Ins into InVals,
// following the assignments produced by CC_Xtensa_Custom. Varargs are
// not supported yet.
SDValue XtensaTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // Used with vargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  if (IsVarArg)
    report_fatal_error("Var arg not supported by FormalArguments Lowering");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, IsVarArg));

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    // Arguments stored on registers
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;

      if (RegVT == MVT::i32)
        RC = &Xtensa::ARRegClass;
      else
        report_fatal_error("RegVT not supported by FormalArguments Lowering");

      // Transform the arguments stored on
      // physical registers into virtual ones
      unsigned Register = MF.addLiveIn(VA.getLocReg(), RC);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Register, RegVT);

      // If this is an 8 or 16-bit value, it has been passed promoted
      // to 32 bits.  Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      if (VA.getLocInfo() != CCValAssign::Full) {
        unsigned Opcode = 0;
        if (VA.getLocInfo() == CCValAssign::SExt)
          Opcode = ISD::AssertSext;
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          Opcode = ISD::AssertZext;
        if (Opcode)
          ArgValue = DAG.getNode(Opcode, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        // f32 arrived as an i32 bit pattern; everything else just needs
        // truncating back to the declared type.
        ArgValue = DAG.getNode((VA.getValVT() == MVT::f32) ? ISD::BITCAST
                                                           : ISD::TRUNCATE,
                               DL, VA.getValVT(), ArgValue);
      }

      InVals.push_back(ArgValue);

    } else {
      assert(VA.isMemLoc());

      EVT ValVT = VA.getValVT();

      // The stack pointer offset is relative to the caller stack frame.
      int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
                                     true);

      if (Ins[VA.getValNo()].Flags.isByVal()) {
        // By-value arguments stay in the caller's frame: hand back the
        // frame address itself and assume the consumer creates the load.
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        InVals.push_back(FIN);
      } else {
        // Create load nodes to retrieve arguments from the stack
        SDValue FIN =
            DAG.getFrameIndex(FI, getFrameIndexTy(DAG.getDataLayout()));
        InVals.push_back(DAG.getLoad(
            ValVT, DL, Chain, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
      }
    }
  }

  // All stores are grouped in one node to allow the matching between
  // the size of Ins and InVals. This only happens when on varg functions
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}
293 
// Lower an outgoing call: assign argument locations, emit the stack
// stores and register copies, build the XtensaISD::CALL node, and copy
// the returned values back out. Tail calls are not yet supported;
// named callees are routed through the literal pool (see isLongCall).
SDValue
XtensaTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const TargetFrameLowering *TFL = Subtarget.getFrameLowering();

  // TODO: Support tail call optimization.
  IsTailCall = false;

  // Analyze the operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCAssignFn *CC = CCAssignFnForCall(CallConv, IsVarArg);

  CCInfo.AnalyzeCallOperands(Outs, CC);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getStackSize();

  Align StackAlignment = TFL->getStackAlign();
  unsigned NextStackOffset = alignTo(NumBytes, StackAlignment);

  Chain = DAG.getCALLSEQ_START(Chain, NextStackOffset, 0, DL);

  // Copy argument values to their designated locations.
  std::deque<std::pair<unsigned, SDValue>> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    CCValAssign &VA = ArgLocs[I];
    SDValue ArgValue = OutVals[I];
    ISD::ArgFlagsTy Flags = Outs[I].Flags;

    if (VA.isRegLoc())
      // Queue up the argument copies and emit them at the end.
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
    else if (Flags.isByVal()) {
      // By-value aggregates are memcpy'd into the outgoing argument area.
      assert(VA.isMemLoc());
      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(!IsTailCall &&
             "Do not tail-call optimize if there is a byval argument.");

      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, Xtensa::SP, PtrVT);
      unsigned Offset = VA.getLocMemOffset();
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), DL, MVT::i32);
      SDValue Memcpy = DAG.getMemcpy(
          Chain, DL, Address, ArgValue, SizeNode, Flags.getNonZeroByValAlign(),
          /*isVolatile=*/false, /*AlwaysInline=*/false,
          /*CI=*/nullptr, std::nullopt, MachinePointerInfo(), MachinePointerInfo());
      MemOpChains.push_back(Memcpy);
    } else {
      assert(VA.isMemLoc() && "Argument not register or memory");

      // Work out the address of the outgoing stack slot.
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, DL, Xtensa::SP, PtrVT);
      unsigned Offset = VA.getLocMemOffset();
      SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
                                    DAG.getIntPtrConstant(Offset, DL));

      // Emit the store.
      MemOpChains.push_back(
          DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
    }
  }

  // Join the stores, which are independent of one another.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes, chained and glued together.
  SDValue Glue;
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    unsigned Reg = RegsToPass[I].first;
    Chain = DAG.getCopyToReg(Chain, DL, Reg, RegsToPass[I].second, Glue);
    Glue = Chain.getValue(1);
  }
  std::string name;
  unsigned char TF = 0;

  // Accept direct calls by converting symbolic call addresses to the
  // associated Target* opcodes.
  if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    name = E->getSymbol();
    TF = E->getTargetFlags();
    if (isPositionIndependent()) {
      report_fatal_error("PIC relocations is not supported");
    } else
      Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    name = GV->getName().str();
  }

  // Long (indirect) calls load the callee address from the literal pool.
  if ((!name.empty()) && isLongCall(name.c_str())) {
    // Create a constant pool entry for the callee address
    XtensaCP::XtensaCPModifier Modifier = XtensaCP::no_modifier;

    XtensaConstantPoolValue *CPV = XtensaConstantPoolSymbol::Create(
        *DAG.getContext(), name.c_str(), 0 /* XtensaCLabelIndex */, false,
        Modifier);

    // Get the address of the callee into a register
    SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4), 0, TF);
    SDValue CPWrap = getAddrPCRel(CPAddr, DAG);
    Callee = CPWrap;
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
    unsigned Reg = RegsToPass[I].first;
    Ops.push_back(DAG.getRegister(Reg, RegsToPass[I].second.getValueType()));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(XtensaISD::CALL, DL, NodeTys, Ops);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getConstant(NumBytes, DL, PtrVT, true),
                             DAG.getConstant(0, DL, PtrVT, true), Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RetLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
  RetCCInfo.AnalyzeCallResult(Ins, RetCC_Xtensa);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
    CCValAssign &VA = RetLocs[I];

    // Copy the value out, gluing the copy to the end of the call sequence.
    unsigned Reg = VA.getLocReg();
    SDValue RetValue = DAG.getCopyFromReg(Chain, DL, Reg, VA.getLocVT(), Glue);
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

    InVals.push_back(RetValue);
  }
  return Chain;
}
470 
CanLowerReturn(CallingConv::ID CallConv,MachineFunction & MF,bool IsVarArg,const SmallVectorImpl<ISD::OutputArg> & Outs,LLVMContext & Context) const471 bool XtensaTargetLowering::CanLowerReturn(
472     CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
473     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
474   SmallVector<CCValAssign, 16> RVLocs;
475   CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
476   return CCInfo.CheckReturn(Outs, RetCC_Xtensa);
477 }
478 
479 SDValue
LowerReturn(SDValue Chain,CallingConv::ID CallConv,bool IsVarArg,const SmallVectorImpl<ISD::OutputArg> & Outs,const SmallVectorImpl<SDValue> & OutVals,const SDLoc & DL,SelectionDAG & DAG) const480 XtensaTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
481                                   bool IsVarArg,
482                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
483                                   const SmallVectorImpl<SDValue> &OutVals,
484                                   const SDLoc &DL, SelectionDAG &DAG) const {
485   if (IsVarArg)
486     report_fatal_error("VarArg not supported");
487 
488   MachineFunction &MF = DAG.getMachineFunction();
489 
490   // Assign locations to each returned value.
491   SmallVector<CCValAssign, 16> RetLocs;
492   CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
493   RetCCInfo.AnalyzeReturn(Outs, RetCC_Xtensa);
494 
495   SDValue Glue;
496   // Quick exit for void returns
497   if (RetLocs.empty())
498     return DAG.getNode(XtensaISD::RET, DL, MVT::Other, Chain);
499 
500   // Copy the result values into the output registers.
501   SmallVector<SDValue, 4> RetOps;
502   RetOps.push_back(Chain);
503   for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
504     CCValAssign &VA = RetLocs[I];
505     SDValue RetValue = OutVals[I];
506 
507     // Make the return register live on exit.
508     assert(VA.isRegLoc() && "Can only return in registers!");
509 
510     // Chain and glue the copies together.
511     unsigned Register = VA.getLocReg();
512     Chain = DAG.getCopyToReg(Chain, DL, Register, RetValue, Glue);
513     Glue = Chain.getValue(1);
514     RetOps.push_back(DAG.getRegister(Register, VA.getLocVT()));
515   }
516 
517   // Update chain and glue.
518   RetOps[0] = Chain;
519   if (Glue.getNode())
520     RetOps.push_back(Glue);
521 
522   return DAG.getNode(XtensaISD::RET, DL, MVT::Other, RetOps);
523 }
524 
// Map an ISD integer condition code onto the Xtensa conditional-branch
// opcode used by SELECT_CC lowering. The base ISA provides only the
// EQ/NE/LT/GE (and unsigned LTU/GEU) branch forms, so GT/LE and
// UGT/ULE are also mapped onto BLT/BGE/BLTU/BGEU here.
// NOTE(review): SETGT->BLT and SETLE->BGE are only correct if the
// branch operands are swapped (or the taken/fall-through arms inverted)
// by the consumer; emitSelectCC emits the operands as LHS,RHS directly —
// confirm the intended operand handling for these four cases.
static unsigned getBranchOpcode(ISD::CondCode Cond) {
  switch (Cond) {
  case ISD::SETEQ:
    return Xtensa::BEQ;
  case ISD::SETNE:
    return Xtensa::BNE;
  case ISD::SETLT:
    return Xtensa::BLT;
  case ISD::SETLE:
    return Xtensa::BGE;
  case ISD::SETGT:
    return Xtensa::BLT;
  case ISD::SETGE:
    return Xtensa::BGE;
  case ISD::SETULT:
    return Xtensa::BLTU;
  case ISD::SETULE:
    return Xtensa::BGEU;
  case ISD::SETUGT:
    return Xtensa::BLTU;
  case ISD::SETUGE:
    return Xtensa::BGEU;
  default:
    llvm_unreachable("Unknown branch kind");
  }
}
551 
LowerSELECT_CC(SDValue Op,SelectionDAG & DAG) const552 SDValue XtensaTargetLowering::LowerSELECT_CC(SDValue Op,
553                                              SelectionDAG &DAG) const {
554   SDLoc DL(Op);
555   EVT Ty = Op.getOperand(0).getValueType();
556   SDValue LHS = Op.getOperand(0);
557   SDValue RHS = Op.getOperand(1);
558   SDValue TrueValue = Op.getOperand(2);
559   SDValue FalseValue = Op.getOperand(3);
560   ISD::CondCode CC = cast<CondCodeSDNode>(Op->getOperand(4))->get();
561 
562   unsigned BrOpcode = getBranchOpcode(CC);
563   SDValue TargetCC = DAG.getConstant(BrOpcode, DL, MVT::i32);
564 
565   return DAG.getNode(XtensaISD::SELECT_CC, DL, Ty, LHS, RHS, TrueValue,
566                      FalseValue, TargetCC);
567 }
568 
// Lower an i32 ISD::Constant: values encodable as instruction
// immediates are left alone, everything else is placed in the constant
// pool and loaded from there.
SDValue XtensaTargetLowering::LowerImmediate(SDValue Op,
                                             SelectionDAG &DAG) const {
  const ConstantSDNode *CN = cast<ConstantSDNode>(Op);
  SDLoc DL(CN);
  APInt APVal = CN->getAPIntValue();
  int64_t Value = APVal.getSExtValue();
  if (Op.getValueType() == MVT::i32) {
    // Check if use node maybe lowered to the MOVI instruction
    // NOTE(review): the strict '>' excludes -2048; if MOVI's signed
    // 12-bit immediate includes it, that value merely falls through to
    // the constant pool (conservative but correct) — confirm.
    if (Value > -2048 && Value <= 2047)
      return Op;
    // Check if use node maybe lowered to the ADDMI instruction
    // (only useful when the sole user is an ADD, and the value matches
    // isShiftedInt<16, 8>: a signed 16-bit quantity shifted left by 8).
    SDNode &OpNode = *Op.getNode();
    if ((OpNode.hasOneUse() && OpNode.use_begin()->getOpcode() == ISD::ADD) &&
        isShiftedInt<16, 8>(Value))
      return Op;
    // Otherwise materialize through the constant pool.
    Type *Ty = Type::getInt32Ty(*DAG.getContext());
    Constant *CV = ConstantInt::get(Ty, Value);
    SDValue CP = DAG.getConstantPool(CV, MVT::i32);
    return CP;
  }
  return Op;
}
591 
LowerGlobalAddress(SDValue Op,SelectionDAG & DAG) const592 SDValue XtensaTargetLowering::LowerGlobalAddress(SDValue Op,
593                                                  SelectionDAG &DAG) const {
594   const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
595   SDLoc DL(Op);
596   auto PtrVT = Op.getValueType();
597   const GlobalValue *GV = G->getGlobal();
598 
599   SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4));
600   SDValue CPWrap = getAddrPCRel(CPAddr, DAG);
601 
602   return CPWrap;
603 }
604 
LowerBlockAddress(SDValue Op,SelectionDAG & DAG) const605 SDValue XtensaTargetLowering::LowerBlockAddress(SDValue Op,
606                                                 SelectionDAG &DAG) const {
607   BlockAddressSDNode *Node = cast<BlockAddressSDNode>(Op);
608   const BlockAddress *BA = Node->getBlockAddress();
609   EVT PtrVT = Op.getValueType();
610 
611   XtensaConstantPoolValue *CPV =
612       XtensaConstantPoolConstant::Create(BA, 0, XtensaCP::CPBlockAddress);
613   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
614   SDValue CPWrap = getAddrPCRel(CPAddr, DAG);
615 
616   return CPWrap;
617 }
618 
LowerBR_JT(SDValue Op,SelectionDAG & DAG) const619 SDValue XtensaTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
620   SDValue Chain = Op.getOperand(0);
621   SDValue Table = Op.getOperand(1);
622   SDValue Index = Op.getOperand(2);
623   SDLoc DL(Op);
624   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
625   MachineFunction &MF = DAG.getMachineFunction();
626   const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
627   SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
628   const DataLayout &TD = DAG.getDataLayout();
629   EVT PtrVT = Table.getValueType();
630   unsigned EntrySize = MJTI->getEntrySize(TD);
631 
632   Index = DAG.getNode(ISD::MUL, DL, Index.getValueType(), Index,
633                       DAG.getConstant(EntrySize, DL, Index.getValueType()));
634   SDValue Addr = DAG.getNode(ISD::ADD, DL, Index.getValueType(), Index, Table);
635   SDValue LD =
636       DAG.getLoad(PtrVT, DL, Chain, Addr,
637                   MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
638 
639   return DAG.getNode(XtensaISD::BR_JT, DL, MVT::Other, LD.getValue(1), LD,
640                      TargetJT);
641 }
642 
LowerJumpTable(SDValue Op,SelectionDAG & DAG) const643 SDValue XtensaTargetLowering::LowerJumpTable(SDValue Op,
644                                              SelectionDAG &DAG) const {
645   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
646   EVT PtrVT = Op.getValueType();
647 
648   // Create a constant pool entry for the callee address
649   XtensaConstantPoolValue *CPV =
650       XtensaConstantPoolJumpTable::Create(*DAG.getContext(), JT->getIndex());
651 
652   // Get the address of the callee into a register
653   SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
654 
655   return getAddrPCRel(CPAddr, DAG);
656 }
657 
getAddrPCRel(SDValue Op,SelectionDAG & DAG) const658 SDValue XtensaTargetLowering::getAddrPCRel(SDValue Op,
659                                            SelectionDAG &DAG) const {
660   SDLoc DL(Op);
661   EVT Ty = Op.getValueType();
662   return DAG.getNode(XtensaISD::PCREL_WRAPPER, DL, Ty, Op);
663 }
664 
LowerConstantPool(ConstantPoolSDNode * CP,SelectionDAG & DAG) const665 SDValue XtensaTargetLowering::LowerConstantPool(ConstantPoolSDNode *CP,
666                                                 SelectionDAG &DAG) const {
667   EVT PtrVT = getPointerTy(DAG.getDataLayout());
668   SDValue Result;
669   if (!CP->isMachineConstantPoolEntry()) {
670     Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign(),
671                                        CP->getOffset());
672   } else {
673     report_fatal_error("This constantpool type is not supported yet");
674   }
675 
676   return getAddrPCRel(Result, DAG);
677 }
678 
LowerSTACKSAVE(SDValue Op,SelectionDAG & DAG) const679 SDValue XtensaTargetLowering::LowerSTACKSAVE(SDValue Op,
680                                              SelectionDAG &DAG) const {
681   return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op), Xtensa::SP,
682                             Op.getValueType());
683 }
684 
LowerSTACKRESTORE(SDValue Op,SelectionDAG & DAG) const685 SDValue XtensaTargetLowering::LowerSTACKRESTORE(SDValue Op,
686                                                 SelectionDAG &DAG) const {
687   return DAG.getCopyToReg(Op.getOperand(0), SDLoc(Op), Xtensa::SP,
688                           Op.getOperand(1));
689 }
690 
LowerDYNAMIC_STACKALLOC(SDValue Op,SelectionDAG & DAG) const691 SDValue XtensaTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
692                                                       SelectionDAG &DAG) const {
693   SDValue Chain = Op.getOperand(0); // Legalize the chain.
694   SDValue Size = Op.getOperand(1);  // Legalize the size.
695   EVT VT = Size->getValueType(0);
696   SDLoc DL(Op);
697 
698   // Round up Size to 32
699   SDValue SizeTmp =
700       DAG.getNode(ISD::ADD, DL, VT, Size, DAG.getConstant(31, DL, MVT::i32));
701   SDValue SizeRoundUp = DAG.getNode(ISD::AND, DL, VT, SizeTmp,
702                                     DAG.getConstant(~31, DL, MVT::i32));
703 
704   unsigned SPReg = Xtensa::SP;
705   SDValue SP = DAG.getCopyFromReg(Chain, DL, SPReg, VT);
706   SDValue NewSP = DAG.getNode(ISD::SUB, DL, VT, SP, SizeRoundUp); // Value
707   Chain = DAG.getCopyToReg(SP.getValue(1), DL, SPReg, NewSP); // Output chain
708 
709   SDValue NewVal = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i32);
710   Chain = NewVal.getValue(1);
711 
712   SDValue Ops[2] = {NewVal, Chain};
713   return DAG.getMergeValues(Ops, DL);
714 }
715 
LowerOperation(SDValue Op,SelectionDAG & DAG) const716 SDValue XtensaTargetLowering::LowerOperation(SDValue Op,
717                                              SelectionDAG &DAG) const {
718   switch (Op.getOpcode()) {
719   case ISD::BR_JT:
720     return LowerBR_JT(Op, DAG);
721   case ISD::Constant:
722     return LowerImmediate(Op, DAG);
723   case ISD::GlobalAddress:
724     return LowerGlobalAddress(Op, DAG);
725   case ISD::BlockAddress:
726     return LowerBlockAddress(Op, DAG);
727   case ISD::JumpTable:
728     return LowerJumpTable(Op, DAG);
729   case ISD::ConstantPool:
730     return LowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
731   case ISD::SELECT_CC:
732     return LowerSELECT_CC(Op, DAG);
733   case ISD::STACKSAVE:
734     return LowerSTACKSAVE(Op, DAG);
735   case ISD::STACKRESTORE:
736     return LowerSTACKRESTORE(Op, DAG);
737   case ISD::DYNAMIC_STACKALLOC:
738     return LowerDYNAMIC_STACKALLOC(Op, DAG);
739   default:
740     report_fatal_error("Unexpected node to lower");
741   }
742 }
743 
getTargetNodeName(unsigned Opcode) const744 const char *XtensaTargetLowering::getTargetNodeName(unsigned Opcode) const {
745   switch (Opcode) {
746   case XtensaISD::BR_JT:
747     return "XtensaISD::BR_JT";
748   case XtensaISD::CALL:
749     return "XtensaISD::CALL";
750   case XtensaISD::PCREL_WRAPPER:
751     return "XtensaISD::PCREL_WRAPPER";
752   case XtensaISD::RET:
753     return "XtensaISD::RET";
754   case XtensaISD::SELECT_CC:
755     return "XtensaISD::SELECT_CC";
756   }
757   return nullptr;
758 }
759 
760 //===----------------------------------------------------------------------===//
761 // Custom insertion
762 //===----------------------------------------------------------------------===//
763 
// Expand the SELECT pseudo into a diamond of basic blocks: a conditional
// branch around a fall-through copy block, with a PHI in the join block
// choosing between the true and false values.
MachineBasicBlock *
XtensaTargetLowering::emitSelectCC(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // SELECT operands: (result, lhs, rhs, trueval, falseval, branch
  // opcode chosen by getBranchOpcode()).
  MachineOperand &LHS = MI.getOperand(1);
  MachineOperand &RHS = MI.getOperand(2);
  MachineOperand &TrueValue = MI.getOperand(3);
  MachineOperand &FalseValue = MI.getOperand(4);
  unsigned BrKind = MI.getOperand(5).getImm();

  // To "insert" a SELECT_CC instruction, we actually have to insert
  // CopyMBB and SinkMBB blocks and add branch to MBB. We build phi
  // operation in SinkMBB like phi (TrueValue, FalseValue), where TrueValue
  // is passed from MBB and FalseValue is passed from CopyMBB.
  //   MBB
  //   |   \
  //   |   CopyMBB
  //   |   /
  //   SinkMBB
  // The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator It = ++MBB->getIterator();

  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *CopyMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(It, CopyMBB);
  F->insert(It, SinkMBB);

  // Transfer the remainder of MBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  MBB->addSuccessor(CopyMBB);
  MBB->addSuccessor(SinkMBB);

  // Conditional branch: when taken we skip CopyMBB and reach SinkMBB
  // directly from MBB (the TrueValue edge of the PHI below).
  BuildMI(MBB, DL, TII.get(BrKind))
      .addReg(LHS.getReg())
      .addReg(RHS.getReg())
      .addMBB(SinkMBB);

  CopyMBB->addSuccessor(SinkMBB);

  //  SinkMBB:
  //   %Result = phi [ %FalseValue, CopyMBB ], [ %TrueValue, MBB ]
  //  ...

  BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII.get(Xtensa::PHI),
          MI.getOperand(0).getReg())
      .addReg(FalseValue.getReg())
      .addMBB(CopyMBB)
      .addReg(TrueValue.getReg())
      .addMBB(MBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return SinkMBB;
}
827 
EmitInstrWithCustomInserter(MachineInstr & MI,MachineBasicBlock * MBB) const828 MachineBasicBlock *XtensaTargetLowering::EmitInstrWithCustomInserter(
829     MachineInstr &MI, MachineBasicBlock *MBB) const {
830   switch (MI.getOpcode()) {
831   case Xtensa::SELECT:
832     return emitSelectCC(MI, MBB);
833   default:
834     llvm_unreachable("Unexpected instr type to insert");
835   }
836 }
837