//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AVR uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "AVRISelLowering.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"

#include "AVR.h"
#include "AVRMachineFunctionInfo.h"
#include "AVRSubtarget.h"
#include "AVRTargetMachine.h"
#include "MCTargetDesc/AVRMCTargetDesc.h"

namespace llvm {

AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
                                     const AVRSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Set up the register classes.
  addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
  addRegisterClass(MVT::i16, &AVR::DREGSRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(AVR::SP);
  setSupportsUnalignedAtomics(true);

  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i16, Custom);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);

  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);

  for (MVT VT : MVT::integer_valuetypes()) {
    for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(N, VT, MVT::i1, Promote);
      setLoadExtAction(N, VT, MVT::i8, Expand);
    }
  }

  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  // sub (x, imm) gets canonicalized to add (x, -imm), so for the illegal types
  // (i32 and i64) we turn it back into a sub, since we don't have an
  // add-with-immediate instruction.
  setOperationAction(ISD::ADD, MVT::i32, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Custom);

  // Our shift instructions are only able to shift 1 bit at a time, so handle
  // this in a custom way.
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i16, Custom);
  setOperationAction(ISD::SHL, MVT::i16, Custom);
  setOperationAction(ISD::SRL, MVT::i16, Custom);
  setOperationAction(ISD::SRA, MVT::i32, Custom);
  setOperationAction(ISD::SHL, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i32, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);
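
  // A rough illustration of why all of these are Custom: AVR's shift
  // instructions (LSL, LSR, ASR, ROL, ROR) move a value by exactly one bit,
  // so a constant shift such as the i8 expression `x << 3` has to become a
  // chain of three single-bit shift nodes. LowerShifts below builds those
  // chains and also applies cheaper tricks for larger shift amounts.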

  setOperationAction(ISD::ROTL, MVT::i8, Custom);
  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Custom);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i8, Custom);
  setOperationAction(ISD::BR_CC, MVT::i16, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::i8, Expand);
  setOperationAction(ISD::SELECT, MVT::i16, Expand);

  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // Add support for post-increment and pre-decrement load/stores.
  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Atomic operations which must be lowered to rtlib calls
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
  }

  // Division/remainder
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);

  // Make division and modulus custom
  setOperationAction(ISD::UDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);

  // Do not use MUL. The AVR instructions are closer to SMUL_LOHI &co.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);

  // Expand 16 bit multiplications.
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);

  // Expand multiplications to libcalls when there is
  // no hardware MUL.
  if (!Subtarget.supportsMultiplication()) {
    setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    // TODO: The generated code is pretty poor. Investigate using the
    // same "shift and subtract with carry" trick that we do for
    // extending 8-bit to 16-bit. This may require infrastructure
    // improvements in how we treat 16-bit "registers" to be feasible.
  }

  // Division rtlib functions (not supported), use divmod functions instead
  setLibcallName(RTLIB::SDIV_I8, nullptr);
  setLibcallName(RTLIB::SDIV_I16, nullptr);
  setLibcallName(RTLIB::SDIV_I32, nullptr);
  setLibcallName(RTLIB::UDIV_I8, nullptr);
  setLibcallName(RTLIB::UDIV_I16, nullptr);
  setLibcallName(RTLIB::UDIV_I32, nullptr);

  // Modulus rtlib functions (not supported), use divmod functions instead
  setLibcallName(RTLIB::SREM_I8, nullptr);
  setLibcallName(RTLIB::SREM_I16, nullptr);
  setLibcallName(RTLIB::SREM_I32, nullptr);
  setLibcallName(RTLIB::UREM_I8, nullptr);
  setLibcallName(RTLIB::UREM_I16, nullptr);
  setLibcallName(RTLIB::UREM_I32, nullptr);

  // Division and modulus rtlib functions
  setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
  setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
  setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
  setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
  setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
  setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
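
  // Sketch of the intent here (assuming the usual avr-gcc/libgcc helpers):
  // each __(u)divmod*4 routine computes quotient and remainder in a single
  // call, so a source-level pair like
  //   int16_t q = a / b;
  //   int16_t r = a % b;
  // is served by one __divmodhi4 call once the DAG merges the SDIV and SREM
  // nodes into a single SDIVREM (lowered in LowerDivRem below).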

  // Several of the runtime library functions use a special calling convention.
  setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN);

  // Trigonometric rtlib functions
  setLibcallName(RTLIB::SIN_F32, "sin");
  setLibcallName(RTLIB::COS_F32, "cos");

  setMinFunctionAlignment(Align(2));
  setMinimumJumpTableEntries(UINT_MAX);
}

const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define NODE(name)                                                             \
  case AVRISD::name:                                                           \
    return #name

  switch (Opcode) {
  default:
    return nullptr;
    NODE(RET_FLAG);
    NODE(RETI_FLAG);
    NODE(CALL);
    NODE(WRAPPER);
    NODE(LSL);
    NODE(LSLW);
    NODE(LSR);
    NODE(LSRW);
    NODE(ROL);
    NODE(ROR);
    NODE(ASR);
    NODE(ASRW);
    NODE(LSLLOOP);
    NODE(LSRLOOP);
    NODE(ROLLOOP);
    NODE(RORLOOP);
    NODE(ASRLOOP);
    NODE(BRCOND);
    NODE(CMP);
    NODE(CMPC);
    NODE(TST);
    NODE(SELECT_CC);
#undef NODE
  }
}

EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  assert(!VT.isVector() && "No AVR SetCC type for vectors!");
  return MVT::i8;
}

SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opc8;
  const SDNode *N = Op.getNode();
  EVT VT = Op.getValueType();
  SDLoc dl(N);
  assert(isPowerOf2_32(VT.getSizeInBits()) &&
         "Expected power-of-2 shift amount");

  if (VT.getSizeInBits() == 32) {
    if (!isa<ConstantSDNode>(N->getOperand(1))) {
      // 32-bit shifts are converted to a loop in IR.
      // This should be unreachable.
      report_fatal_error("Expected a constant shift amount!");
    }
    SDVTList ResTys = DAG.getVTList(MVT::i16, MVT::i16);
    SDValue SrcLo =
        DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0),
                    DAG.getConstant(0, dl, MVT::i16));
    SDValue SrcHi =
        DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i16, Op.getOperand(0),
                    DAG.getConstant(1, dl, MVT::i16));
    uint64_t ShiftAmount =
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    if (ShiftAmount == 16) {
      // Special case these two operations because they appear to be used by the
      // generic codegen parts to lower 32-bit numbers.
      // TODO: perhaps we can lower shift amounts bigger than 16 to a 16-bit
      // shift of a part of the 32-bit value?
      switch (Op.getOpcode()) {
      case ISD::SHL: {
        SDValue Zero = DAG.getConstant(0, dl, MVT::i16);
        return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, Zero, SrcLo);
      }
      case ISD::SRL: {
        SDValue Zero = DAG.getConstant(0, dl, MVT::i16);
        return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, SrcHi, Zero);
      }
      }
    }
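
    // Illustration of the special case above: BUILD_PAIR takes
    // (low half, high half), so `x << 16` is built as (0, SrcLo), meaning the
    // old low word becomes the new high word while the low word is zeroed,
    // and the logical `x >> 16` is built as (SrcHi, 0) in the same way.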
    SDValue Cnt = DAG.getTargetConstant(ShiftAmount, dl, MVT::i8);
    unsigned Opc;
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid 32-bit shift opcode!");
    case ISD::SHL:
      Opc = AVRISD::LSLW;
      break;
    case ISD::SRL:
      Opc = AVRISD::LSRW;
      break;
    case ISD::SRA:
      Opc = AVRISD::ASRW;
      break;
    }
    SDValue Result = DAG.getNode(Opc, dl, ResTys, SrcLo, SrcHi, Cnt);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i32, Result.getValue(0),
                       Result.getValue(1));
  }

  // Expand non-constant shifts to loops.
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid shift opcode!");
    case ISD::SHL:
      return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRL:
      return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTL: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::ROTR: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::SRA:
      return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    }
  }

  uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Victim = N->getOperand(0);

  switch (Op.getOpcode()) {
  case ISD::SRA:
    Opc8 = AVRISD::ASR;
    break;
  case ISD::ROTL:
    Opc8 = AVRISD::ROL;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::ROTR:
    Opc8 = AVRISD::ROR;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::SRL:
    Opc8 = AVRISD::LSR;
    break;
  case ISD::SHL:
    Opc8 = AVRISD::LSL;
    break;
  default:
    llvm_unreachable("Invalid shift opcode");
  }

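  // The idea behind the 8-bit tricks below, sketched for `uint8_t x << 5`
  // (the exact instructions are chosen later, during instruction selection):
  // SWAP exchanges the two nibbles of a byte, which is a shift by 4 in a
  // single instruction, so roughly
  //   x << 5  ->  swap; andi 0xF0; lsl
  // costs three instructions instead of five plain `lsl`s. The AND clears
  // the bits that SWAP rotated into the low nibble.
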
  // Optimize int8/int16 shifts.
  if (VT.getSizeInBits() == 8) {
    if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
      // Optimize LSL when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0xf0, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
               ShiftAmount < 7) {
      // Optimize LSR when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0x0f, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
      // Optimize LSL when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSLBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
      // Optimize LSR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSRBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 6) {
      // Optimize ASR when ShiftAmount == 6.
      Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
                           DAG.getConstant(6, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
      // Optimize ASR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    }
  } else if (VT.getSizeInBits() == 16) {
    if (Op.getOpcode() == ISD::SRA)
      // Special optimization for int16 arithmetic right shift.
      switch (ShiftAmount) {
      case 15:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(15, dl, VT));
        ShiftAmount = 0;
        break;
      case 14:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(14, dl, VT));
        ShiftAmount = 0;
        break;
      case 7:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(7, dl, VT));
        ShiftAmount = 0;
        break;
      default:
        break;
      }
    if (4 <= ShiftAmount && ShiftAmount < 8)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(4, dl, VT));
        ShiftAmount -= 4;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(4, dl, VT));
        ShiftAmount -= 4;
        break;
      default:
        break;
      }
    else if (8 <= ShiftAmount && ShiftAmount < 12)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the higher byte for remaining shift bits.
        Opc8 = AVRISD::LSLHI;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::LSRLO;
        break;
      case ISD::SRA:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::ASRLO;
        break;
      default:
        break;
      }
    else if (12 <= ShiftAmount)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(12, dl, VT));
        ShiftAmount -= 12;
        // Only operate on the higher byte for remaining shift bits.
        Opc8 = AVRISD::LSLHI;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(12, dl, VT));
        ShiftAmount -= 12;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::LSRLO;
        break;
      case ISD::SRA:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        // Only operate on the lower byte for remaining shift bits.
        Opc8 = AVRISD::ASRLO;
        break;
      default:
        break;
      }
  }

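  // Whatever ShiftAmount remains after the tricks above is emitted as a
  // plain chain of single-bit shifts by this loop. E.g. an i16 `x << 9`
  // (illustrative) takes the 8 <= amount < 12 path: one LSLWN-by-8 node,
  // then a single remaining LSLHI step that shifts only the high byte.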
  while (ShiftAmount--) {
    Victim = DAG.getNode(Opc8, dl, VT, Victim);
  }

  return Victim;
}

SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool IsSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
    break;
  case MVT::i16:
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
    break;
  case MVT::i32:
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
    break;
  }

  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (SDValue const &Value : Op->op_values()) {
    Entry.Node = Value;
    Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = StructType::get(Ty, Ty);

  SDLoc dl(Op);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setInRegister()
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}
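
// Data flow of the lowering above, in brief: the operands of the
// SDIVREM/UDIVREM node become the libcall arguments, sign- or zero-extended
// according to signedness (IsSExt/IsZExt), and the struct-of-two return type
// carries quotient and remainder back, entirely in registers thanks to
// setInRegister(), as the node's two result values.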

SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();

  // Create the TargetGlobalAddress node, folding in the constant offset.
  SDValue Result =
      DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));

  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

/// intCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case ISD::SETEQ:
    return AVRCC::COND_EQ;
  case ISD::SETNE:
    return AVRCC::COND_NE;
  case ISD::SETGE:
    return AVRCC::COND_GE;
  case ISD::SETLT:
    return AVRCC::COND_LT;
  case ISD::SETUGE:
    return AVRCC::COND_SH;
  case ISD::SETULT:
    return AVRCC::COND_LO;
  }
}
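
// Only six condition codes are handled here because getAVRCmp below
// canonicalizes the rest away first: SETLE/SETGT and SETULE/SETUGT are
// rewritten into these forms by swapping operands or adjusting constants.
// The six map onto the branches AVR can test directly (BREQ, BRNE, BRGE,
// BRLT, BRSH, BRLO).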

/// Returns the appropriate CP/CPI/CPC nodes for the given 8/16-bit operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, SDLoc DL) const {
  assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
         "LHS and RHS have different types");
  assert(((LHS.getSimpleValueType() == MVT::i16) ||
          (LHS.getSimpleValueType() == MVT::i8)) &&
         "invalid comparison type");

  SDValue Cmp;

  if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
    // Generate a CPI/CPC pair if RHS is a 16-bit constant.
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(1, DL));
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
    Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
  } else {
    // Generate an ordinary 8-bit or 16-bit comparison.
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
  }

  return Cmp;
}
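
// Background on the CP/CPC split above: AVR compares multi-byte values one
// byte at a time. The low bytes use CP (or CPI for an immediate, which is
// what makes the constant-RHS case above profitable), and each higher byte
// uses CPC, which folds in the carry from the previous compare so the final
// flags describe the full 16-bit relation.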

/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
/// the given operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  SDValue Cmp;
  EVT VT = LHS.getValueType();
  bool UseTest = false;

  switch (CC) {
  default:
    break;
  case ISD::SETLE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETGE;
    break;
  }
  case ISD::SETGT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case -1: {
        // When doing lhs > -1 use a tst instruction on the top part of lhs
        // and use brpl instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
        break;
      }
      case 0: {
        // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETLT;
        break;
      }
      default: {
        // Turn lhs > C into lhs >= C+1, which allows us to fold the constant
        // into the cmp instruction.
        RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      }
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETLT;
    break;
  }
  case ISD::SETLT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case 1: {
        // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      case 0: {
        // When doing lhs < 0 use a tst instruction on the top part of lhs
        // and use brmi instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
        break;
      }
      }
    }
    break;
  }
  case ISD::SETULE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETUGE;
    break;
  }
  case ISD::SETUGT: {
    // Turn lhs > C into lhs >= C+1 (unsigned), which allows us to fold the
    // constant into the cmp instruction.
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
      CC = ISD::SETUGE;
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETULT;
    break;
  }
  }

  // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
  // using the default and/or/xor expansion code which is much longer.
  if (VT == MVT::i32) {
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
    }
  } else if (VT == MVT::i64) {
    SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(1, DL));

    SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
    }
  } else if (VT == MVT::i8 || VT == MVT::i16) {
    if (UseTest) {
      // When using tst we only care about the highest part.
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
                        (VT == MVT::i8)
                            ? LHS
                            : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
                                          LHS, DAG.getIntPtrConstant(1, DL)));
    } else {
      Cmp = getAVRCmp(LHS, RHS, DAG, DL);
    }
  } else {
    llvm_unreachable("Invalid comparison size");
  }

  // When using a test instruction AVRcc is already set.
  if (!UseTest) {
    AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
  }

  return Cmp;
}

SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
                     Cmp);
}

SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops);
}

SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);

  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops);
}

SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  SDLoc dl(Op);

  // va_start just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FI = DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));

  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

// Modify the existing ISD::INLINEASM node to add the implicit zero register.
SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
  SDValue ZeroReg = DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8);
  if (Op.getOperand(Op.getNumOperands() - 1) == ZeroReg ||
      Op.getOperand(Op.getNumOperands() - 2) == ZeroReg) {
    // Zero register has already been added. Don't add it again.
    // If this isn't handled, we get called over and over again.
    return Op;
  }

  // Get a list of operands to the new INLINEASM node. This is mostly a copy,
  // with some edits.
  // Add the following operands at the end (but before the glue node, if it's
  // there):
  //  - The flags of the implicit zero register operand.
  //  - The implicit zero register operand itself.
  SDLoc dl(Op);
  SmallVector<SDValue, 8> Ops;
  SDNode *N = Op.getNode();
  SDValue Glue;
  for (unsigned I = 0; I < N->getNumOperands(); I++) {
    SDValue Operand = N->getOperand(I);
    if (Operand.getValueType() == MVT::Glue) {
      // The glue operand always needs to be at the end, so we need to treat it
      // specially.
      Glue = Operand;
    } else {
      Ops.push_back(Operand);
    }
  }
  unsigned Flags = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
  Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32));
  Ops.push_back(ZeroReg);
  if (Glue) {
    Ops.push_back(Glue);
  }

  // Replace the current INLINEASM node with a new one that has the zero
  // register as implicit parameter.
  SDValue New = DAG.getNode(N->getOpcode(), dl, N->getVTList(), Ops);
  DAG.ReplaceAllUsesOfValueWith(Op, New);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), New.getValue(1));

  return New;
}

SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return LowerDivRem(Op, DAG);
  case ISD::INLINEASM:
    return LowerINLINEASM(Op, DAG);
  }

  return SDValue();
}

/// Replace a node with an illegal result type
/// with a new node built out of custom code.
void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc DL(N);

  switch (N->getOpcode()) {
  case ISD::ADD: {
    // Convert add (x, imm) into sub (x, -imm).
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      SDValue Sub = DAG.getNode(
          ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
          DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
      Results.push_back(Sub);
    }
    break;
  }
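  // Rationale for the ISD::ADD case above (a sketch): AVR has SUBI/SBCI
  // (subtract-immediate, with and without carry) but no general
  // add-with-immediate, so e.g. `add i32 %x, 5` is rewritten as
  // `sub i32 %x, -5`, which instruction selection can cover with a
  // SUBI/SBCI sequence.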
  default: {
    SDValue Res = LowerOperation(SDValue(N, 0), DAG);

    for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
      Results.push_back(Res.getValue(I));

    break;
  }
  }
}

/// Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  int64_t Offs = AM.BaseOffs;

  // Allow absolute addresses.
  if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
    return true;
  }

  // Flash memory instructions only allow zero offsets.
  if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
    return false;
  }

  // Allow reg+<6bit> offset.
  if (Offs < 0)
    Offs = -Offs;
  if (AM.BaseGV == nullptr && AM.HasBaseReg && AM.Scale == 0 &&
      isUInt<6>(Offs)) {
    return true;
  }

  return false;
}
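
// The reg+<6bit> form accepted above corresponds to AVR's displacement
// addressing: LDD/STD take a base pointer (Y or Z) plus a constant
// displacement of 0..63, e.g. roughly `ldd r24, Y+10` (register choice
// illustrative). Absolute addresses are covered by LDS/STS, which encode a
// full data-space address.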

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  EVT VT;
  const SDNode *Op;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    if (AVR::isProgramMemoryAccess(LD)) {
      return false;
    }
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::PRE_DEC;

    return true;
  }

  return false;
}
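
// The only pre-indexed form AVR offers is pre-decrement, e.g. roughly
// `ld r24, -X` or `st -Y, r24` (registers illustrative), which is why the
// offset must be exactly -1 for an i8 access or -2 for an i16 access (two
// single-byte steps).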

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
    if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::POST_INC;

    return true;
  }

  return false;
}
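
// This mirrors the pre-decrement case: AVR's post-indexed form is
// post-increment, e.g. roughly `ld r24, X+` or `st Z+, r24` (registers
// illustrative), so the offset must be exactly +1 (i8) or +2 (i16).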

bool AVRTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  return true;
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "AVRGenCallingConv.inc"

/// Registers for calling conventions, ordered in reverse as required by ABI.
/// Both arrays must be of the same length.
static const MCPhysReg RegList8AVR[] = {
    AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
    AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
    AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9,  AVR::R8};
static const MCPhysReg RegList8Tiny[] = {AVR::R25, AVR::R24, AVR::R23,
                                         AVR::R22, AVR::R21, AVR::R20};
static const MCPhysReg RegList16AVR[] = {
    AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22, AVR::R22R21,
    AVR::R21R20, AVR::R20R19, AVR::R19R18, AVR::R18R17, AVR::R17R16,
    AVR::R16R15, AVR::R15R14, AVR::R14R13, AVR::R13R12, AVR::R12R11,
    AVR::R11R10, AVR::R10R9,  AVR::R9R8};
static const MCPhysReg RegList16Tiny[] = {AVR::R26R25, AVR::R25R24,
                                          AVR::R24R23, AVR::R23R22,
                                          AVR::R22R21, AVR::R21R20};

static_assert(std::size(RegList8AVR) == std::size(RegList16AVR),
              "8-bit and 16-bit register arrays must be of equal length");
static_assert(std::size(RegList8Tiny) == std::size(RegList16Tiny),
              "8-bit and 16-bit register arrays must be of equal length");

/// Analyze incoming and outgoing function arguments. We need custom C++ code
/// to handle special constraints in the ABI.
/// In addition, all pieces of a certain argument have to be passed either
/// using registers or the stack but never mixing both.
template <typename ArgT>
static void analyzeArguments(TargetLowering::CallLoweringInfo *CLI,
                             const Function *F, const DataLayout *TD,
                             const SmallVectorImpl<ArgT> &Args,
                             SmallVectorImpl<CCValAssign> &ArgLocs,
                             CCState &CCInfo, bool Tiny) {
  // Choose the proper register list for argument passing according to the ABI.
  ArrayRef<MCPhysReg> RegList8;
  ArrayRef<MCPhysReg> RegList16;
  if (Tiny) {
    RegList8 = ArrayRef(RegList8Tiny, std::size(RegList8Tiny));
    RegList16 = ArrayRef(RegList16Tiny, std::size(RegList16Tiny));
  } else {
    RegList8 = ArrayRef(RegList8AVR, std::size(RegList8AVR));
    RegList16 = ArrayRef(RegList16AVR, std::size(RegList16AVR));
  }

  unsigned NumArgs = Args.size();
  // This is the index of the last used register, in RegList*.
  // -1 means R26 (R26 is never actually used in CC).
  int RegLastIdx = -1;
  // Once an argument is passed on the stack, all subsequent arguments are
  // passed on the stack as well.
  bool UseStack = false;
  for (unsigned i = 0; i != NumArgs;) {
    MVT VT = Args[i].VT;
    // We have to count the number of bytes for each function argument, that is
    // those Args with the same OrigArgIndex. This is important in case the
    // function takes an aggregate type.
    // Current argument will be between [i..j).
    unsigned ArgIndex = Args[i].OrigArgIndex;
    unsigned TotalBytes = VT.getStoreSize();
    unsigned j = i + 1;
    for (; j != NumArgs; ++j) {
      if (Args[j].OrigArgIndex != ArgIndex)
        break;
      TotalBytes += Args[j].VT.getStoreSize();
    }
    // Round up to even number of bytes.
    TotalBytes = alignTo(TotalBytes, 2);
    // Skip zero-sized arguments. Advance i past all of this argument's
    // pieces, since the inner loop below is what normally increments it;
    // a bare `continue` here would loop forever.
    if (TotalBytes == 0) {
      i = j;
      continue;
    }
    // The index of the first register to be used.
    unsigned RegIdx = RegLastIdx + TotalBytes;
    RegLastIdx = RegIdx;
    // If there are not enough registers, use the stack.
    if (RegIdx >= RegList8.size()) {
      UseStack = true;
    }
    for (; i != j; ++i) {
      MVT VT = Args[i].VT;

      if (UseStack) {
        auto evt = EVT(VT).getTypeForEVT(CCInfo.getContext());
        unsigned Offset = CCInfo.AllocateStack(TD->getTypeAllocSize(evt),
                                               TD->getABITypeAlign(evt));
        CCInfo.addLoc(
            CCValAssign::getMem(i, VT, Offset, VT, CCValAssign::Full));
      } else {
        unsigned Reg;
        if (VT == MVT::i8) {
          Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
        } else if (VT == MVT::i16) {
          Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
        } else {
          llvm_unreachable(
              "calling convention can only manage i8 and i16 types");
        }
        assert(Reg && "register not available in calling convention");
        CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
        // Registers inside a particular argument are sorted in increasing order
        // (remember the array is reversed).
        RegIdx -= VT.getStoreSize();
      }
    }
  }
}
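
// A worked example of the assignment above (illustrative; standard AVR, not
// avrtiny): for `void f(int16_t a, int8_t b)`, `a` occupies a two-byte slot
// starting at RegList16[1], i.e. the pair R25:R24, and `b` is rounded up to
// a two-byte slot whose register is RegList8[3], i.e. R22, leaving R23
// unused. This matches avr-gcc's convention of packing arguments into
// downward-counting register pairs starting at R25:R24.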

/// Count the total number of bytes needed to pass or return these arguments.
template <typename ArgT>
static unsigned
getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
  unsigned TotalBytes = 0;

  for (const ArgT &Arg : Args) {
    TotalBytes += Arg.VT.getStoreSize();
  }
  return TotalBytes;
}

/// Analyze incoming and outgoing value of returning from a function.
/// The algorithm is similar to analyzeArguments, but there can only be
/// one value, possibly an aggregate, and it is limited to 8 bytes.
template <typename ArgT>
static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
                                CCState &CCInfo, bool Tiny) {
  unsigned NumArgs = Args.size();
  unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
  // CanLowerReturn() guarantees this assertion.
  if (Tiny)
    assert(TotalBytes <= 4 &&
           "return values greater than 4 bytes cannot be lowered on AVRTiny");
  else
    assert(TotalBytes <= 8 &&
           "return values greater than 8 bytes cannot be lowered on AVR");

  // Choose the proper register list for argument passing according to the ABI.
  ArrayRef<MCPhysReg> RegList8;
  ArrayRef<MCPhysReg> RegList16;
  if (Tiny) {
    RegList8 = ArrayRef(RegList8Tiny, std::size(RegList8Tiny));
    RegList16 = ArrayRef(RegList16Tiny, std::size(RegList16Tiny));
  } else {
    RegList8 = ArrayRef(RegList8AVR, std::size(RegList8AVR));
    RegList16 = ArrayRef(RegList16AVR, std::size(RegList16AVR));
  }

  // The GCC ABI says the size is rounded up to the next even number, but in
  // practice anything larger than 4 bytes is rounded all the way up to 8.
  if (TotalBytes > 4) {
    TotalBytes = 8;
  } else {
    TotalBytes = alignTo(TotalBytes, 2);
  }

  // The index of the first register to use.
  int RegIdx = TotalBytes - 1;
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT VT = Args[i].VT;
    unsigned Reg;
    if (VT == MVT::i8) {
      Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
    } else if (VT == MVT::i16) {
      Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
    } else {
      llvm_unreachable("calling convention can only manage i8 and i16 types");
    }
    assert(Reg && "register not available in calling convention");
    CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
    // Registers are assigned in increasing order.
    RegIdx -= VT.getStoreSize();
  }
}
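
// Example of the placement above (illustrative; standard AVR): an i16
// return value has TotalBytes = 2, so RegIdx starts at 1 and the value
// lands in RegList16[1] = R25:R24; an i32 return rounds to 4 bytes, so its
// low half goes to RegList16[3] = R23:R22 and its high half to R25:R24,
// i.e. R22 holds the least significant byte, as avr-gcc does.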

SDValue AVRTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // Variadic functions do not need all the analysis below.
  if (isVarArg) {
    CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
  } else {
    analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo,
                     Subtarget.hasTinyEncoding());
  }

  SDValue ArgValue;
  for (CCValAssign &VA : ArgLocs) {

    // Arguments stored on registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      } else {
        llvm_unreachable("Unknown argument type!");
      }

      Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // NOTE: Clang should not promote any i8 into i16, but for safety the
      // following code handles zexts or sexts generated by other front ends.
      // Otherwise: if this is an 8-bit value, it is really passed promoted to
      // 16 bits. Insert an assert[sz]ext to capture this, then truncate to
      // the right size.
1366       switch (VA.getLocInfo()) {
1367       default:
1368         llvm_unreachable("Unknown loc info!");
1369       case CCValAssign::Full:
1370         break;
1371       case CCValAssign::BCvt:
1372         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
1373         break;
1374       case CCValAssign::SExt:
1375         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1376                                DAG.getValueType(VA.getValVT()));
1377         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1378         break;
1379       case CCValAssign::ZExt:
1380         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1381                                DAG.getValueType(VA.getValVT()));
1382         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1383         break;
1384       }
1385 
1386       InVals.push_back(ArgValue);
1387     } else {
1388       // Only arguments passed on the stack should make it here.
1389       assert(VA.isMemLoc());
1390 
1391       EVT LocVT = VA.getLocVT();
1392 
1393       // Create the frame index object for this incoming parameter.
1394       int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1395                                      VA.getLocMemOffset(), true);
1396 
1397       // Create the SelectionDAG nodes corresponding to a load
1398       // from this parameter.
1399       SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
1400       InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
1401                                    MachinePointerInfo::getFixedStack(MF, FI)));
1402     }
1403   }
1404 
1405   // If the function takes a variable number of arguments, make a frame index
1406   // for the start of the first vararg value, for use in expanding llvm.va_start.
1407   if (isVarArg) {
1408     unsigned StackSize = CCInfo.getNextStackOffset();
1409     AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1410 
1411     AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
1412   }
1413 
1414   return Chain;
1415 }
1416 
1417 //===----------------------------------------------------------------------===//
1418 //                  Call Calling Convention Implementation
1419 //===----------------------------------------------------------------------===//
1420 
1421 SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1422                                      SmallVectorImpl<SDValue> &InVals) const {
1423   SelectionDAG &DAG = CLI.DAG;
1424   SDLoc &DL = CLI.DL;
1425   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1426   SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1427   SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1428   SDValue Chain = CLI.Chain;
1429   SDValue Callee = CLI.Callee;
1430   bool &isTailCall = CLI.IsTailCall;
1431   CallingConv::ID CallConv = CLI.CallConv;
1432   bool isVarArg = CLI.IsVarArg;
1433 
1434   MachineFunction &MF = DAG.getMachineFunction();
1435 
1436   // AVR does not yet support tail call optimization.
1437   isTailCall = false;
1438 
1439   // Analyze operands of the call, assigning locations to each operand.
1440   SmallVector<CCValAssign, 16> ArgLocs;
1441   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1442                  *DAG.getContext());
1443 
1444   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
1445   // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
1446   // node so that legalize doesn't hack it.
1447   const Function *F = nullptr;
1448   if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1449     const GlobalValue *GV = G->getGlobal();
1450     if (isa<Function>(GV))
1451       F = cast<Function>(GV);
1452     Callee =
1453         DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
1454   } else if (const ExternalSymbolSDNode *ES =
1455                  dyn_cast<ExternalSymbolSDNode>(Callee)) {
1456     Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
1457                                          getPointerTy(DAG.getDataLayout()));
1458   }
1459 
1460   // Variadic functions do not need all the analysis below.
1461   if (isVarArg) {
1462     CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
1463   } else {
1464     analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo,
1465                      Subtarget.hasTinyEncoding());
1466   }
1467 
1468   // Get a count of how many bytes are to be pushed on the stack.
1469   unsigned NumBytes = CCInfo.getNextStackOffset();
1470 
1471   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1472 
1473   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1474 
1475   // First, walk the register assignments, inserting copies.
1476   unsigned AI, AE;
1477   bool HasStackArgs = false;
1478   for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
1479     CCValAssign &VA = ArgLocs[AI];
1480     EVT RegVT = VA.getLocVT();
1481     SDValue Arg = OutVals[AI];
1482 
1483     // Promote the value if needed. With Clang this should not happen.
1484     switch (VA.getLocInfo()) {
1485     default:
1486       llvm_unreachable("Unknown loc info!");
1487     case CCValAssign::Full:
1488       break;
1489     case CCValAssign::SExt:
1490       Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
1491       break;
1492     case CCValAssign::ZExt:
1493       Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
1494       break;
1495     case CCValAssign::AExt:
1496       Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
1497       break;
1498     case CCValAssign::BCvt:
1499       Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
1500       break;
1501     }
1502 
1503     // Stop when we encounter a stack argument; those are processed in the
1504     // separate loop below.
1505     if (VA.isMemLoc()) {
1506       HasStackArgs = true;
1507       break;
1508     }
1509 
1510     // Arguments that can be passed in registers must be kept in the RegsToPass
1511     // vector.
1512     RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1513   }
1514 
1515   // Second, the stack arguments have to be walked.
1516   // Previously this code created chained stores, but those chained stores
1517   // appear to be unchained in the legalization phase. Therefore, do not
1518   // attempt to chain them here. In fact, chaining them here somehow causes
1519   // the first and second store to be reversed, which is the exact opposite
1520   // of the intended effect.
1521   if (HasStackArgs) {
1522     SmallVector<SDValue, 8> MemOpChains;
1523     for (; AI != AE; AI++) {
1524       CCValAssign &VA = ArgLocs[AI];
1525       SDValue Arg = OutVals[AI];
1526 
1527       assert(VA.isMemLoc());
1528 
1529       // SP points one stack slot further down, so add one to adjust it.
1530       SDValue PtrOff = DAG.getNode(
1531           ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
1532           DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
1533           DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));
1534 
1535       MemOpChains.push_back(
1536           DAG.getStore(Chain, DL, Arg, PtrOff,
1537                        MachinePointerInfo::getStack(MF, VA.getLocMemOffset())));
1538     }
1539 
1540     if (!MemOpChains.empty())
1541       Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1542   }
1543 
1544   // Build a sequence of copy-to-reg nodes chained together with token chain and
1545   // flag operands which copy the outgoing args into registers. The InFlag is
1546   // necessary since all emitted instructions must be stuck together.
1547   SDValue InFlag;
1548   for (auto Reg : RegsToPass) {
1549     Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
1550     InFlag = Chain.getValue(1);
1551   }
1552 
1553   // Returns a chain and a flag for the return value copy to use.
1554   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1555   SmallVector<SDValue, 8> Ops;
1556   Ops.push_back(Chain);
1557   Ops.push_back(Callee);
1558 
1559   // Add argument registers to the end of the list so that they are known live
1560   // into the call.
1561   for (auto Reg : RegsToPass) {
1562     Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
1563   }
1564 
1565   // The zero register (usually R1) must be passed as an implicit register so
1566   // that this register is correctly zeroed in interrupts.
1567   Ops.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));
1568 
1569   // Add a register mask operand representing the call-preserved registers.
1570   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1571   const uint32_t *Mask =
1572       TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
1573   assert(Mask && "Missing call preserved mask for calling convention");
1574   Ops.push_back(DAG.getRegisterMask(Mask));
1575 
1576   if (InFlag.getNode()) {
1577     Ops.push_back(InFlag);
1578   }
1579 
1580   Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
1581   InFlag = Chain.getValue(1);
1582 
1583   // Create the CALLSEQ_END node.
1584   Chain = DAG.getCALLSEQ_END(Chain, NumBytes, 0, InFlag, DL);
1585 
1586   if (!Ins.empty()) {
1587     InFlag = Chain.getValue(1);
1588   }
1589 
1590   // Handle result values, copying them out of physregs into vregs that we
1591   // return.
1592   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
1593                          InVals);
1594 }
1595 
1596 /// Lower the result values of a call into the
1597 /// appropriate copies out of the corresponding physical registers.
1598 ///
1599 SDValue AVRTargetLowering::LowerCallResult(
1600     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
1601     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1602     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1603 
1604   // Assign locations to each value returned by this call.
1605   SmallVector<CCValAssign, 16> RVLocs;
1606   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1607                  *DAG.getContext());
1608 
1609   // Handle runtime calling conventions.
1610   if (CallConv == CallingConv::AVR_BUILTIN) {
1611     CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
1612   } else {
1613     analyzeReturnValues(Ins, CCInfo, Subtarget.hasTinyEncoding());
1614   }
1615 
1616   // Copy all of the result registers out of their specified physregs.
1617   for (CCValAssign const &RVLoc : RVLocs) {
1618     Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
1619                                InFlag)
1620                 .getValue(1);
1621     InFlag = Chain.getValue(2);
1622     InVals.push_back(Chain.getValue(0));
1623   }
1624 
1625   return Chain;
1626 }
1627 
1628 //===----------------------------------------------------------------------===//
1629 //               Return Value Calling Convention Implementation
1630 //===----------------------------------------------------------------------===//
1631 
1632 bool AVRTargetLowering::CanLowerReturn(
1633     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
1634     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
1635   if (CallConv == CallingConv::AVR_BUILTIN) {
1636     SmallVector<CCValAssign, 16> RVLocs;
1637     CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
1638     return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
1639   }
1640 
1641   unsigned TotalBytes = getTotalArgumentsSizeInBytes(Outs);
1642   return TotalBytes <= (unsigned)(Subtarget.hasTinyEncoding() ? 4 : 8);
1643 }
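
// For illustration (hypothetical C-level signatures, not part of this file):
// under the 8-byte limit on regular AVR devices an int64_t is still returned
// in registers, while a 10-byte struct is returned through an sret pointer.
// On avr-tiny the limit drops to 4 bytes, so even an int64_t return value is
// demoted to sret.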
1644 
1645 SDValue
1646 AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1647                                bool isVarArg,
1648                                const SmallVectorImpl<ISD::OutputArg> &Outs,
1649                                const SmallVectorImpl<SDValue> &OutVals,
1650                                const SDLoc &dl, SelectionDAG &DAG) const {
1651   // CCValAssign - represents the assignment of the return value to locations.
1652   SmallVector<CCValAssign, 16> RVLocs;
1653 
1654   // CCState - Info about the registers and stack slots.
1655   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1656                  *DAG.getContext());
1657 
1658   MachineFunction &MF = DAG.getMachineFunction();
1659 
1660   // Analyze return values.
1661   if (CallConv == CallingConv::AVR_BUILTIN) {
1662     CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
1663   } else {
1664     analyzeReturnValues(Outs, CCInfo, Subtarget.hasTinyEncoding());
1665   }
1666 
1667   SDValue Flag;
1668   SmallVector<SDValue, 4> RetOps(1, Chain);
1669   // Copy the result values into the output registers.
1670   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1671     CCValAssign &VA = RVLocs[i];
1672     assert(VA.isRegLoc() && "Can only return in registers!");
1673 
1674     Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
1675 
1676     // Guarantee that all emitted copies are stuck together with flags.
1677     Flag = Chain.getValue(1);
1678     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1679   }
1680 
1681   // Don't emit the ret/reti instruction when the naked attribute is present in
1682   // the function being compiled.
1683   if (MF.getFunction().getAttributes().hasFnAttr(Attribute::Naked)) {
1684     return Chain;
1685   }
1686 
1687   const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
1688 
1689   if (!AFI->isInterruptOrSignalHandler()) {
1690     // The return instruction has an implicit zero register operand: it must
1691     // contain zero on return.
1692     // This is not needed in interrupts, however, where the zero register is
1693     // handled specially (only pushed/popped when needed).
1694     RetOps.push_back(DAG.getRegister(Subtarget.getZeroRegister(), MVT::i8));
1695   }
1696 
1697   unsigned RetOpc =
1698       AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_FLAG : AVRISD::RET_FLAG;
1699 
1700   RetOps[0] = Chain; // Update chain.
1701 
1702   if (Flag.getNode()) {
1703     RetOps.push_back(Flag);
1704   }
1705 
1706   return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
1707 }
1708 
1709 //===----------------------------------------------------------------------===//
1710 //  Custom Inserters
1711 //===----------------------------------------------------------------------===//
1712 
1713 MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
1714                                                   MachineBasicBlock *BB) const {
1715   unsigned Opc;
1716   const TargetRegisterClass *RC;
1717   bool HasRepeatedOperand = false;
1718   bool HasZeroOperand = false;
1719   MachineFunction *F = BB->getParent();
1720   MachineRegisterInfo &RI = F->getRegInfo();
1721   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1722   DebugLoc dl = MI.getDebugLoc();
1723 
1724   switch (MI.getOpcode()) {
1725   default:
1726     llvm_unreachable("Invalid shift opcode!");
1727   case AVR::Lsl8:
1728     Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
1729     RC = &AVR::GPR8RegClass;
1730     HasRepeatedOperand = true;
1731     break;
1732   case AVR::Lsl16:
1733     Opc = AVR::LSLWRd;
1734     RC = &AVR::DREGSRegClass;
1735     break;
1736   case AVR::Asr8:
1737     Opc = AVR::ASRRd;
1738     RC = &AVR::GPR8RegClass;
1739     break;
1740   case AVR::Asr16:
1741     Opc = AVR::ASRWRd;
1742     RC = &AVR::DREGSRegClass;
1743     break;
1744   case AVR::Lsr8:
1745     Opc = AVR::LSRRd;
1746     RC = &AVR::GPR8RegClass;
1747     break;
1748   case AVR::Lsr16:
1749     Opc = AVR::LSRWRd;
1750     RC = &AVR::DREGSRegClass;
1751     break;
1752   case AVR::Rol8:
1753     Opc = AVR::ROLBRd;
1754     RC = &AVR::GPR8RegClass;
1755     HasZeroOperand = true;
1756     break;
1757   case AVR::Rol16:
1758     Opc = AVR::ROLWRd;
1759     RC = &AVR::DREGSRegClass;
1760     break;
1761   case AVR::Ror8:
1762     Opc = AVR::RORBRd;
1763     RC = &AVR::GPR8RegClass;
1764     break;
1765   case AVR::Ror16:
1766     Opc = AVR::RORWRd;
1767     RC = &AVR::DREGSRegClass;
1768     break;
1769   }
1770 
1771   const BasicBlock *LLVM_BB = BB->getBasicBlock();
1772 
1773   MachineFunction::iterator I;
1774   for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
1775     ;
1776   if (I != F->end())
1777     ++I;
1778 
1779   // Create the loop, check and remainder blocks.
1780   MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
1781   MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
1782   MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);
1783 
1784   F->insert(I, LoopBB);
1785   F->insert(I, CheckBB);
1786   F->insert(I, RemBB);
1787 
1788   // Update machine-CFG edges by transferring all successors of the current
1789   // block to the block containing instructions after the shift.
1790   RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
1791                 BB->end());
1792   RemBB->transferSuccessorsAndUpdatePHIs(BB);
1793 
1794   // Add edges BB => CheckBB => RemBB, CheckBB => LoopBB, LoopBB => CheckBB.
1795   BB->addSuccessor(CheckBB);
1796   LoopBB->addSuccessor(CheckBB);
1797   CheckBB->addSuccessor(LoopBB);
1798   CheckBB->addSuccessor(RemBB);
1799 
1800   Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
1801   Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
1802   Register ShiftReg = RI.createVirtualRegister(RC);
1803   Register ShiftReg2 = RI.createVirtualRegister(RC);
1804   Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
1805   Register SrcReg = MI.getOperand(1).getReg();
1806   Register DstReg = MI.getOperand(0).getReg();
1807 
1808   // BB:
1809   // rjmp CheckBB
1810   BuildMI(BB, dl, TII.get(AVR::RJMPk)).addMBB(CheckBB);
1811 
1812   // LoopBB:
1813   // ShiftReg2 = shift ShiftReg
1814   auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
1815   if (HasRepeatedOperand)
1816     ShiftMI.addReg(ShiftReg);
1817   if (HasZeroOperand)
1818     ShiftMI.addReg(Subtarget.getZeroRegister());
1819 
1820   // CheckBB:
1821   // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
1822   // ShiftAmt = phi [%N,      BB], [%ShiftAmt2, LoopBB]
1823   // DestReg  = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
1824   // ShiftAmt2 = ShiftAmt - 1;
1825   // if (ShiftAmt2 >= 0) goto LoopBB;
1826   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
1827       .addReg(SrcReg)
1828       .addMBB(BB)
1829       .addReg(ShiftReg2)
1830       .addMBB(LoopBB);
1831   BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
1832       .addReg(ShiftAmtSrcReg)
1833       .addMBB(BB)
1834       .addReg(ShiftAmtReg2)
1835       .addMBB(LoopBB);
1836   BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
1837       .addReg(SrcReg)
1838       .addMBB(BB)
1839       .addReg(ShiftReg2)
1840       .addMBB(LoopBB);
1841 
1842   BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
1843   BuildMI(CheckBB, dl, TII.get(AVR::BRPLk)).addMBB(LoopBB);
1844 
1845   MI.eraseFromParent(); // The pseudo instruction is gone now.
1846   return RemBB;
1847 }
1848 
1849 // Do a multibyte AVR shift. Insert shift instructions and put the output
1850 // registers in the Regs array.
1851 // Because AVR does not have a normal shift instruction (only a single bit shift
1852 // instruction), we have to emulate this behavior with other instructions.
1853 // It first tries large steps (moving registers around) and then smaller steps
1854 // like single bit shifts.
1855 // Large shifts actually reduce the number of shifted registers, so the below
1856 // algorithms have to work independently of the number of registers that are
1857 // shifted.
1858 // For more information and background, see this blog post:
1859 // https://aykevl.nl/2021/02/avr-bitshift
1860 static void insertMultibyteShift(MachineInstr &MI, MachineBasicBlock *BB,
1861                                  MutableArrayRef<std::pair<Register, int>> Regs,
1862                                  ISD::NodeType Opc, int64_t ShiftAmt) {
1863   const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1864   const AVRSubtarget &STI = BB->getParent()->getSubtarget<AVRSubtarget>();
1865   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
1866   const DebugLoc &dl = MI.getDebugLoc();
1867 
1868   const bool ShiftLeft = Opc == ISD::SHL;
1869   const bool ArithmeticShift = Opc == ISD::SRA;
1870 
1871   // Zero a register, for use in later operations.
1872   Register ZeroReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1873   BuildMI(*BB, MI, dl, TII.get(AVR::COPY), ZeroReg)
1874       .addReg(STI.getZeroRegister());
1875 
1876   // Do a shift modulo 6 or 7. This is a bit more complicated than most shifts
1877   // and is hard to compose with the rest, so these are special cased.
1878   // The basic idea is to shift one or two bits in the opposite direction and
1879   // then move registers around to get the correct end result.
1880   if (ShiftLeft && (ShiftAmt % 8) >= 6) {
1881     // Left shift modulo 6 or 7.
1882 
1883     // Create a slice of the registers we're going to modify, to ease working
1884     // with them.
1885     size_t ShiftRegsOffset = ShiftAmt / 8;
1886     size_t ShiftRegsSize = Regs.size() - ShiftRegsOffset;
1887     MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1888         Regs.slice(ShiftRegsOffset, ShiftRegsSize);
1889 
1890     // Shift one to the right, keeping the least significant bit as the carry
1891     // bit.
1892     insertMultibyteShift(MI, BB, ShiftRegs, ISD::SRL, 1);
1893 
1894     // Rotate the least significant bit from the carry bit into a new register
1895     // (that starts out zero).
1896     Register LowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1897     BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), LowByte).addReg(ZeroReg);
1898 
1899     // Shift one more to the right if this is a modulo-6 shift.
1900     if (ShiftAmt % 8 == 6) {
1901       insertMultibyteShift(MI, BB, ShiftRegs, ISD::SRL, 1);
1902       Register NewLowByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1903       BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), NewLowByte).addReg(LowByte);
1904       LowByte = NewLowByte;
1905     }
1906 
1907     // Move all registers to the left, zeroing the bottom registers as needed.
1908     for (size_t I = 0; I < Regs.size(); I++) {
1909       int ShiftRegsIdx = I + 1;
1910       if (ShiftRegsIdx < (int)ShiftRegs.size()) {
1911         Regs[I] = ShiftRegs[ShiftRegsIdx];
1912       } else if (ShiftRegsIdx == (int)ShiftRegs.size()) {
1913         Regs[I] = std::pair(LowByte, 0);
1914       } else {
1915         Regs[I] = std::pair(ZeroReg, 0);
1916       }
1917     }
1918 
1919     return;
1920   }
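
  // Worked example of the trick above (illustrative only): a 16-bit logical
  // shift left by 7. Rather than emitting seven single-bit shifts per byte,
  // shift right once and rename the bytes:
  //   lsr hi              ; \ shift [hi:lo] right by one,
  //   ror lo              ; / carry = old bit 0 of lo
  //   ror new             ; new = carry << 7 (new started out as zero)
  //   result = [lo:new]   ; rename registers one position to the left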
1921 
1922   // Right shift modulo 6 or 7.
1923   if (!ShiftLeft && (ShiftAmt % 8) >= 6) {
1924     // Create a view on the registers we're going to modify, to ease working
1925     // with them.
1926     size_t ShiftRegsSize = Regs.size() - (ShiftAmt / 8);
1927     MutableArrayRef<std::pair<Register, int>> ShiftRegs =
1928         Regs.slice(0, ShiftRegsSize);
1929 
1930     // Shift one to the left.
1931     insertMultibyteShift(MI, BB, ShiftRegs, ISD::SHL, 1);
1932 
1933     // Sign or zero extend the most significant register into a new register.
1934     // The HighByte is the byte that still has one (or two) bits from the
1935     // original value. The ExtByte is purely a zero/sign extend byte (all bits
1936     // are either 0 or 1).
1937     Register HighByte = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1938     Register ExtByte = 0;
1939     if (ArithmeticShift) {
1940       // Sign-extend bit that was shifted out last.
1941       BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), HighByte)
1942           .addReg(HighByte, RegState::Undef)
1943           .addReg(HighByte, RegState::Undef);
1944       ExtByte = HighByte;
1945       // The highest bit of the original value is the same as the zero-extend
1946       // byte, so HighByte and ExtByte are the same.
1947     } else {
1948       // Use the zero register for zero extending.
1949       ExtByte = ZeroReg;
1950       // Rotate most significant bit into a new register (that starts out zero).
1951       BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), HighByte)
1952           .addReg(ExtByte)
1953           .addReg(ExtByte);
1954     }
1955 
1956     // Shift one more to the left for modulo 6 shifts.
1957     if (ShiftAmt % 8 == 6) {
1958       insertMultibyteShift(MI, BB, ShiftRegs, ISD::SHL, 1);
1959       // Shift the topmost bit into the HighByte.
1960       Register NewExt = MRI.createVirtualRegister(&AVR::GPR8RegClass);
1961       BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), NewExt)
1962           .addReg(HighByte)
1963           .addReg(HighByte);
1964       HighByte = NewExt;
1965     }
1966 
1967     // Move all to the right, while sign or zero extending.
1968     for (int I = Regs.size() - 1; I >= 0; I--) {
1969       int ShiftRegsIdx = I - (Regs.size() - ShiftRegs.size()) - 1;
1970       if (ShiftRegsIdx >= 0) {
1971         Regs[I] = ShiftRegs[ShiftRegsIdx];
1972       } else if (ShiftRegsIdx == -1) {
1973         Regs[I] = std::pair(HighByte, 0);
1974       } else {
1975         Regs[I] = std::pair(ExtByte, 0);
1976       }
1977     }
1978 
1979     return;
1980   }
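
  // Worked example (illustrative only): a 16-bit arithmetic shift right by 7.
  // Shift [hi:lo] left once so the carry holds the old sign bit, materialize
  // the sign byte with sbc, and rename:
  //   lsl lo              ; \ shift [hi:lo] left by one,
  //   rol hi              ; / carry = old bit 7 of hi (the sign bit)
  //   sbc s, s            ; s = 0x00 or 0xff depending on the carry
  //   result = [s:hi]     ; rename registers one position to the right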
1981 
1982   // For shift amounts of at least one full register (8 bits), simply rename the
1983   // registers and zero the bottom registers.
1984   while (ShiftLeft && ShiftAmt >= 8) {
1985     // Move all registers one to the left.
1986     for (size_t I = 0; I < Regs.size() - 1; I++) {
1987       Regs[I] = Regs[I + 1];
1988     }
1989 
1990     // Zero the least significant register.
1991     Regs[Regs.size() - 1] = std::pair(ZeroReg, 0);
1992 
1993     // Continue shifts with the leftover registers.
1994     Regs = Regs.drop_back(1);
1995 
1996     ShiftAmt -= 8;
1997   }
1998 
1999   // And again, the same for right shifts.
2000   Register ShrExtendReg = 0;
2001   if (!ShiftLeft && ShiftAmt >= 8) {
2002     if (ArithmeticShift) {
2003       // Sign extend the most significant register into ShrExtendReg.
2004       ShrExtendReg = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2005       Register Tmp = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2006       BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Tmp)
2007           .addReg(Regs[0].first, 0, Regs[0].second)
2008           .addReg(Regs[0].first, 0, Regs[0].second);
2009       BuildMI(*BB, MI, dl, TII.get(AVR::SBCRdRr), ShrExtendReg)
2010           .addReg(Tmp)
2011           .addReg(Tmp);
2012     } else {
2013       ShrExtendReg = ZeroReg;
2014     }
2015     for (; ShiftAmt >= 8; ShiftAmt -= 8) {
2016       // Move all registers one to the right.
2017       for (size_t I = Regs.size() - 1; I != 0; I--) {
2018         Regs[I] = Regs[I - 1];
2019       }
2020 
2021       // Zero or sign extend the most significant register.
2022       Regs[0] = std::pair(ShrExtendReg, 0);
2023 
2024       // Continue shifts with the leftover registers.
2025       Regs = Regs.drop_front(1);
2026     }
2027   }
2028 
2029   // The bigger shifts are already handled above.
2030   assert((ShiftAmt < 8) && "Unexpected shift amount");
2031 
2032   // Shift by four bits, using a complicated swap/eor/andi/eor sequence.
2033   // It only works for logical shifts because the bits shifted in are all
2034   // zeroes.
2035   // To shift a single byte right, it produces code like this:
2036   //   swap r0
2037   //   andi r0, 0x0f
2038   // For a two-byte (16-bit) shift, it adds the following instructions to shift
2039   // the upper byte into the lower byte:
2040   //   swap r1
2041   //   eor r0, r1
2042   //   andi r1, 0x0f
2043   //   eor r0, r1
2044   // For bigger shifts, it repeats the above sequence. For example, for a 3-byte
2045   // (24-bit) shift it adds:
2046   //   swap r2
2047   //   eor r1, r2
2048   //   andi r2, 0x0f
2049   //   eor r1, r2
2050   if (!ArithmeticShift && ShiftAmt >= 4) {
2051     Register Prev = 0;
2052     for (size_t I = 0; I < Regs.size(); I++) {
2053       size_t Idx = ShiftLeft ? I : Regs.size() - I - 1;
2054       Register SwapReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2055       BuildMI(*BB, MI, dl, TII.get(AVR::SWAPRd), SwapReg)
2056           .addReg(Regs[Idx].first, 0, Regs[Idx].second);
2057       if (I != 0) {
2058         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2059         BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)
2060             .addReg(Prev)
2061             .addReg(SwapReg);
2062         Prev = R;
2063       }
2064       Register AndReg = MRI.createVirtualRegister(&AVR::LD8RegClass);
2065       BuildMI(*BB, MI, dl, TII.get(AVR::ANDIRdK), AndReg)
2066           .addReg(SwapReg)
2067           .addImm(ShiftLeft ? 0xf0 : 0x0f);
2068       if (I != 0) {
2069         Register R = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2070         BuildMI(*BB, MI, dl, TII.get(AVR::EORRdRr), R)
2071             .addReg(Prev)
2072             .addReg(AndReg);
2073         size_t PrevIdx = ShiftLeft ? Idx - 1 : Idx + 1;
2074         Regs[PrevIdx] = std::pair(R, 0);
2075       }
2076       Prev = AndReg;
2077       Regs[Idx] = std::pair(AndReg, 0);
2078     }
2079     ShiftAmt -= 4;
2080   }
2081 
2082   // Shift by one. This is the fallback that always works, and the shift
2083   // operation that is used for 1, 2, and 3 bit shifts.
2084   while (ShiftLeft && ShiftAmt) {
2085     // Shift one to the left.
2086     for (ssize_t I = Regs.size() - 1; I >= 0; I--) {
2087       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2088       Register In = Regs[I].first;
2089       Register InSubreg = Regs[I].second;
2090       if (I == (ssize_t)Regs.size() - 1) { // first iteration
2091         BuildMI(*BB, MI, dl, TII.get(AVR::ADDRdRr), Out)
2092             .addReg(In, 0, InSubreg)
2093             .addReg(In, 0, InSubreg);
2094       } else {
2095         BuildMI(*BB, MI, dl, TII.get(AVR::ADCRdRr), Out)
2096             .addReg(In, 0, InSubreg)
2097             .addReg(In, 0, InSubreg);
2098       }
2099       Regs[I] = std::pair(Out, 0);
2100     }
2101     ShiftAmt--;
2102   }
2103   while (!ShiftLeft && ShiftAmt) {
2104     // Shift one to the right.
2105     for (size_t I = 0; I < Regs.size(); I++) {
2106       Register Out = MRI.createVirtualRegister(&AVR::GPR8RegClass);
2107       Register In = Regs[I].first;
2108       Register InSubreg = Regs[I].second;
2109       if (I == 0) {
2110         unsigned Opc = ArithmeticShift ? AVR::ASRRd : AVR::LSRRd;
2111         BuildMI(*BB, MI, dl, TII.get(Opc), Out).addReg(In, 0, InSubreg);
2112       } else {
2113         BuildMI(*BB, MI, dl, TII.get(AVR::RORRd), Out).addReg(In, 0, InSubreg);
2114       }
2115       Regs[I] = std::pair(Out, 0);
2116     }
2117     ShiftAmt--;
2118   }
2119 
2120   if (ShiftAmt != 0) {
2121     llvm_unreachable("don't know how to shift!"); // sanity check
2122   }
2123 }
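
// A minimal host-side sketch of the decomposition above (illustrative only;
// it ignores the modulo 6/7 special cases and assumes <cstdint>). It mirrors
// the order in which insertMultibyteShift lowers a logical left shift, and is
// kept inside #if 0 so it does not participate in the build.
#if 0
static uint32_t exampleShlDecomposition(uint32_t Value, unsigned ShiftAmt) {
  // Whole bytes: corresponds to renaming registers and zeroing the bottom one.
  for (; ShiftAmt >= 8; ShiftAmt -= 8)
    Value <<= 8;
  // Four bits at once: corresponds to the swap/eor/andi/eor sequence.
  if (ShiftAmt >= 4) {
    Value <<= 4;
    ShiftAmt -= 4;
  }
  // The remaining 1-3 bits: corresponds to the lsl/rol (add/adc) chain.
  for (; ShiftAmt > 0; --ShiftAmt)
    Value <<= 1;
  return Value;
}
#endif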
2124 
2125 // Do a wide (32-bit) shift.
2126 MachineBasicBlock *
2127 AVRTargetLowering::insertWideShift(MachineInstr &MI,
2128                                    MachineBasicBlock *BB) const {
2129   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2130   const DebugLoc &dl = MI.getDebugLoc();
2131 
2132   // How much to shift to the right (meaning: a negative number indicates a left
2133   // shift).
2134   int64_t ShiftAmt = MI.getOperand(4).getImm();
2135   ISD::NodeType Opc;
2136   switch (MI.getOpcode()) {
2137   case AVR::Lsl32:
2138     Opc = ISD::SHL;
2139     break;
2140   case AVR::Lsr32:
2141     Opc = ISD::SRL;
2142     break;
2143   case AVR::Asr32:
2144     Opc = ISD::SRA;
2145     break;
2146   }
2147 
2148   // Read the input registers, with the most significant register at index 0.
2149   std::array<std::pair<Register, int>, 4> Registers = {
2150       std::pair(MI.getOperand(3).getReg(), AVR::sub_hi),
2151       std::pair(MI.getOperand(3).getReg(), AVR::sub_lo),
2152       std::pair(MI.getOperand(2).getReg(), AVR::sub_hi),
2153       std::pair(MI.getOperand(2).getReg(), AVR::sub_lo),
2154   };
2155 
2156   // Do the shift. The registers are modified in-place.
2157   insertMultibyteShift(MI, BB, Registers, Opc, ShiftAmt);
2158 
2159   // Combine the 8-bit registers into 16-bit register pairs.
2160   // This done either from LSB to MSB or from MSB to LSB, depending on the
2161   // shift. It's an optimization so that the register allocator will use the
2162   // fewest movs possible (which order we use isn't a correctness issue, just an
2163   // optimization issue).
2164   //   - lsl prefers starting from the most significant byte (2nd case).
2165   //   - lshr prefers starting from the least significant byte (1st case).
2166   //   - for ashr it depends on the number of shifted bytes.
2167   // Some shift operations still don't get the most optimal mov sequences even
2168   // with this distinction. TODO: figure out why and try to fix it (but we're
2169   // already equal to or faster than avr-gcc in all cases except ashr 8).
2170   if (Opc != ISD::SHL &&
2171       (Opc != ISD::SRA || (ShiftAmt < 16 || ShiftAmt >= 22))) {
2172     // Use the resulting registers starting with the least significant byte.
2173     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2174         .addReg(Registers[3].first, 0, Registers[3].second)
2175         .addImm(AVR::sub_lo)
2176         .addReg(Registers[2].first, 0, Registers[2].second)
2177         .addImm(AVR::sub_hi);
2178     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2179         .addReg(Registers[1].first, 0, Registers[1].second)
2180         .addImm(AVR::sub_lo)
2181         .addReg(Registers[0].first, 0, Registers[0].second)
2182         .addImm(AVR::sub_hi);
2183   } else {
2184     // Use the resulting registers starting with the most significant byte.
2185     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(1).getReg())
2186         .addReg(Registers[0].first, 0, Registers[0].second)
2187         .addImm(AVR::sub_hi)
2188         .addReg(Registers[1].first, 0, Registers[1].second)
2189         .addImm(AVR::sub_lo);
2190     BuildMI(*BB, MI, dl, TII.get(AVR::REG_SEQUENCE), MI.getOperand(0).getReg())
2191         .addReg(Registers[2].first, 0, Registers[2].second)
2192         .addImm(AVR::sub_hi)
2193         .addReg(Registers[3].first, 0, Registers[3].second)
2194         .addImm(AVR::sub_lo);
2195   }
2196 
2197   // Remove the pseudo instruction.
2198   MI.eraseFromParent();
2199   return BB;
2200 }
2201 
2202 static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
2203   if (I->getOpcode() == AVR::COPY) {
2204     Register SrcReg = I->getOperand(1).getReg();
2205     return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
2206   }
2207 
2208   return false;
2209 }
2210 
2211 // The mul instructions wreak havoc on our zero_reg R1. We need to clear it
2212 // after the result has been evacuated. This is probably not the best way to do
2213 // it, but it works for now.
2214 MachineBasicBlock *AVRTargetLowering::insertMul(MachineInstr &MI,
2215                                                 MachineBasicBlock *BB) const {
2216   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2217   MachineBasicBlock::iterator I(MI);
2218   ++I; // in any case insert *after* the mul instruction
2219   if (isCopyMulResult(I))
2220     ++I;
2221   if (isCopyMulResult(I))
2222     ++I;
2223   BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::EORRdRr), AVR::R1)
2224       .addReg(AVR::R1)
2225       .addReg(AVR::R1);
2226   return BB;
2227 }
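
// For reference, the code around an 8-bit multiply ends up looking roughly
// like this (illustrative register choices):
//   mul r24, r22   ; result lands in r1:r0, clobbering the zero register r1
//   mov r24, r0    ; COPY of the result out of r0 (and r1 for wide results)
//   clr r1         ; the eor r1, r1 inserted above restores the zero register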
2228 
2229 // Insert a read from the zero register.
2230 MachineBasicBlock *
2231 AVRTargetLowering::insertCopyZero(MachineInstr &MI,
2232                                   MachineBasicBlock *BB) const {
2233   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2234   MachineBasicBlock::iterator I(MI);
2235   BuildMI(*BB, I, MI.getDebugLoc(), TII.get(AVR::COPY))
2236       .add(MI.getOperand(0))
2237       .addReg(Subtarget.getZeroRegister());
2238   MI.eraseFromParent();
2239   return BB;
2240 }
2241 
2242 // Lower an atomicrmw operation to: disable interrupts, do the operation, and
2243 // restore interrupts. This works because all AVR microcontrollers are single core.
2244 MachineBasicBlock *AVRTargetLowering::insertAtomicArithmeticOp(
2245     MachineInstr &MI, MachineBasicBlock *BB, unsigned Opcode, int Width) const {
2246   MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
2247   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2248   MachineBasicBlock::iterator I(MI);
2249   DebugLoc dl = MI.getDebugLoc();
2250 
2251   // Example instruction sequence, for an atomic 8-bit add:
2252   //   ldi r25, 5
2253   //   in r0, SREG
2254   //   cli
2255   //   ld r24, X
2256   //   add r25, r24
2257   //   st X, r25
2258   //   out SREG, r0
2259 
2260   const TargetRegisterClass *RC =
2261       (Width == 8) ? &AVR::GPR8RegClass : &AVR::DREGSRegClass;
2262   unsigned LoadOpcode = (Width == 8) ? AVR::LDRdPtr : AVR::LDWRdPtr;
2263   unsigned StoreOpcode = (Width == 8) ? AVR::STPtrRr : AVR::STWPtrRr;
2264 
2265   // Disable interrupts.
2266   BuildMI(*BB, I, dl, TII.get(AVR::INRdA), Subtarget.getTmpRegister())
2267       .addImm(Subtarget.getIORegSREG());
2268   BuildMI(*BB, I, dl, TII.get(AVR::BCLRs)).addImm(7);
2269 
2270   // Load the original value.
2271   BuildMI(*BB, I, dl, TII.get(LoadOpcode), MI.getOperand(0).getReg())
2272       .add(MI.getOperand(1));
2273 
2274   // Do the arithmetic operation.
2275   Register Result = MRI.createVirtualRegister(RC);
2276   BuildMI(*BB, I, dl, TII.get(Opcode), Result)
2277       .addReg(MI.getOperand(0).getReg())
2278       .add(MI.getOperand(2));
2279 
2280   // Store the result.
2281   BuildMI(*BB, I, dl, TII.get(StoreOpcode))
2282       .add(MI.getOperand(1))
2283       .addReg(Result);
2284 
2285   // Restore interrupts.
2286   BuildMI(*BB, I, dl, TII.get(AVR::OUTARr))
2287       .addImm(Subtarget.getIORegSREG())
2288       .addReg(Subtarget.getTmpRegister());
2289 
2290   // Remove the pseudo instruction.
2291   MI.eraseFromParent();
2292   return BB;
2293 }
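
// At the source level (hypothetical user code, not part of this file), an
// operation lowered through this path is e.g.
//   uint8_t Old = __atomic_fetch_add(Ptr, 5, __ATOMIC_SEQ_CST);
// Note that writing the saved SREG back restores the previous interrupt-enable
// state instead of unconditionally executing sei, so the sequence is also
// correct when it is reached with interrupts already disabled.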
2294 
2295 MachineBasicBlock *
2296 AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
2297                                                MachineBasicBlock *MBB) const {
2298   int Opc = MI.getOpcode();
2299 
2300   // Pseudo shift instructions with a non-constant shift amount are expanded
2301   // into a loop.
2302   switch (Opc) {
2303   case AVR::Lsl8:
2304   case AVR::Lsl16:
2305   case AVR::Lsr8:
2306   case AVR::Lsr16:
2307   case AVR::Rol8:
2308   case AVR::Rol16:
2309   case AVR::Ror8:
2310   case AVR::Ror16:
2311   case AVR::Asr8:
2312   case AVR::Asr16:
2313     return insertShift(MI, MBB);
2314   case AVR::Lsl32:
2315   case AVR::Lsr32:
2316   case AVR::Asr32:
2317     return insertWideShift(MI, MBB);
2318   case AVR::MULRdRr:
2319   case AVR::MULSRdRr:
2320     return insertMul(MI, MBB);
2321   case AVR::CopyZero:
2322     return insertCopyZero(MI, MBB);
2323   case AVR::AtomicLoadAdd8:
2324     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDRdRr, 8);
2325   case AVR::AtomicLoadAdd16:
2326     return insertAtomicArithmeticOp(MI, MBB, AVR::ADDWRdRr, 16);
2327   case AVR::AtomicLoadSub8:
2328     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBRdRr, 8);
2329   case AVR::AtomicLoadSub16:
2330     return insertAtomicArithmeticOp(MI, MBB, AVR::SUBWRdRr, 16);
2331   case AVR::AtomicLoadAnd8:
2332     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDRdRr, 8);
2333   case AVR::AtomicLoadAnd16:
2334     return insertAtomicArithmeticOp(MI, MBB, AVR::ANDWRdRr, 16);
2335   case AVR::AtomicLoadOr8:
2336     return insertAtomicArithmeticOp(MI, MBB, AVR::ORRdRr, 8);
2337   case AVR::AtomicLoadOr16:
2338     return insertAtomicArithmeticOp(MI, MBB, AVR::ORWRdRr, 16);
2339   case AVR::AtomicLoadXor8:
2340     return insertAtomicArithmeticOp(MI, MBB, AVR::EORRdRr, 8);
2341   case AVR::AtomicLoadXor16:
2342     return insertAtomicArithmeticOp(MI, MBB, AVR::EORWRdRr, 16);
2343   }
2344 
2345   assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
2346          "Unexpected instr type to insert");
2347 
2348   const AVRInstrInfo &TII = (const AVRInstrInfo &)*MI.getParent()
2349                                 ->getParent()
2350                                 ->getSubtarget()
2351                                 .getInstrInfo();
2352   DebugLoc dl = MI.getDebugLoc();
2353 
2354   // To "insert" a SELECT instruction, we insert the diamond
2355   // control-flow pattern. The incoming instruction knows the
2356   // destination vreg to set, the condition code register to branch
2357   // on, the true/false values to select between, and a branch opcode
2358   // to use.
2359 
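  // The control flow created below looks like this (illustrative diagram):
  //
  //        MBB --(cond)--> trueMBB   <-- the PHI selects the result here
  //         |                 ^
  //         +--> falseMBB ----+
  //
  // The true value reaches the PHI along the MBB -> trueMBB edge, the false
  // value along the falseMBB -> trueMBB edge.
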
2360   MachineFunction *MF = MBB->getParent();
2361   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
2362   MachineBasicBlock *FallThrough = MBB->getFallThrough();
2363 
2364   // If the current basic block falls through to another basic block,
2365   // we must insert an unconditional branch to the fallthrough destination
2366   // if we are to insert basic blocks at the prior fallthrough point.
2367   if (FallThrough != nullptr) {
2368     BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(FallThrough);
2369   }
2370 
2371   MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
2372   MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);
2373 
2374   MachineFunction::iterator I;
2375   for (I = MF->begin(); I != MF->end() && &(*I) != MBB; ++I)
2376     ;
2377   if (I != MF->end())
2378     ++I;
2379   MF->insert(I, trueMBB);
2380   MF->insert(I, falseMBB);
2381 
2382   // Transfer remaining instructions and all successors of the current
2383   // block to the block which will contain the Phi node for the
2384   // select.
2385   trueMBB->splice(trueMBB->begin(), MBB,
2386                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2387   trueMBB->transferSuccessorsAndUpdatePHIs(MBB);
2388 
2389   AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
2390   BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
2391   BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
2392   MBB->addSuccessor(falseMBB);
2393   MBB->addSuccessor(trueMBB);
2394 
2395   // Unconditionally flow back to the true block
2396   BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
2397   falseMBB->addSuccessor(trueMBB);
2398 
2399   // Set up the Phi node to determine where we came from
2400   BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI),
2401           MI.getOperand(0).getReg())
2402       .addReg(MI.getOperand(1).getReg())
2403       .addMBB(MBB)
2404       .addReg(MI.getOperand(2).getReg())
2405       .addMBB(falseMBB);
2406 
2407   MI.eraseFromParent(); // The pseudo instruction is gone now.
2408   return trueMBB;
2409 }
2410 
2411 //===----------------------------------------------------------------------===//
2412 //  Inline Asm Support
2413 //===----------------------------------------------------------------------===//
2414 
2415 AVRTargetLowering::ConstraintType
2416 AVRTargetLowering::getConstraintType(StringRef Constraint) const {
2417   if (Constraint.size() == 1) {
2418     // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
2419     switch (Constraint[0]) {
2420     default:
2421       break;
2422     case 'a': // Simple upper registers
2423     case 'b': // Base pointer register pairs
2424     case 'd': // Upper registers
2425     case 'l': // Lower registers
2426     case 'e': // Pointer register pairs
2427     case 'q': // Stack pointer register
2428     case 'r': // Any register
2429     case 'w': // Special upper register pairs
2430       return C_RegisterClass;
2431     case 't': // Temporary register
2432     case 'x':
2433     case 'X': // Pointer register pair X
2434     case 'y':
2435     case 'Y': // Pointer register pair Y
2436     case 'z':
2437     case 'Z': // Pointer register pair Z
2438       return C_Register;
2439     case 'Q': // A memory address based on Y or Z pointer with displacement.
2440       return C_Memory;
2441     case 'G': // Floating point constant
2442     case 'I': // 6-bit positive integer constant
2443     case 'J': // 6-bit negative integer constant
2444     case 'K': // Integer constant (Range: 2)
2445     case 'L': // Integer constant (Range: 0)
2446     case 'M': // 8-bit integer constant
2447     case 'N': // Integer constant (Range: -1)
2448     case 'O': // Integer constant (Range: 8, 16, 24)
2449     case 'P': // Integer constant (Range: 1)
2450     case 'R': // Integer constant (Range: -6 to 5)
2451       return C_Immediate;
2452     }
2453   }
2454 
2455   return TargetLowering::getConstraintType(Constraint);
2456 }
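
// For illustration (hypothetical user code, not part of this file), these
// constraints show up in GCC-style inline assembly such as:
//   uint8_t V;
//   asm("ldi %0, 42" : "=d"(V));           // 'd': ldi requires an upper register
//   asm("ld %0, Z" : "=r"(V) : "z"(Ptr));  // 'z': the Z pointer register pair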
2457 
2458 unsigned
2459 AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2460   // Not sure if this is actually the right thing to do, but we've got to do
2461   // *something* [agnat]
2462   switch (ConstraintCode[0]) {
2463   case 'Q':
2464     return InlineAsm::Constraint_Q;
2465   }
2466   return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2467 }
2468 
2469 AVRTargetLowering::ConstraintWeight
2470 AVRTargetLowering::getSingleConstraintMatchWeight(
2471     AsmOperandInfo &info, const char *constraint) const {
2472   ConstraintWeight weight = CW_Invalid;
2473   Value *CallOperandVal = info.CallOperandVal;
2474 
2475   // If we don't have a value, we can't do a match,
2476   // but allow it at the lowest weight.
2477   // (this behaviour has been copied from the ARM backend)
2478   if (!CallOperandVal) {
2479     return CW_Default;
2480   }
2481 
2482   // Look at the constraint type.
2483   switch (*constraint) {
2484   default:
2485     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
2486     break;
2487   case 'd':
2488   case 'r':
2489   case 'l':
2490     weight = CW_Register;
2491     break;
2492   case 'a':
2493   case 'b':
2494   case 'e':
2495   case 'q':
2496   case 't':
2497   case 'w':
2498   case 'x':
2499   case 'X':
2500   case 'y':
2501   case 'Y':
2502   case 'z':
2503   case 'Z':
2504     weight = CW_SpecificReg;
2505     break;
2506   case 'G':
2507     if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
2508       if (C->isZero()) {
2509         weight = CW_Constant;
2510       }
2511     }
2512     break;
2513   case 'I':
2514     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2515       if (isUInt<6>(C->getZExtValue())) {
2516         weight = CW_Constant;
2517       }
2518     }
2519     break;
2520   case 'J':
2521     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2522       if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
2523         weight = CW_Constant;
2524       }
2525     }
2526     break;
2527   case 'K':
2528     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2529       if (C->getZExtValue() == 2) {
2530         weight = CW_Constant;
2531       }
2532     }
2533     break;
2534   case 'L':
2535     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2536       if (C->getZExtValue() == 0) {
2537         weight = CW_Constant;
2538       }
2539     }
2540     break;
2541   case 'M':
2542     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2543       if (isUInt<8>(C->getZExtValue())) {
2544         weight = CW_Constant;
2545       }
2546     }
2547     break;
2548   case 'N':
2549     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2550       if (C->getSExtValue() == -1) {
2551         weight = CW_Constant;
2552       }
2553     }
2554     break;
2555   case 'O':
2556     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2557       if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
2558           (C->getZExtValue() == 24)) {
2559         weight = CW_Constant;
2560       }
2561     }
2562     break;
2563   case 'P':
2564     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2565       if (C->getZExtValue() == 1) {
2566         weight = CW_Constant;
2567       }
2568     }
2569     break;
2570   case 'R':
2571     if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
2572       if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
2573         weight = CW_Constant;
2574       }
2575     }
2576     break;
2577   case 'Q':
2578     weight = CW_Memory;
2579     break;
2580   }
2581 
2582   return weight;
2583 }
2584 
2585 std::pair<unsigned, const TargetRegisterClass *>
2586 AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2587                                                 StringRef Constraint,
2588                                                 MVT VT) const {
2589   if (Constraint.size() == 1) {
2590     switch (Constraint[0]) {
2591     case 'a': // Simple upper registers r16..r23.
2592       if (VT == MVT::i8)
2593         return std::make_pair(0U, &AVR::LD8loRegClass);
2594       else if (VT == MVT::i16)
2595         return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
2596       break;
2597     case 'b': // Base pointer registers: y, z.
2598       if (VT == MVT::i8 || VT == MVT::i16)
2599         return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
2600       break;
2601     case 'd': // Upper registers r16..r31.
2602       if (VT == MVT::i8)
2603         return std::make_pair(0U, &AVR::LD8RegClass);
2604       else if (VT == MVT::i16)
2605         return std::make_pair(0U, &AVR::DLDREGSRegClass);
2606       break;
2607     case 'l': // Lower registers r0..r15.
2608       if (VT == MVT::i8)
2609         return std::make_pair(0U, &AVR::GPR8loRegClass);
2610       else if (VT == MVT::i16)
2611         return std::make_pair(0U, &AVR::DREGSloRegClass);
2612       break;
2613     case 'e': // Pointer register pairs: x, y, z.
2614       if (VT == MVT::i8 || VT == MVT::i16)
2615         return std::make_pair(0U, &AVR::PTRREGSRegClass);
2616       break;
2617     case 'q': // Stack pointer register: SPH:SPL.
2618       return std::make_pair(0U, &AVR::GPRSPRegClass);
2619     case 'r': // Any register: r0..r31.
2620       if (VT == MVT::i8)
2621         return std::make_pair(0U, &AVR::GPR8RegClass);
2622       else if (VT == MVT::i16)
2623         return std::make_pair(0U, &AVR::DREGSRegClass);
2624       break;
2625     case 't': // Temporary register: r0.
2626       if (VT == MVT::i8)
2627         return std::make_pair(unsigned(Subtarget.getTmpRegister()),
2628                               &AVR::GPR8RegClass);
2629       break;
2630     case 'w': // Special upper register pairs: r24, r26, r28, r30.
2631       if (VT == MVT::i8 || VT == MVT::i16)
2632         return std::make_pair(0U, &AVR::IWREGSRegClass);
2633       break;
2634     case 'x': // Pointer register pair X: r27:r26.
2635     case 'X':
2636       if (VT == MVT::i8 || VT == MVT::i16)
2637         return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
2638       break;
2639     case 'y': // Pointer register pair Y: r29:r28.
2640     case 'Y':
2641       if (VT == MVT::i8 || VT == MVT::i16)
2642         return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
2643       break;
2644     case 'z': // Pointer register pair Z: r31:r30.
2645     case 'Z':
2646       if (VT == MVT::i8 || VT == MVT::i16)
2647         return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
2648       break;
2649     default:
2650       break;
2651     }
2652   }
2653 
2654   return TargetLowering::getRegForInlineAsmConstraint(
2655       Subtarget.getRegisterInfo(), Constraint, VT);
2656 }
2657 
2658 void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
2659                                                      std::string &Constraint,
2660                                                      std::vector<SDValue> &Ops,
2661                                                      SelectionDAG &DAG) const {
2662   SDValue Result;
2663   SDLoc DL(Op);
2664   EVT Ty = Op.getValueType();
2665 
2666   // Currently we only support length 1 constraints.
2667   if (Constraint.length() != 1) {
2668     return;
2669   }
2670 
2671   char ConstraintLetter = Constraint[0];
2672   switch (ConstraintLetter) {
2673   default:
2674     break;
2675   // Deal with integers first:
2676   case 'I':
2677   case 'J':
2678   case 'K':
2679   case 'L':
2680   case 'M':
2681   case 'N':
2682   case 'O':
2683   case 'P':
2684   case 'R': {
2685     const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
2686     if (!C) {
2687       return;
2688     }
2689 
2690     int64_t CVal64 = C->getSExtValue();
2691     uint64_t CUVal64 = C->getZExtValue();
2692     switch (ConstraintLetter) {
2693     case 'I': // 0..63
2694       if (!isUInt<6>(CUVal64))
2695         return;
2696       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2697       break;
2698     case 'J': // -63..0
2699       if (CVal64 < -63 || CVal64 > 0)
2700         return;
2701       Result = DAG.getTargetConstant(CVal64, DL, Ty);
2702       break;
2703     case 'K': // 2
2704       if (CUVal64 != 2)
2705         return;
2706       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2707       break;
2708     case 'L': // 0
2709       if (CUVal64 != 0)
2710         return;
2711       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2712       break;
2713     case 'M': // 0..255
2714       if (!isUInt<8>(CUVal64))
2715         return;
2716       // i8 type may be printed as a negative number,
2717       // e.g. 254 would be printed as -2,
2718       // so we force it to i16 at least.
2719       if (Ty.getSimpleVT() == MVT::i8) {
2720         Ty = MVT::i16;
2721       }
2722       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2723       break;
2724     case 'N': // -1
2725       if (CVal64 != -1)
2726         return;
2727       Result = DAG.getTargetConstant(CVal64, DL, Ty);
2728       break;
2729     case 'O': // 8, 16, 24
2730       if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
2731         return;
2732       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2733       break;
2734     case 'P': // 1
2735       if (CUVal64 != 1)
2736         return;
2737       Result = DAG.getTargetConstant(CUVal64, DL, Ty);
2738       break;
2739     case 'R': // -6..5
2740       if (CVal64 < -6 || CVal64 > 5)
2741         return;
2742       Result = DAG.getTargetConstant(CVal64, DL, Ty);
2743       break;
2744     }
2745 
2746     break;
2747   }
2748   case 'G':
2749     const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
2750     if (!FC || !FC->isZero())
2751       return;
2752     // Soften float to i8 0
2753     Result = DAG.getTargetConstant(0, DL, MVT::i8);
2754     break;
2755   }
2756 
2757   if (Result.getNode()) {
2758     Ops.push_back(Result);
2759     return;
2760   }
2761 
2762   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2763 }
2764 
2765 Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
2766                                               const MachineFunction &MF) const {
2767   Register Reg;
2768 
2769   if (VT == LLT::scalar(8)) {
2770     Reg = StringSwitch<unsigned>(RegName)
2771               .Case("r0", AVR::R0)
2772               .Case("r1", AVR::R1)
2773               .Default(0);
2774   } else {
2775     Reg = StringSwitch<unsigned>(RegName)
2776               .Case("r0", AVR::R1R0)
2777               .Case("sp", AVR::SP)
2778               .Default(0);
2779   }
2780 
2781   if (Reg)
2782     return Reg;
2783 
2784   report_fatal_error(
2785       Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
2786 }
2787 
2788 } // end of namespace llvm
2789