//===-- AVRISelLowering.cpp - AVR DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AVR uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "AVRISelLowering.h"

#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/ErrorHandling.h"

#include "AVR.h"
#include "AVRMachineFunctionInfo.h"
#include "AVRSubtarget.h"
#include "AVRTargetMachine.h"
#include "MCTargetDesc/AVRMCTargetDesc.h"

namespace llvm {

AVRTargetLowering::AVRTargetLowering(const AVRTargetMachine &TM,
                                     const AVRSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Set up the register classes.
  addRegisterClass(MVT::i8, &AVR::GPR8RegClass);
  addRegisterClass(MVT::i16, &AVR::DREGSRegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget.getRegisterInfo());

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent);
  setSchedulingPreference(Sched::RegPressure);
  setStackPointerRegisterToSaveRestore(AVR::SP);
  setSupportsUnalignedAtomics(true);

  setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i16, Custom);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i8, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i16, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}) {
      setLoadExtAction(N, VT, MVT::i1, Promote);
      setLoadExtAction(N, VT, MVT::i8, Expand);
    }
  }

  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  // sub (x, imm) gets canonicalized to add (x, -imm), so for illegal types
  // revert it into a sub, since we don't have an add-with-immediate
  // instruction.
  setOperationAction(ISD::ADD, MVT::i32, Custom);
  setOperationAction(ISD::ADD, MVT::i64, Custom);

  // Our shift instructions are only able to shift one bit at a time, so
  // handle this in a custom way.
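  // For example, a constant 8-bit "x << 3" is emitted as three chained
  // one-bit AVRISD::LSL nodes, while a variable shift amount becomes an
  // AVRISD::LSLLOOP pseudo that insertShift() below later expands into a
  // decrement-and-branch loop.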
  setOperationAction(ISD::SRA, MVT::i8, Custom);
  setOperationAction(ISD::SHL, MVT::i8, Custom);
  setOperationAction(ISD::SRL, MVT::i8, Custom);
  setOperationAction(ISD::SRA, MVT::i16, Custom);
  setOperationAction(ISD::SHL, MVT::i16, Custom);
  setOperationAction(ISD::SRL, MVT::i16, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);

  setOperationAction(ISD::ROTL, MVT::i8, Custom);
  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Custom);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i8, Custom);
  setOperationAction(ISD::BR_CC, MVT::i16, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::i8, Expand);
  setOperationAction(ISD::SELECT, MVT::i16, Expand);

  setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // Add support for postincrement and predecrement load/stores.
  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_DEC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_DEC, MVT::i16, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);

  // Atomic operations which must be lowered to rtlib calls.
  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::ATOMIC_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_CMP_SWAP, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, VT, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, VT, Expand);
  }

  // Division/remainder.
  setOperationAction(ISD::UDIV, MVT::i8, Expand);
  setOperationAction(ISD::UDIV, MVT::i16, Expand);
  setOperationAction(ISD::UREM, MVT::i8, Expand);
  setOperationAction(ISD::UREM, MVT::i16, Expand);
  setOperationAction(ISD::SDIV, MVT::i8, Expand);
  setOperationAction(ISD::SDIV, MVT::i16, Expand);
  setOperationAction(ISD::SREM, MVT::i8, Expand);
  setOperationAction(ISD::SREM, MVT::i16, Expand);
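  // Expanding UDIV/UREM (and the signed variants) while marking the DIVREM
  // nodes Custom below lets the DAG combiner fuse a quotient and remainder of
  // the same operands into a single [SU]DIVREM node, which LowerDivRem()
  // turns into one divmod libcall; e.g. "a / b" and "a % b" on i16 values can
  // share a single __divmodhi4 call instead of making two separate ones.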
  // Make division and modulus custom.
  setOperationAction(ISD::UDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i8, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i16, Custom);
  setOperationAction(ISD::SDIVREM, MVT::i32, Custom);

  // Do not use MUL. The AVR instructions are closer to SMUL_LOHI & co.
  setOperationAction(ISD::MUL, MVT::i8, Expand);
  setOperationAction(ISD::MUL, MVT::i16, Expand);

  // Expand 16-bit multiplications.
  setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);

  // Expand multiplications to libcalls when there is no hardware MUL.
  if (!Subtarget.supportsMultiplication()) {
    setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
    setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    // TODO: The generated code is pretty poor. Investigate using the
    // same "shift and subtract with carry" trick that we do for
    // extending 8-bit to 16-bit. This may require infrastructure
    // improvements in how we treat 16-bit "registers" to be feasible.
  }

  // Division rtlib functions (not supported); use divmod functions instead.
  setLibcallName(RTLIB::SDIV_I8, nullptr);
  setLibcallName(RTLIB::SDIV_I16, nullptr);
  setLibcallName(RTLIB::SDIV_I32, nullptr);
  setLibcallName(RTLIB::UDIV_I8, nullptr);
  setLibcallName(RTLIB::UDIV_I16, nullptr);
  setLibcallName(RTLIB::UDIV_I32, nullptr);

  // Modulus rtlib functions (not supported); use divmod functions instead.
  setLibcallName(RTLIB::SREM_I8, nullptr);
  setLibcallName(RTLIB::SREM_I16, nullptr);
  setLibcallName(RTLIB::SREM_I32, nullptr);
  setLibcallName(RTLIB::UREM_I8, nullptr);
  setLibcallName(RTLIB::UREM_I16, nullptr);
  setLibcallName(RTLIB::UREM_I32, nullptr);

  // Division and modulus rtlib functions.
  setLibcallName(RTLIB::SDIVREM_I8, "__divmodqi4");
  setLibcallName(RTLIB::SDIVREM_I16, "__divmodhi4");
  setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
  setLibcallName(RTLIB::UDIVREM_I8, "__udivmodqi4");
  setLibcallName(RTLIB::UDIVREM_I16, "__udivmodhi4");
  setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");

  // Several of the runtime library functions use a special calling conv.
  setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::AVR_BUILTIN);
  setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::AVR_BUILTIN);

  // Trigonometric rtlib functions.
  setLibcallName(RTLIB::SIN_F32, "sin");
  setLibcallName(RTLIB::COS_F32, "cos");

  setMinFunctionAlignment(Align(2));
  setMinimumJumpTableEntries(UINT_MAX);
}

const char *AVRTargetLowering::getTargetNodeName(unsigned Opcode) const {
#define NODE(name)                                                             \
  case AVRISD::name:                                                           \
    return #name

  switch (Opcode) {
  default:
    return nullptr;
    NODE(RET_FLAG);
    NODE(RETI_FLAG);
    NODE(CALL);
    NODE(WRAPPER);
    NODE(LSL);
    NODE(LSR);
    NODE(ROL);
    NODE(ROR);
    NODE(ASR);
    NODE(LSLLOOP);
    NODE(LSRLOOP);
    NODE(ROLLOOP);
    NODE(RORLOOP);
    NODE(ASRLOOP);
    NODE(BRCOND);
    NODE(CMP);
    NODE(CMPC);
    NODE(TST);
    NODE(SELECT_CC);
#undef NODE
  }
}

EVT AVRTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  assert(!VT.isVector() && "No AVR SetCC type for vectors!");
  return MVT::i8;
}

SDValue AVRTargetLowering::LowerShifts(SDValue Op, SelectionDAG &DAG) const {
  //:TODO: this function has to be completely rewritten to produce optimal
  // code; for now it produces very long but correct code.
  unsigned Opc8;
  const SDNode *N = Op.getNode();
  EVT VT = Op.getValueType();
  SDLoc dl(N);
  assert(isPowerOf2_32(VT.getSizeInBits()) &&
         "Expected power-of-2 shift amount");

  // Expand non-constant shifts to loops.
  if (!isa<ConstantSDNode>(N->getOperand(1))) {
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Invalid shift opcode!");
    case ISD::SHL:
      return DAG.getNode(AVRISD::LSLLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::SRL:
      return DAG.getNode(AVRISD::LSRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    case ISD::ROTL: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::ROLLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::ROTR: {
      SDValue Amt = N->getOperand(1);
      EVT AmtVT = Amt.getValueType();
      Amt = DAG.getNode(ISD::AND, dl, AmtVT, Amt,
                        DAG.getConstant(VT.getSizeInBits() - 1, dl, AmtVT));
      return DAG.getNode(AVRISD::RORLOOP, dl, VT, N->getOperand(0), Amt);
    }
    case ISD::SRA:
      return DAG.getNode(AVRISD::ASRLOOP, dl, VT, N->getOperand(0),
                         N->getOperand(1));
    }
  }

  uint64_t ShiftAmount = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
  SDValue Victim = N->getOperand(0);

  switch (Op.getOpcode()) {
  case ISD::SRA:
    Opc8 = AVRISD::ASR;
    break;
  case ISD::ROTL:
    Opc8 = AVRISD::ROL;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::ROTR:
    Opc8 = AVRISD::ROR;
    ShiftAmount = ShiftAmount % VT.getSizeInBits();
    break;
  case ISD::SRL:
    Opc8 = AVRISD::LSR;
    break;
  case ISD::SHL:
    Opc8 = AVRISD::LSL;
    break;
  default:
    llvm_unreachable("Invalid shift opcode");
  }
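  // The special cases below shorten the generic one-bit sequence where a
  // cheaper idiom exists; e.g. an 8-bit "x >> 4" becomes a SWAP (nibble
  // exchange) followed by an AND with 0x0f instead of four single-bit LSRs.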
  // Optimize int8/int16 shifts.
  if (VT.getSizeInBits() == 8) {
    if (Op.getOpcode() == ISD::SHL && 4 <= ShiftAmount && ShiftAmount < 7) {
      // Optimize LSL when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0xf0, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SRL && 4 <= ShiftAmount &&
               ShiftAmount < 7) {
      // Optimize LSR when 4 <= ShiftAmount <= 6.
      Victim = DAG.getNode(AVRISD::SWAP, dl, VT, Victim);
      Victim =
          DAG.getNode(ISD::AND, dl, VT, Victim, DAG.getConstant(0x0f, dl, VT));
      ShiftAmount -= 4;
    } else if (Op.getOpcode() == ISD::SHL && ShiftAmount == 7) {
      // Optimize LSL when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSLBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRL && ShiftAmount == 7) {
      // Optimize LSR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::LSRBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    } else if (Op.getOpcode() == ISD::SRA && ShiftAmount == 7) {
      // Optimize ASR when ShiftAmount == 7.
      Victim = DAG.getNode(AVRISD::ASRBN, dl, VT, Victim,
                           DAG.getConstant(7, dl, VT));
      ShiftAmount = 0;
    }
  } else if (VT.getSizeInBits() == 16) {
    if (4 <= ShiftAmount && ShiftAmount < 8)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(4, dl, VT));
        ShiftAmount -= 4;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(4, dl, VT));
        ShiftAmount -= 4;
        break;
      default:
        break;
      }
    else if (8 <= ShiftAmount && ShiftAmount < 12)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        break;
      case ISD::SRA:
        Victim = DAG.getNode(AVRISD::ASRWN, dl, VT, Victim,
                             DAG.getConstant(8, dl, VT));
        ShiftAmount -= 8;
        break;
      default:
        break;
      }
    else if (12 <= ShiftAmount)
      switch (Op.getOpcode()) {
      case ISD::SHL:
        Victim = DAG.getNode(AVRISD::LSLWN, dl, VT, Victim,
                             DAG.getConstant(12, dl, VT));
        ShiftAmount -= 12;
        break;
      case ISD::SRL:
        Victim = DAG.getNode(AVRISD::LSRWN, dl, VT, Victim,
                             DAG.getConstant(12, dl, VT));
        ShiftAmount -= 12;
        break;
      default:
        break;
      }
  }

  while (ShiftAmount--) {
    Victim = DAG.getNode(Opc8, dl, VT, Victim);
  }

  return Victim;
}
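
// AVR has no hardware divide, so DIVREM nodes become calls to the divmod
// helpers registered in the constructor (e.g. __udivmodhi4 for unsigned i16);
// each helper returns the quotient and the remainder together as a pair.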
SDValue AVRTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool IsSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());

  RTLIB::Libcall LC;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:
    LC = IsSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
    break;
  case MVT::i16:
    LC = IsSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
    break;
  case MVT::i32:
    LC = IsSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
    break;
  }

  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (SDValue const &Value : Op->op_values()) {
    Entry.Node = Value;
    Entry.Ty = Value.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = IsSigned;
    Entry.IsZExt = !IsSigned;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = StructType::get(Ty, Ty);

  SDLoc dl(Op);
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setInRegister()
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}

SDValue AVRTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();

  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();

  // Create the TargetGlobalAddress node, folding in the constant offset.
  SDValue Result =
      DAG.getTargetGlobalAddress(GV, SDLoc(Op), getPointerTy(DL), Offset);
  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

SDValue AVRTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  auto DL = DAG.getDataLayout();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(DL));

  return DAG.getNode(AVRISD::WRAPPER, SDLoc(Op), getPointerTy(DL), Result);
}

/// IntCCToAVRCC - Convert a DAG integer condition code to an AVR CC.
static AVRCC::CondCodes intCCToAVRCC(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case ISD::SETEQ:
    return AVRCC::COND_EQ;
  case ISD::SETNE:
    return AVRCC::COND_NE;
  case ISD::SETGE:
    return AVRCC::COND_GE;
  case ISD::SETLT:
    return AVRCC::COND_LT;
  case ISD::SETUGE:
    return AVRCC::COND_SH;
  case ISD::SETULT:
    return AVRCC::COND_LO;
  }
}
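
// Multi-byte comparisons are built from the least significant part upwards:
// a plain CP (or CPI against an immediate) on the lowest byte or word, then
// CPC on each higher part so the carry from the previous compare is taken
// into account, mirroring the usual cp/cpc instruction sequence.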
/// Returns appropriate CP/CPI/CPC nodes code for the given 8/16-bit operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, SDLoc DL) const {
  assert((LHS.getSimpleValueType() == RHS.getSimpleValueType()) &&
         "LHS and RHS have different types");
  assert(((LHS.getSimpleValueType() == MVT::i16) ||
          (LHS.getSimpleValueType() == MVT::i8)) &&
         "invalid comparison type");

  SDValue Cmp;

  if (LHS.getSimpleValueType() == MVT::i16 && isa<ConstantSDNode>(RHS)) {
    // Generate a CPI/CPC pair if RHS is a 16-bit constant.
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, RHS,
                                DAG.getIntPtrConstant(1, DL));
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHSlo, RHSlo);
    Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
  } else {
    // Generate an ordinary comparison.
    Cmp = DAG.getNode(AVRISD::CMP, DL, MVT::Glue, LHS, RHS);
  }

  return Cmp;
}

/// Returns appropriate AVR CMP/CMPC nodes and corresponding condition code for
/// the given operands.
SDValue AVRTargetLowering::getAVRCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &AVRcc, SelectionDAG &DAG,
                                     SDLoc DL) const {
  SDValue Cmp;
  EVT VT = LHS.getValueType();
  bool UseTest = false;

  switch (CC) {
  default:
    break;
  case ISD::SETLE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETGE;
    break;
  }
  case ISD::SETGT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case -1: {
        // When doing lhs > -1 use a tst instruction on the top part of lhs
        // and use brpl instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_PL, DL, MVT::i8);
        break;
      }
      case 0: {
        // Turn lhs > 0 into 0 < lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETLT;
        break;
      }
      default: {
        // Turn lhs > rhs with rhs constant into lhs >= rhs+1; this allows
        // us to fold the constant into the cmp instruction.
        RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      }
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETLT;
    break;
  }
  case ISD::SETLT: {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      switch (C->getSExtValue()) {
      case 1: {
        // Turn lhs < 1 into 0 >= lhs since 0 can be materialized with
        // __zero_reg__ in lhs.
        RHS = LHS;
        LHS = DAG.getConstant(0, DL, VT);
        CC = ISD::SETGE;
        break;
      }
      case 0: {
        // When doing lhs < 0 use a tst instruction on the top part of lhs
        // and use brmi instead of using a chain of cp/cpc.
        UseTest = true;
        AVRcc = DAG.getConstant(AVRCC::COND_MI, DL, MVT::i8);
        break;
      }
      }
    }
    break;
  }
  case ISD::SETULE: {
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETUGE;
    break;
  }
  case ISD::SETUGT: {
    // Turn lhs > rhs with rhs constant into lhs >= rhs+1; this allows us to
    // fold the constant into the cmp instruction.
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      RHS = DAG.getConstant(C->getSExtValue() + 1, DL, VT);
      CC = ISD::SETUGE;
      break;
    }
    // Swap operands and reverse the branching condition.
    std::swap(LHS, RHS);
    CC = ISD::SETULT;
    break;
  }
  }
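  // At this point CC has been normalized to a condition the hardware can
  // branch on directly; e.g. "lhs > 3" was rewritten above as "lhs >= 4" so
  // the constant can fold into a CPI immediate.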
  // Expand 32 and 64 bit comparisons with custom CMP and CMPC nodes instead of
  // using the default and/or/xor expansion code, which is much longer.
  if (VT == MVT::i32) {
    SDValue LHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS,
                                DAG.getIntPtrConstant(1, DL));
    SDValue RHSlo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHShi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS,
                                DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHShi,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHSlo, RHSlo, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHShi, RHShi, Cmp);
    }
  } else if (VT == MVT::i64) {
    SDValue LHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue LHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, LHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue LHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue LHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue LHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, LHS_1,
                               DAG.getIntPtrConstant(1, DL));

    SDValue RHS_0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(0, DL));
    SDValue RHS_1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, RHS,
                                DAG.getIntPtrConstant(1, DL));

    SDValue RHS0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_0,
                               DAG.getIntPtrConstant(1, DL));
    SDValue RHS2 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(0, DL));
    SDValue RHS3 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i16, RHS_1,
                               DAG.getIntPtrConstant(1, DL));

    if (UseTest) {
      // When using tst we only care about the highest part.
      SDValue Top = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8, LHS3,
                                DAG.getIntPtrConstant(1, DL));
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue, Top);
    } else {
      Cmp = getAVRCmp(LHS0, RHS0, DAG, DL);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS1, RHS1, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS2, RHS2, Cmp);
      Cmp = DAG.getNode(AVRISD::CMPC, DL, MVT::Glue, LHS3, RHS3, Cmp);
    }
  } else if (VT == MVT::i8 || VT == MVT::i16) {
    if (UseTest) {
      // When using tst we only care about the highest part.
      Cmp = DAG.getNode(AVRISD::TST, DL, MVT::Glue,
                        (VT == MVT::i8)
                            ? LHS
                            : DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i8,
                                          LHS, DAG.getIntPtrConstant(1, DL)));
    } else {
      Cmp = getAVRCmp(LHS, RHS, DAG, DL);
    }
  } else {
    llvm_unreachable("Invalid comparison size");
  }

  // When using a test instruction AVRcc is already set.
  if (!UseTest) {
    AVRcc = DAG.getConstant(intCCToAVRCC(CC), DL, MVT::i8);
  }

  return Cmp;
}

SDValue AVRTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  return DAG.getNode(AVRISD::BRCOND, dl, MVT::Other, Chain, Dest, TargetCC,
                     Cmp);
}

SDValue AVRTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc dl(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, dl, VTs, Ops);
}

SDValue AVRTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc DL(Op);

  SDValue TargetCC;
  SDValue Cmp = getAVRCmp(LHS, RHS, CC, TargetCC, DAG, DL);

  SDValue TrueV = DAG.getConstant(1, DL, Op.getValueType());
  SDValue FalseV = DAG.getConstant(0, DL, Op.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {TrueV, FalseV, TargetCC, Cmp};

  return DAG.getNode(AVRISD::SELECT_CC, DL, VTs, Ops);
}

SDValue AVRTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  auto DL = DAG.getDataLayout();
  SDLoc dl(Op);

  // Vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDValue FI =
      DAG.getFrameIndex(AFI->getVarArgsFrameIndex(), getPointerTy(DL));

  return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue AVRTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
  case ISD::ROTL:
  case ISD::ROTR:
    return LowerShifts(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return LowerBlockAddress(Op, DAG);
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC:
    return LowerSETCC(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return LowerDivRem(Op, DAG);
  }

  return SDValue();
}
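
// Note: i32/i64 additions marked Custom in the constructor have illegal
// result types and therefore arrive in ReplaceNodeResults() below, where
// e.g. (add i32 %x, 5) is rebuilt as (sub i32 %x, -5); AVR has
// subtract-with-immediate instructions (SUBI/SBCI) but no add-with-immediate.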
/// Replace a node with an illegal result type
/// with a new node built out of custom code.
void AVRTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc DL(N);

  switch (N->getOpcode()) {
  case ISD::ADD: {
    // Convert add (x, imm) into sub (x, -imm).
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      SDValue Sub = DAG.getNode(
          ISD::SUB, DL, N->getValueType(0), N->getOperand(0),
          DAG.getConstant(-C->getAPIntValue(), DL, C->getValueType(0)));
      Results.push_back(Sub);
    }
    break;
  }
  default: {
    SDValue Res = LowerOperation(SDValue(N, 0), DAG);

    for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
      Results.push_back(Res.getValue(I));

    break;
  }
  }
}

/// Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool AVRTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  int64_t Offs = AM.BaseOffs;

  // Allow absolute addresses.
  if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && Offs == 0) {
    return true;
  }

  // Flash memory instructions only allow zero offsets.
  if (isa<PointerType>(Ty) && AS == AVR::ProgramMemory) {
    return false;
  }

  // Allow reg+<6bit> offset.
  if (Offs < 0)
    Offs = -Offs;
  if (AM.BaseGV == 0 && AM.HasBaseReg && AM.Scale == 0 && isUInt<6>(Offs)) {
    return true;
  }

  return false;
}

/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool AVRTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {
  EVT VT;
  const SDNode *Op;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Op = LD->getBasePtr().getNode();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
    if (AVR::isProgramMemoryAccess(LD)) {
      return false;
    }
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Op = ST->getBasePtr().getNode();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;

    if ((VT == MVT::i16 && RHSC != -2) || (VT == MVT::i8 && RHSC != -1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::PRE_DEC;

    return true;
  }

  return false;
}
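
// For example, an i8 load whose address is first decremented by 1 matches the
// PRE_DEC form above and can be selected as "ld Rd, -X"; the POST_INC case
// below is the mirror image, corresponding to "ld Rd, X+" and "st X+, Rr".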
/// Returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool AVRTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDLoc DL(N);

  if (const LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    if (LD->getExtensionType() != ISD::NON_EXTLOAD)
      return false;
  } else if (const StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (AVR::isProgramMemoryAccess(ST)) {
      return false;
    }
  } else {
    return false;
  }

  if (VT != MVT::i8 && VT != MVT::i16) {
    return false;
  }

  if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB) {
    return false;
  }

  if (const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
    int RHSC = RHS->getSExtValue();
    if (Op->getOpcode() == ISD::SUB)
      RHSC = -RHSC;
    if ((VT == MVT::i16 && RHSC != 2) || (VT == MVT::i8 && RHSC != 1)) {
      return false;
    }

    Base = Op->getOperand(0);
    Offset = DAG.getConstant(RHSC, DL, MVT::i8);
    AM = ISD::POST_INC;

    return true;
  }

  return false;
}

bool AVRTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  return true;
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "AVRGenCallingConv.inc"

/// Registers for calling conventions, ordered in reverse as required by ABI.
/// Both arrays must be of the same length.
static const MCPhysReg RegList8[] = {
    AVR::R25, AVR::R24, AVR::R23, AVR::R22, AVR::R21, AVR::R20,
    AVR::R19, AVR::R18, AVR::R17, AVR::R16, AVR::R15, AVR::R14,
    AVR::R13, AVR::R12, AVR::R11, AVR::R10, AVR::R9,  AVR::R8};
static const MCPhysReg RegList16[] = {
    AVR::R26R25, AVR::R25R24, AVR::R24R23, AVR::R23R22,
    AVR::R22R21, AVR::R21R20, AVR::R20R19, AVR::R19R18,
    AVR::R18R17, AVR::R17R16, AVR::R16R15, AVR::R15R14,
    AVR::R14R13, AVR::R13R12, AVR::R12R11, AVR::R11R10,
    AVR::R10R9,  AVR::R9R8};

static_assert(array_lengthof(RegList8) == array_lengthof(RegList16),
              "8-bit and 16-bit register arrays must be of equal length");
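
// Worked example of the assignment performed below (avr-gcc ABI): for
// f(char a, int b), 'a' is counted as a 2-byte slot and passed in R24 (R25
// stays unused), and 'b' then takes the next pair down, R23:R22.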
/// Analyze incoming and outgoing function arguments. We need custom C++ code
/// to handle special constraints in the ABI.
/// In addition, all pieces of a certain argument have to be passed either
/// using registers or the stack but never mixing both.
template <typename ArgT>
static void
analyzeArguments(TargetLowering::CallLoweringInfo *CLI, const Function *F,
                 const DataLayout *TD, const SmallVectorImpl<ArgT> &Args,
                 SmallVectorImpl<CCValAssign> &ArgLocs, CCState &CCInfo) {
  unsigned NumArgs = Args.size();
  // This is the index of the last used register, in RegList*.
  // -1 means R26 (R26 is never actually used in CC).
  int RegLastIdx = -1;
  // Once an argument is passed on the stack, all remaining arguments are too.
  bool UseStack = false;
  for (unsigned i = 0; i != NumArgs;) {
    MVT VT = Args[i].VT;
    // We have to count the number of bytes for each function argument, that is
    // those Args with the same OrigArgIndex. This is important in case the
    // function takes an aggregate type.
    // The current argument will be between [i..j).
    unsigned ArgIndex = Args[i].OrigArgIndex;
    unsigned TotalBytes = VT.getStoreSize();
    unsigned j = i + 1;
    for (; j != NumArgs; ++j) {
      if (Args[j].OrigArgIndex != ArgIndex)
        break;
      TotalBytes += Args[j].VT.getStoreSize();
    }
    // Round up to even number of bytes.
    TotalBytes = alignTo(TotalBytes, 2);
    // Skip zero-sized arguments (and make sure the loop still advances).
    if (TotalBytes == 0) {
      i = j;
      continue;
    }
    // The index of the first register to be used.
    unsigned RegIdx = RegLastIdx + TotalBytes;
    RegLastIdx = RegIdx;
    // If there are not enough registers, use the stack.
    if (RegIdx >= array_lengthof(RegList8)) {
      UseStack = true;
    }
    for (; i != j; ++i) {
      MVT VT = Args[i].VT;

      if (UseStack) {
        auto evt = EVT(VT).getTypeForEVT(CCInfo.getContext());
        unsigned Offset = CCInfo.AllocateStack(TD->getTypeAllocSize(evt),
                                               TD->getABITypeAlign(evt));
        CCInfo.addLoc(
            CCValAssign::getMem(i, VT, Offset, VT, CCValAssign::Full));
      } else {
        unsigned Reg;
        if (VT == MVT::i8) {
          Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
        } else if (VT == MVT::i16) {
          Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
        } else {
          llvm_unreachable(
              "calling convention can only manage i8 and i16 types");
        }
        assert(Reg && "register not available in calling convention");
        CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
        // Registers inside a particular argument are sorted in increasing
        // order (remember the array is reversed).
        RegIdx -= VT.getStoreSize();
      }
    }
  }
}

/// Count the total number of bytes needed to pass or return these arguments.
template <typename ArgT>
static unsigned
getTotalArgumentsSizeInBytes(const SmallVectorImpl<ArgT> &Args) {
  unsigned TotalBytes = 0;

  for (const ArgT &Arg : Args) {
    TotalBytes += Arg.VT.getStoreSize();
  }
  return TotalBytes;
}
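
// Return values count registers from the low end of the rounded-up size,
// e.g. an i8 result comes back in R24 and an i32 result in R25:R22 with its
// least significant byte in R22, matching avr-gcc.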
/// Analyze incoming and outgoing value of returning from a function.
/// The algorithm is similar to analyzeArguments, but there can only be
/// one value, possibly an aggregate, and it is limited to 8 bytes.
template <typename ArgT>
static void analyzeReturnValues(const SmallVectorImpl<ArgT> &Args,
                                CCState &CCInfo) {
  unsigned NumArgs = Args.size();
  unsigned TotalBytes = getTotalArgumentsSizeInBytes(Args);
  // CanLowerReturn() guarantees this assertion.
  assert(TotalBytes <= 8 &&
         "return values greater than 8 bytes cannot be lowered");

  // GCC-ABI says that the size is rounded up to the next even number,
  // but in practice anything larger than 4 bytes is rounded up to 8.
  if (TotalBytes > 4) {
    TotalBytes = 8;
  } else {
    TotalBytes = alignTo(TotalBytes, 2);
  }

  // The index of the first register to use.
  int RegIdx = TotalBytes - 1;
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT VT = Args[i].VT;
    unsigned Reg;
    if (VT == MVT::i8) {
      Reg = CCInfo.AllocateReg(RegList8[RegIdx]);
    } else if (VT == MVT::i16) {
      Reg = CCInfo.AllocateReg(RegList16[RegIdx]);
    } else {
      llvm_unreachable("calling convention can only manage i8 and i16 types");
    }
    assert(Reg && "register not available in calling convention");
    CCInfo.addLoc(CCValAssign::getReg(i, VT, Reg, VT, CCValAssign::Full));
    // Registers are assigned in increasing order (remember the array is
    // reversed).
    RegIdx -= VT.getStoreSize();
  }
}
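
// Note that variadic functions take the early exit below: following the
// avr-gcc convention, arguments to vararg functions (named or not) are all
// passed on the stack, which is what ArgCC_AVR_Vararg from
// AVRGenCallingConv.inc encodes.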
SDValue AVRTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto DL = DAG.getDataLayout();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // Variadic functions do not need all the analysis below.
  if (isVarArg) {
    CCInfo.AnalyzeFormalArguments(Ins, ArgCC_AVR_Vararg);
  } else {
    analyzeArguments(nullptr, &MF.getFunction(), &DL, Ins, ArgLocs, CCInfo);
  }

  SDValue ArgValue;
  for (CCValAssign &VA : ArgLocs) {

    // Arguments stored on registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8) {
        RC = &AVR::GPR8RegClass;
      } else if (RegVT == MVT::i16) {
        RC = &AVR::DREGSRegClass;
      } else {
        llvm_unreachable("Unknown argument type!");
      }

      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

      // :NOTE: Clang should not promote any i8 into i16 but for safety the
      // following code will handle zexts or sexts generated by other
      // front ends. Otherwise:
      // If this is an 8 bit value, it is really passed promoted
      // to 16 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default:
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);
    } else {
      // Sanity check.
      assert(VA.isMemLoc());

      EVT LocVT = VA.getLocVT();

      // Create the frame index object for this incoming parameter.
      int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
                                     VA.getLocMemOffset(), true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DL));
      InVals.push_back(DAG.getLoad(LocVT, dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(MF, FI)));
    }
  }

  // If the function takes variable number of arguments, make a frame index for
  // the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    unsigned StackSize = CCInfo.getNextStackOffset();
    AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();

    AFI->setVarArgsFrameIndex(MFI.CreateFixedObject(2, StackSize, true));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

SDValue AVRTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();

  // AVR does not yet support tail call optimization.
  isTailCall = false;

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  const Function *F = nullptr;
  if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();

    F = cast<Function>(GV);
    Callee =
        DAG.getTargetGlobalAddress(GV, DL, getPointerTy(DAG.getDataLayout()));
  } else if (const ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(ES->getSymbol(),
                                         getPointerTy(DAG.getDataLayout()));
  }

  // Variadic functions do not need all the analysis below.
  if (isVarArg) {
    CCInfo.AnalyzeCallOperands(Outs, ArgCC_AVR_Vararg);
  } else {
    analyzeArguments(&CLI, F, &DAG.getDataLayout(), Outs, ArgLocs, CCInfo);
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  // First, walk the register assignments, inserting copies.
  unsigned AI, AE;
  bool HasStackArgs = false;
  for (AI = 0, AE = ArgLocs.size(); AI != AE; ++AI) {
    CCValAssign &VA = ArgLocs[AI];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[AI];

    // Promote the value if needed. With Clang this should not happen.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, RegVT, Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, RegVT, Arg);
      break;
    }

    // Stop when we encounter a stack argument; we need to process them
    // in reverse order in the loop below.
    if (VA.isMemLoc()) {
      HasStackArgs = true;
      break;
    }

    // Arguments that can be passed on registers must be kept in the RegsToPass
    // vector.
    RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
  }

  // Second, the stack arguments have to be walked.
  // Previously this code created chained stores, but those chained stores
  // appear to be unchained in the legalization phase. Therefore, do not
  // attempt to chain them here. In fact, chaining them here somehow causes the
  // first and second store to be reversed, which is the exact opposite of the
  // intended effect.
  if (HasStackArgs) {
    SmallVector<SDValue, 8> MemOpChains;
    for (; AI != AE; AI++) {
      CCValAssign &VA = ArgLocs[AI];
      SDValue Arg = OutVals[AI];

      assert(VA.isMemLoc());

      // SP points to one stack slot further so add one to adjust it.
      SDValue PtrOff = DAG.getNode(
          ISD::ADD, DL, getPointerTy(DAG.getDataLayout()),
          DAG.getRegister(AVR::SP, getPointerTy(DAG.getDataLayout())),
          DAG.getIntPtrConstant(VA.getLocMemOffset() + 1, DL));

      MemOpChains.push_back(
          DAG.getStore(Chain, DL, Arg, PtrOff,
                       MachinePointerInfo::getStack(MF, VA.getLocMemOffset())));
    }

    if (!MemOpChains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InFlag
  // is necessary since all emitted instructions must be stuck together.
  SDValue InFlag;
  for (auto Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto Reg : RegsToPass) {
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
  }

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }

  Chain = DAG.getNode(AVRISD::CALL, DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), InFlag, DL);

  if (!Ins.empty()) {
    InFlag = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, DL, DAG,
                         InVals);
}

/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue AVRTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Handle runtime calling convs.
  if (CallConv == CallingConv::AVR_BUILTIN) {
    CCInfo.AnalyzeCallResult(Ins, RetCC_AVR_BUILTIN);
  } else {
    analyzeReturnValues(Ins, CCInfo);
  }

  // Copy all of the result registers out of their specified physreg.
  for (CCValAssign const &RVLoc : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLoc.getLocReg(), RVLoc.getValVT(),
                               InFlag)
                .getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool AVRTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  if (CallConv == CallingConv::AVR_BUILTIN) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
    return CCInfo.CheckReturn(Outs, RetCC_AVR_BUILTIN);
  }

  unsigned TotalBytes = getTotalArgumentsSizeInBytes(Outs);
  return TotalBytes <= 8;
}
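
// When CanLowerReturn() reports false (a return value larger than 8 bytes),
// the target-independent SelectionDAG code demotes the return value to a
// hidden sret pointer argument, so LowerReturn() below only ever has to deal
// with register returns.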
SDValue
AVRTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  MachineFunction &MF = DAG.getMachineFunction();

  // Analyze return values.
  if (CallConv == CallingConv::AVR_BUILTIN) {
    CCInfo.AnalyzeReturn(Outs, RetCC_AVR_BUILTIN);
  } else {
    analyzeReturnValues(Outs, CCInfo);
  }

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Don't emit the ret/reti instruction when the naked attribute is present in
  // the function being compiled.
  if (MF.getFunction().getAttributes().hasAttribute(
          AttributeList::FunctionIndex, Attribute::Naked)) {
    return Chain;
  }

  const AVRMachineFunctionInfo *AFI = MF.getInfo<AVRMachineFunctionInfo>();

  unsigned RetOpc =
      AFI->isInterruptOrSignalHandler() ? AVRISD::RETI_FLAG : AVRISD::RET_FLAG;

  RetOps[0] = Chain; // Update chain.

  if (Flag.getNode()) {
    RetOps.push_back(Flag);
  }

  return DAG.getNode(RetOpc, dl, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
//  Custom Inserters
//===----------------------------------------------------------------------===//

MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
                                                  MachineBasicBlock *BB) const {
  unsigned Opc;
  const TargetRegisterClass *RC;
  bool HasRepeatedOperand = false;
  MachineFunction *F = BB->getParent();
  MachineRegisterInfo &RI = F->getRegInfo();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Invalid shift opcode!");
  case AVR::Lsl8:
    Opc = AVR::ADDRdRr; // LSL is an alias of ADD Rd, Rd
    RC = &AVR::GPR8RegClass;
    HasRepeatedOperand = true;
    break;
  case AVR::Lsl16:
    Opc = AVR::LSLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Asr8:
    Opc = AVR::ASRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Asr16:
    Opc = AVR::ASRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Lsr8:
    Opc = AVR::LSRRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Lsr16:
    Opc = AVR::LSRWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Rol8:
    Opc = AVR::ROLBRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Rol16:
    Opc = AVR::ROLWRd;
    RC = &AVR::DREGSRegClass;
    break;
  case AVR::Ror8:
    Opc = AVR::RORBRd;
    RC = &AVR::GPR8RegClass;
    break;
  case AVR::Ror16:
    Opc = AVR::RORWRd;
    RC = &AVR::DREGSRegClass;
    break;
  }

  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  MachineFunction::iterator I;
  for (I = BB->getIterator(); I != F->end() && &(*I) != BB; ++I)
    ;
  if (I != F->end())
    ++I;

  // Create loop block.
  MachineBasicBlock *LoopBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *CheckBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *RemBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, LoopBB);
  F->insert(I, CheckBB);
  F->insert(I, RemBB);

  // Update machine-CFG edges by transferring all successors of the current
  // block to the block containing instructions after shift.
  RemBB->splice(RemBB->begin(), BB, std::next(MachineBasicBlock::iterator(MI)),
                BB->end());
  RemBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add edges BB => CheckBB, CheckBB => LoopBB, LoopBB => CheckBB, and
  // CheckBB => RemBB.
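  //
  //         BB --> CheckBB --> RemBB
  //                  ^  |
  //                  |  v
  //                 LoopBB
  //
  // CheckBB decrements the shift amount and branches back to LoopBB while it
  // is still non-negative; LoopBB performs one single-bit shift per pass.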
  BB->addSuccessor(CheckBB);
  LoopBB->addSuccessor(CheckBB);
  CheckBB->addSuccessor(LoopBB);
  CheckBB->addSuccessor(RemBB);

  Register ShiftAmtReg = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftAmtReg2 = RI.createVirtualRegister(&AVR::GPR8RegClass);
  Register ShiftReg = RI.createVirtualRegister(RC);
  Register ShiftReg2 = RI.createVirtualRegister(RC);
  Register ShiftAmtSrcReg = MI.getOperand(2).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register DstReg = MI.getOperand(0).getReg();

  // BB:
  // rjmp CheckBB
  BuildMI(BB, dl, TII.get(AVR::RJMPk)).addMBB(CheckBB);

  // LoopBB:
  // ShiftReg2 = shift ShiftReg
  auto ShiftMI = BuildMI(LoopBB, dl, TII.get(Opc), ShiftReg2).addReg(ShiftReg);
  if (HasRepeatedOperand)
    ShiftMI.addReg(ShiftReg);

  // CheckBB:
  // ShiftReg = phi [%SrcReg, BB], [%ShiftReg2, LoopBB]
  // ShiftAmt = phi [%N, BB], [%ShiftAmt2, LoopBB]
  // DestReg = phi [%SrcReg, BB], [%ShiftReg, LoopBB]
  // ShiftAmt2 = ShiftAmt - 1;
  // if (ShiftAmt2 >= 0) goto LoopBB;
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), ShiftAmtReg)
      .addReg(ShiftAmtSrcReg)
      .addMBB(BB)
      .addReg(ShiftAmtReg2)
      .addMBB(LoopBB);
  BuildMI(CheckBB, dl, TII.get(AVR::PHI), DstReg)
      .addReg(SrcReg)
      .addMBB(BB)
      .addReg(ShiftReg2)
      .addMBB(LoopBB);

  BuildMI(CheckBB, dl, TII.get(AVR::DECRd), ShiftAmtReg2).addReg(ShiftAmtReg);
  BuildMI(CheckBB, dl, TII.get(AVR::BRPLk)).addMBB(LoopBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return RemBB;
}

static bool isCopyMulResult(MachineBasicBlock::iterator const &I) {
  if (I->getOpcode() == AVR::COPY) {
    Register SrcReg = I->getOperand(1).getReg();
    return (SrcReg == AVR::R0 || SrcReg == AVR::R1);
  }

  return false;
}
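
// AVR multiply instructions implicitly write their 16-bit product to R1:R0
// (e.g. "mul r24, r22"), while the ABI reserves R1 as the zero register.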
  switch (Opc) {
  case AVR::Lsl8:
  case AVR::Lsl16:
  case AVR::Lsr8:
  case AVR::Lsr16:
  case AVR::Rol8:
  case AVR::Rol16:
  case AVR::Ror8:
  case AVR::Ror16:
  case AVR::Asr8:
  case AVR::Asr16:
    return insertShift(MI, MBB);
  case AVR::MULRdRr:
  case AVR::MULSRdRr:
    return insertMul(MI, MBB);
  }

  assert((Opc == AVR::Select16 || Opc == AVR::Select8) &&
         "Unexpected instr type to insert");

  const AVRInstrInfo &TII = static_cast<const AVRInstrInfo &>(
      *MI.getParent()->getParent()->getSubtarget().getInstrInfo());
  DebugLoc dl = MI.getDebugLoc();

  // To "insert" a SELECT instruction, we insert a triangle control-flow
  // pattern. The incoming instruction knows the destination vreg to set, the
  // condition code register to branch on, the true/false values to select
  // between, and a branch opcode to use.

  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineBasicBlock *FallThrough = MBB->getFallThrough();

  // If the current basic block falls through to another basic block, we must
  // insert an unconditional branch to the fallthrough destination, since the
  // new basic blocks are inserted at the prior fallthrough point.
  if (FallThrough != nullptr) {
    BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(FallThrough);
  }

  MachineBasicBlock *trueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *falseMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  // Insert the new basic blocks directly after MBB.
  MachineFunction::iterator I = std::next(MBB->getIterator());
  MF->insert(I, trueMBB);
  MF->insert(I, falseMBB);

  // Transfer the remaining instructions and all successors of the current
  // block to the block which will contain the PHI node for the select.
  trueMBB->splice(trueMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  trueMBB->transferSuccessorsAndUpdatePHIs(MBB);

  AVRCC::CondCodes CC = (AVRCC::CondCodes)MI.getOperand(3).getImm();
  BuildMI(MBB, dl, TII.getBrCond(CC)).addMBB(trueMBB);
  BuildMI(MBB, dl, TII.get(AVR::RJMPk)).addMBB(falseMBB);
  MBB->addSuccessor(falseMBB);
  MBB->addSuccessor(trueMBB);

  // Unconditionally branch from the false block to the true block, which
  // holds the PHI.
  BuildMI(falseMBB, dl, TII.get(AVR::RJMPk)).addMBB(trueMBB);
  falseMBB->addSuccessor(trueMBB);

  // Set up the PHI node that determines where we came from.
  BuildMI(*trueMBB, trueMBB->begin(), dl, TII.get(AVR::PHI),
          MI.getOperand(0).getReg())
      .addReg(MI.getOperand(1).getReg())
      .addMBB(MBB)
      .addReg(MI.getOperand(2).getReg())
      .addMBB(falseMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
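  // Codegen resumes in trueMBB, which now holds the PHI as well as all the
  // instructions that originally followed the select pseudo.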
  return trueMBB;
}

//===----------------------------------------------------------------------===//
//  Inline Asm Support
//===----------------------------------------------------------------------===//

AVRTargetLowering::ConstraintType
AVRTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    // See http://www.nongnu.org/avr-libc/user-manual/inline_asm.html
    switch (Constraint[0]) {
    default:
      break;
    case 'a': // Simple upper registers
    case 'b': // Base pointer register pairs
    case 'd': // Upper registers
    case 'l': // Lower registers
    case 'e': // Pointer register pairs
    case 'q': // Stack pointer register
    case 'r': // Any register
    case 'w': // Special upper register pairs
      return C_RegisterClass;
    case 't': // Temporary register
    case 'x': case 'X': // Pointer register pair X
    case 'y': case 'Y': // Pointer register pair Y
    case 'z': case 'Z': // Pointer register pair Z
      return C_Register;
    case 'Q': // A memory address based on Y or Z pointer with displacement.
      return C_Memory;
    case 'G': // Floating point constant
    case 'I': // 6-bit positive integer constant
    case 'J': // 6-bit negative integer constant
    case 'K': // Integer constant (Range: 2)
    case 'L': // Integer constant (Range: 0)
    case 'M': // 8-bit integer constant
    case 'N': // Integer constant (Range: -1)
    case 'O': // Integer constant (Range: 8, 16, 24)
    case 'P': // Integer constant (Range: 1)
    case 'R': // Integer constant (Range: -6 to 5)
      return C_Immediate;
    }
  }

  return TargetLowering::getConstraintType(Constraint);
}

unsigned
AVRTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Not sure if this is actually the right thing to do, but we have to do
  // *something* [agnat]
  switch (ConstraintCode[0]) {
  case 'Q':
    return InlineAsm::Constraint_Q;
  }
  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

AVRTargetLowering::ConstraintWeight
AVRTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;

  // If we don't have a value, we can't do a match, but allow it at the lowest
  // weight. (This behaviour has been copied from the ARM backend.)
  if (!CallOperandVal) {
    return CW_Default;
  }

  // Look at the constraint type.
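  // In the ConstraintWeight enum, a matching immediate (CW_Constant) ranks
  // highest, ahead of memory (CW_Memory) and register (CW_Register,
  // CW_SpecificReg) operands.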
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'd':
  case 'r':
  case 'l':
    weight = CW_Register;
    break;
  case 'a':
  case 'b':
  case 'e':
  case 'q':
  case 't':
  case 'w':
  case 'x': case 'X':
  case 'y': case 'Y':
  case 'z': case 'Z':
    weight = CW_SpecificReg;
    break;
  case 'G':
    if (const ConstantFP *C = dyn_cast<ConstantFP>(CallOperandVal)) {
      if (C->isZero()) {
        weight = CW_Constant;
      }
    }
    break;
  case 'I':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<6>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'J':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -63) && (C->getSExtValue() <= 0)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'K':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 2) {
        weight = CW_Constant;
      }
    }
    break;
  case 'L':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 0) {
        weight = CW_Constant;
      }
    }
    break;
  case 'M':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (isUInt<8>(C->getZExtValue())) {
        weight = CW_Constant;
      }
    }
    break;
  case 'N':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getSExtValue() == -1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'O':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 8) || (C->getZExtValue() == 16) ||
          (C->getZExtValue() == 24)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'P':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() == 1) {
        weight = CW_Constant;
      }
    }
    break;
  case 'R':
    if (const ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -6) && (C->getSExtValue() <= 5)) {
        weight = CW_Constant;
      }
    }
    break;
  case 'Q':
    weight = CW_Memory;
    break;
  }

  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
AVRTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'a': // Simple upper registers r16..r23.
      if (VT == MVT::i8)
        return std::make_pair(0U, &AVR::LD8loRegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DREGSLD8loRegClass);
      break;
    case 'b': // Base pointer registers: y, z.
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(0U, &AVR::PTRDISPREGSRegClass);
      break;
    case 'd': // Upper registers r16..r31.
      if (VT == MVT::i8)
        return std::make_pair(0U, &AVR::LD8RegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DLDREGSRegClass);
      break;
    case 'l': // Lower registers r0..r15.
      if (VT == MVT::i8)
        return std::make_pair(0U, &AVR::GPR8loRegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DREGSloRegClass);
      break;
    case 'e': // Pointer register pairs: x, y, z.
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(0U, &AVR::PTRREGSRegClass);
      break;
    case 'q': // Stack pointer register: SPH:SPL.
      return std::make_pair(0U, &AVR::GPRSPRegClass);
    case 'r': // Any register: r0..r31.
      if (VT == MVT::i8)
        return std::make_pair(0U, &AVR::GPR8RegClass);
      else if (VT == MVT::i16)
        return std::make_pair(0U, &AVR::DREGSRegClass);
      break;
    case 't': // Temporary register: r0.
      if (VT == MVT::i8)
        return std::make_pair(unsigned(AVR::R0), &AVR::GPR8RegClass);
      break;
    case 'w': // Special upper register pairs: r24, r26, r28, r30.
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(0U, &AVR::IWREGSRegClass);
      break;
    case 'x': // Pointer register pair X: r27:r26.
    case 'X':
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(unsigned(AVR::R27R26), &AVR::PTRREGSRegClass);
      break;
    case 'y': // Pointer register pair Y: r29:r28.
    case 'Y':
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(unsigned(AVR::R29R28), &AVR::PTRREGSRegClass);
      break;
    case 'z': // Pointer register pair Z: r31:r30.
    case 'Z':
      if (VT == MVT::i8 || VT == MVT::i16)
        return std::make_pair(unsigned(AVR::R31R30), &AVR::PTRREGSRegClass);
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(
      Subtarget.getRegisterInfo(), Constraint, VT);
}

void AVRTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();

  // Currently we only support length-1 constraints.
  if (Constraint.length() != 1) {
    return;
  }

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    break;
  // Deal with integers first:
  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
  case 'O':
  case 'P':
  case 'R': {
    const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C) {
      return;
    }

    int64_t CVal64 = C->getSExtValue();
    uint64_t CUVal64 = C->getZExtValue();
    switch (ConstraintLetter) {
    case 'I': // 0..63
      if (!isUInt<6>(CUVal64))
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'J': // -63..0
      if (CVal64 < -63 || CVal64 > 0)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'K': // 2
      if (CUVal64 != 2)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'L': // 0
      if (CUVal64 != 0)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'M': // 0..255
      if (!isUInt<8>(CUVal64))
        return;
      // An i8 type may be printed as a negative number, e.g. 254 would be
      // printed as -2, so we force it to i16 at least.
      if (Ty.getSimpleVT() == MVT::i8) {
        Ty = MVT::i16;
      }
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'N': // -1
      if (CVal64 != -1)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    case 'O': // 8, 16, 24
      if (CUVal64 != 8 && CUVal64 != 16 && CUVal64 != 24)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'P': // 1
      if (CUVal64 != 1)
        return;
      Result = DAG.getTargetConstant(CUVal64, DL, Ty);
      break;
    case 'R': // -6..5
      if (CVal64 < -6 || CVal64 > 5)
        return;
      Result = DAG.getTargetConstant(CVal64, DL, Ty);
      break;
    }

    break;
  }
  case 'G': {
    const ConstantFPSDNode *FC = dyn_cast<ConstantFPSDNode>(Op);
    if (!FC || !FC->isZero())
      return;
    // Soften the float to an i8 0.
    Result = DAG.getTargetConstant(0, DL, MVT::i8);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

Register AVRTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                              const MachineFunction &MF) const {
  Register Reg;

  if (VT == LLT::scalar(8)) {
    Reg = StringSwitch<unsigned>(RegName)
              .Case("r0", AVR::R0)
              .Case("r1", AVR::R1)
              .Default(0);
  } else {
    Reg = StringSwitch<unsigned>(RegName)
              .Case("r0", AVR::R1R0)
              .Case("sp", AVR::SP)
              .Default(0);
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name \"" + StringRef(RegName) + "\".");
}

} // end of namespace llvm