//===-- BPFISelLowering.cpp - BPF DAG Lowering Implementation ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that BPF uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "BPFISelLowering.h"
#include "BPF.h"
#include "BPFSubtarget.h"
#include "BPFTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "bpf-lower"

static cl::opt<bool> BPFExpandMemcpyInOrder("bpf-expand-memcpy-in-order",
  cl::Hidden, cl::init(false),
  cl::desc("Expand memcpy into load/store pairs in order"));
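
// A brief usage sketch (hedged; the option is hidden, so it does not appear
// in plain -help output): the flag above can be passed to llc when targeting
// BPF, e.g.
//
//   llc -march=bpf -bpf-expand-memcpy-in-order input.ll
//
// to keep memcpy expansion as in-order load/store pairs; see the comment on
// BPFExpandMemcpyInOrder in the constructor below for why this matters to
// kernel JITs.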

static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg,
                 SDValue Val) {
  MachineFunction &MF = DAG.getMachineFunction();
  std::string Str;
  raw_string_ostream OS(Str);
  OS << Msg;
  Val->print(OS);
  OS.flush();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Str, DL.getDebugLoc()));
}

BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
                                     const BPFSubtarget &STI)
    : TargetLowering(TM) {

  // Set up the register classes.
  addRegisterClass(MVT::i64, &BPF::GPRRegClass);
  if (STI.getHasAlu32())
    addRegisterClass(MVT::i32, &BPF::GPR32RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(BPF::R11);

  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i32 && !STI.getHasAlu32())
      continue;

    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::SHL_PARTS, VT, Expand);
    setOperationAction(ISD::SRL_PARTS, VT, Expand);
    setOperationAction(ISD::SRA_PARTS, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Custom);
  }

  if (STI.getHasAlu32()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Promote);
    setOperationAction(ISD::BR_CC, MVT::i32,
                       STI.getHasJmp32() ? Custom : Promote);
  }

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);

  // Extended load operations for i1 types must be promoted.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);
  }
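
  // Illustrative note (a sketch, not tied to any particular test): because
  // SIGN_EXTEND_INREG and the SEXTLOAD variants are expanded above, IR such
  // as
  //
  //   %b = load i8, i8* %p
  //   %s = sext i8 %b to i64
  //
  // legalizes into a plain zero/any-extending byte load followed by a
  // shift-left / arithmetic-shift-right pair, since this ISA level has no
  // sign-extending load instruction.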

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments.
  setMinFunctionAlignment(Align(8));
  setPrefFunctionAlignment(Align(8));

  if (BPFExpandMemcpyInOrder) {
    // LLVM generic code will try to expand memcpy into load/store pairs at
    // this stage, which is before quite a few IR optimization passes. The
    // loads and stores could therefore be moved apart from each other, which
    // causes trouble for the memcpy pattern matchers inside kernel eBPF JIT
    // compilers.
    //
    // When -bpf-expand-memcpy-in-order is specified, we want to defer the
    // expansion of memcpy to a later stage in the IR optimization pipeline so
    // those load/store pairs won't be touched and can be kept in order.
    // Hence, we set MaxStoresPerMem* to zero to disable the generic
    // getMemcpyLoadsAndStores code path, and ask LLVM to use the target
    // expander EmitTargetCodeForMemcpy.
    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 0;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 0;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = 0;
  } else {
    // Inline memcpy() so that the kernel can see the explicit copy.
    unsigned CommonMaxStores =
        STI.getSelectionDAGInfo()->getCommonMaxStoresPerMemFunc();

    MaxStoresPerMemset = MaxStoresPerMemsetOptSize = CommonMaxStores;
    MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = CommonMaxStores;
    MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize = CommonMaxStores;
  }

  // CPU/Feature control.
  HasAlu32 = STI.getHasAlu32();
  HasJmp32 = STI.getHasJmp32();
  HasJmpExt = STI.getHasJmpExt();
}

bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  return false;
}

bool BPFTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool BPFTargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  if (!getHasAlu32() || !Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

bool BPFTargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  if (!getHasAlu32() || !VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 == 32 && NumBits2 == 64;
}

std::pair<unsigned, const TargetRegisterClass *>
BPFTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1)
    // GCC Constraint Letters
    switch (Constraint[0]) {
    case 'r': // GENERAL_REGS
      return std::make_pair(0U, &BPF::GPRRegClass);
    default:
      break;
    }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
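
// For example, BPF inline assembly such as
//
//   asm volatile("%0 = %1" : "=r"(dst) : "r"(src));
//
// binds both operands to the 64-bit GPR class via the 'r' constraint handled
// above (an illustrative sketch; 32-bit 'w' subregister operands are not
// handled by this hook).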

SDValue BPFTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::BR_CC:
    return LowerBR_CC(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::SELECT_CC:
    return LowerSELECT_CC(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    report_fatal_error("Unsupported dynamic stack allocation");
  default:
    llvm_unreachable("unimplemented operand");
  }
}

// Calling Convention Implementation.
#include "BPFGenCallingConv.inc"

SDValue BPFTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  }

  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  for (auto &VA : ArgLocs) {
    if (VA.isRegLoc()) {
      // Arguments passed in registers.
      EVT RegVT = VA.getLocVT();
      MVT::SimpleValueType SimpleTy = RegVT.getSimpleVT().SimpleTy;
      switch (SimpleTy) {
      default: {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << RegVT.getEVTString() << '\n';
        llvm_unreachable(nullptr);
      }
      case MVT::i32:
      case MVT::i64:
        Register VReg = RegInfo.createVirtualRegister(
            SimpleTy == MVT::i64 ? &BPF::GPRRegClass : &BPF::GPR32RegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, RegVT);

        // If this is a value that has been promoted to a wider type, insert
        // an assert[sz]ext to capture this, then truncate to the right size.
        if (VA.getLocInfo() == CCValAssign::SExt)
          ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));
        else if (VA.getLocInfo() == CCValAssign::ZExt)
          ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue,
                                 DAG.getValueType(VA.getValVT()));

        if (VA.getLocInfo() != CCValAssign::Full)
          ArgValue = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), ArgValue);

        InVals.push_back(ArgValue);

        break;
      }
    } else {
      fail(DL, DAG, "defined with too many args");
      InVals.push_back(DAG.getConstant(0, DL, VA.getLocVT()));
    }
  }

  if (IsVarArg || MF.getFunction().hasStructRetAttr()) {
    fail(DL, DAG, "functions with VarArgs or StructRet are not supported");
  }

  return Chain;
}

const unsigned BPFTargetLowering::MaxArgs = 5;
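
// Background for MaxArgs above (a summary of the BPF calling convention, not
// something this file alone enforces): arguments are passed in R1-R5 and the
// return value in R0. There is no stack argument passing, which is why both
// LowerFormalArguments and LowerCall below diagnose anything that does not
// fit in five registers instead of spilling it.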

SDValue BPFTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  auto &Outs = CLI.Outs;
  auto &OutVals = CLI.OutVals;
  auto &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();

  // The BPF target does not support tail call optimization.
  IsTailCall = false;

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    break;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, getHasAlu32() ? CC_BPF32 : CC_BPF64);

  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (Outs.size() > MaxArgs)
    fail(CLI.DL, DAG, "too many args to ", Callee);

  for (auto &Arg : Outs) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isByVal())
      continue;

    fail(CLI.DL, DAG, "pass by value not supported ", Callee);
  }

  auto PtrVT = getPointerTy(MF.getDataLayout());
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);

  SmallVector<std::pair<unsigned, SDValue>, MaxArgs> RegsToPass;

  // Walk the argument assignments.
  for (unsigned i = 0,
                e = std::min(static_cast<unsigned>(ArgLocs.size()), MaxArgs);
       i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, CLI.DL, VA.getLocVT(), Arg);
      break;
    }

    // Push arguments into the RegsToPass vector.
    if (VA.isRegLoc())
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    else
      llvm_unreachable("call arg pass bug");
  }

  SDValue InFlag;

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into registers. The InFlag
  // is necessary since all emitted instructions must be stuck together.
  for (auto &Reg : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, CLI.DL, Reg.first, Reg.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call
  // is), turn it into a TargetGlobalAddress node so that legalize doesn't
  // hack it. Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), CLI.DL, PtrVT,
                                        G->getOffset(), 0);
  } else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, 0);
    fail(CLI.DL, DAG, Twine("A call to built-in function '"
                            + StringRef(E->getSymbol())
                            + "' is not supported."));
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(BPFISD::CALL, CLI.DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(
      Chain, DAG.getConstant(NumBytes, CLI.DL, PtrVT, true),
      DAG.getConstant(0, CLI.DL, PtrVT, true), InFlag, CLI.DL);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, CLI.DL, DAG,
                         InVals);
}
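
// A concrete illustration of the ExternalSymbol diagnostic above (hedged;
// the exact driver output varies): when a front end lowers a large aggregate
// copy in BPF C code to a libc-style helper call, the resulting external
// symbol, e.g. memset, reaches LowerCall and produces
//
//   error: A call to built-in function 'memset' is not supported.
//
// since BPF programs cannot link against external functions.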

SDValue
BPFTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &DL, SelectionDAG &DAG) const {
  unsigned Opc = BPFISD::RET_FLAG;

  // CCValAssign - represents the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;
  MachineFunction &MF = DAG.getMachineFunction();

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (MF.getFunction().getReturnType()->isAggregateType()) {
    fail(DL, DAG, "only integer returns supported");
    return DAG.getNode(Opc, DL, MVT::Other, Chain);
  }

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

SDValue BPFTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());

  if (Ins.size() >= 2) {
    fail(DL, DAG, "only small returns supported");
    for (unsigned i = 0, e = Ins.size(); i != e; ++i)
      InVals.push_back(DAG.getConstant(0, DL, Ins[i].VT));
    return DAG.getCopyFromReg(Chain, DL, 1, Ins[0].VT, InFlag).getValue(1);
  }

  CCInfo.AnalyzeCallResult(Ins, getHasAlu32() ? RetCC_BPF32 : RetCC_BPF64);

  // Copy all of the result registers out of their specified physregs.
  for (auto &Val : RVLocs) {
    Chain = DAG.getCopyFromReg(Chain, DL, Val.getLocReg(),
                               Val.getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
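
// The helper below rewrites comparisons the base ISA cannot encode by
// swapping operands: the original eBPF instruction set has JGT/JGE/JSGT/JSGE
// but no JLT/JLE/JSLT/JSLE (those arrived with the jump-extension feature
// checked via getHasJmpExt()), so, sketched,
//
//   br_cc setlt %a, %b, %dest   ==>   br_cc setgt %b, %a, %dest
//
// i.e. the condition is operand-swapped rather than truly negated, despite
// the helper's name.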

static void NegateCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETLT:
  case ISD::SETLE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

SDValue BPFTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  return DAG.getNode(BPFISD::BR_CC, DL, Op.getValueType(), Chain, LHS, RHS,
                     DAG.getConstant(CC, DL, LHS.getValueType()), Dest);
}

SDValue BPFTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TrueV = Op.getOperand(2);
  SDValue FalseV = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDLoc DL(Op);

  if (!getHasJmpExt())
    NegateCC(LHS, RHS, CC);

  SDValue TargetCC = DAG.getConstant(CC, DL, LHS.getValueType());
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};

  return DAG.getNode(BPFISD::SELECT_CC, DL, VTs, Ops);
}

const char *BPFTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((BPFISD::NodeType)Opcode) {
  case BPFISD::FIRST_NUMBER:
    break;
  case BPFISD::RET_FLAG:
    return "BPFISD::RET_FLAG";
  case BPFISD::CALL:
    return "BPFISD::CALL";
  case BPFISD::SELECT_CC:
    return "BPFISD::SELECT_CC";
  case BPFISD::BR_CC:
    return "BPFISD::BR_CC";
  case BPFISD::Wrapper:
    return "BPFISD::Wrapper";
  case BPFISD::MEMCPY:
    return "BPFISD::MEMCPY";
  }
  return nullptr;
}

SDValue BPFTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  auto N = cast<GlobalAddressSDNode>(Op);
  assert(N->getOffset() == 0 && "Invalid offset for global address");

  SDLoc DL(Op);
  const GlobalValue *GV = N->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i64);

  return DAG.getNode(BPFISD::Wrapper, DL, MVT::i64, GA);
}
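
// The helper below performs the 32-to-64-bit promotion used when a 32-bit
// comparison must run on a 64-bit jump instruction. Sketch of the emitted
// sequence (register names illustrative):
//
//   rX = wY         # MOV_32_64, zero-extends the 32-bit subregister
//   rX <<= 32       # SLL_ri, only for the signed case
//   rX s>>= 32      # SRA_ri, replicates the sign bit into the upper half
//
// For unsigned comparisons the single MOV_32_64 suffices, because 32-bit
// subregister writes already zero the upper 32 bits.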

unsigned
BPFTargetLowering::EmitSubregExt(MachineInstr &MI, MachineBasicBlock *BB,
                                 unsigned Reg, bool isSigned) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i64);
  int RShiftOp = isSigned ? BPF::SRA_ri : BPF::SRL_ri;
  MachineFunction *F = BB->getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineRegisterInfo &RegInfo = F->getRegInfo();

  if (!isSigned) {
    Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
    BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
    return PromotedReg0;
  }
  Register PromotedReg0 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg1 = RegInfo.createVirtualRegister(RC);
  Register PromotedReg2 = RegInfo.createVirtualRegister(RC);
  BuildMI(BB, DL, TII.get(BPF::MOV_32_64), PromotedReg0).addReg(Reg);
  BuildMI(BB, DL, TII.get(BPF::SLL_ri), PromotedReg1)
      .addReg(PromotedReg0).addImm(32);
  BuildMI(BB, DL, TII.get(RShiftOp), PromotedReg2)
      .addReg(PromotedReg1).addImm(32);

  return PromotedReg2;
}

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserterMemcpy(MachineInstr &MI,
                                                     MachineBasicBlock *BB)
                                                     const {
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);
  unsigned ScratchReg;

  // This function does custom insertion while lowering BPFISD::MEMCPY, which
  // only has two register operands from memcpy semantics: the copy source
  // address and the copy destination address.
  //
  // Because we will expand BPFISD::MEMCPY into load/store pairs, we will need
  // a third scratch register to serve as the destination register of the load
  // and the source register of the store.
  //
  // The scratch register here carries the Define | Dead | EarlyClobber flags.
  // The EarlyClobber flag has the semantic property that the operand it is
  // attached to is clobbered before the rest of the inputs are read. Hence it
  // must be unique among the operands to the instruction. The Define flag is
  // needed to satisfy the machine verifier that an undef value isn't a
  // problem, as we are loading memory into the register anyway. The Dead flag
  // is needed because the value in the scratch register isn't meant to be
  // used by any other instruction.
  ScratchReg = MRI.createVirtualRegister(&BPF::GPRRegClass);
  MIB.addReg(ScratchReg,
             RegState::Define | RegState::Dead | RegState::EarlyClobber);

  return BB;
}
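
// How control reaches the inserter below (a summary of this file's
// pipeline): LowerSELECT_CC wraps ISD::SELECT_CC in a BPFISD::SELECT_CC
// node, instruction selection matches that node to one of the Select*
// pseudo-instructions, and this hook then expands the pseudo into an
// explicit compare-and-branch diamond, since eBPF has no conditional-move
// instruction. BPF::MEMCPY instead dispatches to
// EmitInstrWithCustomInserterMemcpy above.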

MachineBasicBlock *
BPFTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Opc = MI.getOpcode();
  bool isSelectRROp = (Opc == BPF::Select ||
                       Opc == BPF::Select_64_32 ||
                       Opc == BPF::Select_32 ||
                       Opc == BPF::Select_32_64);

  bool isMemcpyOp = Opc == BPF::MEMCPY;

#ifndef NDEBUG
  bool isSelectRIOp = (Opc == BPF::Select_Ri ||
                       Opc == BPF::Select_Ri_64_32 ||
                       Opc == BPF::Select_Ri_32 ||
                       Opc == BPF::Select_Ri_32_64);

  assert((isSelectRROp || isSelectRIOp || isMemcpyOp) &&
         "Unexpected instr type to insert");
#endif

  if (isMemcpyOp)
    return EmitInstrWithCustomInserterMemcpy(MI, BB);

  bool is32BitCmp = (Opc == BPF::Select_32 ||
                     Opc == BPF::Select_32_64 ||
                     Opc == BPF::Select_Ri_32 ||
                     Opc == BPF::Select_Ri_32_64);

  // To "insert" a SELECT instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator I = ++BB->getIterator();

  // ThisMBB:
  // ...
  //  TrueVal = ...
  //  jmp_XX r1, r2 goto Copy1MBB
  //  fallthrough --> Copy0MBB
  MachineBasicBlock *ThisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *Copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *Copy1MBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, Copy0MBB);
  F->insert(I, Copy1MBB);
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi node for the select.
  Copy1MBB->splice(Copy1MBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
  Copy1MBB->transferSuccessorsAndUpdatePHIs(BB);
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(Copy0MBB);
  BB->addSuccessor(Copy1MBB);

  // Insert Branch if Flag.
  int CC = MI.getOperand(3).getImm();
  int NewCC;
  switch (CC) {
#define SET_NEWCC(X, Y) \
  case ISD::X: \
    if (is32BitCmp && HasJmp32) \
      NewCC = isSelectRROp ? BPF::Y##_rr_32 : BPF::Y##_ri_32; \
    else \
      NewCC = isSelectRROp ? BPF::Y##_rr : BPF::Y##_ri; \
    break
  SET_NEWCC(SETGT, JSGT);
  SET_NEWCC(SETUGT, JUGT);
  SET_NEWCC(SETGE, JSGE);
  SET_NEWCC(SETUGE, JUGE);
  SET_NEWCC(SETEQ, JEQ);
  SET_NEWCC(SETNE, JNE);
  SET_NEWCC(SETLT, JSLT);
  SET_NEWCC(SETULT, JULT);
  SET_NEWCC(SETLE, JSLE);
  SET_NEWCC(SETULE, JULE);
  default:
    report_fatal_error("unimplemented select CondCode " + Twine(CC));
  }

  Register LHS = MI.getOperand(1).getReg();
  bool isSignedCmp = (CC == ISD::SETGT ||
                      CC == ISD::SETGE ||
                      CC == ISD::SETLT ||
                      CC == ISD::SETLE);

  // Without the jmp32 feature, eBPF has only 64-bit comparisons, so any
  // 32-bit comparison needs to be promoted. However, if a 32-bit comparison
  // operand is a destination register, it is already implicitly
  // zero-extended, so no explicit zero-extension sequence is needed for it.
  //
  // We simply do the extension for all situations in this method, and remove
  // the unnecessary ones in the BPFMIPeephole pass.
  if (is32BitCmp && !HasJmp32)
    LHS = EmitSubregExt(MI, BB, LHS, isSignedCmp);

  if (isSelectRROp) {
    Register RHS = MI.getOperand(2).getReg();

    if (is32BitCmp && !HasJmp32)
      RHS = EmitSubregExt(MI, BB, RHS, isSignedCmp);

    BuildMI(BB, DL, TII.get(NewCC)).addReg(LHS).addReg(RHS).addMBB(Copy1MBB);
  } else {
    int64_t imm32 = MI.getOperand(2).getImm();
    // Sanity check before building a J*_ri instruction.
    assert(isInt<32>(imm32));
    BuildMI(BB, DL, TII.get(NewCC))
        .addReg(LHS).addImm(imm32).addMBB(Copy1MBB);
  }

  // Copy0MBB:
  //  %FalseValue = ...
  //  # fallthrough to Copy1MBB
  BB = Copy0MBB;

  // Update machine-CFG edges.
  BB->addSuccessor(Copy1MBB);

  // Copy1MBB:
  //  %Result = phi [ %FalseValue, Copy0MBB ], [ %TrueValue, ThisMBB ]
  // ...
  BB = Copy1MBB;
  BuildMI(*BB, BB->begin(), DL, TII.get(BPF::PHI), MI.getOperand(0).getReg())
      .addReg(MI.getOperand(5).getReg())
      .addMBB(Copy0MBB)
      .addReg(MI.getOperand(4).getReg())
      .addMBB(ThisMBB);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
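
// For orientation, after register allocation the diamond typically collapses
// to straight-line code of roughly this shape (a sketch; block layout and
// register choices vary):
//
//   rD = <true value>
//   if rA <cond> rB goto LBB_join   # the J*_rr / J*_ri built above
//   rD = <false value>              # Copy0MBB
// LBB_join:                         # Copy1MBB, PHI resolved into rD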

EVT BPFTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                          EVT VT) const {
  return getHasAlu32() ? MVT::i32 : MVT::i64;
}

MVT BPFTargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                              EVT VT) const {
  return (getHasAlu32() && VT == MVT::i32) ? MVT::i32 : MVT::i64;
}
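
// Closing note on the two hooks above (explanatory, not load-bearing): with
// +alu32 enabled, setcc results and 32-bit shift amounts stay i32 so the
// 32-bit ALU (and, with +jmp32, 32-bit jump) instructions can be used
// directly; otherwise everything is widened to the native 64-bit register
// width.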