1//===-- RISCVInstrInfo.td - Target Description for RISC-V --*- tablegen -*-===// 2// 3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4// See https://llvm.org/LICENSE.txt for license information. 5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6// 7//===----------------------------------------------------------------------===// 8// 9// This file describes the RISC-V instructions in TableGen format. 10// 11//===----------------------------------------------------------------------===// 12 13//===----------------------------------------------------------------------===// 14// RISC-V specific DAG Nodes. 15//===----------------------------------------------------------------------===// 16 17// Target-independent type requirements, but with target-specific formats. 18def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>, 19 SDTCisVT<1, i32>]>; 20def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>, 21 SDTCisVT<1, i32>]>; 22 23// Target-dependent type requirements. 24def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>; 25def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>, 26 SDTCisVT<3, OtherVT>, 27 SDTCisSameAs<0, 4>, 28 SDTCisSameAs<4, 5>]>; 29def SDT_RISCVBrCC : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>, 30 SDTCisVT<2, OtherVT>, 31 SDTCisVT<3, OtherVT>]>; 32def SDT_RISCVReadCSR : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>]>; 33def SDT_RISCVWriteCSR : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisInt<1>]>; 34def SDT_RISCVSwapCSR : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>, 35 SDTCisInt<2>]>; 36def SDT_RISCVReadCycleWide : SDTypeProfile<2, 0, [SDTCisVT<0, i32>, 37 SDTCisVT<1, i32>]>; 38def SDT_RISCVIntUnaryOpW : SDTypeProfile<1, 1, [ 39 SDTCisSameAs<0, 1>, SDTCisVT<0, i64> 40]>; 41def SDT_RISCVIntBinOpW : SDTypeProfile<1, 2, [ 42 SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64> 43]>; 44def SDT_RISCVIntShiftDOpW : SDTypeProfile<1, 3, [ 45 SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>, SDTCisVT<3, i64> 46]>; 47 48// Target-independent nodes, but with target-specific formats. 49def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart, 50 [SDNPHasChain, SDNPOutGlue]>; 51def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd, 52 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; 53 54// Target-dependent nodes. 55def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall, 56 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, 57 SDNPVariadic]>; 58def riscv_ret_glue : SDNode<"RISCVISD::RET_GLUE", SDTNone, 59 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; 60def riscv_sret_glue : SDNode<"RISCVISD::SRET_GLUE", SDTNone, 61 [SDNPHasChain, SDNPOptInGlue]>; 62def riscv_mret_glue : SDNode<"RISCVISD::MRET_GLUE", SDTNone, 63 [SDNPHasChain, SDNPOptInGlue]>; 64def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC>; 65def riscv_brcc : SDNode<"RISCVISD::BR_CC", SDT_RISCVBrCC, 66 [SDNPHasChain]>; 67def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall, 68 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue, 69 SDNPVariadic]>; 70def riscv_sllw : SDNode<"RISCVISD::SLLW", SDT_RISCVIntBinOpW>; 71def riscv_sraw : SDNode<"RISCVISD::SRAW", SDT_RISCVIntBinOpW>; 72def riscv_srlw : SDNode<"RISCVISD::SRLW", SDT_RISCVIntBinOpW>; 73def riscv_read_csr : SDNode<"RISCVISD::READ_CSR", SDT_RISCVReadCSR, 74 [SDNPHasChain]>; 75def riscv_write_csr : SDNode<"RISCVISD::WRITE_CSR", SDT_RISCVWriteCSR, 76 [SDNPHasChain]>; 77def riscv_swap_csr : SDNode<"RISCVISD::SWAP_CSR", SDT_RISCVSwapCSR, 78 [SDNPHasChain]>; 79 80def riscv_read_cycle_wide : SDNode<"RISCVISD::READ_CYCLE_WIDE", 81 SDT_RISCVReadCycleWide, 82 [SDNPHasChain, SDNPSideEffect]>; 83 84def riscv_add_lo : SDNode<"RISCVISD::ADD_LO", SDTIntBinOp>; 85def riscv_hi : SDNode<"RISCVISD::HI", SDTIntUnaryOp>; 86def riscv_lla : SDNode<"RISCVISD::LLA", SDTIntUnaryOp>; 87def riscv_lga : SDNode<"RISCVISD::LGA", SDTLoad, 88 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; 89def riscv_add_tprel : SDNode<"RISCVISD::ADD_TPREL", 90 SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, 91 SDTCisSameAs<0, 2>, 92 SDTCisSameAs<0, 3>, 93 SDTCisInt<0>]>>; 94 95def riscv_la_tls_ie : SDNode<"RISCVISD::LA_TLS_IE", SDTLoad, 96 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; 97def riscv_la_tls_gd : SDNode<"RISCVISD::LA_TLS_GD", SDTIntUnaryOp>; 98 99//===----------------------------------------------------------------------===// 100// Operand and SDNode transformation definitions. 101//===----------------------------------------------------------------------===// 102 103class ImmXLenAsmOperand<string prefix, string suffix = ""> : AsmOperandClass { 104 let Name = prefix # "ImmXLen" # suffix; 105 let RenderMethod = "addImmOperands"; 106 let DiagnosticType = !strconcat("Invalid", Name); 107} 108 109class ImmAsmOperand<string prefix, int width, string suffix> : AsmOperandClass { 110 let Name = prefix # "Imm" # width # suffix; 111 let RenderMethod = "addImmOperands"; 112 let DiagnosticType = !strconcat("Invalid", Name); 113} 114 115def ImmZeroAsmOperand : AsmOperandClass { 116 let Name = "ImmZero"; 117 let RenderMethod = "addImmOperands"; 118 let DiagnosticType = !strconcat("Invalid", Name); 119} 120 121// A parse method for (${gpr}) or 0(${gpr}), where the 0 is be silently ignored. 122def ZeroOffsetMemOpOperand : AsmOperandClass { 123 let Name = "ZeroOffsetMemOpOperand"; 124 let RenderMethod = "addRegOperands"; 125 let PredicateMethod = "isGPR"; 126 let ParserMethod = "parseZeroOffsetMemOp"; 127} 128 129class MemOperand<RegisterClass regClass> : RegisterOperand<regClass>{ 130 let OperandType = "OPERAND_MEMORY"; 131} 132 133def GPRMemZeroOffset : MemOperand<GPR> { 134 let ParserMatchClass = ZeroOffsetMemOpOperand; 135 let PrintMethod = "printZeroOffsetMemOp"; 136} 137 138def GPRMem : MemOperand<GPR>; 139 140def SPMem : MemOperand<SP>; 141 142def GPRCMem : MemOperand<GPRC>; 143 144class SImmAsmOperand<int width, string suffix = ""> 145 : ImmAsmOperand<"S", width, suffix> { 146} 147 148class UImmAsmOperand<int width, string suffix = ""> 149 : ImmAsmOperand<"U", width, suffix> { 150} 151 152def FenceArg : AsmOperandClass { 153 let Name = "FenceArg"; 154 let RenderMethod = "addFenceArgOperands"; 155 let ParserMethod = "parseFenceArg"; 156} 157 158def fencearg : Operand<XLenVT> { 159 let ParserMatchClass = FenceArg; 160 let PrintMethod = "printFenceArg"; 161 let DecoderMethod = "decodeUImmOperand<4>"; 162 let OperandType = "OPERAND_UIMM4"; 163 let OperandNamespace = "RISCVOp"; 164} 165 166def UImmLog2XLenAsmOperand : AsmOperandClass { 167 let Name = "UImmLog2XLen"; 168 let RenderMethod = "addImmOperands"; 169 let DiagnosticType = "InvalidUImmLog2XLen"; 170} 171 172def uimmlog2xlen : Operand<XLenVT>, ImmLeaf<XLenVT, [{ 173 if (Subtarget->is64Bit()) 174 return isUInt<6>(Imm); 175 return isUInt<5>(Imm); 176}]> { 177 let ParserMatchClass = UImmLog2XLenAsmOperand; 178 // TODO: should ensure invalid shamt is rejected when decoding. 179 let DecoderMethod = "decodeUImmOperand<6>"; 180 let MCOperandPredicate = [{ 181 int64_t Imm; 182 if (!MCOp.evaluateAsConstantImm(Imm)) 183 return false; 184 if (STI.getTargetTriple().isArch64Bit()) 185 return isUInt<6>(Imm); 186 return isUInt<5>(Imm); 187 }]; 188 let OperandType = "OPERAND_UIMMLOG2XLEN"; 189 let OperandNamespace = "RISCVOp"; 190} 191 192def uimm1 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<1>(Imm);}]> { 193 let ParserMatchClass = UImmAsmOperand<1>; 194 let DecoderMethod = "decodeUImmOperand<1>"; 195 let OperandType = "OPERAND_UIMM1"; 196 let OperandNamespace = "RISCVOp"; 197} 198 199def uimm2 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<2>(Imm);}]> { 200 let ParserMatchClass = UImmAsmOperand<2>; 201 let DecoderMethod = "decodeUImmOperand<2>"; 202 let OperandType = "OPERAND_UIMM2"; 203 let OperandNamespace = "RISCVOp"; 204 let MCOperandPredicate = [{ 205 int64_t Imm; 206 if (!MCOp.evaluateAsConstantImm(Imm)) 207 return false; 208 return isUInt<2>(Imm); 209 }]; 210} 211 212def uimm3 : Operand<XLenVT> { 213 let ParserMatchClass = UImmAsmOperand<3>; 214 let DecoderMethod = "decodeUImmOperand<3>"; 215 let OperandType = "OPERAND_UIMM3"; 216 let OperandNamespace = "RISCVOp"; 217} 218 219def uimm4 : Operand<XLenVT> { 220 let ParserMatchClass = UImmAsmOperand<4>; 221 let DecoderMethod = "decodeUImmOperand<4>"; 222 let OperandType = "OPERAND_UIMM4"; 223 let OperandNamespace = "RISCVOp"; 224} 225 226def uimm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> { 227 let ParserMatchClass = UImmAsmOperand<5>; 228 let DecoderMethod = "decodeUImmOperand<5>"; 229 let OperandType = "OPERAND_UIMM5"; 230 let OperandNamespace = "RISCVOp"; 231} 232 233def InsnDirectiveOpcode : AsmOperandClass { 234 let Name = "InsnDirectiveOpcode"; 235 let ParserMethod = "parseInsnDirectiveOpcode"; 236 let RenderMethod = "addImmOperands"; 237 let PredicateMethod = "isImm"; 238} 239 240def uimm6 : Operand<XLenVT> { 241 let ParserMatchClass = UImmAsmOperand<6>; 242 let DecoderMethod = "decodeUImmOperand<6>"; 243 let OperandType = "OPERAND_UIMM6"; 244 let OperandNamespace = "RISCVOp"; 245} 246 247def uimm7_opcode : Operand<XLenVT> { 248 let ParserMatchClass = InsnDirectiveOpcode; 249 let DecoderMethod = "decodeUImmOperand<7>"; 250 let OperandType = "OPERAND_UIMM7"; 251 let OperandNamespace = "RISCVOp"; 252} 253 254def uimm7 : Operand<XLenVT> { 255 let ParserMatchClass = UImmAsmOperand<7>; 256 let DecoderMethod = "decodeUImmOperand<7>"; 257 let OperandType = "OPERAND_UIMM7"; 258 let OperandNamespace = "RISCVOp"; 259} 260 261def uimm8 : Operand<XLenVT> { 262 let ParserMatchClass = UImmAsmOperand<8>; 263 let DecoderMethod = "decodeUImmOperand<8>"; 264 let OperandType = "OPERAND_UIMM8"; 265 let OperandNamespace = "RISCVOp"; 266} 267 268def simm12 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<12>(Imm);}]> { 269 let ParserMatchClass = SImmAsmOperand<12>; 270 let EncoderMethod = "getImmOpValue"; 271 let DecoderMethod = "decodeSImmOperand<12>"; 272 let MCOperandPredicate = [{ 273 int64_t Imm; 274 if (MCOp.evaluateAsConstantImm(Imm)) 275 return isInt<12>(Imm); 276 return MCOp.isBareSymbolRef(); 277 }]; 278 let OperandType = "OPERAND_SIMM12"; 279 let OperandNamespace = "RISCVOp"; 280} 281 282// A 12-bit signed immediate which cannot fit in 6-bit signed immediate, 283// but even negative value fit in 12-bit. 284def simm12_no6 : ImmLeaf<XLenVT, [{ 285 return isInt<12>(Imm) && !isInt<6>(Imm) && isInt<12>(-Imm);}]>; 286 287// A 13-bit signed immediate where the least significant bit is zero. 288def simm13_lsb0 : Operand<OtherVT> { 289 let ParserMatchClass = SImmAsmOperand<13, "Lsb0">; 290 let PrintMethod = "printBranchOperand"; 291 let EncoderMethod = "getImmOpValueAsr1"; 292 let DecoderMethod = "decodeSImmOperandAndLsl1<13>"; 293 let MCOperandPredicate = [{ 294 int64_t Imm; 295 if (MCOp.evaluateAsConstantImm(Imm)) 296 return isShiftedInt<12, 1>(Imm); 297 return MCOp.isBareSymbolRef(); 298 }]; 299 let OperandType = "OPERAND_PCREL"; 300} 301 302class UImm20Operand : Operand<XLenVT> { 303 let EncoderMethod = "getImmOpValue"; 304 let DecoderMethod = "decodeUImmOperand<20>"; 305 let MCOperandPredicate = [{ 306 int64_t Imm; 307 if (MCOp.evaluateAsConstantImm(Imm)) 308 return isUInt<20>(Imm); 309 return MCOp.isBareSymbolRef(); 310 }]; 311 let OperandType = "OPERAND_UIMM20"; 312 let OperandNamespace = "RISCVOp"; 313} 314 315def uimm20_lui : UImm20Operand { 316 let ParserMatchClass = UImmAsmOperand<20, "LUI">; 317} 318def uimm20_auipc : UImm20Operand { 319 let ParserMatchClass = UImmAsmOperand<20, "AUIPC">; 320} 321 322def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> { 323 let ParserMethod = "parseJALOffset"; 324} 325 326// A 21-bit signed immediate where the least significant bit is zero. 327def simm21_lsb0_jal : Operand<OtherVT> { 328 let ParserMatchClass = Simm21Lsb0JALAsmOperand; 329 let PrintMethod = "printBranchOperand"; 330 let EncoderMethod = "getImmOpValueAsr1"; 331 let DecoderMethod = "decodeSImmOperandAndLsl1<21>"; 332 let MCOperandPredicate = [{ 333 int64_t Imm; 334 if (MCOp.evaluateAsConstantImm(Imm)) 335 return isShiftedInt<20, 1>(Imm); 336 return MCOp.isBareSymbolRef(); 337 }]; 338 let OperandType = "OPERAND_PCREL"; 339} 340 341def BareSymbol : AsmOperandClass { 342 let Name = "BareSymbol"; 343 let RenderMethod = "addImmOperands"; 344 let DiagnosticType = "InvalidBareSymbol"; 345 let ParserMethod = "parseBareSymbol"; 346} 347 348// A bare symbol. 349def bare_symbol : Operand<XLenVT> { 350 let ParserMatchClass = BareSymbol; 351} 352 353def CallSymbol : AsmOperandClass { 354 let Name = "CallSymbol"; 355 let RenderMethod = "addImmOperands"; 356 let DiagnosticType = "InvalidCallSymbol"; 357 let ParserMethod = "parseCallSymbol"; 358} 359 360// A bare symbol used in call/tail only. 361def call_symbol : Operand<XLenVT> { 362 let ParserMatchClass = CallSymbol; 363} 364 365def PseudoJumpSymbol : AsmOperandClass { 366 let Name = "PseudoJumpSymbol"; 367 let RenderMethod = "addImmOperands"; 368 let DiagnosticType = "InvalidPseudoJumpSymbol"; 369 let ParserMethod = "parsePseudoJumpSymbol"; 370} 371 372// A bare symbol used for pseudo jumps only. 373def pseudo_jump_symbol : Operand<XLenVT> { 374 let ParserMatchClass = PseudoJumpSymbol; 375} 376 377def TPRelAddSymbol : AsmOperandClass { 378 let Name = "TPRelAddSymbol"; 379 let RenderMethod = "addImmOperands"; 380 let DiagnosticType = "InvalidTPRelAddSymbol"; 381 let ParserMethod = "parseOperandWithModifier"; 382} 383 384// A bare symbol with the %tprel_add variant. 385def tprel_add_symbol : Operand<XLenVT> { 386 let ParserMatchClass = TPRelAddSymbol; 387} 388 389def CSRSystemRegister : AsmOperandClass { 390 let Name = "CSRSystemRegister"; 391 let ParserMethod = "parseCSRSystemRegister"; 392 let DiagnosticType = "InvalidCSRSystemRegister"; 393} 394 395def csr_sysreg : Operand<XLenVT> { 396 let ParserMatchClass = CSRSystemRegister; 397 let PrintMethod = "printCSRSystemRegister"; 398 let DecoderMethod = "decodeUImmOperand<12>"; 399 let OperandType = "OPERAND_UIMM12"; 400 let OperandNamespace = "RISCVOp"; 401} 402 403// A parameterized register class alternative to i32imm/i64imm from Target.td. 404def ixlenimm : Operand<XLenVT>; 405 406def ixlenimm_li : Operand<XLenVT> { 407 let ParserMatchClass = ImmXLenAsmOperand<"", "LI">; 408} 409 410// Accepts subset of LI operands, used by LAImm and LLAImm 411def ixlenimm_li_restricted : Operand<XLenVT> { 412 let ParserMatchClass = ImmXLenAsmOperand<"", "LI_Restricted">; 413} 414 415// Standalone (codegen-only) immleaf patterns. 416 417// A 6-bit constant greater than 32. 418def uimm6gt32 : ImmLeaf<XLenVT, [{ 419 return isUInt<6>(Imm) && Imm > 32; 420}]>; 421 422// Addressing modes. 423// Necessary because a frameindex can't be matched directly in a pattern. 424def FrameAddrRegImm : ComplexPattern<iPTR, 2, "SelectFrameAddrRegImm", 425 [frameindex, or, add]>; 426def AddrRegImm : ComplexPattern<iPTR, 2, "SelectAddrRegImm">; 427 428// Return the negation of an immediate value. 429def NegImm : SDNodeXForm<imm, [{ 430 return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N), 431 N->getValueType(0)); 432}]>; 433 434// Return an immediate value minus 32. 435def ImmSub32 : SDNodeXForm<imm, [{ 436 return CurDAG->getTargetConstant(N->getSExtValue() - 32, SDLoc(N), 437 N->getValueType(0)); 438}]>; 439 440// Return an immediate subtracted from XLen. 441def ImmSubFromXLen : SDNodeXForm<imm, [{ 442 uint64_t XLen = Subtarget->getXLen(); 443 return CurDAG->getTargetConstant(XLen - N->getZExtValue(), SDLoc(N), 444 N->getValueType(0)); 445}]>; 446 447// Return an immediate subtracted from 32. 448def ImmSubFrom32 : SDNodeXForm<imm, [{ 449 return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), 450 N->getValueType(0)); 451}]>; 452 453// Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1), 454// in which imm = imm0 + imm1 and both imm0 and imm1 are simm12. We make imm0 455// as large as possible and imm1 as small as possible so that we might be able 456// to use c.addi for the small immediate. 457def AddiPair : PatLeaf<(imm), [{ 458 if (!N->hasOneUse()) 459 return false; 460 // The immediate operand must be in range [-4096,-2049] or [2048,4094]. 461 int64_t Imm = N->getSExtValue(); 462 return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094); 463}]>; 464 465// Return imm - (imm < 0 ? -2048 : 2047). 466def AddiPairImmSmall : SDNodeXForm<imm, [{ 467 int64_t Imm = N->getSExtValue(); 468 int64_t Adj = N->getSExtValue() < 0 ? -2048 : 2047; 469 return CurDAG->getTargetConstant(Imm - Adj, SDLoc(N), 470 N->getValueType(0)); 471}]>; 472 473// Return -2048 if immediate is negative or 2047 if positive. These are the 474// largest simm12 values. 475def AddiPairImmLarge : SDNodeXForm<imm, [{ 476 int64_t Imm = N->getSExtValue() < 0 ? -2048 : 2047; 477 return CurDAG->getTargetConstant(Imm, SDLoc(N), 478 N->getValueType(0)); 479}]>; 480 481def TrailingZeros : SDNodeXForm<imm, [{ 482 return CurDAG->getTargetConstant(llvm::countr_zero(N->getZExtValue()), 483 SDLoc(N), N->getValueType(0)); 484}]>; 485 486def XLenSubTrailingOnes : SDNodeXForm<imm, [{ 487 uint64_t XLen = Subtarget->getXLen(); 488 uint64_t TrailingOnes = llvm::countr_one(N->getZExtValue()); 489 return CurDAG->getTargetConstant(XLen - TrailingOnes, SDLoc(N), 490 N->getValueType(0)); 491}]>; 492 493// Checks if this mask is a non-empty sequence of ones starting at the 494// most/least significant bit with the remainder zero and exceeds simm32/simm12. 495def LeadingOnesMask : PatLeaf<(imm), [{ 496 if (!N->hasOneUse()) 497 return false; 498 return !isInt<32>(N->getSExtValue()) && isMask_64(~N->getSExtValue()); 499}], TrailingZeros>; 500 501def TrailingOnesMask : PatLeaf<(imm), [{ 502 if (!N->hasOneUse()) 503 return false; 504 return !isInt<12>(N->getSExtValue()) && isMask_64(N->getZExtValue()); 505}], XLenSubTrailingOnes>; 506 507// Similar to LeadingOnesMask, but only consider leading ones in the lower 32 508// bits. 509def LeadingOnesWMask : PatLeaf<(imm), [{ 510 if (!N->hasOneUse()) 511 return false; 512 // If the value is a uint32 but not an int32, it must have bit 31 set and 513 // bits 63:32 cleared. After that we're looking for a shifted mask but not 514 // an all ones mask. 515 int64_t Imm = N->getSExtValue(); 516 return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) && 517 Imm != UINT64_C(0xffffffff); 518}], TrailingZeros>; 519 520//===----------------------------------------------------------------------===// 521// Instruction Formats 522//===----------------------------------------------------------------------===// 523 524include "RISCVInstrFormats.td" 525 526//===----------------------------------------------------------------------===// 527// Instruction Class Templates 528//===----------------------------------------------------------------------===// 529 530let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in 531class BranchCC_rri<bits<3> funct3, string opcodestr> 532 : RVInstB<funct3, OPC_BRANCH, (outs), 533 (ins GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12), 534 opcodestr, "$rs1, $rs2, $imm12">, 535 Sched<[WriteJmp, ReadJmp, ReadJmp]> { 536 let isBranch = 1; 537 let isTerminator = 1; 538} 539 540let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in { 541class Load_ri<bits<3> funct3, string opcodestr> 542 : RVInstI<funct3, OPC_LOAD, (outs GPR:$rd), (ins GPRMem:$rs1, simm12:$imm12), 543 opcodestr, "$rd, ${imm12}(${rs1})">; 544 545class HLoad_r<bits<7> funct7, bits<5> funct5, string opcodestr> 546 : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs GPR:$rd), 547 (ins GPRMemZeroOffset:$rs1), opcodestr, "$rd, $rs1"> { 548 let rs2 = funct5; 549} 550} 551 552// Operands for stores are in the order srcreg, base, offset rather than 553// reflecting the order these fields are specified in the instruction 554// encoding. 555let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in { 556class Store_rri<bits<3> funct3, string opcodestr> 557 : RVInstS<funct3, OPC_STORE, (outs), 558 (ins GPR:$rs2, GPRMem:$rs1, simm12:$imm12), 559 opcodestr, "$rs2, ${imm12}(${rs1})">; 560 561class HStore_rr<bits<7> funct7, string opcodestr> 562 : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs), 563 (ins GPR:$rs2, GPRMemZeroOffset:$rs1), 564 opcodestr, "$rs2, $rs1"> { 565 let rd = 0; 566} 567} 568 569let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in 570class ALU_ri<bits<3> funct3, string opcodestr> 571 : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12), 572 opcodestr, "$rd, $rs1, $imm12">, 573 Sched<[WriteIALU, ReadIALU]>; 574 575let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in 576class Shift_ri<bits<5> imm11_7, bits<3> funct3, string opcodestr> 577 : RVInstIShift<imm11_7, funct3, OPC_OP_IMM, (outs GPR:$rd), 578 (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr, 579 "$rd, $rs1, $shamt">, 580 Sched<[WriteShiftImm, ReadShiftImm]>; 581 582let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in 583class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr, 584 bit Commutable = 0> 585 : RVInstR<funct7, funct3, OPC_OP, (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), 586 opcodestr, "$rd, $rs1, $rs2"> { 587 let isCommutable = Commutable; 588} 589 590let hasNoSchedulingInfo = 1, 591 hasSideEffects = 1, mayLoad = 0, mayStore = 0 in 592class CSR_ir<bits<3> funct3, string opcodestr> 593 : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1), 594 opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR, ReadCSR]>; 595 596let hasNoSchedulingInfo = 1, 597 hasSideEffects = 1, mayLoad = 0, mayStore = 0 in 598class CSR_ii<bits<3> funct3, string opcodestr> 599 : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), 600 (ins csr_sysreg:$imm12, uimm5:$rs1), 601 opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR]>; 602 603let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in 604class ShiftW_ri<bits<7> imm11_5, bits<3> funct3, string opcodestr> 605 : RVInstIShiftW<imm11_5, funct3, OPC_OP_IMM_32, (outs GPR:$rd), 606 (ins GPR:$rs1, uimm5:$shamt), opcodestr, 607 "$rd, $rs1, $shamt">, 608 Sched<[WriteShiftImm32, ReadShiftImm32]>; 609 610let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in 611class ALUW_rr<bits<7> funct7, bits<3> funct3, string opcodestr, 612 bit Commutable = 0> 613 : RVInstR<funct7, funct3, OPC_OP_32, (outs GPR:$rd), 614 (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2"> { 615 let isCommutable = Commutable; 616} 617 618let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in 619class Priv<string opcodestr, bits<7> funct7> 620 : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2), 621 opcodestr, "">; 622 623let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in 624class Priv_rr<string opcodestr, bits<7> funct7> 625 : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2), 626 opcodestr, "$rs1, $rs2"> { 627 let rd = 0; 628} 629 630//===----------------------------------------------------------------------===// 631// Instructions 632//===----------------------------------------------------------------------===// 633 634let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { 635let isReMaterializable = 1, isAsCheapAsAMove = 1, 636 IsSignExtendingOpW = 1 in 637def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20), 638 "lui", "$rd, $imm20">, Sched<[WriteIALU]>; 639 640def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20), 641 "auipc", "$rd, $imm20">, Sched<[WriteIALU]>; 642 643def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20), 644 "jal", "$rd, $imm20">, Sched<[WriteJal]>; 645 646def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd), 647 (ins GPR:$rs1, simm12:$imm12), 648 "jalr", "$rd, ${imm12}(${rs1})">, 649 Sched<[WriteJalr, ReadJalr]>; 650} // hasSideEffects = 0, mayLoad = 0, mayStore = 0 651 652def BEQ : BranchCC_rri<0b000, "beq">; 653def BNE : BranchCC_rri<0b001, "bne">; 654def BLT : BranchCC_rri<0b100, "blt">; 655def BGE : BranchCC_rri<0b101, "bge">; 656def BLTU : BranchCC_rri<0b110, "bltu">; 657def BGEU : BranchCC_rri<0b111, "bgeu">; 658 659let IsSignExtendingOpW = 1 in { 660def LB : Load_ri<0b000, "lb">, Sched<[WriteLDB, ReadMemBase]>; 661def LH : Load_ri<0b001, "lh">, Sched<[WriteLDH, ReadMemBase]>; 662def LW : Load_ri<0b010, "lw">, Sched<[WriteLDW, ReadMemBase]>; 663def LBU : Load_ri<0b100, "lbu">, Sched<[WriteLDB, ReadMemBase]>; 664def LHU : Load_ri<0b101, "lhu">, Sched<[WriteLDH, ReadMemBase]>; 665} 666 667def SB : Store_rri<0b000, "sb">, Sched<[WriteSTB, ReadStoreData, ReadMemBase]>; 668def SH : Store_rri<0b001, "sh">, Sched<[WriteSTH, ReadStoreData, ReadMemBase]>; 669def SW : Store_rri<0b010, "sw">, Sched<[WriteSTW, ReadStoreData, ReadMemBase]>; 670 671// ADDI isn't always rematerializable, but isReMaterializable will be used as 672// a hint which is verified in isReallyTriviallyReMaterializable. 673let isReMaterializable = 1, isAsCheapAsAMove = 1 in 674def ADDI : ALU_ri<0b000, "addi">; 675 676let IsSignExtendingOpW = 1 in { 677def SLTI : ALU_ri<0b010, "slti">; 678def SLTIU : ALU_ri<0b011, "sltiu">; 679} 680 681let isReMaterializable = 1, isAsCheapAsAMove = 1 in { 682def XORI : ALU_ri<0b100, "xori">; 683def ORI : ALU_ri<0b110, "ori">; 684} 685 686def ANDI : ALU_ri<0b111, "andi">; 687 688def SLLI : Shift_ri<0b00000, 0b001, "slli">; 689def SRLI : Shift_ri<0b00000, 0b101, "srli">; 690def SRAI : Shift_ri<0b01000, 0b101, "srai">; 691 692def ADD : ALU_rr<0b0000000, 0b000, "add", Commutable=1>, 693 Sched<[WriteIALU, ReadIALU, ReadIALU]>; 694def SUB : ALU_rr<0b0100000, 0b000, "sub">, 695 Sched<[WriteIALU, ReadIALU, ReadIALU]>; 696def SLL : ALU_rr<0b0000000, 0b001, "sll">, 697 Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>; 698let IsSignExtendingOpW = 1 in { 699def SLT : ALU_rr<0b0000000, 0b010, "slt">, 700 Sched<[WriteIALU, ReadIALU, ReadIALU]>; 701def SLTU : ALU_rr<0b0000000, 0b011, "sltu">, 702 Sched<[WriteIALU, ReadIALU, ReadIALU]>; 703} 704def XOR : ALU_rr<0b0000000, 0b100, "xor", Commutable=1>, 705 Sched<[WriteIALU, ReadIALU, ReadIALU]>; 706def SRL : ALU_rr<0b0000000, 0b101, "srl">, 707 Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>; 708def SRA : ALU_rr<0b0100000, 0b101, "sra">, 709 Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>; 710def OR : ALU_rr<0b0000000, 0b110, "or", Commutable=1>, 711 Sched<[WriteIALU, ReadIALU, ReadIALU]>; 712def AND : ALU_rr<0b0000000, 0b111, "and", Commutable=1>, 713 Sched<[WriteIALU, ReadIALU, ReadIALU]>; 714 715let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in { 716def FENCE : RVInstI<0b000, OPC_MISC_MEM, (outs), 717 (ins fencearg:$pred, fencearg:$succ), 718 "fence", "$pred, $succ">, Sched<[]> { 719 bits<4> pred; 720 bits<4> succ; 721 722 let rs1 = 0; 723 let rd = 0; 724 let imm12 = {0b0000,pred,succ}; 725} 726 727def FENCE_TSO : RVInstI<0b000, OPC_MISC_MEM, (outs), (ins), "fence.tso", "">, Sched<[]> { 728 let rs1 = 0; 729 let rd = 0; 730 let imm12 = {0b1000,0b0011,0b0011}; 731} 732 733def FENCE_I : RVInstI<0b001, OPC_MISC_MEM, (outs), (ins), "fence.i", "">, Sched<[]> { 734 let rs1 = 0; 735 let rd = 0; 736 let imm12 = 0; 737} 738 739def ECALL : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ecall", "">, Sched<[WriteJmp]> { 740 let rs1 = 0; 741 let rd = 0; 742 let imm12 = 0; 743} 744 745def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", "">, 746 Sched<[]> { 747 let rs1 = 0; 748 let rd = 0; 749 let imm12 = 1; 750} 751 752// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented 753// instruction (i.e., it should always trap, if your implementation has invalid 754// instruction traps). 755def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", "">, 756 Sched<[]> { 757 let rs1 = 0; 758 let rd = 0; 759 let imm12 = 0b110000000000; 760} 761 762let Predicates = [HasStdExtZawrs] in { 763def WRS_NTO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.nto", "">, 764 Sched<[]> { 765 let rs1 = 0; 766 let rd = 0; 767 let imm12 = 0b000000001101; 768} 769 770def WRS_STO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.sto", "">, 771 Sched<[]> { 772 let rs1 = 0; 773 let rd = 0; 774 let imm12 = 0b000000011101; 775} 776} // Predicates = [HasStdExtZawrs] 777 778} // hasSideEffects = 1, mayLoad = 0, mayStore = 0 779 780def CSRRW : CSR_ir<0b001, "csrrw">; 781def CSRRS : CSR_ir<0b010, "csrrs">; 782def CSRRC : CSR_ir<0b011, "csrrc">; 783 784def CSRRWI : CSR_ii<0b101, "csrrwi">; 785def CSRRSI : CSR_ii<0b110, "csrrsi">; 786def CSRRCI : CSR_ii<0b111, "csrrci">; 787 788/// RV64I instructions 789 790let Predicates = [IsRV64] in { 791def LWU : Load_ri<0b110, "lwu">, Sched<[WriteLDW, ReadMemBase]>; 792def LD : Load_ri<0b011, "ld">, Sched<[WriteLDD, ReadMemBase]>; 793def SD : Store_rri<0b011, "sd">, Sched<[WriteSTD, ReadStoreData, ReadMemBase]>; 794 795let IsSignExtendingOpW = 1 in { 796let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in 797def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd), 798 (ins GPR:$rs1, simm12:$imm12), 799 "addiw", "$rd, $rs1, $imm12">, 800 Sched<[WriteIALU32, ReadIALU32]>; 801 802def SLLIW : ShiftW_ri<0b0000000, 0b001, "slliw">; 803def SRLIW : ShiftW_ri<0b0000000, 0b101, "srliw">; 804def SRAIW : ShiftW_ri<0b0100000, 0b101, "sraiw">; 805 806def ADDW : ALUW_rr<0b0000000, 0b000, "addw", Commutable=1>, 807 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>; 808def SUBW : ALUW_rr<0b0100000, 0b000, "subw">, 809 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>; 810def SLLW : ALUW_rr<0b0000000, 0b001, "sllw">, 811 Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>; 812def SRLW : ALUW_rr<0b0000000, 0b101, "srlw">, 813 Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>; 814def SRAW : ALUW_rr<0b0100000, 0b101, "sraw">, 815 Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>; 816} // IsSignExtendingOpW = 1 817} // Predicates = [IsRV64] 818 819//===----------------------------------------------------------------------===// 820// Privileged instructions 821//===----------------------------------------------------------------------===// 822 823let isBarrier = 1, isReturn = 1, isTerminator = 1 in { 824def SRET : Priv<"sret", 0b0001000>, Sched<[]> { 825 let rd = 0; 826 let rs1 = 0; 827 let rs2 = 0b00010; 828} 829 830def MRET : Priv<"mret", 0b0011000>, Sched<[]> { 831 let rd = 0; 832 let rs1 = 0; 833 let rs2 = 0b00010; 834} 835} // isBarrier = 1, isReturn = 1, isTerminator = 1 836 837def WFI : Priv<"wfi", 0b0001000>, Sched<[]> { 838 let rd = 0; 839 let rs1 = 0; 840 let rs2 = 0b00101; 841} 842 843let Predicates = [HasStdExtSvinval] in { 844def SFENCE_W_INVAL : Priv<"sfence.w.inval", 0b0001100>, Sched<[]> { 845 let rd = 0; 846 let rs1 = 0; 847 let rs2 = 0; 848} 849 850def SFENCE_INVAL_IR : Priv<"sfence.inval.ir", 0b0001100>, Sched<[]> { 851 let rd = 0; 852 let rs1 = 0; 853 let rs2 = 0b00001; 854} 855def SINVAL_VMA : Priv_rr<"sinval.vma", 0b0001011>, Sched<[]>; 856def HINVAL_VVMA : Priv_rr<"hinval.vvma", 0b0010011>, Sched<[]>; 857def HINVAL_GVMA : Priv_rr<"hinval.gvma", 0b0110011>, Sched<[]>; 858} // Predicates = [HasStdExtSvinval] 859 860def SFENCE_VMA : Priv_rr<"sfence.vma", 0b0001001>, Sched<[]>; 861 862let Predicates = [HasStdExtH] in { 863def HFENCE_VVMA : Priv_rr<"hfence.vvma", 0b0010001>, Sched<[]>; 864def HFENCE_GVMA : Priv_rr<"hfence.gvma", 0b0110001>, Sched<[]>; 865 866def HLV_B : HLoad_r<0b0110000, 0b00000, "hlv.b">, Sched<[]>; 867def HLV_BU : HLoad_r<0b0110000, 0b00001, "hlv.bu">, Sched<[]>; 868def HLV_H : HLoad_r<0b0110010, 0b00000, "hlv.h">, Sched<[]>; 869def HLV_HU : HLoad_r<0b0110010, 0b00001, "hlv.hu">, Sched<[]>; 870def HLVX_HU : HLoad_r<0b0110010, 0b00011, "hlvx.hu">, Sched<[]>; 871def HLV_W : HLoad_r<0b0110100, 0b00000, "hlv.w">, Sched<[]>; 872def HLVX_WU : HLoad_r<0b0110100, 0b00011, "hlvx.wu">, Sched<[]>; 873def HSV_B : HStore_rr<0b0110001, "hsv.b">, Sched<[]>; 874def HSV_H : HStore_rr<0b0110011, "hsv.h">, Sched<[]>; 875def HSV_W : HStore_rr<0b0110101, "hsv.w">, Sched<[]>; 876} 877let Predicates = [IsRV64, HasStdExtH] in { 878def HLV_WU : HLoad_r<0b0110100, 0b00001, "hlv.wu">, Sched<[]>; 879def HLV_D : HLoad_r<0b0110110, 0b00000, "hlv.d">, Sched<[]>; 880def HSV_D : HStore_rr<0b0110111, "hsv.d">, Sched<[]>; 881} 882 883//===----------------------------------------------------------------------===// 884// Debug instructions 885//===----------------------------------------------------------------------===// 886 887let isBarrier = 1, isReturn = 1, isTerminator = 1 in { 888def DRET : Priv<"dret", 0b0111101>, Sched<[]> { 889 let rd = 0; 890 let rs1 = 0; 891 let rs2 = 0b10010; 892} 893} // isBarrier = 1, isReturn = 1, isTerminator = 1 894 895//===----------------------------------------------------------------------===// 896// Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20) 897//===----------------------------------------------------------------------===// 898 899def : InstAlias<"nop", (ADDI X0, X0, 0)>; 900 901// Note that the size is 32 because up to 8 32-bit instructions are needed to 902// generate an arbitrary 64-bit immediate. However, the size does not really 903// matter since PseudoLI is currently only used in the AsmParser where it gets 904// expanded to real instructions immediately. 905let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32, 906 isCodeGenOnly = 0, isAsmParserOnly = 1 in 907def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [], 908 "li", "$rd, $imm">; 909 910def PseudoLB : PseudoLoad<"lb">; 911def PseudoLBU : PseudoLoad<"lbu">; 912def PseudoLH : PseudoLoad<"lh">; 913def PseudoLHU : PseudoLoad<"lhu">; 914def PseudoLW : PseudoLoad<"lw">; 915 916def PseudoSB : PseudoStore<"sb">; 917def PseudoSH : PseudoStore<"sh">; 918def PseudoSW : PseudoStore<"sw">; 919 920let Predicates = [IsRV64] in { 921def PseudoLWU : PseudoLoad<"lwu">; 922def PseudoLD : PseudoLoad<"ld">; 923def PseudoSD : PseudoStore<"sd">; 924} // Predicates = [IsRV64] 925 926def : InstAlias<"li $rd, $imm", (ADDI GPR:$rd, X0, simm12:$imm)>; 927def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>; 928def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>; 929def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>; 930 931let Predicates = [IsRV64] in { 932def : InstAlias<"negw $rd, $rs", (SUBW GPR:$rd, X0, GPR:$rs)>; 933def : InstAlias<"sext.w $rd, $rs", (ADDIW GPR:$rd, GPR:$rs, 0)>; 934} // Predicates = [IsRV64] 935 936def : InstAlias<"seqz $rd, $rs", (SLTIU GPR:$rd, GPR:$rs, 1)>; 937def : InstAlias<"snez $rd, $rs", (SLTU GPR:$rd, X0, GPR:$rs)>; 938def : InstAlias<"sltz $rd, $rs", (SLT GPR:$rd, GPR:$rs, X0)>; 939def : InstAlias<"sgtz $rd, $rs", (SLT GPR:$rd, X0, GPR:$rs)>; 940 941// sgt/sgtu are recognised by the GNU assembler but the canonical slt/sltu 942// form will always be printed. Therefore, set a zero weight. 943def : InstAlias<"sgt $rd, $rs, $rt", (SLT GPR:$rd, GPR:$rt, GPR:$rs), 0>; 944def : InstAlias<"sgtu $rd, $rs, $rt", (SLTU GPR:$rd, GPR:$rt, GPR:$rs), 0>; 945 946def : InstAlias<"beqz $rs, $offset", 947 (BEQ GPR:$rs, X0, simm13_lsb0:$offset)>; 948def : InstAlias<"bnez $rs, $offset", 949 (BNE GPR:$rs, X0, simm13_lsb0:$offset)>; 950def : InstAlias<"blez $rs, $offset", 951 (BGE X0, GPR:$rs, simm13_lsb0:$offset)>; 952def : InstAlias<"bgez $rs, $offset", 953 (BGE GPR:$rs, X0, simm13_lsb0:$offset)>; 954def : InstAlias<"bltz $rs, $offset", 955 (BLT GPR:$rs, X0, simm13_lsb0:$offset)>; 956def : InstAlias<"bgtz $rs, $offset", 957 (BLT X0, GPR:$rs, simm13_lsb0:$offset)>; 958 959// Always output the canonical mnemonic for the pseudo branch instructions. 960// The GNU tools emit the canonical mnemonic for the branch pseudo instructions 961// as well (e.g. "bgt" will be recognised by the assembler but never printed by 962// objdump). Match this behaviour by setting a zero weight. 963def : InstAlias<"bgt $rs, $rt, $offset", 964 (BLT GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 965def : InstAlias<"ble $rs, $rt, $offset", 966 (BGE GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 967def : InstAlias<"bgtu $rs, $rt, $offset", 968 (BLTU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 969def : InstAlias<"bleu $rs, $rt, $offset", 970 (BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 971 972def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>; 973def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>; 974 975// Non-zero offset aliases of "jalr" are the lowest weight, followed by the 976// two-register form, then the one-register forms and finally "ret". 977def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>; 978def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>; 979def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>; 980def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>; 981def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>; 982def : InstAlias<"ret", (JALR X0, X1, 0), 4>; 983 984// Non-canonical forms for jump targets also accepted by the assembler. 985def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>; 986def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>; 987def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>; 988 989def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw 990 991let Predicates = [HasStdExtZihintpause] in 992def : InstAlias<"pause", (FENCE 0x1, 0x0)>; // 0x1 == w 993 994def : InstAlias<"rdinstret $rd", (CSRRS GPR:$rd, INSTRET.Encoding, X0)>; 995def : InstAlias<"rdcycle $rd", (CSRRS GPR:$rd, CYCLE.Encoding, X0)>; 996def : InstAlias<"rdtime $rd", (CSRRS GPR:$rd, TIME.Encoding, X0)>; 997 998let Predicates = [IsRV32] in { 999def : InstAlias<"rdinstreth $rd", (CSRRS GPR:$rd, INSTRETH.Encoding, X0)>; 1000def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, CYCLEH.Encoding, X0)>; 1001def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, TIMEH.Encoding, X0)>; 1002} // Predicates = [IsRV32] 1003 1004def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>; 1005def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>; 1006def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>; 1007def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>; 1008 1009def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>; 1010def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>; 1011def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>; 1012 1013let EmitPriority = 0 in { 1014def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>; 1015def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>; 1016def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>; 1017 1018def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>; 1019def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>; 1020def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>; 1021} 1022 1023def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0)>; 1024def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>; 1025 1026def : InstAlias<"hfence.gvma", (HFENCE_GVMA X0, X0)>; 1027def : InstAlias<"hfence.gvma $rs", (HFENCE_GVMA GPR:$rs, X0)>; 1028 1029def : InstAlias<"hfence.vvma", (HFENCE_VVMA X0, X0)>; 1030def : InstAlias<"hfence.vvma $rs", (HFENCE_VVMA GPR:$rs, X0)>; 1031 1032let Predicates = [HasStdExtZihintntl] in { 1033 def : InstAlias<"ntl.p1", (ADD X0, X0, X2)>; 1034 def : InstAlias<"ntl.pall", (ADD X0, X0, X3)>; 1035 def : InstAlias<"ntl.s1", (ADD X0, X0, X4)>; 1036 def : InstAlias<"ntl.all", (ADD X0, X0, X5)>; 1037} // Predicates = [HasStdExtZihintntl] 1038 1039let EmitPriority = 0 in { 1040def : InstAlias<"lb $rd, (${rs1})", 1041 (LB GPR:$rd, GPR:$rs1, 0)>; 1042def : InstAlias<"lh $rd, (${rs1})", 1043 (LH GPR:$rd, GPR:$rs1, 0)>; 1044def : InstAlias<"lw $rd, (${rs1})", 1045 (LW GPR:$rd, GPR:$rs1, 0)>; 1046def : InstAlias<"lbu $rd, (${rs1})", 1047 (LBU GPR:$rd, GPR:$rs1, 0)>; 1048def : InstAlias<"lhu $rd, (${rs1})", 1049 (LHU GPR:$rd, GPR:$rs1, 0)>; 1050 1051def : InstAlias<"sb $rs2, (${rs1})", 1052 (SB GPR:$rs2, GPR:$rs1, 0)>; 1053def : InstAlias<"sh $rs2, (${rs1})", 1054 (SH GPR:$rs2, GPR:$rs1, 0)>; 1055def : InstAlias<"sw $rs2, (${rs1})", 1056 (SW GPR:$rs2, GPR:$rs1, 0)>; 1057 1058def : InstAlias<"add $rd, $rs1, $imm12", 1059 (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1060def : InstAlias<"and $rd, $rs1, $imm12", 1061 (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1062def : InstAlias<"xor $rd, $rs1, $imm12", 1063 (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1064def : InstAlias<"or $rd, $rs1, $imm12", 1065 (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1066def : InstAlias<"sll $rd, $rs1, $shamt", 1067 (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>; 1068def : InstAlias<"srl $rd, $rs1, $shamt", 1069 (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>; 1070def : InstAlias<"sra $rd, $rs1, $shamt", 1071 (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>; 1072let Predicates = [IsRV64] in { 1073def : InstAlias<"lwu $rd, (${rs1})", 1074 (LWU GPR:$rd, GPR:$rs1, 0)>; 1075def : InstAlias<"ld $rd, (${rs1})", 1076 (LD GPR:$rd, GPR:$rs1, 0)>; 1077def : InstAlias<"sd $rs2, (${rs1})", 1078 (SD GPR:$rs2, GPR:$rs1, 0)>; 1079 1080def : InstAlias<"addw $rd, $rs1, $imm12", 1081 (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1082def : InstAlias<"sllw $rd, $rs1, $shamt", 1083 (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>; 1084def : InstAlias<"srlw $rd, $rs1, $shamt", 1085 (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>; 1086def : InstAlias<"sraw $rd, $rs1, $shamt", 1087 (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>; 1088} // Predicates = [IsRV64] 1089def : InstAlias<"slt $rd, $rs1, $imm12", 1090 (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1091def : InstAlias<"sltu $rd, $rs1, $imm12", 1092 (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1093} 1094 1095def : MnemonicAlias<"move", "mv">; 1096 1097// The SCALL and SBREAK instructions wererenamed to ECALL and EBREAK in 1098// version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept 1099// the old name for backwards compatibility. 1100def : MnemonicAlias<"scall", "ecall">; 1101def : MnemonicAlias<"sbreak", "ebreak">; 1102 1103// This alias was added to the spec in December 2020. Don't print it by default 1104// to allow assembly we print to be compatible with versions of GNU assembler 1105// that don't support this alias. 1106def : InstAlias<"zext.b $rd, $rs", (ANDI GPR:$rd, GPR:$rs, 0xFF), 0>; 1107 1108//===----------------------------------------------------------------------===// 1109// .insn directive instructions 1110//===----------------------------------------------------------------------===// 1111 1112def AnyRegOperand : AsmOperandClass { 1113 let Name = "AnyRegOperand"; 1114 let RenderMethod = "addRegOperands"; 1115 let PredicateMethod = "isAnyReg"; 1116} 1117 1118def AnyReg : Operand<XLenVT> { 1119 let OperandType = "OPERAND_REGISTER"; 1120 let ParserMatchClass = AnyRegOperand; 1121} 1122 1123// isCodeGenOnly = 1 to hide them from the tablegened assembly parser. 1124let isCodeGenOnly = 1, hasSideEffects = 1, mayLoad = 1, mayStore = 1, 1125 hasNoSchedulingInfo = 1 in { 1126def InsnR : DirectiveInsnR<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1127 uimm7:$funct7, AnyReg:$rs1, 1128 AnyReg:$rs2), 1129 "$opcode, $funct3, $funct7, $rd, $rs1, $rs2">; 1130def InsnR4 : DirectiveInsnR4<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1131 uimm3:$funct3, 1132 uimm2:$funct2, 1133 AnyReg:$rs1, AnyReg:$rs2, 1134 AnyReg:$rs3), 1135 "$opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3">; 1136def InsnI : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1137 AnyReg:$rs1, simm12:$imm12), 1138 "$opcode, $funct3, $rd, $rs1, $imm12">; 1139def InsnI_Mem : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1140 uimm3:$funct3, 1141 AnyReg:$rs1, 1142 simm12:$imm12), 1143 "$opcode, $funct3, $rd, ${imm12}(${rs1})">; 1144def InsnB : DirectiveInsnB<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1145 AnyReg:$rs1, AnyReg:$rs2, 1146 simm13_lsb0:$imm12), 1147 "$opcode, $funct3, $rs1, $rs2, $imm12">; 1148def InsnU : DirectiveInsnU<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1149 uimm20_lui:$imm20), 1150 "$opcode, $rd, $imm20">; 1151def InsnJ : DirectiveInsnJ<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1152 simm21_lsb0_jal:$imm20), 1153 "$opcode, $rd, $imm20">; 1154def InsnS : DirectiveInsnS<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1155 AnyReg:$rs2, AnyReg:$rs1, 1156 simm12:$imm12), 1157 "$opcode, $funct3, $rs2, ${imm12}(${rs1})">; 1158} 1159 1160// Use InstAliases to match these so that we can combine the insn and format 1161// into a mnemonic to use as the key for the tablegened asm matcher table. The 1162// parser will take care of creating these fake mnemonics and will only do it 1163// for known formats. 1164let EmitPriority = 0 in { 1165def : InstAlias<".insn_r $opcode, $funct3, $funct7, $rd, $rs1, $rs2", 1166 (InsnR AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm7:$funct7, 1167 AnyReg:$rs1, AnyReg:$rs2)>; 1168// Accept 4 register form of ".insn r" as alias for ".insn r4". 1169def : InstAlias<".insn_r $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3", 1170 (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2, 1171 AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>; 1172def : InstAlias<".insn_r4 $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3", 1173 (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2, 1174 AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>; 1175def : InstAlias<".insn_i $opcode, $funct3, $rd, $rs1, $imm12", 1176 (InsnI AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, 1177 simm12:$imm12)>; 1178def : InstAlias<".insn_i $opcode, $funct3, $rd, ${imm12}(${rs1})", 1179 (InsnI_Mem AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, 1180 AnyReg:$rs1, simm12:$imm12)>; 1181def : InstAlias<".insn_b $opcode, $funct3, $rs1, $rs2, $imm12", 1182 (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, 1183 AnyReg:$rs2, simm13_lsb0:$imm12)>; 1184// Accept sb as an alias for b. 1185def : InstAlias<".insn_sb $opcode, $funct3, $rs1, $rs2, $imm12", 1186 (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, 1187 AnyReg:$rs2, simm13_lsb0:$imm12)>; 1188def : InstAlias<".insn_u $opcode, $rd, $imm20", 1189 (InsnU AnyReg:$rd, uimm7_opcode:$opcode, uimm20_lui:$imm20)>; 1190def : InstAlias<".insn_j $opcode, $rd, $imm20", 1191 (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>; 1192// Accept uj as an alias for j. 1193def : InstAlias<".insn_uj $opcode, $rd, $imm20", 1194 (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>; 1195def : InstAlias<".insn_s $opcode, $funct3, $rs2, ${imm12}(${rs1})", 1196 (InsnS uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2, 1197 AnyReg:$rs1, simm12:$imm12)>; 1198} 1199 1200//===----------------------------------------------------------------------===// 1201// Pseudo-instructions and codegen patterns 1202// 1203// Naming convention: For 'generic' pattern classes, we use the naming 1204// convention PatTy1Ty2. For pattern classes which offer a more complex 1205// expansion, prefix the class name, e.g. BccPat. 1206//===----------------------------------------------------------------------===// 1207 1208/// Generic pattern classes 1209 1210class PatGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT> 1211 : Pat<(vt (OpNode (vt GPR:$rs1))), (Inst GPR:$rs1)>; 1212class PatGprGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT> 1213 : Pat<(vt (OpNode (vt GPR:$rs1), (vt GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>; 1214 1215class PatGprImm<SDPatternOperator OpNode, RVInst Inst, ImmLeaf ImmType> 1216 : Pat<(XLenVT (OpNode (XLenVT GPR:$rs1), ImmType:$imm)), 1217 (Inst GPR:$rs1, ImmType:$imm)>; 1218class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst> 1219 : PatGprImm<OpNode, Inst, simm12>; 1220class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst> 1221 : PatGprImm<OpNode, Inst, uimmlog2xlen>; 1222 1223/// Predicates 1224 1225def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{ 1226 return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32); 1227}]>; 1228def sexti16 : ComplexPattern<XLenVT, 1, "selectSExtBits<16>">; 1229def sexti32 : ComplexPattern<i64, 1, "selectSExtBits<32>">; 1230def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{ 1231 return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32); 1232}]>; 1233def zexti32 : ComplexPattern<i64, 1, "selectZExtBits<32>">; 1234def zexti16 : ComplexPattern<XLenVT, 1, "selectZExtBits<16>">; 1235def zexti8 : ComplexPattern<XLenVT, 1, "selectZExtBits<8>">; 1236 1237def ext : PatFrags<(ops node:$A), [(sext node:$A), (zext node:$A)]>; 1238 1239class binop_oneuse<SDPatternOperator operator> 1240 : PatFrag<(ops node:$A, node:$B), 1241 (operator node:$A, node:$B), [{ 1242 return N->hasOneUse(); 1243}]>; 1244 1245def and_oneuse : binop_oneuse<and>; 1246def mul_oneuse : binop_oneuse<mul>; 1247 1248def mul_const_oneuse : PatFrag<(ops node:$A, node:$B), 1249 (mul node:$A, node:$B), [{ 1250 if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1))) 1251 return N1C->hasOneUse(); 1252 return false; 1253}]>; 1254 1255class unop_oneuse<SDPatternOperator operator> 1256 : PatFrag<(ops node:$A), 1257 (operator node:$A), [{ 1258 return N->hasOneUse(); 1259}]>; 1260 1261def sext_oneuse : unop_oneuse<sext>; 1262def zext_oneuse : unop_oneuse<zext>; 1263def anyext_oneuse : unop_oneuse<anyext>; 1264def ext_oneuse : unop_oneuse<ext>; 1265def fpext_oneuse : unop_oneuse<any_fpextend>; 1266 1267/// Simple arithmetic operations 1268 1269def : PatGprGpr<add, ADD>; 1270def : PatGprSimm12<add, ADDI>; 1271def : PatGprGpr<sub, SUB>; 1272def : PatGprGpr<or, OR>; 1273def : PatGprSimm12<or, ORI>; 1274def : PatGprGpr<and, AND>; 1275def : PatGprSimm12<and, ANDI>; 1276def : PatGprGpr<xor, XOR>; 1277def : PatGprSimm12<xor, XORI>; 1278def : PatGprUimmLog2XLen<shl, SLLI>; 1279def : PatGprUimmLog2XLen<srl, SRLI>; 1280def : PatGprUimmLog2XLen<sra, SRAI>; 1281 1282// Select 'or' as ADDI if the immediate bits are known to be 0 in $rs1. This 1283// can improve compressibility. 1284def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{ 1285 KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0); 1286 KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0); 1287 return KnownBits::haveNoCommonBitsSet(Known0, Known1); 1288}]>; 1289def : PatGprSimm12<or_is_add, ADDI>; 1290 1291// negate of low bit can be done via two (compressible) shifts. The negate 1292// is never compressible since rs1 and rd can't be the same register. 1293def : Pat<(XLenVT (sub 0, (and_oneuse GPR:$rs, 1))), 1294 (SRAI (SLLI $rs, (ImmSubFromXLen (XLenVT 1))), 1295 (ImmSubFromXLen (XLenVT 1)))>; 1296 1297// AND with leading/trailing ones mask exceeding simm32/simm12. 1298def : Pat<(i64 (and GPR:$rs, LeadingOnesMask:$mask)), 1299 (SLLI (SRLI $rs, LeadingOnesMask:$mask), LeadingOnesMask:$mask)>; 1300def : Pat<(XLenVT (and GPR:$rs, TrailingOnesMask:$mask)), 1301 (SRLI (SLLI $rs, TrailingOnesMask:$mask), TrailingOnesMask:$mask)>; 1302 1303// Match both a plain shift and one where the shift amount is masked (this is 1304// typically introduced when the legalizer promotes the shift amount and 1305// zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base 1306// ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I). 1307def shiftMaskXLen : ComplexPattern<XLenVT, 1, "selectShiftMaskXLen", [], [], 0>; 1308def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>; 1309 1310class shiftop<SDPatternOperator operator> 1311 : PatFrag<(ops node:$val, node:$count), 1312 (operator node:$val, (XLenVT (shiftMaskXLen node:$count)))>; 1313class shiftopw<SDPatternOperator operator> 1314 : PatFrag<(ops node:$val, node:$count), 1315 (operator node:$val, (i64 (shiftMask32 node:$count)))>; 1316 1317def : PatGprGpr<shiftop<shl>, SLL>; 1318def : PatGprGpr<shiftop<srl>, SRL>; 1319def : PatGprGpr<shiftop<sra>, SRA>; 1320 1321// This is a special case of the ADD instruction used to facilitate the use of a 1322// fourth operand to emit a relocation on a symbol relating to this instruction. 1323// The relocation does not affect any bits of the instruction itself but is used 1324// as a hint to the linker. 1325let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in 1326def PseudoAddTPRel : Pseudo<(outs GPR:$rd), 1327 (ins GPR:$rs1, GPR:$rs2, tprel_add_symbol:$src), [], 1328 "add", "$rd, $rs1, $rs2, $src">; 1329 1330/// FrameIndex calculations 1331 1332def : Pat<(FrameAddrRegImm (iPTR GPR:$rs1), simm12:$imm12), 1333 (ADDI GPR:$rs1, simm12:$imm12)>; 1334 1335/// HI and ADD_LO address nodes. 1336 1337def : Pat<(riscv_hi tglobaladdr:$in), (LUI tglobaladdr:$in)>; 1338def : Pat<(riscv_hi tblockaddress:$in), (LUI tblockaddress:$in)>; 1339def : Pat<(riscv_hi tjumptable:$in), (LUI tjumptable:$in)>; 1340def : Pat<(riscv_hi tconstpool:$in), (LUI tconstpool:$in)>; 1341 1342def : Pat<(riscv_add_lo GPR:$hi, tglobaladdr:$lo), 1343 (ADDI GPR:$hi, tglobaladdr:$lo)>; 1344def : Pat<(riscv_add_lo GPR:$hi, tblockaddress:$lo), 1345 (ADDI GPR:$hi, tblockaddress:$lo)>; 1346def : Pat<(riscv_add_lo GPR:$hi, tjumptable:$lo), 1347 (ADDI GPR:$hi, tjumptable:$lo)>; 1348def : Pat<(riscv_add_lo GPR:$hi, tconstpool:$lo), 1349 (ADDI GPR:$hi, tconstpool:$lo)>; 1350 1351/// TLS address nodes. 1352 1353def : Pat<(riscv_hi tglobaltlsaddr:$in), (LUI tglobaltlsaddr:$in)>; 1354def : Pat<(riscv_add_tprel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src), 1355 (PseudoAddTPRel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src)>; 1356def : Pat<(riscv_add_lo GPR:$src, tglobaltlsaddr:$lo), 1357 (ADDI GPR:$src, tglobaltlsaddr:$lo)>; 1358 1359/// Setcc 1360 1361def : PatGprGpr<setlt, SLT>; 1362def : PatGprSimm12<setlt, SLTI>; 1363def : PatGprGpr<setult, SLTU>; 1364def : PatGprSimm12<setult, SLTIU>; 1365 1366// RISC-V doesn't have general instructions for integer setne/seteq, but we can 1367// check for equality with 0. These ComplexPatterns rewrite the setne/seteq into 1368// something that can be compared with 0. 1369// These ComplexPatterns must be used in pairs. 1370def riscv_setne : ComplexPattern<XLenVT, 1, "selectSETNE", [setcc]>; 1371def riscv_seteq : ComplexPattern<XLenVT, 1, "selectSETEQ", [setcc]>; 1372 1373// Define pattern expansions for setcc operations that aren't directly 1374// handled by a RISC-V instruction. 1375def : Pat<(riscv_seteq (XLenVT GPR:$rs1)), (SLTIU GPR:$rs1, 1)>; 1376def : Pat<(riscv_setne (XLenVT GPR:$rs1)), (SLTU (XLenVT X0), GPR:$rs1)>; 1377def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), -1)), (SLTIU GPR:$rs1, -1)>; 1378 1379def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{ 1380 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 1381 RISCVCC::CondCode BrCC = getRISCVCCForIntCC(CC); 1382 return CurDAG->getTargetConstant(BrCC, SDLoc(N), Subtarget->getXLenVT()); 1383}]>; 1384 1385def riscv_selectcc_frag : PatFrag<(ops node:$lhs, node:$rhs, node:$cc, 1386 node:$truev, node:$falsev), 1387 (riscv_selectcc node:$lhs, node:$rhs, 1388 node:$cc, node:$truev, 1389 node:$falsev), [{}], 1390 IntCCtoRISCVCC>; 1391 1392let Predicates = [HasShortForwardBranchOpt], isSelect = 1, 1393 Constraints = "$dst = $falsev", isCommutable = 1, Size = 8 in { 1394// This instruction moves $truev to $dst when the condition is true. It will 1395// be expanded to control flow in RISCVExpandPseudoInsts. 1396def PseudoCCMOVGPR : Pseudo<(outs GPR:$dst), 1397 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1398 GPR:$falsev, GPR:$truev), 1399 [(set GPR:$dst, 1400 (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 1401 GPR:$rhs, cond, 1402 (XLenVT GPR:$truev), 1403 GPR:$falsev))]>, 1404 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1405 ReadSFBALU, ReadSFBALU]>; 1406} 1407 1408// Conditional binops, that updates update $dst to (op rs1, rs2) when condition 1409// is true. Returns $falsev otherwise. Selected by optimizeSelect. 1410// TODO: Can we use DefaultOperands on the regular binop to accomplish this more 1411// like how ARM does predication? 1412let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, 1413 Constraints = "$dst = $falsev" in { 1414def PseudoCCADD : Pseudo<(outs GPR:$dst), 1415 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1416 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1417 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1418 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1419def PseudoCCSUB : Pseudo<(outs GPR:$dst), 1420 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1421 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1422 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1423 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1424def PseudoCCAND : Pseudo<(outs GPR:$dst), 1425 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1426 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1427 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1428 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1429def PseudoCCOR : Pseudo<(outs GPR:$dst), 1430 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1431 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1432 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1433 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1434def PseudoCCXOR : Pseudo<(outs GPR:$dst), 1435 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1436 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1437 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1438 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1439 1440// RV64I instructions 1441def PseudoCCADDW : Pseudo<(outs GPR:$dst), 1442 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1443 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1444 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1445 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1446def PseudoCCSUBW : Pseudo<(outs GPR:$dst), 1447 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1448 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1449 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1450 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1451} 1452 1453multiclass SelectCC_GPR_rrirr<DAGOperand valty, ValueType vt> { 1454 let usesCustomInserter = 1 in 1455 def _Using_CC_GPR : Pseudo<(outs valty:$dst), 1456 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1457 valty:$truev, valty:$falsev), 1458 [(set valty:$dst, 1459 (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), GPR:$rhs, cond, 1460 (vt valty:$truev), valty:$falsev))]>; 1461 // Explicitly select 0 in the condition to X0. The register coalescer doesn't 1462 // always do it. 1463 def : Pat<(riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 0, cond, (vt valty:$truev), 1464 valty:$falsev), 1465 (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, (XLenVT X0), 1466 (IntCCtoRISCVCC $cc), valty:$truev, valty:$falsev)>; 1467} 1468 1469let Predicates = [NoShortForwardBranchOpt] in 1470defm Select_GPR : SelectCC_GPR_rrirr<GPR, XLenVT>; 1471 1472class SelectCompressOpt<CondCode Cond> 1473 : Pat<(riscv_selectcc_frag:$select (XLenVT GPR:$lhs), simm12_no6:$Constant, Cond, 1474 (XLenVT GPR:$truev), GPR:$falsev), 1475 (Select_GPR_Using_CC_GPR (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0), 1476 (IntCCtoRISCVCC $select), GPR:$truev, GPR:$falsev)>; 1477 1478def OptForMinSize : Predicate<"MF ? MF->getFunction().hasMinSize() : false">; 1479 1480let Predicates = [HasStdExtC, OptForMinSize] in { 1481 def : SelectCompressOpt<SETEQ>; 1482 def : SelectCompressOpt<SETNE>; 1483} 1484 1485/// Branches and jumps 1486 1487// Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction. 1488multiclass BccPat<CondCode Cond, RVInstB Inst> { 1489 def : Pat<(riscv_brcc (XLenVT GPR:$rs1), GPR:$rs2, Cond, bb:$imm12), 1490 (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>; 1491 // Explicitly select 0 to X0. The register coalescer doesn't always do it. 1492 def : Pat<(riscv_brcc (XLenVT GPR:$rs1), 0, Cond, bb:$imm12), 1493 (Inst GPR:$rs1, (XLenVT X0), simm13_lsb0:$imm12)>; 1494} 1495 1496class BrccCompressOpt<CondCode Cond, RVInstB Inst> 1497 : Pat<(riscv_brcc GPR:$lhs, simm12_no6:$Constant, Cond, bb:$place), 1498 (Inst (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0), bb:$place)>; 1499 1500defm : BccPat<SETEQ, BEQ>; 1501defm : BccPat<SETNE, BNE>; 1502defm : BccPat<SETLT, BLT>; 1503defm : BccPat<SETGE, BGE>; 1504defm : BccPat<SETULT, BLTU>; 1505defm : BccPat<SETUGE, BGEU>; 1506 1507let Predicates = [HasStdExtC, OptForMinSize] in { 1508 def : BrccCompressOpt<SETEQ, BEQ>; 1509 def : BrccCompressOpt<SETNE, BNE>; 1510} 1511 1512class LongBccPseudo : Pseudo<(outs), 1513 (ins GPR:$rs1, GPR:$rs2, simm21_lsb0_jal:$imm20), 1514 []> { 1515 let Size = 8; 1516 let isBarrier = 1; 1517 let isBranch = 1; 1518 let hasSideEffects = 0; 1519 let mayStore = 0; 1520 let mayLoad = 0; 1521 let isAsmParserOnly = 1; 1522 let hasNoSchedulingInfo = 1; 1523} 1524 1525def PseudoLongBEQ : LongBccPseudo; 1526def PseudoLongBNE : LongBccPseudo; 1527def PseudoLongBLT : LongBccPseudo; 1528def PseudoLongBGE : LongBccPseudo; 1529def PseudoLongBLTU : LongBccPseudo; 1530def PseudoLongBGEU : LongBccPseudo; 1531 1532let isBarrier = 1, isBranch = 1, isTerminator = 1 in 1533def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>, 1534 PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>; 1535 1536let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in 1537def PseudoBRIND : Pseudo<(outs), (ins GPRJALR:$rs1, simm12:$imm12), []>, 1538 PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>; 1539 1540def : Pat<(brind GPRJALR:$rs1), (PseudoBRIND GPRJALR:$rs1, 0)>; 1541def : Pat<(brind (add GPRJALR:$rs1, simm12:$imm12)), 1542 (PseudoBRIND GPRJALR:$rs1, simm12:$imm12)>; 1543 1544// PseudoCALLReg is a generic pseudo instruction for calls which will eventually 1545// expand to auipc and jalr while encoding, with any given register used as the 1546// destination. 1547// Define AsmString to print "call" when compile with -S flag. 1548// Define isCodeGenOnly = 0 to support parsing assembly "call" instruction. 1549let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, Size = 8, hasSideEffects = 0, 1550 mayStore = 0, mayLoad = 0 in 1551def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), [], 1552 "call", "$rd, $func">, 1553 Sched<[WriteIALU, WriteJalr, ReadJalr]>; 1554 1555// PseudoCALL is a pseudo instruction which will eventually expand to auipc 1556// and jalr while encoding. This is desirable, as an auipc+jalr pair with 1557// R_RISCV_CALL and R_RISCV_RELAX relocations can be be relaxed by the linker 1558// if the offset fits in a signed 21-bit immediate. 1559// Define AsmString to print "call" when compile with -S flag. 1560// Define isCodeGenOnly = 0 to support parsing assembly "call" instruction. 1561let isCall = 1, Defs = [X1], isCodeGenOnly = 0, Size = 8 in 1562def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), [], 1563 "call", "$func">, 1564 Sched<[WriteIALU, WriteJalr, ReadJalr]>; 1565 1566def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>; 1567def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>; 1568 1569def : Pat<(riscv_sret_glue), (SRET (XLenVT X0), (XLenVT X0))>; 1570def : Pat<(riscv_mret_glue), (MRET (XLenVT X0), (XLenVT X0))>; 1571 1572let isCall = 1, Defs = [X1] in 1573def PseudoCALLIndirect : Pseudo<(outs), (ins GPRJALR:$rs1), 1574 [(riscv_call GPRJALR:$rs1)]>, 1575 PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>; 1576 1577let isBarrier = 1, isReturn = 1, isTerminator = 1 in 1578def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_glue)]>, 1579 PseudoInstExpansion<(JALR X0, X1, 0)>; 1580 1581// PseudoTAIL is a pseudo instruction similar to PseudoCALL and will eventually 1582// expand to auipc and jalr while encoding. 1583// Define AsmString to print "tail" when compile with -S flag. 1584let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2], 1585 Size = 8, isCodeGenOnly = 0 in 1586def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), [], 1587 "tail", "$dst">, 1588 Sched<[WriteIALU, WriteJalr, ReadJalr]>; 1589 1590let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in 1591def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1), 1592 [(riscv_tail GPRTC:$rs1)]>, 1593 PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>; 1594 1595def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)), 1596 (PseudoTAIL tglobaladdr:$dst)>; 1597def : Pat<(riscv_tail (iPTR texternalsym:$dst)), 1598 (PseudoTAIL texternalsym:$dst)>; 1599 1600let isCall = 0, isBarrier = 1, isBranch = 1, isTerminator = 1, Size = 8, 1601 isCodeGenOnly = 0, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in 1602def PseudoJump : Pseudo<(outs GPR:$rd), (ins pseudo_jump_symbol:$target), [], 1603 "jump", "$target, $rd">, 1604 Sched<[WriteIALU, WriteJalr, ReadJalr]>; 1605 1606let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0, 1607 isAsmParserOnly = 1 in 1608def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], 1609 "lla", "$dst, $src">; 1610 1611// Refer to comment on PseudoLI for explanation of Size=32 1612let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0, 1613 isAsmParserOnly = 1 in 1614def PseudoLLAImm : Pseudo<(outs GPR:$dst), (ins ixlenimm_li_restricted:$imm), [], 1615 "lla", "$dst, $imm">; 1616def : Pat<(riscv_lla tglobaladdr:$in), (PseudoLLA tglobaladdr:$in)>; 1617def : Pat<(riscv_lla tblockaddress:$in), (PseudoLLA tblockaddress:$in)>; 1618def : Pat<(riscv_lla tjumptable:$in), (PseudoLLA tjumptable:$in)>; 1619def : Pat<(riscv_lla tconstpool:$in), (PseudoLLA tconstpool:$in)>; 1620 1621let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0, 1622 isAsmParserOnly = 1 in 1623def PseudoLGA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], 1624 "lga", "$dst, $src">; 1625 1626def : Pat<(iPTR (riscv_lga tglobaladdr:$in)), (PseudoLGA tglobaladdr:$in)>; 1627 1628let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0, 1629 isAsmParserOnly = 1 in 1630def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], 1631 "la", "$dst, $src">; 1632 1633// Refer to comment on PseudoLI for explanation of Size=32 1634let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32, 1635 isCodeGenOnly = 0, isAsmParserOnly = 1 in 1636def PseudoLAImm : Pseudo<(outs GPR:$rd), (ins ixlenimm_li_restricted:$imm), [], 1637 "la", "$rd, $imm">; 1638 1639let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0, 1640 isAsmParserOnly = 1 in 1641def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], 1642 "la.tls.ie", "$dst, $src">; 1643 1644def : Pat<(iPTR (riscv_la_tls_ie tglobaltlsaddr:$in)), 1645 (PseudoLA_TLS_IE tglobaltlsaddr:$in)>; 1646 1647let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0, 1648 isAsmParserOnly = 1 in 1649def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [], 1650 "la.tls.gd", "$dst, $src">; 1651 1652def : Pat<(riscv_la_tls_gd tglobaltlsaddr:$in), 1653 (PseudoLA_TLS_GD tglobaltlsaddr:$in)>; 1654 1655/// Sign/Zero Extends 1656 1657// There are single-instruction versions of these in Zbb, so disable these 1658// Pseudos if that extension is present. 1659let hasSideEffects = 0, mayLoad = 0, 1660 mayStore = 0, isCodeGenOnly = 0, isAsmParserOnly = 1 in { 1661def PseudoSEXT_B : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.b", "$rd, $rs">; 1662def PseudoSEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.h", "$rd, $rs">; 1663// rv64's sext.w is defined above, using InstAlias<"sext.w ... 1664// zext.b is defined above, using InstAlias<"zext.b ... 1665def PseudoZEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.h", "$rd, $rs">; 1666} // hasSideEffects = 0, ... 1667 1668let Predicates = [IsRV64], hasSideEffects = 0, mayLoad = 0, mayStore = 0, 1669 isCodeGenOnly = 0, isAsmParserOnly = 1 in { 1670def PseudoZEXT_W : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.w", "$rd, $rs">; 1671} // Predicates = [IsRV64], ... 1672 1673/// Loads 1674 1675class LdPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT> 1676 : Pat<(vt (LoadOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))), 1677 (Inst GPR:$rs1, simm12:$imm12)>; 1678 1679def : LdPat<sextloadi8, LB>; 1680def : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb. 1681def : LdPat<sextloadi16, LH>; 1682def : LdPat<extloadi16, LH>; 1683def : LdPat<load, LW, i32>, Requires<[IsRV32]>; 1684def : LdPat<zextloadi8, LBU>; 1685def : LdPat<zextloadi16, LHU>; 1686 1687/// Stores 1688 1689class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy, 1690 ValueType vt> 1691 : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm (XLenVT GPR:$rs1), 1692 simm12:$imm12)), 1693 (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>; 1694 1695def : StPat<truncstorei8, SB, GPR, XLenVT>; 1696def : StPat<truncstorei16, SH, GPR, XLenVT>; 1697def : StPat<store, SW, GPR, i32>, Requires<[IsRV32]>; 1698 1699/// Fences 1700 1701// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set 1702// Manual: Volume I. 1703 1704// fence acquire -> fence r, rw 1705def : Pat<(atomic_fence (XLenVT 4), (timm)), (FENCE 0b10, 0b11)>; 1706// fence release -> fence rw, w 1707def : Pat<(atomic_fence (XLenVT 5), (timm)), (FENCE 0b11, 0b1)>; 1708// fence acq_rel -> fence.tso 1709def : Pat<(atomic_fence (XLenVT 6), (timm)), (FENCE_TSO)>; 1710// fence seq_cst -> fence rw, rw 1711def : Pat<(atomic_fence (XLenVT 7), (timm)), (FENCE 0b11, 0b11)>; 1712 1713// Lowering for atomic load and store is defined in RISCVInstrInfoA.td. 1714// Although these are lowered to fence+load/store instructions defined in the 1715// base RV32I/RV64I ISA, this lowering is only used when the A extension is 1716// present. This is necessary as it isn't valid to mix __atomic_* libcalls 1717// with inline atomic operations for the same object. 1718 1719/// Access to system registers 1720 1721// Helpers for defining specific operations. They are defined for each system 1722// register separately. Side effect is not used because dependencies are 1723// expressed via use-def properties. 1724 1725class ReadSysReg<SysReg SR, list<Register> Regs> 1726 : Pseudo<(outs GPR:$rd), (ins), 1727 [(set GPR:$rd, (XLenVT (riscv_read_csr (XLenVT SR.Encoding))))]>, 1728 PseudoInstExpansion<(CSRRS GPR:$rd, SR.Encoding, X0)> { 1729 let hasSideEffects = 0; 1730 let Uses = Regs; 1731} 1732 1733class WriteSysReg<SysReg SR, list<Register> Regs> 1734 : Pseudo<(outs), (ins GPR:$val), 1735 [(riscv_write_csr (XLenVT SR.Encoding), (XLenVT GPR:$val))]>, 1736 PseudoInstExpansion<(CSRRW X0, SR.Encoding, GPR:$val)> { 1737 let hasSideEffects = 0; 1738 let Defs = Regs; 1739} 1740 1741class WriteSysRegImm<SysReg SR, list<Register> Regs> 1742 : Pseudo<(outs), (ins uimm5:$val), 1743 [(riscv_write_csr (XLenVT SR.Encoding), uimm5:$val)]>, 1744 PseudoInstExpansion<(CSRRWI X0, SR.Encoding, uimm5:$val)> { 1745 let hasSideEffects = 0; 1746 let Defs = Regs; 1747} 1748 1749class SwapSysReg<SysReg SR, list<Register> Regs> 1750 : Pseudo<(outs GPR:$rd), (ins GPR:$val), 1751 [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), (XLenVT GPR:$val)))]>, 1752 PseudoInstExpansion<(CSRRW GPR:$rd, SR.Encoding, GPR:$val)> { 1753 let hasSideEffects = 0; 1754 let Uses = Regs; 1755 let Defs = Regs; 1756} 1757 1758class SwapSysRegImm<SysReg SR, list<Register> Regs> 1759 : Pseudo<(outs GPR:$rd), (ins uimm5:$val), 1760 [(set GPR:$rd, (XLenVT (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val)))]>, 1761 PseudoInstExpansion<(CSRRWI GPR:$rd, SR.Encoding, uimm5:$val)> { 1762 let hasSideEffects = 0; 1763 let Uses = Regs; 1764 let Defs = Regs; 1765} 1766 1767def ReadFRM : ReadSysReg<SysRegFRM, [FRM]>; 1768def WriteFRM : WriteSysReg<SysRegFRM, [FRM]>; 1769def WriteFRMImm : WriteSysRegImm<SysRegFRM, [FRM]>; 1770def SwapFRMImm : SwapSysRegImm<SysRegFRM, [FRM]>; 1771 1772def WriteVXRMImm : WriteSysRegImm<SysRegVXRM, [VXRM]>; 1773 1774let hasSideEffects = true in { 1775def ReadFFLAGS : ReadSysReg<SysRegFFLAGS, [FFLAGS]>; 1776def WriteFFLAGS : WriteSysReg<SysRegFFLAGS, [FFLAGS]>; 1777} 1778/// Other pseudo-instructions 1779 1780// Pessimistically assume the stack pointer will be clobbered 1781let Defs = [X2], Uses = [X2] in { 1782def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), 1783 [(callseq_start timm:$amt1, timm:$amt2)]>; 1784def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2), 1785 [(callseq_end timm:$amt1, timm:$amt2)]>; 1786} // Defs = [X2], Uses = [X2] 1787 1788/// RV64 patterns 1789 1790let Predicates = [IsRV64, NotHasStdExtZba] in { 1791def : Pat<(i64 (and GPR:$rs1, 0xffffffff)), (SRLI (SLLI GPR:$rs1, 32), 32)>; 1792 1793// If we're shifting a 32-bit zero extended value left by 0-31 bits, use 2 1794// shifts instead of 3. This can occur when unsigned is used to index an array. 1795def : Pat<(i64 (shl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)), 1796 (SRLI (SLLI GPR:$rs1, 32), (ImmSubFrom32 uimm5:$shamt))>; 1797} 1798 1799// PatFrag to allow ADDW/SUBW/MULW/SLLW to be selected from i64 add/sub/mul/shl 1800// if only the lower 32 bits of their result is used. 1801class binop_allwusers<SDPatternOperator operator> 1802 : PatFrag<(ops node:$lhs, node:$rhs), 1803 (i64 (operator node:$lhs, node:$rhs)), [{ 1804 return hasAllWUsers(Node); 1805}]>; 1806 1807def sexti32_allwusers : PatFrag<(ops node:$src), 1808 (sext_inreg node:$src, i32), [{ 1809 return hasAllWUsers(Node); 1810}]>; 1811 1812def ImmSExt32 : SDNodeXForm<imm, [{ 1813 return CurDAG->getTargetConstant(SignExtend64<32>(N->getSExtValue()), 1814 SDLoc(N), N->getValueType(0)); 1815}]>; 1816// Look for constants where the upper 32 bits are 0, but sign extending bit 31 1817// would be an simm12. 1818def u32simm12 : ImmLeaf<XLenVT, [{ 1819 return isUInt<32>(Imm) && isInt<12>(SignExtend64<32>(Imm)); 1820}], ImmSExt32>; 1821 1822let Predicates = [IsRV64] in { 1823 1824def : Pat<(i64 (and GPR:$rs, LeadingOnesWMask:$mask)), 1825 (SLLI (SRLIW $rs, LeadingOnesWMask:$mask), LeadingOnesWMask:$mask)>; 1826 1827/// sext and zext 1828 1829// Sign extend is not needed if all users are W instructions. 1830def : Pat<(sexti32_allwusers GPR:$rs1), (XLenVT GPR:$rs1)>; 1831 1832def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>; 1833 1834/// ALU operations 1835 1836def : Pat<(i64 (srl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)), 1837 (SRLIW GPR:$rs1, uimm5:$shamt)>; 1838def : Pat<(i64 (srl (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)), 1839 (SRLIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>; 1840def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt), 1841 (SRAIW GPR:$rs1, uimm5:$shamt)>; 1842def : Pat<(i64 (sra (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)), 1843 (SRAIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>; 1844 1845def : PatGprGpr<shiftopw<riscv_sllw>, SLLW>; 1846def : PatGprGpr<shiftopw<riscv_srlw>, SRLW>; 1847def : PatGprGpr<shiftopw<riscv_sraw>, SRAW>; 1848 1849// Select W instructions if only the lower 32 bits of the result are used. 1850def : PatGprGpr<binop_allwusers<add>, ADDW>; 1851def : PatGprSimm12<binop_allwusers<add>, ADDIW>; 1852def : PatGprGpr<binop_allwusers<sub>, SUBW>; 1853def : PatGprImm<binop_allwusers<shl>, SLLIW, uimm5>; 1854 1855// If this is a shr of a value sign extended from i32, and all the users only 1856// use the lower 32 bits, we can use an sraiw to remove the sext_inreg. This 1857// occurs because SimplifyDemandedBits prefers srl over sra. 1858def : Pat<(binop_allwusers<srl> (sext_inreg GPR:$rs1, i32), uimm5:$shamt), 1859 (SRAIW GPR:$rs1, uimm5:$shamt)>; 1860 1861// Use binop_allwusers to recover immediates that may have been broken by 1862// SimplifyDemandedBits. 1863def : Pat<(binop_allwusers<and> GPR:$rs1, u32simm12:$imm), 1864 (ANDI GPR:$rs1, u32simm12:$imm)>; 1865 1866def : Pat<(binop_allwusers<or> GPR:$rs1, u32simm12:$imm), 1867 (ORI GPR:$rs1, u32simm12:$imm)>; 1868 1869def : Pat<(binop_allwusers<xor> GPR:$rs1, u32simm12:$imm), 1870 (XORI GPR:$rs1, u32simm12:$imm)>; 1871/// Loads 1872 1873def : LdPat<sextloadi32, LW, i64>; 1874def : LdPat<extloadi32, LW, i64>; 1875def : LdPat<zextloadi32, LWU, i64>; 1876def : LdPat<load, LD, i64>; 1877 1878/// Stores 1879 1880def : StPat<truncstorei32, SW, GPR, i64>; 1881def : StPat<store, SD, GPR, i64>; 1882} // Predicates = [IsRV64] 1883 1884/// readcyclecounter 1885// On RV64, we can directly read the 64-bit "cycle" CSR. 1886let Predicates = [IsRV64] in 1887def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, (XLenVT X0))>; 1888// On RV32, ReadCycleWide will be expanded to the suggested loop reading both 1889// halves of the 64-bit "cycle" CSR. 1890let Predicates = [IsRV32], usesCustomInserter = 1, hasNoSchedulingInfo = 1 in 1891def ReadCycleWide : Pseudo<(outs GPR:$lo, GPR:$hi), (ins), 1892 [(set GPR:$lo, GPR:$hi, (riscv_read_cycle_wide))], 1893 "", "">; 1894 1895/// traps 1896 1897// We lower `trap` to `unimp`, as this causes a hard exception on nearly all 1898// systems. 1899def : Pat<(trap), (UNIMP)>; 1900 1901// We lower `debugtrap` to `ebreak`, as this will get the attention of the 1902// debugger if possible. 1903def : Pat<(debugtrap), (EBREAK)>; 1904 1905let Predicates = [IsRV64], Uses = [X5], 1906 Defs = [X1, X6, X7, X28, X29, X30, X31] in 1907def HWASAN_CHECK_MEMACCESS_SHORTGRANULES 1908 : Pseudo<(outs), (ins GPRJALR:$ptr, i32imm:$accessinfo), 1909 [(int_hwasan_check_memaccess_shortgranules X5, GPRJALR:$ptr, 1910 (i32 timm:$accessinfo))]>; 1911 1912// This gets lowered into a 20-byte instruction sequence (at most) 1913let hasSideEffects = 0, mayLoad = 1, mayStore = 0, 1914 Defs = [ X6, X7, X28, X29, X30, X31 ], Size = 20 in { 1915def KCFI_CHECK 1916 : Pseudo<(outs), (ins GPRJALR:$ptr, i32imm:$type), []>, Sched<[]>; 1917} 1918 1919/// Simple optimization 1920def : Pat<(XLenVT (add GPR:$rs1, (AddiPair:$rs2))), 1921 (ADDI (ADDI GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)), 1922 (AddiPairImmSmall GPR:$rs2))>; 1923 1924let Predicates = [IsRV64] in { 1925// Select W instructions if only the lower 32-bits of the result are used. 1926def : Pat<(binop_allwusers<add> GPR:$rs1, (AddiPair:$rs2)), 1927 (ADDIW (ADDIW GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)), 1928 (AddiPairImmSmall AddiPair:$rs2))>; 1929} 1930 1931//===----------------------------------------------------------------------===// 1932// Standard extensions 1933//===----------------------------------------------------------------------===// 1934 1935// Multiply and Division 1936include "RISCVInstrInfoM.td" 1937 1938// Atomic 1939include "RISCVInstrInfoA.td" 1940 1941// Scalar FP 1942include "RISCVInstrInfoF.td" 1943include "RISCVInstrInfoD.td" 1944include "RISCVInstrInfoZfh.td" 1945include "RISCVInstrInfoZfbfmin.td" 1946include "RISCVInstrInfoZfa.td" 1947 1948// Scalar bitmanip and cryptography 1949include "RISCVInstrInfoZb.td" 1950include "RISCVInstrInfoZk.td" 1951 1952// Vector 1953include "RISCVInstrInfoV.td" 1954include "RISCVInstrInfoZvfbf.td" 1955include "RISCVInstrInfoZvk.td" 1956 1957// Integer 1958include "RISCVInstrInfoZicbo.td" 1959include "RISCVInstrInfoZicond.td" 1960 1961// Compressed 1962include "RISCVInstrInfoC.td" 1963include "RISCVInstrInfoZc.td" 1964 1965//===----------------------------------------------------------------------===// 1966// Vendor extensions 1967//===----------------------------------------------------------------------===// 1968 1969include "RISCVInstrInfoXVentana.td" 1970include "RISCVInstrInfoXTHead.td" 1971include "RISCVInstrInfoXSf.td" 1972include "RISCVInstrInfoXCV.td" 1973