//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

// A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
// Used for GNU as compatibility.
def AtomicMemOpOperand : AsmOperandClass {
  let Name = "AtomicMemOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR";
  let ParserMethod = "parseAtomicMemOp";
}

// GPR operand that is parsed/printed using the atomic memory-operand syntax
// above, i.e. "(reg)" rather than a bare register name.
def GPRMemAtomic : RegisterOperand<GPR> {
  let ParserMatchClass = AtomicMemOpOperand;
  let PrintMethod = "printAtomicMemOp";
}

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

// Load-reserved instruction format: reads memory at $rs1 into $rd.
// The rs2 field is hardwired to zero in the encoding (funct5 = 0b00010).
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
}

// Defines the four acquire/release variants of an LR instruction:
// plain, ".aq", ".rl" and ".aqrl" (suffix reflects the aq/rl encoding bits).
multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}
// AMO (and SC) instruction format: both reads and writes memory at $rs1;
// $rs2 is the data operand and $rd receives the result.
let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

// Defines the four acquire/release variants of an AMO (or SC) instruction,
// mirroring LR_r_aq_rl above.
multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}

// Selection patterns for atomic stores, covering the three addressing forms
// also handled for ordinary stores: plain base register, base + simm12, and
// frame-index + simm12.
multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                       ValueType vt = XLenVT> {
  def : Pat<(StoreOp BaseAddr:$rs1, (vt StTy:$rs2)),
            (Inst StTy:$rs2, BaseAddr:$rs1, 0)>;
  def : Pat<(StoreOp (add BaseAddr:$rs1, simm12:$imm12), (vt StTy:$rs2)),
            (Inst StTy:$rs2, BaseAddr:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), (vt StTy:$rs2)),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

// Word-sized (funct3 = 0b010) LR/SC and AMO instructions.
let Predicates = [HasStdExtA] in {
defm LR_W : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
defm SC_W : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">,
            Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
defm AMOSWAP_W : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOADD_W : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
                Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOXOR_W : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
                Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOAND_W : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
                Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOOR_W : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
              Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMIN_W : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
                Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAX_W : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
                Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMINU_W : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAXU_W : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
                 Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
} // Predicates = [HasStdExtA]

// Doubleword-sized (funct3 = 0b011) LR/SC and AMO instructions, RV64 only.
// funct5 encodings match the word-sized variants above.
let Predicates = [HasStdExtA, IsRV64] in {
defm LR_D : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
defm SC_D : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">,
            Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
defm AMOSWAP_D : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOADD_D : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
                Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOXOR_D : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
                Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOAND_D : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
                Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOOR_D : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
               Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMIN_D : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
                Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAX_D : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
                Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMINU_D : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAXU_D : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
                 Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtA, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtA] in {

/// Atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.

defm : LdPat<atomic_load_8,  LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;

defm : AtomicStPat<atomic_store_8,  SB, GPR>;
defm : AtomicStPat<atomic_store_16, SH, GPR>;
defm : AtomicStPat<atomic_store_32, SW, GPR>;

/// AMOs

// Maps each atomic-ordering variant of an atomicrmw PatFrag to the matching
// aq/rl variant of the base AMO instruction. Note seq_cst selects the
// ".aqrl" form, the strongest available.
multiclass AMOPat<string AtomicOp, string BaseInst> {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst)>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
}

defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;

// There is no AMO subtract instruction; lower atomic subtraction to an
// amoadd.w of the negated operand (SUB X0, $incr).
def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// Pseudo AMOs

// Pseudo for atomicrmw operations with no single-instruction AMO.
// Takes the ordering as an immediate; produces the result plus a scratch
// register, both earlyclobber so they cannot alias the inputs.
// NOTE(review): presumably expanded into an LR/SC loop after RA (the Size
// values below account for the expanded sequence) — confirm against the
// pseudo-expansion pass.
class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

let Size = 20 in
def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

// Masked pseudo AMO: like PseudoAMO but additionally takes a mask register
// selecting the sub-word lanes the operation applies to.
class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Masked signed min/max pseudo: additionally takes a sign-extension shift
// amount and requires a second scratch register.
class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Masked unsigned min/max pseudo: no shift amount needed (no sign
// extension), but still uses two scratch registers.
class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
// Pattern tying a masked atomicrmw intrinsic to its pseudo; the ordering
// operand is passed through as a target immediate.
class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;

// As above, for the signed min/max intrinsics which carry an extra
// shift-amount operand.
class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;

// Masked pseudo AMO definitions plus their selection patterns. The Size of
// each pseudo reflects its expanded instruction sequence.
let Size = 28 in
def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
let Size = 28 in
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
let Size = 28 in
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
let Size = 32 in
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;

/// Compare and exchange

// Pseudo for cmpxchg: result plus scratch register, both earlyclobber;
// ordering is passed as an immediate.
class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 16;
}

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}

def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;

// Masked cmpxchg pseudo for sub-word compare-and-exchange sequences.
def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 32;
}

def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;

} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {
/// 64-bit atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
defm : LdPat<atomic_load_64, LD, i64>;
defm : AtomicStPat<atomic_store_64, SD, GPR, i64>;

defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;

/// 64-bit AMOs

// As with the 32-bit patterns above: there is no AMO subtract, so atomic
// subtraction is lowered to amoadd.d of the negated operand.
def : Pat<(i64 (atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr)),
          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_acquire GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_release GPR:$addr, GPR:$incr)),
          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// 64-bit pseudo AMOs

let Size = 20 in
def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(i64 (atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(i64 (atomic_load_nand_64_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(i64 (atomic_load_nand_64_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(i64 (atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(i64 (atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

// The masked i64 intrinsics reuse the "...32"-named pseudos defined above.
// NOTE(review): presumably intentional — the masked sequences operate on a
// sub-word value within a single word regardless of XLen — confirm against
// the masked-atomics lowering.
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;

/// 64-bit compare and exchange

def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]