//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

// A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
// Used for GNU as compatibility.
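// For example, both of the following spellings are accepted by the assembler
// and parse to the same operand (only a literal 0 offset is tolerated):
//   lr.w a0, (a1)
//   lr.w a0, 0(a1)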
def AtomicMemOpOperand : AsmOperandClass {
  let Name = "AtomicMemOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR";
  let ParserMethod = "parseAtomicMemOp";
}

def GPRMemAtomic : RegisterOperand<GPR> {
  let ParserMatchClass = AtomicMemOpOperand;
  let PrintMethod = "printAtomicMemOp";
}

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
}

multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}

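// Selection patterns for atomic stores. These mirror StPat, but with the
// operands of the source node swapped: the atomic store node matched here
// takes (address, value) rather than (value, address) as a regular store does.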
multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
  def : Pat<(StoreOp GPR:$rs1, StTy:$rs2), (Inst StTy:$rs2, GPR:$rs1, 0)>;
  def : Pat<(StoreOp AddrFI:$rs1, StTy:$rs2), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
  def : Pat<(StoreOp (add GPR:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (add AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {
defm LR_W       : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
defm SC_W       : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">,
                  Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {
defm LR_D       : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
defm SC_D       : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">,
                  Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtA, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {

/// Atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.

defm : LdPat<atomic_load_8,  LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;

defm : AtomicStPat<atomic_store_8,  SB, GPR>;
defm : AtomicStPat<atomic_store_16, SH, GPR>;
defm : AtomicStPat<atomic_store_32, SW, GPR>;

/// AMOs

multiclass AMOPat<string AtomicOp, string BaseInst> {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst)>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
}

defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;

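// There is no amosub.* instruction, so atomic subtraction is selected as the
// corresponding amoadd.* of the negated increment.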
def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// Pseudo AMOs

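// These pseudos are expanded later into LR/SC retry loops; the explicit
// earlyclobber scratch output gives that expansion a register to work with,
// and the ordering is carried as an immediate operand so the expansion knows
// which memory-ordering variant to emit.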
class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
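// At the time of writing those constants are Monotonic = 2, Acquire = 4,
// Release = 5, AcquireRelease = 6 and SequentiallyConsistent = 7, which is
// what the immediates in the patterns below encode.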
def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;

class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;

def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;

/// Compare and exchange

class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}

def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;

def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;

} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {

/// 64-bit atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
defm : LdPat<atomic_load_64, LD>;
defm : AtomicStPat<atomic_store_64, SD, GPR>;

defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;

/// 64-bit AMOs

def : Pat<(atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acquire GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_release GPR:$addr, GPR:$incr),
          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// 64-bit pseudo AMOs

def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_64_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_64_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

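// The i64 masked intrinsics implement sub-word (i8/i16) atomics on RV64 with
// XLen-typed operands; the underlying access still fits in a 32-bit word, so
// they reuse the 32-bit pseudos defined above.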
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;

/// 64-bit compare and exchange

def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]