xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoA.td (revision 29332c0dcee1e80c9fb871e06c3160bd5deb1b44)
1//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file describes the RISC-V instructions from the standard 'A', Atomic
10// Instructions extension.
11//
12//===----------------------------------------------------------------------===//
13
14//===----------------------------------------------------------------------===//
15// Operand and SDNode transformation definitions.
16//===----------------------------------------------------------------------===//
17
// Assembly operand that accepts either "(${gpr})" or "0(${gpr})"; a literal
// zero offset is silently ignored. This matches GNU as behaviour.
def AtomicMemOpOperand : AsmOperandClass {
  let Name = "AtomicMemOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR";
  let ParserMethod = "parseAtomicMemOp";
}
26
// GPR operand printed and parsed in atomic memory-operand form, i.e. the
// base register wrapped in parentheses.
def GPRMemAtomic : RegisterOperand<GPR> {
  let ParserMatchClass = AtomicMemOpOperand;
  let PrintMethod = "printAtomicMemOp";
}
31
32//===----------------------------------------------------------------------===//
33// Instruction class templates
34//===----------------------------------------------------------------------===//
35
// Load-reserved instruction format: funct5 is fixed to 0b00010 and the rs2
// encoding field is hardwired to zero.
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
  let hasSideEffects = 0;
  let mayLoad = 1;
  let mayStore = 0;
}
43
// Emits the four ordering variants of an LR instruction: the plain form and
// the .aq, .rl and .aqrl suffixed forms, selected by the aq/rl bits.
multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}
50
// Generic atomic memory operation format; the specific operation is chosen
// by funct5. Assembly order is "$rd, $rs2, $rs1" (destination, data, address).
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1"> {
  let hasSideEffects = 0;
  let mayLoad = 1;
  let mayStore = 1;
}
56
// Emits the four ordering variants of an AMO instruction: the plain form and
// the .aq, .rl and .aqrl suffixed forms, selected by the aq/rl bits.
multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
63
// Selection patterns mapping StoreOp onto Inst for the three supported
// addressing forms: plain base register, base plus simm12 offset, and
// frame-index plus simm12 offset.
multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
                       ValueType vt = XLenVT> {
  // Base register only; a zero immediate offset is supplied.
  def : Pat<(StoreOp BaseAddr:$rs1, (vt StTy:$rs2)),
            (Inst StTy:$rs2, BaseAddr:$rs1, 0)>;
  // Base register plus immediate offset.
  def : Pat<(StoreOp (add BaseAddr:$rs1, simm12:$imm12), (vt StTy:$rs2)),
            (Inst StTy:$rs2, BaseAddr:$rs1, simm12:$imm12)>;
  // Frame index plus immediate offset (or-as-add form).
  def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), (vt StTy:$rs2)),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
}
73
74//===----------------------------------------------------------------------===//
75// Instructions
76//===----------------------------------------------------------------------===//
77
let Predicates = [HasStdExtA] in {
// 32-bit (.w) atomics: funct3 = 0b010 for every instruction below.
defm LR_W       : LR_r_aq_rl<0b010, "lr.w">,
                  Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
defm SC_W       : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">,
                  Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
} // Predicates = [HasStdExtA]
101
let Predicates = [HasStdExtA, IsRV64] in {
// 64-bit (.d) atomics, RV64 only: funct3 = 0b011 for every instruction below.
defm LR_D       : LR_r_aq_rl<0b011, "lr.d">,
                  Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
defm SC_D       : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">,
                  Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtA, IsRV64]
125
126//===----------------------------------------------------------------------===//
127// Pseudo-instructions and codegen patterns
128//===----------------------------------------------------------------------===//
129
130let Predicates = [HasStdExtA] in {
131
/// Atomic loads and stores

// Plain load/store instructions suffice here: any fences required by the
// memory ordering are inserted separately, per the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.

defm : LdPat<atomic_load_8,  LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;

defm : AtomicStPat<atomic_store_8,  SB, GPR>;
defm : AtomicStPat<atomic_store_16, SH, GPR>;
defm : AtomicStPat<atomic_store_32, SW, GPR>;
144
/// AMOs

// Maps each ordering-suffixed PatFrag of AtomicOp to the corresponding
// aq/rl variant of BaseInst. seq_cst is lowered to the .aqrl form.
multiclass AMOPat<string AtomicOp, string BaseInst> {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst)>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
}
159
// Ordering-aware selection patterns for the 32-bit AMO instructions.
defm : AMOPat<"atomic_swap_32",      "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_32",  "AMOADD_W">;
defm : AMOPat<"atomic_load_and_32",  "AMOAND_W">;
defm : AMOPat<"atomic_load_or_32",   "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_32",  "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_32",  "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_32",  "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;
169
// There is no amosub instruction; atomic subtraction is selected as an
// atomic add of the negated increment (SUB X0, $incr).
def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
180
/// Pseudo AMOs

// Pseudo for an atomic read-modify-write with an ordering immediate.
// Both outputs are earlyclobber so neither can be allocated to a register
// holding $addr or $incr.
class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
190
def PseudoAtomicLoadNand32 : PseudoAMO;
// The ordering immediates below (2=monotonic, 4=acquire, 5=release,
// 6=acq_rel, 7=seq_cst) must be kept in sync with the AtomicOrdering enum
// in AtomicOrdering.h.
def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;
204
// Masked variant of PseudoAMO: takes an additional $mask register that
// selects the bits being operated on.
class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
213
// Masked signed min/max pseudo: also takes $sextshamt (a sign-extension
// shift amount) and needs a second scratch register.
class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
224
// Masked unsigned min/max pseudo: same operands as PseudoMaskedAMO but
// needs a second scratch register.
class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
234
// Selects a masked-AMO intrinsic call to the given pseudo, forwarding all
// operands (including the ordering immediate) unchanged.
class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;

// As above, for the min/max intrinsics that carry an extra shift amount.
class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;
244
// One pseudo instruction per masked atomicrmw operation, each paired with
// the pattern that selects its i32 intrinsic onto it.
def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;
269
/// Compare and exchange

// Compare-and-exchange pseudo; $res receives the loaded value. Outputs are
// earlyclobber so they cannot alias any of the input registers.
class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
280
// Maps each ordering-suffixed cmpxchg PatFrag to CmpXchgInst with the
// matching ordering immediate. The immediates (2=monotonic, 4=acquire,
// 5=release, 6=acq_rel, 7=seq_cst) must be kept in sync with the
// AtomicOrdering enum in AtomicOrdering.h.
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}
295
// 32-bit cmpxchg pseudo and its selection patterns.
def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;
298
// Masked 32-bit cmpxchg pseudo; takes an extra $mask register selecting the
// bits being compared and exchanged.
def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
308
// Select the masked cmpxchg i32 intrinsic onto its pseudo, forwarding all
// operands unchanged.
def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
313
314} // Predicates = [HasStdExtA]
315
316let Predicates = [HasStdExtA, IsRV64] in {
317
/// 64-bit atomic loads and stores

// Plain LD/SD suffice: fences required by the memory ordering are inserted
// per RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
defm : LdPat<atomic_load_64, LD, i64>;
defm : AtomicStPat<atomic_store_64, SD, GPR, i64>;

// Ordering-aware selection patterns for the 64-bit AMO instructions.
defm : AMOPat<"atomic_swap_64",      "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64",  "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64",  "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64",   "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64",  "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64",  "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64",  "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;
334
/// 64-bit AMOs

// As with the 32-bit case, atomic subtraction is an atomic add of the
// negated increment.
def : Pat<(i64 (atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr)),
          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_acquire GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_release GPR:$addr, GPR:$incr)),
          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(i64 (atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr)),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
347
/// 64-bit pseudo AMOs

def PseudoAtomicLoadNand64 : PseudoAMO;
// The ordering immediates below must be kept in sync with the
// AtomicOrdering enum in AtomicOrdering.h.
def : Pat<(i64 (atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(i64 (atomic_load_nand_64_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(i64 (atomic_load_nand_64_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(i64 (atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(i64 (atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;
363
// NOTE: the i64 masked atomicrmw intrinsics reuse the same pseudo
// instructions defined for the i32 intrinsics above.
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
380
/// 64-bit compare and exchange

def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

// NOTE: the i64 masked cmpxchg intrinsic reuses the pseudo defined for the
// i32 intrinsic above.
def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
390} // Predicates = [HasStdExtA, IsRV64]
391