xref: /freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/RISCVInstrInfoA.td (revision 7fdf597e96a02165cfe22ff357b857d5fa15ed8a)
//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

// LR.{W,D}: load-reserved. Reads from the address in rs1 and registers a
// reservation for a subsequent SC. Encoded with funct5 = 0b00010 under
// OPC_AMO; the rs2 field is hard-wired to zero.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
}

// Expands one LR mnemonic into its four memory-ordering variants:
// plain, ".aq" (acquire), ".rl" (release), and ".aqrl" (both bits set).
multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}

// SC.{W,D}: store-conditional. Stores rs2 to the address in rs1 if the
// reservation from a prior LR still holds; rd receives the success code.
// Encoded with funct5 = 0b00011 under OPC_AMO.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
class SC_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00011, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

// Expands one SC mnemonic into its four memory-ordering variants,
// mirroring LR_r_aq_rl above.
multiclass SC_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : SC_r<0, 0, funct3, opcodestr>;
  def _AQ    : SC_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : SC_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : SC_r<1, 1, funct3, opcodestr # ".aqrl">;
}

46let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
47class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
48    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
49                    (outs GPR:$rd), (ins GPRMemZeroOffset:$rs1, GPR:$rs2),
50                    opcodestr, "$rd, $rs2, $rs1">;
51
// Expands one AMO mnemonic into its four memory-ordering variants,
// mirroring LR_r_aq_rl / SC_r_aq_rl above.
multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

// LR.W/SC.W require either the full 'A' extension or the Zalrsc subset.
// The .w forms produce sign-extended 32-bit results (IsSignExtendingOpW).
// SC.W lists ReadAtomicSTW twice because it reads two register operands.
let Predicates = [HasStdExtAOrZalrsc], IsSignExtendingOpW = 1 in {
defm LR_W       : LR_r_aq_rl<0b010, "lr.w">, Sched<[WriteAtomicLDW, ReadAtomicLDW]>;
defm SC_W       : SC_r_aq_rl<0b010, "sc.w">,
                  Sched<[WriteAtomicSTW, ReadAtomicSTW, ReadAtomicSTW]>;
} // Predicates = [HasStdExtAOrZalrsc], IsSignExtendingOpW = 1

// Word-sized AMOs require either the full 'A' extension or the Zaamo subset.
// The first template argument is the funct5 value selecting the operation;
// each defm also creates .aq/.rl/.aqrl ordering variants via AMO_rr_aq_rl.
let Predicates = [HasStdExtAOrZaamo], IsSignExtendingOpW = 1 in {
defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">,
                  Sched<[WriteAtomicW, ReadAtomicWA, ReadAtomicWD]>;
} // Predicates = [HasStdExtAOrZaamo], IsSignExtendingOpW = 1

// Doubleword LR/SC are RV64-only (funct3 = 0b011).
let Predicates = [HasStdExtAOrZalrsc, IsRV64] in {
defm LR_D       : LR_r_aq_rl<0b011, "lr.d">, Sched<[WriteAtomicLDD, ReadAtomicLDD]>;
defm SC_D       : SC_r_aq_rl<0b011, "sc.d">,
                  Sched<[WriteAtomicSTD, ReadAtomicSTD, ReadAtomicSTD]>;
} // Predicates = [HasStdExtAOrZalrsc, IsRV64]

// Doubleword AMOs are RV64-only. Same funct5 values as the .w forms above,
// with funct3 = 0b011 to select the 64-bit width.
let Predicates = [HasStdExtAOrZaamo, IsRV64] in {
defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">,
                  Sched<[WriteAtomicD, ReadAtomicDA, ReadAtomicDD]>;
} // Predicates = [HasStdExtAOrZaamo, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

// Atomic load/store are available under both +a and +force-atomics.
// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
let Predicates = [HasAtomicLdSt] in {
  // Atomic loads/stores of 8/16/32 bits lower to the ordinary load/store
  // instructions; ordering is provided by the separately inserted fences.
  def : LdPat<atomic_load_8,  LB>;
  def : LdPat<atomic_load_16, LH>;
  def : LdPat<atomic_load_32, LW>;

  def : StPat<atomic_store_8,  SB, GPR, XLenVT>;
  def : StPat<atomic_store_16, SH, GPR, XLenVT>;
  def : StPat<atomic_store_32, SW, GPR, XLenVT>;
}

// 64-bit atomic load/store are only available on RV64, via LD/SD.
let Predicates = [HasAtomicLdSt, IsRV64] in {
  def : LdPat<atomic_load_64, LD, i64>;
  def : StPat<atomic_store_64, SD, GPR, i64>;
}

/// AMOs

// Selects instructions for one atomic RMW operation at every memory
// ordering. Without Ztso, the ordering is encoded in the instruction's
// aq/rl bits (seq_cst conservatively uses the .aqrl form). With Ztso the
// hardware memory model is already TSO, so the plain (non-annotated)
// instruction is used for every ordering.
multiclass AMOPat<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
                  list<Predicate> ExtraPreds = []> {
let Predicates = !listconcat([HasStdExtA, NotHasStdExtZtso], ExtraPreds) in {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ"), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL"), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
}
let Predicates = !listconcat([HasStdExtA, HasStdExtZtso], ExtraPreds) in {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst), vt>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst), vt>;
}
}

// XLenVT-typed i32 atomic RMW patterns (the default vt of AMOPat).
defm : AMOPat<"atomic_swap_i32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_i32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_i32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_i32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_i32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_i32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_i32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_i32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_i32", "AMOMINU_W">;

// i64 atomic RMW patterns, additionally gated on RV64.
defm : AMOPat<"atomic_swap_i64", "AMOSWAP_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_add_i64", "AMOADD_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_and_i64", "AMOAND_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_or_i64", "AMOOR_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_xor_i64", "AMOXOR_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_max_i64", "AMOMAX_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_min_i64", "AMOMIN_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_umax_i64", "AMOMAXU_D", i64, [IsRV64]>;
defm : AMOPat<"atomic_load_umin_i64", "AMOMINU_D", i64, [IsRV64]>;


/// Pseudo AMOs

// Pseudo for atomic RMW operations with no native AMO instruction; expanded
// later into an LR/SC loop, using $scratch as a temporary. Both outputs are
// early-clobber so they are never allocated on top of the inputs.
class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Like PseudoAMO, but for masked (sub-word) atomics: $mask selects the
// bits of the word that the operation applies to.
class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Masked signed min/max needs a second scratch register and a shift amount
// ($sextshamt) — presumably used to sign-extend the sub-word value before
// comparing; confirm against RISCVExpandAtomicPseudoInsts.cpp.
class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Masked unsigned min/max: two scratch registers, but no sign-extension
// shift amount (the sub-word value is compared unsigned).
class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Maps a masked-atomicrmw intrinsic onto its masked AMO pseudo, passing
// address, increment, mask and ordering through unchanged.
class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, timm:$ordering)>;

// Same as PseudoMaskedAMOPat, with the extra shift-amount operand that the
// signed min/max pseudos carry.
class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           timm:$ordering)>;

let Predicates = [HasStdExtA] in {

// NOTE(review): the Size values on these pseudos appear to be the byte size
// of the LR/SC loop each one expands into — confirm against
// RISCVExpandAtomicPseudoInsts.cpp.
let Size = 20 in
def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
// (2 = monotonic, 4 = acquire, 5 = release, 6 = acq_rel, 7 = seq_cst)
def : Pat<(XLenVT (atomic_load_nand_i32_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(XLenVT (atomic_load_nand_i32_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(XLenVT (atomic_load_nand_i32_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(XLenVT (atomic_load_nand_i32_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(XLenVT (atomic_load_nand_i32_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

// Masked (sub-word) atomic RMW pseudos and the intrinsic patterns that
// select them.
let Size = 28 in
def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
let Size = 28 in
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
let Size = 28 in
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
let Size = 32 in
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
let Size = 44 in
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
let Size = 36 in
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {

// 64-bit NAND has no native AMO; expanded to an LR.D/SC.D loop.
let Size = 20 in
def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(i64 (atomic_load_nand_i64_monotonic GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(i64 (atomic_load_nand_i64_acquire GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(i64 (atomic_load_nand_i64_release GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(i64 (atomic_load_nand_i64_acq_rel GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(i64 (atomic_load_nand_i64_seq_cst GPR:$addr, GPR:$incr)),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

// The i64 masked intrinsics deliberately reuse the *32 pseudos defined
// above — the masked operation targets a sub-word field, so the same
// expansion serves both widths.
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
} // Predicates = [HasStdExtA, IsRV64]


/// Compare and exchange

// Pseudo for compare-and-swap, expanded later to an LR/SC retry loop.
// $res receives the loaded value; $scratch is a loop temporary.
class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 16;
}

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
// Selects a cmpxchg pseudo for each memory ordering of the given
// atomic_cmp_swap PatFrag family (2/4/5/6/7 = monotonic/acquire/release/
// acq_rel/seq_cst).
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst,
                            ValueType vt = XLenVT> {
  def : Pat<(vt (!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(vt (!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new)),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}

// Use the LR/SC cmpxchg expansion only when Zacas (native CAS) is absent.
let Predicates = [HasStdExtA, NoStdExtZacas] in {
def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32>;
}

// 64-bit LR/SC cmpxchg expansion, RV64-only and only without Zacas.
let Predicates = [HasStdExtA, NoStdExtZacas, IsRV64] in {
def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_i64", PseudoCmpXchg64, i64>;
}

let Predicates = [HasStdExtA] in {
// Masked (sub-word) compare-and-swap pseudo: $mask selects the bits being
// compared/replaced within the word.
def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
  let Size = 32;
}

// Select the masked cmpxchg intrinsic straight onto the pseudo; the
// ordering immediate is forwarded unchanged.
def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA]

// The i64 masked cmpxchg intrinsic also reuses PseudoMaskedCmpXchg32 —
// presumably because the masked operation only touches a sub-word field,
// matching the masked AMO reuse above.
let Predicates = [HasStdExtA, IsRV64] in {
def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, timm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]

//===----------------------------------------------------------------------===//
// Experimental RV64 i32 legalization patterns.
//===----------------------------------------------------------------------===//

// Like PatGprGpr, but the address operand ($rs1) is typed as XLenVT while
// the data operand and result use the pattern's value type vt — needed when
// vt is i32 on RV64, where pointers are i64.
class PatGprGprA<SDPatternOperator OpNode, RVInst Inst, ValueType vt>
    : Pat<(vt (OpNode (XLenVT GPR:$rs1), (vt GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>;

// Variant of AMOPat using PatGprGprA so the address can be XLenVT while the
// data is vt (i32 on RV64). Structure mirrors AMOPat exactly: without Ztso
// the ordering is encoded in the aq/rl instruction variants; with Ztso the
// plain instruction serves every ordering.
multiclass AMOPat2<string AtomicOp, string BaseInst, ValueType vt = XLenVT,
                   list<Predicate> ExtraPreds = []> {
let Predicates = !listconcat([HasStdExtA, NotHasStdExtZtso], ExtraPreds) in {
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_monotonic"),
                   !cast<RVInst>(BaseInst), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acquire"),
                   !cast<RVInst>(BaseInst#"_AQ"), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_release"),
                   !cast<RVInst>(BaseInst#"_RL"), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                   !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                   !cast<RVInst>(BaseInst#"_AQ_RL"), vt>;
}
let Predicates = !listconcat([HasStdExtA, HasStdExtZtso], ExtraPreds) in {
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_monotonic"),
                   !cast<RVInst>(BaseInst), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acquire"),
                   !cast<RVInst>(BaseInst), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_release"),
                   !cast<RVInst>(BaseInst), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                   !cast<RVInst>(BaseInst), vt>;
  def : PatGprGprA<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                   !cast<RVInst>(BaseInst), vt>;
}
}

// i32-typed atomic RMW patterns for RV64 i32 legalization.
defm : AMOPat2<"atomic_swap_i32", "AMOSWAP_W", i32>;
defm : AMOPat2<"atomic_load_add_i32", "AMOADD_W", i32>;
defm : AMOPat2<"atomic_load_and_i32", "AMOAND_W", i32>;
defm : AMOPat2<"atomic_load_or_i32", "AMOOR_W", i32>;
defm : AMOPat2<"atomic_load_xor_i32", "AMOXOR_W", i32>;
defm : AMOPat2<"atomic_load_max_i32", "AMOMAX_W", i32>;
defm : AMOPat2<"atomic_load_min_i32", "AMOMIN_W", i32>;
defm : AMOPat2<"atomic_load_umax_i32", "AMOMAXU_W", i32>;
defm : AMOPat2<"atomic_load_umin_i32", "AMOMINU_W", i32>;

// i32-typed cmpxchg patterns on RV64, reusing PseudoCmpXchg32.
// NOTE(review): unlike the XLenVT patterns above, this is not gated on
// NoStdExtZacas — confirm whether that is intentional.
let Predicates = [HasStdExtA, IsRV64] in
defm : PseudoCmpXchgPat<"atomic_cmp_swap_i32", PseudoCmpXchg32, i32>;

// i32-typed atomic load/store patterns for RV64 i32 legalization,
// mirroring the XLenVT patterns earlier in the file.
let Predicates = [HasAtomicLdSt] in {
  def : LdPat<atomic_load_8,  LB, i32>;
  def : LdPat<atomic_load_16, LH, i32>;
  def : LdPat<atomic_load_32, LW, i32>;

  def : StPat<atomic_store_8,  SB, GPR, i32>;
  def : StPat<atomic_store_16, SH, GPR, i32>;
  def : StPat<atomic_store_32, SW, GPR, i32>;
}

