//===-- RISCVInstrInfoA.td - RISC-V 'A' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions from the standard 'A', Atomic
// Instructions extension.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

// A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
// Used for GNU as compatibility.
def AtomicMemOpOperand : AsmOperandClass {
  let Name = "AtomicMemOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isReg";
  let ParserMethod = "parseAtomicMemOp";
}

def GPRMemAtomic : RegisterOperand<GPR> {
  let ParserMatchClass = AtomicMemOpOperand;
  let PrintMethod = "printAtomicMemOp";
}
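
// For example, both of these source forms parse to the same operand and
// encode identically (illustrative assembly):
//   lr.w a0, (a1)
//   lr.w a0, 0(a1)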

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
class LR_r<bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<0b00010, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1),
                    opcodestr, "$rd, $rs1"> {
  let rs2 = 0;
}

multiclass LR_r_aq_rl<bits<3> funct3, string opcodestr> {
  def ""     : LR_r<0, 0, funct3, opcodestr>;
  def _AQ    : LR_r<1, 0, funct3, opcodestr # ".aq">;
  def _RL    : LR_r<0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : LR_r<1, 1, funct3, opcodestr # ".aqrl">;
}
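
// As a sketch of the expansion: "defm LR_W : LR_r_aq_rl<0b010, "lr.w">"
// below produces LR_W ("lr.w"), LR_W_AQ ("lr.w.aq"), LR_W_RL ("lr.w.rl")
// and LR_W_AQ_RL ("lr.w.aqrl").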

let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in
class AMO_rr<bits<5> funct5, bit aq, bit rl, bits<3> funct3, string opcodestr>
    : RVInstRAtomic<funct5, aq, rl, funct3, OPC_AMO,
                    (outs GPR:$rd), (ins GPRMemAtomic:$rs1, GPR:$rs2),
                    opcodestr, "$rd, $rs2, $rs1">;

multiclass AMO_rr_aq_rl<bits<5> funct5, bits<3> funct3, string opcodestr> {
  def ""     : AMO_rr<funct5, 0, 0, funct3, opcodestr>;
  def _AQ    : AMO_rr<funct5, 1, 0, funct3, opcodestr # ".aq">;
  def _RL    : AMO_rr<funct5, 0, 1, funct3, opcodestr # ".rl">;
  def _AQ_RL : AMO_rr<funct5, 1, 1, funct3, opcodestr # ".aqrl">;
}
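
// Note the asm operand order "$rd, $rs2, $rs1": the data register is printed
// before the address, e.g. "amoswap.w.aqrl a0, a1, (a2)" for AMOSWAP_W_AQ_RL
// (illustrative assembly).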

multiclass AtomicStPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
  def : Pat<(StoreOp GPR:$rs1, StTy:$rs2), (Inst StTy:$rs2, GPR:$rs1, 0)>;
  def : Pat<(StoreOp AddrFI:$rs1, StTy:$rs2), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
  def : Pat<(StoreOp (add GPR:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (add AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp (IsOrAdd AddrFI:$rs1, simm12:$imm12), StTy:$rs2),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
}
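
// Note that, unlike a plain store node, atomic_store takes its operands as
// (address, value), so these patterns list $rs1 (the address) first and then
// swap the operands into the (value, base, offset) order the instructions use.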

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {
defm LR_W       : LR_r_aq_rl<0b010, "lr.w">;
defm SC_W       : AMO_rr_aq_rl<0b00011, 0b010, "sc.w">;
defm AMOSWAP_W  : AMO_rr_aq_rl<0b00001, 0b010, "amoswap.w">;
defm AMOADD_W   : AMO_rr_aq_rl<0b00000, 0b010, "amoadd.w">;
defm AMOXOR_W   : AMO_rr_aq_rl<0b00100, 0b010, "amoxor.w">;
defm AMOAND_W   : AMO_rr_aq_rl<0b01100, 0b010, "amoand.w">;
defm AMOOR_W    : AMO_rr_aq_rl<0b01000, 0b010, "amoor.w">;
defm AMOMIN_W   : AMO_rr_aq_rl<0b10000, 0b010, "amomin.w">;
defm AMOMAX_W   : AMO_rr_aq_rl<0b10100, 0b010, "amomax.w">;
defm AMOMINU_W  : AMO_rr_aq_rl<0b11000, 0b010, "amominu.w">;
defm AMOMAXU_W  : AMO_rr_aq_rl<0b11100, 0b010, "amomaxu.w">;
} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {
defm LR_D       : LR_r_aq_rl<0b011, "lr.d">;
defm SC_D       : AMO_rr_aq_rl<0b00011, 0b011, "sc.d">;
defm AMOSWAP_D  : AMO_rr_aq_rl<0b00001, 0b011, "amoswap.d">;
defm AMOADD_D   : AMO_rr_aq_rl<0b00000, 0b011, "amoadd.d">;
defm AMOXOR_D   : AMO_rr_aq_rl<0b00100, 0b011, "amoxor.d">;
defm AMOAND_D   : AMO_rr_aq_rl<0b01100, 0b011, "amoand.d">;
defm AMOOR_D    : AMO_rr_aq_rl<0b01000, 0b011, "amoor.d">;
defm AMOMIN_D   : AMO_rr_aq_rl<0b10000, 0b011, "amomin.d">;
defm AMOMAX_D   : AMO_rr_aq_rl<0b10100, 0b011, "amomax.d">;
defm AMOMINU_D  : AMO_rr_aq_rl<0b11000, 0b011, "amominu.d">;
defm AMOMAXU_D  : AMO_rr_aq_rl<0b11100, 0b011, "amomaxu.d">;
} // Predicates = [HasStdExtA, IsRV64]

//===----------------------------------------------------------------------===//
// Pseudo-instructions and codegen patterns
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtA] in {

/// Atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
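// As a rough sketch of that mapping, a seq_cst atomic load becomes
// "fence rw, rw; lw; fence r, rw" and a seq_cst atomic store becomes
// "fence rw, w; sw" (illustrative; the exact fences are chosen by those
// hooks, not here).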

defm : LdPat<atomic_load_8,  LB>;
defm : LdPat<atomic_load_16, LH>;
defm : LdPat<atomic_load_32, LW>;

defm : AtomicStPat<atomic_store_8,  SB, GPR>;
defm : AtomicStPat<atomic_store_16, SH, GPR>;
defm : AtomicStPat<atomic_store_32, SW, GPR>;

/// AMOs

multiclass AMOPat<string AtomicOp, string BaseInst> {
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_monotonic"),
                  !cast<RVInst>(BaseInst)>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acquire"),
                  !cast<RVInst>(BaseInst#"_AQ")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_release"),
                  !cast<RVInst>(BaseInst#"_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_acq_rel"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
  def : PatGprGpr<!cast<PatFrag>(AtomicOp#"_seq_cst"),
                  !cast<RVInst>(BaseInst#"_AQ_RL")>;
}
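
// For example, "defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">" below selects
// atomic_swap_32_monotonic to AMOSWAP_W, atomic_swap_32_acquire to
// AMOSWAP_W_AQ, and so on; note that both acq_rel and seq_cst map to the
// .aqrl variant.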

defm : AMOPat<"atomic_swap_32", "AMOSWAP_W">;
defm : AMOPat<"atomic_load_add_32", "AMOADD_W">;
defm : AMOPat<"atomic_load_and_32", "AMOAND_W">;
defm : AMOPat<"atomic_load_or_32", "AMOOR_W">;
defm : AMOPat<"atomic_load_xor_32", "AMOXOR_W">;
defm : AMOPat<"atomic_load_max_32", "AMOMAX_W">;
defm : AMOPat<"atomic_load_min_32", "AMOMIN_W">;
defm : AMOPat<"atomic_load_umax_32", "AMOMAXU_W">;
defm : AMOPat<"atomic_load_umin_32", "AMOMINU_W">;

def : Pat<(atomic_load_sub_32_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_W GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acquire GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_release GPR:$addr, GPR:$incr),
          (AMOADD_W_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_32_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_W_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
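
// There is no amosub instruction, so atomic subtraction is instead selected
// above as an atomic add of the negated increment, materialized with
// "sub rd, x0, rs" (the (SUB X0, GPR:$incr) operand).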

/// Pseudo AMOs

class PseudoAMO : Pseudo<(outs GPR:$res, GPR:$scratch),
                         (ins GPR:$addr, GPR:$incr, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
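
// Pseudo AMOs such as this are expanded into LR/SC loops after register
// allocation; $scratch is the temporary used inside the loop, and the
// @earlyclobber constraints keep $res and $scratch from overlapping the
// input registers.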

def PseudoAtomicLoadNand32 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
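// (For reference: Monotonic = 2, Acquire = 4, Release = 5,
// AcquireRelease = 6, SequentiallyConsistent = 7, matching the immediates
// used below.)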
def : Pat<(atomic_load_nand_32_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_32_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_32_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_32_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_32_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand32 GPR:$addr, GPR:$incr, 7)>;

class PseudoMaskedAMO
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOMinMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$sextshamt,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

class PseudoMaskedAMOUMinUMax
    : Pseudo<(outs GPR:$res, GPR:$scratch1, GPR:$scratch2),
             (ins GPR:$addr, GPR:$incr, GPR:$mask, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch1,"
                    "@earlyclobber $scratch2";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}
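
// The masked pseudos implement part-word (i8/i16) atomics on top of
// word-sized LR/SC: $addr is the containing aligned word, $mask selects the
// part-word, and the signed min/max variants additionally take $sextshamt so
// the loaded value can be sign-extended before the comparison.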

class PseudoMaskedAMOPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, imm:$ordering)>;

class PseudoMaskedAMOMinMaxPat<Intrinsic intrin, Pseudo AMOInst>
    : Pat<(intrin GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           imm:$ordering),
          (AMOInst GPR:$addr, GPR:$incr, GPR:$mask, GPR:$shiftamt,
           imm:$ordering)>;

def PseudoMaskedAtomicSwap32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i32,
                         PseudoMaskedAtomicSwap32>;
def PseudoMaskedAtomicLoadAdd32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i32,
                         PseudoMaskedAtomicLoadAdd32>;
def PseudoMaskedAtomicLoadSub32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i32,
                         PseudoMaskedAtomicLoadSub32>;
def PseudoMaskedAtomicLoadNand32 : PseudoMaskedAMO;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i32,
                         PseudoMaskedAtomicLoadNand32>;
def PseudoMaskedAtomicLoadMax32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i32,
                               PseudoMaskedAtomicLoadMax32>;
def PseudoMaskedAtomicLoadMin32 : PseudoMaskedAMOMinMax;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i32,
                               PseudoMaskedAtomicLoadMin32>;
def PseudoMaskedAtomicLoadUMax32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i32,
                         PseudoMaskedAtomicLoadUMax32>;
def PseudoMaskedAtomicLoadUMin32 : PseudoMaskedAMOUMinUMax;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i32,
                         PseudoMaskedAtomicLoadUMin32>;

/// Compare and exchange

class PseudoCmpXchg
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
multiclass PseudoCmpXchgPat<string Op, Pseudo CmpXchgInst> {
  def : Pat<(!cast<PatFrag>(Op#"_monotonic") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 2)>;
  def : Pat<(!cast<PatFrag>(Op#"_acquire") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 4)>;
  def : Pat<(!cast<PatFrag>(Op#"_release") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 5)>;
  def : Pat<(!cast<PatFrag>(Op#"_acq_rel") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 6)>;
  def : Pat<(!cast<PatFrag>(Op#"_seq_cst") GPR:$addr, GPR:$cmp, GPR:$new),
            (CmpXchgInst GPR:$addr, GPR:$cmp, GPR:$new, 7)>;
}

def PseudoCmpXchg32 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_32", PseudoCmpXchg32>;

def PseudoMaskedCmpXchg32
    : Pseudo<(outs GPR:$res, GPR:$scratch),
             (ins GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask,
              ixlenimm:$ordering), []> {
  let Constraints = "@earlyclobber $res,@earlyclobber $scratch";
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 0;
}

def : Pat<(int_riscv_masked_cmpxchg_i32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;

} // Predicates = [HasStdExtA]

let Predicates = [HasStdExtA, IsRV64] in {

/// 64-bit atomic loads and stores

// Fences will be inserted for atomic load/stores according to the logic in
// RISCVTargetLowering::{emitLeadingFence,emitTrailingFence}.
defm : LdPat<atomic_load_64, LD>;
defm : AtomicStPat<atomic_store_64, SD, GPR>;

defm : AMOPat<"atomic_swap_64", "AMOSWAP_D">;
defm : AMOPat<"atomic_load_add_64", "AMOADD_D">;
defm : AMOPat<"atomic_load_and_64", "AMOAND_D">;
defm : AMOPat<"atomic_load_or_64", "AMOOR_D">;
defm : AMOPat<"atomic_load_xor_64", "AMOXOR_D">;
defm : AMOPat<"atomic_load_max_64", "AMOMAX_D">;
defm : AMOPat<"atomic_load_min_64", "AMOMIN_D">;
defm : AMOPat<"atomic_load_umax_64", "AMOMAXU_D">;
defm : AMOPat<"atomic_load_umin_64", "AMOMINU_D">;

/// 64-bit AMOs

def : Pat<(atomic_load_sub_64_monotonic GPR:$addr, GPR:$incr),
          (AMOADD_D GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acquire GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_release GPR:$addr, GPR:$incr),
          (AMOADD_D_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_acq_rel GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;
def : Pat<(atomic_load_sub_64_seq_cst GPR:$addr, GPR:$incr),
          (AMOADD_D_AQ_RL GPR:$addr, (SUB X0, GPR:$incr))>;

/// 64-bit pseudo AMOs

def PseudoAtomicLoadNand64 : PseudoAMO;
// Ordering constants must be kept in sync with the AtomicOrdering enum in
// AtomicOrdering.h.
def : Pat<(atomic_load_nand_64_monotonic GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 2)>;
def : Pat<(atomic_load_nand_64_acquire GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 4)>;
def : Pat<(atomic_load_nand_64_release GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 5)>;
def : Pat<(atomic_load_nand_64_acq_rel GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 6)>;
def : Pat<(atomic_load_nand_64_seq_cst GPR:$addr, GPR:$incr),
          (PseudoAtomicLoadNand64 GPR:$addr, GPR:$incr, 7)>;

def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_xchg_i64,
                         PseudoMaskedAtomicSwap32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_add_i64,
                         PseudoMaskedAtomicLoadAdd32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_sub_i64,
                         PseudoMaskedAtomicLoadSub32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_nand_i64,
                         PseudoMaskedAtomicLoadNand32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_max_i64,
                               PseudoMaskedAtomicLoadMax32>;
def : PseudoMaskedAMOMinMaxPat<int_riscv_masked_atomicrmw_min_i64,
                               PseudoMaskedAtomicLoadMin32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umax_i64,
                         PseudoMaskedAtomicLoadUMax32>;
def : PseudoMaskedAMOPat<int_riscv_masked_atomicrmw_umin_i64,
                         PseudoMaskedAtomicLoadUMin32>;
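
// The _i64 masked intrinsics reuse the *32 pseudos: on RV64 the operands are
// XLen-sized, but a part-word atomic still operates on the 32-bit aligned
// word containing it, so the same expansions apply.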

/// 64-bit compare and exchange

def PseudoCmpXchg64 : PseudoCmpXchg;
defm : PseudoCmpXchgPat<"atomic_cmp_swap_64", PseudoCmpXchg64>;

def : Pat<(int_riscv_masked_cmpxchg_i64
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering),
          (PseudoMaskedCmpXchg32
            GPR:$addr, GPR:$cmpval, GPR:$newval, GPR:$mask, imm:$ordering)>;
} // Predicates = [HasStdExtA, IsRV64]