1// WebAssemblyInstrAtomics.td-WebAssembly Atomic codegen support-*- tablegen -*-
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// WebAssembly Atomic operand code-gen constructs.
11///
12//===----------------------------------------------------------------------===//
13
14let UseNamedOperandTable = 1 in
15multiclass ATOMIC_I<dag oops_r, dag iops_r, dag oops_s, dag iops_s,
16                    list<dag> pattern_r, string asmstr_r,
17                    string asmstr_s, bits<32> atomic_op,
18                    string is64 = "false"> {
19  defm "" : I<oops_r, iops_r, oops_s, iops_s, pattern_r, asmstr_r, asmstr_s,
20              !or(0xfe00, !and(0xff, atomic_op)), is64>,
21            Requires<[HasAtomics]>;
22}
23
24multiclass ATOMIC_NRI<dag oops, dag iops, list<dag> pattern, string asmstr = "",
25                      bits<32> atomic_op = -1> {
26  defm "" : NRI<oops, iops, pattern, asmstr,
27                !or(0xfe00, !and(0xff, atomic_op))>,
28            Requires<[HasAtomics]>;
29}
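
// Note on encoding (editorial illustration, not from the original file): the
// threads proposal encodes every atomic instruction as the 0xFE prefix byte
// followed by a sub-opcode byte, so !or(0xfe00, !and(0xff, atomic_op)) simply
// combines the two. For example, i32.atomic.load below passes atomic_op 0x10
// and ends up with the instruction value 0xfe10.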
30
31//===----------------------------------------------------------------------===//
32// Atomic wait / notify
33//===----------------------------------------------------------------------===//
34
35let hasSideEffects = 1 in {
36defm MEMORY_ATOMIC_NOTIFY_A32 :
37  ATOMIC_I<(outs I32:$dst),
38           (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$count),
39           (outs), (ins P2Align:$p2align, offset32_op:$off), [],
40           "memory.atomic.notify \t$dst, ${off}(${addr})${p2align}, $count",
41           "memory.atomic.notify \t${off}${p2align}", 0x00, "false">;
42defm MEMORY_ATOMIC_NOTIFY_A64 :
43  ATOMIC_I<(outs I32:$dst),
44           (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$count),
45           (outs), (ins P2Align:$p2align, offset64_op:$off), [],
46           "memory.atomic.notify \t$dst, ${off}(${addr})${p2align}, $count",
47           "memory.atomic.notify \t${off}${p2align}", 0x00, "true">;
48let mayLoad = 1 in {
49defm MEMORY_ATOMIC_WAIT32_A32 :
50  ATOMIC_I<(outs I32:$dst),
51           (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I32:$exp,
52                I64:$timeout),
53           (outs), (ins P2Align:$p2align, offset32_op:$off), [],
54           "memory.atomic.wait32 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
55           "memory.atomic.wait32 \t${off}${p2align}", 0x01, "false">;
56defm MEMORY_ATOMIC_WAIT32_A64 :
57  ATOMIC_I<(outs I32:$dst),
58           (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I32:$exp,
59                I64:$timeout),
60           (outs), (ins P2Align:$p2align, offset64_op:$off), [],
61           "memory.atomic.wait32 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
62           "memory.atomic.wait32 \t${off}${p2align}", 0x01, "true">;
63defm MEMORY_ATOMIC_WAIT64_A32 :
64  ATOMIC_I<(outs I32:$dst),
65           (ins P2Align:$p2align, offset32_op:$off, I32:$addr, I64:$exp,
66                I64:$timeout),
67           (outs), (ins P2Align:$p2align, offset32_op:$off), [],
68           "memory.atomic.wait64 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
69           "memory.atomic.wait64 \t${off}${p2align}", 0x02, "false">;
70defm MEMORY_ATOMIC_WAIT64_A64 :
71  ATOMIC_I<(outs I32:$dst),
72           (ins P2Align:$p2align, offset64_op:$off, I64:$addr, I64:$exp,
73                I64:$timeout),
74           (outs), (ins P2Align:$p2align, offset64_op:$off), [],
75           "memory.atomic.wait64 \t$dst, ${off}(${addr})${p2align}, $exp, $timeout",
76           "memory.atomic.wait64 \t${off}${p2align}", 0x02, "true">;
77} // mayLoad = 1
78} // hasSideEffects = 1
79
80// Select notifies with no constant offset.
81def NotifyPatNoOffset_A32 :
82  Pat<(i32 (int_wasm_memory_atomic_notify I32:$addr, I32:$count)),
83      (MEMORY_ATOMIC_NOTIFY_A32 0, 0, I32:$addr, I32:$count)>,
84  Requires<[HasAddr32, HasAtomics]>;
85def NotifyPatNoOffset_A64 :
86  Pat<(i32 (int_wasm_memory_atomic_notify I64:$addr, I32:$count)),
87      (MEMORY_ATOMIC_NOTIFY_A64 0, 0, I64:$addr, I32:$count)>,
88  Requires<[HasAddr64, HasAtomics]>;
89
90// Select notifies with a constant offset.
91
92// Pattern with address + immediate offset
93multiclass NotifyPatImmOff<PatFrag operand, string inst> {
94  def : Pat<(i32 (int_wasm_memory_atomic_notify (operand I32:$addr, imm:$off),
95                  I32:$count)),
96            (!cast<NI>(inst#_A32) 0, imm:$off, I32:$addr, I32:$count)>,
97        Requires<[HasAddr32, HasAtomics]>;
98  def : Pat<(i32 (int_wasm_memory_atomic_notify (operand I64:$addr, imm:$off),
99                  I32:$count)),
100            (!cast<NI>(inst#_A64) 0, imm:$off, I64:$addr, I32:$count)>,
101        Requires<[HasAddr64, HasAtomics]>;
102}
103defm : NotifyPatImmOff<regPlusImm, "MEMORY_ATOMIC_NOTIFY">;
104defm : NotifyPatImmOff<or_is_add, "MEMORY_ATOMIC_NOTIFY">;
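
// For example (an editorial sketch, not part of the original file): notifying
// waiters at address %p + 16, i.e.
//   (int_wasm_memory_atomic_notify (add I32:$p, 16), I32:$count)
// is matched by the regPlusImm pattern above, which folds the +16 into the
// instruction's constant offset field instead of emitting a separate i32.add.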
105
106// Select notifies with just a constant offset.
107def NotifyPatOffsetOnly_A32 :
108  Pat<(i32 (int_wasm_memory_atomic_notify imm:$off, I32:$count)),
109      (MEMORY_ATOMIC_NOTIFY_A32 0, imm:$off, (CONST_I32 0), I32:$count)>,
110  Requires<[HasAddr32, HasAtomics]>;
111def NotifyPatOffsetOnly_A64 :
112  Pat<(i32 (int_wasm_memory_atomic_notify imm:$off, I32:$count)),
113      (MEMORY_ATOMIC_NOTIFY_A64 0, imm:$off, (CONST_I64 0), I32:$count)>,
114  Requires<[HasAddr64, HasAtomics]>;
115
116def NotifyPatGlobalAddrOffOnly_A32 :
117  Pat<(i32 (int_wasm_memory_atomic_notify (WebAssemblywrapper tglobaladdr:$off),
118                                          I32:$count)),
119      (MEMORY_ATOMIC_NOTIFY_A32 0, tglobaladdr:$off, (CONST_I32 0), I32:$count)
120     >,
121  Requires<[HasAddr32, HasAtomics, IsNotPIC]>;
122def NotifyPatGlobalAddrOffOnly_A64 :
123  Pat<(i32 (int_wasm_memory_atomic_notify (WebAssemblywrapper tglobaladdr:$off),
124                                          I32:$count)),
125      (MEMORY_ATOMIC_NOTIFY_A64 0, tglobaladdr:$off, (CONST_I64 0), I32:$count)
126     >,
127  Requires<[HasAddr64, HasAtomics, IsNotPIC]>;
128
129// Select waits with no constant offset.
130multiclass WaitPatNoOffset<ValueType ty, Intrinsic kind,
131                           string inst> {
132  def : Pat<(i32 (kind I32:$addr, ty:$exp, I64:$timeout)),
133            (!cast<NI>(inst#_A32) 0, 0, I32:$addr, ty:$exp, I64:$timeout)>,
134        Requires<[HasAddr32, HasAtomics]>;
135  def : Pat<(i32 (kind I64:$addr, ty:$exp, I64:$timeout)),
136            (!cast<NI>(inst#_A64) 0, 0, I64:$addr, ty:$exp, I64:$timeout)>,
137        Requires<[HasAddr64, HasAtomics]>;
138}
139defm : WaitPatNoOffset<i32, int_wasm_memory_atomic_wait32,
140                       "MEMORY_ATOMIC_WAIT32">;
141defm : WaitPatNoOffset<i64, int_wasm_memory_atomic_wait64,
142                       "MEMORY_ATOMIC_WAIT64">;
147
148// Select waits with a constant offset.
149
150// Pattern with address + immediate offset
151multiclass WaitPatImmOff<ValueType ty, Intrinsic kind, PatFrag operand,
152                         string inst> {
153  def : Pat<(i32 (kind (operand I32:$addr, imm:$off), ty:$exp, I64:$timeout)),
154            (!cast<NI>(inst#_A32) 0, imm:$off, I32:$addr, ty:$exp,
155              I64:$timeout)>,
156        Requires<[HasAddr32, HasAtomics]>;
157  def : Pat<(i32 (kind (operand I64:$addr, imm:$off), ty:$exp, I64:$timeout)),
158            (!cast<NI>(inst#_A64) 0, imm:$off, I64:$addr, ty:$exp,
159              I64:$timeout)>,
160        Requires<[HasAddr64, HasAtomics]>;
161}
162defm : WaitPatImmOff<i32, int_wasm_memory_atomic_wait32, regPlusImm,
163                     "MEMORY_ATOMIC_WAIT32">;
164defm : WaitPatImmOff<i32, int_wasm_memory_atomic_wait32, or_is_add,
165                     "MEMORY_ATOMIC_WAIT32">;
166defm : WaitPatImmOff<i64, int_wasm_memory_atomic_wait64, regPlusImm,
167                     "MEMORY_ATOMIC_WAIT64">;
168defm : WaitPatImmOff<i64, int_wasm_memory_atomic_wait64, or_is_add,
169                     "MEMORY_ATOMIC_WAIT64">;
170
171// Select waits with just a constant offset.
172multiclass WaitPatOffsetOnly<ValueType ty, Intrinsic kind, string inst> {
173  def : Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)),
174            (!cast<NI>(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$exp,
175               I64:$timeout)>,
176        Requires<[HasAddr32, HasAtomics]>;
177  def : Pat<(i32 (kind imm:$off, ty:$exp, I64:$timeout)),
178            (!cast<NI>(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$exp,
179               I64:$timeout)>,
180        Requires<[HasAddr64, HasAtomics]>;
181}
182defm : WaitPatOffsetOnly<i32, int_wasm_memory_atomic_wait32,
183                         "MEMORY_ATOMIC_WAIT32">;
184defm : WaitPatOffsetOnly<i64, int_wasm_memory_atomic_wait64,
185                         "MEMORY_ATOMIC_WAIT64">;
186
187multiclass WaitPatGlobalAddrOffOnly<ValueType ty, Intrinsic kind, string inst> {
188  def : Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp,
189                  I64:$timeout)),
190            (!cast<NI>(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp,
191               I64:$timeout)>,
192        Requires<[HasAddr32, HasAtomics, IsNotPIC]>;
193  def : Pat<(i32 (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp,
194                  I64:$timeout)),
195            (!cast<NI>(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$exp,
196               I64:$timeout)>,
197        Requires<[HasAddr64, HasAtomics, IsNotPIC]>;
198}
199defm : WaitPatGlobalAddrOffOnly<i32, int_wasm_memory_atomic_wait32,
200                                "MEMORY_ATOMIC_WAIT32">;
201defm : WaitPatGlobalAddrOffOnly<i64, int_wasm_memory_atomic_wait64,
202                                "MEMORY_ATOMIC_WAIT64">;
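
// Illustrative example (an editorial sketch): at the IR level,
//   %r = call i32 @llvm.wasm.memory.atomic.wait32(i32* %p, i32 %exp, i64 -1)
// (a negative timeout means "wait forever") selects MEMORY_ATOMIC_WAIT32_A32
// through WaitPatNoOffset above and yields 0 ("ok"), 1 ("not-equal") or
// 2 ("timed-out") at run time.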
203
204//===----------------------------------------------------------------------===//
205// Atomic fences
206//===----------------------------------------------------------------------===//
207
208// A compiler fence instruction that prevents reordering of instructions.
209let Defs = [ARGUMENTS] in {
210let isPseudo = 1, hasSideEffects = 1 in
211defm COMPILER_FENCE : ATOMIC_NRI<(outs), (ins), [], "compiler_fence">;
212let hasSideEffects = 1 in
213defm ATOMIC_FENCE : ATOMIC_NRI<(outs), (ins i8imm:$flags), [], "atomic.fence",
214                               0x03>;
215} // Defs = [ARGUMENTS]
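
// Editorial note (a sketch of how these are expected to be used): a
// cross-thread IR fence lowers to ATOMIC_FENCE, whose $flags immediate is
// currently required to be 0 by the threads proposal, while a
// syncscope("singlethread") fence only needs the no-op COMPILER_FENCE pseudo
// to keep the compiler from reordering memory operations around it.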
216
217//===----------------------------------------------------------------------===//
218// Atomic loads
219//===----------------------------------------------------------------------===//
220
221multiclass AtomicLoad<WebAssemblyRegClass rc, string name, int atomic_op> {
222  defm "" : WebAssemblyLoad<rc, name, !or(0xfe00, !and(0xff, atomic_op)),
223                            [HasAtomics]>;
224}
225
226defm ATOMIC_LOAD_I32 : AtomicLoad<I32, "i32.atomic.load", 0x10>;
227defm ATOMIC_LOAD_I64 : AtomicLoad<I64, "i64.atomic.load", 0x11>;
228
229// Select loads with no constant offset.
230defm : LoadPatNoOffset<i32, atomic_load_32, "ATOMIC_LOAD_I32">;
231defm : LoadPatNoOffset<i64, atomic_load_64, "ATOMIC_LOAD_I64">;
232
233// Select loads with a constant offset.
234
235// Pattern with address + immediate offset
236defm : LoadPatImmOff<i32, atomic_load_32, regPlusImm, "ATOMIC_LOAD_I32">;
237defm : LoadPatImmOff<i64, atomic_load_64, regPlusImm, "ATOMIC_LOAD_I64">;
238defm : LoadPatImmOff<i32, atomic_load_32, or_is_add, "ATOMIC_LOAD_I32">;
239defm : LoadPatImmOff<i64, atomic_load_64, or_is_add, "ATOMIC_LOAD_I64">;
240
241// Select loads with just a constant offset.
242defm : LoadPatOffsetOnly<i32, atomic_load_32, "ATOMIC_LOAD_I32">;
243defm : LoadPatOffsetOnly<i64, atomic_load_64, "ATOMIC_LOAD_I64">;
244
245defm : LoadPatGlobalAddrOffOnly<i32, atomic_load_32, "ATOMIC_LOAD_I32">;
246defm : LoadPatGlobalAddrOffOnly<i64, atomic_load_64, "ATOMIC_LOAD_I64">;
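
// For example (an editorial sketch): "load atomic i32, i32* %p seq_cst,
// align 4" reaches selection as (atomic_load_32 $p) and, via LoadPatNoOffset
// above, becomes ATOMIC_LOAD_I32_A32 (i32.atomic.load) with a zero constant
// offset.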
247
248
249// Extending loads. Note that there are only zero-extending atomic loads, no
250// sign-extending loads.
251defm ATOMIC_LOAD8_U_I32 : AtomicLoad<I32, "i32.atomic.load8_u", 0x12>;
252defm ATOMIC_LOAD16_U_I32 : AtomicLoad<I32, "i32.atomic.load16_u", 0x13>;
253defm ATOMIC_LOAD8_U_I64 : AtomicLoad<I64, "i64.atomic.load8_u", 0x14>;
254defm ATOMIC_LOAD16_U_I64 : AtomicLoad<I64, "i64.atomic.load16_u", 0x15>;
255defm ATOMIC_LOAD32_U_I64 : AtomicLoad<I64, "i64.atomic.load32_u", 0x16>;
256
257// Fragments for extending loads. These are different from regular loads because
258// the SDNodes are derived from AtomicSDNode rather than LoadSDNode and
259// therefore don't have the extension type field. So instead of matching that,
260// we match the patterns that the type legalizer expands them to.
261
262// We directly match zext patterns and select the zext atomic loads.
263// i32 (zext (i8 (atomic_load_8))) gets legalized to
264// i32 (and (i32 (atomic_load_8)), 255)
265// These can be selected to a single zero-extending atomic load instruction.
266def zext_aload_8_32 :
267  PatFrag<(ops node:$addr), (and (i32 (atomic_load_8 node:$addr)), 255)>;
268def zext_aload_16_32 :
269  PatFrag<(ops node:$addr), (and (i32 (atomic_load_16 node:$addr)), 65535)>;
270// Unlike regular loads, extension to i64 is handled differently than i32.
271// i64 (zext (i8 (atomic_load_8))) gets legalized to
272// i64 (and (i64 (anyext (i32 (atomic_load_8)))), 255)
273def zext_aload_8_64 :
274  PatFrag<(ops node:$addr),
275          (and (i64 (anyext (i32 (atomic_load_8 node:$addr)))), 255)>;
276def zext_aload_16_64 :
277  PatFrag<(ops node:$addr),
278          (and (i64 (anyext (i32 (atomic_load_16 node:$addr)))), 65535)>;
279def zext_aload_32_64 :
280  PatFrag<(ops node:$addr),
281          (zext (i32 (atomic_load node:$addr)))>;
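
// For illustration (an editorial sketch, not taken from the test suite):
//   %b = load atomic i8, i8* %p seq_cst, align 1
//   %z = zext i8 %b to i32
// reaches instruction selection as (and (i32 (atomic_load_8 %p)), 255), which
// zext_aload_8_32 matches so that a single i32.atomic.load8_u is emitted.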
282
283// We don't have single sext atomic load instructions. So for sext loads, we
284// match bare subword loads (for 32-bit results) and anyext loads (for 64-bit
285// results) and select a zext load; the next instruction will be sext_inreg
286// which is selected by itself.
287def sext_aload_8_64 :
288  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_8 node:$addr)))>;
289def sext_aload_16_64 :
290  PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_16 node:$addr)))>;
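
// For example (editorial sketch): i32 (sext (i8 (atomic_load_8 %p))) is seen
// here as a bare atomic_load_8 plus a separate sext_inreg node; the load is
// selected to i32.atomic.load8_u below and the sext_inreg is selected on its
// own (e.g. to i32.extend8_s when the sign-extension feature is enabled).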
291
292// Select zero-extending loads with no constant offset.
293defm : LoadPatNoOffset<i32, zext_aload_8_32, "ATOMIC_LOAD8_U_I32">;
294defm : LoadPatNoOffset<i32, zext_aload_16_32, "ATOMIC_LOAD16_U_I32">;
295defm : LoadPatNoOffset<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
296defm : LoadPatNoOffset<i64, zext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
297defm : LoadPatNoOffset<i64, zext_aload_32_64, "ATOMIC_LOAD32_U_I64">;
298
299// Select sign-extending loads with no constant offset
300defm : LoadPatNoOffset<i32, atomic_load_8, "ATOMIC_LOAD8_U_I32">;
301defm : LoadPatNoOffset<i32, atomic_load_16, "ATOMIC_LOAD16_U_I32">;
302defm : LoadPatNoOffset<i64, sext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
303defm : LoadPatNoOffset<i64, sext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
304// 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s
305
306// Zero-extending loads with constant offset
307defm : LoadPatImmOff<i32, zext_aload_8_32, regPlusImm, "ATOMIC_LOAD8_U_I32">;
308defm : LoadPatImmOff<i32, zext_aload_16_32, regPlusImm, "ATOMIC_LOAD16_U_I32">;
309defm : LoadPatImmOff<i32, zext_aload_8_32, or_is_add, "ATOMIC_LOAD8_U_I32">;
310defm : LoadPatImmOff<i32, zext_aload_16_32, or_is_add, "ATOMIC_LOAD16_U_I32">;
311defm : LoadPatImmOff<i64, zext_aload_8_64, regPlusImm, "ATOMIC_LOAD8_U_I64">;
312defm : LoadPatImmOff<i64, zext_aload_16_64, regPlusImm, "ATOMIC_LOAD16_U_I64">;
313defm : LoadPatImmOff<i64, zext_aload_32_64, regPlusImm, "ATOMIC_LOAD32_U_I64">;
314defm : LoadPatImmOff<i64, zext_aload_8_64, or_is_add, "ATOMIC_LOAD8_U_I64">;
315defm : LoadPatImmOff<i64, zext_aload_16_64, or_is_add, "ATOMIC_LOAD16_U_I64">;
316defm : LoadPatImmOff<i64, zext_aload_32_64, or_is_add, "ATOMIC_LOAD32_U_I64">;
317
318// Sign-extending loads with constant offset
319defm : LoadPatImmOff<i32, atomic_load_8, regPlusImm, "ATOMIC_LOAD8_U_I32">;
320defm : LoadPatImmOff<i32, atomic_load_16, regPlusImm, "ATOMIC_LOAD16_U_I32">;
321defm : LoadPatImmOff<i32, atomic_load_8, or_is_add, "ATOMIC_LOAD8_U_I32">;
322defm : LoadPatImmOff<i32, atomic_load_16, or_is_add, "ATOMIC_LOAD16_U_I32">;
323defm : LoadPatImmOff<i64, sext_aload_8_64, regPlusImm, "ATOMIC_LOAD8_U_I64">;
324defm : LoadPatImmOff<i64, sext_aload_16_64, regPlusImm, "ATOMIC_LOAD16_U_I64">;
325defm : LoadPatImmOff<i64, sext_aload_8_64, or_is_add, "ATOMIC_LOAD8_U_I64">;
326defm : LoadPatImmOff<i64, sext_aload_16_64, or_is_add, "ATOMIC_LOAD16_U_I64">;
327// No 32->64 patterns, just use i32.atomic.load and i64.extend_i32_s
328
329// Extending loads with just a constant offset
330defm : LoadPatOffsetOnly<i32, zext_aload_8_32, "ATOMIC_LOAD8_U_I32">;
331defm : LoadPatOffsetOnly<i32, zext_aload_16_32, "ATOMIC_LOAD16_U_I32">;
332defm : LoadPatOffsetOnly<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
333defm : LoadPatOffsetOnly<i64, zext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
334defm : LoadPatOffsetOnly<i64, zext_aload_32_64, "ATOMIC_LOAD32_U_I64">;
335defm : LoadPatOffsetOnly<i32, atomic_load_8, "ATOMIC_LOAD8_U_I32">;
336defm : LoadPatOffsetOnly<i32, atomic_load_16, "ATOMIC_LOAD16_U_I32">;
337defm : LoadPatOffsetOnly<i64, sext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
338defm : LoadPatOffsetOnly<i64, sext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
339
340defm : LoadPatGlobalAddrOffOnly<i32, zext_aload_8_32, "ATOMIC_LOAD8_U_I32">;
341defm : LoadPatGlobalAddrOffOnly<i32, zext_aload_16_32, "ATOMIC_LOAD16_U_I32">;
342defm : LoadPatGlobalAddrOffOnly<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
343defm : LoadPatGlobalAddrOffOnly<i64, zext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
344defm : LoadPatGlobalAddrOffOnly<i64, zext_aload_32_64, "ATOMIC_LOAD32_U_I64">;
345defm : LoadPatGlobalAddrOffOnly<i32, atomic_load_8, "ATOMIC_LOAD8_U_I32">;
346defm : LoadPatGlobalAddrOffOnly<i32, atomic_load_16, "ATOMIC_LOAD16_U_I32">;
347defm : LoadPatGlobalAddrOffOnly<i64, sext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
348defm : LoadPatGlobalAddrOffOnly<i64, sext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
349
350
351//===----------------------------------------------------------------------===//
352// Atomic stores
353//===----------------------------------------------------------------------===//
354
355multiclass AtomicStore<WebAssemblyRegClass rc, string name, int atomic_op> {
356  defm "" : WebAssemblyStore<rc, name, !or(0xfe00, !and(0xff, atomic_op)),
357                             [HasAtomics]>;
358}
359
360defm ATOMIC_STORE_I32 : AtomicStore<I32, "i32.atomic.store", 0x17>;
361defm ATOMIC_STORE_I64 : AtomicStore<I64, "i64.atomic.store", 0x18>;
362
363// We need an 'atomic' version of store patterns because store and atomic_store
364// nodes have different operand orders:
365// store: (store $val, $ptr)
366// atomic_store: (atomic_store $ptr, $val)
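// For example (editorial sketch): "store atomic i32 %v, i32* %p seq_cst,
// align 4" appears in the DAG as (atomic_store_32 $p, $v), address first,
// which is why the patterns below take $addr before $val.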
367
368
369// Select stores with no constant offset.
370multiclass AStorePatNoOffset<ValueType ty, PatFrag kind, string inst> {
371  def : Pat<(kind I32:$addr, ty:$val),
372            (!cast<NI>(inst#_A32) 0, 0, I32:$addr, ty:$val)>,
373        Requires<[HasAddr32, HasAtomics]>;
374  def : Pat<(kind I64:$addr, ty:$val),
375            (!cast<NI>(inst#_A64) 0, 0, I64:$addr, ty:$val)>,
376        Requires<[HasAddr64, HasAtomics]>;
377}
378defm : AStorePatNoOffset<i32, atomic_store_32, "ATOMIC_STORE_I32">;
379defm : AStorePatNoOffset<i64, atomic_store_64, "ATOMIC_STORE_I64">;
380
381// Select stores with a constant offset.
382
383// Pattern with address + immediate offset
384multiclass AStorePatImmOff<ValueType ty, PatFrag kind, PatFrag operand,
385                           string inst> {
386  def : Pat<(kind (operand I32:$addr, imm:$off), ty:$val),
387            (!cast<NI>(inst#_A32) 0, imm:$off, I32:$addr, ty:$val)>,
388        Requires<[HasAddr32, HasAtomics]>;
389  def : Pat<(kind (operand I64:$addr, imm:$off), ty:$val),
390            (!cast<NI>(inst#_A64) 0, imm:$off, I64:$addr, ty:$val)>,
391        Requires<[HasAddr64, HasAtomics]>;
392}
393defm : AStorePatImmOff<i32, atomic_store_32, regPlusImm, "ATOMIC_STORE_I32">;
394defm : AStorePatImmOff<i64, atomic_store_64, regPlusImm, "ATOMIC_STORE_I64">;
395
396// Select stores with just a constant offset.
397multiclass AStorePatOffsetOnly<ValueType ty, PatFrag kind, string inst> {
398  def : Pat<(kind imm:$off, ty:$val),
399            (!cast<NI>(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$val)>,
400        Requires<[HasAddr32, HasAtomics]>;
401  def : Pat<(kind imm:$off, ty:$val),
402            (!cast<NI>(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$val)>,
403        Requires<[HasAddr64, HasAtomics]>;
404}
405defm : AStorePatOffsetOnly<i32, atomic_store_32, "ATOMIC_STORE_I32">;
406defm : AStorePatOffsetOnly<i64, atomic_store_64, "ATOMIC_STORE_I64">;
407
408multiclass AStorePatGlobalAddrOffOnly<ValueType ty, PatFrag kind, string inst> {
409  def : Pat<(kind (WebAssemblywrapper tglobaladdr:$off), ty:$val),
410            (!cast<NI>(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>,
411        Requires<[HasAddr32, HasAtomics, IsNotPIC]>;
412  def : Pat<(kind (WebAssemblywrapper tglobaladdr:$off), ty:$val),
413            (!cast<NI>(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$val)>,
414        Requires<[HasAddr64, HasAtomics, IsNotPIC]>;
415}
416defm : AStorePatGlobalAddrOffOnly<i32, atomic_store_32, "ATOMIC_STORE_I32">;
417defm : AStorePatGlobalAddrOffOnly<i64, atomic_store_64, "ATOMIC_STORE_I64">;
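
// For example (editorial sketch, non-PIC only): storing to a global such as
// "store atomic i32 %v, i32* @g seq_cst, align 4" can place the address of @g
// entirely in the offset immediate, with the address operand materialized as a
// constant 0 (the CONST_I32/CONST_I64 above). This is why the patterns carry
// the IsNotPIC predicate: the folded offset must be a link-time constant.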
418
419
420// Truncating stores.
421defm ATOMIC_STORE8_I32 : AtomicStore<I32, "i32.atomic.store8", 0x19>;
422defm ATOMIC_STORE16_I32 : AtomicStore<I32, "i32.atomic.store16", 0x1a>;
423defm ATOMIC_STORE8_I64 : AtomicStore<I64, "i64.atomic.store8", 0x1b>;
424defm ATOMIC_STORE16_I64 : AtomicStore<I64, "i64.atomic.store16", 0x1c>;
425defm ATOMIC_STORE32_I64 : AtomicStore<I64, "i64.atomic.store32", 0x1d>;
426
427// Fragments for truncating stores.
428
429// We don't have single truncating atomic store instructions. For 32-bit
430// instructions, we just need to match bare atomic stores. On the other hand,
431// truncating stores from i64 values are first truncated to i32.
432class trunc_astore_64<PatFrag kind> :
433  PatFrag<(ops node:$addr, node:$val),
434          (kind node:$addr, (i32 (trunc (i64 node:$val))))>;
435def trunc_astore_8_64 : trunc_astore_64<atomic_store_8>;
436def trunc_astore_16_64 : trunc_astore_64<atomic_store_16>;
437def trunc_astore_32_64 : trunc_astore_64<atomic_store_32>;
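
// For illustration (an editorial sketch): with
//   %t = trunc i64 %v to i8
//   store atomic i8 %t, i8* %p seq_cst, align 1
// the store value is promoted to i32, so the DAG seen here is
// (atomic_store_8 $p, (i32 (trunc (i64 $v)))), which trunc_astore_8_64
// matches and selects to i64.atomic.store8 on the original i64 register.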
438
439
440// Truncating stores with no constant offset
441defm : AStorePatNoOffset<i32, atomic_store_8, "ATOMIC_STORE8_I32">;
442defm : AStorePatNoOffset<i32, atomic_store_16, "ATOMIC_STORE16_I32">;
443defm : AStorePatNoOffset<i64, trunc_astore_8_64, "ATOMIC_STORE8_I64">;
444defm : AStorePatNoOffset<i64, trunc_astore_16_64, "ATOMIC_STORE16_I64">;
445defm : AStorePatNoOffset<i64, trunc_astore_32_64, "ATOMIC_STORE32_I64">;
446
447// Truncating stores with a constant offset
448defm : AStorePatImmOff<i32, atomic_store_8, regPlusImm, "ATOMIC_STORE8_I32">;
449defm : AStorePatImmOff<i32, atomic_store_16, regPlusImm, "ATOMIC_STORE16_I32">;
450defm : AStorePatImmOff<i64, trunc_astore_8_64, regPlusImm, "ATOMIC_STORE8_I64">;
451defm : AStorePatImmOff<i64, trunc_astore_16_64, regPlusImm,
452                       "ATOMIC_STORE16_I64">;
453defm : AStorePatImmOff<i64, trunc_astore_32_64, regPlusImm,
454                       "ATOMIC_STORE32_I64">;
455defm : AStorePatImmOff<i32, atomic_store_8, or_is_add, "ATOMIC_STORE8_I32">;
456defm : AStorePatImmOff<i32, atomic_store_16, or_is_add, "ATOMIC_STORE16_I32">;
457defm : AStorePatImmOff<i64, trunc_astore_8_64, or_is_add, "ATOMIC_STORE8_I64">;
458defm : AStorePatImmOff<i64, trunc_astore_16_64, or_is_add,
459                       "ATOMIC_STORE16_I64">;
460defm : AStorePatImmOff<i64, trunc_astore_32_64, or_is_add,
461                       "ATOMIC_STORE32_I64">;
462
463// Truncating stores with just a constant offset
464defm : AStorePatOffsetOnly<i32, atomic_store_8, "ATOMIC_STORE8_I32">;
465defm : AStorePatOffsetOnly<i32, atomic_store_16, "ATOMIC_STORE16_I32">;
466defm : AStorePatOffsetOnly<i64, trunc_astore_8_64, "ATOMIC_STORE8_I64">;
467defm : AStorePatOffsetOnly<i64, trunc_astore_16_64, "ATOMIC_STORE16_I64">;
468defm : AStorePatOffsetOnly<i64, trunc_astore_32_64, "ATOMIC_STORE32_I64">;
469
470defm : AStorePatGlobalAddrOffOnly<i32, atomic_store_8, "ATOMIC_STORE8_I32">;
471defm : AStorePatGlobalAddrOffOnly<i32, atomic_store_16, "ATOMIC_STORE16_I32">;
472defm : AStorePatGlobalAddrOffOnly<i64, trunc_astore_8_64, "ATOMIC_STORE8_I64">;
473defm : AStorePatGlobalAddrOffOnly<i64, trunc_astore_16_64, "ATOMIC_STORE16_I64">;
474defm : AStorePatGlobalAddrOffOnly<i64, trunc_astore_32_64, "ATOMIC_STORE32_I64">;
475
476
477//===----------------------------------------------------------------------===//
478// Atomic binary read-modify-writes
479//===----------------------------------------------------------------------===//
480
481multiclass WebAssemblyBinRMW<WebAssemblyRegClass rc, string name,
482                             int atomic_op> {
483  defm "_A32" :
484    ATOMIC_I<(outs rc:$dst),
485             (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$val),
486             (outs), (ins P2Align:$p2align, offset32_op:$off), [],
487             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"),
488             !strconcat(name, "\t${off}${p2align}"), atomic_op, "false">;
489  defm "_A64" :
490    ATOMIC_I<(outs rc:$dst),
491             (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$val),
492             (outs), (ins P2Align:$p2align, offset64_op:$off), [],
493             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $val"),
494             !strconcat(name, "\t${off}${p2align}"), atomic_op, "true">;
495}
496
497defm ATOMIC_RMW_ADD_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.add", 0x1e>;
498defm ATOMIC_RMW_ADD_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.add", 0x1f>;
499defm ATOMIC_RMW8_U_ADD_I32 :
500  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.add_u", 0x20>;
501defm ATOMIC_RMW16_U_ADD_I32 :
502  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.add_u", 0x21>;
503defm ATOMIC_RMW8_U_ADD_I64 :
504  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.add_u", 0x22>;
505defm ATOMIC_RMW16_U_ADD_I64 :
506  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.add_u", 0x23>;
507defm ATOMIC_RMW32_U_ADD_I64 :
508  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.add_u", 0x24>;
509
510defm ATOMIC_RMW_SUB_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.sub", 0x25>;
511defm ATOMIC_RMW_SUB_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.sub", 0x26>;
512defm ATOMIC_RMW8_U_SUB_I32 :
513  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.sub_u", 0x27>;
514defm ATOMIC_RMW16_U_SUB_I32 :
515  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.sub_u", 0x28>;
516defm ATOMIC_RMW8_U_SUB_I64 :
517  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.sub_u", 0x29>;
518defm ATOMIC_RMW16_U_SUB_I64 :
519  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.sub_u", 0x2a>;
520defm ATOMIC_RMW32_U_SUB_I64 :
521  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.sub_u", 0x2b>;
522
523defm ATOMIC_RMW_AND_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.and", 0x2c>;
524defm ATOMIC_RMW_AND_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.and", 0x2d>;
525defm ATOMIC_RMW8_U_AND_I32 :
526  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.and_u", 0x2e>;
527defm ATOMIC_RMW16_U_AND_I32 :
528  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.and_u", 0x2f>;
529defm ATOMIC_RMW8_U_AND_I64 :
530  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.and_u", 0x30>;
531defm ATOMIC_RMW16_U_AND_I64 :
532  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.and_u", 0x31>;
533defm ATOMIC_RMW32_U_AND_I64 :
534  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.and_u", 0x32>;
535
536defm ATOMIC_RMW_OR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.or", 0x33>;
537defm ATOMIC_RMW_OR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.or", 0x34>;
538defm ATOMIC_RMW8_U_OR_I32 :
539  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.or_u", 0x35>;
540defm ATOMIC_RMW16_U_OR_I32 :
541  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.or_u", 0x36>;
542defm ATOMIC_RMW8_U_OR_I64 :
543  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.or_u", 0x37>;
544defm ATOMIC_RMW16_U_OR_I64 :
545  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.or_u", 0x38>;
546defm ATOMIC_RMW32_U_OR_I64 :
547  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.or_u", 0x39>;
548
549defm ATOMIC_RMW_XOR_I32 : WebAssemblyBinRMW<I32, "i32.atomic.rmw.xor", 0x3a>;
550defm ATOMIC_RMW_XOR_I64 : WebAssemblyBinRMW<I64, "i64.atomic.rmw.xor", 0x3b>;
551defm ATOMIC_RMW8_U_XOR_I32 :
552  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xor_u", 0x3c>;
553defm ATOMIC_RMW16_U_XOR_I32 :
554  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xor_u", 0x3d>;
555defm ATOMIC_RMW8_U_XOR_I64 :
556  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xor_u", 0x3e>;
557defm ATOMIC_RMW16_U_XOR_I64 :
558  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xor_u", 0x3f>;
559defm ATOMIC_RMW32_U_XOR_I64 :
560  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xor_u", 0x40>;
561
562defm ATOMIC_RMW_XCHG_I32 :
563  WebAssemblyBinRMW<I32, "i32.atomic.rmw.xchg", 0x41>;
564defm ATOMIC_RMW_XCHG_I64 :
565  WebAssemblyBinRMW<I64, "i64.atomic.rmw.xchg", 0x42>;
566defm ATOMIC_RMW8_U_XCHG_I32 :
567  WebAssemblyBinRMW<I32, "i32.atomic.rmw8.xchg_u", 0x43>;
568defm ATOMIC_RMW16_U_XCHG_I32 :
569  WebAssemblyBinRMW<I32, "i32.atomic.rmw16.xchg_u", 0x44>;
570defm ATOMIC_RMW8_U_XCHG_I64 :
571  WebAssemblyBinRMW<I64, "i64.atomic.rmw8.xchg_u", 0x45>;
572defm ATOMIC_RMW16_U_XCHG_I64 :
573  WebAssemblyBinRMW<I64, "i64.atomic.rmw16.xchg_u", 0x46>;
574defm ATOMIC_RMW32_U_XCHG_I64 :
575  WebAssemblyBinRMW<I64, "i64.atomic.rmw32.xchg_u", 0x47>;
576
577// Select binary RMWs with no constant offset.
578multiclass BinRMWPatNoOffset<ValueType ty, PatFrag kind, string inst> {
579  def : Pat<(ty (kind I32:$addr, ty:$val)),
580            (!cast<NI>(inst#_A32) 0, 0, I32:$addr, ty:$val)>,
581        Requires<[HasAddr32, HasAtomics]>;
582  def : Pat<(ty (kind I64:$addr, ty:$val)),
583            (!cast<NI>(inst#_A64) 0, 0, I64:$addr, ty:$val)>,
584        Requires<[HasAddr64, HasAtomics]>;
585}
586
587// Select binary RMWs with a constant offset.
588
589// Pattern with address + immediate offset
590multiclass BinRMWPatImmOff<ValueType ty, PatFrag kind, PatFrag operand,
591                           string inst> {
592  def : Pat<(ty (kind (operand I32:$addr, imm:$off), ty:$val)),
593            (!cast<NI>(inst#_A32) 0, imm:$off, I32:$addr, ty:$val)>,
594        Requires<[HasAddr32, HasAtomics]>;
595  def : Pat<(ty (kind (operand I64:$addr, imm:$off), ty:$val)),
596            (!cast<NI>(inst#_A64) 0, imm:$off, I64:$addr, ty:$val)>,
597        Requires<[HasAddr64, HasAtomics]>;
598}
599
600// Select binary RMWs with just a constant offset.
601multiclass BinRMWPatOffsetOnly<ValueType ty, PatFrag kind, string inst> {
602  def : Pat<(ty (kind imm:$off, ty:$val)),
603            (!cast<NI>(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$val)>,
604        Requires<[HasAddr32, HasAtomics]>;
605  def : Pat<(ty (kind imm:$off, ty:$val)),
606            (!cast<NI>(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$val)>,
607        Requires<[HasAddr64, HasAtomics]>;
608}
609
610multiclass BinRMWPatGlobalAddrOffOnly<ValueType ty, PatFrag kind, string inst> {
611  def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$val)),
612            (!cast<NI>(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>,
613        Requires<[HasAddr32, HasAtomics, IsNotPIC]>;
614  def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$val)),
615            (!cast<NI>(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$val)>,
616        Requires<[HasAddr64, HasAtomics, IsNotPIC]>;
617}
618
619// Patterns for various addressing modes.
620multiclass BinRMWPattern<PatFrag rmw_32, PatFrag rmw_64, string inst_32,
621                         string inst_64> {
622  defm : BinRMWPatNoOffset<i32, rmw_32, inst_32>;
623  defm : BinRMWPatNoOffset<i64, rmw_64, inst_64>;
624
625  defm : BinRMWPatImmOff<i32, rmw_32, regPlusImm, inst_32>;
626  defm : BinRMWPatImmOff<i64, rmw_64, regPlusImm, inst_64>;
627  defm : BinRMWPatImmOff<i32, rmw_32, or_is_add, inst_32>;
628  defm : BinRMWPatImmOff<i64, rmw_64, or_is_add, inst_64>;
629
630  defm : BinRMWPatOffsetOnly<i32, rmw_32, inst_32>;
631  defm : BinRMWPatOffsetOnly<i64, rmw_64, inst_64>;
632
633  defm : BinRMWPatGlobalAddrOffOnly<i32, rmw_32, inst_32>;
634  defm : BinRMWPatGlobalAddrOffOnly<i64, rmw_64, inst_64>;
635}
636
637defm : BinRMWPattern<atomic_load_add_32, atomic_load_add_64,
638                     "ATOMIC_RMW_ADD_I32", "ATOMIC_RMW_ADD_I64">;
639defm : BinRMWPattern<atomic_load_sub_32, atomic_load_sub_64,
640                     "ATOMIC_RMW_SUB_I32", "ATOMIC_RMW_SUB_I64">;
641defm : BinRMWPattern<atomic_load_and_32, atomic_load_and_64,
642                     "ATOMIC_RMW_AND_I32", "ATOMIC_RMW_AND_I64">;
643defm : BinRMWPattern<atomic_load_or_32, atomic_load_or_64,
644                     "ATOMIC_RMW_OR_I32", "ATOMIC_RMW_OR_I64">;
645defm : BinRMWPattern<atomic_load_xor_32, atomic_load_xor_64,
646                     "ATOMIC_RMW_XOR_I32", "ATOMIC_RMW_XOR_I64">;
647defm : BinRMWPattern<atomic_swap_32, atomic_swap_64,
648                     "ATOMIC_RMW_XCHG_I32", "ATOMIC_RMW_XCHG_I64">;
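
// For example (an editorial sketch): "atomicrmw add i32* %p, i32 %v seq_cst"
// reaches selection as (atomic_load_add_32 $p, $v) and, through the
// BinRMWPatNoOffset case of BinRMWPattern above, becomes i32.atomic.rmw.add
// with a zero constant offset.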
649
650// Truncating & zero-extending binary RMW patterns.
651// These are combined patterns of truncating store patterns and zero-extending
652// load patterns above.
653class zext_bin_rmw_8_32<PatFrag kind> :
654  PatFrag<(ops node:$addr, node:$val),
655          (and (i32 (kind node:$addr, node:$val)), 255)>;
656class zext_bin_rmw_16_32<PatFrag kind> :
657  PatFrag<(ops node:$addr, node:$val),
658          (and (i32 (kind node:$addr, node:$val)), 65535)>;
659class zext_bin_rmw_8_64<PatFrag kind> :
660  PatFrag<(ops node:$addr, node:$val),
661    (and (i64 (anyext (i32 (kind node:$addr,
662                                 (i32 (trunc (i64 node:$val))))))), 255)>;
663class zext_bin_rmw_16_64<PatFrag kind> :
664  PatFrag<(ops node:$addr, node:$val),
665    (and (i64 (anyext (i32 (kind node:$addr,
666                                 (i32 (trunc (i64 node:$val))))))), 65535)>;
667class zext_bin_rmw_32_64<PatFrag kind> :
668  PatFrag<(ops node:$addr, node:$val),
669          (zext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>;
670
671// Truncating & sign-extending binary RMW patterns.
672// These are combined patterns of truncating store patterns and sign-extending
673// load patterns above. We match subword RMWs (for 32-bit) and anyext RMWs (for
674// 64-bit) and select a zext RMW; the next instruction will be sext_inreg which
675// is selected by itself.
676class sext_bin_rmw_8_32<PatFrag kind> :
677  PatFrag<(ops node:$addr, node:$val), (kind node:$addr, node:$val)>;
678class sext_bin_rmw_16_32<PatFrag kind> : sext_bin_rmw_8_32<kind>;
679class sext_bin_rmw_8_64<PatFrag kind> :
680  PatFrag<(ops node:$addr, node:$val),
681          (anyext (i32 (kind node:$addr, (i32 (trunc (i64 node:$val))))))>;
682class sext_bin_rmw_16_64<PatFrag kind> : sext_bin_rmw_8_64<kind>;
683// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
684
685// Patterns for various addressing modes for truncating-extending binary RMWs.
686multiclass BinRMWTruncExtPattern<
687  PatFrag rmw_8, PatFrag rmw_16, PatFrag rmw_32, PatFrag rmw_64,
688  string inst8_32, string inst16_32, string inst8_64, string inst16_64,
  string inst32_64> {
689  // Truncating-extending binary RMWs with no constant offset
690  defm : BinRMWPatNoOffset<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>;
691  defm : BinRMWPatNoOffset<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>;
692  defm : BinRMWPatNoOffset<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>;
693  defm : BinRMWPatNoOffset<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>;
694  defm : BinRMWPatNoOffset<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>;
695
696  defm : BinRMWPatNoOffset<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>;
697  defm : BinRMWPatNoOffset<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>;
698  defm : BinRMWPatNoOffset<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>;
699  defm : BinRMWPatNoOffset<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>;
700
701  // Truncating-extending binary RMWs with a constant offset
702  defm : BinRMWPatImmOff<i32, zext_bin_rmw_8_32<rmw_8>, regPlusImm, inst8_32>;
703  defm : BinRMWPatImmOff<i32, zext_bin_rmw_16_32<rmw_16>, regPlusImm,
704                         inst16_32>;
705  defm : BinRMWPatImmOff<i64, zext_bin_rmw_8_64<rmw_8>, regPlusImm, inst8_64>;
706  defm : BinRMWPatImmOff<i64, zext_bin_rmw_16_64<rmw_16>, regPlusImm,
707                         inst16_64>;
708  defm : BinRMWPatImmOff<i64, zext_bin_rmw_32_64<rmw_32>, regPlusImm,
709                         inst32_64>;
710  defm : BinRMWPatImmOff<i32, zext_bin_rmw_8_32<rmw_8>, or_is_add, inst8_32>;
711  defm : BinRMWPatImmOff<i32, zext_bin_rmw_16_32<rmw_16>, or_is_add, inst16_32>;
712  defm : BinRMWPatImmOff<i64, zext_bin_rmw_8_64<rmw_8>, or_is_add, inst8_64>;
713  defm : BinRMWPatImmOff<i64, zext_bin_rmw_16_64<rmw_16>, or_is_add, inst16_64>;
714  defm : BinRMWPatImmOff<i64, zext_bin_rmw_32_64<rmw_32>, or_is_add, inst32_64>;
715
716  defm : BinRMWPatImmOff<i32, sext_bin_rmw_8_32<rmw_8>, regPlusImm, inst8_32>;
717  defm : BinRMWPatImmOff<i32, sext_bin_rmw_16_32<rmw_16>, regPlusImm,
718                         inst16_32>;
719  defm : BinRMWPatImmOff<i64, sext_bin_rmw_8_64<rmw_8>, regPlusImm, inst8_64>;
720  defm : BinRMWPatImmOff<i64, sext_bin_rmw_16_64<rmw_16>, regPlusImm,
721                         inst16_64>;
722  defm : BinRMWPatImmOff<i32, sext_bin_rmw_8_32<rmw_8>, or_is_add, inst8_32>;
723  defm : BinRMWPatImmOff<i32, sext_bin_rmw_16_32<rmw_16>, or_is_add, inst16_32>;
724  defm : BinRMWPatImmOff<i64, sext_bin_rmw_8_64<rmw_8>, or_is_add, inst8_64>;
725  defm : BinRMWPatImmOff<i64, sext_bin_rmw_16_64<rmw_16>, or_is_add, inst16_64>;
726
727  // Truncating-extending binary RMWs with just a constant offset
728  defm : BinRMWPatOffsetOnly<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>;
729  defm : BinRMWPatOffsetOnly<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>;
730  defm : BinRMWPatOffsetOnly<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>;
731  defm : BinRMWPatOffsetOnly<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>;
732  defm : BinRMWPatOffsetOnly<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>;
733
734  defm : BinRMWPatOffsetOnly<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>;
735  defm : BinRMWPatOffsetOnly<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>;
736  defm : BinRMWPatOffsetOnly<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>;
737  defm : BinRMWPatOffsetOnly<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>;
738
739  defm : BinRMWPatGlobalAddrOffOnly<i32, zext_bin_rmw_8_32<rmw_8>, inst8_32>;
740  defm : BinRMWPatGlobalAddrOffOnly<i32, zext_bin_rmw_16_32<rmw_16>, inst16_32>;
741  defm : BinRMWPatGlobalAddrOffOnly<i64, zext_bin_rmw_8_64<rmw_8>, inst8_64>;
742  defm : BinRMWPatGlobalAddrOffOnly<i64, zext_bin_rmw_16_64<rmw_16>, inst16_64>;
743  defm : BinRMWPatGlobalAddrOffOnly<i64, zext_bin_rmw_32_64<rmw_32>, inst32_64>;
744
745  defm : BinRMWPatGlobalAddrOffOnly<i32, sext_bin_rmw_8_32<rmw_8>, inst8_32>;
746  defm : BinRMWPatGlobalAddrOffOnly<i32, sext_bin_rmw_16_32<rmw_16>, inst16_32>;
747  defm : BinRMWPatGlobalAddrOffOnly<i64, sext_bin_rmw_8_64<rmw_8>, inst8_64>;
748  defm : BinRMWPatGlobalAddrOffOnly<i64, sext_bin_rmw_16_64<rmw_16>, inst16_64>;
749}
750
751defm : BinRMWTruncExtPattern<
752  atomic_load_add_8, atomic_load_add_16, atomic_load_add_32, atomic_load_add_64,
753  "ATOMIC_RMW8_U_ADD_I32", "ATOMIC_RMW16_U_ADD_I32",
754  "ATOMIC_RMW8_U_ADD_I64", "ATOMIC_RMW16_U_ADD_I64", "ATOMIC_RMW32_U_ADD_I64">;
755defm : BinRMWTruncExtPattern<
756  atomic_load_sub_8, atomic_load_sub_16, atomic_load_sub_32, atomic_load_sub_64,
757  "ATOMIC_RMW8_U_SUB_I32", "ATOMIC_RMW16_U_SUB_I32",
758  "ATOMIC_RMW8_U_SUB_I64", "ATOMIC_RMW16_U_SUB_I64", "ATOMIC_RMW32_U_SUB_I64">;
759defm : BinRMWTruncExtPattern<
760  atomic_load_and_8, atomic_load_and_16, atomic_load_and_32, atomic_load_and_64,
761  "ATOMIC_RMW8_U_AND_I32", "ATOMIC_RMW16_U_AND_I32",
762  "ATOMIC_RMW8_U_AND_I64", "ATOMIC_RMW16_U_AND_I64", "ATOMIC_RMW32_U_AND_I64">;
763defm : BinRMWTruncExtPattern<
764  atomic_load_or_8, atomic_load_or_16, atomic_load_or_32, atomic_load_or_64,
765  "ATOMIC_RMW8_U_OR_I32", "ATOMIC_RMW16_U_OR_I32",
766  "ATOMIC_RMW8_U_OR_I64", "ATOMIC_RMW16_U_OR_I64", "ATOMIC_RMW32_U_OR_I64">;
767defm : BinRMWTruncExtPattern<
768  atomic_load_xor_8, atomic_load_xor_16, atomic_load_xor_32, atomic_load_xor_64,
769  "ATOMIC_RMW8_U_XOR_I32", "ATOMIC_RMW16_U_XOR_I32",
770  "ATOMIC_RMW8_U_XOR_I64", "ATOMIC_RMW16_U_XOR_I64", "ATOMIC_RMW32_U_XOR_I64">;
771defm : BinRMWTruncExtPattern<
772  atomic_swap_8, atomic_swap_16, atomic_swap_32, atomic_swap_64,
773  "ATOMIC_RMW8_U_XCHG_I32", "ATOMIC_RMW16_U_XCHG_I32",
774  "ATOMIC_RMW8_U_XCHG_I64", "ATOMIC_RMW16_U_XCHG_I64",
775  "ATOMIC_RMW32_U_XCHG_I64">;
776
777//===----------------------------------------------------------------------===//
778// Atomic ternary read-modify-writes
779//===----------------------------------------------------------------------===//
780
781// TODO LLVM IR's cmpxchg instruction returns a pair of {loaded value, success
782// flag}. When we use the success flag or both values, we can't make use of i64
783// truncate/extend versions of instructions for now, which is suboptimal.
784// Consider adding a pass after instruction selection that optimizes this case
785// if it is frequent.
786
787multiclass WebAssemblyTerRMW<WebAssemblyRegClass rc, string name,
788                             int atomic_op> {
789  defm "_A32" :
790    ATOMIC_I<(outs rc:$dst),
791             (ins P2Align:$p2align, offset32_op:$off, I32:$addr, rc:$exp,
792                  rc:$new_),
793             (outs), (ins P2Align:$p2align, offset32_op:$off), [],
794             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"),
795             !strconcat(name, "\t${off}${p2align}"), atomic_op, "false">;
796  defm "_A64" :
797    ATOMIC_I<(outs rc:$dst),
798             (ins P2Align:$p2align, offset64_op:$off, I64:$addr, rc:$exp,
799                  rc:$new_),
800             (outs), (ins P2Align:$p2align, offset64_op:$off), [],
801             !strconcat(name, "\t$dst, ${off}(${addr})${p2align}, $exp, $new_"),
802             !strconcat(name, "\t${off}${p2align}"), atomic_op, "true">;
803}
804
805defm ATOMIC_RMW_CMPXCHG_I32 :
806  WebAssemblyTerRMW<I32, "i32.atomic.rmw.cmpxchg", 0x48>;
807defm ATOMIC_RMW_CMPXCHG_I64 :
808  WebAssemblyTerRMW<I64, "i64.atomic.rmw.cmpxchg", 0x49>;
809defm ATOMIC_RMW8_U_CMPXCHG_I32 :
810  WebAssemblyTerRMW<I32, "i32.atomic.rmw8.cmpxchg_u", 0x4a>;
811defm ATOMIC_RMW16_U_CMPXCHG_I32 :
812  WebAssemblyTerRMW<I32, "i32.atomic.rmw16.cmpxchg_u", 0x4b>;
813defm ATOMIC_RMW8_U_CMPXCHG_I64 :
814  WebAssemblyTerRMW<I64, "i64.atomic.rmw8.cmpxchg_u", 0x4c>;
815defm ATOMIC_RMW16_U_CMPXCHG_I64 :
816  WebAssemblyTerRMW<I64, "i64.atomic.rmw16.cmpxchg_u", 0x4d>;
817defm ATOMIC_RMW32_U_CMPXCHG_I64 :
818  WebAssemblyTerRMW<I64, "i64.atomic.rmw32.cmpxchg_u", 0x4e>;
819
820// Select ternary RMWs with no constant offset.
821multiclass TerRMWPatNoOffset<ValueType ty, PatFrag kind, string inst> {
822  def : Pat<(ty (kind I32:$addr, ty:$exp, ty:$new)),
823            (!cast<NI>(inst#_A32) 0, 0, I32:$addr, ty:$exp, ty:$new)>,
824        Requires<[HasAddr32, HasAtomics]>;
825  def : Pat<(ty (kind I64:$addr, ty:$exp, ty:$new)),
826            (!cast<NI>(inst#_A64) 0, 0, I64:$addr, ty:$exp, ty:$new)>,
827        Requires<[HasAddr64, HasAtomics]>;
828}
829
830// Select ternary RMWs with a constant offset.
831
832// Pattern with address + immediate offset
833multiclass TerRMWPatImmOff<ValueType ty, PatFrag kind, PatFrag operand,
834                           string inst> {
835  def : Pat<(ty (kind (operand I32:$addr, imm:$off), ty:$exp, ty:$new)),
836            (!cast<NI>(inst#_A32) 0, imm:$off, I32:$addr, ty:$exp, ty:$new)>,
837        Requires<[HasAddr32, HasAtomics]>;
838  def : Pat<(ty (kind (operand I64:$addr, imm:$off), ty:$exp, ty:$new)),
839            (!cast<NI>(inst#_A64) 0, imm:$off, I64:$addr, ty:$exp, ty:$new)>,
840        Requires<[HasAddr64, HasAtomics]>;
841}
842
843// Select ternary RMWs with just a constant offset.
844multiclass TerRMWPatOffsetOnly<ValueType ty, PatFrag kind, string inst> {
845  def : Pat<(ty (kind imm:$off, ty:$exp, ty:$new)),
846            (!cast<NI>(inst#_A32) 0, imm:$off, (CONST_I32 0), ty:$exp,
847              ty:$new)>;
848              ty:$new)>,
        Requires<[HasAddr32, HasAtomics]>;
849            (!cast<NI>(inst#_A64) 0, imm:$off, (CONST_I64 0), ty:$exp,
850              ty:$new)>;
851              ty:$new)>,
        Requires<[HasAddr64, HasAtomics]>;
852
853multiclass TerRMWPatGlobalAddrOffOnly<ValueType ty, PatFrag kind, string inst> {
854  def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, ty:$new)),
855            (!cast<NI>(inst#_A32) 0, tglobaladdr:$off, (CONST_I32 0), ty:$exp,
856              ty:$new)>,
857        Requires<[HasAddr32, HasAtomics, IsNotPIC]>;
858  def : Pat<(ty (kind (WebAssemblywrapper tglobaladdr:$off), ty:$exp, ty:$new)),
859            (!cast<NI>(inst#_A64) 0, tglobaladdr:$off, (CONST_I64 0), ty:$exp,
860              ty:$new)>,
861        Requires<[HasAddr64, HasAtomics, IsNotPIC]>;
862}
863
864// Patterns for various addressing modes.
865multiclass TerRMWPattern<PatFrag rmw_32, PatFrag rmw_64, string inst_32,
866                         string inst_64> {
867  defm : TerRMWPatNoOffset<i32, rmw_32, inst_32>;
868  defm : TerRMWPatNoOffset<i64, rmw_64, inst_64>;
869
870  defm : TerRMWPatImmOff<i32, rmw_32, regPlusImm, inst_32>;
871  defm : TerRMWPatImmOff<i64, rmw_64, regPlusImm, inst_64>;
872  defm : TerRMWPatImmOff<i32, rmw_32, or_is_add, inst_32>;
873  defm : TerRMWPatImmOff<i64, rmw_64, or_is_add, inst_64>;
874
875  defm : TerRMWPatOffsetOnly<i32, rmw_32, inst_32>;
876  defm : TerRMWPatOffsetOnly<i64, rmw_64, inst_64>;
877
878  defm : TerRMWPatGlobalAddrOffOnly<i32, rmw_32, inst_32>;
879  defm : TerRMWPatGlobalAddrOffOnly<i64, rmw_64, inst_64>;
880}
881
882defm : TerRMWPattern<atomic_cmp_swap_32, atomic_cmp_swap_64,
883                     "ATOMIC_RMW_CMPXCHG_I32", "ATOMIC_RMW_CMPXCHG_I64">;
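
// For example (an editorial sketch): for
//   %pair = cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst
// the extracted loaded value is seen here as
// (atomic_cmp_swap_32 $p, $old, $new) and selects to i32.atomic.rmw.cmpxchg;
// when the success flag is also used, it is recomputed by comparing the
// returned value against $old (see the TODO above about the suboptimal i64
// subword case).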
884
885// Truncating & zero-extending ternary RMW patterns.
886// DAG legalization & optimization before instruction selection may introduce
887// additional nodes such as anyext or assertzext depending on operand types.
888class zext_ter_rmw_8_32<PatFrag kind> :
889  PatFrag<(ops node:$addr, node:$exp, node:$new),
890          (and (i32 (kind node:$addr, node:$exp, node:$new)), 255)>;
891class zext_ter_rmw_16_32<PatFrag kind> :
892  PatFrag<(ops node:$addr, node:$exp, node:$new),
893          (and (i32 (kind node:$addr, node:$exp, node:$new)), 65535)>;
894class zext_ter_rmw_8_64<PatFrag kind> :
895  PatFrag<(ops node:$addr, node:$exp, node:$new),
896          (zext (i32 (assertzext (i32 (kind node:$addr,
897                                            (i32 (trunc (i64 node:$exp))),
898                                            (i32 (trunc (i64 node:$new))))))))>;
899class zext_ter_rmw_16_64<PatFrag kind> : zext_ter_rmw_8_64<kind>;
900class zext_ter_rmw_32_64<PatFrag kind> :
901  PatFrag<(ops node:$addr, node:$exp, node:$new),
902          (zext (i32 (kind node:$addr,
903                           (i32 (trunc (i64 node:$exp))),
904                           (i32 (trunc (i64 node:$new))))))>;
905
906// Truncating & sign-extending ternary RMW patterns.
907// We match subword RMWs (for 32-bit) and anyext RMWs (for 64-bit) and select a
908// zext RMW; the next instruction will be sext_inreg which is selected by
909// itself.
910class sext_ter_rmw_8_32<PatFrag kind> :
911  PatFrag<(ops node:$addr, node:$exp, node:$new),
912          (kind node:$addr, node:$exp, node:$new)>;
913class sext_ter_rmw_16_32<PatFrag kind> : sext_ter_rmw_8_32<kind>;
914class sext_ter_rmw_8_64<PatFrag kind> :
915  PatFrag<(ops node:$addr, node:$exp, node:$new),
916          (anyext (i32 (assertzext (i32
917            (kind node:$addr,
918                  (i32 (trunc (i64 node:$exp))),
919                  (i32 (trunc (i64 node:$new))))))))>;
920class sext_ter_rmw_16_64<PatFrag kind> : sext_ter_rmw_8_64<kind>;
921// 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
922
923// Patterns for various addressing modes for truncating-extending ternary RMWs.
924multiclass TerRMWTruncExtPattern<
925  PatFrag rmw_8, PatFrag rmw_16, PatFrag rmw_32, PatFrag rmw_64,
926  string inst8_32, string inst16_32, string inst8_64, string inst16_64,
927  string inst32_64> {
928  // Truncating-extending ternary RMWs with no constant offset
929  defm : TerRMWPatNoOffset<i32, zext_ter_rmw_8_32<rmw_8>, inst8_32>;
930  defm : TerRMWPatNoOffset<i32, zext_ter_rmw_16_32<rmw_16>, inst16_32>;
931  defm : TerRMWPatNoOffset<i64, zext_ter_rmw_8_64<rmw_8>, inst8_64>;
932  defm : TerRMWPatNoOffset<i64, zext_ter_rmw_16_64<rmw_16>, inst16_64>;
933  defm : TerRMWPatNoOffset<i64, zext_ter_rmw_32_64<rmw_32>, inst32_64>;
934
935  defm : TerRMWPatNoOffset<i32, sext_ter_rmw_8_32<rmw_8>, inst8_32>;
936  defm : TerRMWPatNoOffset<i32, sext_ter_rmw_16_32<rmw_16>, inst16_32>;
937  defm : TerRMWPatNoOffset<i64, sext_ter_rmw_8_64<rmw_8>, inst8_64>;
938  defm : TerRMWPatNoOffset<i64, sext_ter_rmw_16_64<rmw_16>, inst16_64>;
939
940  // Truncating-extending ternary RMWs with a constant offset
941  defm : TerRMWPatImmOff<i32, zext_ter_rmw_8_32<rmw_8>, regPlusImm, inst8_32>;
942  defm : TerRMWPatImmOff<i32, zext_ter_rmw_16_32<rmw_16>, regPlusImm,
943                         inst16_32>;
944  defm : TerRMWPatImmOff<i64, zext_ter_rmw_8_64<rmw_8>, regPlusImm, inst8_64>;
945  defm : TerRMWPatImmOff<i64, zext_ter_rmw_16_64<rmw_16>, regPlusImm,
946                         inst16_64>;
947  defm : TerRMWPatImmOff<i64, zext_ter_rmw_32_64<rmw_32>, regPlusImm,
948                         inst32_64>;
949  defm : TerRMWPatImmOff<i32, zext_ter_rmw_8_32<rmw_8>, or_is_add, inst8_32>;
950  defm : TerRMWPatImmOff<i32, zext_ter_rmw_16_32<rmw_16>, or_is_add, inst16_32>;
951  defm : TerRMWPatImmOff<i64, zext_ter_rmw_8_64<rmw_8>, or_is_add, inst8_64>;
952  defm : TerRMWPatImmOff<i64, zext_ter_rmw_16_64<rmw_16>, or_is_add, inst16_64>;
953  defm : TerRMWPatImmOff<i64, zext_ter_rmw_32_64<rmw_32>, or_is_add, inst32_64>;
954
955  defm : TerRMWPatImmOff<i32, sext_ter_rmw_8_32<rmw_8>, regPlusImm, inst8_32>;
956  defm : TerRMWPatImmOff<i32, sext_ter_rmw_16_32<rmw_16>, regPlusImm,
957                         inst16_32>;
958  defm : TerRMWPatImmOff<i64, sext_ter_rmw_8_64<rmw_8>, regPlusImm, inst8_64>;
959  defm : TerRMWPatImmOff<i64, sext_ter_rmw_16_64<rmw_16>, regPlusImm,
960                         inst16_64>;
961  defm : TerRMWPatImmOff<i32, sext_ter_rmw_8_32<rmw_8>, or_is_add, inst8_32>;
962  defm : TerRMWPatImmOff<i32, sext_ter_rmw_16_32<rmw_16>, or_is_add, inst16_32>;
963  defm : TerRMWPatImmOff<i64, sext_ter_rmw_8_64<rmw_8>, or_is_add, inst8_64>;
964  defm : TerRMWPatImmOff<i64, sext_ter_rmw_16_64<rmw_16>, or_is_add, inst16_64>;
965
966  // Truncating-extending ternary RMWs with just a constant offset
967  defm : TerRMWPatOffsetOnly<i32, zext_ter_rmw_8_32<rmw_8>, inst8_32>;
968  defm : TerRMWPatOffsetOnly<i32, zext_ter_rmw_16_32<rmw_16>, inst16_32>;
969  defm : TerRMWPatOffsetOnly<i64, zext_ter_rmw_8_64<rmw_8>, inst8_64>;
970  defm : TerRMWPatOffsetOnly<i64, zext_ter_rmw_16_64<rmw_16>, inst16_64>;
971  defm : TerRMWPatOffsetOnly<i64, zext_ter_rmw_32_64<rmw_32>, inst32_64>;
972
973  defm : TerRMWPatOffsetOnly<i32, sext_ter_rmw_8_32<rmw_8>, inst8_32>;
974  defm : TerRMWPatOffsetOnly<i32, sext_ter_rmw_16_32<rmw_16>, inst16_32>;
975  defm : TerRMWPatOffsetOnly<i64, sext_ter_rmw_8_64<rmw_8>, inst8_64>;
976  defm : TerRMWPatOffsetOnly<i64, sext_ter_rmw_16_64<rmw_16>, inst16_64>;
977
978  defm : TerRMWPatGlobalAddrOffOnly<i32, zext_ter_rmw_8_32<rmw_8>, inst8_32>;
979  defm : TerRMWPatGlobalAddrOffOnly<i32, zext_ter_rmw_16_32<rmw_16>, inst16_32>;
980  defm : TerRMWPatGlobalAddrOffOnly<i64, zext_ter_rmw_8_64<rmw_8>, inst8_64>;
981  defm : TerRMWPatGlobalAddrOffOnly<i64, zext_ter_rmw_16_64<rmw_16>, inst16_64>;
982  defm : TerRMWPatGlobalAddrOffOnly<i64, zext_ter_rmw_32_64<rmw_32>, inst32_64>;
983
984  defm : TerRMWPatGlobalAddrOffOnly<i32, sext_ter_rmw_8_32<rmw_8>, inst8_32>;
985  defm : TerRMWPatGlobalAddrOffOnly<i32, sext_ter_rmw_16_32<rmw_16>, inst16_32>;
986  defm : TerRMWPatGlobalAddrOffOnly<i64, sext_ter_rmw_8_64<rmw_8>, inst8_64>;
987  defm : TerRMWPatGlobalAddrOffOnly<i64, sext_ter_rmw_16_64<rmw_16>, inst16_64>;
988}
989
990defm : TerRMWTruncExtPattern<
991  atomic_cmp_swap_8, atomic_cmp_swap_16, atomic_cmp_swap_32, atomic_cmp_swap_64,
992  "ATOMIC_RMW8_U_CMPXCHG_I32", "ATOMIC_RMW16_U_CMPXCHG_I32",
993  "ATOMIC_RMW8_U_CMPXCHG_I64", "ATOMIC_RMW16_U_CMPXCHG_I64",
994  "ATOMIC_RMW32_U_CMPXCHG_I64">;
995