//=- AArch64InstrAtomics.td - AArch64 Atomic codegen support -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Atomic operand code-gen constructs.
//
//===----------------------------------------------------------------------===//

//===----------------------------------
// Atomic fences
//===----------------------------------
let AddedComplexity = 15, Size = 0 in
def CompilerBarrier : Pseudo<(outs), (ins i32imm:$ordering),
                             [(atomic_fence imm:$ordering, 0)]>, Sched<[]>;
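// Ordering 4 is "acquire" in LLVM's atomic-ordering encoding, for which a
// load-only barrier (DMB ISHLD, CRm = 0x9) suffices; every other ordering
// conservatively gets a full DMB ISH (CRm = 0xb).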
def : Pat<(atomic_fence (i64 4), (imm)), (DMB (i32 0x9))>;
def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>;

//===----------------------------------
// Atomic loads
//===----------------------------------

// When a load needs acquire semantics, only one addressing mode (the plain
// GPR64sp base register) is supported; when it's relaxed, any ordinary load
// is atomic enough, so all the standard modes are valid and may give
// efficiency gains.
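// For example, a monotonic i32 load from a register-offset address can be
// selected directly to LDRWroW, while an acquire load must fall back to the
// base-register-only LDAR form.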

// An atomic load operation that actually needs acquire semantics.
class acquiring_load<PatFrag base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomic = 1;
  let IsAtomicOrderingAcquireOrStronger = 1;
}

// An atomic load operation that does not need either acquire or release
// semantics.
class relaxed_load<PatFrag base>
  : PatFrag<(ops node:$ptr), (base node:$ptr)> {
  let IsAtomic = 1;
  let IsAtomicOrderingAcquireOrStronger = 0;
}
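// TableGen lowers these flags into ordering checks on the underlying
// MemSDNode: acquiring_load matches acquire and seq_cst loads, while
// relaxed_load matches monotonic and unordered ones.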

// 8-bit loads
def : Pat<(acquiring_load<atomic_load_8>  GPR64sp:$ptr), (LDARB GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm,
                                                     ro_Wextend8:$offset)),
          (LDRBBroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm,
                                                     ro_Xextend8:$offset)),
          (LDRBBroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$offset)>;
def : Pat<(relaxed_load<atomic_load_8> (am_indexed8 GPR64sp:$Rn,
                                                    uimm12s1:$offset)),
          (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_load<atomic_load_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
          (LDURBBi GPR64sp:$Rn, simm9:$offset)>;

// 16-bit loads
def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDARH GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend16:$extend)),
          (LDRHHroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend16:$extend)),
          (LDRHHroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_load<atomic_load_16> (am_indexed16 GPR64sp:$Rn,
                                                      uimm12s2:$offset)),
          (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_load<atomic_load_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
          (LDURHHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bit loads
def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDARW GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend32:$extend)),
          (LDRWroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend32:$extend)),
          (LDRWroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_load<atomic_load_32> (am_indexed32 GPR64sp:$Rn,
                                                      uimm12s4:$offset)),
          (LDRWui GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_load<atomic_load_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
          (LDURWi GPR64sp:$Rn, simm9:$offset)>;

// 64-bit loads
def : Pat<(acquiring_load<atomic_load_64> GPR64sp:$ptr), (LDARX GPR64sp:$ptr)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                                       ro_Wextend64:$extend)),
          (LDRXroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                                       ro_Xextend64:$extend)),
          (LDRXroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_load<atomic_load_64> (am_indexed64 GPR64sp:$Rn,
                                                      uimm12s8:$offset)),
          (LDRXui GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_load<atomic_load_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (LDURXi GPR64sp:$Rn, simm9:$offset)>;

//===----------------------------------
// Atomic stores
//===----------------------------------

// When a store needs release semantics, only one addressing mode (the plain
// GPR64sp base register) is supported; when it's relaxed, any ordinary store
// is atomic enough, so all the standard modes are valid and may give
// efficiency gains.
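// For example, a monotonic i32 store can use the scaled-immediate form and
// select STRWui, while a release store must fall back to the
// base-register-only STLR form.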

// A store operation that actually needs release semantics.
class releasing_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val)> {
  let IsAtomic = 1;
  let IsAtomicOrderingReleaseOrStronger = 1;
}

// An atomic store operation that does not need release semantics; on AArch64
// an ordinary aligned store is already single-copy atomic.
class relaxed_store<PatFrag base>
  : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val)> {
  let IsAtomic = 1;
  let IsAtomicOrderingReleaseOrStronger = 0;
}

// 8-bit stores
def : Pat<(releasing_store<atomic_store_8> GPR64sp:$ptr, GPR32:$val),
          (STLRB GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_8>
               (ro_Windexed8 GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend),
               GPR32:$val),
          (STRBBroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
               (ro_Xindexed8 GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend),
               GPR32:$val),
          (STRBBroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend8:$extend)>;
def : Pat<(relaxed_store<atomic_store_8>
               (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset), GPR32:$val),
          (STRBBui GPR32:$val, GPR64sp:$Rn, uimm12s1:$offset)>;
def : Pat<(relaxed_store<atomic_store_8>
               (am_unscaled8 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURBBi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 16-bit stores
def : Pat<(releasing_store<atomic_store_16> GPR64sp:$ptr, GPR32:$val),
          (STLRH GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Windexed16 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend16:$extend),
                                          GPR32:$val),
          (STRHHroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16> (ro_Xindexed16 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend16:$extend),
                                          GPR32:$val),
          (STRHHroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend16:$extend)>;
def : Pat<(relaxed_store<atomic_store_16>
              (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset), GPR32:$val),
          (STRHHui GPR32:$val, GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(relaxed_store<atomic_store_16>
               (am_unscaled16 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURHHi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 32-bit stores
def : Pat<(releasing_store<atomic_store_32> GPR64sp:$ptr, GPR32:$val),
          (STLRW GPR32:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Windexed32 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend32:$extend),
                                          GPR32:$val),
          (STRWroW GPR32:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32> (ro_Xindexed32 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend32:$extend),
                                          GPR32:$val),
          (STRWroX GPR32:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend32:$extend)>;
def : Pat<(relaxed_store<atomic_store_32>
              (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset), GPR32:$val),
          (STRWui GPR32:$val, GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(relaxed_store<atomic_store_32>
               (am_unscaled32 GPR64sp:$Rn, simm9:$offset), GPR32:$val),
          (STURWi GPR32:$val, GPR64sp:$Rn, simm9:$offset)>;

// 64-bit stores
def : Pat<(releasing_store<atomic_store_64> GPR64sp:$ptr, GPR64:$val),
          (STLRX GPR64:$val, GPR64sp:$ptr)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
                                                         ro_Wextend64:$extend),
                                          GPR64:$val),
          (STRXroW GPR64:$val, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64> (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
                                                         ro_Xextend64:$extend),
                                          GPR64:$val),
          (STRXroX GPR64:$val, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
def : Pat<(relaxed_store<atomic_store_64>
              (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset), GPR64:$val),
          (STRXui GPR64:$val, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(relaxed_store<atomic_store_64>
               (am_unscaled64 GPR64sp:$Rn, simm9:$offset), GPR64:$val),
          (STURXi GPR64:$val, GPR64sp:$Rn, simm9:$offset)>;

//===----------------------------------
// Low-level exclusive operations
//===----------------------------------

// Load-exclusives.

def ldxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

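// int_aarch64_ldxr always produces an i64 result, so the PatFrags above
// dispatch on the memory VT recorded on the intrinsic node; the sub-64-bit
// loads are zero-extended into the full X register via SUBREG_TO_REG.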
def : Pat<(ldxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldxr_8 GPR64sp:$addr), (LDXRX GPR64sp:$addr)>;

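// Masking the result of a narrow load-exclusive is a no-op: LDXRB, LDXRH and
// LDXRW already zero-extend, so the AND folds away.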
def : Pat<(and (ldxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDXRW GPR64sp:$addr), sub_32)>;

// Load-acquire exclusives.

def ldaxr_1 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def ldaxr_2 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def ldaxr_4 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def ldaxr_8 : PatFrag<(ops node:$ptr), (int_aarch64_ldaxr node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

def : Pat<(ldaxr_1 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_2 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_4 GPR64sp:$addr),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;
def : Pat<(ldaxr_8 GPR64sp:$addr), (LDAXRX GPR64sp:$addr)>;

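// As with LDXR, the zero-extending masks are redundant and fold away.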
def : Pat<(and (ldaxr_1 GPR64sp:$addr), 0xff),
          (SUBREG_TO_REG (i64 0), (LDAXRB GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_2 GPR64sp:$addr), 0xffff),
          (SUBREG_TO_REG (i64 0), (LDAXRH GPR64sp:$addr), sub_32)>;
def : Pat<(and (ldaxr_4 GPR64sp:$addr), 0xffffffff),
          (SUBREG_TO_REG (i64 0), (LDAXRW GPR64sp:$addr), sub_32)>;

// Store-exclusives.

def stxr_1 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stxr_2 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stxr_4 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stxr_8 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

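// int_aarch64_stxr takes an i64 value operand; the narrow variants store only
// the low bits, so the value is moved into the W sub-register first.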
def : Pat<(stxr_1 GPR64:$val, GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 GPR64:$val, GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 GPR64:$val, GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_8 GPR64:$val, GPR64sp:$addr),
          (STXRX GPR64:$val, GPR64sp:$addr)>;

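// If the i64 value is visibly a zero-extended or masked 32-bit value, use the
// W register directly rather than extending and re-truncating it.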
def : Pat<(stxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;

// Store-release-exclusives.

def stlxr_1 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def stlxr_2 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def stlxr_4 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

def stlxr_8 : PatFrag<(ops node:$val, node:$ptr),
                     (int_aarch64_stlxr node:$val, node:$ptr), [{
  return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;

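// Same structure as the STXR patterns above, with release semantics.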
def : Pat<(stlxr_1 GPR64:$val, GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 GPR64:$val, GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 GPR64:$val, GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_8 GPR64:$val, GPR64sp:$addr),
          (STLXRX GPR64:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (zext (and GPR32:$val, 0xff)), GPR64sp:$addr),
          (STLXRB GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_2 (zext (and GPR32:$val, 0xffff)), GPR64sp:$addr),
          (STLXRH GPR32:$val, GPR64sp:$addr)>;
def : Pat<(stlxr_4 (zext GPR32:$val), GPR64sp:$addr),
          (STLXRW GPR32:$val, GPR64sp:$addr)>;

def : Pat<(stlxr_1 (and GPR64:$val, 0xff), GPR64sp:$addr),
          (STLXRB (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_2 (and GPR64:$val, 0xffff), GPR64sp:$addr),
          (STLXRH (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;
def : Pat<(stlxr_4 (and GPR64:$val, 0xffffffff), GPR64sp:$addr),
          (STLXRW (EXTRACT_SUBREG GPR64:$val, sub_32), GPR64sp:$addr)>;


// And clear exclusive.

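// The 4-bit CRm field of CLREX is ignored by the hardware; 0xf is the
// conventional all-ones encoding.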
def : Pat<(int_aarch64_clrex), (CLREX 0xf)>;

//===----------------------------------
// Atomic cmpxchg for -O0
//===----------------------------------

// The fast register allocator used during -O0 inserts spills to cover any VRegs
// live across basic block boundaries. When this happens between an LDXR and an
// STXR it can clear the exclusive monitor, causing all cmpxchg attempts to
// fail.

// Unfortunately, this means we have to have an alternative (expanded
// post-regalloc) path for -O0 compilations. Fortunately this path can be
// significantly more naive than the standard expansion: we conservatively
// assume seq_cst, strong cmpxchg and omit clrex on failure.

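// A sketch of the post-regalloc expansion of CMP_SWAP_32 (performed in
// AArch64ExpandPseudoInsts.cpp; the exact registers are chosen after RA):
//   .Lloop:
//     ldaxr   wDest, [xAddr]          // acquire-load the current value
//     cmp     wDest, wDesired
//     b.ne    .Ldone                  // mismatch: fail (no clrex)
//     stlxr   wScratch, wNew, [xAddr] // release-store the new value
//     cbnz    wScratch, .Lloop        // retry if the monitor was lost
//   .Ldone: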
let Constraints = "@earlyclobber $Rd,@earlyclobber $scratch",
    mayLoad = 1, mayStore = 1 in {
def CMP_SWAP_8 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                        (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                 Sched<[WriteAtomic]>;

def CMP_SWAP_16 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                         (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                  Sched<[WriteAtomic]>;

def CMP_SWAP_32 : Pseudo<(outs GPR32:$Rd, GPR32:$scratch),
                         (ins GPR64:$addr, GPR32:$desired, GPR32:$new), []>,
                  Sched<[WriteAtomic]>;

def CMP_SWAP_64 : Pseudo<(outs GPR64:$Rd, GPR32:$scratch),
                         (ins GPR64:$addr, GPR64:$desired, GPR64:$new), []>,
                  Sched<[WriteAtomic]>;
}

let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi,@earlyclobber $scratch",
    mayLoad = 1, mayStore = 1 in
def CMP_SWAP_128 : Pseudo<(outs GPR64:$RdLo, GPR64:$RdHi, GPR32:$scratch),
                          (ins GPR64:$addr, GPR64:$desiredLo, GPR64:$desiredHi,
                               GPR64:$newLo, GPR64:$newHi), []>,
                   Sched<[WriteAtomic]>;
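// The 128-bit variant expands to an LDAXP/STLXP loop over the Lo/Hi halves,
// hence the split register operands.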

// v8.1 Atomic instructions:
let Predicates = [HasLSE] in {
  defm : LDOPregister_patterns<"LDADD", "atomic_load_add">;
  defm : LDOPregister_patterns<"LDSET", "atomic_load_or">;
  defm : LDOPregister_patterns<"LDEOR", "atomic_load_xor">;
  defm : LDOPregister_patterns<"LDCLR", "atomic_load_clr">;
  defm : LDOPregister_patterns<"LDSMAX", "atomic_load_max">;
  defm : LDOPregister_patterns<"LDSMIN", "atomic_load_min">;
  defm : LDOPregister_patterns<"LDUMAX", "atomic_load_umax">;
  defm : LDOPregister_patterns<"LDUMIN", "atomic_load_umin">;
  defm : LDOPregister_patterns<"SWP", "atomic_swap">;
  defm : CASregister_patterns<"CAS", "atomic_cmp_swap">;

  // These two patterns are only needed for GlobalISel; SelectionDAG converts
  // an atomic load-sub into a negation plus an atomic load-add, and likewise
  // an atomic load-and into an inversion plus an atomic load-clr.
  defm : LDOPregister_patterns_mod<"LDADD", "atomic_load_sub", "SUB">;
  defm : LDOPregister_patterns_mod<"LDCLR", "atomic_load_and", "ORN">;
}
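
// With LSE, e.g. a seq_cst fetch_add on an i32 selects a single
// "ldaddal wRs, wRt, [xAddr]" instead of an LL/SC loop; the multiclasses
// above instantiate patterns for each width and memory ordering.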