/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

	.ident	"%Z%%M%	%I%	%E% SMI"

	.file	"%M%"

#include <sys/asm_linkage.h>

#if defined(_KERNEL)
	/*
	 * Legacy kernel interfaces; they will go away (eventually).
	 */
	ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
	ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
	ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
	ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
	ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
	ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
	ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
	ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)
#else
	/*
	 * Include the definitions for the libc weak aliases.
	 */
#include "../atomic_asm_weak.h"
#endif
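
	/*
	 * Illustrative note (not part of the build): ANSI_PRAGMA_WEAK2
	 * makes its first name a weak alias for its second, so a legacy
	 * caller of cas32() binds to atomic_cas_32().  The expansion is
	 * roughly:
	 *
	 *	.weak	cas32
	 *	cas32 = atomic_cas_32
	 */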

	/*
	 * NOTE: If atomic_inc_8 and atomic_inc_8_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_inc_8_nv.
	 */
	ENTRY(atomic_inc_8)
	ALTENTRY(atomic_inc_8_nv)
	ALTENTRY(atomic_inc_uchar)
	ALTENTRY(atomic_inc_uchar_nv)
	ba	add_8
	  add	%g0, 1, %o1
	SET_SIZE(atomic_inc_uchar_nv)
	SET_SIZE(atomic_inc_uchar)
	SET_SIZE(atomic_inc_8_nv)
	SET_SIZE(atomic_inc_8)

	/*
	 * NOTE: If atomic_dec_8 and atomic_dec_8_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_dec_8_nv.
	 */
	ENTRY(atomic_dec_8)
	ALTENTRY(atomic_dec_8_nv)
	ALTENTRY(atomic_dec_uchar)
	ALTENTRY(atomic_dec_uchar_nv)
	ba	add_8
	  sub	%g0, 1, %o1
	SET_SIZE(atomic_dec_uchar_nv)
	SET_SIZE(atomic_dec_uchar)
	SET_SIZE(atomic_dec_8_nv)
	SET_SIZE(atomic_dec_8)

	/*
	 * NOTE: If atomic_add_8 and atomic_add_8_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_add_8_nv.
	 */
	ENTRY(atomic_add_8)
	ALTENTRY(atomic_add_8_nv)
	ALTENTRY(atomic_add_char)
	ALTENTRY(atomic_add_char_nv)
add_8:
	and	%o0, 0x3, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x3, %g1		! %g1 = byte offset, right-to-left
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	set	0xff, %o3		! %o3 = mask
	sll	%o3, %g1, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single byte value
	andn	%o0, 0x3, %o0		! %o0 = word address
	ld	[%o0], %o2		! read old value
1:
	add	%o2, %o1, %o5		! add value to the old value
	and	%o5, %o3, %o5		! clear other bits
	andn	%o2, %o3, %o4		! clear target bits
	or	%o4, %o5, %o5		! insert the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	add	%o2, %o1, %o5
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = new value
	SET_SIZE(atomic_add_char_nv)
	SET_SIZE(atomic_add_char)
	SET_SIZE(atomic_add_8_nv)
	SET_SIZE(atomic_add_8)
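
	/*
	 * Illustrative sketch (not part of the build): the byte-sized
	 * operations are emulated with a 32-bit cas on the aligned word
	 * containing the byte.  SPARC is big-endian, hence the "xor 3"
	 * shift computation above.  In rough C, using the atomic_cas_32
	 * primitive defined later in this file (the _sketch name is
	 * hypothetical):
	 *
	 *	uint8_t
	 *	atomic_add_8_nv_sketch(volatile uint8_t *p, int8_t d)
	 *	{
	 *		volatile uint32_t *wp =
	 *		    (volatile uint32_t *)((uintptr_t)p & ~3UL);
	 *		int shift = (3 - ((uintptr_t)p & 3)) * 8;
	 *		uint32_t mask = 0xffU << shift;
	 *		uint32_t delta = ((uint32_t)(uint8_t)d << shift) & mask;
	 *		uint32_t old, new;
	 *
	 *		do {
	 *			old = *wp;
	 *			new = (old & ~mask) | ((old + delta) & mask);
	 *		} while (atomic_cas_32(wp, old, new) != old);
	 *		return ((new & mask) >> shift);
	 *	}
	 */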

	/*
	 * NOTE: If atomic_inc_16 and atomic_inc_16_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_inc_16_nv.
	 */
	ENTRY(atomic_inc_16)
	ALTENTRY(atomic_inc_16_nv)
	ALTENTRY(atomic_inc_ushort)
	ALTENTRY(atomic_inc_ushort_nv)
	ba	add_16
	  add	%g0, 1, %o1
	SET_SIZE(atomic_inc_ushort_nv)
	SET_SIZE(atomic_inc_ushort)
	SET_SIZE(atomic_inc_16_nv)
	SET_SIZE(atomic_inc_16)

	/*
	 * NOTE: If atomic_dec_16 and atomic_dec_16_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_dec_16_nv.
	 */
	ENTRY(atomic_dec_16)
	ALTENTRY(atomic_dec_16_nv)
	ALTENTRY(atomic_dec_ushort)
	ALTENTRY(atomic_dec_ushort_nv)
	ba	add_16
	  sub	%g0, 1, %o1
	SET_SIZE(atomic_dec_ushort_nv)
	SET_SIZE(atomic_dec_ushort)
	SET_SIZE(atomic_dec_16_nv)
	SET_SIZE(atomic_dec_16)

	/*
	 * NOTE: If atomic_add_16 and atomic_add_16_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_add_16_nv.
	 */
	ENTRY(atomic_add_16)
	ALTENTRY(atomic_add_16_nv)
	ALTENTRY(atomic_add_short)
	ALTENTRY(atomic_add_short_nv)
add_16:
	and	%o0, 0x2, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x2, %g1		! %g1 = byte offset, right-to-left
	sll	%o4, 3, %o4		! %o4 = bit offset, left-to-right
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	sethi	%hi(0xffff0000), %o3	! %o3 = mask
	srl	%o3, %o4, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single short value
	andn	%o0, 0x2, %o0		! %o0 = word address
	! if low-order bit is 1, we will properly get an alignment fault here
	ld	[%o0], %o2		! read old value
1:
	add	%o1, %o2, %o5		! add value to the old value
	and	%o5, %o3, %o5		! clear other bits
	andn	%o2, %o3, %o4		! clear target bits
	or	%o4, %o5, %o5		! insert the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	add	%o1, %o2, %o5
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = new value
	SET_SIZE(atomic_add_short_nv)
	SET_SIZE(atomic_add_short)
	SET_SIZE(atomic_add_16_nv)
	SET_SIZE(atomic_add_16)
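
	/*
	 * Note (editorial): the 16-bit variants differ from the byte
	 * case only in how the mask is formed (0xffff0000 shifted right
	 * by the left-to-right bit offset) and in that an odd address is
	 * not silently rounded down: andn clears only bit 1, so the word
	 * load above faults on an odd address, as the inline comment
	 * says.
	 */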

	/*
	 * NOTE: If atomic_inc_32 and atomic_inc_32_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_inc_32_nv.
	 */
	ENTRY(atomic_inc_32)
	ALTENTRY(atomic_inc_32_nv)
	ALTENTRY(atomic_inc_uint)
	ALTENTRY(atomic_inc_uint_nv)
	ba	add_32
	  add	%g0, 1, %o1
	SET_SIZE(atomic_inc_uint_nv)
	SET_SIZE(atomic_inc_uint)
	SET_SIZE(atomic_inc_32_nv)
	SET_SIZE(atomic_inc_32)

	/*
	 * NOTE: If atomic_dec_32 and atomic_dec_32_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_dec_32_nv.
	 */
	ENTRY(atomic_dec_32)
	ALTENTRY(atomic_dec_32_nv)
	ALTENTRY(atomic_dec_uint)
	ALTENTRY(atomic_dec_uint_nv)
	ba	add_32
	  sub	%g0, 1, %o1
	SET_SIZE(atomic_dec_uint_nv)
	SET_SIZE(atomic_dec_uint)
	SET_SIZE(atomic_dec_32_nv)
	SET_SIZE(atomic_dec_32)

	/*
	 * NOTE: If atomic_add_32 and atomic_add_32_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_add_32_nv.
	 */
	ENTRY(atomic_add_32)
	ALTENTRY(atomic_add_32_nv)
	ALTENTRY(atomic_add_int)
	ALTENTRY(atomic_add_int_nv)
add_32:
	ld	[%o0], %o2
1:
	add	%o2, %o1, %o3
	cas	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %icc, 1b
	  mov	%o3, %o2
	retl
	add	%o2, %o1, %o0		! return new value
	SET_SIZE(atomic_add_int_nv)
	SET_SIZE(atomic_add_int)
	SET_SIZE(atomic_add_32_nv)
	SET_SIZE(atomic_add_32)
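
	/*
	 * Illustrative sketch (not part of the build): the word-sized
	 * loop above is the classic compare-and-swap retry idiom.  In
	 * rough C (the _sketch name is hypothetical):
	 *
	 *	uint32_t
	 *	atomic_add_32_nv_sketch(volatile uint32_t *p, int32_t d)
	 *	{
	 *		uint32_t old = *p;
	 *		uint32_t seen;
	 *
	 *		while ((seen = atomic_cas_32(p, old, old + d)) != old)
	 *			old = seen;
	 *		return (old + d);
	 *	}
	 *
	 * The annulled branch (bne,a) executes the "old = seen" step in
	 * its delay slot only when the branch is taken, i.e. only when
	 * the cas failed.
	 */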

	/*
	 * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_inc_64_nv.
	 */
	ENTRY(atomic_inc_64)
	ALTENTRY(atomic_inc_64_nv)
	ALTENTRY(atomic_inc_ulong)
	ALTENTRY(atomic_inc_ulong_nv)
	ba	add_64
	  add	%g0, 1, %o1
	SET_SIZE(atomic_inc_ulong_nv)
	SET_SIZE(atomic_inc_ulong)
	SET_SIZE(atomic_inc_64_nv)
	SET_SIZE(atomic_inc_64)

	/*
	 * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_dec_64_nv.
	 */
	ENTRY(atomic_dec_64)
	ALTENTRY(atomic_dec_64_nv)
	ALTENTRY(atomic_dec_ulong)
	ALTENTRY(atomic_dec_ulong_nv)
	ba	add_64
	  sub	%g0, 1, %o1
	SET_SIZE(atomic_dec_ulong_nv)
	SET_SIZE(atomic_dec_ulong)
	SET_SIZE(atomic_dec_64_nv)
	SET_SIZE(atomic_dec_64)

	/*
	 * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_add_64_nv.
	 */
	ENTRY(atomic_add_64)
	ALTENTRY(atomic_add_64_nv)
	ALTENTRY(atomic_add_ptr)
	ALTENTRY(atomic_add_ptr_nv)
	ALTENTRY(atomic_add_long)
	ALTENTRY(atomic_add_long_nv)
add_64:
	ldx	[%o0], %o2
1:
	add	%o2, %o1, %o3
	casx	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %xcc, 1b
	  mov	%o3, %o2
	retl
	add	%o2, %o1, %o0		! return new value
	SET_SIZE(atomic_add_long_nv)
	SET_SIZE(atomic_add_long)
	SET_SIZE(atomic_add_ptr_nv)
	SET_SIZE(atomic_add_ptr)
	SET_SIZE(atomic_add_64_nv)
	SET_SIZE(atomic_add_64)
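
	/*
	 * Note (editorial): sparcv9 is an LP64 environment, so longs and
	 * pointers are 64 bits wide and the atomic_add_long and
	 * atomic_add_ptr families are simply extra entry points for the
	 * 64-bit casx-based code above.
	 */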

	/*
	 * NOTE: If atomic_or_8 and atomic_or_8_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_or_8_nv.
	 */
	ENTRY(atomic_or_8)
	ALTENTRY(atomic_or_8_nv)
	ALTENTRY(atomic_or_uchar)
	ALTENTRY(atomic_or_uchar_nv)
	and	%o0, 0x3, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x3, %g1		! %g1 = byte offset, right-to-left
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	set	0xff, %o3		! %o3 = mask
	sll	%o3, %g1, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single byte value
	andn	%o0, 0x3, %o0		! %o0 = word address
	ld	[%o0], %o2		! read old value
1:
	or	%o2, %o1, %o5		! or in the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	or	%o2, %o1, %o5
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = new value
	SET_SIZE(atomic_or_uchar_nv)
	SET_SIZE(atomic_or_uchar)
	SET_SIZE(atomic_or_8_nv)
	SET_SIZE(atomic_or_8)

	/*
	 * NOTE: If atomic_or_16 and atomic_or_16_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_or_16_nv.
	 */
	ENTRY(atomic_or_16)
	ALTENTRY(atomic_or_16_nv)
	ALTENTRY(atomic_or_ushort)
	ALTENTRY(atomic_or_ushort_nv)
	and	%o0, 0x2, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x2, %g1		! %g1 = byte offset, right-to-left
	sll	%o4, 3, %o4		! %o4 = bit offset, left-to-right
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	sethi	%hi(0xffff0000), %o3	! %o3 = mask
	srl	%o3, %o4, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single short value
	andn	%o0, 0x2, %o0		! %o0 = word address
	! if low-order bit is 1, we will properly get an alignment fault here
	ld	[%o0], %o2		! read old value
1:
	or	%o2, %o1, %o5		! or in the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	or	%o2, %o1, %o5		! or in the new value
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = new value
	SET_SIZE(atomic_or_ushort_nv)
	SET_SIZE(atomic_or_ushort)
	SET_SIZE(atomic_or_16_nv)
	SET_SIZE(atomic_or_16)

	/*
	 * NOTE: If atomic_or_32 and atomic_or_32_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_or_32_nv.
	 */
	ENTRY(atomic_or_32)
	ALTENTRY(atomic_or_32_nv)
	ALTENTRY(atomic_or_uint)
	ALTENTRY(atomic_or_uint_nv)
	ld	[%o0], %o2
1:
	or	%o2, %o1, %o3
	cas	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %icc, 1b
	  mov	%o3, %o2
	retl
	or	%o2, %o1, %o0		! return new value
	SET_SIZE(atomic_or_uint_nv)
	SET_SIZE(atomic_or_uint)
	SET_SIZE(atomic_or_32_nv)
	SET_SIZE(atomic_or_32)

	/*
	 * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_or_64_nv.
	 */
	ENTRY(atomic_or_64)
	ALTENTRY(atomic_or_64_nv)
	ALTENTRY(atomic_or_ulong)
	ALTENTRY(atomic_or_ulong_nv)
	ldx	[%o0], %o2
1:
	or	%o2, %o1, %o3
	casx	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %xcc, 1b
	  mov	%o3, %o2
	retl
	or	%o2, %o1, %o0		! return new value
	SET_SIZE(atomic_or_ulong_nv)
	SET_SIZE(atomic_or_ulong)
	SET_SIZE(atomic_or_64_nv)
	SET_SIZE(atomic_or_64)

	/*
	 * NOTE: If atomic_and_8 and atomic_and_8_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_and_8_nv.
	 */
	ENTRY(atomic_and_8)
	ALTENTRY(atomic_and_8_nv)
	ALTENTRY(atomic_and_uchar)
	ALTENTRY(atomic_and_uchar_nv)
	and	%o0, 0x3, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x3, %g1		! %g1 = byte offset, right-to-left
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	set	0xff, %o3		! %o3 = mask
	sll	%o3, %g1, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	orn	%o1, %o3, %o1		! all ones in other bytes
	andn	%o0, 0x3, %o0		! %o0 = word address
	ld	[%o0], %o2		! read old value
1:
	and	%o2, %o1, %o5		! and in the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	and	%o2, %o1, %o5
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = new value
	SET_SIZE(atomic_and_uchar_nv)
	SET_SIZE(atomic_and_uchar)
	SET_SIZE(atomic_and_8_nv)
	SET_SIZE(atomic_and_8)
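
	/*
	 * Illustrative note (not part of the build): unlike the or case,
	 * an and must leave the neighboring bytes untouched, so orn
	 * widens the operand with all ones outside the target byte:
	 *
	 *	wide = (value << shift) | ~mask;
	 *	new  = old & wide;
	 *
	 * The bytes outside the mask are anded with all ones and are
	 * therefore unchanged, so no separate merge step is needed.
	 */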

	/*
	 * NOTE: If atomic_and_16 and atomic_and_16_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_and_16_nv.
	 */
	ENTRY(atomic_and_16)
	ALTENTRY(atomic_and_16_nv)
	ALTENTRY(atomic_and_ushort)
	ALTENTRY(atomic_and_ushort_nv)
	and	%o0, 0x2, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x2, %g1		! %g1 = byte offset, right-to-left
	sll	%o4, 3, %o4		! %o4 = bit offset, left-to-right
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	sethi	%hi(0xffff0000), %o3	! %o3 = mask
	srl	%o3, %o4, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	orn	%o1, %o3, %o1		! all ones in the other half
	andn	%o0, 0x2, %o0		! %o0 = word address
	! if low-order bit is 1, we will properly get an alignment fault here
	ld	[%o0], %o2		! read old value
1:
	and	%o2, %o1, %o5		! and in the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	and	%o2, %o1, %o5
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = new value
	SET_SIZE(atomic_and_ushort_nv)
	SET_SIZE(atomic_and_ushort)
	SET_SIZE(atomic_and_16_nv)
	SET_SIZE(atomic_and_16)

	/*
	 * NOTE: If atomic_and_32 and atomic_and_32_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_and_32_nv.
	 */
	ENTRY(atomic_and_32)
	ALTENTRY(atomic_and_32_nv)
	ALTENTRY(atomic_and_uint)
	ALTENTRY(atomic_and_uint_nv)
	ld	[%o0], %o2
1:
	and	%o2, %o1, %o3
	cas	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %icc, 1b
	  mov	%o3, %o2
	retl
	and	%o2, %o1, %o0		! return new value
	SET_SIZE(atomic_and_uint_nv)
	SET_SIZE(atomic_and_uint)
	SET_SIZE(atomic_and_32_nv)
	SET_SIZE(atomic_and_32)

	/*
	 * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
	 * separated, you also need to edit the libc sparcv9
	 * platform-specific mapfile and remove the NODYNSORT attribute
	 * from atomic_and_64_nv.
	 */
	ENTRY(atomic_and_64)
	ALTENTRY(atomic_and_64_nv)
	ALTENTRY(atomic_and_ulong)
	ALTENTRY(atomic_and_ulong_nv)
	ldx	[%o0], %o2
1:
	and	%o2, %o1, %o3
	casx	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %xcc, 1b
	  mov	%o3, %o2
	retl
	and	%o2, %o1, %o0		! return new value
	SET_SIZE(atomic_and_ulong_nv)
	SET_SIZE(atomic_and_ulong)
	SET_SIZE(atomic_and_64_nv)
	SET_SIZE(atomic_and_64)

	ENTRY(atomic_cas_8)
	ALTENTRY(atomic_cas_uchar)
	and	%o0, 0x3, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x3, %g1		! %g1 = byte offset, right-to-left
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	set	0xff, %o3		! %o3 = mask
	sll	%o3, %g1, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single byte value
	sll	%o2, %g1, %o2		! %o2 = shifted to bit offset
	and	%o2, %o3, %o2		! %o2 = single byte value
	andn	%o0, 0x3, %o0		! %o0 = word address
	ld	[%o0], %o4		! read old value
1:
	andn	%o4, %o3, %o4		! clear target bits
	or	%o4, %o2, %o5		! insert the new value
	or	%o4, %o1, %o4		! insert the comparison value
	cas	[%o0], %o4, %o5
	cmp	%o4, %o5		! did we succeed?
	be,pt	%icc, 2f
	  and	%o5, %o3, %o4		! isolate the old value
	cmp	%o1, %o4		! should we have succeeded?
	be,a,pt	%icc, 1b		! yes, try again
	  mov	%o5, %o4		! %o4 = old value
2:
	retl
	srl	%o4, %g1, %o0		! %o0 = old value
	SET_SIZE(atomic_cas_uchar)
	SET_SIZE(atomic_cas_8)
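
	/*
	 * Illustrative sketch (not part of the build): a byte-sized cas
	 * is built from a word-sized cas.  The subtlety is the second
	 * test above: the word cas can fail because a neighboring byte
	 * changed even though the target byte still holds the comparison
	 * value, and only that case is retried.  In rough C (the _sketch
	 * name is hypothetical):
	 *
	 *	uint8_t
	 *	atomic_cas_8_sketch(volatile uint8_t *p, uint8_t cmp,
	 *	    uint8_t nv)
	 *	{
	 *		volatile uint32_t *wp =
	 *		    (volatile uint32_t *)((uintptr_t)p & ~3UL);
	 *		int shift = (3 - ((uintptr_t)p & 3)) * 8;
	 *		uint32_t mask = 0xffU << shift;
	 *		uint32_t old = *wp, exp, seen;
	 *
	 *		for (;;) {
	 *			exp = (old & ~mask) | ((uint32_t)cmp << shift);
	 *			seen = atomic_cas_32(wp, exp,
	 *			    (old & ~mask) | ((uint32_t)nv << shift));
	 *			if (seen == exp)
	 *				break;		succeeded
	 *			if (((seen & mask) >> shift) != cmp)
	 *				break;		genuine mismatch
	 *			old = seen;		neighbor changed; retry
	 *		}
	 *		return ((seen & mask) >> shift);
	 *	}
	 */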

	ENTRY(atomic_cas_16)
	ALTENTRY(atomic_cas_ushort)
	and	%o0, 0x2, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x2, %g1		! %g1 = byte offset, right-to-left
	sll	%o4, 3, %o4		! %o4 = bit offset, left-to-right
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	sethi	%hi(0xffff0000), %o3	! %o3 = mask
	srl	%o3, %o4, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single short value
	sll	%o2, %g1, %o2		! %o2 = shifted to bit offset
	and	%o2, %o3, %o2		! %o2 = single short value
	andn	%o0, 0x2, %o0		! %o0 = word address
	! if low-order bit is 1, we will properly get an alignment fault here
	ld	[%o0], %o4		! read old value
1:
	andn	%o4, %o3, %o4		! clear target bits
	or	%o4, %o2, %o5		! insert the new value
	or	%o4, %o1, %o4		! insert the comparison value
	cas	[%o0], %o4, %o5
	cmp	%o4, %o5		! did we succeed?
	be,pt	%icc, 2f
	  and	%o5, %o3, %o4		! isolate the old value
	cmp	%o1, %o4		! should we have succeeded?
	be,a,pt	%icc, 1b		! yes, try again
	  mov	%o5, %o4		! %o4 = old value
2:
	retl
	srl	%o4, %g1, %o0		! %o0 = old value
	SET_SIZE(atomic_cas_ushort)
	SET_SIZE(atomic_cas_16)

	ENTRY(atomic_cas_32)
	ALTENTRY(atomic_cas_uint)
	cas	[%o0], %o1, %o2
	retl
	mov	%o2, %o0
	SET_SIZE(atomic_cas_uint)
	SET_SIZE(atomic_cas_32)

	ENTRY(atomic_cas_64)
	ALTENTRY(atomic_cas_ptr)
	ALTENTRY(atomic_cas_ulong)
	casx	[%o0], %o1, %o2
	retl
	mov	%o2, %o0
	SET_SIZE(atomic_cas_ulong)
	SET_SIZE(atomic_cas_ptr)
	SET_SIZE(atomic_cas_64)

	ENTRY(atomic_swap_8)
	ALTENTRY(atomic_swap_uchar)
	and	%o0, 0x3, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x3, %g1		! %g1 = byte offset, right-to-left
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	set	0xff, %o3		! %o3 = mask
	sll	%o3, %g1, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single byte value
	andn	%o0, 0x3, %o0		! %o0 = word address
	ld	[%o0], %o2		! read old value
1:
	andn	%o2, %o3, %o5		! clear target bits
	or	%o5, %o1, %o5		! insert the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = old value
	SET_SIZE(atomic_swap_uchar)
	SET_SIZE(atomic_swap_8)

	ENTRY(atomic_swap_16)
	ALTENTRY(atomic_swap_ushort)
	and	%o0, 0x2, %o4		! %o4 = byte offset, left-to-right
	xor	%o4, 0x2, %g1		! %g1 = byte offset, right-to-left
	sll	%o4, 3, %o4		! %o4 = bit offset, left-to-right
	sll	%g1, 3, %g1		! %g1 = bit offset, right-to-left
	sethi	%hi(0xffff0000), %o3	! %o3 = mask
	srl	%o3, %o4, %o3		! %o3 = shifted to bit offset
	sll	%o1, %g1, %o1		! %o1 = shifted to bit offset
	and	%o1, %o3, %o1		! %o1 = single short value
	andn	%o0, 0x2, %o0		! %o0 = word address
	! if low-order bit is 1, we will properly get an alignment fault here
	ld	[%o0], %o2		! read old value
1:
	andn	%o2, %o3, %o5		! clear target bits
	or	%o5, %o1, %o5		! insert the new value
	cas	[%o0], %o2, %o5
	cmp	%o2, %o5
	bne,a,pn %icc, 1b
	  mov	%o5, %o2		! %o2 = old value
	and	%o5, %o3, %o5
	retl
	srl	%o5, %g1, %o0		! %o0 = old value
	SET_SIZE(atomic_swap_ushort)
	SET_SIZE(atomic_swap_16)

	ENTRY(atomic_swap_32)
	ALTENTRY(atomic_swap_uint)
	ld	[%o0], %o2
1:
	mov	%o1, %o3
	cas	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %icc, 1b
	  mov	%o3, %o2
	retl
	mov	%o3, %o0
	SET_SIZE(atomic_swap_uint)
	SET_SIZE(atomic_swap_32)
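
	/*
	 * Illustrative sketch (not part of the build): swap is the same
	 * cas retry loop, but the proposed value ignores the old
	 * contents, and the previous value is returned.  In rough C (the
	 * _sketch name is hypothetical):
	 *
	 *	uint32_t
	 *	atomic_swap_32_sketch(volatile uint32_t *p, uint32_t v)
	 *	{
	 *		uint32_t old = *p, seen;
	 *
	 *		while ((seen = atomic_cas_32(p, old, v)) != old)
	 *			old = seen;
	 *		return (old);
	 *	}
	 */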

	ENTRY(atomic_swap_64)
	ALTENTRY(atomic_swap_ptr)
	ALTENTRY(atomic_swap_ulong)
	ldx	[%o0], %o2
1:
	mov	%o1, %o3
	casx	[%o0], %o2, %o3
	cmp	%o2, %o3
	bne,a,pn %xcc, 1b
	  mov	%o3, %o2
	retl
	mov	%o3, %o0
	SET_SIZE(atomic_swap_ulong)
	SET_SIZE(atomic_swap_ptr)
	SET_SIZE(atomic_swap_64)

	ENTRY(atomic_set_long_excl)
	mov	1, %o3
	slln	%o3, %o1, %o3
	ldn	[%o0], %o2
1:
	andcc	%o2, %o3, %g0		! test if the bit is set
	bnz,a,pn %ncc, 2f		! if so, then fail out
	  mov	-1, %o0
	or	%o2, %o3, %o4		! set the bit, and try to commit it
	casn	[%o0], %o2, %o4
	cmp	%o2, %o4
	bne,a,pn %ncc, 1b		! failed to commit, try again
	  mov	%o4, %o2
	mov	%g0, %o0
2:
	retl
	nop
	SET_SIZE(atomic_set_long_excl)
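
	/*
	 * Illustrative sketch (not part of the build): the exclusive bit
	 * operations fail instead of spinning when the bit is already in
	 * the requested state; the slln/ldn/casn macros select the
	 * natural (64-bit on sparcv9) register width.  In rough C (the
	 * _sketch name is hypothetical):
	 *
	 *	int
	 *	atomic_set_long_excl_sketch(volatile ulong_t *p, uint_t b)
	 *	{
	 *		ulong_t bit = 1UL << b;
	 *		ulong_t old = *p, seen;
	 *
	 *		for (;;) {
	 *			if (old & bit)
	 *				return (-1);	already set
	 *			seen = atomic_cas_ulong(p, old, old | bit);
	 *			if (seen == old)
	 *				return (0);
	 *			old = seen;
	 *		}
	 *	}
	 */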

	ENTRY(atomic_clear_long_excl)
	mov	1, %o3
	slln	%o3, %o1, %o3
	ldn	[%o0], %o2
1:
	andncc	%o3, %o2, %g0		! test if the bit is clear
	bnz,a,pn %ncc, 2f		! if so, then fail out
	  mov	-1, %o0
	andn	%o2, %o3, %o4		! clear the bit, and try to commit it
	casn	[%o0], %o2, %o4
	cmp	%o2, %o4
	bne,a,pn %ncc, 1b		! failed to commit, try again
	  mov	%o4, %o2
	mov	%g0, %o0
2:
	retl
	nop
	SET_SIZE(atomic_clear_long_excl)

#if !defined(_KERNEL)

	/*
	 * Spitfires and Blackbirds have a problem with membars in the
	 * delay slot (SF_ERRATA_51).  For safety's sake, we assume
	 * that the whole world needs the workaround.
	 */
	ENTRY(membar_enter)
	membar	#StoreLoad|#StoreStore
	retl
	nop
	SET_SIZE(membar_enter)

	ENTRY(membar_exit)
	membar	#LoadStore|#StoreStore
	retl
	nop
	SET_SIZE(membar_exit)

	ENTRY(membar_producer)
	membar	#StoreStore
	retl
	nop
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	membar	#LoadLoad
	retl
	nop
	SET_SIZE(membar_consumer)

#endif	/* !_KERNEL */
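
	/*
	 * Illustrative note (not part of the build): membar_producer and
	 * membar_consumer pair up across threads.  A producer publishes
	 * data and then a flag:
	 *
	 *	data = value;
	 *	membar_producer();
	 *	flag = 1;
	 *
	 * and a consumer that observes the flag set issues
	 * membar_consumer() before reading the data, so its loads are
	 * not reordered ahead of the flag check.
	 */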