xref: /titanic_51/usr/src/uts/sparc/v9/ml/lock_prim.s (revision bfb9a534bea88594587923fa8ad3424b6495849c)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28#if defined(lint)
29#include <sys/types.h>
30#include <sys/thread.h>
31#include <sys/cpuvar.h>
32#else	/* lint */
33#include "assym.h"
34#endif	/* lint */
35
36#include <sys/t_lock.h>
37#include <sys/mutex.h>
38#include <sys/mutex_impl.h>
39#include <sys/rwlock_impl.h>
40#include <sys/asm_linkage.h>
41#include <sys/machlock.h>
42#include <sys/machthread.h>
43#include <sys/lockstat.h>
44
45/* #define DEBUG */
46
47#ifdef DEBUG
48#include <sys/machparam.h>
49#endif /* DEBUG */
50
51/************************************************************************
52 *		ATOMIC OPERATIONS
53 */
54
55/*
56 * uint8_t	ldstub(uint8_t *cp)
57 *
58 * Store 0xFF at the specified location, and return its previous content.
59 */
60
61#if defined(lint)
62uint8_t
63ldstub(uint8_t *cp)
64{
65	uint8_t	rv;
66	rv = *cp;
67	*cp = 0xFF;
68	return rv;
69}
70#else	/* lint */
71

	!
	! uint8_t ldstub(uint8_t *cp)
	!
	! Atomically store 0xFF into *cp and return the byte that was
	! there before.  The single ldstub instruction performs the whole
	! read-modify-write atomically, in the delay slot of the return.
	!
72	ENTRY(ldstub)
73	retl
74	ldstub	[%o0], %o0		! delay: %o0 = old *cp; *cp = 0xFF
75	SET_SIZE(ldstub)
76
77#endif	/* lint */
78
79/************************************************************************
80 *		MEMORY BARRIERS -- see atomic.h for full descriptions.
81 */
82
83#if defined(lint)
84
85void
86membar_enter(void)
87{}
88
89void
90membar_exit(void)
91{}
92
93void
94membar_producer(void)
95{}
96
97void
98membar_consumer(void)
99{}
100
101#else	/* lint */
102
	!
	! SF_ERRATA_51 workaround: on affected Spitfire (UltraSPARC) parts
	! returning with "retl; membar" directly is unsafe (NOTE(review):
	! see the processor's erratum 51 documentation for the precise
	! conditions -- confirm), so each membar routine instead branches
	! to the shared membar_return stub below to return, with the
	! membar executing in the branch's delay slot.
	!
103#ifdef SF_ERRATA_51
104	.align 32
105	ENTRY(membar_return)
106	retl
107	nop
108	SET_SIZE(membar_return)
109#define	MEMBAR_RETURN	ba,pt %icc, membar_return
110#else
111#define	MEMBAR_RETURN	retl
112#endif
113
	! membar_enter: order prior stores before later loads and stores.
114	ENTRY(membar_enter)
115	MEMBAR_RETURN			! return; membar runs in the delay slot
116	membar	#StoreLoad|#StoreStore
117	SET_SIZE(membar_enter)
118
	! membar_exit: order prior loads and stores before later stores.
119	ENTRY(membar_exit)
120	MEMBAR_RETURN
121	membar	#LoadStore|#StoreStore
122	SET_SIZE(membar_exit)
123
	! membar_producer: order prior stores before later stores.
124	ENTRY(membar_producer)
125	MEMBAR_RETURN
126	membar	#StoreStore
127	SET_SIZE(membar_producer)
128
	! membar_consumer: order prior loads before later loads.
129	ENTRY(membar_consumer)
130	MEMBAR_RETURN
131	membar	#LoadLoad
132	SET_SIZE(membar_consumer)
133
134#endif	/* lint */
135
136/************************************************************************
137 *		MINIMUM LOCKS
138 */
139
140#if defined(lint)
141
142/*
143 * lock_try(lp), ulock_try(lp)
144 *	- returns non-zero on success.
145 *	- doesn't block interrupts so don't use this to spin on a lock.
146 *	- uses "0xFF is busy, anything else is free" model.
147 *
148 *      ulock_try() is for a lock in the user address space.
149 *      For all V7/V8 sparc systems they are the same since the kernel and
150 *      user are mapped in the user's context.
151 *      For V9 platforms the lock_try and ulock_try are different
152 *      implementations.
 */
153
154int
155lock_try(lock_t *lp)
156{
157	return (0xFF ^ ldstub(lp));
158}
159
160int
161lock_spin_try(lock_t *lp)
162{
163	return (0xFF ^ ldstub(lp));
164}
165
166void
167lock_set(lock_t *lp)
168{
169	extern void lock_set_spin(lock_t *);
170
171	if (!lock_try(lp))
172		lock_set_spin(lp);
173	membar_enter();
174}
175
176void
177lock_clear(lock_t *lp)
178{
179	membar_exit();
180	*lp = 0;
181}
182
183int
184ulock_try(lock_t *lp)
185{
186	return (0xFF ^ ldstub(lp));
187}
188
189void
190ulock_clear(lock_t *lp)
191{
192	membar_exit();
193	*lp = 0;
194}
195
196#else	/* lint */
197
	!
	! int lock_try(lock_t *lp)
	! Grab the byte lock with ldstub; return (lp | 1) != 0 on success,
	! 0 if the lock was already held.  The patch-point label below is
	! rewritten at runtime by lockstat_hot_patch(), so the exact
	! instruction layout here matters.
	!
198	.align	32
199	ENTRY(lock_try)
200	ldstub	[%o0], %o1		! try to set lock, get value in %o1
201	brnz,pn	%o1, 1f			! already held: fail below
202	membar	#LoadLoad		! delay: acquire ordering on success
203.lock_try_lockstat_patch_point:
204	retl
205	or	%o0, 1, %o0		! ensure lo32 != 0
2061:
207	retl
208	clr	%o0			! return 0: lock was busy
209	SET_SIZE(lock_try)
210
	!
	! int lock_spin_try(lock_t *lp)
	! Identical to lock_try() except there is no lockstat patch point,
	! so lockstat never traces acquisitions made through this entry.
	!
211	.align	32
212	ENTRY(lock_spin_try)
213	ldstub	[%o0], %o1		! try to set lock, get value in %o1
214	brnz,pn	%o1, 1f			! already held: fail below
215	membar	#LoadLoad		! delay: acquire ordering on success
216	retl
217	or	%o0, 1, %o0		! ensure lo32 != 0
2181:
219	retl
220	clr	%o0			! return 0: lock was busy
221	SET_SIZE(lock_spin_try)
222
	!
	! void lock_set(lock_t *lp)
	! Fast path: one ldstub.  If the lock was already held, tail-jump
	! to lock_set_spin() (C) which spins until it gets the lock.
	!
223	.align	32
224	ENTRY(lock_set)
225	ldstub	[%o0], %o1		! try to grab the byte lock
226	brnz,pn	%o1, 1f			! go to C for the hard case
227	membar	#LoadLoad		! delay: acquire ordering on success
228.lock_set_lockstat_patch_point:
229	retl
230	nop
2311:
232	sethi	%hi(lock_set_spin), %o2	! load up for jump to C
233	jmp	%o2 + %lo(lock_set_spin)
234	nop				! delay: do nothing
235	SET_SIZE(lock_set)
236
	!
	! void lock_clear(lock_t *lp)
	! Release: membar_exit()-equivalent barrier, then clear the byte
	! in the delay slot of the return.
	!
237	ENTRY(lock_clear)
238	membar	#LoadStore|#StoreStore	! drain critical section accesses
239.lock_clear_lockstat_patch_point:
240	retl
241	clrb	[%o0]			! delay: *lp = 0
242	SET_SIZE(lock_clear)
243
	!
	! int ulock_try(lock_t *lp)
	! Like lock_try(), but the lock byte lives in the user address
	! space, so the ldstub goes through ASI_USER.
	!
244	.align	32
245	ENTRY(ulock_try)
246	ldstuba	[%o0]ASI_USER, %o1	! try to set lock, get value in %o1
247	xor	%o1, 0xff, %o0		! return non-zero if success
248	retl
249	  membar	#LoadLoad	! delay: acquire ordering
250	SET_SIZE(ulock_try)
251
	!
	! void ulock_clear(lock_t *lp)
	! Release a user-space byte lock: barrier, then clear via ASI_USER.
	!
252	ENTRY(ulock_clear)
253	membar  #LoadStore|#StoreStore	! drain critical section accesses
254	retl
255	  stba	%g0, [%o0]ASI_USER	! clear lock
256	SET_SIZE(ulock_clear)
257
258#endif	/* lint */
259
260
261/*
262 * lock_set_spl(lp, new_pil, *old_pil_addr)
263 *	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
264 */
265
266#if defined(lint)
267
268/* ARGSUSED */
269void
270lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
271{
272	extern int splr(int);
273	extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
274	int old_pil;
275
276	old_pil = splr(new_pil);
277	if (!lock_try(lp)) {
278		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
279	} else {
280		*old_pil_addr = (u_short)old_pil;
281		membar_enter();
282	}
283}
284
285#else	/* lint */
286
	!
	! lock_set_spl: raise %pil to new_pil (%o1) if it is currently
	! lower, then try the byte lock.  On success store the original
	! pil through old_pil_addr (%o2); on contention tail-jump to
	! lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil) with the
	! original pil still in %o3 (its 4th argument register).
	!
287	ENTRY(lock_set_spl)
288	rdpr	%pil, %o3			! %o3 = current pil
289	cmp	%o3, %o1			! is current pil high enough?
290	bl,a,pt %icc, 1f			! if not, write %pil in delay
291	wrpr	%g0, %o1, %pil			! (annulled: only if raising)
2921:
293	ldstub	[%o0], %o4			! try the lock
294	brnz,pn	%o4, 2f				! go to C for the miss case
295	membar	#LoadLoad			! delay: acquire ordering
296.lock_set_spl_lockstat_patch_point:
297	retl
298	sth	%o3, [%o2]			! delay - save original pil
2992:
300	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
301	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
302	nop					! delay: do nothing
303	SET_SIZE(lock_set_spl)
304
305#endif	/* lint */
306
307/*
308 * lock_clear_splx(lp, s)
309 */
310
311#if defined(lint)
312
313void
314lock_clear_splx(lock_t *lp, int s)
315{
316	extern void splx(int);
317
318	lock_clear(lp);
319	splx(s);
320}
321
322#else	/* lint */
323
	!
	! lock_clear_splx: release the byte lock, then set %pil to the
	! greater of the caller-saved value s (%o1) and this CPU's
	! cpu_base_spl, i.e. never drop below the CPU's base priority.
	!
324	ENTRY(lock_clear_splx)
325	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
326	membar	#LoadStore|#StoreStore		! drain critical section
327	ld	[%o2 + CPU_BASE_SPL], %o2	! %o2 = CPU's base spl
328	clrb	[%o0]				! clear lock
329	cmp	%o2, %o1			! compare new to base
330	movl	%xcc, %o1, %o2			! use new pri if base is less
331.lock_clear_splx_lockstat_patch_point:
332	retl
333	wrpr	%g0, %o2, %pil			! delay: set resulting pil
334	SET_SIZE(lock_clear_splx)
335
336#endif	/* lint */
337
338/*
339 * mutex_enter() and mutex_exit().
340 *
341 * These routines handle the simple cases of mutex_enter() (adaptive
342 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
343 * If anything complicated is going on we punt to mutex_vector_enter().
344 *
345 * mutex_tryenter() is similar to mutex_enter() but returns zero if
346 * the lock cannot be acquired, nonzero on success.
347 *
348 * If mutex_exit() gets preempted in the window between checking waiters
349 * and clearing the lock, we can miss wakeups.  Disabling preemption
350 * in the mutex code is prohibitively expensive, so instead we detect
351 * mutex preemption by examining the trapped PC in the interrupt path.
352 * If we interrupt a thread in mutex_exit() that has not yet cleared
353 * the lock, pil_interrupt() resets its PC back to the beginning of
354 * mutex_exit() so it will check again for waiters when it resumes.
355 *
356 * The lockstat code below is activated when the lockstat driver
357 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
358 * Note that we don't need to test lockstat_event_mask here -- we won't
359 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
360 */
361
362#if defined (lint)

/* Lint prototypes only; the real implementations are the assembly below. */
363
364/* ARGSUSED */
365void
366mutex_enter(kmutex_t *lp)
367{}
368
369/* ARGSUSED */
370int
371mutex_tryenter(kmutex_t *lp)
372{ return (0); }
373
374/* ARGSUSED */
375void
376mutex_exit(kmutex_t *lp)
377{}
378
379/* ARGSUSED */
380void *
381mutex_owner_running(mutex_impl_t *lp)
382{ return (NULL); }
383
384#else
	!
	! mutex_enter fast path: casx the owner word from 0 to curthread.
	! Any non-zero old value (held, or not a simple adaptive lock)
	! punts to mutex_vector_enter() in C.  The patch-point label is
	! rewritten at runtime by lockstat_hot_patch().
	!
385	.align	32
386	ENTRY(mutex_enter)
387	mov	THREAD_REG, %o1
388	casx	[%o0], %g0, %o1			! try to acquire as adaptive
389	brnz,pn	%o1, 1f				! locked or wrong type
390	membar	#LoadLoad			! delay: acquire ordering
391.mutex_enter_lockstat_patch_point:
392	retl
393	nop
3941:
395	sethi	%hi(mutex_vector_enter), %o2	! load up for jump to C
396	jmp	%o2 + %lo(mutex_vector_enter)
397	nop
398	SET_SIZE(mutex_enter)
399
	!
	! mutex_tryenter fast path: same casx as mutex_enter(), but on
	! failure goes to mutex_vector_tryenter() which can return 0
	! instead of blocking.  Returns non-zero (lp | 1) on success.
	!
400	ENTRY(mutex_tryenter)
401	mov	THREAD_REG, %o1
402	casx	[%o0], %g0, %o1			! try to acquire as adaptive
403	brnz,pn	%o1, 1f				! locked or wrong type continue
404	membar	#LoadLoad			! delay: acquire ordering
405.mutex_tryenter_lockstat_patch_point:
406	retl
407	or	%o0, 1, %o0			! ensure lo32 != 0
4081:
409	sethi	%hi(mutex_vector_tryenter), %o2		! hi bits
410	jmp	%o2 + %lo(mutex_vector_tryenter)	! go to C
411	nop
412	SET_SIZE(mutex_tryenter)
413
	!
	! mutex_adaptive_tryenter: like mutex_tryenter() but never falls
	! back to C -- any contention or non-adaptive lock word simply
	! returns 0.  No lint stub and no lockstat patch point here.
	!
414	ENTRY(mutex_adaptive_tryenter)
415	mov	THREAD_REG, %o1
416	casx	[%o0], %g0, %o1			! try to acquire as adaptive
417	brnz,pn	%o1, 0f				! locked or wrong type
418	membar	#LoadLoad			! delay: acquire ordering
419	retl
420	or	%o0, 1, %o0			! ensure lo32 != 0
4210:
422	retl
423	mov	%g0, %o0			! return 0: failed
424	SET_SIZE(mutex_adaptive_tryenter)
425
426	! these need to be together and cache aligned for performance.
427	.align 64
428	.global	mutex_exit_critical_size
429	.global	mutex_exit_critical_start
430	.global mutex_owner_running_critical_size
431	.global mutex_owner_running_critical_start
432
433mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start
434
435	.align	32
436
	!
	! mutex_exit fast path.  The code between mutex_exit_critical_start
	! and .mutex_exit_critical_end must stay exactly as laid out:
	! pil_interrupt() compares a trapped PC against this range and
	! restarts the routine from the top so a preempted exit re-checks
	! for waiters (see the block comment above the mutex section).
	!
437	ENTRY(mutex_exit)
438mutex_exit_critical_start:		! If we are interrupted, restart here
439	ldn	[%o0], %o1		! get the owner field
440	membar	#LoadStore|#StoreStore	! release ordering before clearing
441	cmp	THREAD_REG, %o1		! do we own lock with no waiters?
442	be,a,pt	%ncc, 1f		! if so, drive on ...
443	stn	%g0, [%o0]		! delay: clear lock if we owned it
444.mutex_exit_critical_end:		! for pil_interrupt() hook
445	ba,a,pt	%xcc, mutex_vector_exit	! go to C for the hard cases
4461:
447.mutex_exit_lockstat_patch_point:
448	retl
449	nop
450	SET_SIZE(mutex_exit)
451
452mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start
453
454	.align  32
455
	!
	! mutex_owner_running(lp): return the owner's CPU pointer if the
	! owning thread is the thread currently running on its CPU
	! (owner == owner->t_cpu->cpu_thread), else return 0.  The range
	! up to .mutex_owner_running_critical_end is restartable from
	! pil_interrupt(), like mutex_exit above.
	!
456	ENTRY(mutex_owner_running)
457mutex_owner_running_critical_start:	! If interrupted restart here
458	ldn	[%o0], %o1		! get the owner field
459	and	%o1, MUTEX_THREAD, %o1	! remove the waiters bit if any
460	brz,pn	%o1, 1f			! no owner: return 0 below
461	nop
462	ldn	[%o1+T_CPU], %o2	! get owner->t_cpu
463	ldn	[%o2+CPU_THREAD], %o3	! get owner->t_cpu->cpu_thread
464.mutex_owner_running_critical_end:	! for pil_interrupt() hook
465	cmp	%o1, %o3		! owner == running thread?
466	be,a,pt	%xcc, 2f		! yes, go return cpu
467	nop
4681:
469	retl
470	mov	%g0, %o0		! return 0 (owner not running)
4712:
472	retl
473	mov	%o2, %o0		! owner running, return cpu
474	SET_SIZE(mutex_owner_running)
475
476#endif	/* lint */
477
478/*
479 * rw_enter() and rw_exit().
480 *
481 * These routines handle the simple cases of rw_enter (write-locking an unheld
482 * lock or read-locking a lock that's neither write-locked nor write-wanted)
483 * and rw_exit (no waiters or not the last reader).  If anything complicated
484 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
485 */
486#if defined(lint)

/* Lint prototypes only; the real implementations are the assembly below. */
487
488/* ARGSUSED */
489void
490rw_enter(krwlock_t *lp, krw_t rw)
491{}
492
493/* ARGSUSED */
494void
495rw_exit(krwlock_t *lp)
496{}
497
498#else
499
	!
	! rw_enter fast paths.  Reader: cas-loop adding RW_READ_LOCK to the
	! hold count while the lock is not write-locked or write-wanted.
	! Writer: casx 0 -> (curthread | RW_WRITE_LOCKED).  Contention in
	! either path tail-jumps to rw_enter_sleep().  On sun4v a failed
	! reader cas goes straight to rw_enter_sleep() instead of retrying.
	!
500	.align	16
501	ENTRY(rw_enter)
502	cmp	%o1, RW_WRITER			! entering as writer?
503	be,a,pn	%icc, 2f			! if so, go do it ...
504	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
505	ldn	[%o0], %o4			! %o4 = old lock value
5061:
507	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
508	bz,pt	%xcc, 3f			! if not claimed, grab read lock
509	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
510	sethi	%hi(rw_enter_sleep), %o2	! load up jump
511	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
512	nop					! delay: do nothing
5133:
514	casx	[%o0], %o4, %o5			! try to grab read lock
515	cmp	%o4, %o5			! did we get it?
516#ifdef sun4v
517	be,a,pt %xcc, 0f
518	membar  #LoadLoad			! delay: acquire on success
519	sethi	%hi(rw_enter_sleep), %o2	! load up jump
520	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
521	nop					! delay: do nothing
5220:
523#else /* sun4v */
524	bne,pn	%xcc, 1b			! if not, try again
525	mov	%o5, %o4			! delay: %o4 = old lock value
526	membar	#LoadLoad			! acquire ordering on success
527#endif /* sun4v */
528.rw_read_enter_lockstat_patch_point:
529	retl
530	nop
5312:
532	casx	[%o0], %g0, %o5			! try to grab write lock
533	brz,pt %o5, 4f				! branch around if we got it
534	membar	#LoadLoad			! done regardless of where we go
535	sethi	%hi(rw_enter_sleep), %o2
536	jmp	%o2 + %lo(rw_enter_sleep)	! jump to rw_enter_sleep if not
537	nop					! delay: do nothing
5384:
539.rw_write_enter_lockstat_patch_point:
540	retl
541	nop
542	SET_SIZE(rw_enter)
543
	!
	! rw_exit fast paths.  Reader: drop one RW_READ_LOCK hold via casx;
	! a last reader with waiters goes to rw_exit_wakeup().  Writer:
	! casx (curthread | RW_WRITE_LOCKED) -> 0.  Any cas failure also
	! goes to rw_exit_wakeup() in C.
	!
544	.align	16
545	ENTRY(rw_exit)
546	ldn	[%o0], %o4			! %o4 = old lock value
547	membar	#LoadStore|#StoreStore		! membar_exit()
548	subcc	%o4, RW_READ_LOCK, %o5		! %o5 = new lock value if reader
549	bnz,pn	%xcc, 2f			! single reader, no waiters?
550	clr	%o1				! delay: %o1 = 0 (writer new value)
5511:
552	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
553	casx	[%o0], %o4, %o5			! try to drop lock
554	cmp	%o4, %o5			! did we succeed?
555	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
556	nop					! delay: do nothing
557.rw_read_exit_lockstat_patch_point:
558	retl
559	nop					! delay: do nothing
5602:
561	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
562	bnz,a,pt %xcc, 3f			! writer: release below
563	or	THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
564	cmp	%o5, RW_READ_LOCK		! would lock still be held?
565	bge,pt	%xcc, 1b			! if so, go ahead and drop it
566	nop
567	ba,pt	%xcc, rw_exit_wakeup		! otherwise, wake waiters
568	nop
5693:
570	casx	[%o0], %o4, %o1			! try to drop write lock
571	cmp	%o4, %o1			! did we succeed?
572	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
573	nop
574.rw_write_exit_lockstat_patch_point:
575	retl
576	nop
577	SET_SIZE(rw_exit)
578
579#endif
580
581#if defined(lint)

/* Lint prototype only; the real hot-patch machinery is below. */
582
583void
584lockstat_hot_patch(void)
585{}
586
587#else
588
	!
	! RETL/NOP/BA are the binary encodings of the corresponding SPARC
	! instruction words; DISP22 masks a 22-bit branch displacement and
	! ANNUL is the annul bit of a branch instruction word.
	!
589#define	RETL			0x81c3e008
590#define	NOP			0x01000000
591#define BA			0x10800000
592
593#define	DISP22			((1 << 22) - 1)
594#define	ANNUL			0x20000000
595
	!
	! HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs) emits two
	! pieces of code:
	!
	!  - An inline lockstat trampoline (between the "ba 1f" and label
	!    1), entered only via the branch patched in at addr: it checks
	!    lockstat_probemap[event], bumps T_LOCKSTAT around an indirect
	!    call through lockstat_probe with args (probe id, lock in %i0,
	!    rs), then returns 1 via "restore %g0, 1, %o0" for the
	!    try-style callers.
	!
	!  - Patch-time code (from label 1): using the trampoline address
	!    captured by "rd %pc", it builds a "ba <trampoline>" word --
	!    adding the annul bit when requested -- and calls
	!    hot_patch_kernel_text() to write either that branch (event
	!    enabled in lockstat_probemap, %i0) or normal_instr (disabled)
	!    over the 4 bytes at the patch point addr.
	!
	! New comment lines cannot be inserted inside the backslash-
	! continued macro body, so the per-line commentary lives here.
	!
596#define	HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)		\
597	ba	1f;							\
598	rd	%pc, %o0;						\
599	save	%sp, -SA(MINFRAME), %sp;				\
600	set	lockstat_probemap, %l1;					\
601	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
602	brz,pn	%o0, 0f;						\
603	ldub	[THREAD_REG + T_LOCKSTAT], %l0;				\
604	add	%l0, 1, %l2;						\
605	stub	%l2, [THREAD_REG + T_LOCKSTAT];				\
606	set	lockstat_probe, %g1;					\
607	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
608	brz,a,pn %o0, 0f;						\
609	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
610	ldn	[%g1], %g2;						\
611	mov	rs, %o2;						\
612	jmpl	%g2, %o7;						\
613	mov	%i0, %o1;						\
614	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
6150:	ret;								\
616	restore	%g0, 1, %o0;	/* for mutex_tryenter / lock_try */	\
6171:	set	addr, %o1;						\
618	sub	%o0, %o1, %o0;						\
619	srl	%o0, 2, %o0;						\
620	inc	%o0;							\
621	set	DISP22, %o1;						\
622	and	%o1, %o0, %o0;						\
623	set	BA, %o1;						\
624	or	%o1, %o0, %o0;						\
625	sethi	%hi(annul), %o2;					\
626	add	%o0, %o2, %o2;						\
627	set	addr, %o0;						\
628	set	normal_instr, %o1;					\
629	ld	[%i0 + (event * DTRACE_IDSIZE)], %o3;			\
630	tst	%o3;							\
631	movnz	%icc, %o2, %o1;						\
632	call	hot_patch_kernel_text;					\
633	mov	4, %o2;							\
634	membar	#Sync
635
636#define	HOT_PATCH(addr, event, normal_instr)	\
637	HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)
638
639#define	HOT_PATCH_ARG(addr, event, normal_instr, arg)	\
640	HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)
641
642#define HOT_PATCH_ANNULLED(addr, event, normal_instr)	\
643	HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)
644
	!
	! lockstat_hot_patch(): for each lockstat patch point in this
	! file, write either a branch into its HOT_PATCH trampoline (when
	! the event is enabled in lockstat_probemap[]) or the normal
	! instruction (RETL).  The ANNULLED variants set the annul bit in
	! the patched branch so the instruction sitting in the patch
	! point's delay slot is skipped when the branch is taken; the ARG
	! variants pass an explicit third probe argument instead of %i1.
	!
645	ENTRY(lockstat_hot_patch)
646	save	%sp, -SA(MINFRAME), %sp
647	set	lockstat_probemap, %i0		! %i0 = probe map, used by macros
648	HOT_PATCH(.mutex_enter_lockstat_patch_point,
649		LS_MUTEX_ENTER_ACQUIRE, RETL)
650	HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
651		LS_MUTEX_TRYENTER_ACQUIRE, RETL)
652	HOT_PATCH(.mutex_exit_lockstat_patch_point,
653		LS_MUTEX_EXIT_RELEASE, RETL)
654	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
655		LS_RW_ENTER_ACQUIRE, RETL)
656	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
657		LS_RW_ENTER_ACQUIRE, RETL)
658	HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
659		LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
660	HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
661		LS_RW_EXIT_RELEASE, RETL, RW_READER)
662	HOT_PATCH(.lock_set_lockstat_patch_point,
663		LS_LOCK_SET_ACQUIRE, RETL)
664	HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
665		LS_LOCK_TRY_ACQUIRE, RETL)
666	HOT_PATCH(.lock_clear_lockstat_patch_point,
667		LS_LOCK_CLEAR_RELEASE, RETL)
668	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
669		LS_LOCK_SET_SPL_ACQUIRE, RETL)
670	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
671		LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
672	ret
673	restore
674	SET_SIZE(lockstat_hot_patch)
675
676#endif	/* lint */
677
678/*
679 * asm_mutex_spin_enter(mutex_t *)
680 *
681 * For use by assembly interrupt handler only.
682 * Does not change spl, since the interrupt handler is assumed to be
683 * running at high level already.
684 * Traps may be off, so cannot panic.
685 * Does not keep statistics on the lock.
686 *
687 * Entry:	%l6 - points to mutex
688 *		%l7 - address of call (returns to %l7+8)
689 * Uses:	%l6, %l5
690 */
691#ifndef lint
	! See block comment above: %l6 = mutex, returns via %l7 + 8,
	! %l5 is scratch.  Non-standard linkage -- assembly callers only.
692	.align 16
693	ENTRY_NP(asm_mutex_spin_enter)
694	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
6951:
696	tst	%l5
697	bnz	3f			! lock already held - go spin
698	nop
6992:
700	jmp	%l7 + 8			! return
701	membar	#LoadLoad		! delay: acquire ordering
702	!
703	! Spin on lock without using an atomic operation to prevent the caches
704	! from unnecessarily moving ownership of the line around.
705	!
7063:
707	ldub	[%l6 + M_SPINLOCK], %l5
7084:
709	tst	%l5
710	bz,a	1b			! lock appears to be free, try again
711	ldstub	[%l6 + M_SPINLOCK], %l5	! delay slot - try to set lock
712
	! Lock still held: if the system has panicked, stop spinning.
713	sethi	%hi(panicstr) , %l5
714	ldn	[%l5 + %lo(panicstr)], %l5
715	tst	%l5
716	bnz	2b			! after panic, feign success
717	nop
718	b	4b			! keep spinning on the cached byte
719	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
720	SET_SIZE(asm_mutex_spin_enter)
721#endif /* lint */
722
723/*
724 * asm_mutex_spin_exit(mutex_t *)
725 *
726 * For use by assembly interrupt handler only.
727 * Does not change spl, since the interrupt handler is assumed to be
728 * running at high level already.
729 *
730 * Entry:	%l6 - points to mutex
731 *		%l7 - address of call (returns to %l7+8)
732 * Uses:	none
733 */
734#ifndef lint
	! See block comment above: %l6 = mutex, returns via %l7 + 8.
	! Release: drain prior accesses, then clear the lock byte in the
	! delay slot of the return jump.
735	ENTRY_NP(asm_mutex_spin_exit)
736	membar	#LoadStore|#StoreStore
737	jmp	%l7 + 8			! return
738	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
739	SET_SIZE(asm_mutex_spin_exit)
740#endif /* lint */
741
742/*
743 * thread_onproc()
744 * Set thread in onproc state for the specified CPU.
745 * Also set the thread lock pointer to the CPU's onproc lock.
746 * Since the new lock isn't held, the store ordering is important.
747 * If not done in assembler, the compiler could reorder the stores.
748 */
749#if defined(lint)
750
751void
752thread_onproc(kthread_id_t t, cpu_t *cp)
753{
754	t->t_state = TS_ONPROC;
755	t->t_lockp = &cp->cpu_thread_lock;
756}
757
758#else	/* lint */
759
	! Hand-coded so the compiler cannot reorder the two stores:
	! t_state must be written before t_lockp (see block comment above).
760	ENTRY(thread_onproc)
761	set	TS_ONPROC, %o2		! TS_ONPROC state
762	st	%o2, [%o0 + T_STATE]	! store state
763	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
764	retl				! return
765	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
766	SET_SIZE(thread_onproc)
767
768#endif	/* lint */
769
770/* delay function used in some mutex code - just do 3 nop cas ops */
771#if defined(lint)
772
773/* ARGSUSED */
774void
775cas_delay(void *addr)
776{}
777#else	/* lint */
	! Burn a short, roughly fixed amount of time with three casx
	! operations against addr; the %g0 compare/swap values leave both
	! memory and the register state unchanged.
778	ENTRY(cas_delay)
779	casx [%o0], %g0, %g0
780	casx [%o0], %g0, %g0
781	retl
782	casx [%o0], %g0, %g0
783	SET_SIZE(cas_delay)
784#endif	/* lint */
785
786#if defined(lint)
787
788/*
789 * alternative delay function for some niagara processors.   The rd
790 * instruction uses less resources than casx on those cpus.
791 */
792/* ARGSUSED */
793void
794rdccr_delay(void)
795{}
796#else	/* lint */
	! Three reads of %ccr into %g0: pure time-burn with no visible
	! side effects (cheaper than casx on the CPUs noted above).
797	ENTRY(rdccr_delay)
798	rd	%ccr, %g0
799	rd	%ccr, %g0
800	retl
801	rd	%ccr, %g0
802	SET_SIZE(rdccr_delay)
803#endif	/* lint */
804
805/*
806 * mutex_delay_default(void)
807 * Spins for approx a few hundred processor cycles and returns to caller.
808 */
809#if defined(lint)
810
811void
812mutex_delay_default(void)
813{}
814
815#else	/* lint */
816
	! Count 72 down to zero in a two-instruction loop, then return.
	! The constant calibrates the "few hundred cycles" promised in
	! the comment above; the decrement sits in the branch delay slot.
817	ENTRY(mutex_delay_default)
818	mov	72,%o0
8191:	brgz	%o0, 1b			! loop while %o0 > 0
820	dec	%o0			! delay: %o0--
821	retl
822	nop
823	SET_SIZE(mutex_delay_default)
824
825#endif  /* lint */
826