xref: /titanic_52/usr/src/uts/sparc/v9/ml/lock_prim.s (revision 4bff34e37def8a90f9194d81bc345c52ba20086a)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#pragma ident	"%Z%%M%	%I%	%E% SMI"
27
28#if defined(lint)
29#include <sys/types.h>
30#include <sys/thread.h>
31#include <sys/cpuvar.h>
32#else	/* lint */
33#include "assym.h"
34#endif	/* lint */
35
36#include <sys/t_lock.h>
37#include <sys/mutex.h>
38#include <sys/mutex_impl.h>
39#include <sys/rwlock_impl.h>
40#include <sys/asm_linkage.h>
41#include <sys/machlock.h>
42#include <sys/machthread.h>
43#include <sys/lockstat.h>
44
45/* #define DEBUG */
46
47#ifdef DEBUG
48#include <sys/machparam.h>
49#endif /* DEBUG */
50
51/************************************************************************
52 *		ATOMIC OPERATIONS
53 */
54
55/*
56 * uint8_t	ldstub(uint8_t *cp)
57 *
58 * Store 0xFF at the specified location, and return its previous content.
59 */
60
61#if defined(lint)
62uint8_t
63ldstub(uint8_t *cp)
64{
	/* lint model only: the real version is a single atomic ldstub */
65	uint8_t	rv;
66	rv = *cp;
67	*cp = 0xFF;
68	return rv;
69}
70#else	/* lint */

72	ENTRY(ldstub)
73	retl
74	ldstub	[%o0], %o0		! delay: atomically *cp = 0xFF, old byte -> %o0
75	SET_SIZE(ldstub)

77#endif	/* lint */
78
79/************************************************************************
80 *		MEMORY BARRIERS -- see atomic.h for full descriptions.
81 */
82
83#if defined(lint)

85void
86membar_enter(void)
87{}

89void
90membar_exit(void)
91{}

93void
94membar_producer(void)
95{}

97void
98membar_consumer(void)
99{}

101#else	/* lint */

	!
	! SF_ERRATA_51 workaround: each membar_* routine returns by branching
	! to a common membar_return stub instead of using its own retl, so the
	! membar sits in the delay slot of the "ba" rather than of a retl.
	! NOTE(review): presumed intent of the erratum -- confirm against the
	! cpu errata documentation before relying on this description.
	!
103#ifdef SF_ERRATA_51
104	.align 32
105	ENTRY(membar_return)
106	retl
107	nop
108	SET_SIZE(membar_return)
109#define	MEMBAR_RETURN	ba,pt %icc, membar_return
110#else
111#define	MEMBAR_RETURN	retl
112#endif

114	ENTRY(membar_enter)
115	MEMBAR_RETURN
116	membar	#StoreLoad|#StoreStore	! delay: barrier for lock entry
117	SET_SIZE(membar_enter)

119	ENTRY(membar_exit)
120	MEMBAR_RETURN
121	membar	#LoadStore|#StoreStore	! delay: barrier for lock release
122	SET_SIZE(membar_exit)

124	ENTRY(membar_producer)
125	MEMBAR_RETURN
126	membar	#StoreStore		! delay: order stores vs later stores
127	SET_SIZE(membar_producer)

129	ENTRY(membar_consumer)
130	MEMBAR_RETURN
131	membar	#LoadLoad		! delay: order loads vs later loads
132	SET_SIZE(membar_consumer)

134#endif	/* lint */
135
136/************************************************************************
137 *		MINIMUM LOCKS
138 */
139
140#if defined(lint)

142/*
143 * lock_try(lp), ulock_try(lp)
144 *	- returns non-zero on success.
145 *	- doesn't block interrupts so don't use this to spin on a lock.
146 *	- uses "0xFF is busy, anything else is free" model.
147 *
148 *      ulock_try() is for a lock in the user address space.
149 *      For all V7/V8 sparc systems they are the same since the kernel and
150 *      user are mapped in the user's context.
151 *      For V9 platforms the lock_try and ulock_try are different impl.
152 */

154int
155lock_try(lock_t *lp)
156{
157	return (0xFF ^ ldstub(lp));
158}

160int
161lock_spin_try(lock_t *lp)
162{
163	return (0xFF ^ ldstub(lp));
164}

166void
167lock_set(lock_t *lp)
168{
169	extern void lock_set_spin(lock_t *);

171	if (!lock_try(lp))
172		lock_set_spin(lp);
173	membar_enter();
174}

176void
177lock_clear(lock_t *lp)
178{
179	membar_exit();
180	*lp = 0;
181}

183int
184ulock_try(lock_t *lp)
185{
186	return (0xFF ^ ldstub(lp));
187}

189void
190ulock_clear(lock_t *lp)
191{
192	membar_exit();
193	*lp = 0;
194}

196#else	/* lint */

198	.align	32
199	ENTRY(lock_try)
200	ldstub	[%o0], %o1		! try to set lock, get value in %o1
201	brnz,pn	%o1, 1f			! was already held (0xFF)? fail
202	membar	#LoadLoad		! delay: runs on both paths
203.lock_try_lockstat_patch_point:
204	retl
205	or	%o0, 1, %o0		! ensure lo32 != 0
2061:
207	retl
208	clr	%o0			! delay: return 0, lock was busy
209	SET_SIZE(lock_try)

	! identical to lock_try() except there is no lockstat patch point
211	.align	32
212	ENTRY(lock_spin_try)
213	ldstub	[%o0], %o1		! try to set lock, get value in %o1
214	brnz,pn	%o1, 1f			! was already held (0xFF)? fail
215	membar	#LoadLoad		! delay: runs on both paths
216	retl
217	or	%o0, 1, %o0		! ensure lo32 != 0
2181:
219	retl
220	clr	%o0			! delay: return 0, lock was busy
221	SET_SIZE(lock_spin_try)

223	.align	32
224	ENTRY(lock_set)
225	ldstub	[%o0], %o1		! try to set lock, get value in %o1
226	brnz,pn	%o1, 1f			! go to C for the hard case
227	membar	#LoadLoad		! delay: runs on both paths
228.lock_set_lockstat_patch_point:
229	retl
230	nop
2311:
232	sethi	%hi(lock_set_spin), %o2	! load up for jump to C
233	jmp	%o2 + %lo(lock_set_spin)	! tail call: %o0 = lp still in place
234	nop				! delay: do nothing
235	SET_SIZE(lock_set)

237	ENTRY(lock_clear)
238	membar	#LoadStore|#StoreStore	! release: drain accesses before unlock
239.lock_clear_lockstat_patch_point:
240	retl
241	clrb	[%o0]			! delay: *lp = 0
242	SET_SIZE(lock_clear)

244	.align	32
245	ENTRY(ulock_try)
246	ldstuba	[%o0]ASI_USER, %o1	! try to set lock, get value in %o1
247	xor	%o1, 0xff, %o0		! delay - return non-zero if success
248	retl
249	  membar	#LoadLoad
250	SET_SIZE(ulock_try)

252	ENTRY(ulock_clear)
253	membar  #LoadStore|#StoreStore	! release: drain accesses before unlock
254	retl
255	  stba	%g0, [%o0]ASI_USER	! clear lock
256	SET_SIZE(ulock_clear)

258#endif	/* lint */
259
260
261/*
262 * lock_set_spl(lp, new_pil, *old_pil_addr)
263 * 	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
264 */
265
266#if defined(lint)

268/* ARGSUSED */
269void
270lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
271{
272	extern int splr(int);
273	extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
274	int old_pil;

276	old_pil = splr(new_pil);
277	if (!lock_try(lp)) {
278		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
279	} else {
280		*old_pil_addr = (u_short)old_pil;
281		membar_enter();
282	}
283}

285#else	/* lint */

287	ENTRY(lock_set_spl)
288	rdpr	%pil, %o3			! %o3 = current pil
289	cmp	%o3, %o1			! is current pil high enough?
290	bl,a,pt %icc, 1f			! if not, write %pil in delay
291	wrpr	%g0, %o1, %pil			! (annulled unless branch taken)
2921:
293	ldstub	[%o0], %o4			! try the lock
294	brnz,pn	%o4, 2f				! go to C for the miss case
295	membar	#LoadLoad			! delay: runs on both paths
296.lock_set_spl_lockstat_patch_point:
297	retl
298	sth	%o3, [%o2]			! delay - save original pil
2992:
	! tail call: %o0-%o3 already hold (lp, new_pil, old_pil_addr, old_pil)
300	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
301	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
302	nop					! delay: do nothing
303	SET_SIZE(lock_set_spl)

305#endif	/* lint */
306
307/*
308 * lock_clear_splx(lp, s)
309 */
310
311#if defined(lint)

313void
314lock_clear_splx(lock_t *lp, int s)
315{
316	extern void splx(int);

	/* lint model; the asm version also honors the CPU's base spl */
318	lock_clear(lp);
319	splx(s);
320}

322#else	/* lint */

324	ENTRY(lock_clear_splx)
325	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
326	membar	#LoadStore|#StoreStore		! release: drain before unlock
327	ld	[%o2 + CPU_BASE_SPL], %o2	! %o2 = cpu_base_spl
328	clrb	[%o0]				! clear lock
329	cmp	%o2, %o1			! compare new to base
330	movl	%xcc, %o1, %o2			! use new pri if base is less
331.lock_clear_splx_lockstat_patch_point:
332	retl
333	wrpr	%g0, %o2, %pil			! delay: %pil = max(s, base_spl)
334	SET_SIZE(lock_clear_splx)

336#endif	/* lint */
337
338/*
339 * mutex_enter() and mutex_exit().
340 *
341 * These routines handle the simple cases of mutex_enter() (adaptive
342 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
343 * If anything complicated is going on we punt to mutex_vector_enter().
344 *
345 * mutex_tryenter() is similar to mutex_enter() but returns zero if
346 * the lock cannot be acquired, nonzero on success.
347 *
348 * If mutex_exit() gets preempted in the window between checking waiters
349 * and clearing the lock, we can miss wakeups.  Disabling preemption
350 * in the mutex code is prohibitively expensive, so instead we detect
351 * mutex preemption by examining the trapped PC in the interrupt path.
352 * If we interrupt a thread in mutex_exit() that has not yet cleared
353 * the lock, pil_interrupt() resets its PC back to the beginning of
354 * mutex_exit() so it will check again for waiters when it resumes.
355 *
356 * The lockstat code below is activated when the lockstat driver
357 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
358 * Note that we don't need to test lockstat_event_mask here -- we won't
359 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
360 */
361
362#if defined (lint)

364/* ARGSUSED */
365void
366mutex_enter(kmutex_t *lp)
367{}

369/* ARGSUSED */
370int
371mutex_tryenter(kmutex_t *lp)
372{ return (0); }

374/* ARGSUSED */
375void
376mutex_exit(kmutex_t *lp)
377{}

379/* ARGSUSED */
380void *
381mutex_owner_running(mutex_impl_t *lp)
382{ return (NULL); }

384#else
385	.align	32
386	ENTRY(mutex_enter)
387	mov	THREAD_REG, %o1			! %o1 = curthread (new owner)
388	casx	[%o0], %g0, %o1			! try to acquire as adaptive
389	brnz,pn	%o1, 1f				! locked or wrong type
390	membar	#LoadLoad			! delay: runs on both paths
391.mutex_enter_lockstat_patch_point:
392	retl
393	nop
3941:
395	sethi	%hi(mutex_vector_enter), %o2	! load up for jump to C
396	jmp	%o2 + %lo(mutex_vector_enter)	! tail call, %o0 = lp unchanged
397	nop
398	SET_SIZE(mutex_enter)

400	ENTRY(mutex_tryenter)
401	mov	THREAD_REG, %o1			! %o1 = curthread (new owner)
402	casx	[%o0], %g0, %o1			! try to acquire as adaptive
403	brnz,pn	%o1, 1f				! locked or wrong type continue
404	membar	#LoadLoad			! delay: runs on both paths
405.mutex_tryenter_lockstat_patch_point:
406	retl
407	or	%o0, 1, %o0			! ensure lo32 != 0
4081:
409	sethi	%hi(mutex_vector_tryenter), %o2		! hi bits
410	jmp	%o2 + %lo(mutex_vector_tryenter)	! go to C
411	nop
412	SET_SIZE(mutex_tryenter)

	! single cas attempt; on failure returns 0 directly rather than
	! punting to C, and has no lockstat patch point.
414	ENTRY(mutex_adaptive_tryenter)
415	mov	THREAD_REG, %o1			! %o1 = curthread (new owner)
416	casx	[%o0], %g0, %o1			! try to acquire as adaptive
417	brnz,pn	%o1, 0f				! locked or wrong type
418	membar	#LoadLoad			! delay: runs on both paths
419	retl
420	or	%o0, 1, %o0			! ensure lo32 != 0
4210:
422	retl
423	mov	%g0, %o0			! delay: return 0, failed
424	SET_SIZE(mutex_adaptive_tryenter)

426	! these need to be together and cache aligned for performance.
427	.align 64
428	.global	mutex_exit_critical_size
429	.global	mutex_exit_critical_start
430	.global mutex_owner_running_critical_size
431	.global mutex_owner_running_critical_start

	! size of the restartable region, exported for pil_interrupt()
433mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start

435	.align	32

437	ENTRY(mutex_exit)
438mutex_exit_critical_start:		! If we are interrupted, restart here
439	ldn	[%o0], %o1		! get the owner field
440	membar	#LoadStore|#StoreStore	! release: drain before unlock
441	cmp	THREAD_REG, %o1		! do we own lock with no waiters?
442	be,a,pt	%ncc, 1f		! if so, drive on ...
443	stn	%g0, [%o0]		! delay: clear lock if we owned it
444.mutex_exit_critical_end:		! for pil_interrupt() hook
445	ba,a,pt	%xcc, mutex_vector_exit	! go to C for the hard cases
4461:
447.mutex_exit_lockstat_patch_point:
448	retl
449	nop
450	SET_SIZE(mutex_exit)

	! size of the restartable region, exported for pil_interrupt()
452mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start

454	.align  32

456	ENTRY(mutex_owner_running)
457mutex_owner_running_critical_start:	! If interrupted restart here
458	ldn	[%o0], %o1		! get the owner field
459	and	%o1, MUTEX_THREAD, %o1	! remove the waiters bit if any
460	brz,pn	%o1, 1f			! if no owner, return 0
461	nop
462	ldn	[%o1+T_CPU], %o2	! get owner->t_cpu
463	ldn	[%o2+CPU_THREAD], %o3	! get owner->t_cpu->cpu_thread
464.mutex_owner_running_critical_end:	! for pil_interrupt() hook
465	cmp	%o1, %o3		! owner == running thread?
466	be,a,pt	%xcc, 2f		! yes, go return cpu
467	nop
4681:
469	retl
470	mov	%g0, %o0		! return 0 (owner not running)
4712:
472	retl
473	mov	%o2, %o0		! owner running, return cpu
474	SET_SIZE(mutex_owner_running)

476#endif	/* lint */
477
478/*
479 * rw_enter() and rw_exit().
480 *
481 * These routines handle the simple cases of rw_enter (write-locking an unheld
482 * lock or read-locking a lock that's neither write-locked nor write-wanted)
483 * and rw_exit (no waiters or not the last reader).  If anything complicated
484 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
485 */
486#if defined(lint)

488/* ARGSUSED */
489void
490rw_enter(krwlock_t *lp, krw_t rw)
491{}

493/* ARGSUSED */
494void
495rw_exit(krwlock_t *lp)
496{}

498#else

500	.align	16
501	ENTRY(rw_enter)
502	cmp	%o1, RW_WRITER			! entering as writer?
503	be,a,pn	%icc, 2f			! if so, go do it ...
504	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
505	ld	[THREAD_REG + T_KPRI_REQ], %o3	! begin THREAD_KPRI_REQUEST()
506	ldn	[%o0], %o4			! %o4 = old lock value
507	inc	%o3				! bump kpri
508	st	%o3, [THREAD_REG + T_KPRI_REQ]	! store new kpri
5091:
510	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
511	bz,pt	%xcc, 3f	 		! if not, go take a read hold
512	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
	! lock is write-claimed: punt to C to block
513	sethi	%hi(rw_enter_sleep), %o2	! load up jump
514	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
515	nop					! delay: do nothing
5163:
517	casx	[%o0], %o4, %o5			! try to grab read lock
518	cmp	%o4, %o5			! did we get it?
519	bne,pn	%xcc, 1b			! if not, try again
520	mov	%o5, %o4			! delay: %o4 = old lock value
521	membar	#LoadLoad			! acquire barrier
522.rw_read_enter_lockstat_patch_point:
523	retl
524	nop
5252:
526	casx	[%o0], %g0, %o5			! try to grab write lock
527	brz,pt %o5, 4f				! branch around if we got it
528	membar	#LoadLoad			! done regardless of where we go
529	sethi	%hi(rw_enter_sleep), %o2
530	jmp	%o2 + %lo(rw_enter_sleep)	! jump to rw_enter_sleep if not
531	nop					! delay: do nothing
5324:
533.rw_write_enter_lockstat_patch_point:
534	retl
535	nop
536	SET_SIZE(rw_enter)

538	.align	16
539	ENTRY(rw_exit)
540	ldn	[%o0], %o4			! %o4 = old lock value
541	membar	#LoadStore|#StoreStore		! membar_exit()
542	subcc	%o4, RW_READ_LOCK, %o5		! %o5 = new lock value if reader
543	bnz,pn	%xcc, 2f			! not a lone reader? look closer
544	clr	%o1				! delay: %o1 = 0 (scratch)
5451:
546	ld	[THREAD_REG + T_KPRI_REQ], %g1	! begin THREAD_KPRI_RELEASE()
547	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
548	casx	[%o0], %o4, %o5			! try to drop lock
549	cmp	%o4, %o5			! did we succeed?
550	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
551	dec	%g1				! delay: drop kpri
552.rw_read_exit_lockstat_patch_point:
553	retl
554	st	%g1, [THREAD_REG + T_KPRI_REQ]	! delay: store new kpri
5552:
556	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
557	bnz,a,pt %xcc, 3f
558	or	THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
559	cmp	%o5, RW_READ_LOCK		! would lock still be held?
560	bge,pt	%xcc, 1b			! if so, go ahead and drop it
561	nop
562	ba,pt	%xcc, rw_exit_wakeup		! otherwise, wake waiters
563	nop
5643:
565	casx	[%o0], %o4, %o1			! try to drop write lock
566	cmp	%o4, %o1			! did we succeed?
567	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
568	nop
569.rw_write_exit_lockstat_patch_point:
570	retl
571	nop
572	SET_SIZE(rw_exit)

574#endif
575
576#if defined(lint)

578void
579lockstat_hot_patch(void)
580{}

582#else

	! opcode templates for the instructions we patch in
584#define	RETL			0x81c3e008
585#define	NOP			0x01000000
586#define BA			0x10800000

588#define	DISP22			((1 << 22) - 1)
589#define	ANNUL			0x20000000

	!
	! HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs):
	! the code after "ba 1f" is the lockstat probe trampoline for addr;
	! the code at 1: (run at patch time) builds the "ba[,a]" instruction
	! whose 22-bit displacement reaches that trampoline, then calls
	! hot_patch_kernel_text() to install either that branch (probe
	! enabled in lockstat_probemap) or normal_instr (probe disabled)
	! at the patch point.
	!
591#define	HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)		\
592	ba	1f;							\
593	rd	%pc, %o0;						\
594	save	%sp, -SA(MINFRAME), %sp;				\
595	set	lockstat_probemap, %l1;					\
596	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
597	brz,pn	%o0, 0f;						\
598	ldub	[THREAD_REG + T_LOCKSTAT], %l0;				\
599	add	%l0, 1, %l2;						\
600	stub	%l2, [THREAD_REG + T_LOCKSTAT];				\
601	set	lockstat_probe, %g1;					\
602	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
603	brz,a,pn %o0, 0f;						\
604	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
605	ldn	[%g1], %g2;						\
606	mov	rs, %o2;						\
607	jmpl	%g2, %o7;						\
608	mov	%i0, %o1;						\
609	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
6100:	ret;								\
611	restore	%g0, 1, %o0;	/* for mutex_tryenter / lock_try */	\
6121:	set	addr, %o1;						\
613	sub	%o0, %o1, %o0;						\
614	srl	%o0, 2, %o0;						\
615	inc	%o0;							\
616	set	DISP22, %o1;						\
617	and	%o1, %o0, %o0;						\
618	set	BA, %o1;						\
619	or	%o1, %o0, %o0;						\
620	sethi	%hi(annul), %o2;					\
621	add	%o0, %o2, %o2;						\
622	set	addr, %o0;						\
623	set	normal_instr, %o1;					\
624	ld	[%i0 + (event * DTRACE_IDSIZE)], %o3;			\
625	tst	%o3;							\
626	movnz	%icc, %o2, %o1;						\
627	call	hot_patch_kernel_text;					\
628	mov	4, %o2;							\
629	membar	#Sync

631#define	HOT_PATCH(addr, event, normal_instr)	\
632	HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)

634#define	HOT_PATCH_ARG(addr, event, normal_instr, arg)	\
635	HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)

	! annulled variant: patch point sits in an annulled delay slot
637#define HOT_PATCH_ANNULLED(addr, event, normal_instr)	\
638	HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)

640	ENTRY(lockstat_hot_patch)
641	save	%sp, -SA(MINFRAME), %sp
642	set	lockstat_probemap, %i0	! %i0 = probemap, used by every macro
643	HOT_PATCH(.mutex_enter_lockstat_patch_point,
644		LS_MUTEX_ENTER_ACQUIRE, RETL)
645	HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
646		LS_MUTEX_TRYENTER_ACQUIRE, RETL)
647	HOT_PATCH(.mutex_exit_lockstat_patch_point,
648		LS_MUTEX_EXIT_RELEASE, RETL)
649	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
650		LS_RW_ENTER_ACQUIRE, RETL)
651	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
652		LS_RW_ENTER_ACQUIRE, RETL)
653	HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
654		LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
655	HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
656		LS_RW_EXIT_RELEASE, RETL, RW_READER)
657	HOT_PATCH(.lock_set_lockstat_patch_point,
658		LS_LOCK_SET_ACQUIRE, RETL)
659	HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
660		LS_LOCK_TRY_ACQUIRE, RETL)
661	HOT_PATCH(.lock_clear_lockstat_patch_point,
662		LS_LOCK_CLEAR_RELEASE, RETL)
663	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
664		LS_LOCK_SET_SPL_ACQUIRE, RETL)
665	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
666		LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
667	ret
668	restore
669	SET_SIZE(lockstat_hot_patch)

671#endif	/* lint */
672
673/*
674 * asm_mutex_spin_enter(mutex_t *)
675 *
676 * For use by assembly interrupt handler only.
677 * Does not change spl, since the interrupt handler is assumed to be
678 * running at high level already.
679 * Traps may be off, so cannot panic.
680 * Does not keep statistics on the lock.
681 *
682 * Entry:	%l6 - points to mutex
683 * 		%l7 - address of call (returns to %l7+8)
684 * Uses:	%l6, %l5
685 */
686#ifndef lint
687	.align 16
688	ENTRY_NP(asm_mutex_spin_enter)
689	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
6901:
691	tst	%l5
692	bnz	3f			! lock already held - go spin
693	nop
6942:
695	jmp	%l7 + 8			! return
696	membar	#LoadLoad		! delay: acquire barrier
	!
	! Spin on lock without using an atomic operation to prevent the caches
	! from unnecessarily moving ownership of the line around.
	!
7013:
702	ldub	[%l6 + M_SPINLOCK], %l5
7034:
704	tst	%l5
705	bz,a	1b			! lock appears to be free, try again
706	ldstub	[%l6 + M_SPINLOCK], %l5	! delay slot - try to set lock

	! still held: check panicstr so we don't spin forever after a panic
708	sethi	%hi(panicstr) , %l5
709	ldn	[%l5 + %lo(panicstr)], %l5
710	tst 	%l5
711	bnz	2b			! after panic, feign success
712	nop
713	b	4b
714	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
715	SET_SIZE(asm_mutex_spin_enter)
716#endif /* lint */
717
718/*
719 * asm_mutex_spin_exit(mutex_t *)
720 *
721 * For use by assembly interrupt handler only.
722 * Does not change spl, since the interrupt handler is assumed to be
723 * running at high level already.
724 *
725 * Entry:	%l6 - points to mutex
726 * 		%l7 - address of call (returns to %l7+8)
727 * Uses:	none
728 */
729#ifndef lint
730	ENTRY_NP(asm_mutex_spin_exit)
731	membar	#LoadStore|#StoreStore	! release: drain accesses before unlock
732	jmp	%l7 + 8			! return
733	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
734	SET_SIZE(asm_mutex_spin_exit)
735#endif /* lint */
736
737/*
738 * thread_onproc()
739 * Set thread in onproc state for the specified CPU.
740 * Also set the thread lock pointer to the CPU's onproc lock.
741 * Since the new lock isn't held, the store ordering is important.
742 * If not done in assembler, the compiler could reorder the stores.
743 */
744#if defined(lint)

746void
747thread_onproc(kthread_id_t t, cpu_t *cp)
748{
749	t->t_state = TS_ONPROC;
750	t->t_lockp = &cp->cpu_thread_lock;
751}

753#else	/* lint */

755	ENTRY(thread_onproc)
756	set	TS_ONPROC, %o2		! TS_ONPROC state
757	st	%o2, [%o0 + T_STATE]	! store state (must precede lockp store)
758	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
759	retl				! return
760	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
761	SET_SIZE(thread_onproc)

763#endif	/* lint */
764
765/* delay function used in some mutex code - just do 3 nop cas ops */
766#if defined(lint)

768/* ARGSUSED */
769void
770cas_delay(void *addr)
771{}
772#else	/* lint */
	! three no-op casx operations (%g0 compare/swap leaves *addr intact)
773	ENTRY(cas_delay)
774	casx [%o0], %g0, %g0
775	casx [%o0], %g0, %g0
776	retl
777	casx [%o0], %g0, %g0	! delay: final no-op casx
778	SET_SIZE(cas_delay)
779#endif	/* lint */
780
781#if defined(lint)

783/*
784 * alternative delay function for some niagara processors.   The rd
785 * instruction uses less resources than casx on those cpus.
786 */
787/* ARGSUSED */
788void
789rdccr_delay(void)
790{}
791#else	/* lint */
	! three reads of %ccr into %g0: pure delay, no architectural effect
792	ENTRY(rdccr_delay)
793	rd	%ccr, %g0
794	rd	%ccr, %g0
795	retl
796	rd	%ccr, %g0	! delay: final no-op read
797	SET_SIZE(rdccr_delay)
798#endif	/* lint */
799
800/*
801 * mutex_delay_default(void)
802 * Spins for approx a few hundred processor cycles and returns to caller.
803 */
804#if defined(lint)

806void
807mutex_delay_default(void)
808{}

810#else	/* lint */

812	ENTRY(mutex_delay_default)
813	mov	72,%o0			! loop count; ~a few hundred cycles
8141:	brgz	%o0, 1b			! spin until %o0 <= 0
815	dec	%o0			! delay: count down
816	retl
817	nop
818	SET_SIZE(mutex_delay_default)

820#endif  /* lint */
821