/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/t_lock.h>
#include <sys/mutex.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/asm_linkage.h>
#include <sys/machlock.h>
#include <sys/machthread.h>
#include <sys/lockstat.h>

/* #define DEBUG */

#ifdef DEBUG
#include <sys/machparam.h>
#endif /* DEBUG */

/************************************************************************
 *		ATOMIC OPERATIONS
 */

/*
 * uint8_t	ldstub(uint8_t *cp)
 *
 * Store 0xFF at the specified location, and return its previous content.
 */

#if defined(lint)
uint8_t
ldstub(uint8_t *cp)
{
	uint8_t	rv;
	rv = *cp;
	*cp = 0xFF;
	return rv;
}
#else	/* lint */

	ENTRY(ldstub)
	retl
	ldstub	[%o0], %o0
	SET_SIZE(ldstub)

#endif	/* lint */
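
/*
 * Note: ldstub is the SPARC atomic test-and-set instruction: it reads the
 * byte at [cp] and stores 0xFF there in one atomic operation.  The lock
 * primitives below build their "0xFF is busy, anything else is free"
 * model directly on it.
 */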

/************************************************************************
 *		MEMORY BARRIERS -- see atomic.h for full descriptions.
 */

#if defined(lint)

void
membar_enter(void)
{}

void
membar_exit(void)
{}

void
membar_producer(void)
{}

void
membar_consumer(void)
{}

#else	/* lint */

#ifdef SF_ERRATA_51
	.align 32
	ENTRY(membar_return)
	retl
	nop
	SET_SIZE(membar_return)
#define	MEMBAR_RETURN	ba,pt %icc, membar_return
#else
#define	MEMBAR_RETURN	retl
#endif

	ENTRY(membar_enter)
	MEMBAR_RETURN
	membar	#StoreLoad|#StoreStore
	SET_SIZE(membar_enter)

	ENTRY(membar_exit)
	MEMBAR_RETURN
	membar	#LoadStore|#StoreStore
	SET_SIZE(membar_exit)

	ENTRY(membar_producer)
	MEMBAR_RETURN
	membar	#StoreStore
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	MEMBAR_RETURN
	membar	#LoadLoad
	SET_SIZE(membar_consumer)

#endif	/* lint */
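
/*
 * Pairing sketch (illustrative only -- see atomic.h for the real
 * contracts): the barriers are used as acquire/release operations
 * around a lock word, roughly:
 *
 *	while (ldstub(&lock) != 0)
 *		continue;
 *	membar_enter();		-- later accesses stay after the grab
 *	... critical section ...
 *	membar_exit();		-- earlier accesses stay before the release
 *	lock = 0;
 *
 * membar_producer() orders only stores (data before flag) and
 * membar_consumer() orders only loads (flag before data).
 */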

/************************************************************************
 *		MINIMUM LOCKS
 */

#if defined(lint)

/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts so don't use this to spin on a lock.
 *	- uses "0xFF is busy, anything else is free" model.
 *
 *      ulock_try() is for a lock in the user address space.
 *      On all V7/V8 sparc systems the two are the same, since the kernel
 *      and the user are mapped in the user's context.
 *      On V9 platforms, lock_try() and ulock_try() have different
 *      implementations.
 */

int
lock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

int
lock_spin_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
lock_set(lock_t *lp)
{
	extern void lock_set_spin(lock_t *);

	if (!lock_try(lp))
		lock_set_spin(lp);
	membar_enter();
}

void
lock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}

int
ulock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
ulock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}

#else	/* lint */

	.align	32
	ENTRY(lock_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f
	membar	#LoadLoad
.lock_try_lockstat_patch_point:
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0
	SET_SIZE(lock_try)

	.align	32
	ENTRY(lock_spin_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f
	membar	#LoadLoad
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0
	SET_SIZE(lock_spin_try)

	.align	32
	ENTRY(lock_set)
	ldstub	[%o0], %o1
	brnz,pn	%o1, 1f			! go to C for the hard case
	membar	#LoadLoad
.lock_set_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(lock_set_spin), %o2	! load up for jump to C
	jmp	%o2 + %lo(lock_set_spin)
	nop				! delay: do nothing
	SET_SIZE(lock_set)

	ENTRY(lock_clear)
	membar	#LoadStore|#StoreStore
.lock_clear_lockstat_patch_point:
	retl
	clrb	[%o0]
	SET_SIZE(lock_clear)

	.align	32
	ENTRY(ulock_try)
	ldstuba	[%o0]ASI_USER, %o1	! try to set lock, get value in %o1
	xor	%o1, 0xff, %o0		! return non-zero if success
	retl
	  membar	#LoadLoad
	SET_SIZE(ulock_try)

	ENTRY(ulock_clear)
	membar	#LoadStore|#StoreStore
	retl
	  stba	%g0, [%o0]ASI_USER	! clear lock
	SET_SIZE(ulock_clear)

#endif	/* lint */
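
/*
 * Usage sketch (illustrative only): callers that cannot spin with
 * interrupts blocked typically poll, roughly:
 *
 *	if (lock_try(&lk)) {
 *		... short critical section ...
 *		lock_clear(&lk);
 *	} else {
 *		... back off, or fall back to lock_set() / the slow path ...
 *	}
 */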


/*
 * lock_set_spl(lp, new_pil, *old_pil_addr)
 * 	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 */

#if defined(lint)

/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
{
	extern int splr(int);
	extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
	int old_pil;

	old_pil = splr(new_pil);
	if (!lock_try(lp)) {
		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
	} else {
		*old_pil_addr = (u_short)old_pil;
		membar_enter();
	}
}

#else	/* lint */

	ENTRY(lock_set_spl)
	rdpr	%pil, %o3			! %o3 = current pil
	cmp	%o3, %o1			! is current pil high enough?
	bl,a,pt %icc, 1f			! if not, write %pil in delay
	wrpr	%g0, %o1, %pil
1:
	ldstub	[%o0], %o4			! try the lock
	brnz,pn	%o4, 2f				! go to C for the miss case
	membar	#LoadLoad
.lock_set_spl_lockstat_patch_point:
	retl
	sth	%o3, [%o2]			! delay - save original pil
2:
	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
	nop					! delay: do nothing
	SET_SIZE(lock_set_spl)

#endif	/* lint */

/*
 * lock_clear_splx(lp, s)
 */

#if defined(lint)

void
lock_clear_splx(lock_t *lp, int s)
{
	extern void splx(int);

	lock_clear(lp);
	splx(s);
}

#else	/* lint */

	ENTRY(lock_clear_splx)
	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
	membar	#LoadStore|#StoreStore
	ld	[%o2 + CPU_BASE_SPL], %o2
	clrb	[%o0]				! clear lock
	cmp	%o2, %o1			! compare new to base
	movl	%xcc, %o1, %o2			! use new pri if base is less
.lock_clear_splx_lockstat_patch_point:
	retl
	wrpr	%g0, %o2, %pil
	SET_SIZE(lock_clear_splx)

#endif	/* lint */
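
/*
 * Pairing sketch (illustrative only): lock_set_spl() and lock_clear_splx()
 * are used together to protect data shared with interrupt handlers:
 *
 *	u_short s;
 *
 *	lock_set_spl(&lk, new_pil, &s);	-- raise pil, then take the lock
 *	... critical section ...
 *	lock_clear_splx(&lk, s);	-- drop the lock, then restore pil
 *
 * Note that lock_clear_splx() never drops %pil below the CPU's base spl.
 */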

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, pil_interrupt() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 *
 * The lockstat code below is activated when the lockstat driver
 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 * Note that we don't need to test lockstat_event_mask here -- we won't
 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 */

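/*
 * Fast-path sketch (illustrative only; field and helper names here are
 * informal, not the actual mutex_impl.h definitions): the adaptive owner
 * word is the first word of the mutex and zero means "unheld", so
 * mutex_enter() above is roughly
 *
 *	if (casx(&lp->owner, 0, curthread) == 0)
 *		return;			-- acquired on the fast path
 *	mutex_vector_enter(lp);		-- held, has waiters, or spin type
 *
 * and mutex_exit() is roughly
 *
 *	if (lp->owner == curthread)	-- exact match: no waiters bit set
 *		lp->owner = 0;		-- release
 *	else
 *		mutex_vector_exit(lp);	-- waiters present or not adaptive
 *
 * The load/compare/clear in mutex_exit() is the preemption window described
 * above; pil_interrupt() rewinds an interrupted thread's PC to
 * mutex_exit_critical_start so the waiters check is redone on resume.
 */
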
#if defined (lint)

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}

#else
	.align	32
	ENTRY(mutex_enter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type
	membar	#LoadLoad
.mutex_enter_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(mutex_vector_enter), %o2	! load up for jump to C
	jmp	%o2 + %lo(mutex_vector_enter)
	nop
	SET_SIZE(mutex_enter)

	ENTRY(mutex_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type
	membar	#LoadLoad
.mutex_tryenter_lockstat_patch_point:
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
1:
	sethi	%hi(mutex_vector_tryenter), %o2		! hi bits
	jmp	%o2 + %lo(mutex_vector_tryenter)	! go to C
	nop
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 0f				! locked or wrong type
	membar	#LoadLoad
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
0:
	retl
	mov	%g0, %o0
	SET_SIZE(mutex_adaptive_tryenter)

	.global	mutex_exit_critical_size
	.global	mutex_exit_critical_start

mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start

	.align	32

	ENTRY(mutex_exit)
mutex_exit_critical_start:		! If we are interrupted, restart here
	ldn	[%o0], %o1		! get the owner field
	membar	#LoadStore|#StoreStore
	cmp	THREAD_REG, %o1		! do we own lock with no waiters?
	be,a,pt	%ncc, 1f		! if so, drive on ...
	stn	%g0, [%o0]		! delay: clear lock if we owned it
.mutex_exit_critical_end:		! for pil_interrupt() hook
	ba,a,pt	%xcc, mutex_vector_exit	! go to C for the hard cases
1:
.mutex_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(mutex_exit)

#endif	/* lint */

/*
 * rw_enter() and rw_exit().
 *
 * These routines handle the simple cases of rw_enter (write-locking an unheld
 * lock or read-locking a lock that's neither write-locked nor write-wanted)
 * and rw_exit (no waiters or not the last reader).  If anything complicated
 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 */
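/*
 * Lock-word sketch (informal; rwlock_impl.h is authoritative): the single
 * word manipulated with casx below holds either the owning thread pointer
 * with RW_WRITE_LOCKED set (write-held), or a reader hold count in units
 * of RW_READ_LOCK, together with the RW_WRITE_WANTED and RW_HAS_WAITERS
 * flag bits.  Read entry therefore adds RW_READ_LOCK when neither
 * RW_WRITE_CLAIMED bit is set; write entry swaps in
 * (curthread | RW_WRITE_LOCKED) only if the word is zero; and the exit
 * paths undo those transitions, punting to rw_exit_wakeup() whenever
 * waiters must be woken.
 */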
#if defined(lint)

/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}

#else

	.align	16
	ENTRY(rw_enter)
	cmp	%o1, RW_WRITER			! entering as writer?
	be,a,pn	%icc, 2f			! if so, go do it ...
	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
	ld	[THREAD_REG + T_KPRI_REQ], %o3	! begin THREAD_KPRI_REQUEST()
	ldn	[%o0], %o4			! %o4 = old lock value
	inc	%o3				! bump kpri
	st	%o3, [THREAD_REG + T_KPRI_REQ]	! store new kpri
1:
	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
	bz,pt	%xcc, 3f			! if not claimed, grab read lock
	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
3:
	casx	[%o0], %o4, %o5			! try to grab read lock
	cmp	%o4, %o5			! did we get it?
	bne,pn	%xcc, 1b			! if not, try again
	mov	%o5, %o4			! delay: %o4 = old lock value
	membar	#LoadLoad
.rw_read_enter_lockstat_patch_point:
	retl
	nop
2:
	casx	[%o0], %g0, %o5			! try to grab write lock
	brz,pt %o5, 4f				! branch around if we got it
	membar	#LoadLoad			! done regardless of where we go
	sethi	%hi(rw_enter_sleep), %o2
	jmp	%o2 + %lo(rw_enter_sleep)	! jump to rw_enter_sleep if not
	nop					! delay: do nothing
4:
.rw_write_enter_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_enter)

	.align	16
	ENTRY(rw_exit)
	ldn	[%o0], %o4			! %o4 = old lock value
	membar	#LoadStore|#StoreStore		! membar_exit()
	subcc	%o4, RW_READ_LOCK, %o5		! %o5 = new lock value if reader
	bnz,pn	%xcc, 2f			! single reader, no waiters?
	clr	%o1
1:
	ld	[THREAD_REG + T_KPRI_REQ], %g1	! begin THREAD_KPRI_RELEASE()
	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
	casx	[%o0], %o4, %o5			! try to drop lock
	cmp	%o4, %o5			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	dec	%g1				! delay: drop kpri
.rw_read_exit_lockstat_patch_point:
	retl
	st	%g1, [THREAD_REG + T_KPRI_REQ]	! delay: store new kpri
2:
	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
	bnz,a,pt %xcc, 3f
	or	THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
	cmp	%o5, RW_READ_LOCK		! would lock still be held?
	bge,pt	%xcc, 1b			! if so, go ahead and drop it
	nop
	ba,pt	%xcc, rw_exit_wakeup		! otherwise, wake waiters
	nop
3:
	casx	[%o0], %o4, %o1			! try to drop write lock
	cmp	%o4, %o1			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	nop
.rw_write_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_exit)

#endif

#if defined(lint)

void
lockstat_hot_patch(void)
{}

#else

#define	RETL			0x81c3e008
#define	NOP			0x01000000
#define	BA			0x10800000

#define	DISP22			((1 << 22) - 1)
#define	ANNUL			0x20000000

#define	HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)		\
	ba	1f;							\
	rd	%pc, %o0;						\
	save	%sp, -SA(MINFRAME), %sp;				\
	set	lockstat_probemap, %l1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,pn	%o0, 0f;						\
	ldub	[THREAD_REG + T_LOCKSTAT], %l0;				\
	add	%l0, 1, %l2;						\
	stub	%l2, [THREAD_REG + T_LOCKSTAT];				\
	set	lockstat_probe, %g1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,a,pn %o0, 0f;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
	ldn	[%g1], %g2;						\
	mov	rs, %o2;						\
	jmpl	%g2, %o7;						\
	mov	%i0, %o1;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
0:	ret;								\
	restore	%g0, 1, %o0;	/* for mutex_tryenter / lock_try */	\
1:	set	addr, %o1;						\
	sub	%o0, %o1, %o0;						\
	srl	%o0, 2, %o0;						\
	inc	%o0;							\
	set	DISP22, %o1;						\
	and	%o1, %o0, %o0;						\
	set	BA, %o1;						\
	or	%o1, %o0, %o0;						\
	sethi	%hi(annul), %o2;					\
	add	%o0, %o2, %o2;						\
	set	addr, %o0;						\
	set	normal_instr, %o1;					\
	ld	[%i0 + (event * DTRACE_IDSIZE)], %o3;			\
	tst	%o3;							\
	movnz	%icc, %o2, %o1;						\
	call	hot_patch_kernel_text;					\
	mov	4, %o2;							\
	membar	#Sync

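/*
 * How the patching works (descriptive note): each *_lockstat_patch_point
 * label above marks a "retl" (annulled for the try routines, so the
 * return value in the delay slot is skipped).  The code at local label 1
 * computes the 22-bit word displacement from the patch point to this
 * macro's probe trampoline and assembles the corresponding branch
 * (BA | disp22, plus ANNUL when requested); hot_patch_kernel_text() then
 * writes either that branch (probe enabled) or normal_instr (probe
 * disabled) over the patch point.  The trampoline bumps T_LOCKSTAT to
 * keep lockstat_probe() from recursing, fires the probe, and returns 1
 * so the patched try routines still report success.
 */
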
#define	HOT_PATCH(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)

#define	HOT_PATCH_ARG(addr, event, normal_instr, arg)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)

#define	HOT_PATCH_ANNULLED(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)

	ENTRY(lockstat_hot_patch)
	save	%sp, -SA(MINFRAME), %sp
	set	lockstat_probemap, %i0
	HOT_PATCH(.mutex_enter_lockstat_patch_point,
		LS_MUTEX_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
		LS_MUTEX_TRYENTER_ACQUIRE, RETL)
	HOT_PATCH(.mutex_exit_lockstat_patch_point,
		LS_MUTEX_EXIT_RELEASE, RETL)
	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
	HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_READER)
	HOT_PATCH(.lock_set_lockstat_patch_point,
		LS_LOCK_SET_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
		LS_LOCK_TRY_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_lockstat_patch_point,
		LS_LOCK_CLEAR_RELEASE, RETL)
	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
		LS_LOCK_SET_SPL_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
		LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
	ret
	restore
	SET_SIZE(lockstat_hot_patch)

#endif	/* lint */

/*
 * asm_mutex_spin_enter(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 * Traps may be off, so cannot panic.
 * Does not keep statistics on the lock.
 *
 * Entry:	%l6 - points to mutex
 * 		%l7 - address of call (returns to %l7+8)
 * Uses:	%l6, %l5
 */
#ifndef lint
	.align 16
	ENTRY_NP(asm_mutex_spin_enter)
	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
1:
	tst	%l5
	bnz	3f			! lock already held - go spin
	nop
2:
	jmp	%l7 + 8			! return
	membar	#LoadLoad
	!
	! Spin on lock without using an atomic operation to prevent the caches
	! from unnecessarily moving ownership of the line around.
	!
3:
	ldub	[%l6 + M_SPINLOCK], %l5
4:
	tst	%l5
	bz,a	1b			! lock appears to be free, try again
	ldstub	[%l6 + M_SPINLOCK], %l5	! delay slot - try to set lock

	sethi	%hi(panicstr), %l5
	ldn	[%l5 + %lo(panicstr)], %l5
	tst	%l5
	bnz	2b			! after panic, feign success
	nop
	b	4b
	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
	SET_SIZE(asm_mutex_spin_enter)
#endif /* lint */
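
/*
 * Spin-loop sketch (illustrative C of the test-and-test-and-set above;
 * the field name is informal):
 *
 *	while (ldstub(&m->m_spinlock) != 0) {	-- atomic attempt
 *		while (m->m_spinlock != 0) {	-- plain loads while held
 *			if (panicstr != NULL)
 *				return;		-- feign success after panic
 *		}
 *	}
 *
 * Spinning on ordinary loads keeps the cache line shared until the lock
 * is actually released, instead of bouncing ownership on every probe.
 */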

/*
 * asm_mutex_spin_exit(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 *
 * Entry:	%l6 - points to mutex
 * 		%l7 - address of call (returns to %l7+8)
 * Uses:	none
 */
#ifndef lint
	ENTRY_NP(asm_mutex_spin_exit)
	membar	#LoadStore|#StoreStore
	jmp	%l7 + 8			! return
	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
	SET_SIZE(asm_mutex_spin_exit)
#endif /* lint */

/*
 * thread_onproc()
 * Set thread in onproc state for the specified CPU.
 * Also set the thread lock pointer to the CPU's onproc lock.
 * Since the new lock isn't held, the store ordering is important.
 * If not done in assembler, the compiler could reorder the stores.
 */
#if defined(lint)

void
thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;
	t->t_lockp = &cp->cpu_thread_lock;
}

#else	/* lint */

	ENTRY(thread_onproc)
	set	TS_ONPROC, %o2		! TS_ONPROC state
	st	%o2, [%o0 + T_STATE]	! store state
	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
	retl				! return
	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
	SET_SIZE(thread_onproc)

#endif	/* lint */