/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint)
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/t_lock.h>
#include <sys/mutex.h>
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#include <sys/asm_linkage.h>
#include <sys/machlock.h>
#include <sys/machthread.h>
#include <sys/lockstat.h>

/* #define DEBUG */

#ifdef DEBUG
#include <sys/machparam.h>
#endif /* DEBUG */

/************************************************************************
 *		ATOMIC OPERATIONS
 */

/*
 * uint8_t	ldstub(uint8_t *cp)
 *
 * Store 0xFF at the specified location, and return its previous content.
 */

#if defined(lint)
uint8_t
ldstub(uint8_t *cp)
{
	uint8_t	rv;
	rv = *cp;
	*cp = 0xFF;
	return rv;
}
#else	/* lint */

	ENTRY(ldstub)
	retl
	ldstub	[%o0], %o0
	SET_SIZE(ldstub)

#endif	/* lint */

/************************************************************************
 *		MEMORY BARRIERS -- see atomic.h for full descriptions.
 */
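
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * producer/consumer pairing showing which barrier goes on which side, based
 * on the semantics implemented below (membar_producer() orders stores,
 * membar_consumer() orders loads).  The names data, ready, produce() and
 * consume() are assumptions for the sketch.
 *
 *	int data;
 *	volatile int ready;
 *
 *	void
 *	produce(int v)
 *	{
 *		data = v;
 *		membar_producer();	! publish data before the flag
 *		ready = 1;
 *	}
 *
 *	int
 *	consume(void)
 *	{
 *		while (ready == 0)
 *			continue;
 *		membar_consumer();	! read the flag before the data
 *		return (data);
 *	}
 */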

#if defined(lint)

void
membar_enter(void)
{}

void
membar_exit(void)
{}

void
membar_producer(void)
{}

void
membar_consumer(void)
{}

#else	/* lint */

#ifdef SF_ERRATA_51
	.align 32
	ENTRY(membar_return)
	retl
	nop
	SET_SIZE(membar_return)
#define	MEMBAR_RETURN	ba,pt %icc, membar_return
#else
#define	MEMBAR_RETURN	retl
#endif

	ENTRY(membar_enter)
	MEMBAR_RETURN
	membar	#StoreLoad|#StoreStore
	SET_SIZE(membar_enter)

	ENTRY(membar_exit)
	MEMBAR_RETURN
	membar	#LoadStore|#StoreStore
	SET_SIZE(membar_exit)

	ENTRY(membar_producer)
	MEMBAR_RETURN
	membar	#StoreStore
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	MEMBAR_RETURN
	membar	#LoadLoad
	SET_SIZE(membar_consumer)

#endif	/* lint */

/************************************************************************
 *		MINIMUM LOCKS
 */

#if defined(lint)

/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts, so don't use this to spin on a lock.
 *	- uses the "0xFF is busy, anything else is free" model.
 *
 *	ulock_try() is for a lock in the user address space.
 *	On V7/V8 sparc systems the two are the same, since kernel and
 *	user are mapped in the user's context.
 *	On V9 platforms lock_try() and ulock_try() have different
 *	implementations.
 */
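
/*
 * Minimal usage sketch (illustrative only, not part of the original file),
 * based on the description above: lock_try() returns non-zero when the lock
 * byte was free and is now held, lock_clear() releases it.  The lock_t "l"
 * is a hypothetical local used just for the sketch.
 *
 *	lock_t l = 0;			! 0 is free, 0xFF is held
 *
 *	if (lock_try(&l)) {
 *		... critical section ...
 *		lock_clear(&l);
 *	} else {
 *		... lock was busy; do not spin on lock_try() ...
 *	}
 */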

int
lock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

int
lock_spin_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
lock_set(lock_t *lp)
{
	extern void lock_set_spin(lock_t *);

	if (!lock_try(lp))
		lock_set_spin(lp);
	membar_enter();
}

void
lock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}

int
ulock_try(lock_t *lp)
{
	return (0xFF ^ ldstub(lp));
}

void
ulock_clear(lock_t *lp)
{
	membar_exit();
	*lp = 0;
}

#else	/* lint */

	.align	32
	ENTRY(lock_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f
	membar	#LoadLoad
.lock_try_lockstat_patch_point:
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0
	SET_SIZE(lock_try)

	.align	32
	ENTRY(lock_spin_try)
	ldstub	[%o0], %o1		! try to set lock, get value in %o1
	brnz,pn	%o1, 1f
	membar	#LoadLoad
	retl
	or	%o0, 1, %o0		! ensure lo32 != 0
1:
	retl
	clr	%o0
	SET_SIZE(lock_spin_try)

	.align	32
	ENTRY(lock_set)
	ldstub	[%o0], %o1
	brnz,pn	%o1, 1f			! go to C for the hard case
	membar	#LoadLoad
.lock_set_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(lock_set_spin), %o2	! load up for jump to C
	jmp	%o2 + %lo(lock_set_spin)
	nop				! delay: do nothing
	SET_SIZE(lock_set)

	ENTRY(lock_clear)
	membar	#LoadStore|#StoreStore
.lock_clear_lockstat_patch_point:
	retl
	clrb	[%o0]
	SET_SIZE(lock_clear)

	.align	32
	ENTRY(ulock_try)
	ldstuba	[%o0]ASI_USER, %o1	! try to set lock, get value in %o1
	xor	%o1, 0xff, %o0		! delay - return non-zero if success
	retl
	  membar	#LoadLoad
	SET_SIZE(ulock_try)

	ENTRY(ulock_clear)
	membar	#LoadStore|#StoreStore
	retl
	  stba	%g0, [%o0]ASI_USER	! clear lock
	SET_SIZE(ulock_clear)

#endif	/* lint */


/*
 * lock_set_spl(lp, new_pil, *old_pil_addr)
 * 	Sets pil to new_pil, grabs lp, stores old pil in *old_pil_addr.
 */
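
/*
 * Usage sketch (illustrative only, not part of the original file):
 * lock_set_spl() raises the PIL to at least new_pil, acquires the lock and
 * saves the previous PIL; lock_clear_splx() (further below) releases the
 * lock and restores it.  The names "l", "new_pil" and "old_pil" are
 * hypothetical.
 *
 *	lock_t l = 0;
 *	u_short old_pil;
 *
 *	lock_set_spl(&l, new_pil, &old_pil);
 *	... protected, high-PIL work ...
 *	lock_clear_splx(&l, old_pil);
 */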

#if defined(lint)

/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil_addr)
{
	extern int splr(int);
	extern void lock_set_spl_spin(lock_t *, int, u_short *, int);
	int old_pil;

	old_pil = splr(new_pil);
	if (!lock_try(lp)) {
		lock_set_spl_spin(lp, new_pil, old_pil_addr, old_pil);
	} else {
		*old_pil_addr = (u_short)old_pil;
		membar_enter();
	}
}

#else	/* lint */

	ENTRY(lock_set_spl)
	rdpr	%pil, %o3			! %o3 = current pil
	cmp	%o3, %o1			! is current pil high enough?
	bl,a,pt %icc, 1f			! if not, write %pil in delay
	wrpr	%g0, %o1, %pil
1:
	ldstub	[%o0], %o4			! try the lock
	brnz,pn	%o4, 2f				! go to C for the miss case
	membar	#LoadLoad
.lock_set_spl_lockstat_patch_point:
	retl
	sth	%o3, [%o2]			! delay - save original pil
2:
	sethi	%hi(lock_set_spl_spin), %o5	! load up jmp to C
	jmp	%o5 + %lo(lock_set_spl_spin)	! jmp to lock_set_spl_spin
	nop					! delay: do nothing
	SET_SIZE(lock_set_spl)

#endif	/* lint */

/*
 * lock_clear_splx(lp, s)
 */

#if defined(lint)

void
lock_clear_splx(lock_t *lp, int s)
{
	extern void splx(int);

	lock_clear(lp);
	splx(s);
}

#else	/* lint */

	ENTRY(lock_clear_splx)
	ldn	[THREAD_REG + T_CPU], %o2	! get CPU pointer
	membar	#LoadStore|#StoreStore
	ld	[%o2 + CPU_BASE_SPL], %o2
	clrb	[%o0]				! clear lock
	cmp	%o2, %o1			! compare new to base
	movl	%xcc, %o1, %o2			! use new pri if base is less
.lock_clear_splx_lockstat_patch_point:
	retl
	wrpr	%g0, %o2, %pil
	SET_SIZE(lock_clear_splx)

#endif	/* lint */

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, pil_interrupt() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 *
 * The lockstat code below is activated when the lockstat driver
 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 * Note that we don't need to test lockstat_event_mask here -- we won't
 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 */
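
/*
 * Caller-side sketch (illustrative only, not part of the original file) of
 * the fast paths below, using the standard kernel mutex interfaces; "m" and
 * do_work() are hypothetical.
 *
 *	kmutex_t m;
 *
 *	mutex_init(&m, NULL, MUTEX_DEFAULT, NULL);
 *
 *	mutex_enter(&m);		! uncontended: handled entirely below
 *	do_work();
 *	mutex_exit(&m);			! held, no waiters: handled below
 *
 *	if (mutex_tryenter(&m)) {	! non-blocking attempt
 *		do_work();
 *		mutex_exit(&m);
 *	}
 *
 *	mutex_destroy(&m);
 */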

#if defined (lint)

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}

/* ARGSUSED */
void *
mutex_owner_running(mutex_impl_t *lp)
{ return (NULL); }

#else
	.align	32
	ENTRY(mutex_enter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type
	membar	#LoadLoad
.mutex_enter_lockstat_patch_point:
	retl
	nop
1:
	sethi	%hi(mutex_vector_enter), %o2	! load up for jump to C
	jmp	%o2 + %lo(mutex_vector_enter)
	nop
	SET_SIZE(mutex_enter)

	ENTRY(mutex_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 1f				! locked or wrong type, go to C
	membar	#LoadLoad
.mutex_tryenter_lockstat_patch_point:
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
1:
	sethi	%hi(mutex_vector_tryenter), %o2		! hi bits
	jmp	%o2 + %lo(mutex_vector_tryenter)	! go to C
	nop
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	mov	THREAD_REG, %o1
	casx	[%o0], %g0, %o1			! try to acquire as adaptive
	brnz,pn	%o1, 0f				! locked or wrong type
	membar	#LoadLoad
	retl
	or	%o0, 1, %o0			! ensure lo32 != 0
0:
	retl
	mov	%g0, %o0
	SET_SIZE(mutex_adaptive_tryenter)

	! these need to be together and cache aligned for performance.
	.align 64
	.global	mutex_exit_critical_size
	.global	mutex_exit_critical_start
	.global mutex_owner_running_critical_size
	.global mutex_owner_running_critical_start

mutex_exit_critical_size = .mutex_exit_critical_end - mutex_exit_critical_start

	.align	32

	ENTRY(mutex_exit)
mutex_exit_critical_start:		! If we are interrupted, restart here
	ldn	[%o0], %o1		! get the owner field
	membar	#LoadStore|#StoreStore
	cmp	THREAD_REG, %o1		! do we own lock with no waiters?
	be,a,pt	%ncc, 1f		! if so, drive on ...
	stn	%g0, [%o0]		! delay: clear lock if we owned it
.mutex_exit_critical_end:		! for pil_interrupt() hook
	ba,a,pt	%xcc, mutex_vector_exit	! go to C for the hard cases
1:
.mutex_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(mutex_exit)

mutex_owner_running_critical_size = .mutex_owner_running_critical_end - mutex_owner_running_critical_start

	.align  32

	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:	! If interrupted restart here
	ldn	[%o0], %o1		! get the owner field
	and	%o1, MUTEX_THREAD, %o1	! remove the waiters bit if any
	brz,pn	%o1, 1f			! if no owner, return 0 below
	nop
	ldn	[%o1+T_CPU], %o2	! get owner->t_cpu
	ldn	[%o2+CPU_THREAD], %o3	! get owner->t_cpu->cpu_thread
.mutex_owner_running_critical_end:	! for pil_interrupt() hook
	cmp	%o1, %o3		! owner == running thread?
	be,a,pt	%xcc, 2f		! yes, go return cpu
	nop
1:
	retl
	mov	%g0, %o0		! return 0 (owner not running)
2:
	retl
	mov	%o2, %o0		! owner running, return cpu
	SET_SIZE(mutex_owner_running)

#endif	/* lint */

/*
 * rw_enter() and rw_exit().
 *
 * These routines handle the simple cases of rw_enter (write-locking an unheld
 * lock or read-locking a lock that's neither write-locked nor write-wanted)
 * and rw_exit (no waiters or not the last reader).  If anything complicated
 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 */
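
/*
 * Caller-side sketch (illustrative only, not part of the original file) of
 * the fast paths below, using the standard kernel rwlock interfaces; "rwl"
 * and the reader/writer bodies are hypothetical.
 *
 *	krwlock_t rwl;
 *
 *	rw_init(&rwl, NULL, RW_DEFAULT, NULL);
 *
 *	rw_enter(&rwl, RW_READER);	! read: bump the hold count
 *	... read shared state ...
 *	rw_exit(&rwl);
 *
 *	rw_enter(&rwl, RW_WRITER);	! write: install owner | RW_WRITE_LOCKED
 *	... modify shared state ...
 *	rw_exit(&rwl);
 *
 *	rw_destroy(&rwl);
 */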
#if defined(lint)

/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}

#else

	.align	16
	ENTRY(rw_enter)
	cmp	%o1, RW_WRITER			! entering as writer?
	be,a,pn	%icc, 2f			! if so, go do it ...
	or	THREAD_REG, RW_WRITE_LOCKED, %o5 ! delay: %o5 = owner
	ld	[THREAD_REG + T_KPRI_REQ], %o3	! begin THREAD_KPRI_REQUEST()
	ldn	[%o0], %o4			! %o4 = old lock value
	inc	%o3				! bump kpri
	st	%o3, [THREAD_REG + T_KPRI_REQ]	! store new kpri
1:
	andcc	%o4, RW_WRITE_CLAIMED, %g0	! write-locked or write-wanted?
	bz,pt	%xcc, 3f			! if not, go grab the read lock
	add	%o4, RW_READ_LOCK, %o5		! delay: increment hold count
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
3:
	casx	[%o0], %o4, %o5			! try to grab read lock
	cmp	%o4, %o5			! did we get it?
#ifdef sun4v
	be,a,pt %xcc, 0f
	membar  #LoadLoad
	sethi	%hi(rw_enter_sleep), %o2	! load up jump
	jmp	%o2 + %lo(rw_enter_sleep)	! jmp to rw_enter_sleep
	nop					! delay: do nothing
0:
#else /* sun4v */
	bne,pn	%xcc, 1b			! if not, try again
	mov	%o5, %o4			! delay: %o4 = old lock value
	membar	#LoadLoad
#endif /* sun4v */
.rw_read_enter_lockstat_patch_point:
	retl
	nop
2:
	casx	[%o0], %g0, %o5			! try to grab write lock
	brz,pt %o5, 4f				! branch around if we got it
	membar	#LoadLoad			! done regardless of where we go
	sethi	%hi(rw_enter_sleep), %o2
	jmp	%o2 + %lo(rw_enter_sleep)	! jump to rw_enter_sleep if not
	nop					! delay: do nothing
4:
.rw_write_enter_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_enter)

	.align	16
	ENTRY(rw_exit)
	ldn	[%o0], %o4			! %o4 = old lock value
	membar	#LoadStore|#StoreStore		! membar_exit()
	subcc	%o4, RW_READ_LOCK, %o5		! %o5 = new lock value if reader
	bnz,pn	%xcc, 2f			! single reader, no waiters?
	clr	%o1
1:
	ld	[THREAD_REG + T_KPRI_REQ], %g1	! begin THREAD_KPRI_RELEASE()
	srl	%o4, RW_HOLD_COUNT_SHIFT, %o3	! %o3 = hold count (lockstat)
	casx	[%o0], %o4, %o5			! try to drop lock
	cmp	%o4, %o5			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	dec	%g1				! delay: drop kpri
.rw_read_exit_lockstat_patch_point:
	retl
	st	%g1, [THREAD_REG + T_KPRI_REQ]	! delay: store new kpri
2:
	andcc	%o4, RW_WRITE_LOCKED, %g0	! are we a writer?
	bnz,a,pt %xcc, 3f
	or	THREAD_REG, RW_WRITE_LOCKED, %o4 ! delay: %o4 = owner
	cmp	%o5, RW_READ_LOCK		! would lock still be held?
	bge,pt	%xcc, 1b			! if so, go ahead and drop it
	nop
	ba,pt	%xcc, rw_exit_wakeup		! otherwise, wake waiters
	nop
3:
	casx	[%o0], %o4, %o1			! try to drop write lock
	cmp	%o4, %o1			! did we succeed?
	bne,pn	%xcc, rw_exit_wakeup		! if not, go to C
	nop
.rw_write_exit_lockstat_patch_point:
	retl
	nop
	SET_SIZE(rw_exit)

#endif

#if defined(lint)

void
lockstat_hot_patch(void)
{}

#else

#define	RETL			0x81c3e008
#define	NOP			0x01000000
#define BA			0x10800000

#define	DISP22			((1 << 22) - 1)
#define	ANNUL			0x20000000

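/*
 * Reading aid for HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)
 * (added description, not from the original source): the macro lays down an
 * inline lockstat trampoline (the instructions from the "save" through the
 * "ret"/"restore"), then computes the 22-bit displacement from the patch
 * point "addr" to that trampoline, builds the corresponding "ba" (optionally
 * annulled) instruction, and calls hot_patch_kernel_text() to store either
 * that branch (when the event is enabled in lockstat_probemap[]) or
 * normal_instr (when it is not) over the 4 bytes at "addr".  When the
 * patched branch is taken, the trampoline checks the probe id, bumps
 * t_lockstat around a call to the lockstat_probe handler (probe id, lock
 * address, rs), and returns with %o0 = 1 for the tryenter/lock_try points.
 */
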
#define	HOT_PATCH_COMMON(addr, event, normal_instr, annul, rs)		\
	ba	1f;							\
	rd	%pc, %o0;						\
	save	%sp, -SA(MINFRAME), %sp;				\
	set	lockstat_probemap, %l1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,pn	%o0, 0f;						\
	ldub	[THREAD_REG + T_LOCKSTAT], %l0;				\
	add	%l0, 1, %l2;						\
	stub	%l2, [THREAD_REG + T_LOCKSTAT];				\
	set	lockstat_probe, %g1;					\
	ld	[%l1 + (event * DTRACE_IDSIZE)], %o0;			\
	brz,a,pn %o0, 0f;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
	ldn	[%g1], %g2;						\
	mov	rs, %o2;						\
	jmpl	%g2, %o7;						\
	mov	%i0, %o1;						\
	stub	%l0, [THREAD_REG + T_LOCKSTAT];				\
0:	ret;								\
	restore	%g0, 1, %o0;	/* for mutex_tryenter / lock_try */	\
1:	set	addr, %o1;						\
	sub	%o0, %o1, %o0;						\
	srl	%o0, 2, %o0;						\
	inc	%o0;							\
	set	DISP22, %o1;						\
	and	%o1, %o0, %o0;						\
	set	BA, %o1;						\
	or	%o1, %o0, %o0;						\
	sethi	%hi(annul), %o2;					\
	add	%o0, %o2, %o2;						\
	set	addr, %o0;						\
	set	normal_instr, %o1;					\
	ld	[%i0 + (event * DTRACE_IDSIZE)], %o3;			\
	tst	%o3;							\
	movnz	%icc, %o2, %o1;						\
	call	hot_patch_kernel_text;					\
	mov	4, %o2;							\
	membar	#Sync

#define	HOT_PATCH(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, %i1)

#define	HOT_PATCH_ARG(addr, event, normal_instr, arg)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, 0, arg)

#define HOT_PATCH_ANNULLED(addr, event, normal_instr)	\
	HOT_PATCH_COMMON(addr, event, normal_instr, ANNUL, %i1)

	ENTRY(lockstat_hot_patch)
	save	%sp, -SA(MINFRAME), %sp
	set	lockstat_probemap, %i0
	HOT_PATCH(.mutex_enter_lockstat_patch_point,
		LS_MUTEX_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.mutex_tryenter_lockstat_patch_point,
		LS_MUTEX_TRYENTER_ACQUIRE, RETL)
	HOT_PATCH(.mutex_exit_lockstat_patch_point,
		LS_MUTEX_EXIT_RELEASE, RETL)
	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, RETL)
	HOT_PATCH_ARG(.rw_write_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_WRITER)
	HOT_PATCH_ARG(.rw_read_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, RETL, RW_READER)
	HOT_PATCH(.lock_set_lockstat_patch_point,
		LS_LOCK_SET_ACQUIRE, RETL)
	HOT_PATCH_ANNULLED(.lock_try_lockstat_patch_point,
		LS_LOCK_TRY_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_lockstat_patch_point,
		LS_LOCK_CLEAR_RELEASE, RETL)
	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
		LS_LOCK_SET_SPL_ACQUIRE, RETL)
	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
		LS_LOCK_CLEAR_SPLX_RELEASE, RETL)
	ret
	restore
	SET_SIZE(lockstat_hot_patch)

#endif	/* lint */

/*
 * asm_mutex_spin_enter(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 * Traps may be off, so cannot panic.
 * Does not keep statistics on the lock.
 *
 * Entry:	%l6 - points to mutex
 * 		%l7 - address of call (returns to %l7+8)
 * Uses:	%l6, %l5
 */
#ifndef lint
	.align 16
	ENTRY_NP(asm_mutex_spin_enter)
	ldstub	[%l6 + M_SPINLOCK], %l5	! try to set lock, get value in %l5
1:
	tst	%l5
	bnz	3f			! lock already held - go spin
	nop
2:
	jmp	%l7 + 8			! return
	membar	#LoadLoad
	!
	! Spin on lock without using an atomic operation to prevent the caches
	! from unnecessarily moving ownership of the line around.
	!
3:
	ldub	[%l6 + M_SPINLOCK], %l5
4:
	tst	%l5
	bz,a	1b			! lock appears to be free, try again
	ldstub	[%l6 + M_SPINLOCK], %l5	! delay slot - try to set lock

	sethi	%hi(panicstr), %l5
	ldn	[%l5 + %lo(panicstr)], %l5
	tst	%l5
	bnz	2b			! after panic, feign success
	nop
	b	4b
	ldub	[%l6 + M_SPINLOCK], %l5	! delay - reload lock
	SET_SIZE(asm_mutex_spin_enter)
#endif /* lint */

/*
 * asm_mutex_spin_exit(mutex_t *)
 *
 * For use by assembly interrupt handler only.
 * Does not change spl, since the interrupt handler is assumed to be
 * running at high level already.
 *
 * Entry:	%l6 - points to mutex
 * 		%l7 - address of call (returns to %l7+8)
 * Uses:	none
 */
#ifndef lint
	ENTRY_NP(asm_mutex_spin_exit)
	membar	#LoadStore|#StoreStore
	jmp	%l7 + 8			! return
	clrb	[%l6 + M_SPINLOCK]	! delay - clear lock
	SET_SIZE(asm_mutex_spin_exit)
#endif /* lint */

/*
 * thread_onproc()
 * Set thread in onproc state for the specified CPU.
 * Also set the thread lock pointer to the CPU's onproc lock.
 * Since the new lock isn't held, the store ordering is important.
 * If not done in assembler, the compiler could reorder the stores.
 */
#if defined(lint)

void
thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;
	t->t_lockp = &cp->cpu_thread_lock;
}

#else	/* lint */

	ENTRY(thread_onproc)
	set	TS_ONPROC, %o2		! TS_ONPROC state
	st	%o2, [%o0 + T_STATE]	! store state
	add	%o1, CPU_THREAD_LOCK, %o3 ! pointer to disp_lock while running
	retl				! return
	stn	%o3, [%o0 + T_LOCKP]	! delay - store new lock pointer
	SET_SIZE(thread_onproc)

#endif	/* lint */

/* Delay function used in some mutex code - just does 3 no-op casx operations. */
#if defined(lint)

/* ARGSUSED */
void
cas_delay(void *addr)
{}
#else	/* lint */
	ENTRY(cas_delay)
	casx [%o0], %g0, %g0
	casx [%o0], %g0, %g0
	retl
	casx [%o0], %g0, %g0
	SET_SIZE(cas_delay)
#endif	/* lint */

#if defined(lint)

/*
 * Alternative delay function for some Niagara processors.  The rd
 * instruction uses fewer resources than casx on those CPUs.
 */
/* ARGSUSED */
void
rdccr_delay(void)
{}
#else	/* lint */
	ENTRY(rdccr_delay)
	rd	%ccr, %g0
	rd	%ccr, %g0
	retl
	rd	%ccr, %g0
	SET_SIZE(rdccr_delay)
#endif	/* lint */

/*
 * mutex_delay_default(void)
 * Spins for approx a few hundred processor cycles and returns to caller.
 */
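
/*
 * Rough cost note (an estimate, assuming roughly one cycle per issued
 * instruction): the loop below runs 72 iterations of a branch plus a
 * decrement, which with branch overhead works out to the "few hundred"
 * cycles mentioned above.
 */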
#if defined(lint)

void
mutex_delay_default(void)
{}

#else	/* lint */

	ENTRY(mutex_delay_default)
	mov	72,%o0
1:	brgz	%o0, 1b
	dec	%o0
	retl
	nop
	SET_SIZE(mutex_delay_default)

#endif  /* lint */