xref: /titanic_51/usr/src/uts/intel/ia32/ml/lock_prim.s (revision 31f0c7828c10f70f5ab3465ca0a63903b6a22494)
17c478bd9Sstevel@tonic-gate/*
27c478bd9Sstevel@tonic-gate * CDDL HEADER START
37c478bd9Sstevel@tonic-gate *
47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the
5ee88d2b9Skchow * Common Development and Distribution License (the "License").
6ee88d2b9Skchow * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate *
87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate * and limitations under the License.
127c478bd9Sstevel@tonic-gate *
137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate *
197c478bd9Sstevel@tonic-gate * CDDL HEADER END
207c478bd9Sstevel@tonic-gate */
217c478bd9Sstevel@tonic-gate/*
222850d85bSmv143129 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate */
257c478bd9Sstevel@tonic-gate
267c478bd9Sstevel@tonic-gate#pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate
287c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
297c478bd9Sstevel@tonic-gate#include <sys/types.h>
307c478bd9Sstevel@tonic-gate#include <sys/thread.h>
317c478bd9Sstevel@tonic-gate#include <sys/cpuvar.h>
327c478bd9Sstevel@tonic-gate#include <vm/page.h>
337c478bd9Sstevel@tonic-gate#else	/* __lint */
347c478bd9Sstevel@tonic-gate#include "assym.h"
357c478bd9Sstevel@tonic-gate#endif	/* __lint */
367c478bd9Sstevel@tonic-gate
37575a7426Spt157919#include <sys/mutex_impl.h>
387c478bd9Sstevel@tonic-gate#include <sys/asm_linkage.h>
397c478bd9Sstevel@tonic-gate#include <sys/asm_misc.h>
407c478bd9Sstevel@tonic-gate#include <sys/regset.h>
417c478bd9Sstevel@tonic-gate#include <sys/rwlock_impl.h>
427c478bd9Sstevel@tonic-gate#include <sys/lockstat.h>
437c478bd9Sstevel@tonic-gate
447c478bd9Sstevel@tonic-gate/*
457c478bd9Sstevel@tonic-gate * lock_try(lp), ulock_try(lp)
467c478bd9Sstevel@tonic-gate *	- returns non-zero on success.
477c478bd9Sstevel@tonic-gate *	- doesn't block interrupts so don't use this to spin on a lock.
487c478bd9Sstevel@tonic-gate *
497c478bd9Sstevel@tonic-gate * ulock_try() is for a lock in the user address space.
507c478bd9Sstevel@tonic-gate */
517c478bd9Sstevel@tonic-gate
527c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
537c478bd9Sstevel@tonic-gate
/*
 * lint stubs: C prototypes of the assembly routines below so lint can
 * type-check callers.  Never compiled into the running kernel.
 */

/* ARGSUSED */
int
lock_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
lock_spin_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
ulock_try(lock_t *lp)
{ return (0); }
687c478bd9Sstevel@tonic-gate
697c478bd9Sstevel@tonic-gate#else	/* __lint */
707c478bd9Sstevel@tonic-gate	.globl	kernelbase
717c478bd9Sstevel@tonic-gate
727c478bd9Sstevel@tonic-gate#if defined(__amd64)
737c478bd9Sstevel@tonic-gate
	/*
	 * lock_try(lp) [amd64]: one-shot try-acquire of a byte lock.
	 * In: %rdi = lock addr.  Out: %al nonzero iff we got the lock.
	 * Code past the patch-point ret is dead until lockstat_hot_patch()
	 * activates it (see the lockstat comment later in this file).
	 */
	ENTRY(lock_try)
	movb	$-1, %dl
	movzbq	%dl, %rax		/* rax = 0xff */
	xchgb	%dl, (%rdi)		/* atomic: dl = old lock byte, *lp = 0xff */
	xorb	%dl, %al		/* al = 0xff ^ old: nonzero iff lock was free */
.lock_try_lockstat_patch_point:
	ret
	testb	%al, %al		/* only fire probe if we acquired it */
	jnz	0f
	ret
0:
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movl	$LS_LOCK_TRY_ACQUIRE, %edi /* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_try)
907c478bd9Sstevel@tonic-gate
917c478bd9Sstevel@tonic-gate	ENTRY(lock_spin_try)
927c478bd9Sstevel@tonic-gate	movb	$-1, %dl
937c478bd9Sstevel@tonic-gate	movzbq	%dl, %rax
947c478bd9Sstevel@tonic-gate	xchgb	%dl, (%rdi)
957c478bd9Sstevel@tonic-gate	xorb	%dl, %al
967c478bd9Sstevel@tonic-gate	ret
977c478bd9Sstevel@tonic-gate	SET_SIZE(lock_spin_try)
987c478bd9Sstevel@tonic-gate
	/*
	 * ulock_try(lp) [amd64]: try-acquire of a byte lock in USER memory
	 * (stores 1, not 0xff, when held).  Out: %al = 1 on success, 0 if
	 * already held.  DEBUG kernels panic if lp is a kernel address.
	 */
	ENTRY(ulock_try)
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	ulock_pass		/*	uaddr < kernelbase, proceed */

	/*
	 * NOTE(review): %r12 is callee-saved and is clobbered here without
	 * being preserved -- tolerable only because panic() is not expected
	 * to return to this caller.
	 */
	movq	%rdi, %r12		/* preserve lock ptr for debugging */
	leaq	.ulock_panic_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic

#endif /* DEBUG */

ulock_pass:
	movl	$1, %eax
	xchgb	%al, (%rdi)		/* atomic: al = old byte, *lp = 1 */
	xorb	$1, %al			/* al = 1 iff old byte was 0 (we got it) */
	ret
	SET_SIZE(ulock_try)
1207c478bd9Sstevel@tonic-gate
1217c478bd9Sstevel@tonic-gate#else
1227c478bd9Sstevel@tonic-gate
	/*
	 * lock_try(lp) [i386]: one-shot try-acquire of a byte lock.
	 * In: 4(%esp) = lock addr.  Out: %eax = 1 on success, 0 otherwise.
	 * Code past the patch-point ret is dead until lockstat hot-patches it.
	 */
	ENTRY(lock_try)
	movl	$1,%edx
	movl	4(%esp),%ecx		/* ecx = lock addr */
	xorl	%eax,%eax
	xchgb	%dl, (%ecx)		/* using dl will avoid partial */
	testb	%dl,%dl			/* stalls on P6 ? */
	setz	%al			/* eax = 1 iff old byte was 0 */
.lock_try_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	testl	%eax, %eax		/* only fire probe on success */
	jz	0f
	movl	$LS_LOCK_TRY_ACQUIRE, %eax
	jmp	lockstat_wrapper
0:
	ret
	SET_SIZE(lock_try)
1407c478bd9Sstevel@tonic-gate
1417c478bd9Sstevel@tonic-gate	ENTRY(lock_spin_try)
1427c478bd9Sstevel@tonic-gate	movl	$-1,%edx
1437c478bd9Sstevel@tonic-gate	movl	4(%esp),%ecx		/* ecx = lock addr */
1447c478bd9Sstevel@tonic-gate	xorl	%eax,%eax
1457c478bd9Sstevel@tonic-gate	xchgb	%dl, (%ecx)		/* using dl will avoid partial */
1467c478bd9Sstevel@tonic-gate	testb	%dl,%dl			/* stalls on P6 ? */
1477c478bd9Sstevel@tonic-gate	setz	%al
1487c478bd9Sstevel@tonic-gate	ret
1497c478bd9Sstevel@tonic-gate	SET_SIZE(lock_spin_try)
1507c478bd9Sstevel@tonic-gate
1517c478bd9Sstevel@tonic-gate	ENTRY(ulock_try)
1527c478bd9Sstevel@tonic-gate#ifdef DEBUG
1537c478bd9Sstevel@tonic-gate	movl	kernelbase, %eax
1547c478bd9Sstevel@tonic-gate	cmpl	%eax, 4(%esp)		/* test uaddr < kernelbase */
1557c478bd9Sstevel@tonic-gate	jb	ulock_pass		/* uaddr < kernelbase, proceed */
1567c478bd9Sstevel@tonic-gate
1577c478bd9Sstevel@tonic-gate	pushl	$.ulock_panic_msg
1587c478bd9Sstevel@tonic-gate	call	panic
1597c478bd9Sstevel@tonic-gate
1607c478bd9Sstevel@tonic-gate#endif /* DEBUG */
1617c478bd9Sstevel@tonic-gate
1627c478bd9Sstevel@tonic-gateulock_pass:
1637c478bd9Sstevel@tonic-gate	movl	$1,%eax
1647c478bd9Sstevel@tonic-gate	movl	4(%esp),%ecx
1657c478bd9Sstevel@tonic-gate	xchgb	%al, (%ecx)
1667c478bd9Sstevel@tonic-gate	xorb	$1, %al
1677c478bd9Sstevel@tonic-gate	ret
1687c478bd9Sstevel@tonic-gate	SET_SIZE(ulock_try)
1697c478bd9Sstevel@tonic-gate
1707c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
1717c478bd9Sstevel@tonic-gate
#ifdef DEBUG
	.data
	/* panic message used by the DEBUG address check in ulock_try() */
.ulock_panic_msg:
	.string "ulock_try: Argument is above kernelbase"
	.text
#endif	/* DEBUG */
1787c478bd9Sstevel@tonic-gate
1797c478bd9Sstevel@tonic-gate#endif	/* __lint */
1807c478bd9Sstevel@tonic-gate
1817c478bd9Sstevel@tonic-gate/*
1827c478bd9Sstevel@tonic-gate * lock_clear(lp)
1837c478bd9Sstevel@tonic-gate *	- unlock lock without changing interrupt priority level.
1847c478bd9Sstevel@tonic-gate */
1857c478bd9Sstevel@tonic-gate
1867c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
1877c478bd9Sstevel@tonic-gate
/* lint stubs for the lock_clear/ulock_clear assembly routines below */

/* ARGSUSED */
void
lock_clear(lock_t *lp)
{}

/* ARGSUSED */
void
ulock_clear(lock_t *lp)
{}
1977c478bd9Sstevel@tonic-gate
1987c478bd9Sstevel@tonic-gate#else	/* __lint */
1997c478bd9Sstevel@tonic-gate
2007c478bd9Sstevel@tonic-gate#if defined(__amd64)
2017c478bd9Sstevel@tonic-gate
	/*
	 * lock_clear(lp) [amd64]: release a byte lock by storing 0.
	 * In: %rdi = lock addr.  The tail past the patch-point ret is the
	 * lockstat probe path, dead until hot-patched.
	 */
	ENTRY(lock_clear)
	movb	$0, (%rdi)
.lock_clear_lockstat_patch_point:
	ret
	movq	%rdi, %rsi			/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_RELEASE, %edi	/* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)
2117c478bd9Sstevel@tonic-gate
	/*
	 * ulock_clear(lp) [amd64]: release a user-space byte lock.
	 * In: %rdi = lock addr.  DEBUG kernels panic on a kernel address.
	 */
	ENTRY(ulock_clear)
#ifdef DEBUG
	movq	kernelbase(%rip), %rcx
	cmpq	%rcx, %rdi		/* test uaddr < kernelbase */
	jb	ulock_clr		/*	 uaddr < kernelbase, proceed */

	leaq	.ulock_clear_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic
#endif

ulock_clr:
	movb	$0, (%rdi)		/* *lp = 0 (unheld) */
	ret
	SET_SIZE(ulock_clear)
2297c478bd9Sstevel@tonic-gate
2307c478bd9Sstevel@tonic-gate#else
2317c478bd9Sstevel@tonic-gate
	/*
	 * lock_clear(lp) [i386]: release a byte lock by storing 0.
	 * In: 4(%esp) = lock addr.  Tail is the hot-patched lockstat path.
	 */
	ENTRY(lock_clear)
	movl	4(%esp), %eax
	movb	$0, (%eax)
.lock_clear_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread addr */
	movl	%eax, %ecx			/* ecx = lock pointer */
	movl	$LS_LOCK_CLEAR_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)
2427c478bd9Sstevel@tonic-gate
	/*
	 * ulock_clear(lp) [i386]: release a user-space byte lock.
	 * In: 4(%esp) = lock addr.  DEBUG kernels panic on a kernel address.
	 */
	ENTRY(ulock_clear)
#ifdef DEBUG
	movl	kernelbase, %ecx
	cmpl	%ecx, 4(%esp)		/* test uaddr < kernelbase */
	jb	ulock_clr		/* uaddr < kernelbase, proceed */

	pushl	$.ulock_clear_msg
	call	panic
#endif

ulock_clr:
	movl	4(%esp),%eax
	xorl	%ecx,%ecx
	movb	%cl, (%eax)		/* *lp = 0 (unheld) */
	ret
	SET_SIZE(ulock_clear)
2597c478bd9Sstevel@tonic-gate
2607c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
2617c478bd9Sstevel@tonic-gate
#ifdef DEBUG
	.data
	/* panic message used by the DEBUG address check in ulock_clear() */
.ulock_clear_msg:
	.string "ulock_clear: Argument is above kernelbase"
	.text
#endif	/* DEBUG */
2687c478bd9Sstevel@tonic-gate
2697c478bd9Sstevel@tonic-gate
2707c478bd9Sstevel@tonic-gate#endif	/* __lint */
2717c478bd9Sstevel@tonic-gate
2727c478bd9Sstevel@tonic-gate/*
2737c478bd9Sstevel@tonic-gate * lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
2747c478bd9Sstevel@tonic-gate * Drops lp, sets pil to new_pil, stores old pil in *old_pil.
2757c478bd9Sstevel@tonic-gate */
2767c478bd9Sstevel@tonic-gate
2777c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
2787c478bd9Sstevel@tonic-gate
/* lint stub for the lock_set_spl assembly routine below */

/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
{}
2837c478bd9Sstevel@tonic-gate
2847c478bd9Sstevel@tonic-gate#else	/* __lint */
2857c478bd9Sstevel@tonic-gate
2867c478bd9Sstevel@tonic-gate#if defined(__amd64)
2877c478bd9Sstevel@tonic-gate
	/*
	 * lock_set_spl(lp, new_pil, old_pil) [amd64]: raise PIL via splr()
	 * (previous pil returned in %ax), then try to grab the lock; on
	 * contention fall through to lock_set_spl_spin() in C.  Args are
	 * stashed in the local frame across the splr() call.
	 */
	ENTRY(lock_set_spl)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp
	movl	%esi, 8(%rsp)		/* save priority level */
	movq	%rdx, 16(%rsp)		/* save old pil ptr */
	movq	%rdi, 24(%rsp)		/* save lock pointer */
	movl	%esi, %edi		/* pass priority level */
	call	splr			/* raise priority level */
	movq	24(%rsp), %rdi		/* rdi = lock addr */
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movq	16(%rsp), %rdx		/* rdx = old pil addr */
	movw	%ax, (%rdx)		/* store old pil */
	leave
.lock_set_spl_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %edi
	jmp	lockstat_wrapper
.lss_miss:
	movl	8(%rsp), %esi		/* new_pil */
	movq	16(%rsp), %rdx		/* old_pil_addr */
	movl	%eax, %ecx		/* original pil */
	leave				/* unwind stack */
	jmp	lock_set_spl_spin	/* tail call: (lp, new_pil, old_pil, pil) */
	SET_SIZE(lock_set_spl)
3187c478bd9Sstevel@tonic-gate
3197c478bd9Sstevel@tonic-gate#else
3207c478bd9Sstevel@tonic-gate
	/*
	 * lock_set_spl(lp, new_pil, old_pil) [i386]: raise PIL via splr()
	 * (previous pil returned in %ax), then try to grab the lock; on
	 * contention call lock_set_spl_spin() in C.  Note the %esp-relative
	 * argument offsets shift as values are pushed/popped below.
	 */
	ENTRY(lock_set_spl)
	movl	8(%esp), %eax		/* get priority level */
	pushl	%eax
	call	splr			/* raise priority level */
	movl 	8(%esp), %ecx		/* ecx = lock addr */
	movl	$-1, %edx
	addl	$4, %esp
	xchgb	%dl, (%ecx)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	movl	12(%esp), %edx		/* edx = olp pil addr (ZF unaffected) */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movw	%ax, (%edx)		/* store old pil */
.lock_set_spl_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr*/
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %eax
	jmp	lockstat_wrapper
.lss_miss:
	pushl	%eax			/* original pil */
	pushl	%edx			/* old_pil addr */
	pushl	16(%esp)		/* new_pil (offset includes 2 pushes above) */
	pushl	%ecx			/* lock addr */
	call	lock_set_spl_spin
	addl	$16, %esp
	ret
	SET_SIZE(lock_set_spl)
3477c478bd9Sstevel@tonic-gate
3487c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
3497c478bd9Sstevel@tonic-gate
3507c478bd9Sstevel@tonic-gate#endif	/* __lint */
3517c478bd9Sstevel@tonic-gate
3527c478bd9Sstevel@tonic-gate/*
3537c478bd9Sstevel@tonic-gate * void
3547c478bd9Sstevel@tonic-gate * lock_init(lp)
3557c478bd9Sstevel@tonic-gate */
3567c478bd9Sstevel@tonic-gate
3577c478bd9Sstevel@tonic-gate#if defined(__lint)
3587c478bd9Sstevel@tonic-gate
/* lint stub for the lock_init assembly routine below */

/* ARGSUSED */
void
lock_init(lock_t *lp)
{}
3637c478bd9Sstevel@tonic-gate
3647c478bd9Sstevel@tonic-gate#else	/* __lint */
3657c478bd9Sstevel@tonic-gate
3667c478bd9Sstevel@tonic-gate#if defined(__amd64)
3677c478bd9Sstevel@tonic-gate
3687c478bd9Sstevel@tonic-gate	ENTRY(lock_init)
3697c478bd9Sstevel@tonic-gate	movb	$0, (%rdi)
3707c478bd9Sstevel@tonic-gate	ret
3717c478bd9Sstevel@tonic-gate	SET_SIZE(lock_init)
3727c478bd9Sstevel@tonic-gate
3737c478bd9Sstevel@tonic-gate#else
3747c478bd9Sstevel@tonic-gate
3757c478bd9Sstevel@tonic-gate	ENTRY(lock_init)
3767c478bd9Sstevel@tonic-gate	movl	4(%esp), %eax
3777c478bd9Sstevel@tonic-gate	movb	$0, (%eax)
3787c478bd9Sstevel@tonic-gate	ret
3797c478bd9Sstevel@tonic-gate	SET_SIZE(lock_init)
3807c478bd9Sstevel@tonic-gate
3817c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
3827c478bd9Sstevel@tonic-gate
3837c478bd9Sstevel@tonic-gate#endif	/* __lint */
3847c478bd9Sstevel@tonic-gate
3857c478bd9Sstevel@tonic-gate/*
3867c478bd9Sstevel@tonic-gate * void
3877c478bd9Sstevel@tonic-gate * lock_set(lp)
3887c478bd9Sstevel@tonic-gate */
3897c478bd9Sstevel@tonic-gate
3907c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
3917c478bd9Sstevel@tonic-gate
/* lint stub for the lock_set assembly routine below */

/* ARGSUSED */
void
lock_set(lock_t *lp)
{}
3967c478bd9Sstevel@tonic-gate
3977c478bd9Sstevel@tonic-gate#else	/* __lint */
3987c478bd9Sstevel@tonic-gate
3997c478bd9Sstevel@tonic-gate#if defined(__amd64)
4007c478bd9Sstevel@tonic-gate
	/*
	 * lock_set(lp) [amd64]: acquire a byte lock, tail-calling
	 * lock_set_spin() in C on contention.  In: %rdi = lock addr.
	 * Tail past the patch-point ret is the lockstat probe path.
	 */
	ENTRY(lock_set)
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)
4137c478bd9Sstevel@tonic-gate
4147c478bd9Sstevel@tonic-gate#else
4157c478bd9Sstevel@tonic-gate
	/*
	 * lock_set(lp) [i386]: acquire a byte lock, tail-calling
	 * lock_set_spin() in C on contention.  In: 4(%esp) = lock addr.
	 * Tail past the patch-point ret is the lockstat probe path.
	 */
	ENTRY(lock_set)
	movl	4(%esp), %ecx		/* ecx = lock addr */
	movl	$-1, %edx
	xchgb	%dl, (%ecx)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)
4287c478bd9Sstevel@tonic-gate
4297c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
4307c478bd9Sstevel@tonic-gate
4317c478bd9Sstevel@tonic-gate#endif	/* __lint */
4327c478bd9Sstevel@tonic-gate
4337c478bd9Sstevel@tonic-gate/*
4347c478bd9Sstevel@tonic-gate * lock_clear_splx(lp, s)
4357c478bd9Sstevel@tonic-gate */
4367c478bd9Sstevel@tonic-gate
4377c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
4387c478bd9Sstevel@tonic-gate
/* lint stub for the lock_clear_splx assembly routine below */

/* ARGSUSED */
void
lock_clear_splx(lock_t *lp, int s)
{}
4437c478bd9Sstevel@tonic-gate
4447c478bd9Sstevel@tonic-gate#else	/* __lint */
4457c478bd9Sstevel@tonic-gate
4467c478bd9Sstevel@tonic-gate#if defined(__amd64)
4477c478bd9Sstevel@tonic-gate
	/*
	 * lock_clear_splx(lp, s) [amd64]: release a byte lock, then restore
	 * the PIL via splx(s).  The "jmp 0f" at the patch point is rewritten
	 * by lockstat_hot_patch() (via the LOCK_CLEAR_SPLX_* macros below)
	 * to branch to .lock_clear_splx_lockstat instead.
	 */
	ENTRY(lock_clear_splx)
	movb	$0, (%rdi)		/* clear lock */
.lock_clear_splx_lockstat_patch_point:
	jmp	0f
0:
	movl	%esi, %edi		/* arg for splx */
	jmp	splx			/* let splx do its thing */
.lock_clear_splx_lockstat:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	subq	$16, %rsp		/* space to save args across splx */
	movq	%rdi, 8(%rsp)		/* save lock ptr across splx call */
	movl	%esi, %edi		/* arg for splx */
	call	splx			/* lower the priority */
	movq	8(%rsp), %rsi		/* rsi = lock ptr */
	leave				/* unwind stack */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)
4687c478bd9Sstevel@tonic-gate
469ae115bc7Smrj#else
470ae115bc7Smrj
	/*
	 * lock_clear_splx(lp, s) [i386]: release a byte lock, then restore
	 * the PIL via splx(s).  The "jmp 0f" at the patch point is rewritten
	 * by lockstat to branch to .lock_clear_splx_lockstat instead.
	 */
	ENTRY(lock_clear_splx)
	movl	4(%esp), %eax		/* eax = lock addr */
	movb	$0, (%eax)		/* clear lock */
.lock_clear_splx_lockstat_patch_point:
	jmp	0f
0:
	movl	8(%esp), %edx		/* edx = desired pil */
	movl	%edx, 4(%esp)		/* set spl arg up for splx */
	jmp	splx			/* let splx do it's thing */
.lock_clear_splx_lockstat:
	movl	8(%esp), %edx		/* edx = desired pil */
	pushl	%ebp			/* set up stack frame */
	movl	%esp, %ebp
	pushl	%edx
	call	splx
	leave				/* unwind stack */
	movl	4(%esp), %ecx		/* ecx = lock pointer */
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)
492ae115bc7Smrj
493ae115bc7Smrj#endif	/* !__amd64 */
494ae115bc7Smrj
/*
 * Patch parameters consumed by lockstat_hot_patch(): the byte to write
 * (the displacement from the patch-point jmp to .lock_clear_splx_lockstat)
 * and where to write it (the jmp's displacement byte, patch point + 1).
 * GNU as takes ()-grouped expressions; the other supported assembler
 * requires [].
 */
#if defined(__GNUC_AS__)
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	(.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2)

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	(.lock_clear_splx_lockstat_patch_point + 1)
#else
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	[.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2]

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	[.lock_clear_splx_lockstat_patch_point + 1]
#endif
5087c478bd9Sstevel@tonic-gate
5097c478bd9Sstevel@tonic-gate#endif	/* __lint */
5107c478bd9Sstevel@tonic-gate
5117c478bd9Sstevel@tonic-gate/*
5127c478bd9Sstevel@tonic-gate * mutex_enter() and mutex_exit().
5137c478bd9Sstevel@tonic-gate *
5147c478bd9Sstevel@tonic-gate * These routines handle the simple cases of mutex_enter() (adaptive
5157c478bd9Sstevel@tonic-gate * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
5167c478bd9Sstevel@tonic-gate * If anything complicated is going on we punt to mutex_vector_enter().
5177c478bd9Sstevel@tonic-gate *
5187c478bd9Sstevel@tonic-gate * mutex_tryenter() is similar to mutex_enter() but returns zero if
5197c478bd9Sstevel@tonic-gate * the lock cannot be acquired, nonzero on success.
5207c478bd9Sstevel@tonic-gate *
5217c478bd9Sstevel@tonic-gate * If mutex_exit() gets preempted in the window between checking waiters
5227c478bd9Sstevel@tonic-gate * and clearing the lock, we can miss wakeups.  Disabling preemption
5237c478bd9Sstevel@tonic-gate * in the mutex code is prohibitively expensive, so instead we detect
5247c478bd9Sstevel@tonic-gate * mutex preemption by examining the trapped PC in the interrupt path.
5257c478bd9Sstevel@tonic-gate * If we interrupt a thread in mutex_exit() that has not yet cleared
5267c478bd9Sstevel@tonic-gate * the lock, cmnint() resets its PC back to the beginning of
5277c478bd9Sstevel@tonic-gate * mutex_exit() so it will check again for waiters when it resumes.
5287c478bd9Sstevel@tonic-gate *
5297c478bd9Sstevel@tonic-gate * The lockstat code below is activated when the lockstat driver
5307c478bd9Sstevel@tonic-gate * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
5317c478bd9Sstevel@tonic-gate * Note that we don't need to test lockstat_event_mask here -- we won't
5327c478bd9Sstevel@tonic-gate * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
5337c478bd9Sstevel@tonic-gate */
5347c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
5357c478bd9Sstevel@tonic-gate
/* lint stubs for the mutex fast-path assembly routines below */

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
int
mutex_adaptive_tryenter(mutex_impl_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}
5557c478bd9Sstevel@tonic-gate
5567c478bd9Sstevel@tonic-gate#else
5577c478bd9Sstevel@tonic-gate
5587c478bd9Sstevel@tonic-gate#if defined(__amd64)
5597c478bd9Sstevel@tonic-gate
	/*
	 * mutex_enter(lp) [amd64]: fast path for an unheld adaptive mutex.
	 * Atomically swing the owner word from 0 to curthread; any other
	 * state punts to mutex_vector_enter() in C.  The Opteron 6323525
	 * patch points reserve nop space so an lfence can be patched in on
	 * affected CPUs (which moves the lockstat patch point as well).
	 */
	ENTRY_NP(mutex_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)			/* *lp: 0 -> curthread, atomically */
	jnz	mutex_vector_enter		/* held or non-adaptive: hard case */
.mutex_enter_lockstat_patch_point:
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_enter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
/*
 * expects %rdx=thread, %rsi=lock, %edi=lockstat event
 */
	ALTENTRY(lockstat_wrapper)
	incb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	call	*lockstat_probe
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper)
	SET_SIZE(mutex_enter)
6007c478bd9Sstevel@tonic-gate
6017c478bd9Sstevel@tonic-gate/*
6027c478bd9Sstevel@tonic-gate * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
6037c478bd9Sstevel@tonic-gate */
/*
 * lockstat_wrapper_arg: like lockstat_wrapper but carries an extra probe
 * argument in %rdx.
 * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
 */
	ENTRY(lockstat_wrapper_arg)
	incb	T_LOCKSTAT(%rcx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	call	*lockstat_probe
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper_arg)
6217c478bd9Sstevel@tonic-gate
6227c478bd9Sstevel@tonic-gate
	/*
	 * mutex_tryenter(lp) [amd64]: non-blocking mutex_enter().  Returns
	 * nonzero on success; punts to mutex_vector_tryenter() in C when
	 * the cmpxchg fails.  Patch-point layout mirrors mutex_enter().
	 */
	ENTRY(mutex_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)			/* *lp: 0 -> curthread, atomically */
	jnz	mutex_vector_tryenter
	not	%eax				/* return success (nonzero) */
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_tryenter_lockstat_patch_point:
.mutex_tryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_tryenter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.mutex_tryenter_lockstat_patch_point:
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_tryenter)
6467c478bd9Sstevel@tonic-gate
6477c478bd9Sstevel@tonic-gate	ENTRY(mutex_adaptive_tryenter)
6487c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
6497c478bd9Sstevel@tonic-gate	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
6507c478bd9Sstevel@tonic-gate	lock
6517c478bd9Sstevel@tonic-gate	cmpxchgq %rdx, (%rdi)
6527c478bd9Sstevel@tonic-gate	jnz	0f
6537c478bd9Sstevel@tonic-gate	not	%eax				/* return success (nonzero) */
654ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
655ee88d2b9Skchow.mutex_atryenter_6323525_patch_point:
656ee88d2b9Skchow	ret					/* nop space for lfence */
657ee88d2b9Skchow	nop
658ee88d2b9Skchow	nop
659ee88d2b9Skchow	nop
660ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
6617c478bd9Sstevel@tonic-gate	ret
662ee88d2b9Skchow#endif	/* OPTERON_WORKAROUND_6323525 */
6637c478bd9Sstevel@tonic-gate0:
6647c478bd9Sstevel@tonic-gate	xorl	%eax, %eax			/* return failure */
6657c478bd9Sstevel@tonic-gate	ret
6667c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_adaptive_tryenter)
6677c478bd9Sstevel@tonic-gate
668575a7426Spt157919	.globl	mutex_owner_running_critical_start
669575a7426Spt157919
670575a7426Spt157919	ENTRY(mutex_owner_running)
671575a7426Spt157919mutex_owner_running_critical_start:
672575a7426Spt157919	movq	(%rdi), %r11		/* get owner field */
673575a7426Spt157919	andq	$MUTEX_THREAD, %r11	/* remove waiters bit */
674575a7426Spt157919	cmpq	$0, %r11		/* if free, skip */
675575a7426Spt157919	je	1f			/* go return 0 */
676575a7426Spt157919	movq	T_CPU(%r11), %r8	/* get owner->t_cpu */
677575a7426Spt157919	movq	CPU_THREAD(%r8), %r9	/* get t_cpu->cpu_thread */
678575a7426Spt157919.mutex_owner_running_critical_end:
679575a7426Spt157919	cmpq	%r11, %r9	/* owner == running thread? */
680575a7426Spt157919	je	2f		/* yes, go return cpu */
681575a7426Spt1579191:
682575a7426Spt157919	xorq	%rax, %rax	/* return 0 */
683575a7426Spt157919	ret
684575a7426Spt1579192:
685575a7426Spt157919	movq	%r8, %rax		/* return cpu */
686575a7426Spt157919	ret
687575a7426Spt157919	SET_SIZE(mutex_owner_running)
688575a7426Spt157919
689575a7426Spt157919	.globl	mutex_owner_running_critical_size
690575a7426Spt157919	.type	mutex_owner_running_critical_size, @object
691575a7426Spt157919	.align	CPTRSIZE
692575a7426Spt157919mutex_owner_running_critical_size:
693575a7426Spt157919	.quad	.mutex_owner_running_critical_end - mutex_owner_running_critical_start
694575a7426Spt157919	SET_SIZE(mutex_owner_running_critical_size)
695575a7426Spt157919
6967c478bd9Sstevel@tonic-gate	.globl	mutex_exit_critical_start
6977c478bd9Sstevel@tonic-gate
6987c478bd9Sstevel@tonic-gate	ENTRY(mutex_exit)
6997c478bd9Sstevel@tonic-gatemutex_exit_critical_start:		/* If interrupted, restart here */
7007c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rdx
7017c478bd9Sstevel@tonic-gate	cmpq	%rdx, (%rdi)
7027c478bd9Sstevel@tonic-gate	jne	mutex_vector_exit		/* wrong type or wrong owner */
7037c478bd9Sstevel@tonic-gate	movq	$0, (%rdi)			/* clear owner AND lock */
7047c478bd9Sstevel@tonic-gate.mutex_exit_critical_end:
7057c478bd9Sstevel@tonic-gate.mutex_exit_lockstat_patch_point:
7067c478bd9Sstevel@tonic-gate	ret
7077c478bd9Sstevel@tonic-gate	movq	%rdi, %rsi
7087c478bd9Sstevel@tonic-gate	movl	$LS_MUTEX_EXIT_RELEASE, %edi
7097c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper
7107c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_exit)
7117c478bd9Sstevel@tonic-gate
7127c478bd9Sstevel@tonic-gate	.globl	mutex_exit_critical_size
7137c478bd9Sstevel@tonic-gate	.type	mutex_exit_critical_size, @object
7147c478bd9Sstevel@tonic-gate	.align	CPTRSIZE
7157c478bd9Sstevel@tonic-gatemutex_exit_critical_size:
7167c478bd9Sstevel@tonic-gate	.quad	.mutex_exit_critical_end - mutex_exit_critical_start
7177c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_exit_critical_size)
7187c478bd9Sstevel@tonic-gate
7197c478bd9Sstevel@tonic-gate#else
7207c478bd9Sstevel@tonic-gate
7217c478bd9Sstevel@tonic-gate	ENTRY_NP(mutex_enter)
7227c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
7237c478bd9Sstevel@tonic-gate	movl	4(%esp), %ecx			/* ecx = lock ptr */
7247c478bd9Sstevel@tonic-gate	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
7257c478bd9Sstevel@tonic-gate	lock
7267c478bd9Sstevel@tonic-gate	cmpxchgl %edx, (%ecx)
7277c478bd9Sstevel@tonic-gate	jnz	mutex_vector_enter
728ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
729ee88d2b9Skchow.mutex_enter_lockstat_patch_point:
730ee88d2b9Skchow.mutex_enter_6323525_patch_point:
731ee88d2b9Skchow	ret					/* nop space for lfence */
732ee88d2b9Skchow	nop
733ee88d2b9Skchow	nop
734ee88d2b9Skchow.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
735ee88d2b9Skchow	nop
736ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
7377c478bd9Sstevel@tonic-gate.mutex_enter_lockstat_patch_point:
7387c478bd9Sstevel@tonic-gate	ret
739ee88d2b9Skchow#endif	/* OPTERON_WORKAROUND_6323525 */
7407c478bd9Sstevel@tonic-gate	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
7417c478bd9Sstevel@tonic-gate	ALTENTRY(lockstat_wrapper)	/* expects edx=thread, ecx=lock, */
7427c478bd9Sstevel@tonic-gate					/*   eax=lockstat event */
7437c478bd9Sstevel@tonic-gate	pushl	%ebp				/* buy a frame */
7447c478bd9Sstevel@tonic-gate	movl	%esp, %ebp
7457c478bd9Sstevel@tonic-gate	incb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat++ */
7467c478bd9Sstevel@tonic-gate	pushl	%edx				/* save thread pointer	 */
7477c478bd9Sstevel@tonic-gate	movl	$lockstat_probemap, %edx
7487c478bd9Sstevel@tonic-gate	movl	(%edx, %eax, DTRACE_IDSIZE), %eax
7497c478bd9Sstevel@tonic-gate	testl	%eax, %eax			/* check for non-zero probe */
7507c478bd9Sstevel@tonic-gate	jz	1f
7517c478bd9Sstevel@tonic-gate	pushl	%ecx				/* push lock */
7527c478bd9Sstevel@tonic-gate	pushl	%eax				/* push probe ID */
7537c478bd9Sstevel@tonic-gate	call	*lockstat_probe
7547c478bd9Sstevel@tonic-gate	addl	$8, %esp
7557c478bd9Sstevel@tonic-gate1:
7567c478bd9Sstevel@tonic-gate	popl	%edx				/* restore thread pointer */
7577c478bd9Sstevel@tonic-gate	decb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat-- */
7587c478bd9Sstevel@tonic-gate	movl	$1, %eax			/* return success if tryenter */
7597c478bd9Sstevel@tonic-gate	popl	%ebp				/* pop off frame */
7607c478bd9Sstevel@tonic-gate	ret
7617c478bd9Sstevel@tonic-gate	SET_SIZE(lockstat_wrapper)
7627c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_enter)
7637c478bd9Sstevel@tonic-gate
7647c478bd9Sstevel@tonic-gate	ENTRY(lockstat_wrapper_arg)	/* expects edx=thread, ecx=lock, */
7657c478bd9Sstevel@tonic-gate					/* eax=lockstat event, pushed arg */
7667c478bd9Sstevel@tonic-gate	incb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat++ */
7677c478bd9Sstevel@tonic-gate	pushl	%edx				/* save thread pointer	 */
7687c478bd9Sstevel@tonic-gate	movl	$lockstat_probemap, %edx
7697c478bd9Sstevel@tonic-gate	movl	(%edx, %eax, DTRACE_IDSIZE), %eax
7707c478bd9Sstevel@tonic-gate	testl	%eax, %eax			/* check for non-zero probe */
7717c478bd9Sstevel@tonic-gate	jz	1f
7727c478bd9Sstevel@tonic-gate	pushl	%ebp				/* save %ebp */
7737c478bd9Sstevel@tonic-gate	pushl	8(%esp)				/* push arg1 */
7747c478bd9Sstevel@tonic-gate	movl	%ebp, 12(%esp)			/* fake up the stack frame */
7757c478bd9Sstevel@tonic-gate	movl	%esp, %ebp			/* fake up base pointer */
7767c478bd9Sstevel@tonic-gate	addl	$12, %ebp			/* adjust faked base pointer */
7777c478bd9Sstevel@tonic-gate	pushl	%ecx				/* push lock */
7787c478bd9Sstevel@tonic-gate	pushl	%eax				/* push probe ID */
7797c478bd9Sstevel@tonic-gate	call	*lockstat_probe
7807c478bd9Sstevel@tonic-gate	addl	$12, %esp			/* adjust for arguments */
7817c478bd9Sstevel@tonic-gate	popl	%ebp				/* pop frame */
7827c478bd9Sstevel@tonic-gate1:
7837c478bd9Sstevel@tonic-gate	popl	%edx				/* restore thread pointer */
7847c478bd9Sstevel@tonic-gate	decb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat-- */
7857c478bd9Sstevel@tonic-gate	movl	$1, %eax			/* return success if tryenter */
7867c478bd9Sstevel@tonic-gate	addl	$4, %esp			/* pop argument */
7877c478bd9Sstevel@tonic-gate	ret
7887c478bd9Sstevel@tonic-gate	SET_SIZE(lockstat_wrapper_arg)
7897c478bd9Sstevel@tonic-gate
7907c478bd9Sstevel@tonic-gate
7917c478bd9Sstevel@tonic-gate	ENTRY(mutex_tryenter)
7927c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
7937c478bd9Sstevel@tonic-gate	movl	4(%esp), %ecx			/* ecx = lock ptr */
7947c478bd9Sstevel@tonic-gate	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
7957c478bd9Sstevel@tonic-gate	lock
7967c478bd9Sstevel@tonic-gate	cmpxchgl %edx, (%ecx)
7977c478bd9Sstevel@tonic-gate	jnz	mutex_vector_tryenter
7987c478bd9Sstevel@tonic-gate	movl	%ecx, %eax
799ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
800ee88d2b9Skchow.mutex_tryenter_lockstat_patch_point:
801ee88d2b9Skchow.mutex_tryenter_6323525_patch_point:
802ee88d2b9Skchow	ret					/* nop space for lfence */
803ee88d2b9Skchow	nop
804ee88d2b9Skchow	nop
805ee88d2b9Skchow.mutex_tryenter_lockstat_6323525_patch_point:	/* new patch point if lfence */
806ee88d2b9Skchow	nop
807ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
8087c478bd9Sstevel@tonic-gate.mutex_tryenter_lockstat_patch_point:
8097c478bd9Sstevel@tonic-gate	ret
810ee88d2b9Skchow#endif	/* OPTERON_WORKAROUND_6323525 */
8117c478bd9Sstevel@tonic-gate	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
8127c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper
8137c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_tryenter)
8147c478bd9Sstevel@tonic-gate
8157c478bd9Sstevel@tonic-gate	ENTRY(mutex_adaptive_tryenter)
8167c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
8177c478bd9Sstevel@tonic-gate	movl	4(%esp), %ecx			/* ecx = lock ptr */
8187c478bd9Sstevel@tonic-gate	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
8197c478bd9Sstevel@tonic-gate	lock
8207c478bd9Sstevel@tonic-gate	cmpxchgl %edx, (%ecx)
8217c478bd9Sstevel@tonic-gate	jnz	0f
8227c478bd9Sstevel@tonic-gate	movl	%ecx, %eax
823ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
824ee88d2b9Skchow.mutex_atryenter_6323525_patch_point:
825ee88d2b9Skchow	ret					/* nop space for lfence */
826ee88d2b9Skchow	nop
827ee88d2b9Skchow	nop
828ee88d2b9Skchow	nop
829ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
8307c478bd9Sstevel@tonic-gate	ret
831ee88d2b9Skchow#endif	/* OPTERON_WORKAROUND_6323525 */
8327c478bd9Sstevel@tonic-gate0:
8337c478bd9Sstevel@tonic-gate	xorl	%eax, %eax
8347c478bd9Sstevel@tonic-gate	ret
8357c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_adaptive_tryenter)
8367c478bd9Sstevel@tonic-gate
837575a7426Spt157919	.globl	mutex_owner_running_critical_start
838575a7426Spt157919
839575a7426Spt157919	ENTRY(mutex_owner_running)
840575a7426Spt157919mutex_owner_running_critical_start:
841575a7426Spt157919	movl	4(%esp), %eax		/* get owner field */
842575a7426Spt157919	movl	(%eax), %eax
843575a7426Spt157919	andl	$MUTEX_THREAD, %eax	/* remove waiters bit */
844575a7426Spt157919	cmpl	$0, %eax		/* if free, skip */
845575a7426Spt157919	je	1f			/* go return 0 */
846575a7426Spt157919	movl	T_CPU(%eax), %ecx	/* get owner->t_cpu */
847575a7426Spt157919	movl	CPU_THREAD(%ecx), %edx	/* get t_cpu->cpu_thread */
848575a7426Spt157919.mutex_owner_running_critical_end:
849575a7426Spt157919	cmpl	%eax, %edx	/* owner == running thread? */
850575a7426Spt157919	je	2f		/* yes, go return cpu */
851575a7426Spt1579191:
852575a7426Spt157919	xorl	%eax, %eax	/* return 0 */
853575a7426Spt157919	ret
854575a7426Spt1579192:
855575a7426Spt157919	movl	%ecx, %eax	/* return cpu */
856575a7426Spt157919	ret
857575a7426Spt157919
858575a7426Spt157919	SET_SIZE(mutex_owner_running)
859575a7426Spt157919
860575a7426Spt157919	.globl	mutex_owner_running_critical_size
861575a7426Spt157919	.type	mutex_owner_running_critical_size, @object
862575a7426Spt157919	.align	CPTRSIZE
863575a7426Spt157919mutex_owner_running_critical_size:
864575a7426Spt157919	.long	.mutex_owner_running_critical_end - mutex_owner_running_critical_start
865575a7426Spt157919	SET_SIZE(mutex_owner_running_critical_size)
866575a7426Spt157919
8677c478bd9Sstevel@tonic-gate	.globl	mutex_exit_critical_start
8687c478bd9Sstevel@tonic-gate
8697c478bd9Sstevel@tonic-gate	ENTRY(mutex_exit)
8707c478bd9Sstevel@tonic-gatemutex_exit_critical_start:		/* If interrupted, restart here */
8717c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx
8727c478bd9Sstevel@tonic-gate	movl	4(%esp), %ecx
8737c478bd9Sstevel@tonic-gate	cmpl	%edx, (%ecx)
8747c478bd9Sstevel@tonic-gate	jne	mutex_vector_exit		/* wrong type or wrong owner */
8757c478bd9Sstevel@tonic-gate	movl	$0, (%ecx)			/* clear owner AND lock */
8767c478bd9Sstevel@tonic-gate.mutex_exit_critical_end:
8777c478bd9Sstevel@tonic-gate.mutex_exit_lockstat_patch_point:
8787c478bd9Sstevel@tonic-gate	ret
8797c478bd9Sstevel@tonic-gate	movl	$LS_MUTEX_EXIT_RELEASE, %eax
8807c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper
8817c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_exit)
8827c478bd9Sstevel@tonic-gate
8837c478bd9Sstevel@tonic-gate	.globl	mutex_exit_critical_size
8847c478bd9Sstevel@tonic-gate	.type	mutex_exit_critical_size, @object
8857c478bd9Sstevel@tonic-gate	.align	CPTRSIZE
8867c478bd9Sstevel@tonic-gatemutex_exit_critical_size:
8877c478bd9Sstevel@tonic-gate	.long	.mutex_exit_critical_end - mutex_exit_critical_start
8887c478bd9Sstevel@tonic-gate	SET_SIZE(mutex_exit_critical_size)
8897c478bd9Sstevel@tonic-gate
8907c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
8917c478bd9Sstevel@tonic-gate
8927c478bd9Sstevel@tonic-gate#endif	/* __lint */
8937c478bd9Sstevel@tonic-gate
8947c478bd9Sstevel@tonic-gate/*
8957c478bd9Sstevel@tonic-gate * rw_enter() and rw_exit().
8967c478bd9Sstevel@tonic-gate *
8977c478bd9Sstevel@tonic-gate * These routines handle the simple cases of rw_enter (write-locking an unheld
8987c478bd9Sstevel@tonic-gate * lock or read-locking a lock that's neither write-locked nor write-wanted)
8997c478bd9Sstevel@tonic-gate * and rw_exit (no waiters or not the last reader).  If anything complicated
9007c478bd9Sstevel@tonic-gate * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
9017c478bd9Sstevel@tonic-gate */
9027c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
9037c478bd9Sstevel@tonic-gate
9047c478bd9Sstevel@tonic-gate/* ARGSUSED */
9057c478bd9Sstevel@tonic-gatevoid
9067c478bd9Sstevel@tonic-gaterw_enter(krwlock_t *lp, krw_t rw)
9077c478bd9Sstevel@tonic-gate{}
9087c478bd9Sstevel@tonic-gate
9097c478bd9Sstevel@tonic-gate/* ARGSUSED */
9107c478bd9Sstevel@tonic-gatevoid
9117c478bd9Sstevel@tonic-gaterw_exit(krwlock_t *lp)
9127c478bd9Sstevel@tonic-gate{}
9137c478bd9Sstevel@tonic-gate
9147c478bd9Sstevel@tonic-gate#else	/* __lint */
9157c478bd9Sstevel@tonic-gate
9167c478bd9Sstevel@tonic-gate#if defined(__amd64)
9177c478bd9Sstevel@tonic-gate
9187c478bd9Sstevel@tonic-gate	ENTRY(rw_enter)
9197c478bd9Sstevel@tonic-gate	cmpl	$RW_WRITER, %esi
9207c478bd9Sstevel@tonic-gate	je	.rw_write_enter
9217c478bd9Sstevel@tonic-gate	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
9227c478bd9Sstevel@tonic-gate	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
9237c478bd9Sstevel@tonic-gate	jnz	rw_enter_sleep
9247c478bd9Sstevel@tonic-gate	leaq	RW_READ_LOCK(%rax), %rdx	/* rdx = new rw_wwwh value */
9257c478bd9Sstevel@tonic-gate	lock
9267c478bd9Sstevel@tonic-gate	cmpxchgq %rdx, (%rdi)			/* try to grab read lock */
9277c478bd9Sstevel@tonic-gate	jnz	rw_enter_sleep
9287c478bd9Sstevel@tonic-gate.rw_read_enter_lockstat_patch_point:
9297c478bd9Sstevel@tonic-gate	ret
9307c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
9317c478bd9Sstevel@tonic-gate	movq	%rdi, %rsi			/* rsi = lock ptr */
9327c478bd9Sstevel@tonic-gate	movl	$LS_RW_ENTER_ACQUIRE, %edi
9337c478bd9Sstevel@tonic-gate	movl	$RW_READER, %edx
9347c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
9357c478bd9Sstevel@tonic-gate.rw_write_enter:
9362c164fafSPatrick Mooney	movq	%gs:CPU_THREAD, %rdx
9377c478bd9Sstevel@tonic-gate	orq	$RW_WRITE_LOCKED, %rdx		/* rdx = write-locked value */
9387c478bd9Sstevel@tonic-gate	xorl	%eax, %eax			/* rax = unheld value */
9397c478bd9Sstevel@tonic-gate	lock
9407c478bd9Sstevel@tonic-gate	cmpxchgq %rdx, (%rdi)			/* try to grab write lock */
9417c478bd9Sstevel@tonic-gate	jnz	rw_enter_sleep
942ee88d2b9Skchow
943ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
944ee88d2b9Skchow.rw_write_enter_lockstat_patch_point:
945ee88d2b9Skchow.rw_write_enter_6323525_patch_point:
946ee88d2b9Skchow	ret
947ee88d2b9Skchow	nop
948ee88d2b9Skchow	nop
949ee88d2b9Skchow.rw_write_enter_lockstat_6323525_patch_point:
950ee88d2b9Skchow	nop
951ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
9527c478bd9Sstevel@tonic-gate.rw_write_enter_lockstat_patch_point:
9537c478bd9Sstevel@tonic-gate	ret
954ee88d2b9Skchow#endif	/* OPTERON_WORKAROUND_6323525 */
955ee88d2b9Skchow
9567c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
9577c478bd9Sstevel@tonic-gate	movq	%rdi, %rsi			/* rsi = lock ptr */
9587c478bd9Sstevel@tonic-gate	movl	$LS_RW_ENTER_ACQUIRE, %edi
9597c478bd9Sstevel@tonic-gate	movl	$RW_WRITER, %edx
9607c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
9617c478bd9Sstevel@tonic-gate	SET_SIZE(rw_enter)
9627c478bd9Sstevel@tonic-gate
9637c478bd9Sstevel@tonic-gate	ENTRY(rw_exit)
9647c478bd9Sstevel@tonic-gate	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
9657c478bd9Sstevel@tonic-gate	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
9667c478bd9Sstevel@tonic-gate	jne	.rw_not_single_reader
9677c478bd9Sstevel@tonic-gate	xorl	%edx, %edx			/* rdx = new value (unheld) */
9687c478bd9Sstevel@tonic-gate.rw_read_exit:
9697c478bd9Sstevel@tonic-gate	lock
9707c478bd9Sstevel@tonic-gate	cmpxchgq %rdx, (%rdi)			/* try to drop read lock */
9717c478bd9Sstevel@tonic-gate	jnz	rw_exit_wakeup
9727c478bd9Sstevel@tonic-gate.rw_read_exit_lockstat_patch_point:
9737c478bd9Sstevel@tonic-gate	ret
974*31f0c782SJohn Levon	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
9757c478bd9Sstevel@tonic-gate	movq	%rdi, %rsi			/* rsi = lock ptr */
9767c478bd9Sstevel@tonic-gate	movl	$LS_RW_EXIT_RELEASE, %edi
9777c478bd9Sstevel@tonic-gate	movl	$RW_READER, %edx
9787c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
9797c478bd9Sstevel@tonic-gate.rw_not_single_reader:
9807c478bd9Sstevel@tonic-gate	testl	$RW_WRITE_LOCKED, %eax	/* write-locked or write-wanted? */
9817c478bd9Sstevel@tonic-gate	jnz	.rw_write_exit
9827c478bd9Sstevel@tonic-gate	leaq	-RW_READ_LOCK(%rax), %rdx	/* rdx = new value */
9837c478bd9Sstevel@tonic-gate	cmpl	$RW_READ_LOCK, %edx
9847c478bd9Sstevel@tonic-gate	jge	.rw_read_exit		/* not last reader, safe to drop */
9857c478bd9Sstevel@tonic-gate	jmp	rw_exit_wakeup			/* last reader with waiters */
9867c478bd9Sstevel@tonic-gate.rw_write_exit:
9877c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rax		/* rax = thread ptr */
9887c478bd9Sstevel@tonic-gate	xorl	%edx, %edx			/* rdx = new value (unheld) */
9897c478bd9Sstevel@tonic-gate	orq	$RW_WRITE_LOCKED, %rax		/* eax = write-locked value */
9907c478bd9Sstevel@tonic-gate	lock
9917c478bd9Sstevel@tonic-gate	cmpxchgq %rdx, (%rdi)			/* try to drop read lock */
9927c478bd9Sstevel@tonic-gate	jnz	rw_exit_wakeup
9937c478bd9Sstevel@tonic-gate.rw_write_exit_lockstat_patch_point:
9947c478bd9Sstevel@tonic-gate	ret
9957c478bd9Sstevel@tonic-gate	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
9967c478bd9Sstevel@tonic-gate	movq	%rdi, %rsi			/* rsi - lock ptr */
9977c478bd9Sstevel@tonic-gate	movl	$LS_RW_EXIT_RELEASE, %edi
9987c478bd9Sstevel@tonic-gate	movl	$RW_WRITER, %edx
9997c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
10007c478bd9Sstevel@tonic-gate	SET_SIZE(rw_exit)
10017c478bd9Sstevel@tonic-gate
10027c478bd9Sstevel@tonic-gate#else
10037c478bd9Sstevel@tonic-gate
10047c478bd9Sstevel@tonic-gate	ENTRY(rw_enter)
10057c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
10067c478bd9Sstevel@tonic-gate	movl	4(%esp), %ecx			/* ecx = lock ptr */
10077c478bd9Sstevel@tonic-gate	cmpl	$RW_WRITER, 8(%esp)
10087c478bd9Sstevel@tonic-gate	je	.rw_write_enter
10097c478bd9Sstevel@tonic-gate	incl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_REQUEST() */
10107c478bd9Sstevel@tonic-gate	movl	(%ecx), %eax			/* eax = old rw_wwwh value */
10117c478bd9Sstevel@tonic-gate	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
10127c478bd9Sstevel@tonic-gate	jnz	rw_enter_sleep
10137c478bd9Sstevel@tonic-gate	leal	RW_READ_LOCK(%eax), %edx	/* edx = new rw_wwwh value */
10147c478bd9Sstevel@tonic-gate	lock
10157c478bd9Sstevel@tonic-gate	cmpxchgl %edx, (%ecx)			/* try to grab read lock */
10167c478bd9Sstevel@tonic-gate	jnz	rw_enter_sleep
10177c478bd9Sstevel@tonic-gate.rw_read_enter_lockstat_patch_point:
10187c478bd9Sstevel@tonic-gate	ret
10197c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
10207c478bd9Sstevel@tonic-gate	movl	$LS_RW_ENTER_ACQUIRE, %eax
10217c478bd9Sstevel@tonic-gate	pushl	$RW_READER
10227c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
10237c478bd9Sstevel@tonic-gate.rw_write_enter:
10247c478bd9Sstevel@tonic-gate	orl	$RW_WRITE_LOCKED, %edx		/* edx = write-locked value */
10257c478bd9Sstevel@tonic-gate	xorl	%eax, %eax			/* eax = unheld value */
10267c478bd9Sstevel@tonic-gate	lock
10277c478bd9Sstevel@tonic-gate	cmpxchgl %edx, (%ecx)			/* try to grab write lock */
10287c478bd9Sstevel@tonic-gate	jnz	rw_enter_sleep
1029ee88d2b9Skchow
1030ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
1031ee88d2b9Skchow.rw_write_enter_lockstat_patch_point:
1032ee88d2b9Skchow.rw_write_enter_6323525_patch_point:
1033ee88d2b9Skchow	ret
1034ee88d2b9Skchow	nop
1035ee88d2b9Skchow	nop
1036ee88d2b9Skchow.rw_write_enter_lockstat_6323525_patch_point:
1037ee88d2b9Skchow	nop
1038ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
10397c478bd9Sstevel@tonic-gate.rw_write_enter_lockstat_patch_point:
10407c478bd9Sstevel@tonic-gate	ret
1041ee88d2b9Skchow#endif	/* OPTERON_WORKAROUND_6323525 */
1042ee88d2b9Skchow
10437c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
10447c478bd9Sstevel@tonic-gate	movl	$LS_RW_ENTER_ACQUIRE, %eax
10457c478bd9Sstevel@tonic-gate	pushl	$RW_WRITER
10467c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
10477c478bd9Sstevel@tonic-gate	SET_SIZE(rw_enter)
10487c478bd9Sstevel@tonic-gate
10497c478bd9Sstevel@tonic-gate	ENTRY(rw_exit)
10507c478bd9Sstevel@tonic-gate	movl	4(%esp), %ecx			/* ecx = lock ptr */
10517c478bd9Sstevel@tonic-gate	movl	(%ecx), %eax			/* eax = old rw_wwwh value */
10527c478bd9Sstevel@tonic-gate	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
10537c478bd9Sstevel@tonic-gate	jne	.rw_not_single_reader
10547c478bd9Sstevel@tonic-gate	xorl	%edx, %edx			/* edx = new value (unheld) */
10557c478bd9Sstevel@tonic-gate.rw_read_exit:
10567c478bd9Sstevel@tonic-gate	lock
10577c478bd9Sstevel@tonic-gate	cmpxchgl %edx, (%ecx)			/* try to drop read lock */
10587c478bd9Sstevel@tonic-gate	jnz	rw_exit_wakeup
10597c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
10607c478bd9Sstevel@tonic-gate	decl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_RELEASE() */
10617c478bd9Sstevel@tonic-gate.rw_read_exit_lockstat_patch_point:
10627c478bd9Sstevel@tonic-gate	ret
10637c478bd9Sstevel@tonic-gate	movl	$LS_RW_EXIT_RELEASE, %eax
10647c478bd9Sstevel@tonic-gate	pushl	$RW_READER
10657c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
10667c478bd9Sstevel@tonic-gate.rw_not_single_reader:
10677c478bd9Sstevel@tonic-gate	testl	$RW_WRITE_LOCKED, %eax	/* write-locked or write-wanted? */
10687c478bd9Sstevel@tonic-gate	jnz	.rw_write_exit
10697c478bd9Sstevel@tonic-gate	leal	-RW_READ_LOCK(%eax), %edx	/* edx = new value */
10707c478bd9Sstevel@tonic-gate	cmpl	$RW_READ_LOCK, %edx
10717c478bd9Sstevel@tonic-gate	jge	.rw_read_exit		/* not last reader, safe to drop */
10727c478bd9Sstevel@tonic-gate	jmp	rw_exit_wakeup			/* last reader with waiters */
10737c478bd9Sstevel@tonic-gate.rw_write_exit:
10747c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %eax		/* eax = thread ptr */
10757c478bd9Sstevel@tonic-gate	xorl	%edx, %edx			/* edx = new value (unheld) */
10767c478bd9Sstevel@tonic-gate	orl	$RW_WRITE_LOCKED, %eax		/* eax = write-locked value */
10777c478bd9Sstevel@tonic-gate	lock
10787c478bd9Sstevel@tonic-gate	cmpxchgl %edx, (%ecx)			/* try to drop read lock */
10797c478bd9Sstevel@tonic-gate	jnz	rw_exit_wakeup
10807c478bd9Sstevel@tonic-gate.rw_write_exit_lockstat_patch_point:
10817c478bd9Sstevel@tonic-gate	ret
10827c478bd9Sstevel@tonic-gate	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
10837c478bd9Sstevel@tonic-gate	movl	$LS_RW_EXIT_RELEASE, %eax
10847c478bd9Sstevel@tonic-gate	pushl	$RW_WRITER
10857c478bd9Sstevel@tonic-gate	jmp	lockstat_wrapper_arg
10867c478bd9Sstevel@tonic-gate	SET_SIZE(rw_exit)
10877c478bd9Sstevel@tonic-gate
10887c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
10897c478bd9Sstevel@tonic-gate
10907c478bd9Sstevel@tonic-gate#endif	/* __lint */
10917c478bd9Sstevel@tonic-gate
1092ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
1093ee88d2b9Skchow#if defined(lint) || defined(__lint)
1094ee88d2b9Skchow
1095ee88d2b9Skchowint	workaround_6323525_patched;
1096ee88d2b9Skchow
1097ee88d2b9Skchowvoid
1098ee88d2b9Skchowpatch_workaround_6323525(void)
1099ee88d2b9Skchow{}
1100ee88d2b9Skchow
1101ee88d2b9Skchow#else	/* lint */
1102ee88d2b9Skchow
1103ee88d2b9Skchow/*
1104ee88d2b9Skchow * If it is necessary to patch the lock enter routines with the lfence
1105ee88d2b9Skchow * workaround, workaround_6323525_patched is set to a non-zero value so that
1106ee88d2b9Skchow * the lockstat_hat_patch routine can patch to the new location of the 'ret'
1107ee88d2b9Skchow * instruction.
1108ee88d2b9Skchow */
1109ee88d2b9Skchow	DGDEF3(workaround_6323525_patched, 4, 4)
1110ee88d2b9Skchow	.long	0
1111ee88d2b9Skchow
1112ee88d2b9Skchow#if defined(__amd64)
1113ee88d2b9Skchow
1114ee88d2b9Skchow#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size)	\
1115ee88d2b9Skchow	movq	$size, %rbx;			\
1116ee88d2b9Skchow	movq	$dstaddr, %r13;			\
1117ee88d2b9Skchow	addq	%rbx, %r13;			\
1118ee88d2b9Skchow	movq	$srcaddr, %r12;			\
1119ee88d2b9Skchow	addq	%rbx, %r12;			\
1120ee88d2b9Skchow0:						\
1121ee88d2b9Skchow	decq	%r13;				\
1122ee88d2b9Skchow	decq	%r12;				\
1123ee88d2b9Skchow	movzbl	(%r12), %esi;			\
1124ee88d2b9Skchow	movq	$1, %rdx;			\
1125ee88d2b9Skchow	movq	%r13, %rdi;			\
1126ee88d2b9Skchow	call	hot_patch_kernel_text;		\
1127ee88d2b9Skchow	decq	%rbx;				\
1128ee88d2b9Skchow	testq	%rbx, %rbx;			\
1129ee88d2b9Skchow	jg	0b;
1130ee88d2b9Skchow
1131ee88d2b9Skchow/*
1132ee88d2b9Skchow * patch_workaround_6323525: provide workaround for 6323525
1133ee88d2b9Skchow *
1134ee88d2b9Skchow * The workaround is to place a fencing instruction (lfence) between the
1135ee88d2b9Skchow * mutex operation and the subsequent read-modify-write instruction.
1136ee88d2b9Skchow *
1137ee88d2b9Skchow * This routine hot patches the lfence instruction on top of the space
1138ee88d2b9Skchow * reserved by nops in the lock enter routines.
1139ee88d2b9Skchow */
1140ee88d2b9Skchow	ENTRY_NP(patch_workaround_6323525)
1141ee88d2b9Skchow	pushq	%rbp
1142ee88d2b9Skchow	movq	%rsp, %rbp
1143ee88d2b9Skchow	pushq	%r12
1144ee88d2b9Skchow	pushq	%r13
1145ee88d2b9Skchow	pushq	%rbx
1146ee88d2b9Skchow
1147ee88d2b9Skchow	/*
1148ee88d2b9Skchow	 * lockstat_hot_patch() to use the alternate lockstat workaround
1149ee88d2b9Skchow	 * 6323525 patch points (points past the lfence instruction to the
1150ee88d2b9Skchow	 * new ret) when workaround_6323525_patched is set.
1151ee88d2b9Skchow	 */
1152ee88d2b9Skchow	movl	$1, workaround_6323525_patched
1153ee88d2b9Skchow
1154ee88d2b9Skchow	/*
1155ee88d2b9Skchow	 * patch ret/nop/nop/nop to lfence/ret at the end of the lock enter
1156ee88d2b9Skchow	 * routines. The 4 bytes are patched in reverse order so that the
1157ee88d2b9Skchow	 * the existing ret is overwritten last. This provides lock enter
1158ee88d2b9Skchow	 * sanity during the intermediate patching stages.
1159ee88d2b9Skchow	 */
1160ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .mutex_enter_6323525_patch_point, 4)
1161ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .mutex_tryenter_6323525_patch_point, 4)
1162ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .mutex_atryenter_6323525_patch_point, 4)
1163ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .rw_write_enter_6323525_patch_point, 4)
1164ee88d2b9Skchow
1165ee88d2b9Skchow	popq	%rbx
1166ee88d2b9Skchow	popq	%r13
1167ee88d2b9Skchow	popq	%r12
1168ee88d2b9Skchow	movq	%rbp, %rsp
1169ee88d2b9Skchow	popq	%rbp
1170ee88d2b9Skchow	ret
1171ee88d2b9Skchow_lfence_insn:
1172ee88d2b9Skchow	lfence
1173ee88d2b9Skchow	ret
1174ee88d2b9Skchow	SET_SIZE(patch_workaround_6323525)
1175ee88d2b9Skchow
1176ee88d2b9Skchow
1177ee88d2b9Skchow#else	/* __amd64 */
1178ee88d2b9Skchow
1179ee88d2b9Skchow#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size)	\
1180ee88d2b9Skchow	movl	$size, %ebx;			\
1181ee88d2b9Skchow	movl	$srcaddr, %esi;			\
1182ee88d2b9Skchow	addl	%ebx, %esi;			\
1183ee88d2b9Skchow	movl	$dstaddr, %edi;			\
1184ee88d2b9Skchow	addl	%ebx, %edi;			\
1185ee88d2b9Skchow0:      					\
1186ee88d2b9Skchow	decl	%esi;				\
1187ee88d2b9Skchow	decl	%edi;				\
1188ee88d2b9Skchow	pushl	$1;				\
1189ee88d2b9Skchow	movzbl	(%esi), %eax;			\
1190ee88d2b9Skchow	pushl	%eax;				\
1191ee88d2b9Skchow	pushl	%edi;				\
1192ee88d2b9Skchow	call	hot_patch_kernel_text;		\
1193ee88d2b9Skchow	addl	$12, %esp;			\
1194ee88d2b9Skchow	decl	%ebx;				\
1195ee88d2b9Skchow	testl	%ebx, %ebx;			\
1196ee88d2b9Skchow	jg	0b;
1197ee88d2b9Skchow
1198ee88d2b9Skchow
1199ee88d2b9Skchow	/* see comments above */
1200ee88d2b9Skchow	ENTRY_NP(patch_workaround_6323525)
1201ee88d2b9Skchow	pushl	%ebp
1202ee88d2b9Skchow	movl	%esp, %ebp
1203ee88d2b9Skchow	pushl	%ebx
1204ee88d2b9Skchow	pushl	%esi
1205ee88d2b9Skchow	pushl	%edi
1206ee88d2b9Skchow
1207ee88d2b9Skchow	movl	$1, workaround_6323525_patched
1208ee88d2b9Skchow
1209ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .mutex_enter_6323525_patch_point, 4)
1210ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .mutex_tryenter_6323525_patch_point, 4)
1211ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .mutex_atryenter_6323525_patch_point, 4)
1212ee88d2b9Skchow	HOT_MUTEX_PATCH(_lfence_insn, .rw_write_enter_6323525_patch_point, 4)
1213ee88d2b9Skchow
1214ee88d2b9Skchow	popl	%edi
1215ee88d2b9Skchow	popl	%esi
1216ee88d2b9Skchow	popl	%ebx
1217ee88d2b9Skchow	movl	%ebp, %esp
1218ee88d2b9Skchow	popl	%ebp
1219ee88d2b9Skchow	ret
1220ee88d2b9Skchow_lfence_insn:
1221ee88d2b9Skchow	.byte	0xf, 0xae, 0xe8		/ [lfence instruction]
1222ee88d2b9Skchow	ret
1223ee88d2b9Skchow	SET_SIZE(patch_workaround_6323525)
1224ee88d2b9Skchow
1225ee88d2b9Skchow#endif	/* !__amd64 */
1226ee88d2b9Skchow#endif	/* !lint */
1227ee88d2b9Skchow#endif	/* OPTERON_WORKAROUND_6323525 */
1228ee88d2b9Skchow
1229ee88d2b9Skchow
12307c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
12317c478bd9Sstevel@tonic-gate
12327c478bd9Sstevel@tonic-gatevoid
12337c478bd9Sstevel@tonic-gatelockstat_hot_patch(void)
12347c478bd9Sstevel@tonic-gate{}
12357c478bd9Sstevel@tonic-gate
12367c478bd9Sstevel@tonic-gate#else
12377c478bd9Sstevel@tonic-gate
12387c478bd9Sstevel@tonic-gate#if defined(__amd64)
12397c478bd9Sstevel@tonic-gate
12407c478bd9Sstevel@tonic-gate#define	HOT_PATCH(addr, event, active_instr, normal_instr, len)	\
12417c478bd9Sstevel@tonic-gate	movq	$normal_instr, %rsi;		\
12427c478bd9Sstevel@tonic-gate	movq	$active_instr, %rdi;		\
12437c478bd9Sstevel@tonic-gate	leaq	lockstat_probemap(%rip), %rax;	\
12447c478bd9Sstevel@tonic-gate	movl	_MUL(event, DTRACE_IDSIZE)(%rax), %eax;	\
12457c478bd9Sstevel@tonic-gate	testl	%eax, %eax;			\
12467c478bd9Sstevel@tonic-gate	jz	9f;				\
12477c478bd9Sstevel@tonic-gate	movq	%rdi, %rsi;			\
12487c478bd9Sstevel@tonic-gate9:						\
12497c478bd9Sstevel@tonic-gate	movq	$len, %rdx;			\
12507c478bd9Sstevel@tonic-gate	movq	$addr, %rdi;			\
12517c478bd9Sstevel@tonic-gate	call	hot_patch_kernel_text
12527c478bd9Sstevel@tonic-gate
12537c478bd9Sstevel@tonic-gate#else
12547c478bd9Sstevel@tonic-gate
12557c478bd9Sstevel@tonic-gate#define	HOT_PATCH(addr, event, active_instr, normal_instr, len)	\
12567c478bd9Sstevel@tonic-gate	movl	$normal_instr, %ecx;		\
12577c478bd9Sstevel@tonic-gate	movl	$active_instr, %edx;		\
12587c478bd9Sstevel@tonic-gate	movl	$lockstat_probemap, %eax;	\
12597c478bd9Sstevel@tonic-gate	movl	_MUL(event, DTRACE_IDSIZE)(%eax), %eax;	\
12607c478bd9Sstevel@tonic-gate	testl	%eax, %eax;			\
12617c478bd9Sstevel@tonic-gate	jz	. + 4;				\
12627c478bd9Sstevel@tonic-gate	movl	%edx, %ecx;			\
12637c478bd9Sstevel@tonic-gate	pushl	$len;				\
12647c478bd9Sstevel@tonic-gate	pushl	%ecx;				\
12657c478bd9Sstevel@tonic-gate	pushl	$addr;				\
12667c478bd9Sstevel@tonic-gate	call	hot_patch_kernel_text;		\
12677c478bd9Sstevel@tonic-gate	addl	$12, %esp;
12687c478bd9Sstevel@tonic-gate
12697c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
12707c478bd9Sstevel@tonic-gate
12717c478bd9Sstevel@tonic-gate	ENTRY(lockstat_hot_patch)
12727c478bd9Sstevel@tonic-gate#if defined(__amd64)
12737c478bd9Sstevel@tonic-gate	pushq	%rbp			/* align stack properly */
12747c478bd9Sstevel@tonic-gate	movq	%rsp, %rbp
12757c478bd9Sstevel@tonic-gate#endif	/* __amd64 */
1276ee88d2b9Skchow
1277ee88d2b9Skchow#if defined(OPTERON_WORKAROUND_6323525)
1278ee88d2b9Skchow	cmpl	$0, workaround_6323525_patched
1279ee88d2b9Skchow	je	1f
1280ee88d2b9Skchow	HOT_PATCH(.mutex_enter_lockstat_6323525_patch_point,
1281ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1282ee88d2b9Skchow	HOT_PATCH(.mutex_tryenter_lockstat_6323525_patch_point,
1283ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1284ee88d2b9Skchow	HOT_PATCH(.rw_write_enter_lockstat_6323525_patch_point,
1285ee88d2b9Skchow		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1286ee88d2b9Skchow	jmp	2f
1287ee88d2b9Skchow1:
12887c478bd9Sstevel@tonic-gate	HOT_PATCH(.mutex_enter_lockstat_patch_point,
12897c478bd9Sstevel@tonic-gate		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12907c478bd9Sstevel@tonic-gate	HOT_PATCH(.mutex_tryenter_lockstat_patch_point,
12917c478bd9Sstevel@tonic-gate		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12927c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
12937c478bd9Sstevel@tonic-gate		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1294ee88d2b9Skchow2:
1295ee88d2b9Skchow#else	/* OPTERON_WORKAROUND_6323525 */
1296ee88d2b9Skchow	HOT_PATCH(.mutex_enter_lockstat_patch_point,
1297ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1298ee88d2b9Skchow	HOT_PATCH(.mutex_tryenter_lockstat_patch_point,
1299ee88d2b9Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1300ee88d2b9Skchow	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
1301ee88d2b9Skchow		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
1302ee88d2b9Skchow#endif	/* !OPTERON_WORKAROUND_6323525 */
1303ee88d2b9Skchow	HOT_PATCH(.mutex_exit_lockstat_patch_point,
1304ee88d2b9Skchow		LS_MUTEX_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
13057c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
13067c478bd9Sstevel@tonic-gate		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13077c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_write_exit_lockstat_patch_point,
13087c478bd9Sstevel@tonic-gate		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
13097c478bd9Sstevel@tonic-gate	HOT_PATCH(.rw_read_exit_lockstat_patch_point,
13107c478bd9Sstevel@tonic-gate		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
13117c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_set_lockstat_patch_point,
13127c478bd9Sstevel@tonic-gate		LS_LOCK_SET_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13137c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_try_lockstat_patch_point,
13147c478bd9Sstevel@tonic-gate		LS_LOCK_TRY_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13157c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_clear_lockstat_patch_point,
13167c478bd9Sstevel@tonic-gate		LS_LOCK_CLEAR_RELEASE, NOP_INSTR, RET_INSTR, 1)
13177c478bd9Sstevel@tonic-gate	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
13187c478bd9Sstevel@tonic-gate		LS_LOCK_SET_SPL_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13197c478bd9Sstevel@tonic-gate
13207c478bd9Sstevel@tonic-gate	HOT_PATCH(LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT,
13217c478bd9Sstevel@tonic-gate		LS_LOCK_CLEAR_SPLX_RELEASE,
13227c478bd9Sstevel@tonic-gate		LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL, 0, 1);
13237c478bd9Sstevel@tonic-gate#if defined(__amd64)
13247c478bd9Sstevel@tonic-gate	leave			/* unwind stack */
13257c478bd9Sstevel@tonic-gate#endif	/* __amd64 */
13267c478bd9Sstevel@tonic-gate	ret
13277c478bd9Sstevel@tonic-gate	SET_SIZE(lockstat_hot_patch)
13287c478bd9Sstevel@tonic-gate
13297c478bd9Sstevel@tonic-gate#endif	/* __lint */
13307c478bd9Sstevel@tonic-gate
13317c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
13327c478bd9Sstevel@tonic-gate
13337c478bd9Sstevel@tonic-gate/* XX64 membar_*() should be inlines */
13347c478bd9Sstevel@tonic-gate
13357c478bd9Sstevel@tonic-gatevoid
13362850d85bSmv143129membar_sync(void)
13372850d85bSmv143129{}
13382850d85bSmv143129
13392850d85bSmv143129void
13407c478bd9Sstevel@tonic-gatemembar_enter(void)
13417c478bd9Sstevel@tonic-gate{}
13427c478bd9Sstevel@tonic-gate
13437c478bd9Sstevel@tonic-gatevoid
13447c478bd9Sstevel@tonic-gatemembar_exit(void)
13457c478bd9Sstevel@tonic-gate{}
13467c478bd9Sstevel@tonic-gate
13477c478bd9Sstevel@tonic-gatevoid
13487c478bd9Sstevel@tonic-gatemembar_producer(void)
13497c478bd9Sstevel@tonic-gate{}
13507c478bd9Sstevel@tonic-gate
13517c478bd9Sstevel@tonic-gatevoid
13527c478bd9Sstevel@tonic-gatemembar_consumer(void)
13537c478bd9Sstevel@tonic-gate{}
13547c478bd9Sstevel@tonic-gate
13557c478bd9Sstevel@tonic-gate#else	/* __lint */
13567c478bd9Sstevel@tonic-gate
13577c478bd9Sstevel@tonic-gate#if defined(__amd64)
13587c478bd9Sstevel@tonic-gate
13597c478bd9Sstevel@tonic-gate	ENTRY(membar_enter)
13607c478bd9Sstevel@tonic-gate	ALTENTRY(membar_exit)
13612850d85bSmv143129	ALTENTRY(membar_sync)
13627c478bd9Sstevel@tonic-gate	mfence			/* lighter weight than lock; xorq $0,(%rsp) */
13637c478bd9Sstevel@tonic-gate	ret
13642850d85bSmv143129	SET_SIZE(membar_sync)
13657c478bd9Sstevel@tonic-gate	SET_SIZE(membar_exit)
13667c478bd9Sstevel@tonic-gate	SET_SIZE(membar_enter)
13677c478bd9Sstevel@tonic-gate
13687c478bd9Sstevel@tonic-gate	ENTRY(membar_producer)
13697c478bd9Sstevel@tonic-gate	sfence
13707c478bd9Sstevel@tonic-gate	ret
13717c478bd9Sstevel@tonic-gate	SET_SIZE(membar_producer)
13727c478bd9Sstevel@tonic-gate
13737c478bd9Sstevel@tonic-gate	ENTRY(membar_consumer)
13747c478bd9Sstevel@tonic-gate	lfence
13757c478bd9Sstevel@tonic-gate	ret
13767c478bd9Sstevel@tonic-gate	SET_SIZE(membar_consumer)
13777c478bd9Sstevel@tonic-gate
13787c478bd9Sstevel@tonic-gate#else
13797c478bd9Sstevel@tonic-gate
13807c478bd9Sstevel@tonic-gate	ENTRY(membar_enter)
13817c478bd9Sstevel@tonic-gate	ALTENTRY(membar_exit)
13822850d85bSmv143129	ALTENTRY(membar_sync)
13837c478bd9Sstevel@tonic-gate	lock
13847c478bd9Sstevel@tonic-gate	xorl	$0, (%esp)
13857c478bd9Sstevel@tonic-gate	ret
13862850d85bSmv143129	SET_SIZE(membar_sync)
13877c478bd9Sstevel@tonic-gate	SET_SIZE(membar_exit)
13887c478bd9Sstevel@tonic-gate	SET_SIZE(membar_enter)
13897c478bd9Sstevel@tonic-gate
13907c478bd9Sstevel@tonic-gate/*
13917c478bd9Sstevel@tonic-gate * On machines that support sfence and lfence, these
13927c478bd9Sstevel@tonic-gate * memory barriers can be more precisely implemented
13937c478bd9Sstevel@tonic-gate * without causing the whole world to stop
13947c478bd9Sstevel@tonic-gate */
13957c478bd9Sstevel@tonic-gate	ENTRY(membar_producer)
13967c478bd9Sstevel@tonic-gate	.globl	_patch_sfence_ret
13977c478bd9Sstevel@tonic-gate_patch_sfence_ret:			/* c.f. membar #StoreStore */
13987c478bd9Sstevel@tonic-gate	lock
13997c478bd9Sstevel@tonic-gate	xorl	$0, (%esp)
14007c478bd9Sstevel@tonic-gate	ret
14017c478bd9Sstevel@tonic-gate	SET_SIZE(membar_producer)
14027c478bd9Sstevel@tonic-gate
14037c478bd9Sstevel@tonic-gate	ENTRY(membar_consumer)
14047c478bd9Sstevel@tonic-gate	.globl	_patch_lfence_ret
14057c478bd9Sstevel@tonic-gate_patch_lfence_ret:			/* c.f. membar #LoadLoad */
14067c478bd9Sstevel@tonic-gate	lock
14077c478bd9Sstevel@tonic-gate	xorl	$0, (%esp)
14087c478bd9Sstevel@tonic-gate	ret
14097c478bd9Sstevel@tonic-gate	SET_SIZE(membar_consumer)
14107c478bd9Sstevel@tonic-gate
14117c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
14127c478bd9Sstevel@tonic-gate
14137c478bd9Sstevel@tonic-gate#endif	/* __lint */
14147c478bd9Sstevel@tonic-gate
14157c478bd9Sstevel@tonic-gate/*
14167c478bd9Sstevel@tonic-gate * thread_onproc()
14177c478bd9Sstevel@tonic-gate * Set thread in onproc state for the specified CPU.
14187c478bd9Sstevel@tonic-gate * Also set the thread lock pointer to the CPU's onproc lock.
14197c478bd9Sstevel@tonic-gate * Since the new lock isn't held, the store ordering is important.
14207c478bd9Sstevel@tonic-gate * If not done in assembler, the compiler could reorder the stores.
14217c478bd9Sstevel@tonic-gate */
14227c478bd9Sstevel@tonic-gate#if defined(lint) || defined(__lint)
14237c478bd9Sstevel@tonic-gate
14247c478bd9Sstevel@tonic-gatevoid
14257c478bd9Sstevel@tonic-gatethread_onproc(kthread_id_t t, cpu_t *cp)
14267c478bd9Sstevel@tonic-gate{
14277c478bd9Sstevel@tonic-gate	t->t_state = TS_ONPROC;
14287c478bd9Sstevel@tonic-gate	t->t_lockp = &cp->cpu_thread_lock;
14297c478bd9Sstevel@tonic-gate}
14307c478bd9Sstevel@tonic-gate
14317c478bd9Sstevel@tonic-gate#else	/* __lint */
14327c478bd9Sstevel@tonic-gate
14337c478bd9Sstevel@tonic-gate#if defined(__amd64)
14347c478bd9Sstevel@tonic-gate
14357c478bd9Sstevel@tonic-gate	ENTRY(thread_onproc)
14367c478bd9Sstevel@tonic-gate	addq	$CPU_THREAD_LOCK, %rsi	/* pointer to disp_lock while running */
14377c478bd9Sstevel@tonic-gate	movl	$ONPROC_THREAD, T_STATE(%rdi)	/* set state to TS_ONPROC */
14387c478bd9Sstevel@tonic-gate	movq	%rsi, T_LOCKP(%rdi)	/* store new lock pointer */
14397c478bd9Sstevel@tonic-gate	ret
14407c478bd9Sstevel@tonic-gate	SET_SIZE(thread_onproc)
14417c478bd9Sstevel@tonic-gate
14427c478bd9Sstevel@tonic-gate#else
14437c478bd9Sstevel@tonic-gate
14447c478bd9Sstevel@tonic-gate	ENTRY(thread_onproc)
14457c478bd9Sstevel@tonic-gate	movl	4(%esp), %eax
14467c478bd9Sstevel@tonic-gate	movl	8(%esp), %ecx
14477c478bd9Sstevel@tonic-gate	addl	$CPU_THREAD_LOCK, %ecx	/* pointer to disp_lock while running */
14487c478bd9Sstevel@tonic-gate	movl	$ONPROC_THREAD, T_STATE(%eax)	/* set state to TS_ONPROC */
14497c478bd9Sstevel@tonic-gate	movl	%ecx, T_LOCKP(%eax)	/* store new lock pointer */
14507c478bd9Sstevel@tonic-gate	ret
14517c478bd9Sstevel@tonic-gate	SET_SIZE(thread_onproc)
14527c478bd9Sstevel@tonic-gate
14537c478bd9Sstevel@tonic-gate#endif	/* !__amd64 */
14547c478bd9Sstevel@tonic-gate
14557c478bd9Sstevel@tonic-gate#endif	/* __lint */
1456575a7426Spt157919
1457575a7426Spt157919/*
1458575a7426Spt157919 * mutex_delay_default(void)
1459575a7426Spt157919 * Spins for approx a few hundred processor cycles and returns to caller.
1460575a7426Spt157919 */
1461575a7426Spt157919
1462575a7426Spt157919#if defined(lint) || defined(__lint)
1463575a7426Spt157919
1464575a7426Spt157919void
1465575a7426Spt157919mutex_delay_default(void)
1466575a7426Spt157919{}
1467575a7426Spt157919
1468575a7426Spt157919#else	/* __lint */
1469575a7426Spt157919
1470575a7426Spt157919#if defined(__amd64)
1471575a7426Spt157919
1472575a7426Spt157919	ENTRY(mutex_delay_default)
1473575a7426Spt157919	movq	$92,%r11
1474575a7426Spt1579190:	decq	%r11
1475575a7426Spt157919	jg	0b
1476575a7426Spt157919	ret
1477575a7426Spt157919	SET_SIZE(mutex_delay_default)
1478575a7426Spt157919
1479575a7426Spt157919#else
1480575a7426Spt157919
1481575a7426Spt157919	ENTRY(mutex_delay_default)
1482575a7426Spt157919	push	%ebp
1483575a7426Spt157919	movl	%esp,%ebp
1484575a7426Spt157919	andl	$-16,%esp
1485575a7426Spt157919	push	%ebx
1486575a7426Spt157919	movl	$93,%ebx
1487575a7426Spt1579190:	decl	%ebx
1488575a7426Spt157919	jg	0b
1489575a7426Spt157919	pop	%ebx
1490575a7426Spt157919	leave
1491575a7426Spt157919	ret
1492575a7426Spt157919	SET_SIZE(mutex_delay_default)
1493575a7426Spt157919
1494575a7426Spt157919#endif	/* !__amd64 */
1495575a7426Spt157919#endif	/* __lint */
1496