/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint) || defined(__lint)
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <vm/page.h>
#include <sys/mutex_impl.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/rwlock_impl.h>
#include <sys/lockstat.h>

/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts so don't use this to spin on a lock.
 *
 * ulock_try() is for a lock in the user address space.
 */
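/*
 * Illustrative sketch (not part of the build): in C terms lock_try()
 * behaves roughly like the fragment below, treating the lock as a single
 * byte and assuming an atomic byte-swap helper in the style of
 * atomic_swap_8().  The assembly that follows is the real implementation;
 * this is only a reading aid.
 *
 *	int
 *	lock_try(lock_t *lp)
 *	{
 *		return (atomic_swap_8((volatile uint8_t *)lp, 0xff) == 0);
 *	}
 */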

#if defined(lint) || defined(__lint)

/* ARGSUSED */
int
lock_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
lock_spin_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
ulock_try(lock_t *lp)
{ return (0); }

#else	/* __lint */
	.globl	kernelbase

#if defined(__amd64)

	ENTRY(lock_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
.lock_try_lockstat_patch_point:
	ret
	testb	%al, %al
	jnz	0f
	ret
0:
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movl	$LS_LOCK_TRY_ACQUIRE, %edi /* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_try)

	ENTRY(lock_spin_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
	ret
	SET_SIZE(lock_spin_try)

	ENTRY(ulock_try)
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	ulock_pass		/*	uaddr < kernelbase, proceed */

	movq	%rdi, %r12		/* preserve lock ptr for debugging */
	leaq	.ulock_panic_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic

#endif /* DEBUG */

ulock_pass:
	movl	$1, %eax
	xchgb	%al, (%rdi)
	xorb	$1, %al
	ret
	SET_SIZE(ulock_try)

#else

	ENTRY(lock_try)
	movl	$1,%edx
	movl	4(%esp),%ecx		/* ecx = lock addr */
	xorl	%eax,%eax
	xchgb	%dl, (%ecx)		/* using dl will avoid partial */
	testb	%dl,%dl			/* stalls on P6 ? */
	setz	%al
.lock_try_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	testl	%eax, %eax
	jz	0f
	movl	$LS_LOCK_TRY_ACQUIRE, %eax
	jmp	lockstat_wrapper
0:
	ret
	SET_SIZE(lock_try)

	ENTRY(lock_spin_try)
	movl	$-1,%edx
	movl	4(%esp),%ecx		/* ecx = lock addr */
	xorl	%eax,%eax
	xchgb	%dl, (%ecx)		/* using dl will avoid partial */
	testb	%dl,%dl			/* stalls on P6 ? */
	setz	%al
	ret
	SET_SIZE(lock_spin_try)

	ENTRY(ulock_try)
#ifdef DEBUG
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)		/* test uaddr < kernelbase */
	jb	ulock_pass		/* uaddr < kernelbase, proceed */

	pushl	$.ulock_panic_msg
	call	panic

#endif /* DEBUG */

ulock_pass:
	movl	$1,%eax
	movl	4(%esp),%ecx
	xchgb	%al, (%ecx)
	xorb	$1, %al
	ret
	SET_SIZE(ulock_try)

#endif	/* !__amd64 */

#ifdef DEBUG
	.data
.ulock_panic_msg:
	.string "ulock_try: Argument is above kernelbase"
	.text
#endif	/* DEBUG */

#endif	/* __lint */

/*
 * lock_clear(lp)
 *	- unlock lock without changing interrupt priority level.
 */

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_clear(lock_t *lp)
{}

/* ARGSUSED */
void
ulock_clear(lock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_clear)
	movb	$0, (%rdi)
.lock_clear_lockstat_patch_point:
	ret
	movq	%rdi, %rsi			/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_RELEASE, %edi	/* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)

	ENTRY(ulock_clear)
#ifdef DEBUG
	movq	kernelbase(%rip), %rcx
	cmpq	%rcx, %rdi		/* test uaddr < kernelbase */
	jb	ulock_clr		/*	uaddr < kernelbase, proceed */

	leaq	.ulock_clear_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic
#endif

ulock_clr:
	movb	$0, (%rdi)
	ret
	SET_SIZE(ulock_clear)

#else

	ENTRY(lock_clear)
	movl	4(%esp), %eax
	movb	$0, (%eax)
.lock_clear_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread addr */
	movl	%eax, %ecx			/* ecx = lock pointer */
	movl	$LS_LOCK_CLEAR_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)

	ENTRY(ulock_clear)
#ifdef DEBUG
	movl	kernelbase, %ecx
	cmpl	%ecx, 4(%esp)		/* test uaddr < kernelbase */
	jb	ulock_clr		/* uaddr < kernelbase, proceed */

	pushl	$.ulock_clear_msg
	call	panic
#endif

ulock_clr:
	movl	4(%esp),%eax
	xorl	%ecx,%ecx
	movb	%cl, (%eax)
	ret
	SET_SIZE(ulock_clear)

#endif	/* !__amd64 */

#ifdef DEBUG
	.data
.ulock_clear_msg:
	.string "ulock_clear: Argument is above kernelbase"
	.text
#endif	/* DEBUG */


#endif	/* __lint */

/*
 * lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 * Raises pil to new_pil, grabs lp, stores old pil in *old_pil.
 */
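/*
 * Rough C rendering of the fast path (illustration only, not compiled):
 * raise the priority level, try the lock once, and fall back to the C
 * spin routine on contention.  splr() and lock_set_spl_spin() are the
 * real kernel routines; the byte swap is written with a hypothetical
 * atomic_swap_8()-style helper, and the argument order for the spin
 * routine follows the register/stack setup below.
 *
 *	void
 *	lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 *	{
 *		int old = splr(new_pil);
 *
 *		if (atomic_swap_8((volatile uint8_t *)lp, 0xff) == 0) {
 *			*old_pil = (u_short)old;
 *			return;
 *		}
 *		lock_set_spl_spin(lp, new_pil, old_pil, old);
 *	}
 */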

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_set_spl)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp
	movl	%esi, 8(%rsp)		/* save priority level */
	movq	%rdx, 16(%rsp)		/* save old pil ptr */
	movq	%rdi, 24(%rsp)		/* save lock pointer */
	movl	%esi, %edi		/* pass priority level */
	call	splr			/* raise priority level */
	movq	24(%rsp), %rdi		/* rdi = lock addr */
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movq	16(%rsp), %rdx		/* rdx = old pil addr */
	movw	%ax, (%rdx)		/* store old pil */
	leave
.lock_set_spl_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %edi
	jmp	lockstat_wrapper
.lss_miss:
	movl	8(%rsp), %esi		/* new_pil */
	movq	16(%rsp), %rdx		/* old_pil_addr */
	movl	%eax, %ecx		/* original pil */
	leave				/* unwind stack */
	jmp	lock_set_spl_spin
	SET_SIZE(lock_set_spl)

#else

	ENTRY(lock_set_spl)
	movl	8(%esp), %eax		/* get priority level */
	pushl	%eax
	call	splr			/* raise priority level */
	movl	8(%esp), %ecx		/* ecx = lock addr */
	movl	$-1, %edx
	addl	$4, %esp
	xchgb	%dl, (%ecx)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	movl	12(%esp), %edx		/* edx = old pil addr (ZF unaffected) */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movw	%ax, (%edx)		/* store old pil */
.lock_set_spl_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %eax
	jmp	lockstat_wrapper
.lss_miss:
	pushl	%eax			/* original pil */
	pushl	%edx			/* old_pil addr */
	pushl	16(%esp)		/* new_pil */
	pushl	%ecx			/* lock addr */
	call	lock_set_spl_spin
	addl	$16, %esp
	ret
	SET_SIZE(lock_set_spl)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * void
 * lock_init(lp)
 */

#if defined(__lint)

/* ARGSUSED */
void
lock_init(lock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_init)
	movb	$0, (%rdi)
	ret
	SET_SIZE(lock_init)

#else

	ENTRY(lock_init)
	movl	4(%esp), %eax
	movb	$0, (%eax)
	ret
	SET_SIZE(lock_init)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * void
 * lock_set(lp)
 */

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_set(lock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_set)
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)

#else

	ENTRY(lock_set)
	movl	4(%esp), %ecx		/* ecx = lock addr */
	movl	$-1, %edx
	xchgb	%dl, (%ecx)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * lock_clear_splx(lp, s)
 *	- unlock lock, then restore the interrupt priority level to s.
 */
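/*
 * Illustration only (not compiled): the fast path below amounts to
 * clearing the lock byte and then restoring the priority level the
 * caller saved earlier, roughly
 *
 *	void
 *	lock_clear_splx(lock_t *lp, int s)
 *	{
 *		*(volatile uint8_t *)lp = 0;
 *		splx(s);
 *	}
 *
 * The extra lockstat path exists only so the release event can be
 * recorded once the lockstat driver has hot-patched this routine.
 */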

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_clear_splx(lock_t *lp, int s)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_clear_splx)
	movb	$0, (%rdi)		/* clear lock */
.lock_clear_splx_lockstat_patch_point:
	jmp	0f
0:
	movl	%esi, %edi		/* arg for splx */
	jmp	splx			/* let splx do its thing */
.lock_clear_splx_lockstat:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	subq	$16, %rsp		/* space to save args across splx */
	movq	%rdi, 8(%rsp)		/* save lock ptr across splx call */
	movl	%esi, %edi		/* arg for splx */
	call	splx			/* lower the priority */
	movq	8(%rsp), %rsi		/* rsi = lock ptr */
	leave				/* unwind stack */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)

#if defined(__GNUC_AS__)
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	(.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2)

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	(.lock_clear_splx_lockstat_patch_point + 1)
#else
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	[.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2]

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	[.lock_clear_splx_lockstat_patch_point + 1]
#endif

#else

	ENTRY(lock_clear_splx)
	LOADCPU(%ecx)			/* ecx = cpu pointer */
	movl	4(%esp), %eax		/* eax = lock addr */
	movl	8(%esp), %edx		/* edx = desired pil */
	movb	$0, (%eax)		/* clear lock */
	cli				/* disable interrupts */
	call	spl			/* magic calling sequence */
.lock_clear_splx_lockstat_patch_point:
	ret
	movl	4(%esp), %ecx		/* ecx = lock pointer */
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, cmnint() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 *
 * The lockstat code below is activated when the lockstat driver
 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 * Note that we don't need to test lockstat_event_mask here -- we won't
 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 */
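/*
 * Illustration only (not compiled): the adaptive fast paths amount to a
 * single compare-and-swap of the owner word, sketched here with a
 * hypothetical atomic_cas_ptr()-style helper.
 *
 *	void
 *	mutex_enter(kmutex_t *lp)
 *	{
 *		if (atomic_cas_ptr(lp, NULL, curthread) != NULL)
 *			mutex_vector_enter(lp);
 *	}
 *
 *	void
 *	mutex_exit(kmutex_t *lp)
 *	{
 *		if (*(void **)lp != curthread)
 *			mutex_vector_exit(lp);
 *		else
 *			*(void **)lp = NULL;
 *	}
 *
 * The owner check and the clearing store in mutex_exit() must stay inside
 * the critical window delimited by mutex_exit_critical_start and
 * mutex_exit_critical_size below, which is why this is written in assembly
 * rather than C.
 */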
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
int
mutex_adaptive_tryenter(mutex_impl_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}

#else

#if defined(__amd64)

	ENTRY_NP(mutex_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_enter
.mutex_enter_lockstat_patch_point:
	ret
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
/*
 * expects %rdx=thread, %rsi=lock, %edi=lockstat event
 */
	ALTENTRY(lockstat_wrapper)
	incb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	call	*lockstat_probe
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper)
	SET_SIZE(mutex_enter)

/*
 * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
 */
	ENTRY(lockstat_wrapper_arg)
	incb	T_LOCKSTAT(%rcx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	call	*lockstat_probe
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper_arg)


	ENTRY(mutex_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_tryenter
	not	%eax				/* return success (nonzero) */
.mutex_tryenter_lockstat_patch_point:
	ret
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	0f
	not	%eax				/* return success (nonzero) */
	ret
0:
	xorl	%eax, %eax			/* return failure */
	ret
	SET_SIZE(mutex_adaptive_tryenter)

	.globl mutex_exit_critical_start

	ENTRY(mutex_exit)
mutex_exit_critical_start:		/* If interrupted, restart here */
	movq	%gs:CPU_THREAD, %rdx
	cmpq	%rdx, (%rdi)
	jne	mutex_vector_exit		/* wrong type or wrong owner */
	movq	$0, (%rdi)			/* clear owner AND lock */
.mutex_exit_critical_end:
.mutex_exit_lockstat_patch_point:
	ret
	movq	%rdi, %rsi
	movl	$LS_MUTEX_EXIT_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_exit)

	.globl	mutex_exit_critical_size
	.type	mutex_exit_critical_size, @object
	.align	CPTRSIZE
mutex_exit_critical_size:
	.quad	.mutex_exit_critical_end - mutex_exit_critical_start
	SET_SIZE(mutex_exit_critical_size)

#else

	ENTRY_NP(mutex_enter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
	lock
	cmpxchgl %edx, (%ecx)
	jnz	mutex_vector_enter
.mutex_enter_lockstat_patch_point:
	ret
	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
	ALTENTRY(lockstat_wrapper)	/* expects edx=thread, ecx=lock, */
					/*   eax=lockstat event */
	pushl	%ebp				/* buy a frame */
	movl	%esp, %ebp
	incb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat++ */
	pushl	%edx				/* save thread pointer	 */
	movl	$lockstat_probemap, %edx
	movl	(%edx, %eax, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushl	%ecx				/* push lock */
	pushl	%eax				/* push probe ID */
	call	*lockstat_probe
	addl	$8, %esp
1:
	popl	%edx				/* restore thread pointer */
	decb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	popl	%ebp				/* pop off frame */
	ret
	SET_SIZE(lockstat_wrapper)
	SET_SIZE(mutex_enter)

	ENTRY(lockstat_wrapper_arg)	/* expects edx=thread, ecx=lock, */
					/* eax=lockstat event, pushed arg */
	incb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat++ */
	pushl	%edx				/* save thread pointer	 */
	movl	$lockstat_probemap, %edx
	movl	(%edx, %eax, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushl	%ebp				/* save %ebp */
	pushl	8(%esp)				/* push arg1 */
	movl	%ebp, 12(%esp)			/* fake up the stack frame */
	movl	%esp, %ebp			/* fake up base pointer */
	addl	$12, %ebp			/* adjust faked base pointer */
	pushl	%ecx				/* push lock */
	pushl	%eax				/* push probe ID */
	call	*lockstat_probe
	addl	$12, %esp			/* adjust for arguments */
	popl	%ebp				/* pop frame */
1:
	popl	%edx				/* restore thread pointer */
	decb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	addl	$4, %esp			/* pop argument */
	ret
	SET_SIZE(lockstat_wrapper_arg)


	ENTRY(mutex_tryenter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
	lock
	cmpxchgl %edx, (%ecx)
	jnz	mutex_vector_tryenter
	movl	%ecx, %eax
.mutex_tryenter_lockstat_patch_point:
	ret
	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
	lock
	cmpxchgl %edx, (%ecx)
	jnz	0f
	movl	%ecx, %eax
	ret
0:
	xorl	%eax, %eax
	ret
	SET_SIZE(mutex_adaptive_tryenter)

	.globl mutex_exit_critical_size
	.globl mutex_exit_critical_start

	ENTRY(mutex_exit)
mutex_exit_critical_start:		/* If interrupted, restart here */
	movl	%gs:CPU_THREAD, %edx
	movl	4(%esp), %ecx
	cmpl	%edx, (%ecx)
	jne	mutex_vector_exit		/* wrong type or wrong owner */
	movl	$0, (%ecx)			/* clear owner AND lock */
.mutex_exit_critical_end:
.mutex_exit_lockstat_patch_point:
	ret
	movl	$LS_MUTEX_EXIT_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(mutex_exit)

	.globl	mutex_exit_critical_size
	.type	mutex_exit_critical_size, @object
	.align	CPTRSIZE
mutex_exit_critical_size:
	.long	.mutex_exit_critical_end - mutex_exit_critical_start
	SET_SIZE(mutex_exit_critical_size)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * rw_enter() and rw_exit().
 *
 * These routines handle the simple cases of rw_enter (write-locking an unheld
 * lock or read-locking a lock that's neither write-locked nor write-wanted)
 * and rw_exit (no waiters or not the last reader).  If anything complicated
 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 */
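/*
 * Illustration only (not compiled): a C sketch of the reader fast path,
 * written against the rw_wwwh word the comments below refer to and a
 * hypothetical atomic_cas_ulong()-style helper.  The writer fast path is
 * the same idea with an expected value of zero and a new value of
 * (curthread | RW_WRITE_LOCKED); anything else goes to rw_enter_sleep()
 * or rw_exit_wakeup().  (The assembly additionally bumps T_KPRI_REQ for
 * readers via THREAD_KPRI_REQUEST().)
 *
 *	old = lp->rw_wwwh;
 *	if ((old & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0 ||
 *	    atomic_cas_ulong(&lp->rw_wwwh, old, old + RW_READ_LOCK) != old)
 *		rw_enter_sleep(lp, RW_READER);
 */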
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(rw_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	cmpl	$RW_WRITER, %esi
	je	.rw_write_enter
	incl	T_KPRI_REQ(%rdx)		/* THREAD_KPRI_REQUEST() */
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
	jnz	rw_enter_sleep
	leaq	RW_READ_LOCK(%rax), %rdx	/* rdx = new rw_wwwh value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab read lock */
	jnz	rw_enter_sleep
.rw_read_enter_lockstat_patch_point:
	ret
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg
.rw_write_enter:
	orq	$RW_WRITE_LOCKED, %rdx		/* rdx = write-locked value */
	xorl	%eax, %eax			/* rax = unheld value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab write lock */
	jnz	rw_enter_sleep
.rw_write_enter_lockstat_patch_point:
	ret
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_enter)

	ENTRY(rw_exit)
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
	jne	.rw_not_single_reader
	xorl	%edx, %edx			/* rdx = new value (unheld) */
.rw_read_exit:
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop read lock */
	jnz	rw_exit_wakeup
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	decl	T_KPRI_REQ(%rcx)		/* THREAD_KPRI_RELEASE() */
.rw_read_exit_lockstat_patch_point:
	ret
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg
.rw_not_single_reader:
	testl	$RW_WRITE_LOCKED, %eax	/* write-locked or write-wanted? */
	jnz	.rw_write_exit
	leaq	-RW_READ_LOCK(%rax), %rdx	/* rdx = new value */
	cmpl	$RW_READ_LOCK, %edx
	jge	.rw_read_exit		/* not last reader, safe to drop */
	jmp	rw_exit_wakeup			/* last reader with waiters */
.rw_write_exit:
	movq	%gs:CPU_THREAD, %rax		/* rax = thread ptr */
	xorl	%edx, %edx			/* rdx = new value (unheld) */
	orq	$RW_WRITE_LOCKED, %rax		/* rax = write-locked value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop write lock */
	jnz	rw_exit_wakeup
.rw_write_exit_lockstat_patch_point:
	ret
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_exit)

#else

	ENTRY(rw_enter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	cmpl	$RW_WRITER, 8(%esp)
	je	.rw_write_enter
	incl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_REQUEST() */
	movl	(%ecx), %eax			/* eax = old rw_wwwh value */
	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
	jnz	rw_enter_sleep
	leal	RW_READ_LOCK(%eax), %edx	/* edx = new rw_wwwh value */
	lock
	cmpxchgl %edx, (%ecx)			/* try to grab read lock */
	jnz	rw_enter_sleep
.rw_read_enter_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %eax
	pushl	$RW_READER
	jmp	lockstat_wrapper_arg
.rw_write_enter:
	orl	$RW_WRITE_LOCKED, %edx		/* edx = write-locked value */
	xorl	%eax, %eax			/* eax = unheld value */
	lock
	cmpxchgl %edx, (%ecx)			/* try to grab write lock */
	jnz	rw_enter_sleep
.rw_write_enter_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %eax
	pushl	$RW_WRITER
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_enter)

	ENTRY(rw_exit)
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	movl	(%ecx), %eax			/* eax = old rw_wwwh value */
	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
	jne	.rw_not_single_reader
	xorl	%edx, %edx			/* edx = new value (unheld) */
.rw_read_exit:
	lock
	cmpxchgl %edx, (%ecx)			/* try to drop read lock */
	jnz	rw_exit_wakeup
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	decl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_RELEASE() */
.rw_read_exit_lockstat_patch_point:
	ret
	movl	$LS_RW_EXIT_RELEASE, %eax
	pushl	$RW_READER
	jmp	lockstat_wrapper_arg
.rw_not_single_reader:
	testl	$RW_WRITE_LOCKED, %eax	/* write-locked or write-wanted? */
	jnz	.rw_write_exit
	leal	-RW_READ_LOCK(%eax), %edx	/* edx = new value */
	cmpl	$RW_READ_LOCK, %edx
	jge	.rw_read_exit		/* not last reader, safe to drop */
	jmp	rw_exit_wakeup			/* last reader with waiters */
.rw_write_exit:
	movl	%gs:CPU_THREAD, %eax		/* eax = thread ptr */
	xorl	%edx, %edx			/* edx = new value (unheld) */
	orl	$RW_WRITE_LOCKED, %eax		/* eax = write-locked value */
	lock
	cmpxchgl %edx, (%ecx)			/* try to drop write lock */
	jnz	rw_exit_wakeup
.rw_write_exit_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	$LS_RW_EXIT_RELEASE, %eax
	pushl	$RW_WRITER
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_exit)

#endif	/* !__amd64 */

#endif	/* __lint */

#if defined(lint) || defined(__lint)

void
lockstat_hot_patch(void)
{}

#else

#if defined(__amd64)

#define	HOT_PATCH(addr, event, active_instr, normal_instr, len)	\
	movq	$normal_instr, %rsi;		\
	movq	$active_instr, %rdi;		\
	leaq	lockstat_probemap(%rip), %rax;	\
	movl	_MUL(event, DTRACE_IDSIZE)(%rax), %eax;	\
	testl	%eax, %eax;			\
	jz	9f;				\
	movq	%rdi, %rsi;			\
9:						\
	movq	$len, %rdx;			\
	movq	$addr, %rdi;			\
	call	hot_patch_kernel_text

#else

#define	HOT_PATCH(addr, event, active_instr, normal_instr, len)	\
	movl	$normal_instr, %ecx;		\
	movl	$active_instr, %edx;		\
	movl	$lockstat_probemap, %eax;	\
	movl	_MUL(event, DTRACE_IDSIZE)(%eax), %eax;	\
	testl	%eax, %eax;			\
	jz	. + 4;				\
	movl	%edx, %ecx;			\
	pushl	$len;				\
	pushl	%ecx;				\
	pushl	$addr;				\
	call	hot_patch_kernel_text;		\
	addl	$12, %esp;

#endif	/* !__amd64 */

	ENTRY(lockstat_hot_patch)
#if defined(__amd64)
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
#endif	/* __amd64 */
	HOT_PATCH(.mutex_enter_lockstat_patch_point,
		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.mutex_tryenter_lockstat_patch_point,
		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.mutex_exit_lockstat_patch_point,
		LS_MUTEX_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.rw_write_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.rw_read_exit_lockstat_patch_point,
		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.lock_set_lockstat_patch_point,
		LS_LOCK_SET_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.lock_try_lockstat_patch_point,
		LS_LOCK_TRY_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.lock_clear_lockstat_patch_point,
		LS_LOCK_CLEAR_RELEASE, NOP_INSTR, RET_INSTR, 1)
	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
		LS_LOCK_SET_SPL_ACQUIRE, NOP_INSTR, RET_INSTR, 1)

#if defined(__amd64)
	HOT_PATCH(LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT,
		LS_LOCK_CLEAR_SPLX_RELEASE,
		LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL, 0, 1);
#else
	HOT_PATCH(.lock_clear_splx_lockstat_patch_point,
		LS_LOCK_CLEAR_SPLX_RELEASE, NOP_INSTR, RET_INSTR, 1)
#endif	/* !__amd64 */

#if defined(__amd64)
	leave			/* unwind stack */
#endif	/* __amd64 */
	ret
	SET_SIZE(lockstat_hot_patch)

#endif	/* __lint */

#if defined(lint) || defined(__lint)

/* XX64 membar_*() should be inlines */

void
membar_enter(void)
{}

void
membar_exit(void)
{}

void
membar_producer(void)
{}

void
membar_consumer(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(membar_enter)
	ALTENTRY(membar_exit)
	mfence			/* lighter weight than lock; xorq $0,(%rsp) */
	ret
	SET_SIZE(membar_exit)
	SET_SIZE(membar_enter)

	ENTRY(membar_producer)
	sfence
	ret
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	lfence
	ret
	SET_SIZE(membar_consumer)

#else

	ENTRY(membar_enter)
	ALTENTRY(membar_exit)
	lock
	xorl	$0, (%esp)
	ret
	SET_SIZE(membar_exit)
	SET_SIZE(membar_enter)

/*
 * On machines that support sfence and lfence, these
 * memory barriers can be more precisely implemented
 * without causing the whole world to stop
 */
	ENTRY(membar_producer)
	.globl	_patch_sfence_ret
_patch_sfence_ret:			/* c.f. membar #StoreStore */
	lock
	xorl	$0, (%esp)
	ret
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	.globl	_patch_lfence_ret
_patch_lfence_ret:			/* c.f. membar #LoadLoad */
	lock
	xorl	$0, (%esp)
	ret
	SET_SIZE(membar_consumer)

#endif	/* !__amd64 */

#endif	/* __lint */
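/*
 * Usage sketch (illustration only, with hypothetical 'data' and 'flag'
 * variables): the producer publishes its data before setting the flag,
 * and the consumer orders its read of the flag before its reads of the
 * data.
 *
 *	producer:
 *		data = value;
 *		membar_producer();
 *		flag = 1;
 *
 *	consumer:
 *		while (flag == 0)
 *			continue;
 *		membar_consumer();
 *		... data is now safe to read ...
 */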

/*
 * thread_onproc()
 * Set thread in onproc state for the specified CPU.
 * Also set the thread lock pointer to the CPU's onproc lock.
 * Since the new lock isn't held, the store ordering is important.
 * If not done in assembler, the compiler could reorder the stores.
 */
#if defined(lint) || defined(__lint)

void
thread_onproc(kthread_id_t t, cpu_t *cp)
{
	t->t_state = TS_ONPROC;
	t->t_lockp = &cp->cpu_thread_lock;
}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(thread_onproc)
	addq	$CPU_THREAD_LOCK, %rsi	/* pointer to disp_lock while running */
	movl	$ONPROC_THREAD, T_STATE(%rdi)	/* set state to TS_ONPROC */
	movq	%rsi, T_LOCKP(%rdi)	/* store new lock pointer */
	ret
	SET_SIZE(thread_onproc)

#else

	ENTRY(thread_onproc)
	movl	4(%esp), %eax
	movl	8(%esp), %ecx
	addl	$CPU_THREAD_LOCK, %ecx	/* pointer to disp_lock while running */
	movl	$ONPROC_THREAD, T_STATE(%eax)	/* set state to TS_ONPROC */
	movl	%ecx, T_LOCKP(%eax)	/* store new lock pointer */
	ret
	SET_SIZE(thread_onproc)

#endif	/* !__amd64 */

#endif	/* __lint */