/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

#include "assym.h"

#include <sys/mutex_impl.h>
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/rwlock_impl.h>
#include <sys/lockstat.h>


#if defined(OPTERON_ERRATUM_147)

/*
 * Leave space for an lfence to be inserted if required by a CPU which suffers
 * from this erratum.  Pad (with nops) the location for the lfence so that it
 * is adequately aligned for atomic hotpatching.
 */
#define	ERRATUM147_PATCH_POINT(name)	\
	.align	4, NOP_INSTR;		\
.##name##_147_patch_point:		\
	nop;				\
	nop;				\
	nop;				\
	nop;

#else /* defined(OPTERON_ERRATUM_147) */

/* Empty macro so ifdefs are not required for all of the erratum sites. */
#define	ERRATUM147_PATCH_POINT(name)

#endif /* defined(OPTERON_ERRATUM_147) */
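
/*
 * For illustration, ERRATUM147_PATCH_POINT(mutex_enter) initially assembles to
 * a 4-byte-aligned run of four nops:
 *
 *	.mutex_enter_147_patch_point:	nop; nop; nop; nop	(90 90 90 90)
 *
 * which patch_erratum_147() below rewrites in place to:
 *
 *	.mutex_enter_147_patch_point:	lfence; nop		(0f ae e8 90)
 */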

/*
 * Patch point for lockstat probes.  When the associated probe is disabled, it
 * will 'ret' from the function.  It is hotpatched to allow execution to fall
 * through when the probe is enabled.
 */
#define	LOCKSTAT_RET(name)		\
.##name##_lockstat_patch_point:	\
	ret;
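
/*
 * For example, while its probe is disabled the fast path of mutex_enter()
 * simply returns at .mutex_enter_lockstat_patch_point; when the probe is
 * enabled, lockstat_hotpatch_site() replaces that one-byte 'ret' with a nop so
 * execution falls through into the lockstat_wrapper() tail call that follows.
 */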

/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts so don't use this to spin on a lock.
 *
 * ulock_try() is for a lock in the user address space.
 */
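
/*
 * A rough C sketch of lock_try() (illustrative only; the cast and the atomic
 * routine used here are assumptions, the real code is the assembly below):
 *
 *	int
 *	lock_try(lock_t *lp)
 *	{
 *		return (atomic_swap_8((volatile uint8_t *)lp, 0xff) == 0);
 *	}
 *
 * i.e. atomically exchange -1 into the lock byte and succeed only if the old
 * value was 0 (unheld).
 */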

	.globl	kernelbase

	ENTRY(lock_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
	LOCKSTAT_RET(lock_try)

	testb	%al, %al
	jnz	0f
	ret
0:
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movl	$LS_LOCK_TRY_ACQUIRE, %edi /* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_try)

	ENTRY(lock_spin_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
	ret
	SET_SIZE(lock_spin_try)

	ENTRY(ulock_try)
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	ulock_pass		/*	uaddr < kernelbase, proceed */

	movq	%rdi, %r12		/* preserve lock ptr for debugging */
	leaq	.ulock_panic_msg(%rip), %rdi
	pushq	%rbp
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic

#endif /* DEBUG */

ulock_pass:
	movl	$1, %eax
	xchgb	%al, (%rdi)
	xorb	$1, %al
	ret
	SET_SIZE(ulock_try)

#ifdef DEBUG
	.data
.ulock_panic_msg:
	.string "ulock_try: Argument is above kernelbase"
	.text
#endif	/* DEBUG */

/*
 * lock_clear(lp)
 *	- unlock lock without changing interrupt priority level.
 */

	ENTRY(lock_clear)
	movb	$0, (%rdi)
	LOCKSTAT_RET(lock_clear)

	movq	%rdi, %rsi			/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_RELEASE, %edi	/* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)

	ENTRY(ulock_clear)
#ifdef DEBUG
	movq	kernelbase(%rip), %rcx
	cmpq	%rcx, %rdi		/* test uaddr < kernelbase */
	jb	ulock_clr		/*	 uaddr < kernelbase, proceed */

	leaq	.ulock_clear_msg(%rip), %rdi
	pushq	%rbp
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic
#endif

ulock_clr:
	movb	$0, (%rdi)
	ret
	SET_SIZE(ulock_clear)

#ifdef DEBUG
	.data
.ulock_clear_msg:
	.string "ulock_clear: Argument is above kernelbase"
	.text
#endif	/* DEBUG */

/*
 * lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 * Sets pil to new_pil, grabs lp, stores old pil in *old_pil.
 */
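
/*
 * In outline (a pseudocode sketch; names other than splr() and
 * lock_set_spl_spin() are illustrative):
 *
 *	old_spl = splr(new_pil);		// raise pil first
 *	if (atomic xchg of -1 into the lock byte saw 0)
 *		*old_pil = (u_short)old_spl;	// fast path: record old pil
 *	else
 *		lock_set_spl_spin(lp, new_pil, old_pil, old_spl);
 */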

	ENTRY(lock_set_spl)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp
	movl	%esi, 8(%rsp)		/* save priority level */
	movq	%rdx, 16(%rsp)		/* save old pil ptr */
	movq	%rdi, 24(%rsp)		/* save lock pointer */
	movl	%esi, %edi		/* pass priority level */
	call	splr			/* raise priority level */
	movq	24(%rsp), %rdi		/* rdi = lock addr */
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movq	16(%rsp), %rdx		/* rdx = old pil addr */
	movw	%ax, (%rdx)		/* store old pil */
	leave
	LOCKSTAT_RET(lock_set_spl)

	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %edi
	jmp	lockstat_wrapper

.lss_miss:
	movl	8(%rsp), %esi		/* new_pil */
	movq	16(%rsp), %rdx		/* old_pil_addr */
	movl	%eax, %ecx		/* original pil */
	leave				/* unwind stack */
	jmp	lock_set_spl_spin
	SET_SIZE(lock_set_spl)

/*
 * void
 * lock_init(lp)
 */

	ENTRY(lock_init)
	movb	$0, (%rdi)
	ret
	SET_SIZE(lock_init)

/*
 * void
 * lock_set(lp)
 */

	ENTRY(lock_set)
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
	LOCKSTAT_RET(lock_set)

	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)

/*
 * lock_clear_splx(lp, s)
 */

	ENTRY(lock_clear_splx)
	pushq	%rbp
	movq	%rsp, %rbp
	pushq	%rdi		/* save lp across call for lockstat */
	movb	$0, (%rdi)	/* clear lock */
	movl	%esi, %edi	/* arg for splx */
	call	splx		/* let splx do its thing */
	popq	%rsi		/* retrieve lp for lockstat */
	leave
	LOCKSTAT_RET(lock_clear_splx)

	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, cmnint() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 */
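
/*
 * A rough C sketch of the fast paths implemented below ('owner' stands for the
 * first word of the mutex, which holds the owning thread pointer for an
 * adaptive lock; the helper names are illustrative):
 *
 *	mutex_enter(lp):
 *		if (atomic_cas_ptr(&owner, NULL, curthread) != NULL)
 *			mutex_vector_enter(lp);
 *
 *	mutex_exit(lp):
 *		if (owner != curthread)
 *			mutex_vector_exit(lp);
 *		else
 *			owner = NULL;		(plain store; the PC-reset
 *						 scheme above closes the race)
 */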

	ENTRY_NP(mutex_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_enter

	ERRATUM147_PATCH_POINT(mutex_enter)

	LOCKSTAT_RET(mutex_enter)

	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_enter)


/*
 * expects %rdx=thread, %rsi=lock, %edi=lockstat event
 */
	ENTRY_NP(lockstat_wrapper)
	incb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%eax, %edi
	movq	lockstat_probe, %rax
	INDIRECT_CALL_REG(rax)
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper)

/*
 * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
 */
	ENTRY(lockstat_wrapper_arg)
	incb	T_LOCKSTAT(%rcx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp
	movq	%rsp, %rbp
	movl	%eax, %edi
	movq	lockstat_probe, %rax
	INDIRECT_CALL_REG(rax)
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper_arg)


	ENTRY(mutex_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_tryenter
	not	%eax				/* return success (nonzero) */

	ERRATUM147_PATCH_POINT(mutex_tryenter)

	LOCKSTAT_RET(mutex_tryenter)

	movq	%rdi, %rsi
	movl	$LS_MUTEX_TRYENTER_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	0f
	not	%eax				/* return success (nonzero) */

	ERRATUM147_PATCH_POINT(mutex_atryenter)

	ret
0:
	xorl	%eax, %eax			/* return failure */
	ret
	SET_SIZE(mutex_adaptive_tryenter)

	.globl	mutex_owner_running_critical_start

	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:
	movq	(%rdi), %r11		/* get owner field */
	andq	$MUTEX_THREAD, %r11	/* remove waiters bit */
	cmpq	$0, %r11		/* if free, skip */
	je	1f			/* go return 0 */
	movq	T_CPU(%r11), %r8	/* get owner->t_cpu */
	movq	CPU_THREAD(%r8), %r9	/* get t_cpu->cpu_thread */
.mutex_owner_running_critical_end:
	cmpq	%r11, %r9	/* owner == running thread? */
	je	2f		/* yes, go return cpu */
1:
	xorq	%rax, %rax	/* return 0 */
	ret
2:
	movq	%r8, %rax		/* return cpu */
	ret
	SET_SIZE(mutex_owner_running)

	.globl	mutex_owner_running_critical_size
	.type	mutex_owner_running_critical_size, @object
	.align	CPTRSIZE
mutex_owner_running_critical_size:
	.quad	.mutex_owner_running_critical_end - mutex_owner_running_critical_start
	SET_SIZE(mutex_owner_running_critical_size)

	.globl	mutex_exit_critical_start

	ENTRY(mutex_exit)
mutex_exit_critical_start:		/* If interrupted, restart here */
	movq	%gs:CPU_THREAD, %rdx
	cmpq	%rdx, (%rdi)
	jne	mutex_vector_exit		/* wrong type or wrong owner */
	movq	$0, (%rdi)			/* clear owner AND lock */
.mutex_exit_critical_end:
	LOCKSTAT_RET(mutex_exit)

	movq	%rdi, %rsi
	movl	$LS_MUTEX_EXIT_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_exit)

	.globl	mutex_exit_critical_size
	.type	mutex_exit_critical_size, @object
	.align	CPTRSIZE
mutex_exit_critical_size:
	.quad	.mutex_exit_critical_end - mutex_exit_critical_start
	SET_SIZE(mutex_exit_critical_size)

/*
 * rw_enter() and rw_exit().
 *
 * These routines handle the simple cases of rw_enter (write-locking an unheld
 * lock or read-locking a lock that's neither write-locked nor write-wanted)
 * and rw_exit (no waiters or not the last reader).  If anything complicated
 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 */
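
/*
 * A simplified sketch of the fast paths below, where 'wwwh' is the rw_wwwh
 * word holding the reader count plus the write-locked/write-wanted/waiter
 * bits (pseudocode only):
 *
 *	rw_enter(READER):	old = wwwh;
 *				fail if (old & (WRITE_LOCKED | WRITE_WANTED));
 *				cas(&wwwh, old, old + RW_READ_LOCK);
 *	rw_enter(WRITER):	cas(&wwwh, 0, curthread | RW_WRITE_LOCKED);
 *	rw_exit(last reader):	cas(&wwwh, RW_READ_LOCK, 0);
 *	rw_exit(writer):	cas(&wwwh, curthread | RW_WRITE_LOCKED, 0);
 *
 * Any cas failure, or a reader exit that would leave only waiter bits behind,
 * falls back to rw_enter_sleep() / rw_exit_wakeup().
 */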

	ENTRY(rw_enter)
	cmpl	$RW_WRITER, %esi
	je	.rw_write_enter
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
	jnz	rw_enter_sleep
	leaq	RW_READ_LOCK(%rax), %rdx	/* rdx = new rw_wwwh value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab read lock */
	jnz	rw_enter_sleep
	LOCKSTAT_RET(rw_read_enter)

	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg

.rw_write_enter:
	movq	%gs:CPU_THREAD, %rdx
	orq	$RW_WRITE_LOCKED, %rdx		/* rdx = write-locked value */
	xorl	%eax, %eax			/* rax = unheld value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab write lock */
	jnz	rw_enter_sleep

	ERRATUM147_PATCH_POINT(rw_write_enter)

	LOCKSTAT_RET(rw_write_enter)

	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_enter)

	ENTRY(rw_exit)
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
	jne	.rw_not_single_reader
	xorl	%edx, %edx			/* rdx = new value (unheld) */
.rw_read_exit:
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop read lock */
	jnz	rw_exit_wakeup
	LOCKSTAT_RET(rw_read_exit)

	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg

.rw_not_single_reader:
	testl	$RW_WRITE_LOCKED, %eax	/* write-locked? */
	jnz	.rw_write_exit
	leaq	-RW_READ_LOCK(%rax), %rdx	/* rdx = new value */
	cmpl	$RW_READ_LOCK, %edx
	jge	.rw_read_exit		/* not last reader, safe to drop */
	jmp	rw_exit_wakeup			/* last reader with waiters */
.rw_write_exit:
	movq	%gs:CPU_THREAD, %rax		/* rax = thread ptr */
	xorl	%edx, %edx			/* rdx = new value (unheld) */
	orq	$RW_WRITE_LOCKED, %rax		/* rax = write-locked value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop write lock */
	jnz	rw_exit_wakeup
	LOCKSTAT_RET(rw_write_exit)

	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_exit)

#if defined(OPTERON_ERRATUM_147)

/*
 * Track if erratum 147 workaround has been hotpatched into place.
 */
	DGDEF3(erratum_147_patched, 4, 4)
	.long	0

#define HOT_MUTEX_PATCH(iaddr, insn_reg)	\
	movq	$iaddr, %rdi;		\
	movl	%insn_reg, %esi;	\
	movl	$4, %edx;		\
	call	hot_patch_kernel_text;


/*
 * void
 * patch_erratum_147(void)
 *
 * Patch lock operations to work around erratum 147.
 *
 * The workaround is to place a fencing instruction (lfence) between the
 * mutex operation and the subsequent read-modify-write instruction.
 */

	ENTRY_NP(patch_erratum_147)
	pushq	%rbp
	movq	%rsp, %rbp
	pushq	%r12

	/*
	 * Patch `nop; nop; nop; nop` sequence to `lfence; nop`.  Since those
	 * patch points have been aligned to a 4-byte boundary, we can be
	 * confident that hot_patch_kernel_text() will be able to proceed
	 * safely and successfully.
	 */
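	/*
	 * The 4-byte value 0x90e8ae0f is stored little-endian, so the bytes
	 * written are 0f ae e8 90: an lfence (0f ae e8) followed by a single
	 * nop (90), exactly filling the four-nop patch point.
	 */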
	movl	$0x90e8ae0f, %r12d
	HOT_MUTEX_PATCH(.mutex_enter_147_patch_point, r12d)
	HOT_MUTEX_PATCH(.mutex_tryenter_147_patch_point, r12d)
	HOT_MUTEX_PATCH(.mutex_atryenter_147_patch_point, r12d)
	HOT_MUTEX_PATCH(.rw_write_enter_147_patch_point, r12d)

	/* Record that erratum 147 points have been hotpatched */
	movl	$1, erratum_147_patched

	popq	%r12
	movq	%rbp, %rsp
	popq	%rbp
	ret
	SET_SIZE(patch_erratum_147)

#endif	/* OPTERON_ERRATUM_147 */

	/*
	 * void
	 * lockstat_hotpatch_site(caddr_t instr_addr, int do_enable)
	 */
	ENTRY(lockstat_hotpatch_site)
	pushq	%rbp
	movq	%rsp, %rbp
	pushq	%rdi
	pushq	%rsi

	testl	%esi, %esi
	jz	.do_disable

	/* enable the probe (replace ret with nop) */
	movl	$NOP_INSTR, %esi
	movl	$1, %edx
	call	hot_patch_kernel_text
	leave
	ret

.do_disable:
	/* disable the probe (replace nop with ret) */
	movl	$RET_INSTR, %esi
	movl	$1, %edx
	call	hot_patch_kernel_text
	leave
	ret
	SET_SIZE(lockstat_hotpatch_site)

#define	HOT_PATCH_MATCH(name, probe, reg)			\
	cmpl	$probe, %reg;					\
	jne	1f;						\
	leaq	lockstat_probemap(%rip), %rax;			\
	movl	_MUL(probe, DTRACE_IDSIZE)(%rax), %esi;		\
	movq	$.##name##_lockstat_patch_point, %rdi;	\
	call	lockstat_hotpatch_site;				\
	1:

/*
 * void
 * lockstat_hotpatch_probe(int ls_probe)
 *
 * Given a lockstat probe identifier, hotpatch any associated lockstat
 * primitive routine(s) so they fall through into the lockstat_probe() call (if
 * the probe is enabled) or return normally (when the probe is disabled).
 */
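
/*
 * For example, lockstat_hotpatch_probe(LS_MUTEX_ENTER_ACQUIRE) loads that
 * probe's lockstat_probemap[] entry (non-zero only while the probe is enabled)
 * and hands it, together with the address of .mutex_enter_lockstat_patch_point,
 * to lockstat_hotpatch_site(), which nops out or restores the 'ret' there.
 */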

	ENTRY(lockstat_hotpatch_probe)
	pushq	%rbp
	movq	%rsp, %rbp
	pushq	%r12
	movl	%edi, %r12d

	HOT_PATCH_MATCH(mutex_enter, LS_MUTEX_ENTER_ACQUIRE, r12d)
	HOT_PATCH_MATCH(mutex_tryenter, LS_MUTEX_TRYENTER_ACQUIRE, r12d)
	HOT_PATCH_MATCH(mutex_exit, LS_MUTEX_EXIT_RELEASE, r12d)

	HOT_PATCH_MATCH(rw_write_enter, LS_RW_ENTER_ACQUIRE, r12d)
	HOT_PATCH_MATCH(rw_read_enter, LS_RW_ENTER_ACQUIRE, r12d)
	HOT_PATCH_MATCH(rw_write_exit, LS_RW_EXIT_RELEASE, r12d)
	HOT_PATCH_MATCH(rw_read_exit, LS_RW_EXIT_RELEASE, r12d)

	HOT_PATCH_MATCH(lock_set, LS_LOCK_SET_ACQUIRE, r12d)
	HOT_PATCH_MATCH(lock_try, LS_LOCK_TRY_ACQUIRE, r12d)
	HOT_PATCH_MATCH(lock_clear, LS_LOCK_CLEAR_RELEASE, r12d)
	HOT_PATCH_MATCH(lock_set_spl, LS_LOCK_SET_SPL_ACQUIRE, r12d)
	HOT_PATCH_MATCH(lock_clear_splx, LS_LOCK_CLEAR_SPLX_RELEASE, r12d)

	popq	%r12
	leave
	ret
	SET_SIZE(lockstat_hotpatch_probe)

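/*
 * Memory barriers.  membar_enter(), membar_exit() and membar_sync() are all
 * full barriers (mfence); membar_producer() orders stores (sfence) and
 * membar_consumer() orders loads (lfence).
 */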
	ENTRY(membar_enter)
	ALTENTRY(membar_exit)
	ALTENTRY(membar_sync)
	mfence			/* lighter weight than lock; xorq $0,(%rsp) */
	ret
	SET_SIZE(membar_sync)
	SET_SIZE(membar_exit)
	SET_SIZE(membar_enter)

	ENTRY(membar_producer)
	sfence
	ret
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	lfence
	ret
	SET_SIZE(membar_consumer)

/*
 * thread_onproc()
 * Set thread in onproc state for the specified CPU.
 * Also set the thread lock pointer to the CPU's onproc lock.
 * Since the new lock isn't held, the store ordering is important.
 * If not done in assembler, the compiler could reorder the stores.
 */
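
/*
 * Equivalent C, as a sketch (field names are from the illumos thread and cpu
 * structures; the required store ordering is the reason this is assembly):
 *
 *	t->t_state = TS_ONPROC;
 *	t->t_lockp = &cp->cpu_thread_lock;
 */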

	ENTRY(thread_onproc)
	addq	$CPU_THREAD_LOCK, %rsi	/* pointer to disp_lock while running */
	movl	$ONPROC_THREAD, T_STATE(%rdi)	/* set state to TS_ONPROC */
	movq	%rsi, T_LOCKP(%rdi)	/* store new lock pointer */
	ret
	SET_SIZE(thread_onproc)

/*
 * mutex_delay_default(void)
 * Spins for approx a few hundred processor cycles and returns to caller.
 */

	ENTRY(mutex_delay_default)
	movq	$92,%r11
0:	decq	%r11
	jg	0b
	ret
	SET_SIZE(mutex_delay_default)