xref: /titanic_51/usr/src/uts/intel/ia32/ml/copy.s (revision 505d05c73a6e56769f263d4803b22eddd168ee24)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
28/*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
29/*         All Rights Reserved						*/
30
31/*       Copyright (c) 1987, 1988 Microsoft Corporation			*/
32/*         All Rights Reserved						*/
33
34#pragma ident	"%Z%%M%	%I%	%E% SMI"
35
36#include <sys/errno.h>
37#include <sys/asm_linkage.h>
38
39#if defined(__lint)
40#include <sys/types.h>
41#include <sys/systm.h>
42#else	/* __lint */
43#include "assym.h"
44#endif	/* __lint */
45
46#define	KCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
47#define	XCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
48/*
49 * Non-temporal access (NTA) alignment requirement
50 */
51#define	NTA_ALIGN_SIZE	4	/* Must be at least 4-byte aligned */
52#define	NTA_ALIGN_MASK	_CONST(NTA_ALIGN_SIZE-1)
53#define	COUNT_ALIGN_SIZE	16	/* Must be at least 16-byte aligned */
54#define	COUNT_ALIGN_MASK	_CONST(COUNT_ALIGN_SIZE-1)
55
56/*
57 * Copy a block of storage, returning an error code if `from' or
58 * `to' takes a kernel pagefault which cannot be resolved.
59 * Returns errno value on pagefault error, 0 if all ok
60 */
61
62#if defined(__lint)
63
64/* ARGSUSED */
65int
66kcopy(const void *from, void *to, size_t count)
67{ return (0); }
68
69#else	/* __lint */
70
71	.globl	kernelbase
72
73#if defined(__amd64)
74
75	ENTRY(kcopy)
76	pushq	%rbp
77	movq	%rsp, %rbp
78#ifdef DEBUG
79	movq	kernelbase(%rip), %rax
80	cmpq	%rax, %rdi 		/* %rdi = from */
81	jb	0f
82	cmpq	%rax, %rsi		/* %rsi = to */
83	jnb	1f
840:	leaq	.kcopy_panic_msg(%rip), %rdi
85	xorl	%eax, %eax
86	call	panic
871:
88#endif
89	/*
90	 * pass lofault value as 4th argument to do_copy_fault
91	 */
92	leaq	_kcopy_copyerr(%rip), %rcx
93	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
94
	/*
	 * Common fault-protected copy path, also entered from copyin,
	 * copyout, xcopyin_nta and xcopyout_nta.
	 * Expects: %rdi = from, %rsi = to, %rdx = count,
	 * %rcx = lofault handler address, %r9 = curthread.
	 */
95do_copy_fault:
96	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
97	movq	%rcx, T_LOFAULT(%r9)	/* new lofault */
98
99	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
100	movq	%rdx, %rcx		/* %rcx = count */
101	shrq	$3, %rcx		/* 8-byte word count */
102	rep
103	  smovq
104
105	movq	%rdx, %rcx
106	andq	$7, %rcx		/* bytes left over */
107	rep
108	  smovb
109	xorl	%eax, %eax		/* return 0 (success) */
110
111	/*
112	 * A fault during do_copy_fault is indicated through an errno value
113	 * in %rax and we iretq from the trap handler to here.
114	 */
115_kcopy_copyerr:
116	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
117	leave
118	ret
119	SET_SIZE(kcopy)
120
121#elif defined(__i386)
122
123#define	ARG_FROM	8
124#define	ARG_TO		12
125#define	ARG_COUNT	16
126
127	ENTRY(kcopy)
128#ifdef DEBUG
129	pushl	%ebp
130	movl	%esp, %ebp
131	movl	kernelbase, %eax
132	cmpl	%eax, ARG_FROM(%ebp)
133	jb	0f
134	cmpl	%eax, ARG_TO(%ebp)
135	jnb	1f
1360:	pushl	$.kcopy_panic_msg
137	call	panic
1381:	popl	%ebp
139#endif
140	lea	_kcopy_copyerr, %eax	/* lofault value */
141	movl	%gs:CPU_THREAD, %edx
142
	/*
	 * Common fault-protected copy path, also entered from copyin
	 * and copyout.  Expects: %eax = lofault handler address,
	 * %edx = curthread; from/to/count are taken from the caller's
	 * stack via the frame set up below.
	 */
143do_copy_fault:
144	pushl	%ebp
145	movl	%esp, %ebp		/* setup stack frame */
146	pushl	%esi
147	pushl	%edi			/* save registers */
148
149	movl	T_LOFAULT(%edx), %edi
150	pushl	%edi			/* save the current lofault */
151	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
152
153	movl	ARG_COUNT(%ebp), %ecx
154	movl	ARG_FROM(%ebp), %esi
155	movl	ARG_TO(%ebp), %edi
156	shrl	$2, %ecx		/* word count */
157	rep
158	  smovl
159	movl	ARG_COUNT(%ebp), %ecx
160	andl	$3, %ecx		/* bytes left over */
161	rep
162	  smovb
163	xorl	%eax, %eax
164
165	/*
166	 * A fault during do_copy_fault is indicated through an errno value
167	 * in %eax and we iret from the trap handler to here.
168	 */
169_kcopy_copyerr:
170	popl	%ecx			/* saved lofault value */
171	popl	%edi
172	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
173	popl	%esi
174	popl	%ebp
175	ret
176	SET_SIZE(kcopy)
177
178#undef	ARG_FROM
179#undef	ARG_TO
180#undef	ARG_COUNT
181
182#endif	/* __i386 */
183#endif	/* __lint */
184
185#if defined(__lint)
186
187/*
188 * Copy a block of storage.  Similar to kcopy but uses non-temporal
189 * instructions.
190 */
191
192/* ARGSUSED */
193int
194kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
195{ return (0); }
196
197#else	/* __lint */
198
199#if defined(__amd64)

	/*
	 * Point src/dst past the end of the buffer and convert cnt to a
	 * negative count of 8-byte words; the loop body indexes
	 * (src, cnt, 8) and runs cnt up toward zero.
	 */
201#define	COPY_LOOP_INIT(src, dst, cnt)	\
202	addq	cnt, src;			\
203	addq	cnt, dst;			\
204	shrq	$3, cnt;			\
205	neg	cnt
206
207	/* Copy 16 bytes per loop.  Uses %rax and %r8 */
208#define	COPY_LOOP_BODY(src, dst, cnt)	\
209	prefetchnta	0x100(src, cnt, 8);	\
210	movq	(src, cnt, 8), %rax;		\
211	movq	0x8(src, cnt, 8), %r8;		\
212	movnti	%rax, (dst, cnt, 8);		\
213	movnti	%r8, 0x8(dst, cnt, 8);		\
214	addq	$2, cnt
215
216	ENTRY(kcopy_nta)
217	pushq	%rbp
218	movq	%rsp, %rbp
219#ifdef DEBUG
220	movq	kernelbase(%rip), %rax
221	cmpq	%rax, %rdi 		/* %rdi = from */
222	jb	0f
223	cmpq	%rax, %rsi		/* %rsi = to */
224	jnb	1f
2250:	leaq	.kcopy_panic_msg(%rip), %rdi
226	xorl	%eax, %eax
227	call	panic
2281:
229#endif
230
231	movq	%gs:CPU_THREAD, %r9
232	cmpq	$0, %rcx		/* No non-temporal access? */
233	/*
234	 * pass lofault value as 4th argument to do_copy_fault
235	 */
236	leaq	_kcopy_nta_copyerr(%rip), %rcx	/* doesn't set rflags */
237	jnz	do_copy_fault		/* use regular access */
238	/*
239	 * Make sure cnt is >= KCOPY_MIN_SIZE
240	 */
241	cmpq	$KCOPY_MIN_SIZE, %rdx
242	jb	do_copy_fault
243
244	/*
245	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
246	 * count is COUNT_ALIGN_SIZE aligned.
247	 */
248	movq	%rdi, %r10
249	orq	%rsi, %r10
250	andq	$NTA_ALIGN_MASK, %r10
251	orq	%rdx, %r10
252	andq	$COUNT_ALIGN_MASK, %r10
253	jnz	do_copy_fault
254
	/*
	 * Non-temporal copy path, also entered from xcopyin_nta and
	 * xcopyout_nta.  Expects: %rdi = from, %rsi = to, %rdx = count,
	 * %rcx = lofault handler address.
	 */
255	ALTENTRY(do_copy_fault_nta)
256	movq    %gs:CPU_THREAD, %r9     /* %r9 = thread addr */
257	movq    T_LOFAULT(%r9), %r11    /* save the current lofault */
258	movq    %rcx, T_LOFAULT(%r9)    /* new lofault */
259
260	/*
261	 * COPY_LOOP_BODY uses %rax and %r8
262	 */
263	COPY_LOOP_INIT(%rdi, %rsi, %rdx)
2642:	COPY_LOOP_BODY(%rdi, %rsi, %rdx)
265	jnz	2b
266
267	mfence				/* order the movnti stores */
268	xorl	%eax, %eax		/* return 0 (success) */
269
270_kcopy_nta_copyerr:
271	movq	%r11, T_LOFAULT(%r9)    /* restore original lofault */
272	leave
273	ret
274	SET_SIZE(do_copy_fault_nta)
275	SET_SIZE(kcopy_nta)
276
277#elif defined(__i386)
278
279#define	ARG_FROM	8
280#define	ARG_TO		12
281#define	ARG_COUNT	16
282
283#define	COPY_LOOP_INIT(src, dst, cnt)	\
284	addl	cnt, src;			\
285	addl	cnt, dst;			\
286	shrl	$3, cnt;			\
287	neg	cnt
288
289#define	COPY_LOOP_BODY(src, dst, cnt)	\
290	prefetchnta	0x100(src, cnt, 8);	\
291	movl	(src, cnt, 8), %esi;		\
292	movnti	%esi, (dst, cnt, 8);		\
293	movl	0x4(src, cnt, 8), %esi;		\
294	movnti	%esi, 0x4(dst, cnt, 8);		\
295	movl	0x8(src, cnt, 8), %esi;		\
296	movnti	%esi, 0x8(dst, cnt, 8);		\
297	movl	0xc(src, cnt, 8), %esi;		\
298	movnti	%esi, 0xc(dst, cnt, 8);		\
299	addl	$2, cnt
300
301	/*
302	 * kcopy_nta is not implemented for 32-bit as no performance
303	 * improvement was shown.  We simply jump directly to kcopy
304	 * and discard the 4 arguments.
305	 */
306	ENTRY(kcopy_nta)
307	jmp	kcopy
308
	/*
	 * NOTE(review): the lea below is unreachable — kcopy_nta jumps to
	 * kcopy above, and the callers of do_copy_fault_nta
	 * (xcopyin_nta/xcopyout_nta) load their own lofault value into
	 * %eax before jumping here.
	 */
309	lea	_kcopy_nta_copyerr, %eax	/* lofault value */
	/*
	 * Non-temporal copy path, entered from xcopyin_nta and
	 * xcopyout_nta.  Expects: %eax = lofault handler address;
	 * from/to/count are taken from the caller's stack via the
	 * frame set up below.
	 */
310	ALTENTRY(do_copy_fault_nta)
311	pushl	%ebp
312	movl	%esp, %ebp		/* setup stack frame */
313	pushl	%esi
314	pushl	%edi
315
316	movl	%gs:CPU_THREAD, %edx
317	movl	T_LOFAULT(%edx), %edi
318	pushl	%edi			/* save the current lofault */
319	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
320
321	/* COPY_LOOP_BODY needs to use %esi */
322	movl	ARG_COUNT(%ebp), %ecx
323	movl	ARG_FROM(%ebp), %edi
324	movl	ARG_TO(%ebp), %eax
325	COPY_LOOP_INIT(%edi, %eax, %ecx)
3261:	COPY_LOOP_BODY(%edi, %eax, %ecx)
327	jnz	1b
328	mfence				/* order the movnti stores */
329
330	xorl	%eax, %eax
331_kcopy_nta_copyerr:
332	popl	%ecx			/* saved lofault value */
333	popl	%edi
334	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
335	popl	%esi
336	leave
337	ret
338	SET_SIZE(do_copy_fault_nta)
339	SET_SIZE(kcopy_nta)
340
341#undef	ARG_FROM
342#undef	ARG_TO
343#undef	ARG_COUNT
344
345#endif	/* __i386 */
346#endif	/* __lint */
347
348#if defined(__lint)
349
350/* ARGSUSED */
351void
352bcopy(const void *from, void *to, size_t count)
353{}
354
355#else	/* __lint */
356
357#if defined(__amd64)
358
359	ENTRY(bcopy)
360#ifdef DEBUG
361	orq	%rdx, %rdx		/* %rdx = count */
362	jz	1f
363	movq	kernelbase(%rip), %rax
364	cmpq	%rax, %rdi		/* %rdi = from */
365	jb	0f
366	cmpq	%rax, %rsi		/* %rsi = to */
367	jnb	1f
3680:	leaq	.bcopy_panic_msg(%rip), %rdi
369	jmp	call_panic		/* setup stack and call panic */
3701:
371#endif
	/*
	 * Leaf copy: no stack frame, no fault protection.  Copies
	 * forward (DF assumed clear); overlapping moves not handled.
	 */
372do_copy:
373	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
374	movq	%rdx, %rcx		/* %rcx = count */
375	shrq	$3, %rcx		/* 8-byte word count */
376	rep
377	  smovq
378
379	movq	%rdx, %rcx
380	andq	$7, %rcx		/* bytes left over */
381	rep
382	  smovb
383	ret
384
385#ifdef DEBUG
386	/*
387	 * Setup frame on the run-time stack. The end of the input argument
388	 * area must be aligned on a 16 byte boundary. The stack pointer %rsp,
389	 * always points to the end of the latest allocated stack frame.
390	 * panic(const char *format, ...) is a varargs function. When a
391	 * function taking variable arguments is called, %rax must be set
392	 * to eight times the number of floating point parameters passed
393	 * to the function in SSE registers.
394	 */
395call_panic:
396	pushq	%rbp			/* align stack properly */
397	movq	%rsp, %rbp
398	xorl	%eax, %eax		/* no variable arguments */
399	call	panic			/* %rdi = format string */
400#endif
401	SET_SIZE(bcopy)
402
403#elif defined(__i386)
404
405#define	ARG_FROM	4
406#define	ARG_TO		8
407#define	ARG_COUNT	12
408
409	ENTRY(bcopy)
410#ifdef DEBUG
411	movl	ARG_COUNT(%esp), %eax
412	orl	%eax, %eax
413	jz	1f
414	movl	kernelbase, %eax
415	cmpl	%eax, ARG_FROM(%esp)
416	jb	0f
417	cmpl	%eax, ARG_TO(%esp)
418	jnb	1f
4190:	pushl	%ebp
420	movl	%esp, %ebp
421	pushl	$.bcopy_panic_msg
422	call	panic
4231:
424#endif
	/*
	 * Leaf copy, no stack frame; the caller's %esi/%edi are parked
	 * in %eax/%edx (safe since no calls are made) and restored
	 * before returning.
	 */
425do_copy:
426	movl	%esi, %eax		/* save registers */
427	movl	%edi, %edx
428	movl	ARG_COUNT(%esp), %ecx
429	movl	ARG_FROM(%esp), %esi
430	movl	ARG_TO(%esp), %edi
431
432	shrl	$2, %ecx		/* word count */
433	rep
434	  smovl
435	movl	ARG_COUNT(%esp), %ecx
436	andl	$3, %ecx		/* bytes left over */
437	rep
438	  smovb
439	movl	%eax, %esi		/* restore registers */
440	movl	%edx, %edi
441	ret
442	SET_SIZE(bcopy)
443
444#undef	ARG_COUNT
445#undef	ARG_FROM
446#undef	ARG_TO
447
448#endif	/* __i386 */
449#endif	/* __lint */
450
451
452/*
453 * Zero a block of storage, returning an error code if we
454 * take a kernel pagefault which cannot be resolved.
455 * Returns errno value on pagefault error, 0 if all ok
456 */
457
458#if defined(__lint)
459
460/* ARGSUSED */
461int
462kzero(void *addr, size_t count)
463{ return (0); }
464
465#else	/* __lint */
466
467#if defined(__amd64)
468
469	ENTRY(kzero)
470#ifdef DEBUG
471        cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
472        jnb	0f
473        leaq	.kzero_panic_msg(%rip), %rdi
474	jmp	call_panic		/* setup stack and call panic */
4750:
476#endif
477	/*
478	 * pass lofault value as 3rd argument to do_zero_fault
479	 */
480	leaq	_kzeroerr(%rip), %rdx

	/*
	 * Fault-protected zeroing path.
	 * Expects: %rdi = addr, %rsi = count, %rdx = lofault handler.
	 */
482do_zero_fault:
483	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
484	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
485	movq	%rdx, T_LOFAULT(%r9)	/* new lofault */
486
487	movq	%rsi, %rcx		/* get size in bytes */
488	shrq	$3, %rcx		/* count of 8-byte words to zero */
489	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
490	rep
491	  sstoq				/* %rcx = words to clear (%rax=0) */
492
493	movq	%rsi, %rcx
494	andq	$7, %rcx		/* bytes left over */
495	rep
496	  sstob				/* %rcx = residual bytes to clear */
497
498	/*
499	 * A fault during do_zero_fault is indicated through an errno value
500	 * in %rax when we iretq to here.
501	 */
502_kzeroerr:
503	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
504	ret
505	SET_SIZE(kzero)
506
507#elif defined(__i386)
508
509#define	ARG_ADDR	8
510#define	ARG_COUNT	12
511
512	ENTRY(kzero)
513#ifdef DEBUG
514	pushl	%ebp
515	movl	%esp, %ebp
516	movl	kernelbase, %eax
517        cmpl	%eax, ARG_ADDR(%ebp)
518        jnb	0f
519        pushl   $.kzero_panic_msg
520        call    panic
5210:	popl	%ebp
522#endif
523	lea	_kzeroerr, %eax		/* kzeroerr is lofault value */

	/*
	 * Fault-protected zeroing path.  Expects: %eax = lofault handler
	 * address; addr/count are taken from the caller's stack via the
	 * frame set up below.
	 */
525do_zero_fault:
526	pushl	%ebp			/* save stack base */
527	movl	%esp, %ebp		/* set new stack base */
528	pushl	%edi			/* save %edi */
529
530	mov	%gs:CPU_THREAD, %edx
531	movl	T_LOFAULT(%edx), %edi
532	pushl	%edi			/* save the current lofault */
533	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
534
535	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
536	movl	ARG_ADDR(%ebp), %edi	/* %edi <- address of bytes to clear */
537	shrl	$2, %ecx		/* Count of double words to zero */
538	xorl	%eax, %eax		/* sstol val */
539	rep
540	  sstol			/* %ecx contains words to clear (%eax=0) */
541
542	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
543	andl	$3, %ecx		/* do mod 4 */
544	rep
545	  sstob			/* %ecx contains residual bytes to clear */
546
547	/*
548	 * A fault during do_zero_fault is indicated through an errno value
549	 * in %eax when we iret to here.
550	 */
551_kzeroerr:
552	popl	%edi			/* saved lofault value */
553	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
554	popl	%edi
555	popl	%ebp
556	ret
557	SET_SIZE(kzero)
558
559#undef	ARG_ADDR
560#undef	ARG_COUNT
561
562#endif	/* __i386 */
563#endif	/* __lint */
564
565/*
566 * Zero a block of storage.
567 */
568
569#if defined(__lint)
570
571/* ARGSUSED */
572void
573bzero(void *addr, size_t count)
574{}
575
576#else	/* __lint */
577
578#if defined(__amd64)
579
580	ENTRY(bzero)
581#ifdef DEBUG
582	cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
583	jnb	0f
584	leaq	.bzero_panic_msg(%rip), %rdi
585	jmp	call_panic		/* setup stack and call panic */
5860:
587#endif
	/* Leaf zeroing loop: no stack frame, no fault protection. */
588do_zero:
589	movq	%rsi, %rcx		/* get size in bytes */
590	shrq	$3, %rcx		/* count of 8-byte words to zero */
591	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
592	rep
593	  sstoq				/* %rcx = words to clear (%rax=0) */
594
595	movq	%rsi, %rcx
596	andq	$7, %rcx		/* bytes left over */
597	rep
598	  sstob				/* %rcx = residual bytes to clear */
599	ret
600	SET_SIZE(bzero)
601
602#elif defined(__i386)
603
604#define	ARG_ADDR	4
605#define	ARG_COUNT	8
606
607	ENTRY(bzero)
608#ifdef DEBUG
609	movl	kernelbase, %eax
610	cmpl	%eax, ARG_ADDR(%esp)
611	jnb	0f
612	pushl	%ebp
613	movl	%esp, %ebp
614	pushl	$.bzero_panic_msg
615	call	panic
6160:
617#endif
618do_zero:
619	movl	%edi, %edx		/* park caller's %edi in %edx */
620	movl	ARG_COUNT(%esp), %ecx
621	movl	ARG_ADDR(%esp), %edi
622	shrl	$2, %ecx		/* 4-byte word count */
623	xorl	%eax, %eax		/* store value for sstol/sstob */
624	rep
625	  sstol
626	movl	ARG_COUNT(%esp), %ecx
627	andl	$3, %ecx		/* residual bytes */
628	rep
629	  sstob
630	movl	%edx, %edi		/* restore caller's %edi */
631	ret
632	SET_SIZE(bzero)
633
634#undef	ARG_ADDR
635#undef	ARG_COUNT
636
637#endif	/* __i386 */
638#endif	/* __lint */
639
640/*
641 * Transfer data to and from user space -
642 * Note that these routines can cause faults
643 * It is assumed that the kernel has nothing at
644 * less than KERNELBASE in the virtual address space.
645 *
646 * Note that copyin(9F) and copyout(9F) are part of the
647 * DDI/DKI which specifies that they return '-1' on "errors."
648 *
649 * Sigh.
650 *
651 * So there's two extremely similar routines - xcopyin_nta() and
652 * xcopyout_nta() which return the errno that we've faithfully computed.
653 * This allows other callers (e.g. uiomove(9F)) to work correctly.
654 * Given that these are used pretty heavily, we expand the calling
655 * sequences inline for all flavours (rather than making wrappers).
656 */
657
658/*
659 * Copy user data to kernel space.
660 */
661
662#if defined(__lint)
663
664/* ARGSUSED */
665int
666copyin(const void *uaddr, void *kaddr, size_t count)
667{ return (0); }
668
669#else	/* lint */
670
671#if defined(__amd64)
672
673	ENTRY(copyin)
674	pushq	%rbp
675	movq	%rsp, %rbp
676	subq	$32, %rsp
677
678	/*
679	 * save args in case we trap and need to rerun as a copyop
680	 */
681	movq	%rdi, (%rsp)
682	movq	%rsi, 0x8(%rsp)
683	movq	%rdx, 0x10(%rsp)
684
685	movq	kernelbase(%rip), %rax
686#ifdef DEBUG
687	cmpq	%rax, %rsi		/* %rsi = kaddr */
688	jnb	1f
689	leaq	.copyin_panic_msg(%rip), %rdi
690	xorl	%eax, %eax
691	call	panic
6921:
693#endif
694	/*
695	 * pass lofault value as 4th argument to do_copy_fault
696	 */
697	leaq	_copyin_err(%rip), %rcx
698
699	movq	%gs:CPU_THREAD, %r9
700	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
701	jb	do_copy_fault
702	jmp	3f
703
704_copyin_err:
705	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
	/*
	 * Bad uaddr or fault: hand off to the installed copyops vector
	 * if there is one, else return -1 per the copyin(9F) convention.
	 */
7063:
707	movq	T_COPYOPS(%r9), %rax
708	cmpq	$0, %rax
709	jz	2f
710	/*
711	 * reload args for the copyop
712	 */
713	movq	(%rsp), %rdi
714	movq	0x8(%rsp), %rsi
715	movq	0x10(%rsp), %rdx
716	leave
717	jmp	*CP_COPYIN(%rax)
718
7192:	movl	$-1, %eax
720	leave
721	ret
722	SET_SIZE(copyin)
723
724#elif defined(__i386)
725
726#define	ARG_UADDR	4
727#define	ARG_KADDR	8
728
729	ENTRY(copyin)
730	movl	kernelbase, %ecx
731#ifdef DEBUG
732	cmpl	%ecx, ARG_KADDR(%esp)
733	jnb	1f
734	pushl	%ebp
735	movl	%esp, %ebp
736	pushl	$.copyin_panic_msg
737	call	panic
7381:
739#endif
740	lea	_copyin_err, %eax
741
742	movl	%gs:CPU_THREAD, %edx
743	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
744	jb	do_copy_fault
745	jmp	3f
746
	/*
	 * Fault path: unwind do_copy_fault's frame (saved lofault,
	 * %edi, %esi, %ebp — pushed in that order) before the copyops
	 * check below.
	 */
747_copyin_err:
748	popl	%ecx
749	popl	%edi
750	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
751	popl	%esi
752	popl	%ebp
7533:
754	movl	T_COPYOPS(%edx), %eax
755	cmpl	$0, %eax
756	jz	2f
757	jmp	*CP_COPYIN(%eax)
758
7592:	movl	$-1, %eax
760	ret
761	SET_SIZE(copyin)
762
763#undef	ARG_UADDR
764#undef	ARG_KADDR
765
766#endif	/* __i386 */
767#endif	/* __lint */
768
769#if defined(__lint)
770
771/* ARGSUSED */
772int
773xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
774{ return (0); }
775
776#else	/* __lint */
777
778#if defined(__amd64)

	/*
	 * Copy user data to kernel space, optionally with non-temporal
	 * stores.  Unlike copyin(9F) this returns an errno value.
	 * Args: %rdi = uaddr, %rsi = kaddr, %rdx = count,
	 * %rcx = copy_cached hint (non-zero forces the cached path).
	 */
780	ENTRY(xcopyin_nta)
781	pushq	%rbp
782	movq	%rsp, %rbp
783	subq	$32, %rsp
784
785	/*
	 * save args in case we trap and need to rerun as a copyop
	 * %rcx is consumed in this routine so we don't need to save
	 * it.
	 */
790	movq	%rdi, (%rsp)
791	movq	%rsi, 0x8(%rsp)
792	movq	%rdx, 0x10(%rsp)
793
794	movq	kernelbase(%rip), %rax
795#ifdef DEBUG
796	cmpq	%rax, %rsi		/* %rsi = kaddr */
797	jnb	1f
798	leaq	.xcopyin_panic_msg(%rip), %rdi
799	xorl	%eax, %eax
800	call	panic
8011:
802#endif
803	movq	%gs:CPU_THREAD, %r9
804	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
805	jae	4f			/* bad uaddr: set EFAULT, try copyops */
806	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
810	leaq	_xcopyin_err(%rip), %rcx	/* doesn't set rflags */
811	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
815	cmpq	$XCOPY_MIN_SIZE, %rdx
816	jb	do_copy_fault
817
	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
822	movq	%rdi, %r10
823	orq	%rsi, %r10
824	andq	$NTA_ALIGN_MASK, %r10
825	orq	%rdx, %r10
826	andq	$COUNT_ALIGN_MASK, %r10
827	jnz	do_copy_fault
828	jmp	do_copy_fault_nta	/* use non-temporal access */

	/*
	 * uaddr was out of range: return EFAULT rather than the stale
	 * contents of %rax (which still holds kernelbase here), then
	 * fall into the copyops handoff below.
	 */
4:
	movl	$EFAULT, %eax
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
835_xcopyin_err:
836	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
8373:
838	movq	T_COPYOPS(%r9), %r8
839	cmpq	$0, %r8
840	jz	2f
841
	/*
	 * reload args for the copyop
	 */
845	movq	(%rsp), %rdi
846	movq	0x8(%rsp), %rsi
847	movq	0x10(%rsp), %rdx
848	leave
849	jmp	*CP_XCOPYIN(%r8)

8512:	leave				/* return errno in %rax */
852	ret
853	SET_SIZE(xcopyin_nta)
855#elif defined(__i386)
856
857#define	ARG_UADDR	4
858#define	ARG_KADDR	8
859#define	ARG_COUNT	12
860#define	ARG_CACHED	16
861
862	.globl	use_sse_copy

	/*
	 * Copy user data to kernel space, optionally with non-temporal
	 * stores.  Returns an errno value (not the -1 copyin(9F)
	 * convention).
	 */
864	ENTRY(xcopyin_nta)
865	movl	kernelbase, %ecx
866	lea	_xcopyin_err, %eax	/* lofault value */
867	movl	%gs:CPU_THREAD, %edx
868	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
869	jae	4f			/* bad uaddr: set EFAULT, try copyops */
870
871	cmpl	$0, use_sse_copy	/* no sse support */
872	jz	do_copy_fault
873
874	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
875	jnz	do_copy_fault
876
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
880	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
881	jb	do_copy_fault
882
	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
887	movl	ARG_UADDR(%esp), %ecx
888	orl	ARG_KADDR(%esp), %ecx
889	andl	$NTA_ALIGN_MASK, %ecx
890	orl	ARG_COUNT(%esp), %ecx
891	andl	$COUNT_ALIGN_MASK, %ecx
892	jnz	do_copy_fault

894	jmp	do_copy_fault_nta	/* use non-temporal access */

	/*
	 * uaddr was out of range: return EFAULT rather than the stale
	 * contents of %eax (the _xcopyin_err address loaded above),
	 * then fall into the copyops handoff below.
	 */
4:
	movl	$EFAULT, %eax
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
901_xcopyin_err:
902	popl	%ecx			/* unwind do_copy_fault's frame */
903	popl	%edi
904	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
905	popl	%esi
906	popl	%ebp
9073:
908	cmpl	$0, T_COPYOPS(%edx)
909	jz	2f
910	movl	T_COPYOPS(%edx), %eax
911	jmp	*CP_XCOPYIN(%eax)

9132:	rep; 	ret	/* use 2 byte return instruction when branch target */
914			/* AMD Software Optimization Guide - Section 6.2 */
915	SET_SIZE(xcopyin_nta)
916
917#undef	ARG_UADDR
918#undef	ARG_KADDR
919#undef	ARG_COUNT
920#undef	ARG_CACHED
921
922#endif	/* __i386 */
923#endif	/* __lint */
924
925/*
926 * Copy kernel data to user space.
927 */
928
929#if defined(__lint)
930
931/* ARGSUSED */
932int
933copyout(const void *kaddr, void *uaddr, size_t count)
934{ return (0); }
935
936#else	/* __lint */
937
938#if defined(__amd64)
939
940	ENTRY(copyout)
941	pushq	%rbp
942	movq	%rsp, %rbp
943	subq	$32, %rsp
944
945	/*
946	 * save args in case we trap and need to rerun as a copyop
947	 */
948	movq	%rdi, (%rsp)
949	movq	%rsi, 0x8(%rsp)
950	movq	%rdx, 0x10(%rsp)
951
952	movq	kernelbase(%rip), %rax
953#ifdef DEBUG
954	cmpq	%rax, %rdi		/* %rdi = kaddr */
955	jnb	1f
956	leaq	.copyout_panic_msg(%rip), %rdi
957	xorl	%eax, %eax
958	call	panic
9591:
960#endif
961	/*
962	 * pass lofault value as 4th argument to do_copy_fault
963	 */
964	leaq	_copyout_err(%rip), %rcx
965
966	movq	%gs:CPU_THREAD, %r9
967	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
968	jb	do_copy_fault
969	jmp	3f
970
971_copyout_err:
972	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
	/*
	 * Bad uaddr or fault: hand off to the installed copyops vector
	 * if there is one, else return -1 per the copyout(9F) convention.
	 */
9733:
974	movq	T_COPYOPS(%r9), %rax
975	cmpq	$0, %rax
976	jz	2f
977
978	/*
979	 * reload args for the copyop
980	 */
981	movq	(%rsp), %rdi
982	movq	0x8(%rsp), %rsi
983	movq	0x10(%rsp), %rdx
984	leave
985	jmp	*CP_COPYOUT(%rax)
986
9872:	movl	$-1, %eax
988	leave
989	ret
990	SET_SIZE(copyout)
991
992#elif defined(__i386)
993
994#define	ARG_KADDR	4
995#define	ARG_UADDR	8
996
997	ENTRY(copyout)
998	movl	kernelbase, %ecx
999#ifdef DEBUG
1000	cmpl	%ecx, ARG_KADDR(%esp)
1001	jnb	1f
1002	pushl	%ebp
1003	movl	%esp, %ebp
1004	pushl	$.copyout_panic_msg
1005	call	panic
10061:
1007#endif
1008	lea	_copyout_err, %eax
1009	movl	%gs:CPU_THREAD, %edx
1010	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1011	jb	do_copy_fault
1012	jmp	3f
1013
	/*
	 * Fault path: unwind do_copy_fault's frame (saved lofault,
	 * %edi, %esi, %ebp — pushed in that order) before the copyops
	 * check below.
	 */
1014_copyout_err:
1015	popl	%ecx
1016	popl	%edi
1017	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
1018	popl	%esi
1019	popl	%ebp
10203:
1021	movl	T_COPYOPS(%edx), %eax
1022	cmpl	$0, %eax
1023	jz	2f
1024	jmp	*CP_COPYOUT(%eax)
1025
10262:	movl	$-1, %eax
1027	ret
1028	SET_SIZE(copyout)
1029
1030#undef	ARG_UADDR
1031#undef	ARG_KADDR
1032
1033#endif	/* __i386 */
1034#endif	/* __lint */
1035
1036#if defined(__lint)
1037
1038/* ARGSUSED */
1039int
1040xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
1041{ return (0); }
1042
1043#else	/* __lint */
1044
1045#if defined(__amd64)

	/*
	 * Copy kernel data to user space, optionally with non-temporal
	 * stores.  Unlike copyout(9F) this returns an errno value.
	 * Args: %rdi = kaddr, %rsi = uaddr, %rdx = count,
	 * %rcx = copy_cached hint (non-zero forces the cached path).
	 */
1047	ENTRY(xcopyout_nta)
1048	pushq	%rbp
1049	movq	%rsp, %rbp
1050	subq	$32, %rsp
1051
	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
1055	movq	%rdi, (%rsp)
1056	movq	%rsi, 0x8(%rsp)
1057	movq	%rdx, 0x10(%rsp)
1058
1059	movq	kernelbase(%rip), %rax
1060#ifdef DEBUG
1061	cmpq	%rax, %rdi		/* %rdi = kaddr */
1062	jnb	1f
1063	leaq	.xcopyout_panic_msg(%rip), %rdi
1064	xorl	%eax, %eax
1065	call	panic
10661:
1067#endif
1068	movq	%gs:CPU_THREAD, %r9
1069	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
1070	jae	4f			/* bad uaddr: set EFAULT, try copyops */
1071
1072	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
1076	leaq	_xcopyout_err(%rip), %rcx
1077	jnz	do_copy_fault
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
1081	cmpq	$XCOPY_MIN_SIZE, %rdx
1082	jb	do_copy_fault
1083
	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
1088	movq	%rdi, %r10
1089	orq	%rsi, %r10
1090	andq	$NTA_ALIGN_MASK, %r10
1091	orq	%rdx, %r10
1092	andq	$COUNT_ALIGN_MASK, %r10
1093	jnz	do_copy_fault
1094	jmp	do_copy_fault_nta

	/*
	 * uaddr was out of range: return EFAULT rather than the stale
	 * contents of %rax (which still holds kernelbase here), then
	 * fall into the copyops handoff below.
	 */
4:
	movl	$EFAULT, %eax
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
1101_xcopyout_err:
1102	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
11033:
1104	movq	T_COPYOPS(%r9), %r8
1105	cmpq	$0, %r8
1106	jz	2f
1107
	/*
	 * reload args for the copyop
	 */
1111	movq	(%rsp), %rdi
1112	movq	0x8(%rsp), %rsi
1113	movq	0x10(%rsp), %rdx
1114	leave
1115	jmp	*CP_XCOPYOUT(%r8)

11172:	leave				/* return errno in %rax */
1118	ret
1119	SET_SIZE(xcopyout_nta)
1121#elif defined(__i386)
1122
1123#define	ARG_KADDR	4
1124#define	ARG_UADDR	8
1125#define	ARG_COUNT	12
1126#define	ARG_CACHED	16

	/*
	 * Copy kernel data to user space, optionally with non-temporal
	 * stores.  Returns an errno value (not the -1 copyout(9F)
	 * convention).
	 */
1128	ENTRY(xcopyout_nta)
1129	movl	kernelbase, %ecx
1130	lea	_xcopyout_err, %eax	/* lofault value */
1131	movl	%gs:CPU_THREAD, %edx
1132	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1133	jae	4f			/* bad uaddr: set EFAULT, try copyops */
1134
1135	cmpl	$0, use_sse_copy	/* no sse support */
1136	jz	do_copy_fault
1137
1138	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
1139	jnz	do_copy_fault
1140
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes.
	 * (Was "cmpl $XCOPY_MIN_SIZE, %edx", but %edx holds the thread
	 * pointer here — compare the count argument, as xcopyin_nta does.)
	 */
1144	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
1145	jb	do_copy_fault
1146
	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
1151	movl	ARG_UADDR(%esp), %ecx
1152	orl	ARG_KADDR(%esp), %ecx
1153	andl	$NTA_ALIGN_MASK, %ecx
1154	orl	ARG_COUNT(%esp), %ecx
1155	andl	$COUNT_ALIGN_MASK, %ecx
1156	jnz	do_copy_fault
1157	jmp	do_copy_fault_nta	/* use non-temporal access */

	/*
	 * uaddr was out of range: return EFAULT rather than the stale
	 * contents of %eax (the _xcopyout_err address loaded above),
	 * then fall into the copyops handoff below.
	 */
4:
	movl	$EFAULT, %eax
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
1164_xcopyout_err:
	/ restore the original lofault
1166	popl	%ecx
1167	popl	%edi
1168	movl	%ecx, T_LOFAULT(%edx)	/ original lofault
1169	popl	%esi
1170	popl	%ebp
11713:
1172	cmpl	$0, T_COPYOPS(%edx)
1173	jz	2f
1174	movl	T_COPYOPS(%edx), %eax
1175	jmp	*CP_XCOPYOUT(%eax)

11772:	rep;	ret	/* use 2 byte return instruction when branch target */
1178			/* AMD Software Optimization Guide - Section 6.2 */
1179	SET_SIZE(xcopyout_nta)
1180
1181#undef	ARG_UADDR
1182#undef	ARG_KADDR
1183#undef	ARG_COUNT
1184#undef	ARG_CACHED
1185
1186#endif	/* __i386 */
1187#endif	/* __lint */
1188
1189/*
1190 * Copy a null terminated string from one point to another in
1191 * the kernel address space.
1192 */
1193
1194#if defined(__lint)
1195
1196/* ARGSUSED */
1197int
1198copystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
1199{ return (0); }
1200
1201#else	/* __lint */
1202
1203#if defined(__amd64)
1204
1205	ENTRY(copystr)
1206	pushq	%rbp
1207	movq	%rsp, %rbp
1208#ifdef DEBUG
1209	movq	kernelbase(%rip), %rax
1210	cmpq	%rax, %rdi		/* %rdi = from */
1211	jb	0f
1212	cmpq	%rax, %rsi		/* %rsi = to */
1213	jnb	1f
12140:	leaq	.copystr_panic_msg(%rip), %rdi
1215	xorl	%eax, %eax
1216	call	panic
12171:
1218#endif
1219	movq	%gs:CPU_THREAD, %r9
1220	movq	T_LOFAULT(%r9), %r8	/* pass current lofault value as */
1221					/* 5th argument to do_copystr */
	/*
	 * Common string-copy path, also entered from copyinstr.
	 * Expects: %rdi = from, %rsi = to, %rdx = maxlength,
	 * %rcx = lencopied pointer (or NULL), %r8 = lofault handler.
	 * %r8 is reused below as the countdown copy of maxlength.
	 */
1222do_copystr:
1223	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
1224	movq    T_LOFAULT(%r9), %r11	/* save the current lofault */
1225	movq	%r8, T_LOFAULT(%r9)	/* new lofault */
1226
1227	movq	%rdx, %r8		/* save maxlength */
1228
1229	cmpq	$0, %rdx		/* %rdx = maxlength */
1230	je	copystr_enametoolong	/* maxlength == 0 */
1231
1232copystr_loop:
1233	decq	%r8
1234	movb	(%rdi), %al
1235	incq	%rdi
1236	movb	%al, (%rsi)
1237	incq	%rsi
1238	cmpb	$0, %al
1239	je	copystr_null		/* null char */
1240	cmpq	$0, %r8
1241	jne	copystr_loop
1242
1243copystr_enametoolong:
1244	movl	$ENAMETOOLONG, %eax
1245	jmp	copystr_out
1246
1247copystr_null:
1248	xorl	%eax, %eax		/* no error */
1249
1250copystr_out:
1251	cmpq	$0, %rcx		/* want length? */
1252	je	copystr_done		/* no */
1253	subq	%r8, %rdx		/* compute length and store it */
1254	movq	%rdx, (%rcx)
1255
1256copystr_done:
1257	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
1258	leave
1259	ret
1260	SET_SIZE(copystr)
1261
1262#elif defined(__i386)
1263
1264#define	ARG_FROM	8
1265#define	ARG_TO		12
1266#define	ARG_MAXLEN	16
1267#define	ARG_LENCOPIED	20
1268
1269	ENTRY(copystr)
1270#ifdef DEBUG
1271	pushl	%ebp
1272	movl	%esp, %ebp
1273	movl	kernelbase, %eax
1274	cmpl	%eax, ARG_FROM(%esp)
1275	jb	0f
1276	cmpl	%eax, ARG_TO(%esp)
1277	jnb	1f
12780:	pushl	$.copystr_panic_msg
1279	call	panic
12801:	popl	%ebp
1281#endif
1282	/* get the current lofault address */
1283	movl	%gs:CPU_THREAD, %eax
1284	movl	T_LOFAULT(%eax), %eax
	/*
	 * Common string-copy path, also entered from copyinstr.
	 * Expects: %eax = lofault handler address; from/to/maxlength/
	 * lencopied are taken from the caller's stack via the frame
	 * set up below.
	 */
1285do_copystr:
1286	pushl	%ebp			/* setup stack frame */
1287	movl	%esp, %ebp
1288	pushl	%ebx			/* save registers */
1289	pushl	%edi
1290
1291	movl	%gs:CPU_THREAD, %ebx
1292	movl	T_LOFAULT(%ebx), %edi
1293	pushl	%edi			/* save the current lofault */
1294	movl	%eax, T_LOFAULT(%ebx)	/* new lofault */
1295
1296	movl	ARG_MAXLEN(%ebp), %ecx
1297	cmpl	$0, %ecx
1298	je	copystr_enametoolong	/* maxlength == 0 */
1299
1300	movl	ARG_FROM(%ebp), %ebx	/* source address */
1301	movl	ARG_TO(%ebp), %edx	/* destination address */
1302
1303copystr_loop:
1304	decl	%ecx
1305	movb	(%ebx), %al
1306	incl	%ebx
1307	movb	%al, (%edx)
1308	incl	%edx
1309	cmpb	$0, %al
1310	je	copystr_null		/* null char */
1311	cmpl	$0, %ecx
1312	jne	copystr_loop
1313
1314copystr_enametoolong:
1315	movl	$ENAMETOOLONG, %eax
1316	jmp	copystr_out
1317
1318copystr_null:
1319	xorl	%eax, %eax		/* no error */
1320
1321copystr_out:
1322	cmpl	$0, ARG_LENCOPIED(%ebp)	/* want length? */
1323	je	copystr_done		/* no */
1324	movl	ARG_MAXLEN(%ebp), %edx
1325	subl	%ecx, %edx		/* compute length and store it */
1326	movl	ARG_LENCOPIED(%ebp), %ecx
1327	movl	%edx, (%ecx)
1328
1329copystr_done:
1330	popl	%edi			/* saved lofault value */
1331	movl	%gs:CPU_THREAD, %ebx
1332	movl	%edi, T_LOFAULT(%ebx)	/* restore the original lofault */
1333
1334	popl	%edi
1335	popl	%ebx
1336	popl	%ebp
1337	ret
1338	SET_SIZE(copystr)
1339
1340#undef	ARG_FROM
1341#undef	ARG_TO
1342#undef	ARG_MAXLEN
1343#undef	ARG_LENCOPIED
1344
1345#endif	/* __i386 */
1346#endif	/* __lint */
1347
1348/*
1349 * Copy a null terminated string from the user address space into
1350 * the kernel address space.
1351 */
1352
1353#if defined(__lint)
1354
1355/* ARGSUSED */
1356int
1357copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
1358    size_t *lencopied)
1359{ return (0); }
1360
1361#else	/* __lint */
1362
1363#if defined(__amd64)
1364
	ENTRY(copyinstr)
	/*
	 * copyinstr(uaddr, kaddr, maxlength, lencopied) — copy a NUL-
	 * terminated string from user space into the kernel.  Validates
	 * uaddr against kernelbase, then tail-jumps to the shared
	 * do_copystr with the fault handler address in %r8; on a bad
	 * address or fault, falls back to the thread's copyops vector
	 * or returns EFAULT.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kaddr must be a kernel address, or the caller is broken */
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.copyinstr_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyinstr_error(%rip), %r8

	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	do_copystr
	/* uaddr is not a user address: go straight to the copyops path */
	movq	%gs:CPU_THREAD, %r9
	jmp	3f

_copyinstr_error:
	/* fault path: %r11 presumably holds the saved lofault (set in
	   do_copystr, not visible here) — TODO confirm against do_copystr */
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYINSTR(%rax)	/* tail-call the copyop handler */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyinstr)
1418
1419#elif defined(__i386)
1420
#define	ARG_UADDR	4
#define	ARG_KADDR	8

	ENTRY(copyinstr)
	/*
	 * i386 copyinstr: validate uaddr against kernelbase and jump to
	 * the shared do_copystr with the fault handler address in %eax.
	 * On fault or a non-user uaddr, dispatch to the thread's copyops
	 * vector, or return EFAULT if none is installed.
	 */
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* kaddr must be a kernel address, or the caller is broken */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyinstr_panic_msg
	call	panic
1:
#endif
	lea	_copyinstr_error, %eax
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

_copyinstr_error:
	/*
	 * Fault entry: unwind the frame do_copystr built (it pushed
	 * %ebp/%ebx/%edi and then the saved lofault — see do_copystr
	 * above) and restore the original lofault.
	 */
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYINSTR(%eax)	/* tail-call the copyop handler */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyinstr)

#undef	ARG_UADDR
#undef	ARG_KADDR
1461
1462#endif	/* __i386 */
1463#endif	/* __lint */
1464
1465/*
1466 * Copy a null terminated string from the kernel
1467 * address space to the user address space.
1468 */
1469
1470#if defined(__lint)
1471
/* Lint-only stub; the real copyoutstr is the assembly implementation below. */
/* ARGSUSED */
int
copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
    size_t *lencopied)
{ return (0); }
1477
1478#else	/* __lint */
1479
1480#if defined(__amd64)
1481
	ENTRY(copyoutstr)
	/*
	 * copyoutstr(kaddr, uaddr, maxlength, lencopied) — copy a NUL-
	 * terminated string from the kernel to user space.  Mirror image
	 * of copyinstr above: validate uaddr, then tail-jump to
	 * do_copystr with the fault handler in %r8; fall back to the
	 * thread's copyops vector or return EFAULT.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kaddr must be a kernel address, or the caller is broken */
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.copyoutstr_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyoutstr_error(%rip), %r8

	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jb	do_copystr
	/* uaddr is not a user address: go straight to the copyops path */
	movq	%gs:CPU_THREAD, %r9
	jmp	3f

_copyoutstr_error:
	/* fault path: %r11 presumably holds the saved lofault (set in
	   do_copystr, not visible here) — TODO confirm against do_copystr */
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYOUTSTR(%rax)	/* tail-call the copyop handler */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyoutstr)
1534
1535#elif defined(__i386)
1536
#define	ARG_KADDR	4
#define	ARG_UADDR	8

	ENTRY(copyoutstr)
	/*
	 * i386 copyoutstr: mirror image of copyinstr above — validate
	 * uaddr, jump to the shared do_copystr with the fault handler in
	 * %eax; on fault or a non-user uaddr, use the copyops vector or
	 * return EFAULT.
	 */
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* kaddr must be a kernel address, or the caller is broken */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyoutstr_panic_msg
	call	panic
1:
#endif
	lea	_copyoutstr_error, %eax
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

_copyoutstr_error:
	/*
	 * Fault entry: unwind the frame do_copystr built (saved lofault,
	 * %edi, %ebx, %ebp — see do_copystr above) and restore lofault.
	 */
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYOUTSTR(%eax)	/* tail-call the copyop handler */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyoutstr)

#undef	ARG_KADDR
#undef	ARG_UADDR
1577
1578#endif	/* __i386 */
1579#endif	/* __lint */
1580
1581/*
1582 * Since all of the fuword() variants are so similar, we have a macro to spit
1583 * them out.  This allows us to create DTrace-unobservable functions easily.
1584 */
1585
1586#if defined(__lint)
1587
#if defined(__amd64)

/* Lint-only stubs; the real fuword* routines are generated below by FUWORD. */
/* ARGSUSED */
int
fuword64(const void *addr, uint64_t *dst)
{ return (0); }

#endif

/* ARGSUSED */
int
fuword32(const void *addr, uint32_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword16(const void *addr, uint16_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword8(const void *addr, uint8_t *dst)
{ return (0); }
1611
1612#else	/* __lint */
1613
1614#if defined(__amd64)
1615
1616/*
1617 * (Note that we don't save and reload the arguments here
1618 * because their values are not altered in the copy path)
1619 */
1620
/*
 * FUWORD(NAME, INSTR, REG, COPYOP) — generate NAME(addr, dst):
 * fetch one INSTR-sized word from user address %rdi and store it at
 * kernel address %rsi, returning 0 on success.  T_LOFAULT is pointed
 * at _flt_NAME around the user-space load so a fault lands there; the
 * fault path (and any addr >= kernelbase) falls back to the thread's
 * T_COPYOPS vector, or returns -1 if none is installed.
 *
 * NOTE(review): the leaq of _flt_NAME uses an absolute (non-%rip)
 * address — fine for non-PIC kernel text, would need (%rip) for PIC.
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	(%rdi), REG;			\
	movq	$0, T_LOFAULT(%r9);		\
	INSTR	REG, (%rsi);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	2f;				\
	jmp	*COPYOP(%rax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD(fuword64, movq, %rax, CP_FUWORD64)
	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1649
1650#elif defined(__i386)
1651
/*
 * i386 FUWORD(NAME, INSTR, REG, COPYOP) — generate NAME(addr, dst):
 * fetch one INSTR-sized word from user address 4(%esp) into kernel
 * address 8(%esp); returns 0 on success.  T_LOFAULT is pointed at
 * _flt_NAME around the user load; the fault path (and any
 * addr >= kernelbase) falls back to the thread's T_COPYOPS vector,
 * or returns -1 if none is installed.
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	movl	$0, T_LOFAULT(%ecx);		\
	INSTR	REG, (%edx);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	2f;				\
	jmp	*COPYOP(%eax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1682
1683#endif	/* __i386 */
1684
1685#undef	FUWORD
1686
1687#endif	/* __lint */
1688
1689/*
1690 * Set user word.
1691 */
1692
1693#if defined(__lint)
1694
#if defined(__amd64)

/* Lint-only stubs; the real suword* routines are generated below by SUWORD. */
/* ARGSUSED */
int
suword64(void *addr, uint64_t value)
{ return (0); }

#endif

/* ARGSUSED */
int
suword32(void *addr, uint32_t value)
{ return (0); }

/* ARGSUSED */
int
suword16(void *addr, uint16_t value)
{ return (0); }

/* ARGSUSED */
int
suword8(void *addr, uint8_t value)
{ return (0); }
1718
1719#else	/* lint */
1720
1721#if defined(__amd64)
1722
1723/*
1724 * (Note that we don't save and reload the arguments here
1725 * because their values are not altered in the copy path)
1726 */
1727
/*
 * SUWORD(NAME, INSTR, REG, COPYOP) — generate NAME(addr, value):
 * store one INSTR-sized word (value in REG, i.e. %rsi and friends) to
 * user address %rdi, returning 0 on success.  T_LOFAULT is pointed at
 * _flt_NAME around the user store; the fault path (and any
 * addr >= kernelbase) falls back to the thread's T_COPYOPS vector,
 * or returns -1 if none is installed.
 *
 * NOTE(review): like FUWORD, the leaq of _flt_NAME is absolute, not
 * %rip-relative — acceptable for non-PIC kernel text only.
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	REG, (%rdi);			\
	movq	$0, T_LOFAULT(%r9);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	3f;				\
	jmp	*COPYOP(%rax);			\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD(suword64, movq, %rsi, CP_SUWORD64)
	SUWORD(suword32, movl, %esi, CP_SUWORD32)
	SUWORD(suword16, movw, %si, CP_SUWORD16)
	SUWORD(suword8, movb, %sil, CP_SUWORD8)
1755
1756#elif defined(__i386)
1757
/*
 * i386 SUWORD(NAME, INSTR, REG, COPYOP) — generate NAME(addr, value):
 * store one INSTR-sized word (value loaded from 8(%esp) into %edx) to
 * user address 4(%esp); returns 0 on success.  T_LOFAULT is pointed
 * at _flt_NAME around the user store; the fault path (and any
 * addr >= kernelbase) falls back to the thread's T_COPYOPS vector,
 * or returns -1 if none is installed.
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	movl	$0, T_LOFAULT(%ecx);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	3f;				\
	movl	COPYOP(%eax), %ecx;		\
	jmp	*%ecx;				\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD(suword32, movl, %edx, CP_SUWORD32)
	SUWORD(suword16, movw, %dx, CP_SUWORD16)
	SUWORD(suword8, movb, %dl, CP_SUWORD8)
1788
1789#endif	/* __i386 */
1790
1791#undef	SUWORD
1792
1793#endif	/* __lint */
1794
1795#if defined(__lint)
1796
#if defined(__amd64)

/* Lint-only stubs; real versions are generated below by FUWORD_NOERR. */
/*ARGSUSED*/
void
fuword64_noerr(const void *addr, uint64_t *dst)
{}

#endif

/*ARGSUSED*/
void
fuword32_noerr(const void *addr, uint32_t *dst)
{}

/*ARGSUSED*/
void
fuword8_noerr(const void *addr, uint8_t *dst)
{}

/*ARGSUSED*/
void
fuword16_noerr(const void *addr, uint16_t *dst)
{}
1820
1821#else   /* __lint */
1822
1823#if defined(__amd64)
1824
/*
 * FUWORD_NOERR(NAME, INSTR, REG) — generate NAME(addr, dst): fetch an
 * INSTR-sized word from user address %rdi into kernel address %rsi
 * with no local fault handler (callers run under on_fault()).  If
 * addr >= kernelbase it is clamped to kernelbase via cmovnb so the
 * access faults there deterministically.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	(%rdi), REG;			\
	INSTR	REG, (%rsi);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword64_noerr, movq, %rax)
	FUWORD_NOERR(fuword32_noerr, movl, %eax)
	FUWORD_NOERR(fuword16_noerr, movw, %ax)
	FUWORD_NOERR(fuword8_noerr, movb, %al)
1838
1839#elif defined(__i386)
1840
/*
 * i386 FUWORD_NOERR(NAME, INSTR, REG) — generate NAME(addr, dst):
 * fetch an INSTR-sized word from user address 4(%esp) into kernel
 * address 8(%esp) with no local fault handler (callers run under
 * on_fault()).  An addr >= kernelbase is clamped to kernelbase so the
 * access faults there deterministically.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	INSTR	REG, (%edx);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword32_noerr, movl, %ecx)
	FUWORD_NOERR(fuword16_noerr, movw, %cx)
	FUWORD_NOERR(fuword8_noerr, movb, %cl)
1856
1857#endif	/* __i386 */
1858
1859#undef	FUWORD_NOERR
1860
1861#endif	/* __lint */
1862
1863#if defined(__lint)
1864
#if defined(__amd64)

/* Lint-only stubs; real versions are generated below by SUWORD_NOERR. */
/*ARGSUSED*/
void
suword64_noerr(void *addr, uint64_t value)
{}

#endif

/*ARGSUSED*/
void
suword32_noerr(void *addr, uint32_t value)
{}

/*ARGSUSED*/
void
suword16_noerr(void *addr, uint16_t value)
{}

/*ARGSUSED*/
void
suword8_noerr(void *addr, uint8_t value)
{}
1888
1889#else	/* lint */
1890
1891#if defined(__amd64)
1892
/*
 * SUWORD_NOERR(NAME, INSTR, REG) — generate NAME(addr, value): store
 * an INSTR-sized word (value in REG) to user address %rdi with no
 * local fault handler (callers run under on_fault()).  An
 * addr >= kernelbase is clamped to kernelbase via cmovnb so the store
 * faults there deterministically.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	REG, (%rdi);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword64_noerr, movq, %rsi)
	SUWORD_NOERR(suword32_noerr, movl, %esi)
	SUWORD_NOERR(suword16_noerr, movw, %si)
	SUWORD_NOERR(suword8_noerr, movb, %sil)
1905
1906#elif defined(__i386)
1907
/*
 * i386 SUWORD_NOERR(NAME, INSTR, REG) — generate NAME(addr, value):
 * store an INSTR-sized word (value loaded from 8(%esp)) to user
 * address 4(%esp) with no local fault handler (callers run under
 * on_fault()).  An addr >= kernelbase is clamped to kernelbase so the
 * store faults there deterministically.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:						\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword32_noerr, movl, %edx)
	SUWORD_NOERR(suword16_noerr, movw, %dx)
	SUWORD_NOERR(suword8_noerr, movb, %dl)
1923
1924#endif	/* __i386 */
1925
1926#undef	SUWORD_NOERR
1927
1928#endif	/* lint */
1929
1930
1931#if defined(__lint)
1932
/* Lint-only stubs; real symbols are weak aliases defined below. */
/*ARGSUSED*/
int
subyte(void *addr, uchar_t value)
{ return (0); }

/*ARGSUSED*/
void
subyte_noerr(void *addr, uchar_t value)
{}

/*ARGSUSED*/
int
fulword(const void *addr, ulong_t *valuep)
{ return (0); }

/*ARGSUSED*/
void
fulword_noerr(const void *addr, ulong_t *valuep)
{}

/*ARGSUSED*/
int
sulword(void *addr, ulong_t valuep)
{ return (0); }

/*ARGSUSED*/
void
sulword_noerr(void *addr, ulong_t valuep)
{}
1962
1963#else
1964
	/*
	 * subyte/fulword/sulword and their _noerr variants are weak
	 * aliases for the natural-width fuword/suword routines:
	 * byte ops map to the 8-bit versions, and the "long word" ops
	 * map to 64-bit on amd64 and 32-bit on i386.
	 */
	.weak	subyte
	subyte=suword8
	.weak	subyte_noerr
	subyte_noerr=suword8_noerr

#if defined(__amd64)

	.weak	fulword
	fulword=fuword64
	.weak	fulword_noerr
	fulword_noerr=fuword64_noerr
	.weak	sulword
	sulword=suword64
	.weak	sulword_noerr
	sulword_noerr=suword64_noerr

#elif defined(__i386)

	.weak	fulword
	fulword=fuword32
	.weak	fulword_noerr
	fulword_noerr=fuword32_noerr
	.weak	sulword
	sulword=suword32
	.weak	sulword_noerr
	sulword_noerr=suword32_noerr

#endif /* __i386 */
1993
1994#endif /* __lint */
1995
1996#if defined(__lint)
1997
1998/*
1999 * Copy a block of storage - must not overlap (from + len <= to).
2000 * No fault handler installed (to be called under on_fault())
2001 */
2002
/* Lint-only stubs; real versions are the assembly implementations below. */
/* ARGSUSED */
void
copyout_noerr(const void *kfrom, void *uto, size_t count)
{}

/* ARGSUSED */
void
copyin_noerr(const void *ufrom, void *kto, size_t count)
{}

/*
 * Zero a block of storage in user space
 */

/* ARGSUSED */
void
uzero(void *addr, size_t count)
{}

/*
 * copy a block of storage in user space
 */

/* ARGSUSED */
void
ucopy(const void *ufrom, void *uto, size_t ulength)
{}
2030
2031#else /* __lint */
2032
2033#if defined(__amd64)
2034
	ENTRY(copyin_noerr)
	/*
	 * copyin_noerr(ufrom, kto, count): no local fault handler —
	 * callers run under on_fault().  A non-user ufrom is clamped to
	 * kernelbase so the copy faults there deterministically, then we
	 * tail-jump to the common do_copy path.
	 */
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kto */
	jae	1f
	leaq	.cpyin_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rdi		/* ufrom < kernelbase */
	jb	do_copy
	movq	%rax, %rdi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2049
	ENTRY(copyout_noerr)
	/*
	 * copyout_noerr(kfrom, uto, count): no local fault handler —
	 * callers run under on_fault().  A non-user uto is clamped to
	 * kernelbase so the copy faults there deterministically, then we
	 * tail-jump to the common do_copy path.
	 */
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kfrom */
	jae	1f
	leaq	.cpyout_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rsi		/* uto < kernelbase */
	jb	do_copy
	movq	%rax, %rsi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2064
	ENTRY(uzero)
	/*
	 * uzero(addr, count): zero user memory via the common do_zero
	 * path; a non-user addr is clamped to kernelbase so the store
	 * faults there (callers handle faults with on_fault()).
	 */
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	do_zero
	movq	%rax, %rdi	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2072
	ENTRY(ucopy)
	/*
	 * ucopy(ufrom, uto, ulength): user-to-user copy via the common
	 * do_copy path; each non-user address is clamped to kernelbase
	 * so the access faults there (callers use on_fault()).
	 */
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	1f
	movq	%rax, %rdi
1:
	cmpq	%rax, %rsi
	jb	do_copy
	movq	%rax, %rsi
	jmp	do_copy
	SET_SIZE(ucopy)
2084
2085#elif defined(__i386)
2086
	ENTRY(copyin_noerr)
	/*
	 * i386 copyin_noerr(ufrom, kto, count): no local fault handler —
	 * callers run under on_fault().  A non-user ufrom (arg at
	 * 4(%esp)) is clamped to kernelbase so the copy faults there,
	 * then we tail-jump to the common do_copy path.
	 */
	movl	kernelbase, %eax
#ifdef DEBUG
	cmpl	%eax, 8(%esp)
	jae	1f
	pushl	$.cpyin_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 4(%esp)
	jb	do_copy
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2101
	ENTRY(copyout_noerr)
	/*
	 * i386 copyout_noerr(kfrom, uto, count): no local fault handler —
	 * callers run under on_fault().  A non-user uto (arg at 8(%esp))
	 * is clamped to kernelbase so the copy faults there, then we
	 * tail-jump to the common do_copy path.
	 */
	movl	kernelbase, %eax
#ifdef DEBUG
	cmpl	%eax, 4(%esp)
	jae	1f
	pushl	$.cpyout_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2116
	ENTRY(uzero)
	/*
	 * i386 uzero(addr, count): zero user memory via do_zero; a
	 * non-user addr is clamped to kernelbase so the store faults
	 * there (callers handle faults with on_fault()).
	 */
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	do_zero
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2124
	ENTRY(ucopy)
	/*
	 * i386 ucopy(ufrom, uto, ulength): user-to-user copy via
	 * do_copy; each non-user address is clamped to kernelbase so
	 * the access faults there (callers use on_fault()).
	 */
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	1f
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
1:
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)
2136
2137#endif	/* __i386 */
2138
#ifdef DEBUG
	/*
	 * DEBUG-only panic format strings referenced by the sanity
	 * checks in the routines above.  Indentation normalized: the
	 * kzero entry used spaces and several .string directives used a
	 * space instead of the file's tab convention; string contents
	 * are unchanged.
	 */
	.data
.kcopy_panic_msg:
	.string	"kcopy: arguments below kernelbase"
.bcopy_panic_msg:
	.string	"bcopy: arguments below kernelbase"
.kzero_panic_msg:
	.string	"kzero: arguments below kernelbase"
.bzero_panic_msg:
	.string	"bzero: arguments below kernelbase"
.copyin_panic_msg:
	.string	"copyin: kaddr argument below kernelbase"
.xcopyin_panic_msg:
	.string	"xcopyin: kaddr argument below kernelbase"
.copyout_panic_msg:
	.string	"copyout: kaddr argument below kernelbase"
.xcopyout_panic_msg:
	.string	"xcopyout: kaddr argument below kernelbase"
.copystr_panic_msg:
	.string	"copystr: arguments in user space"
.copyinstr_panic_msg:
	.string	"copyinstr: kaddr argument not in kernel address space"
.copyoutstr_panic_msg:
	.string	"copyoutstr: kaddr argument not in kernel address space"
.cpyin_ne_pmsg:
	.string	"copyin_noerr: argument not in kernel address space"
.cpyout_ne_pmsg:
	.string	"copyout_noerr: argument not in kernel address space"
#endif
2168
2169#endif	/* __lint */
2170