xref: /titanic_41/usr/src/uts/intel/ia32/ml/copy.s (revision f2b7ce3eb6db75966c27b08fa312c3158f8dfabf)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
28/*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
29/*         All Rights Reserved						*/
30
31/*       Copyright (c) 1987, 1988 Microsoft Corporation			*/
32/*         All Rights Reserved						*/
33
34#pragma ident	"%Z%%M%	%I%	%E% SMI"
35
36#include <sys/errno.h>
37#include <sys/asm_linkage.h>
38
39#if defined(__lint)
40#include <sys/types.h>
41#include <sys/systm.h>
42#else	/* __lint */
43#include "assym.h"
44#endif	/* __lint */
45
46#define	KCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
47#define	XCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
/*
 * Non-temporal access (NTA) alignment requirement
 */
51#define	NTA_ALIGN_SIZE	4	/* Must be at least 4-byte aligned */
52#define	NTA_ALIGN_MASK	_CONST(NTA_ALIGN_SIZE-1)
53#define	COUNT_ALIGN_SIZE	16	/* Must be at least 16-byte aligned */
54#define	COUNT_ALIGN_MASK	_CONST(COUNT_ALIGN_SIZE-1)
55
56/*
57 * Copy a block of storage, returning an error code if `from' or
58 * `to' takes a kernel pagefault which cannot be resolved.
59 * Returns errno value on pagefault error, 0 if all ok
60 */
61
#if defined(__lint)

/* ARGSUSED */
int
kcopy(const void *from, void *to, size_t count)
{ return (0); }

#else	/* __lint */

	.globl	kernelbase

#if defined(__amd64)

	/*
	 * int kcopy(const void *from, void *to, size_t count)
	 *
	 * amd64 ABI: %rdi = from, %rsi = to, %rdx = count.
	 * Returns 0 in %rax on success; if a kernel pagefault occurs,
	 * the trap handler places an errno value in %rax and resumes
	 * at _kcopy_copyerr (installed via T_LOFAULT below).
	 */
	ENTRY(kcopy)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	/* both addresses must be kernel (>= kernelbase) addresses */
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi 		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.kcopy_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* no FP args to varargs panic() */
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_kcopy_copyerr(%rip), %rcx
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */

	/*
	 * Shared copy engine; also entered directly from copyin(),
	 * copyout(), xcopyin_nta() and xcopyout_nta().  On entry:
	 * %rdi = from, %rsi = to, %rdx = count, %rcx = lofault label,
	 * %r9 = curthread.  Caller has already pushed a frame, so the
	 * epilogue below uses leave/ret.
	 */
do_copy_fault:
	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%rcx, T_LOFAULT(%r9)	/* new lofault */

	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
	movq	%rdx, %rcx		/* %rcx = count */
	shrq	$3, %rcx		/* 8-byte word count */
	rep
	  smovq

	movq	%rdx, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  smovb
	xorl	%eax, %eax		/* return 0 (success) */

	/*
	 * A fault during do_copy_fault is indicated through an errno value
	 * in %rax and we iretq from the trap handler to here.
	 */
_kcopy_copyerr:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
	leave
	ret
	SET_SIZE(kcopy)

#elif defined(__i386)

	/* stack offsets of the args once %ebp has been pushed */
#define	ARG_FROM	8
#define	ARG_TO		12
#define	ARG_COUNT	16

	/*
	 * int kcopy(const void *from, void *to, size_t count)
	 *
	 * i386: args on the stack.  %eax carries the lofault label and
	 * %edx the curthread pointer into the shared do_copy_fault code.
	 */
	ENTRY(kcopy)
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	kernelbase, %eax
	cmpl	%eax, ARG_FROM(%ebp)
	jb	0f
	cmpl	%eax, ARG_TO(%ebp)
	jnb	1f
0:	pushl	$.kcopy_panic_msg
	call	panic
1:	popl	%ebp
#endif
	lea	_kcopy_copyerr, %eax	/* lofault value */
	movl	%gs:CPU_THREAD, %edx

	/*
	 * Shared copy engine; also entered directly from copyin(),
	 * copyout(), xcopyin_nta() and xcopyout_nta().  On entry:
	 * %eax = lofault label, %edx = curthread; from/to/count are
	 * on the stack at the ARG_* offsets after the push below.
	 */
do_copy_fault:
	pushl	%ebp
	movl	%esp, %ebp		/* setup stack frame */
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	movl	ARG_COUNT(%ebp), %ecx
	movl	ARG_FROM(%ebp), %esi
	movl	ARG_TO(%ebp), %edi
	shrl	$2, %ecx		/* word count */
	rep
	  smovl
	movl	ARG_COUNT(%ebp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  smovb
	xorl	%eax, %eax		/* return 0 (success) */

	/*
	 * A fault during do_copy_fault is indicated through an errno value
	 * in %eax and we iret from the trap handler to here.
	 */
_kcopy_copyerr:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	popl	%ebp
	ret
	SET_SIZE(kcopy)

#undef	ARG_FROM
#undef	ARG_TO
#undef	ARG_COUNT

#endif	/* __i386 */
#endif	/* __lint */
184
#if defined(__lint)

/*
 * Copy a block of storage.  Similar to kcopy but uses non-temporal
 * instructions.
 */

/* ARGSUSED */
int
kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * Bias src/dst by cnt and negate the quadword index so the
	 * copy loop can count the index up toward zero.
	 */
#define	COPY_LOOP_INIT(src, dst, cnt)	\
	addq	cnt, src;			\
	addq	cnt, dst;			\
	shrq	$3, cnt;			\
	neg	cnt

	/* Copy 16 bytes per loop.  Uses %rax and %r8 */
#define	COPY_LOOP_BODY(src, dst, cnt)	\
	prefetchnta	0x100(src, cnt, 8);	\
	movq	(src, cnt, 8), %rax;		\
	movq	0x8(src, cnt, 8), %r8;		\
	movnti	%rax, (dst, cnt, 8);		\
	movnti	%r8, 0x8(dst, cnt, 8);		\
	addq	$2, cnt

	/*
	 * int kcopy_nta(const void *from, void *to, size_t count,
	 *     int copy_cached)
	 *
	 * amd64 ABI: %rdi = from, %rsi = to, %rdx = count,
	 * %rcx = copy_cached hint.  Falls back to the cached
	 * do_copy_fault path when the hint is set, the count is small,
	 * or the operands are not suitably aligned.  Returns 0 or an
	 * errno delivered via the lofault mechanism.
	 */
	ENTRY(kcopy_nta)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi 		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.kcopy_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif

	movq	%gs:CPU_THREAD, %r9
	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_kcopy_nta_copyerr(%rip), %rcx	/* doesn't set rflags */
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= KCOPY_MIN_SIZE
	 */
	cmpq	$KCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault

	/*
	 * Non-temporal copy engine; also entered directly from
	 * xcopyin_nta() and xcopyout_nta() with %rcx = lofault label.
	 */
	ALTENTRY(do_copy_fault_nta)
	movq    %gs:CPU_THREAD, %r9     /* %r9 = thread addr */
	movq    T_LOFAULT(%r9), %r11    /* save the current lofault */
	movq    %rcx, T_LOFAULT(%r9)    /* new lofault */

	/*
	 * COPY_LOOP_BODY uses %rax and %r8
	 */
	COPY_LOOP_INIT(%rdi, %rsi, %rdx)
2:	COPY_LOOP_BODY(%rdi, %rsi, %rdx)
	jnz	2b

	/* make the non-temporal stores globally visible before return */
	mfence
	xorl	%eax, %eax		/* return 0 (success) */

_kcopy_nta_copyerr:
	movq	%r11, T_LOFAULT(%r9)    /* restore original lofault */
	leave
	ret
	SET_SIZE(do_copy_fault_nta)
	SET_SIZE(kcopy_nta)

#elif defined(__i386)

	/* stack offsets of the args once %ebp has been pushed */
#define	ARG_FROM	8
#define	ARG_TO		12
#define	ARG_COUNT	16

	/* bias src/dst by cnt, negate the index (scaled by 8 below) */
#define	COPY_LOOP_INIT(src, dst, cnt)	\
	addl	cnt, src;			\
	addl	cnt, dst;			\
	shrl	$3, cnt;			\
	neg	cnt

	/* Copy 16 bytes (four dwords) per loop.  Uses %esi as scratch */
#define	COPY_LOOP_BODY(src, dst, cnt)	\
	prefetchnta	0x100(src, cnt, 8);	\
	movl	(src, cnt, 8), %esi;		\
	movnti	%esi, (dst, cnt, 8);		\
	movl	0x4(src, cnt, 8), %esi;		\
	movnti	%esi, 0x4(dst, cnt, 8);		\
	movl	0x8(src, cnt, 8), %esi;		\
	movnti	%esi, 0x8(dst, cnt, 8);		\
	movl	0xc(src, cnt, 8), %esi;		\
	movnti	%esi, 0xc(dst, cnt, 8);		\
	addl	$2, cnt

	/*
	 * kcopy_nta is not implemented for 32-bit as no performance
	 * improvement was shown.  We simply jump directly to kcopy
	 * and discard the 4 arguments.
	 */
	ENTRY(kcopy_nta)
	jmp	kcopy

	/*
	 * NOTE(review): the lea below is unreachable (it follows an
	 * unconditional jmp and precedes the ALTENTRY point); callers
	 * of do_copy_fault_nta load their own lofault label into %eax.
	 */
	lea	_kcopy_nta_copyerr, %eax	/* lofault value */
	/*
	 * Non-temporal copy engine; entered directly from
	 * xcopyin_nta() and xcopyout_nta() with %eax = lofault label
	 * and from/to/count on the stack.
	 */
	ALTENTRY(do_copy_fault_nta)
	pushl	%ebp
	movl	%esp, %ebp		/* setup stack frame */
	pushl	%esi
	pushl	%edi

	movl	%gs:CPU_THREAD, %edx
	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	/* COPY_LOOP_BODY needs to use %esi */
	movl	ARG_COUNT(%ebp), %ecx
	movl	ARG_FROM(%ebp), %edi
	movl	ARG_TO(%ebp), %eax
	COPY_LOOP_INIT(%edi, %eax, %ecx)
1:	COPY_LOOP_BODY(%edi, %eax, %ecx)
	jnz	1b
	/* make the non-temporal stores globally visible before return */
	mfence

	xorl	%eax, %eax		/* return 0 (success) */
_kcopy_nta_copyerr:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	leave
	ret
	SET_SIZE(do_copy_fault_nta)
	SET_SIZE(kcopy_nta)

#undef	ARG_FROM
#undef	ARG_TO
#undef	ARG_COUNT

#endif	/* __i386 */
#endif	/* __lint */
347
#if defined(__lint)

/* ARGSUSED */
void
bcopy(const void *from, void *to, size_t count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void bcopy(const void *from, void *to, size_t count)
	 *
	 * amd64 ABI: %rdi = from, %rsi = to, %rdx = count.
	 * Plain kernel-to-kernel copy; no lofault protection.
	 */
	ENTRY(bcopy)
#ifdef DEBUG
	/* zero-length copies are allowed regardless of the addresses */
	orq	%rdx, %rdx		/* %rdx = count */
	jz	1f
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.bcopy_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
do_copy:
	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
	movq	%rdx, %rcx		/* %rcx = count */
	shrq	$3, %rcx		/* 8-byte word count */
	rep
	  smovq

	movq	%rdx, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  smovb
	ret

#ifdef DEBUG
	/*
	 * Setup frame on the run-time stack. The end of the input argument
	 * area must be aligned on a 16 byte boundary. The stack pointer %rsp,
	 * always points to the end of the latest allocated stack frame.
	 * panic(const char *format, ...) is a varargs function. When a
	 * function taking variable arguments is called, %rax must be set
	 * to eight times the number of floating point parameters passed
	 * to the function in SSE registers.
	 *
	 * Also reached via jmp from the kzero() and bzero() DEBUG checks.
	 */
call_panic:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* no variable arguments */
	call	panic			/* %rdi = format string */
#endif
	SET_SIZE(bcopy)

#elif defined(__i386)

	/* stack offsets of the args (no frame pointer pushed) */
#define	ARG_FROM	4
#define	ARG_TO		8
#define	ARG_COUNT	12

	/*
	 * void bcopy(const void *from, void *to, size_t count)
	 *
	 * i386: args on the stack; %eax/%edx are used to preserve the
	 * caller's %esi/%edi without building a stack frame.
	 */
	ENTRY(bcopy)
#ifdef DEBUG
	movl	ARG_COUNT(%esp), %eax
	orl	%eax, %eax		/* zero-length copy is always OK */
	jz	1f
	movl	kernelbase, %eax
	cmpl	%eax, ARG_FROM(%esp)
	jb	0f
	cmpl	%eax, ARG_TO(%esp)
	jnb	1f
0:	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.bcopy_panic_msg
	call	panic
1:
#endif
do_copy:
	movl	%esi, %eax		/* save registers */
	movl	%edi, %edx
	movl	ARG_COUNT(%esp), %ecx
	movl	ARG_FROM(%esp), %esi
	movl	ARG_TO(%esp), %edi

	shrl	$2, %ecx		/* word count */
	rep
	  smovl
	movl	ARG_COUNT(%esp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  smovb
	movl	%eax, %esi		/* restore registers */
	movl	%edx, %edi
	ret
	SET_SIZE(bcopy)

#undef	ARG_COUNT
#undef	ARG_FROM
#undef	ARG_TO

#endif	/* __i386 */
#endif	/* __lint */
450
451
452/*
453 * Zero a block of storage, returning an error code if we
454 * take a kernel pagefault which cannot be resolved.
455 * Returns errno value on pagefault error, 0 if all ok
456 */
457
#if defined(__lint)

/* ARGSUSED */
int
kzero(void *addr, size_t count)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int kzero(void *addr, size_t count)
	 *
	 * amd64 ABI: %rdi = addr, %rsi = count.
	 * Returns 0 in %rax on success; on a kernel pagefault the trap
	 * handler places an errno in %rax and resumes at _kzeroerr.
	 */
	ENTRY(kzero)
#ifdef DEBUG
        cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
        jnb	0f
        leaq	.kzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
	/*
	 * pass lofault value as 3rd argument to do_zero_fault
	 */
	leaq	_kzeroerr(%rip), %rdx

	/* zero engine: %rdi = addr, %rsi = count, %rdx = lofault label */
do_zero_fault:
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%rdx, T_LOFAULT(%r9)	/* new lofault */

	movq	%rsi, %rcx		/* get size in bytes */
	shrq	$3, %rcx		/* count of 8-byte words to zero */
	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
	rep
	  sstoq				/* %rcx = words to clear (%rax=0) */

	movq	%rsi, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  sstob				/* %rcx = residual bytes to clear */

	/*
	 * A fault during do_zero_fault is indicated through an errno value
	 * in %rax when we iretq to here.
	 */
_kzeroerr:
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	ret
	SET_SIZE(kzero)

#elif defined(__i386)

	/* stack offsets of the args once %ebp has been pushed */
#define	ARG_ADDR	8
#define	ARG_COUNT	12

	/*
	 * int kzero(void *addr, size_t count)
	 *
	 * i386: args on the stack; %eax carries the lofault label into
	 * do_zero_fault.
	 */
	ENTRY(kzero)
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	kernelbase, %eax
        cmpl	%eax, ARG_ADDR(%ebp)
        jnb	0f
        pushl   $.kzero_panic_msg
        call    panic
0:	popl	%ebp
#endif
	lea	_kzeroerr, %eax		/* kzeroerr is lofault value */

	/* zero engine: %eax = lofault label; addr/count on the stack */
do_zero_fault:
	pushl	%ebp			/* save stack base */
	movl	%esp, %ebp		/* set new stack base */
	pushl	%edi			/* save %edi */

	mov	%gs:CPU_THREAD, %edx
	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	movl	ARG_ADDR(%ebp), %edi	/* %edi <- address of bytes to clear */
	shrl	$2, %ecx		/* Count of double words to zero */
	xorl	%eax, %eax		/* sstol val */
	rep
	  sstol			/* %ecx contains words to clear (%eax=0) */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	andl	$3, %ecx		/* do mod 4 */
	rep
	  sstob			/* %ecx contains residual bytes to clear */

	/*
	 * A fault during do_zero_fault is indicated through an errno value
	 * in %eax when we iret to here.
	 */
_kzeroerr:
	popl	%edi			/* saved lofault */
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%edi
	popl	%ebp
	ret
	SET_SIZE(kzero)

#undef	ARG_ADDR
#undef	ARG_COUNT

#endif	/* __i386 */
#endif	/* __lint */
564
565/*
566 * Zero a block of storage.
567 */
568
#if defined(__lint)

/* ARGSUSED */
void
bzero(void *addr, size_t count)
{}

#else	/* __lint */

#if defined(__amd64)

	/*
	 * void bzero(void *addr, size_t count)
	 *
	 * amd64 ABI: %rdi = addr, %rsi = count.
	 * Plain kernel zeroing; no lofault protection.
	 */
	ENTRY(bzero)
#ifdef DEBUG
	cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
	jnb	0f
	leaq	.bzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
do_zero:
	movq	%rsi, %rcx		/* get size in bytes */
	shrq	$3, %rcx		/* count of 8-byte words to zero */
	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
	rep
	  sstoq				/* %rcx = words to clear (%rax=0) */

	movq	%rsi, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  sstob				/* %rcx = residual bytes to clear */
	ret
	SET_SIZE(bzero)

#elif defined(__i386)

	/* stack offsets of the args (no frame pointer pushed) */
#define	ARG_ADDR	4
#define	ARG_COUNT	8

	/*
	 * void bzero(void *addr, size_t count)
	 *
	 * i386: args on the stack; %edx preserves the caller's %edi.
	 */
	ENTRY(bzero)
#ifdef DEBUG
	movl	kernelbase, %eax
	cmpl	%eax, ARG_ADDR(%esp)
	jnb	0f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.bzero_panic_msg
	call	panic
0:
#endif
do_zero:
	movl	%edi, %edx		/* save %edi */
	movl	ARG_COUNT(%esp), %ecx
	movl	ARG_ADDR(%esp), %edi
	shrl	$2, %ecx		/* count of 4-byte words to zero */
	xorl	%eax, %eax		/* store value (zero) */
	rep
	  sstol
	movl	ARG_COUNT(%esp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  sstob
	movl	%edx, %edi		/* restore %edi */
	ret
	SET_SIZE(bzero)

#undef	ARG_ADDR
#undef	ARG_COUNT

#endif	/* __i386 */
#endif	/* __lint */
639
640/*
641 * Transfer data to and from user space -
642 * Note that these routines can cause faults
643 * It is assumed that the kernel has nothing at
644 * less than KERNELBASE in the virtual address space.
645 *
646 * Note that copyin(9F) and copyout(9F) are part of the
647 * DDI/DKI which specifies that they return '-1' on "errors."
648 *
649 * Sigh.
650 *
651 * So there's two extremely similar routines - xcopyin_nta() and
652 * xcopyout_nta() which return the errno that we've faithfully computed.
653 * This allows other callers (e.g. uiomove(9F)) to work correctly.
654 * Given that these are used pretty heavily, we expand the calling
655 * sequences inline for all flavours (rather than making wrappers).
656 */
657
658/*
659 * Copy user data to kernel space.
660 */
661
#if defined(__lint)

/* ARGSUSED */
int
copyin(const void *uaddr, void *kaddr, size_t count)
{ return (0); }

#else	/* lint */

#if defined(__amd64)

	/*
	 * int copyin(const void *uaddr, void *kaddr, size_t count)
	 *
	 * amd64 ABI: %rdi = uaddr, %rsi = kaddr, %rdx = count.
	 * DDI semantics: returns 0 on success, -1 on error.  On a
	 * fault (or a user address >= kernelbase) the operation is
	 * retried via the thread's T_COPYOPS vector if one is set.
	 */
	ENTRY(copyin)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.copyin_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_copyin_err(%rip), %rcx

	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad address: try copyops */

_copyin_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f
	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_COPYIN(%rax)	/* tail-call the copyop */

2:	movl	$-1, %eax		/* DDI error return */
	leave
	ret
	SET_SIZE(copyin)

#elif defined(__i386)

	/* stack offsets of the args (no frame pointer pushed) */
#define	ARG_UADDR	4
#define	ARG_KADDR	8

	/*
	 * int copyin(const void *uaddr, void *kaddr, size_t count)
	 *
	 * i386: args on the stack; %eax = lofault label and
	 * %edx = curthread into the shared do_copy_fault code.
	 * Returns 0 on success, -1 on error (DDI semantics).
	 */
	ENTRY(copyin)
	movl	kernelbase, %ecx
#ifdef DEBUG
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyin_panic_msg
	call	panic
1:
#endif
	lea	_copyin_err, %eax

	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad address: try copyops */

_copyin_err:
	/* unwind the frame built by do_copy_fault */
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYIN(%eax)	/* tail-call the copyop */

2:	movl	$-1, %eax		/* DDI error return */
	ret
	SET_SIZE(copyin)

#undef	ARG_UADDR
#undef	ARG_KADDR

#endif	/* __i386 */
#endif	/* __lint */
768
#if defined(__lint)

/* ARGSUSED */
int
xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int xcopyin_nta(const void *uaddr, void *kaddr, size_t count,
	 *     int copy_cached)
	 *
	 * amd64 ABI: %rdi = uaddr, %rsi = kaddr, %rdx = count,
	 * %rcx = copy_cached hint.  Returns an errno on fault (via
	 * lofault) rather than -1; large, aligned, uncached requests
	 * use the non-temporal copy engine.
	 */
	ENTRY(xcopyin_nta)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 * %rcx is consumed in this routine so we don't need to save
	 * it.
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.xcopyin_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jae	3f			/* bad address: try copyops */
	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_xcopyin_err(%rip), %rcx	/* doesn't set rflags */
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpq	$XCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
_xcopyin_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %r8
	cmpq	$0, %r8
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_XCOPYIN(%r8)

	/*
	 * NOTE(review): on the bad-address path with no copyops, %rax
	 * still holds kernelbase here (nonzero), not a specific errno;
	 * confirm callers only test for nonzero.
	 */
2:	leave
	ret
	SET_SIZE(xcopyin_nta)

#elif defined(__i386)

	/* stack offsets of the args (no frame pointer pushed) */
#define	ARG_UADDR	4
#define	ARG_KADDR	8
#define	ARG_COUNT	12
#define	ARG_CACHED	16

	.globl	use_sse_copy

	/*
	 * int xcopyin_nta(const void *uaddr, void *kaddr, size_t count,
	 *     int copy_cached)
	 *
	 * i386: args on the stack; %eax = lofault label, %edx = curthread.
	 * Returns an errno on fault; uses do_copy_fault_nta when SSE is
	 * available and the request is large, aligned and uncached.
	 */
	ENTRY(xcopyin_nta)
	movl	kernelbase, %ecx
	lea	_xcopyin_err, %eax
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jae	3f			/* bad address: try copyops */

	cmpl	$0, use_sse_copy	/* no sse support */
	jz	do_copy_fault

	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
	jnz	do_copy_fault

	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movl	ARG_UADDR(%esp), %ecx
	orl	ARG_KADDR(%esp), %ecx
	andl	$NTA_ALIGN_MASK, %ecx
	orl	ARG_COUNT(%esp), %ecx
	andl	$COUNT_ALIGN_MASK, %ecx
	jnz	do_copy_fault

	jmp	do_copy_fault_nta	/* use non-temporal access */

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
_xcopyin_err:
	/* unwind the frame built by the copy engine */
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	cmpl	$0, T_COPYOPS(%edx)
	jz	2f
	movl	T_COPYOPS(%edx), %eax
	jmp	*CP_XCOPYIN(%eax)

	/*
	 * NOTE(review): on the bad-address path with no copyops, %eax
	 * still holds the _xcopyin_err address (nonzero), not a specific
	 * errno; confirm callers only test for nonzero.
	 */
2:	ret
	SET_SIZE(xcopyin_nta)

#undef	ARG_UADDR
#undef	ARG_KADDR
#undef	ARG_COUNT
#undef	ARG_CACHED

#endif	/* __i386 */
#endif	/* __lint */
923
924/*
925 * Copy kernel data to user space.
926 */
927
#if defined(__lint)

/* ARGSUSED */
int
copyout(const void *kaddr, void *uaddr, size_t count)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int copyout(const void *kaddr, void *uaddr, size_t count)
	 *
	 * amd64 ABI: %rdi = kaddr, %rsi = uaddr, %rdx = count.
	 * DDI semantics: returns 0 on success, -1 on error.  On a
	 * fault (or a user address >= kernelbase) the operation is
	 * retried via the thread's T_COPYOPS vector if one is set.
	 */
	ENTRY(copyout)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.copyout_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_copyout_err(%rip), %rcx

	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad address: try copyops */

_copyout_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_COPYOUT(%rax)	/* tail-call the copyop */

2:	movl	$-1, %eax		/* DDI error return */
	leave
	ret
	SET_SIZE(copyout)

#elif defined(__i386)

	/* stack offsets of the args (no frame pointer pushed) */
#define	ARG_KADDR	4
#define	ARG_UADDR	8

	/*
	 * int copyout(const void *kaddr, void *uaddr, size_t count)
	 *
	 * i386: args on the stack; %eax = lofault label and
	 * %edx = curthread into the shared do_copy_fault code.
	 * Returns 0 on success, -1 on error (DDI semantics).
	 */
	ENTRY(copyout)
	movl	kernelbase, %ecx
#ifdef DEBUG
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyout_panic_msg
	call	panic
1:
#endif
	lea	_copyout_err, %eax
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad address: try copyops */

_copyout_err:
	/* unwind the frame built by do_copy_fault */
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYOUT(%eax)	/* tail-call the copyop */

2:	movl	$-1, %eax		/* DDI error return */
	ret
	SET_SIZE(copyout)

#undef	ARG_UADDR
#undef	ARG_KADDR

#endif	/* __i386 */
#endif	/* __lint */
1034
#if defined(__lint)

/* ARGSUSED */
int
xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int xcopyout_nta(const void *kaddr, void *uaddr, size_t count,
	 *     int copy_cached)
	 *
	 * amd64 ABI: %rdi = kaddr, %rsi = uaddr, %rdx = count,
	 * %rcx = copy_cached hint.  Returns an errno on fault (via
	 * lofault) rather than -1; large, aligned, uncached requests
	 * use the non-temporal copy engine.
	 */
	ENTRY(xcopyout_nta)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.xcopyout_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jae	3f			/* bad address: try copyops */

	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_xcopyout_err(%rip), %rcx	/* doesn't set rflags */
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpq	$XCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
_xcopyout_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %r8
	cmpq	$0, %r8
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_XCOPYOUT(%r8)	/* tail-call the copyop */

2:	leave
	ret
	SET_SIZE(xcopyout_nta)

#elif defined(__i386)

	/* stack offsets of the args (no frame pointer pushed) */
#define	ARG_KADDR	4
#define	ARG_UADDR	8
#define	ARG_COUNT	12
#define	ARG_CACHED	16

	/*
	 * int xcopyout_nta(const void *kaddr, void *uaddr, size_t count,
	 *     int copy_cached)
	 *
	 * i386: args on the stack; %eax = lofault label, %edx = curthread.
	 * Returns an errno on fault; uses do_copy_fault_nta when SSE is
	 * available and the request is large, aligned and uncached.
	 */
	ENTRY(xcopyout_nta)
	movl	kernelbase, %ecx
	lea	_xcopyout_err, %eax
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jae	3f			/* bad address: try copyops */

	cmpl	$0, use_sse_copy	/* no sse support */
	jz	do_copy_fault

	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
	jnz	do_copy_fault

	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes.
	 * (Fixed: this previously compared %edx -- the curthread
	 * pointer loaded above -- so the minimum-size guard never
	 * rejected small or zero counts.  Compare the count argument,
	 * exactly as xcopyin_nta does.)
	 */
	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movl	ARG_UADDR(%esp), %ecx
	orl	ARG_KADDR(%esp), %ecx
	andl	$NTA_ALIGN_MASK, %ecx
	orl	ARG_COUNT(%esp), %ecx
	andl	$COUNT_ALIGN_MASK, %ecx
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
_xcopyout_err:
	/* unwind the frame built by the copy engine */
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	popl	%ebp
3:
	cmpl	$0, T_COPYOPS(%edx)
	jz	2f
	movl	T_COPYOPS(%edx), %eax
	jmp	*CP_XCOPYOUT(%eax)	/* tail-call the copyop */

2:	ret
	SET_SIZE(xcopyout_nta)

#undef	ARG_UADDR
#undef	ARG_KADDR
#undef	ARG_COUNT
#undef	ARG_CACHED

#endif	/* __i386 */
#endif	/* __lint */
1186
1187/*
1188 * Copy a null terminated string from one point to another in
1189 * the kernel address space.
1190 */
1191
#if defined(__lint)

/* ARGSUSED */
int
copystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
{ return (0); }

#else	/* __lint */

#if defined(__amd64)

	/*
	 * int copystr(const char *from, char *to, size_t maxlength,
	 *     size_t *lencopied)
	 *
	 * amd64 ABI: %rdi = from, %rsi = to, %rdx = maxlength,
	 * %rcx = lencopied (may be NULL).  Returns 0 on success,
	 * ENAMETOOLONG if maxlength is exhausted before the NUL, or an
	 * errno delivered via the lofault mechanism on a pagefault.
	 */
	ENTRY(copystr)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.copystr_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	movq	T_LOFAULT(%r9), %r8	/* pass current lofault value as */
					/* 5th argument to do_copystr */

	/*
	 * Shared string-copy engine; also entered from copyinstr()
	 * (and its kin) with %r8 = lofault label.  Register roles:
	 * %rdi = src, %rsi = dst, %rdx = maxlength, %rcx = lencopied,
	 * %r8 = remaining count, %r9 = curthread, %r11 = saved lofault.
	 */
do_copystr:
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
	movq    T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%r8, T_LOFAULT(%r9)	/* new lofault */

	movq	%rdx, %r8		/* save maxlength */

	cmpq	$0, %rdx		/* %rdx = maxlength */
	je	copystr_enametoolong	/* maxlength == 0 */

copystr_loop:
	decq	%r8
	movb	(%rdi), %al
	incq	%rdi
	movb	%al, (%rsi)
	incq	%rsi
	cmpb	$0, %al
	je	copystr_null		/* null char */
	cmpq	$0, %r8
	jne	copystr_loop

copystr_enametoolong:
	movl	$ENAMETOOLONG, %eax
	jmp	copystr_out

copystr_null:
	xorl	%eax, %eax		/* no error */

copystr_out:
	cmpq	$0, %rcx		/* want length? */
	je	copystr_done		/* no */
	subq	%r8, %rdx		/* compute length and store it */
	movq	%rdx, (%rcx)

copystr_done:
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	leave
	ret
	SET_SIZE(copystr)

#elif defined(__i386)

	/* stack offsets of the args once %ebp has been pushed */
#define	ARG_FROM	8
#define	ARG_TO		12
#define	ARG_MAXLEN	16
#define	ARG_LENCOPIED	20

	/*
	 * int copystr(const char *from, char *to, size_t maxlength,
	 *     size_t *lencopied)
	 *
	 * i386: args on the stack; %eax carries the lofault label into
	 * the shared do_copystr engine.  Returns 0, ENAMETOOLONG, or an
	 * errno delivered via lofault on a pagefault.
	 */
	ENTRY(copystr)
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	kernelbase, %eax
	cmpl	%eax, ARG_FROM(%esp)
	jb	0f
	cmpl	%eax, ARG_TO(%esp)
	jnb	1f
0:	pushl	$.copystr_panic_msg
	call	panic
1:	popl	%ebp
#endif
	/* get the current lofault address */
	movl	%gs:CPU_THREAD, %eax
	movl	T_LOFAULT(%eax), %eax

	/*
	 * Shared string-copy engine; also entered from copyinstr()
	 * (and its kin) with %eax = lofault label and args on the stack.
	 */
do_copystr:
	pushl	%ebp			/* setup stack frame */
	movl	%esp, %ebp
	pushl	%ebx			/* save registers */
	pushl	%edi

	movl	%gs:CPU_THREAD, %ebx
	movl	T_LOFAULT(%ebx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%ebx)	/* new lofault */

	movl	ARG_MAXLEN(%ebp), %ecx
	cmpl	$0, %ecx
	je	copystr_enametoolong	/* maxlength == 0 */

	movl	ARG_FROM(%ebp), %ebx	/* source address */
	movl	ARG_TO(%ebp), %edx	/* destination address */

copystr_loop:
	decl	%ecx
	movb	(%ebx), %al
	incl	%ebx
	movb	%al, (%edx)
	incl	%edx
	cmpb	$0, %al
	je	copystr_null		/* null char */
	cmpl	$0, %ecx
	jne	copystr_loop

copystr_enametoolong:
	movl	$ENAMETOOLONG, %eax
	jmp	copystr_out

copystr_null:
	xorl	%eax, %eax		/* no error */

copystr_out:
	cmpl	$0, ARG_LENCOPIED(%ebp)	/* want length? */
	je	copystr_done		/* no */
	movl	ARG_MAXLEN(%ebp), %edx
	subl	%ecx, %edx		/* compute length and store it */
	movl	ARG_LENCOPIED(%ebp), %ecx
	movl	%edx, (%ecx)

copystr_done:
	popl	%edi			/* saved lofault */
	movl	%gs:CPU_THREAD, %ebx
	movl	%edi, T_LOFAULT(%ebx)	/* restore the original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(copystr)

#undef	ARG_FROM
#undef	ARG_TO
#undef	ARG_MAXLEN
#undef	ARG_LENCOPIED

#endif	/* __i386 */
#endif	/* __lint */
1345
1346/*
1347 * Copy a null terminated string from the user address space into
1348 * the kernel address space.
1349 */
1350
1351#if defined(__lint)
1352
1353/* ARGSUSED */
1354int
1355copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
1356    size_t *lencopied)
1357{ return (0); }
1358
1359#else	/* __lint */
1360
1361#if defined(__amd64)
1362
	/*
	 * int copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
	 *     size_t *lencopied)
	 *
	 * SysV AMD64 args: %rdi = uaddr, %rsi = kaddr, %rdx = maxlength,
	 * %rcx = lencopied.  Validates that uaddr is a user address, then
	 * tail-jumps to do_copystr with the fault-handler address in %r8.
	 * If the address check fails, or the copy faults and lands in
	 * _copyinstr_error, fall back to the thread's t_copyops
	 * cp_copyinstr routine if one is installed; otherwise return
	 * EFAULT.
	 */
	ENTRY(copyinstr)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp		/* 4 arg-save slots; keeps %rsp 16-aligned */

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kaddr must be a kernel address; panic otherwise */
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.copyinstr_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* %al = 0: no SSE args to varargs panic() */
	call	panic
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyinstr_error(%rip), %r8

	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	do_copystr		/* common case: do the copy */
	movq	%gs:CPU_THREAD, %r9	/* bad uaddr: set up for copyops path */
	jmp	3f

_copyinstr_error:
	/*
	 * Fault during the copy.  do_copystr ran with our %r9 (curthread)
	 * live and, presumably, the previous lofault saved in %r11 —
	 * NOTE(review): confirm against do_copystr, which is outside this
	 * view.
	 */
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f			/* no copyops installed */

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYINSTR(%rax)	/* tail-call the copyop variant */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyinstr)
1416
1417#elif defined(__i386)
1418
/* stack offsets of the first two C arguments (return address at 0(%esp)) */
#define	ARG_UADDR	4
#define	ARG_KADDR	8

	/*
	 * i386 copyinstr(uaddr, kaddr, maxlength, lencopied): validate
	 * uaddr, then tail-jump to do_copystr with the fault handler
	 * address in %eax.  On a bad address or a fault, fall back to
	 * the thread's t_copyops cp_copyinstr, else return EFAULT.
	 */
	ENTRY(copyinstr)
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* kaddr must be a kernel address; panic otherwise */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyinstr_panic_msg
	call	panic
1:
#endif
	lea	_copyinstr_error, %eax	/* lofault value for do_copystr */
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

_copyinstr_error:
	/*
	 * Fault during the copy: unwind the frame do_copystr built
	 * (saved lofault, %edi, %ebx, %ebp — see copystr above) and
	 * restore the original lofault before trying the copyops path.
	 */
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f			/* no copyops installed */
	jmp	*CP_COPYINSTR(%eax)	/* tail-call the copyop variant */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyinstr)

#undef	ARG_UADDR
#undef	ARG_KADDR
1459
1460#endif	/* __i386 */
1461#endif	/* __lint */
1462
/*
 * Copy a null-terminated string from the kernel
 * address space to the user address space.
 */
1467
1468#if defined(__lint)
1469
/*
 * lint stub only — the real copyoutstr is implemented in assembly below;
 * this body is never executed.
 */
/* ARGSUSED */
int
copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
    size_t *lencopied)
{ return (0); }
1475
1476#else	/* __lint */
1477
1478#if defined(__amd64)
1479
	/*
	 * int copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
	 *     size_t *lencopied)
	 *
	 * SysV AMD64 args: %rdi = kaddr, %rsi = uaddr, %rdx = maxlength,
	 * %rcx = lencopied.  Mirror image of copyinstr above: validate
	 * that uaddr is a user address, pass _copyoutstr_error as the
	 * lofault value to do_copystr, and fall back to the thread's
	 * t_copyops cp_copyoutstr (or EFAULT) on a bad address or fault.
	 */
	ENTRY(copyoutstr)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp		/* 4 arg-save slots; keeps %rsp 16-aligned */

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kaddr must be a kernel address; panic otherwise */
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.copyoutstr_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyoutstr_error(%rip), %r8

	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jb	do_copystr		/* common case: do the copy */
	movq	%gs:CPU_THREAD, %r9	/* bad uaddr: set up for copyops path */
	jmp	3f

_copyoutstr_error:
	/*
	 * Fault during the copy; %r9/%r11 are as left by do_copystr
	 * (curthread / saved lofault) — NOTE(review): confirm against
	 * do_copystr, which is outside this view.
	 */
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f			/* no copyops installed */

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYOUTSTR(%rax)	/* tail-call the copyop variant */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyoutstr)
1532
1533#elif defined(__i386)
1534
/* stack offsets of the first two C arguments (return address at 0(%esp)) */
#define	ARG_KADDR	4
#define	ARG_UADDR	8

	/*
	 * i386 copyoutstr(kaddr, uaddr, maxlength, lencopied): mirror of
	 * copyinstr above with source/destination roles swapped.
	 */
	ENTRY(copyoutstr)
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* kaddr must be a kernel address; panic otherwise */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyoutstr_panic_msg
	call	panic
1:
#endif
	lea	_copyoutstr_error, %eax	/* lofault value for do_copystr */
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

_copyoutstr_error:
	/*
	 * Fault during the copy: unwind the frame do_copystr built and
	 * restore the original lofault before trying the copyops path.
	 */
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f			/* no copyops installed */
	jmp	*CP_COPYOUTSTR(%eax)	/* tail-call the copyop variant */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyoutstr)

#undef	ARG_KADDR
#undef	ARG_UADDR
1575
1576#endif	/* __i386 */
1577#endif	/* __lint */
1578
1579/*
1580 * Since all of the fuword() variants are so similar, we have a macro to spit
1581 * them out.  This allows us to create DTrace-unobservable functions easily.
1582 */
1583
1584#if defined(__lint)
1585
1586#if defined(__amd64)
1587
/*
 * lint stubs for the FUWORD-generated assembly routines below;
 * never executed.
 */
/* ARGSUSED */
int
fuword64(const void *addr, uint64_t *dst)
{ return (0); }

#endif

/* ARGSUSED */
int
fuword32(const void *addr, uint32_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword16(const void *addr, uint16_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword8(const void *addr, uint8_t *dst)
{ return (0); }
1609
1610#else	/* __lint */
1611
1612#if defined(__amd64)
1613
1614/*
1615 * (Note that we don't save and reload the arguments here
1616 * because their values are not altered in the copy path)
1617 */
1618
/*
 * FUWORD(NAME, INSTR, REG, COPYOP)
 *
 * Expands to a fetch-user-word routine:
 *	int NAME(const void *addr [%rdi], TYPE *dst [%rsi])
 *
 * If addr is below kernelbase, install _flt_NAME as the thread's
 * lofault handler, load the word with INSTR into REG, clear lofault,
 * store the word to *dst and return 0.  On a fault — or when addr is
 * a kernel address — dispatch to the thread's t_copyops COPYOP entry
 * if one is installed, otherwise return -1.  The fault label and the
 * kernel-address check share the copyops tail at local label 1.
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	(%rdi), REG;			\
	movq	$0, T_LOFAULT(%r9);		\
	INSTR	REG, (%rsi);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	2f;				\
	jmp	*COPYOP(%rax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width */
	FUWORD(fuword64, movq, %rax, CP_FUWORD64)
	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1647
1648#elif defined(__i386)
1649
/*
 * FUWORD(NAME, INSTR, REG, COPYOP) — i386 variant.
 *
 * Same contract as the amd64 version: NAME(addr, dst) fetches one
 * user word, returning 0, or falls back to the thread's t_copyops
 * COPYOP entry / -1 on a kernel address or fault.  Arguments live on
 * the stack: 4(%esp) = addr, 8(%esp) = dst.  REG must avoid %eax and
 * %edx, which hold the two pointers.
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	movl	$0, T_LOFAULT(%ecx);		\
	INSTR	REG, (%edx);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	2f;				\
	jmp	*COPYOP(%eax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width (no fuword64 on 32-bit) */
	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1680
1681#endif	/* __i386 */
1682
1683#undef	FUWORD
1684
1685#endif	/* __lint */
1686
1687/*
1688 * Set user word.
1689 */
1690
1691#if defined(__lint)
1692
1693#if defined(__amd64)
1694
/*
 * lint stubs for the SUWORD-generated assembly routines below;
 * never executed.
 */
/* ARGSUSED */
int
suword64(void *addr, uint64_t value)
{ return (0); }

#endif

/* ARGSUSED */
int
suword32(void *addr, uint32_t value)
{ return (0); }

/* ARGSUSED */
int
suword16(void *addr, uint16_t value)
{ return (0); }

/* ARGSUSED */
int
suword8(void *addr, uint8_t value)
{ return (0); }
1716
1717#else	/* lint */
1718
1719#if defined(__amd64)
1720
1721/*
1722 * (Note that we don't save and reload the arguments here
1723 * because their values are not altered in the copy path)
1724 */
1725
/*
 * SUWORD(NAME, INSTR, REG, COPYOP)
 *
 * Expands to a set-user-word routine:
 *	int NAME(void *addr [%rdi], TYPE value [%rsi])
 *
 * If addr is below kernelbase, install _flt_NAME as the thread's
 * lofault handler, store the value with INSTR, clear lofault and
 * return 0.  On a fault — or when addr is a kernel address —
 * dispatch to the thread's t_copyops COPYOP entry if one is
 * installed, otherwise return -1.
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	REG, (%rdi);			\
	movq	$0, T_LOFAULT(%r9);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	3f;				\
	jmp	*COPYOP(%rax);			\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width; REG is the low slice of %rsi */
	SUWORD(suword64, movq, %rsi, CP_SUWORD64)
	SUWORD(suword32, movl, %esi, CP_SUWORD32)
	SUWORD(suword16, movw, %si, CP_SUWORD16)
	SUWORD(suword8, movb, %sil, CP_SUWORD8)
1753
1754#elif defined(__i386)
1755
/*
 * SUWORD(NAME, INSTR, REG, COPYOP) — i386 variant.
 *
 * Same contract as the amd64 version: NAME(addr, value) stores one
 * word to user space, returning 0, or falls back to the thread's
 * t_copyops COPYOP entry / -1.  Arguments on the stack:
 * 4(%esp) = addr, 8(%esp) = value (loaded into %edx, so REG is a
 * slice of %edx).  The copyop pointer is staged through %ecx before
 * the indirect jump.
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	movl	$0, T_LOFAULT(%ecx);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	3f;				\
	movl	COPYOP(%eax), %ecx;		\
	jmp	*%ecx;				\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width (no suword64 on 32-bit) */
	SUWORD(suword32, movl, %edx, CP_SUWORD32)
	SUWORD(suword16, movw, %dx, CP_SUWORD16)
	SUWORD(suword8, movb, %dl, CP_SUWORD8)
1786
1787#endif	/* __i386 */
1788
1789#undef	SUWORD
1790
1791#endif	/* __lint */
1792
1793#if defined(__lint)
1794
1795#if defined(__amd64)
1796
/*
 * lint stubs for the FUWORD_NOERR-generated assembly routines below;
 * never executed.
 */
/*ARGSUSED*/
void
fuword64_noerr(const void *addr, uint64_t *dst)
{}

#endif

/*ARGSUSED*/
void
fuword32_noerr(const void *addr, uint32_t *dst)
{}

/*ARGSUSED*/
void
fuword8_noerr(const void *addr, uint8_t *dst)
{}

/*ARGSUSED*/
void
fuword16_noerr(const void *addr, uint16_t *dst)
{}
1818
1819#else   /* __lint */
1820
1821#if defined(__amd64)
1822
/*
 * FUWORD_NOERR(NAME, INSTR, REG)
 *
 * Fetch a user word with no error return: NAME(addr [%rdi],
 * dst [%rsi]).  No lofault handler is installed here — the caller is
 * expected to be running under on_fault().  If addr is not below
 * kernelbase it is clamped to kernelbase (cmovnb), so the load
 * faults at the user/kernel boundary rather than reading kernel
 * memory.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	(%rdi), REG;			\
	INSTR	REG, (%rsi);			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width */
	FUWORD_NOERR(fuword64_noerr, movq, %rax)
	FUWORD_NOERR(fuword32_noerr, movl, %eax)
	FUWORD_NOERR(fuword16_noerr, movw, %ax)
	FUWORD_NOERR(fuword8_noerr, movb, %al)
1836
1837#elif defined(__i386)
1838
/*
 * FUWORD_NOERR(NAME, INSTR, REG) — i386 variant.
 *
 * Same contract as the amd64 version; args on the stack
 * (4(%esp) = addr, 8(%esp) = dst).  An addr at or above kernelbase
 * is clamped to kernelbase so the load faults there; caller runs
 * under on_fault().  REG uses %ecx slices since %eax/%edx hold the
 * pointers.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	INSTR	REG, (%edx);			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width */
	FUWORD_NOERR(fuword32_noerr, movl, %ecx)
	FUWORD_NOERR(fuword16_noerr, movw, %cx)
	FUWORD_NOERR(fuword8_noerr, movb, %cl)
1854
1855#endif	/* __i386 */
1856
1857#undef	FUWORD_NOERR
1858
1859#endif	/* __lint */
1860
1861#if defined(__lint)
1862
1863#if defined(__amd64)
1864
/*
 * lint stubs for the SUWORD_NOERR-generated assembly routines below;
 * never executed.
 */
/*ARGSUSED*/
void
suword64_noerr(void *addr, uint64_t value)
{}

#endif

/*ARGSUSED*/
void
suword32_noerr(void *addr, uint32_t value)
{}

/*ARGSUSED*/
void
suword16_noerr(void *addr, uint16_t value)
{}

/*ARGSUSED*/
void
suword8_noerr(void *addr, uint8_t value)
{}
1886
1887#else	/* lint */
1888
1889#if defined(__amd64)
1890
/*
 * SUWORD_NOERR(NAME, INSTR, REG)
 *
 * Store a user word with no error return: NAME(addr [%rdi],
 * value [%rsi]).  Caller runs under on_fault(); a kernel address is
 * clamped to kernelbase so the store faults at the boundary instead
 * of scribbling on kernel memory.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	REG, (%rdi);			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width; REG is the low slice of %rsi */
	SUWORD_NOERR(suword64_noerr, movq, %rsi)
	SUWORD_NOERR(suword32_noerr, movl, %esi)
	SUWORD_NOERR(suword16_noerr, movw, %si)
	SUWORD_NOERR(suword8_noerr, movb, %sil)
1903
1904#elif defined(__i386)
1905
/*
 * SUWORD_NOERR(NAME, INSTR, REG) — i386 variant.
 *
 * Same contract as the amd64 version; args on the stack
 * (4(%esp) = addr, 8(%esp) = value, loaded into %edx so REG is a
 * slice of %edx).  A kernel address is clamped to kernelbase so the
 * store faults there; caller runs under on_fault().
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:						\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	ret;					\
	SET_SIZE(NAME)

	/* one instantiation per access width */
	SUWORD_NOERR(suword32_noerr, movl, %edx)
	SUWORD_NOERR(suword16_noerr, movw, %dx)
	SUWORD_NOERR(suword8_noerr, movb, %dl)
1921
1922#endif	/* __i386 */
1923
1924#undef	SUWORD_NOERR
1925
1926#endif	/* lint */
1927
1928
1929#if defined(__lint)
1930
/*
 * lint stubs for the weak aliases defined below (subyte -> suword8,
 * fulword/sulword -> the natural-word-size fu/suword); never executed.
 */
/*ARGSUSED*/
int
subyte(void *addr, uchar_t value)
{ return (0); }

/*ARGSUSED*/
void
subyte_noerr(void *addr, uchar_t value)
{}

/*ARGSUSED*/
int
fulword(const void *addr, ulong_t *valuep)
{ return (0); }

/*ARGSUSED*/
void
fulword_noerr(const void *addr, ulong_t *valuep)
{}

/*ARGSUSED*/
int
sulword(void *addr, ulong_t valuep)
{ return (0); }

/*ARGSUSED*/
void
sulword_noerr(void *addr, ulong_t valuep)
{}
1960
1961#else
1962
	/* subyte is simply the byte-sized suword */
	.weak	subyte
	subyte=suword8
	.weak	subyte_noerr
	subyte_noerr=suword8_noerr

#if defined(__amd64)

	/* fulword/sulword operate on the natural word: 64-bit here */
	.weak	fulword
	fulword=fuword64
	.weak	fulword_noerr
	fulword_noerr=fuword64_noerr
	.weak	sulword
	sulword=suword64
	.weak	sulword_noerr
	sulword_noerr=suword64_noerr

#elif defined(__i386)

	/* fulword/sulword operate on the natural word: 32-bit here */
	.weak	fulword
	fulword=fuword32
	.weak	fulword_noerr
	fulword_noerr=fuword32_noerr
	.weak	sulword
	sulword=suword32
	.weak	sulword_noerr
	sulword_noerr=suword32_noerr

#endif /* __i386 */
1991
1992#endif /* __lint */
1993
1994#if defined(__lint)
1995
1996/*
1997 * Copy a block of storage - must not overlap (from + len <= to).
1998 * No fault handler installed (to be called under on_fault())
1999 */
2000
/*
 * lint stubs for the _noerr/uzero/ucopy assembly routines below;
 * never executed.
 */
/* ARGSUSED */
void
copyout_noerr(const void *kfrom, void *uto, size_t count)
{}

/* ARGSUSED */
void
copyin_noerr(const void *ufrom, void *kto, size_t count)
{}

/*
 * Zero a block of storage in user space
 */

/* ARGSUSED */
void
uzero(void *addr, size_t count)
{}

/*
 * copy a block of storage in user space
 */

/* ARGSUSED */
void
ucopy(const void *ufrom, void *uto, size_t ulength)
{}
2028
2029#else /* __lint */
2030
2031#if defined(__amd64)
2032
	/*
	 * copyin_noerr(ufrom [%rdi], kto [%rsi], count [%rdx])
	 *
	 * Copy user -> kernel with no fault handler of our own; caller
	 * runs under on_fault().  A non-user source address is clamped
	 * to kernelbase so the copy faults at the boundary.
	 */
	ENTRY(copyin_noerr)
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kto must be a kernel address; panic otherwise */
	cmpq	%rax, %rsi		/* %rsi = kto */
	jae	1f
	leaq	.cpyin_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rdi		/* ufrom < kernelbase */
	jb	do_copy
	movq	%rax, %rdi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2047
	/*
	 * copyout_noerr(kfrom [%rdi], uto [%rsi], count [%rdx])
	 *
	 * Copy kernel -> user with no fault handler of our own; caller
	 * runs under on_fault().  A non-user destination address is
	 * clamped to kernelbase so the copy faults at the boundary.
	 */
	ENTRY(copyout_noerr)
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kfrom must be a kernel address; panic otherwise */
	cmpq	%rax, %rdi		/* %rdi = kfrom */
	jae	1f
	leaq	.cpyout_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rsi		/* uto < kernelbase */
	jb	do_copy
	movq	%rax, %rsi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2062
	/*
	 * uzero(addr [%rdi], count [%rsi]): zero a block of user memory.
	 * No fault handler installed; caller runs under on_fault().  A
	 * kernel address is clamped to kernelbase to force the fault.
	 */
	ENTRY(uzero)
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	do_zero
	movq	%rax, %rdi	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2070
	/*
	 * ucopy(ufrom [%rdi], uto [%rsi], ulength [%rdx]): copy between
	 * two user addresses.  No fault handler installed; caller runs
	 * under on_fault().  Either kernel-range address is clamped to
	 * kernelbase to force the fault.
	 */
	ENTRY(ucopy)
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	1f
	movq	%rax, %rdi	/* force fault at kernelbase */
1:
	cmpq	%rax, %rsi
	jb	do_copy
	movq	%rax, %rsi	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)
2082
2083#elif defined(__i386)
2084
	/*
	 * i386 copyin_noerr(ufrom, kto, count): args on the stack at
	 * 4/8/12(%esp).  Same contract as the amd64 version: no fault
	 * handler of our own (caller under on_fault()); a non-user
	 * source is rewritten in place to kernelbase to force the fault.
	 */
	ENTRY(copyin_noerr)
	movl	kernelbase, %eax
#ifdef DEBUG
	/* kto must be a kernel address; panic otherwise */
	cmpl	%eax, 8(%esp)
	jae	1f
	pushl	$.cpyin_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 4(%esp)
	jb	do_copy
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2099
	/*
	 * i386 copyout_noerr(kfrom, uto, count): args on the stack at
	 * 4/8/12(%esp).  Same contract as the amd64 version: no fault
	 * handler of our own (caller under on_fault()); a non-user
	 * destination is rewritten in place to kernelbase to force the
	 * fault.
	 */
	ENTRY(copyout_noerr)
	movl	kernelbase, %eax
#ifdef DEBUG
	/* kfrom must be a kernel address; panic otherwise */
	cmpl	%eax, 4(%esp)
	jae	1f
	pushl	$.cpyout_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2114
	/*
	 * i386 uzero(addr, count): zero a block of user memory; caller
	 * runs under on_fault().  A kernel address is rewritten in place
	 * to kernelbase to force the fault.
	 */
	ENTRY(uzero)
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	do_zero
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2122
	/*
	 * i386 ucopy(ufrom, uto, ulength): copy between two user
	 * addresses; caller runs under on_fault().  Either kernel-range
	 * address is rewritten in place to kernelbase to force the fault.
	 */
	ENTRY(ucopy)
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	1f
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
1:
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)
2134
2135#endif	/* __i386 */
2136
2137#ifdef DEBUG
2138	.data
2139.kcopy_panic_msg:
2140	.string "kcopy: arguments below kernelbase"
2141.bcopy_panic_msg:
2142	.string "bcopy: arguments below kernelbase"
2143.kzero_panic_msg:
2144        .string "kzero: arguments below kernelbase"
2145.bzero_panic_msg:
2146	.string	"bzero: arguments below kernelbase"
2147.copyin_panic_msg:
2148	.string "copyin: kaddr argument below kernelbase"
2149.xcopyin_panic_msg:
2150	.string	"xcopyin: kaddr argument below kernelbase"
2151.copyout_panic_msg:
2152	.string "copyout: kaddr argument below kernelbase"
2153.xcopyout_panic_msg:
2154	.string	"xcopyout: kaddr argument below kernelbase"
2155.copystr_panic_msg:
2156	.string	"copystr: arguments in user space"
2157.copyinstr_panic_msg:
2158	.string	"copyinstr: kaddr argument not in kernel address space"
2159.copyoutstr_panic_msg:
2160	.string	"copyoutstr: kaddr argument not in kernel address space"
2161.cpyin_ne_pmsg:
2162	.string "copyin_noerr: argument not in kernel address space"
2163.cpyout_ne_pmsg:
2164	.string "copyout_noerr: argument not in kernel address space"
2165#endif
2166
2167#endif	/* __lint */
2168