xref: /titanic_51/usr/src/uts/intel/ia32/ml/copy.s (revision 36615d24946b849e48cedbbafa9adfb4a02b590c)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
27/*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
28/*         All Rights Reserved						*/
29
30/*       Copyright (c) 1987, 1988 Microsoft Corporation			*/
31/*         All Rights Reserved						*/
32
33#pragma ident	"%Z%%M%	%I%	%E% SMI"
34
35#include <sys/errno.h>
36#include <sys/asm_linkage.h>
37
38#if defined(__lint)
39#include <sys/types.h>
40#include <sys/systm.h>
41#else	/* __lint */
42#include "assym.h"
43#endif	/* __lint */
44
45#define	KCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
46#define	XCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
47/*
48 * Non-temporal access (NTA) alignment requirement
49 */
50#define	NTA_ALIGN_SIZE	4	/* Must be at least 4-byte aligned */
51#define	NTA_ALIGN_MASK	_CONST(NTA_ALIGN_SIZE-1)
52#define	COUNT_ALIGN_SIZE	16	/* Must be at least 16-byte aligned */
53#define	COUNT_ALIGN_MASK	_CONST(COUNT_ALIGN_SIZE-1)
54
55/*
56 * Copy a block of storage, returning an error code if `from' or
57 * `to' takes a kernel pagefault which cannot be resolved.
58 * Returns errno value on pagefault error, 0 if all ok
59 */
60
61#if defined(__lint)
62
63/* ARGSUSED */
64int
65kcopy(const void *from, void *to, size_t count)
66{ return (0); }
67
68#else	/* __lint */
69
70	.globl	kernelbase
71	.globl	postbootkernelbase
72
73#if defined(__amd64)
74
/*
 * int kcopy(const void *from, void *to, size_t count)
 *
 * Fault-tolerant kernel-to-kernel copy.  Installs a lofault handler so
 * that an unresolvable pagefault on either address returns an errno
 * (placed in %rax by the trap handler) instead of panicking; returns 0
 * on success.
 * In:   %rdi = from, %rsi = to, %rdx = count	(SysV AMD64)
 * Uses: %rcx (fault handler), %r9 (curthread), %r11 (saved t_lofault)
 */
	ENTRY(kcopy)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	/* both addresses must be kernel addresses (>= postbootkernelbase) */
	cmpq	postbootkernelbase(%rip), %rdi 		/* %rdi = from */
	jb	0f
	cmpq	postbootkernelbase(%rip), %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.kcopy_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* %al = 0 vector args to varargs panic() */
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_kcopy_copyerr(%rip), %rcx
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */

/*
 * Shared fault-protected copy path; copyin/copyout/xcopy*_nta jump here
 * with: %rdi = from, %rsi = to, %rdx = count, %rcx = lofault handler,
 * %r9 = curthread.  Falls through to the caller's handler label on fault.
 */
do_copy_fault:
	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%rcx, T_LOFAULT(%r9)	/* new lofault */

	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
	movq	%rdx, %rcx		/* %rcx = count */
	shrq	$3, %rcx		/* 8-byte word count */
	rep
	  smovq

	movq	%rdx, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  smovb
	xorl	%eax, %eax		/* return 0 (success) */

	/*
	 * A fault during do_copy_fault is indicated through an errno value
	 * in %rax and we iretq from the trap handler to here.
	 */
_kcopy_copyerr:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
	leave
	ret
	SET_SIZE(kcopy)
119
120#elif defined(__i386)
121
122#define	ARG_FROM	8
123#define	ARG_TO		12
124#define	ARG_COUNT	16
125
/*
 * int kcopy(const void *from, void *to, size_t count)	-- ia32 flavour
 *
 * Fault-tolerant kernel-to-kernel copy: returns errno (set by the trap
 * handler in %eax) on an unresolved pagefault, 0 on success.
 * Entry expectations at do_copy_fault (also reached from copyin/copyout):
 * %eax = lofault handler address, %edx = curthread; args on the stack.
 */
	ENTRY(kcopy)
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	postbootkernelbase, %eax
	cmpl	%eax, ARG_FROM(%ebp)
	jb	0f
	cmpl	%eax, ARG_TO(%ebp)
	jnb	1f
0:	pushl	$.kcopy_panic_msg
	call	panic			/* panic() does not return */
1:	popl	%ebp
#endif
	lea	_kcopy_copyerr, %eax	/* lofault value */
	movl	%gs:CPU_THREAD, %edx

do_copy_fault:
	pushl	%ebp
	movl	%esp, %ebp		/* setup stack frame */
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	movl	ARG_COUNT(%ebp), %ecx
	movl	ARG_FROM(%ebp), %esi
	movl	ARG_TO(%ebp), %edi
	shrl	$2, %ecx		/* word count */
	rep
	  smovl
	movl	ARG_COUNT(%ebp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  smovb
	xorl	%eax, %eax		/* return 0 (success) */

	/*
	 * A fault during do_copy_fault is indicated through an errno value
	 * in %eax and we iret from the trap handler to here.
	 */
_kcopy_copyerr:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	popl	%ebp
	ret
	SET_SIZE(kcopy)
176
177#undef	ARG_FROM
178#undef	ARG_TO
179#undef	ARG_COUNT
180
181#endif	/* __i386 */
182#endif	/* __lint */
183
184#if defined(__lint)
185
186/*
187 * Copy a block of storage.  Similar to kcopy but uses non-temporal
188 * instructions.
189 */
190
191/* ARGSUSED */
192int
193kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
194{ return (0); }
195
196#else	/* __lint */
197
198#if defined(__amd64)
199
200#define	COPY_LOOP_INIT(src, dst, cnt)	\
201	addq	cnt, src;			\
202	addq	cnt, dst;			\
203	shrq	$3, cnt;			\
204	neg	cnt
205
206	/* Copy 16 bytes per loop.  Uses %rax and %r8 */
207#define	COPY_LOOP_BODY(src, dst, cnt)	\
208	prefetchnta	0x100(src, cnt, 8);	\
209	movq	(src, cnt, 8), %rax;		\
210	movq	0x8(src, cnt, 8), %r8;		\
211	movnti	%rax, (dst, cnt, 8);		\
212	movnti	%r8, 0x8(dst, cnt, 8);		\
213	addq	$2, cnt
214
/*
 * int kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
 *
 * Like kcopy(), but uses non-temporal (cache-bypassing) stores when
 * copy_cached (%rcx) is 0, the count is >= KCOPY_MIN_SIZE, and source,
 * destination and count meet the NTA/COUNT alignment requirements;
 * otherwise falls back to the regular do_copy_fault path.
 * Returns 0 on success, errno (via lofault) on pagefault.
 */
	ENTRY(kcopy_nta)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	cmpq	postbootkernelbase(%rip), %rdi 		/* %rdi = from */
	jb	0f
	cmpq	postbootkernelbase(%rip), %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.kcopy_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif

	movq	%gs:CPU_THREAD, %r9
	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_kcopy_nta_copyerr(%rip), %rcx	/* doesn't set rflags */
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= KCOPY_MIN_SIZE
	 */
	cmpq	$KCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault

/*
 * Shared non-temporal copy path; xcopyin_nta/xcopyout_nta jump here with
 * %rdi = from, %rsi = to, %rdx = count, %rcx = lofault handler.
 */
	ALTENTRY(do_copy_fault_nta)
	movq    %gs:CPU_THREAD, %r9     /* %r9 = thread addr */
	movq    T_LOFAULT(%r9), %r11    /* save the current lofault */
	movq    %rcx, T_LOFAULT(%r9)    /* new lofault */

	/*
	 * COPY_LOOP_BODY uses %rax and %r8
	 */
	COPY_LOOP_INIT(%rdi, %rsi, %rdx)
2:	COPY_LOOP_BODY(%rdi, %rsi, %rdx)
	jnz	2b

	mfence				/* order movnti stores before return */
	xorl	%eax, %eax		/* return 0 (success) */

_kcopy_nta_copyerr:
	movq	%r11, T_LOFAULT(%r9)    /* restore original lofault */
	leave
	ret
	SET_SIZE(do_copy_fault_nta)
	SET_SIZE(kcopy_nta)
274
275#elif defined(__i386)
276
277#define	ARG_FROM	8
278#define	ARG_TO		12
279#define	ARG_COUNT	16
280
281#define	COPY_LOOP_INIT(src, dst, cnt)	\
282	addl	cnt, src;			\
283	addl	cnt, dst;			\
284	shrl	$3, cnt;			\
285	neg	cnt
286
287#define	COPY_LOOP_BODY(src, dst, cnt)	\
288	prefetchnta	0x100(src, cnt, 8);	\
289	movl	(src, cnt, 8), %esi;		\
290	movnti	%esi, (dst, cnt, 8);		\
291	movl	0x4(src, cnt, 8), %esi;		\
292	movnti	%esi, 0x4(dst, cnt, 8);		\
293	movl	0x8(src, cnt, 8), %esi;		\
294	movnti	%esi, 0x8(dst, cnt, 8);		\
295	movl	0xc(src, cnt, 8), %esi;		\
296	movnti	%esi, 0xc(dst, cnt, 8);		\
297	addl	$2, cnt
298
299	/*
300	 * kcopy_nta is not implemented for 32-bit as no performance
301	 * improvement was shown.  We simply jump directly to kcopy
302	 * and discard the 4 arguments.
303	 */
	/*
	 * kcopy_nta is not implemented for 32-bit as no performance
	 * improvement was shown.  We simply jump directly to kcopy
	 * and discard the 4 arguments.
	 */
	ENTRY(kcopy_nta)
	jmp	kcopy

	/*
	 * NOTE: the lea below is unreachable from kcopy_nta (dead code
	 * after the jmp); do_copy_fault_nta is entered only from
	 * xcopyin_nta/xcopyout_nta with %eax = lofault handler and
	 * %edx = curthread already set up.
	 */
	lea	_kcopy_nta_copyerr, %eax	/* lofault value */
	ALTENTRY(do_copy_fault_nta)
	pushl	%ebp
	movl	%esp, %ebp		/* setup stack frame */
	pushl	%esi
	pushl	%edi

	movl	%gs:CPU_THREAD, %edx
	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	/* COPY_LOOP_BODY needs to use %esi */
	movl	ARG_COUNT(%ebp), %ecx
	movl	ARG_FROM(%ebp), %edi
	movl	ARG_TO(%ebp), %eax
	COPY_LOOP_INIT(%edi, %eax, %ecx)
1:	COPY_LOOP_BODY(%edi, %eax, %ecx)
	jnz	1b
	mfence				/* order movnti stores before return */

	xorl	%eax, %eax		/* return 0 (success) */
_kcopy_nta_copyerr:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	leave
	ret
	SET_SIZE(do_copy_fault_nta)
	SET_SIZE(kcopy_nta)
338
339#undef	ARG_FROM
340#undef	ARG_TO
341#undef	ARG_COUNT
342
343#endif	/* __i386 */
344#endif	/* __lint */
345
346#if defined(__lint)
347
348/* ARGSUSED */
349void
350bcopy(const void *from, void *to, size_t count)
351{}
352
353#else	/* __lint */
354
355#if defined(__amd64)
356
/*
 * void bcopy(const void *from, void *to, size_t count)
 *
 * Plain (non-fault-tolerant) kernel copy.  No lofault protection: a
 * pagefault here is fatal.  %rdi = from, %rsi = to, %rdx = count.
 */
	ENTRY(bcopy)
#ifdef DEBUG
	orq	%rdx, %rdx		/* %rdx = count */
	jz	1f			/* skip address checks if count == 0 */
	cmpq	postbootkernelbase(%rip), %rdi		/* %rdi = from */
	jb	0f
	cmpq	postbootkernelbase(%rip), %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.bcopy_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
do_copy:
	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
	movq	%rdx, %rcx		/* %rcx = count */
	shrq	$3, %rcx		/* 8-byte word count */
	rep
	  smovq

	movq	%rdx, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  smovb
	ret

#ifdef DEBUG
	/*
	 * Setup frame on the run-time stack. The end of the input argument
	 * area must be aligned on a 16 byte boundary. The stack pointer %rsp,
	 * always points to the end of the latest allocated stack frame.
	 * panic(const char *format, ...) is a varargs function. When a
	 * function taking variable arguments is called, %rax must be set
	 * to eight times the number of floating point parameters passed
	 * to the function in SSE registers.
	 */
call_panic:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* no variable arguments */
	call	panic			/* %rdi = format string */
#endif
	SET_SIZE(bcopy)
399
400#elif defined(__i386)
401
402#define	ARG_FROM	4
403#define	ARG_TO		8
404#define	ARG_COUNT	12
405
/*
 * void bcopy(const void *from, void *to, size_t count)	-- ia32 flavour
 *
 * Plain kernel copy with no lofault protection.  Preserves %esi/%edi
 * in %eax/%edx rather than on the stack (leaf routine, no frame).
 */
	ENTRY(bcopy)
#ifdef DEBUG
	movl	ARG_COUNT(%esp), %eax
	orl	%eax, %eax
	jz	1f			/* skip address checks if count == 0 */
	movl	postbootkernelbase, %eax
	cmpl	%eax, ARG_FROM(%esp)
	jb	0f
	cmpl	%eax, ARG_TO(%esp)
	jnb	1f
0:	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.bcopy_panic_msg
	call	panic
1:
#endif
do_copy:
	movl	%esi, %eax		/* save registers */
	movl	%edi, %edx
	movl	ARG_COUNT(%esp), %ecx
	movl	ARG_FROM(%esp), %esi
	movl	ARG_TO(%esp), %edi

	shrl	$2, %ecx		/* word count */
	rep
	  smovl
	movl	ARG_COUNT(%esp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  smovb
	movl	%eax, %esi		/* restore registers */
	movl	%edx, %edi
	ret
	SET_SIZE(bcopy)
440
441#undef	ARG_COUNT
442#undef	ARG_FROM
443#undef	ARG_TO
444
445#endif	/* __i386 */
446#endif	/* __lint */
447
448
449/*
450 * Zero a block of storage, returning an error code if we
451 * take a kernel pagefault which cannot be resolved.
452 * Returns errno value on pagefault error, 0 if all ok
453 */
454
455#if defined(__lint)
456
457/* ARGSUSED */
458int
459kzero(void *addr, size_t count)
460{ return (0); }
461
462#else	/* __lint */
463
464#if defined(__amd64)
465
/*
 * int kzero(void *addr, size_t count)
 *
 * Fault-tolerant zeroing: returns errno (set in %rax by the trap
 * handler) on an unresolved pagefault, 0 on success.
 * In: %rdi = addr, %rsi = count.  Leaf: no frame is built.
 */
	ENTRY(kzero)
#ifdef DEBUG
        cmpq	postbootkernelbase(%rip), %rdi	/* %rdi = addr */
        jnb	0f
        leaq	.kzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
	/*
	 * pass lofault value as 3rd argument to do_zero_fault
	 */
	leaq	_kzeroerr(%rip), %rdx

do_zero_fault:
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%rdx, T_LOFAULT(%r9)	/* new lofault */

	movq	%rsi, %rcx		/* get size in bytes */
	shrq	$3, %rcx		/* count of 8-byte words to zero */
	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
	rep
	  sstoq				/* %rcx = words to clear (%rax=0) */

	movq	%rsi, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  sstob				/* %rcx = residual bytes to clear */

	/*
	 * A fault during do_zero_fault is indicated through an errno value
	 * in %rax when we iretq to here.
	 */
_kzeroerr:
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	ret
	SET_SIZE(kzero)
503
504#elif defined(__i386)
505
506#define	ARG_ADDR	8
507#define	ARG_COUNT	12
508
/*
 * int kzero(void *addr, size_t count)	-- ia32 flavour
 *
 * Fault-tolerant zeroing; errno returned in %eax on pagefault, else 0.
 */
	ENTRY(kzero)
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	postbootkernelbase, %eax
        cmpl	%eax, ARG_ADDR(%ebp)
        jnb	0f
        pushl   $.kzero_panic_msg
        call    panic
0:	popl	%ebp
#endif
	lea	_kzeroerr, %eax		/* kzeroerr is lofault value */

do_zero_fault:
	pushl	%ebp			/* save stack base */
	movl	%esp, %ebp		/* set new stack base */
	pushl	%edi			/* save %edi */

	mov	%gs:CPU_THREAD, %edx
	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	movl	ARG_ADDR(%ebp), %edi	/* %edi <- address of bytes to clear */
	shrl	$2, %ecx		/* Count of double words to zero */
	xorl	%eax, %eax		/* sstol val */
	rep
	  sstol			/* %ecx contains words to clear (%eax=0) */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	andl	$3, %ecx		/* do mod 4 */
	rep
	  sstob			/* %ecx contains residual bytes to clear */

	/*
	 * A fault during do_zero_fault is indicated through an errno value
	 * in %eax when we iret to here.
	 */
_kzeroerr:
	popl	%edi			/* saved lofault */
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%edi			/* restore caller's %edi */
	popl	%ebp
	ret
	SET_SIZE(kzero)
555
556#undef	ARG_ADDR
557#undef	ARG_COUNT
558
559#endif	/* __i386 */
560#endif	/* __lint */
561
562/*
563 * Zero a block of storage.
564 */
565
566#if defined(__lint)
567
568/* ARGSUSED */
569void
570bzero(void *addr, size_t count)
571{}
572
573#else	/* __lint */
574
575#if defined(__amd64)
576
/*
 * void bzero(void *addr, size_t count)
 *
 * Plain zeroing, no lofault protection.  %rdi = addr, %rsi = count.
 */
	ENTRY(bzero)
#ifdef DEBUG
	cmpq	postbootkernelbase(%rip), %rdi	/* %rdi = addr */
	jnb	0f
	leaq	.bzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
do_zero:
	movq	%rsi, %rcx		/* get size in bytes */
	shrq	$3, %rcx		/* count of 8-byte words to zero */
	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
	rep
	  sstoq				/* %rcx = words to clear (%rax=0) */

	movq	%rsi, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  sstob				/* %rcx = residual bytes to clear */
	ret
	SET_SIZE(bzero)
598
599#elif defined(__i386)
600
601#define	ARG_ADDR	4
602#define	ARG_COUNT	8
603
/*
 * void bzero(void *addr, size_t count)	-- ia32 flavour
 *
 * Plain zeroing, no lofault protection; %edi preserved in %edx.
 */
	ENTRY(bzero)
#ifdef DEBUG
	movl	postbootkernelbase, %eax
	cmpl	%eax, ARG_ADDR(%esp)
	jnb	0f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.bzero_panic_msg
	call	panic
0:
#endif
do_zero:
	movl	%edi, %edx		/* save %edi */
	movl	ARG_COUNT(%esp), %ecx
	movl	ARG_ADDR(%esp), %edi
	shrl	$2, %ecx		/* count of 4-byte words to zero */
	xorl	%eax, %eax		/* sstol/sstob store value */
	rep
	  sstol
	movl	ARG_COUNT(%esp), %ecx
	andl	$3, %ecx		/* residual bytes */
	rep
	  sstob
	movl	%edx, %edi		/* restore %edi */
	ret
	SET_SIZE(bzero)
630
631#undef	ARG_ADDR
632#undef	ARG_COUNT
633
634#endif	/* __i386 */
635#endif	/* __lint */
636
637/*
638 * Transfer data to and from user space -
639 * Note that these routines can cause faults
640 * It is assumed that the kernel has nothing at
641 * less than KERNELBASE in the virtual address space.
642 *
643 * Note that copyin(9F) and copyout(9F) are part of the
644 * DDI/DKI which specifies that they return '-1' on "errors."
645 *
646 * Sigh.
647 *
648 * So there's two extremely similar routines - xcopyin_nta() and
649 * xcopyout_nta() which return the errno that we've faithfully computed.
650 * This allows other callers (e.g. uiomove(9F)) to work correctly.
651 * Given that these are used pretty heavily, we expand the calling
652 * sequences inline for all flavours (rather than making wrappers).
653 */
654
655/*
656 * Copy user data to kernel space.
657 */
658
659#if defined(__lint)
660
661/* ARGSUSED */
662int
663copyin(const void *uaddr, void *kaddr, size_t count)
664{ return (0); }
665
666#else	/* lint */
667
668#if defined(__amd64)
669
/*
 * int copyin(const void *uaddr, void *kaddr, size_t count)
 *
 * Copy user data to kernel space.  Per DDI/DKI, returns -1 on error,
 * 0 on success.  On a fault (or uaddr >= kernelbase) the copy is
 * retried through the thread's t_copyops->cp_copyin, if installed.
 * In: %rdi = uaddr, %rsi = kaddr, %rdx = count.
 */
	ENTRY(copyin)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.copyin_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_copyin_err(%rip), %rcx

	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad uaddr: try copyops / fail */

_copyin_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f			/* no copyops: return -1 */
	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_COPYIN(%rax)	/* tail-call the copyop */

2:	movl	$-1, %eax
	leave
	ret
	SET_SIZE(copyin)
720
721#elif defined(__i386)
722
723#define	ARG_UADDR	4
724#define	ARG_KADDR	8
725
/*
 * int copyin(const void *uaddr, void *kaddr, size_t count) -- ia32 flavour
 *
 * Returns -1 on error (DDI/DKI convention), 0 on success; retries via
 * t_copyops->cp_copyin on fault or bad uaddr when copyops are installed.
 * Stack args are still in place for the tail-called copyop.
 */
	ENTRY(copyin)
	movl	kernelbase, %ecx
#ifdef DEBUG
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyin_panic_msg
	call	panic
1:
#endif
	lea	_copyin_err, %eax	/* lofault value */

	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad uaddr: try copyops / fail */

_copyin_err:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f			/* no copyops: return -1 */
	jmp	*CP_COPYIN(%eax)	/* tail-call the copyop */

2:	movl	$-1, %eax
	ret
	SET_SIZE(copyin)
759
760#undef	ARG_UADDR
761#undef	ARG_KADDR
762
763#endif	/* __i386 */
764#endif	/* __lint */
765
766#if defined(__lint)
767
768/* ARGSUSED */
769int
770xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
771{ return (0); }
772
773#else	/* __lint */
774
775#if defined(__amd64)
776
/*
 * int xcopyin_nta(const void *uaddr, void *kaddr, size_t count,
 *     int copy_cached)
 *
 * Copy user data to kernel space, returning a real errno (not -1) so
 * callers such as uiomove() can propagate it.  Uses the non-temporal
 * path when copy_cached == 0 and size/alignment requirements are met;
 * falls back to do_copy_fault otherwise.  Bad uaddr yields EFAULT or a
 * retry through t_copyops->cp_xcopyin.
 * In: %rdi = uaddr, %rsi = kaddr, %rdx = count, %rcx = copy_cached.
 */
	ENTRY(xcopyin_nta)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 * %rcx is consumed in this routine so we don't need to save
	 * it.
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.xcopyin_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jae	4f
	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_xcopyin_err(%rip), %rcx	/* doesn't set rflags */
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpq	$XCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr >= kernelbase */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
_xcopyin_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %r8
	cmpq	$0, %r8
	jz	2f			/* no copyops: return errno in %rax */

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_XCOPYIN(%r8)	/* tail-call the copyop */

2:	leave
	ret
	SET_SIZE(xcopyin_nta)
855
856#elif defined(__i386)
857
858#define	ARG_UADDR	4
859#define	ARG_KADDR	8
860#define	ARG_COUNT	12
861#define	ARG_CACHED	16
862
863	.globl	use_sse_copy
864
/*
 * int xcopyin_nta(const void *uaddr, void *kaddr, size_t count,
 *     int copy_cached)	-- ia32 flavour
 *
 * Returns a real errno (not -1).  Uses do_copy_fault_nta when the CPU
 * has SSE (use_sse_copy), copy_cached == 0, and the size/alignment
 * requirements hold; otherwise the regular do_copy_fault path.
 */
	ENTRY(xcopyin_nta)
	movl	kernelbase, %ecx
	lea	_xcopyin_err, %eax	/* lofault value */
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jae	4f

	cmpl	$0, use_sse_copy	/* no sse support */
	jz	do_copy_fault

	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
	jnz	do_copy_fault

	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movl	ARG_UADDR(%esp), %ecx
	orl	ARG_KADDR(%esp), %ecx
	andl	$NTA_ALIGN_MASK, %ecx
	orl	ARG_COUNT(%esp), %ecx
	andl	$COUNT_ALIGN_MASK, %ecx
	jnz	do_copy_fault

	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr >= kernelbase */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
_xcopyin_err:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	cmpl	$0, T_COPYOPS(%edx)
	jz	2f			/* no copyops: return errno in %eax */
	movl	T_COPYOPS(%edx), %eax
	jmp	*CP_XCOPYIN(%eax)	/* tail-call the copyop */

2:	rep; 	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(xcopyin_nta)
921
922#undef	ARG_UADDR
923#undef	ARG_KADDR
924#undef	ARG_COUNT
925#undef	ARG_CACHED
926
927#endif	/* __i386 */
928#endif	/* __lint */
929
930/*
931 * Copy kernel data to user space.
932 */
933
934#if defined(__lint)
935
936/* ARGSUSED */
937int
938copyout(const void *kaddr, void *uaddr, size_t count)
939{ return (0); }
940
941#else	/* __lint */
942
943#if defined(__amd64)
944
/*
 * int copyout(const void *kaddr, void *uaddr, size_t count)
 *
 * Copy kernel data to user space.  Per DDI/DKI, returns -1 on error,
 * 0 on success; retried via t_copyops->cp_copyout on fault or bad uaddr.
 * In: %rdi = kaddr, %rsi = uaddr, %rdx = count.
 */
	ENTRY(copyout)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.copyout_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_copyout_err(%rip), %rcx

	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad uaddr: try copyops / fail */

_copyout_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f			/* no copyops: return -1 */

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_COPYOUT(%rax)	/* tail-call the copyop */

2:	movl	$-1, %eax
	leave
	ret
	SET_SIZE(copyout)
996
997#elif defined(__i386)
998
999#define	ARG_KADDR	4
1000#define	ARG_UADDR	8
1001
/*
 * int copyout(const void *kaddr, void *uaddr, size_t count) -- ia32 flavour
 *
 * Returns -1 on error (DDI/DKI), 0 on success; retries via
 * t_copyops->cp_copyout when installed.
 */
	ENTRY(copyout)
	movl	kernelbase, %ecx
#ifdef DEBUG
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyout_panic_msg
	call	panic
1:
#endif
	lea	_copyout_err, %eax	/* lofault value */
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f			/* bad uaddr: try copyops / fail */

_copyout_err:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f			/* no copyops: return -1 */
	jmp	*CP_COPYOUT(%eax)	/* tail-call the copyop */

2:	movl	$-1, %eax
	ret
	SET_SIZE(copyout)
1034
1035#undef	ARG_UADDR
1036#undef	ARG_KADDR
1037
1038#endif	/* __i386 */
1039#endif	/* __lint */
1040
1041#if defined(__lint)
1042
1043/* ARGSUSED */
1044int
1045xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
1046{ return (0); }
1047
1048#else	/* __lint */
1049
1050#if defined(__amd64)
1051
/*
 * int xcopyout_nta(const void *kaddr, void *uaddr, size_t count,
 *     int copy_cached)
 *
 * Copy kernel data to user space, returning a real errno so callers
 * like uiomove() can propagate it.  Non-temporal path used when
 * copy_cached (%rcx) == 0 and size/alignment requirements are met.
 * In: %rdi = kaddr, %rsi = uaddr, %rdx = count, %rcx = copy_cached.
 */
	ENTRY(xcopyout_nta)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.xcopyout_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jae	4f

	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_xcopyout_err(%rip), %rcx
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpq	$XCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr >= kernelbase */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
_xcopyout_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %r8
	cmpq	$0, %r8
	jz	2f			/* no copyops: return errno in %rax */

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_XCOPYOUT(%r8)	/* tail-call the copyop */

2:	leave
	ret
	SET_SIZE(xcopyout_nta)
1129
1130#elif defined(__i386)
1131
1132#define	ARG_KADDR	4
1133#define	ARG_UADDR	8
1134#define	ARG_COUNT	12
1135#define	ARG_CACHED	16
1136
/*
 * int xcopyout_nta(const void *kaddr, void *uaddr, size_t count,
 *     int copy_cached)	-- ia32 flavour
 *
 * Returns a real errno (not -1).  Uses do_copy_fault_nta when the CPU
 * has SSE (use_sse_copy), copy_cached == 0, and size/alignment
 * requirements hold; otherwise the regular do_copy_fault path.
 *
 * BUGFIX: the minimum-size test previously compared XCOPY_MIN_SIZE
 * against %edx, which at that point holds the curthread pointer (loaded
 * just above via movl %gs:CPU_THREAD, %edx), not the byte count.  The
 * count lives on the stack; compare ARG_COUNT(%esp) instead, matching
 * the i386 xcopyin_nta and both amd64 variants.
 */
	ENTRY(xcopyout_nta)
	movl	kernelbase, %ecx
	lea	_xcopyout_err, %eax	/* lofault value */
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jae	4f

	cmpl	$0, use_sse_copy	/* no sse support */
	jz	do_copy_fault

	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
	jnz	do_copy_fault

	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movl	ARG_UADDR(%esp), %ecx
	orl	ARG_KADDR(%esp), %ecx
	andl	$NTA_ALIGN_MASK, %ecx
	orl	ARG_COUNT(%esp), %ecx
	andl	$COUNT_ALIGN_MASK, %ecx
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr >= kernelbase */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
_xcopyout_err:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	popl	%ebp
3:
	cmpl	$0, T_COPYOPS(%edx)
	jz	2f			/* no copyops: return errno in %eax */
	movl	T_COPYOPS(%edx), %eax
	jmp	*CP_XCOPYOUT(%eax)	/* tail-call the copyop */

2:	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(xcopyout_nta)
1193
1194#undef	ARG_UADDR
1195#undef	ARG_KADDR
1196#undef	ARG_COUNT
1197#undef	ARG_CACHED
1198
1199#endif	/* __i386 */
1200#endif	/* __lint */
1201
1202/*
1203 * Copy a null terminated string from one point to another in
1204 * the kernel address space.
1205 */
1206
1207#if defined(__lint)
1208
1209/* ARGSUSED */
1210int
1211copystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
1212{ return (0); }
1213
1214#else	/* __lint */
1215
1216#if defined(__amd64)
1217
/*
 * int copystr(const char *from, char *to, size_t maxlength,
 *     size_t *lencopied)
 *
 * Copy a NUL-terminated string between kernel addresses.  Returns 0 on
 * success, ENAMETOOLONG if maxlength is exhausted before the NUL (or is
 * 0), or an errno via the caller-supplied lofault on pagefault.  If
 * lencopied (%rcx) is non-NULL, the number of bytes copied (including
 * the NUL) is stored through it.
 * In:  %rdi = from, %rsi = to, %rdx = maxlength, %rcx = lencopied
 * Uses %r8 = bytes remaining, %r9 = curthread, %r11 = saved t_lofault.
 */
	ENTRY(copystr)
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.copystr_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	movq	T_LOFAULT(%r9), %r8	/* pass current lofault value as */
					/* 5th argument to do_copystr */
/*
 * Shared string-copy path; copyinstr/copyoutstr jump here with their own
 * lofault handler in %r8.
 */
do_copystr:
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
	movq    T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%r8, T_LOFAULT(%r9)	/* new lofault */

	movq	%rdx, %r8		/* save maxlength */

	cmpq	$0, %rdx		/* %rdx = maxlength */
	je	copystr_enametoolong	/* maxlength == 0 */

copystr_loop:
	decq	%r8
	movb	(%rdi), %al
	incq	%rdi
	movb	%al, (%rsi)
	incq	%rsi
	cmpb	$0, %al
	je	copystr_null		/* null char */
	cmpq	$0, %r8
	jne	copystr_loop

copystr_enametoolong:
	movl	$ENAMETOOLONG, %eax
	jmp	copystr_out

copystr_null:
	xorl	%eax, %eax		/* no error */

copystr_out:
	cmpq	$0, %rcx		/* want length? */
	je	copystr_done		/* no */
	subq	%r8, %rdx		/* compute length and store it */
	movq	%rdx, (%rcx)

copystr_done:
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	leave
	ret
	SET_SIZE(copystr)
1274
1275#elif defined(__i386)
1276
1277#define	ARG_FROM	8
1278#define	ARG_TO		12
1279#define	ARG_MAXLEN	16
1280#define	ARG_LENCOPIED	20
1281
/*
 * int copystr(const char *from, char *to, size_t maxlength,
 *     size_t *lencopied)	-- ia32 flavour
 *
 * Returns 0, ENAMETOOLONG, or an errno via lofault; stores bytes copied
 * (including NUL) through lencopied if non-NULL.  do_copystr is shared
 * with copyinstr/copyoutstr, which enter with %eax = lofault handler.
 */
	ENTRY(copystr)
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	kernelbase, %eax
	cmpl	%eax, ARG_FROM(%esp)
	jb	0f
	cmpl	%eax, ARG_TO(%esp)
	jnb	1f
0:	pushl	$.copystr_panic_msg
	call	panic
1:	popl	%ebp
#endif
	/* get the current lofault address */
	movl	%gs:CPU_THREAD, %eax
	movl	T_LOFAULT(%eax), %eax
do_copystr:
	pushl	%ebp			/* setup stack frame */
	movl	%esp, %ebp
	pushl	%ebx			/* save registers */
	pushl	%edi

	movl	%gs:CPU_THREAD, %ebx
	movl	T_LOFAULT(%ebx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%ebx)	/* new lofault */

	movl	ARG_MAXLEN(%ebp), %ecx
	cmpl	$0, %ecx
	je	copystr_enametoolong	/* maxlength == 0 */

	movl	ARG_FROM(%ebp), %ebx	/* source address */
	movl	ARG_TO(%ebp), %edx	/* destination address */

copystr_loop:
	decl	%ecx			/* %ecx = bytes remaining */
	movb	(%ebx), %al
	incl	%ebx
	movb	%al, (%edx)
	incl	%edx
	cmpb	$0, %al
	je	copystr_null		/* null char */
	cmpl	$0, %ecx
	jne	copystr_loop

copystr_enametoolong:
	movl	$ENAMETOOLONG, %eax
	jmp	copystr_out

copystr_null:
	xorl	%eax, %eax		/* no error */

copystr_out:
	cmpl	$0, ARG_LENCOPIED(%ebp)	/* want length? */
	je	copystr_done		/* no */
	movl	ARG_MAXLEN(%ebp), %edx
	subl	%ecx, %edx		/* compute length and store it */
	movl	ARG_LENCOPIED(%ebp), %ecx
	movl	%edx, (%ecx)

copystr_done:
	popl	%edi			/* saved lofault */
	movl	%gs:CPU_THREAD, %ebx
	movl	%edi, T_LOFAULT(%ebx)	/* restore the original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(copystr)
1352
1353#undef	ARG_FROM
1354#undef	ARG_TO
1355#undef	ARG_MAXLEN
1356#undef	ARG_LENCOPIED
1357
1358#endif	/* __i386 */
1359#endif	/* __lint */
1360
1361/*
1362 * Copy a null terminated string from the user address space into
1363 * the kernel address space.
1364 */
1365
1366#if defined(__lint)
1367
1368/* ARGSUSED */
1369int
1370copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
1371    size_t *lencopied)
1372{ return (0); }
1373
1374#else	/* __lint */
1375
1376#if defined(__amd64)
1377
1378	ENTRY(copyinstr)
1379	pushq	%rbp
1380	movq	%rsp, %rbp
1381	subq	$32, %rsp
1382
1383	/*
1384	 * save args in case we trap and need to rerun as a copyop
1385	 */
1386	movq	%rdi, (%rsp)
1387	movq	%rsi, 0x8(%rsp)
1388	movq	%rdx, 0x10(%rsp)
1389	movq	%rcx, 0x18(%rsp)
1390
1391	movq	kernelbase(%rip), %rax
1392#ifdef DEBUG
1393	cmpq	%rax, %rsi		/* %rsi = kaddr */
1394	jnb	1f
1395	leaq	.copyinstr_panic_msg(%rip), %rdi
1396	xorl	%eax, %eax
1397	call	panic
13981:
1399#endif
1400	/*
1401	 * pass lofault value as 5th argument to do_copystr
1402	 */
1403	leaq	_copyinstr_error(%rip), %r8
1404
1405	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
1406	jb	do_copystr
1407	movq	%gs:CPU_THREAD, %r9
1408	jmp	3f
1409
1410_copyinstr_error:
1411	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
14123:
1413	movq	T_COPYOPS(%r9), %rax
1414	cmpq	$0, %rax
1415	jz	2f
1416
1417	/*
1418	 * reload args for the copyop
1419	 */
1420	movq	(%rsp), %rdi
1421	movq	0x8(%rsp), %rsi
1422	movq	0x10(%rsp), %rdx
1423	movq	0x18(%rsp), %rcx
1424	leave
1425	jmp	*CP_COPYINSTR(%rax)
1426
14272:	movl	$EFAULT, %eax		/* return EFAULT */
1428	leave
1429	ret
1430	SET_SIZE(copyinstr)
1431
1432#elif defined(__i386)
1433
1434#define	ARG_UADDR	4
1435#define	ARG_KADDR	8
1436
1437	ENTRY(copyinstr)
1438	movl	kernelbase, %ecx
1439#ifdef DEBUG
1440	cmpl	%ecx, ARG_KADDR(%esp)
1441	jnb	1f
1442	pushl	%ebp
1443	movl	%esp, %ebp
1444	pushl	$.copyinstr_panic_msg
1445	call	panic
14461:
1447#endif
1448	lea	_copyinstr_error, %eax
1449	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1450	jb	do_copystr
1451	movl	%gs:CPU_THREAD, %edx
1452	jmp	3f
1453
1454_copyinstr_error:
1455	popl	%edi
1456	movl	%gs:CPU_THREAD, %edx
1457	movl	%edi, T_LOFAULT(%edx)	/* original lofault */
1458
1459	popl	%edi
1460	popl	%ebx
1461	popl	%ebp
14623:
1463	movl	T_COPYOPS(%edx), %eax
1464	cmpl	$0, %eax
1465	jz	2f
1466	jmp	*CP_COPYINSTR(%eax)
1467
14682:	movl	$EFAULT, %eax		/* return EFAULT */
1469	ret
1470	SET_SIZE(copyinstr)
1471
1472#undef	ARG_UADDR
1473#undef	ARG_KADDR
1474
1475#endif	/* __i386 */
1476#endif	/* __lint */
1477
1478/*
1479 * Copy a null terminated string from the kernel
1480 * address space to the user address space.
1481 */
1482
1483#if defined(__lint)
1484
1485/* ARGSUSED */
1486int
1487copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
1488    size_t *lencopied)
1489{ return (0); }
1490
1491#else	/* __lint */
1492
1493#if defined(__amd64)
1494
1495	ENTRY(copyoutstr)
1496	pushq	%rbp
1497	movq	%rsp, %rbp
1498	subq	$32, %rsp
1499
1500	/*
1501	 * save args in case we trap and need to rerun as a copyop
1502	 */
1503	movq	%rdi, (%rsp)
1504	movq	%rsi, 0x8(%rsp)
1505	movq	%rdx, 0x10(%rsp)
1506	movq	%rcx, 0x18(%rsp)
1507
1508	movq	kernelbase(%rip), %rax
1509#ifdef DEBUG
1510	cmpq	%rax, %rdi		/* %rdi = kaddr */
1511	jnb	1f
1512	leaq	.copyoutstr_panic_msg(%rip), %rdi
1513	jmp	call_panic		/* setup stack and call panic */
15141:
1515#endif
1516	/*
1517	 * pass lofault value as 5th argument to do_copystr
1518	 */
1519	leaq	_copyoutstr_error(%rip), %r8
1520
1521	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
1522	jb	do_copystr
1523	movq	%gs:CPU_THREAD, %r9
1524	jmp	3f
1525
1526_copyoutstr_error:
1527	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
15283:
1529	movq	T_COPYOPS(%r9), %rax
1530	cmpq	$0, %rax
1531	jz	2f
1532
1533	/*
1534	 * reload args for the copyop
1535	 */
1536	movq	(%rsp), %rdi
1537	movq	0x8(%rsp), %rsi
1538	movq	0x10(%rsp), %rdx
1539	movq	0x18(%rsp), %rcx
1540	leave
1541	jmp	*CP_COPYOUTSTR(%rax)
1542
15432:	movl	$EFAULT, %eax		/* return EFAULT */
1544	leave
1545	ret
1546	SET_SIZE(copyoutstr)
1547
1548#elif defined(__i386)
1549
1550#define	ARG_KADDR	4
1551#define	ARG_UADDR	8
1552
1553	ENTRY(copyoutstr)
1554	movl	kernelbase, %ecx
1555#ifdef DEBUG
1556	cmpl	%ecx, ARG_KADDR(%esp)
1557	jnb	1f
1558	pushl	%ebp
1559	movl	%esp, %ebp
1560	pushl	$.copyoutstr_panic_msg
1561	call	panic
15621:
1563#endif
1564	lea	_copyoutstr_error, %eax
1565	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1566	jb	do_copystr
1567	movl	%gs:CPU_THREAD, %edx
1568	jmp	3f
1569
1570_copyoutstr_error:
1571	popl	%edi
1572	movl	%gs:CPU_THREAD, %edx
1573	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
1574
1575	popl	%edi
1576	popl	%ebx
1577	popl	%ebp
15783:
1579	movl	T_COPYOPS(%edx), %eax
1580	cmpl	$0, %eax
1581	jz	2f
1582	jmp	*CP_COPYOUTSTR(%eax)
1583
15842:	movl	$EFAULT, %eax		/* return EFAULT */
1585	ret
1586	SET_SIZE(copyoutstr)
1587
1588#undef	ARG_KADDR
1589#undef	ARG_UADDR
1590
1591#endif	/* __i386 */
1592#endif	/* __lint */
1593
1594/*
1595 * Since all of the fuword() variants are so similar, we have a macro to spit
1596 * them out.  This allows us to create DTrace-unobservable functions easily.
1597 */
1598
1599#if defined(__lint)
1600
1601#if defined(__amd64)
1602
1603/* ARGSUSED */
1604int
1605fuword64(const void *addr, uint64_t *dst)
1606{ return (0); }
1607
1608#endif
1609
1610/* ARGSUSED */
1611int
1612fuword32(const void *addr, uint32_t *dst)
1613{ return (0); }
1614
1615/* ARGSUSED */
1616int
1617fuword16(const void *addr, uint16_t *dst)
1618{ return (0); }
1619
1620/* ARGSUSED */
1621int
1622fuword8(const void *addr, uint8_t *dst)
1623{ return (0); }
1624
1625#else	/* __lint */
1626
1627#if defined(__amd64)
1628
1629/*
1630 * (Note that we don't save and reload the arguments here
1631 * because their values are not altered in the copy path)
1632 */
1633
1634#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
1635	ENTRY(NAME)				\
1636	movq	%gs:CPU_THREAD, %r9;		\
1637	cmpq	kernelbase(%rip), %rdi;		\
1638	jae	1f;				\
1639	leaq	_flt_/**/NAME, %rdx;		\
1640	movq	%rdx, T_LOFAULT(%r9);		\
1641	INSTR	(%rdi), REG;			\
1642	movq	$0, T_LOFAULT(%r9);		\
1643	INSTR	REG, (%rsi);			\
1644	xorl	%eax, %eax;			\
1645	ret;					\
1646_flt_/**/NAME:					\
1647	movq	$0, T_LOFAULT(%r9);		\
16481:						\
1649	movq	T_COPYOPS(%r9), %rax;		\
1650	cmpq	$0, %rax;			\
1651	jz	2f;				\
1652	jmp	*COPYOP(%rax);			\
16532:						\
1654	movl	$-1, %eax;			\
1655	ret;					\
1656	SET_SIZE(NAME)
1657
1658	FUWORD(fuword64, movq, %rax, CP_FUWORD64)
1659	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
1660	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
1661	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1662
1663#elif defined(__i386)
1664
1665#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
1666	ENTRY(NAME)				\
1667	movl	%gs:CPU_THREAD, %ecx;		\
1668	movl	kernelbase, %eax;		\
1669	cmpl	%eax, 4(%esp);			\
1670	jae	1f;				\
1671	lea	_flt_/**/NAME, %edx;		\
1672	movl	%edx, T_LOFAULT(%ecx);		\
1673	movl	4(%esp), %eax;			\
1674	movl	8(%esp), %edx;			\
1675	INSTR	(%eax), REG;			\
1676	movl	$0, T_LOFAULT(%ecx);		\
1677	INSTR	REG, (%edx);			\
1678	xorl	%eax, %eax;			\
1679	ret;					\
1680_flt_/**/NAME:					\
1681	movl	$0, T_LOFAULT(%ecx);		\
16821:						\
1683	movl	T_COPYOPS(%ecx), %eax;		\
1684	cmpl	$0, %eax;			\
1685	jz	2f;				\
1686	jmp	*COPYOP(%eax);			\
16872:						\
1688	movl	$-1, %eax;			\
1689	ret;					\
1690	SET_SIZE(NAME)
1691
1692	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
1693	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
1694	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1695
1696#endif	/* __i386 */
1697
1698#undef	FUWORD
1699
1700#endif	/* __lint */
1701
1702/*
1703 * Set user word.
1704 */
1705
1706#if defined(__lint)
1707
1708#if defined(__amd64)
1709
1710/* ARGSUSED */
1711int
1712suword64(void *addr, uint64_t value)
1713{ return (0); }
1714
1715#endif
1716
1717/* ARGSUSED */
1718int
1719suword32(void *addr, uint32_t value)
1720{ return (0); }
1721
1722/* ARGSUSED */
1723int
1724suword16(void *addr, uint16_t value)
1725{ return (0); }
1726
1727/* ARGSUSED */
1728int
1729suword8(void *addr, uint8_t value)
1730{ return (0); }
1731
1732#else	/* lint */
1733
1734#if defined(__amd64)
1735
1736/*
1737 * (Note that we don't save and reload the arguments here
1738 * because their values are not altered in the copy path)
1739 */
1740
1741#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
1742	ENTRY(NAME)				\
1743	movq	%gs:CPU_THREAD, %r9;		\
1744	cmpq	kernelbase(%rip), %rdi;		\
1745	jae	1f;				\
1746	leaq	_flt_/**/NAME, %rdx;		\
1747	movq	%rdx, T_LOFAULT(%r9);		\
1748	INSTR	REG, (%rdi);			\
1749	movq	$0, T_LOFAULT(%r9);		\
1750	xorl	%eax, %eax;			\
1751	ret;					\
1752_flt_/**/NAME:					\
1753	movq	$0, T_LOFAULT(%r9);		\
17541:						\
1755	movq	T_COPYOPS(%r9), %rax;		\
1756	cmpq	$0, %rax;			\
1757	jz	3f;				\
1758	jmp	*COPYOP(%rax);			\
17593:						\
1760	movl	$-1, %eax;			\
1761	ret;					\
1762	SET_SIZE(NAME)
1763
1764	SUWORD(suword64, movq, %rsi, CP_SUWORD64)
1765	SUWORD(suword32, movl, %esi, CP_SUWORD32)
1766	SUWORD(suword16, movw, %si, CP_SUWORD16)
1767	SUWORD(suword8, movb, %sil, CP_SUWORD8)
1768
1769#elif defined(__i386)
1770
1771#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
1772	ENTRY(NAME)				\
1773	movl	%gs:CPU_THREAD, %ecx;		\
1774	movl	kernelbase, %eax;		\
1775	cmpl	%eax, 4(%esp);			\
1776	jae	1f;				\
1777	lea	_flt_/**/NAME, %edx;		\
1778	movl	%edx, T_LOFAULT(%ecx);		\
1779	movl	4(%esp), %eax;			\
1780	movl	8(%esp), %edx;			\
1781	INSTR	REG, (%eax);			\
1782	movl	$0, T_LOFAULT(%ecx);		\
1783	xorl	%eax, %eax;			\
1784	ret;					\
1785_flt_/**/NAME:					\
1786	movl	$0, T_LOFAULT(%ecx);		\
17871:						\
1788	movl	T_COPYOPS(%ecx), %eax;		\
1789	cmpl	$0, %eax;			\
1790	jz	3f;				\
1791	movl	COPYOP(%eax), %ecx;		\
1792	jmp	*%ecx;				\
17933:						\
1794	movl	$-1, %eax;			\
1795	ret;					\
1796	SET_SIZE(NAME)
1797
1798	SUWORD(suword32, movl, %edx, CP_SUWORD32)
1799	SUWORD(suword16, movw, %dx, CP_SUWORD16)
1800	SUWORD(suword8, movb, %dl, CP_SUWORD8)
1801
1802#endif	/* __i386 */
1803
1804#undef	SUWORD
1805
1806#endif	/* __lint */
1807
#if defined(__lint)

#if defined(__amd64)

/*ARGSUSED*/
void
fuword64_noerr(const void *addr, uint64_t *dst)
{}

#endif

/*ARGSUSED*/
void
fuword32_noerr(const void *addr, uint32_t *dst)
{}

/*ARGSUSED*/
void
fuword8_noerr(const void *addr, uint8_t *dst)
{}

/*ARGSUSED*/
void
fuword16_noerr(const void *addr, uint16_t *dst)
{}

#else   /* __lint */

#if defined(__amd64)

/*
 * FUWORD_NOERR -- fetch a user word with no local fault handling.
 * A kernel-range addr is clamped to kernelbase so the access faults
 * there; presumably the caller runs under on_fault() -- confirm, as
 * with the copy*_noerr routines below.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	(%rdi), REG;			\
	INSTR	REG, (%rsi);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword64_noerr, movq, %rax)
	FUWORD_NOERR(fuword32_noerr, movl, %eax)
	FUWORD_NOERR(fuword16_noerr, movw, %ax)
	FUWORD_NOERR(fuword8_noerr, movb, %al)

#elif defined(__i386)

/*
 * i386 variant: same clamp-to-kernelbase scheme, done with a
 * compare-and-branch since cmov is not assumed here.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	INSTR	REG, (%edx);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword32_noerr, movl, %ecx)
	FUWORD_NOERR(fuword16_noerr, movw, %cx)
	FUWORD_NOERR(fuword8_noerr, movb, %cl)

#endif	/* __i386 */

#undef	FUWORD_NOERR

#endif	/* __lint */
1875
#if defined(__lint)

#if defined(__amd64)

/*ARGSUSED*/
void
suword64_noerr(void *addr, uint64_t value)
{}

#endif

/*ARGSUSED*/
void
suword32_noerr(void *addr, uint32_t value)
{}

/*ARGSUSED*/
void
suword16_noerr(void *addr, uint16_t value)
{}

/*ARGSUSED*/
void
suword8_noerr(void *addr, uint8_t value)
{}

#else	/* lint */

#if defined(__amd64)

/*
 * SUWORD_NOERR -- store a user word with no local fault handling.
 * A kernel-range addr is clamped to kernelbase so the access faults
 * there; presumably the caller runs under on_fault() -- confirm.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	REG, (%rdi);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword64_noerr, movq, %rsi)
	SUWORD_NOERR(suword32_noerr, movl, %esi)
	SUWORD_NOERR(suword16_noerr, movw, %si)
	SUWORD_NOERR(suword8_noerr, movb, %sil)

#elif defined(__i386)

/*
 * i386 variant of the clamp-to-kernelbase store.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:						\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword32_noerr, movl, %edx)
	SUWORD_NOERR(suword16_noerr, movw, %dx)
	SUWORD_NOERR(suword8_noerr, movb, %dl)

#endif	/* __i386 */

#undef	SUWORD_NOERR

#endif	/* lint */
1942
1943
#if defined(__lint)

/*ARGSUSED*/
int
subyte(void *addr, uchar_t value)
{ return (0); }

/*ARGSUSED*/
void
subyte_noerr(void *addr, uchar_t value)
{}

/*ARGSUSED*/
int
fulword(const void *addr, ulong_t *valuep)
{ return (0); }

/*ARGSUSED*/
void
fulword_noerr(const void *addr, ulong_t *valuep)
{}

/*ARGSUSED*/
int
sulword(void *addr, ulong_t valuep)
{ return (0); }

/*ARGSUSED*/
void
sulword_noerr(void *addr, ulong_t valuep)
{}

#else

/*
 * subyte is simply suword8 under another name; the fulword/sulword
 * family aliases to the native word size of the architecture
 * (64-bit on amd64, 32-bit on i386).
 */
	.weak	subyte
	subyte=suword8
	.weak	subyte_noerr
	subyte_noerr=suword8_noerr

#if defined(__amd64)

	.weak	fulword
	fulword=fuword64
	.weak	fulword_noerr
	fulword_noerr=fuword64_noerr
	.weak	sulword
	sulword=suword64
	.weak	sulword_noerr
	sulword_noerr=suword64_noerr

#elif defined(__i386)

	.weak	fulword
	fulword=fuword32
	.weak	fulword_noerr
	fulword_noerr=fuword32_noerr
	.weak	sulword
	sulword=suword32
	.weak	sulword_noerr
	sulword_noerr=suword32_noerr

#endif /* __i386 */

#endif /* __lint */
2008
#if defined(__lint)

/*
 * Copy a block of storage - must not overlap (from + len <= to).
 * No fault handler installed (to be called under on_fault())
 */

/* ARGSUSED */
void
copyout_noerr(const void *kfrom, void *uto, size_t count)
{}

/* ARGSUSED */
void
copyin_noerr(const void *ufrom, void *kto, size_t count)
{}

/*
 * Zero a block of storage in user space
 */

/* ARGSUSED */
void
uzero(void *addr, size_t count)
{}

/*
 * Copy a block of storage in user space
 */

/* ARGSUSED */
void
ucopy(const void *ufrom, void *uto, size_t ulength)
{}

/*
 * Copy a string in user space
 */

/* ARGSUSED */
void
ucopystr(const char *ufrom, char *uto, size_t umaxlength, size_t *lencopied)
{}

#else /* __lint */

#if defined(__amd64)

/*
 * copyin_noerr -- copy from user to kernel with no local fault
 * handler; a kernel-range user address is replaced with kernelbase
 * to force the fault there.  Caller is expected to handle faults
 * (see the on_fault() note in the lint section above).
 */
	ENTRY(copyin_noerr)
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kto */
	jae	1f
	leaq	.cpyin_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rdi		/* ufrom < kernelbase */
	jb	do_copy
	movq	%rax, %rdi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)

/*
 * copyout_noerr -- kernel-to-user mirror of copyin_noerr.
 */
	ENTRY(copyout_noerr)
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kfrom */
	jae	1f
	leaq	.cpyout_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rsi		/* uto < kernelbase */
	jb	do_copy
	movq	%rax, %rsi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)

/*
 * uzero -- zero a user-space range via do_zero, clamping kernel-range
 * addresses to kernelbase as above.
 */
	ENTRY(uzero)
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	do_zero
	movq	%rax, %rdi	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)

/*
 * ucopy -- user-to-user block copy; both addresses are clamped.
 */
	ENTRY(ucopy)
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	cmovaeq	%rax, %rdi	/* force fault at kernelbase */
	cmpq	%rax, %rsi
	cmovaeq	%rax, %rsi	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)

/*
 * ucopystr -- user-to-user string copy; keeps the caller's current
 * lofault handler in effect for the duration of the copy.
 */
	ENTRY(ucopystr)
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	cmovaeq	%rax, %rdi	/* force fault at kernelbase */
	cmpq	%rax, %rsi
	cmovaeq	%rax, %rsi	/* force fault at kernelbase */
	/* do_copystr expects lofault address in %r8 */
	movq	%gs:CPU_THREAD, %r8
	movq	T_LOFAULT(%r8), %r8
	jmp	do_copystr
	SET_SIZE(ucopystr)

#elif defined(__i386)

/*
 * i386 versions of the above; user addresses live at 4(%esp) and
 * 8(%esp) and are clamped in place on the stack before tail-jumping
 * into the common do_copy/do_zero/do_copystr paths.
 */
	ENTRY(copyin_noerr)
	movl	kernelbase, %eax
#ifdef DEBUG
	cmpl	%eax, 8(%esp)
	jae	1f
	pushl	$.cpyin_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 4(%esp)
	jb	do_copy
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)

	ENTRY(copyout_noerr)
	movl	kernelbase, %eax
#ifdef DEBUG
	cmpl	%eax, 4(%esp)
	jae	1f
	pushl	$.cpyout_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)

	ENTRY(uzero)
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	do_zero
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)

	ENTRY(ucopy)
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	1f
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
1:
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)

	ENTRY(ucopystr)
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	1f
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
1:
	cmpl	%eax, 8(%esp)
	jb	2f
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
2:
	/* do_copystr expects the lofault address in %eax */
	movl	%gs:CPU_THREAD, %eax
	movl	T_LOFAULT(%eax), %eax
	jmp	do_copystr
	SET_SIZE(ucopystr)

#endif	/* __i386 */

/*
 * DEBUG-only panic message strings referenced by the checks above
 * (and by the DEBUG checks in the routines earlier in this file).
 */
#ifdef DEBUG
	.data
.kcopy_panic_msg:
	.string "kcopy: arguments below kernelbase"
.bcopy_panic_msg:
	.string "bcopy: arguments below kernelbase"
.kzero_panic_msg:
        .string "kzero: arguments below kernelbase"
.bzero_panic_msg:
	.string	"bzero: arguments below kernelbase"
.copyin_panic_msg:
	.string "copyin: kaddr argument below kernelbase"
.xcopyin_panic_msg:
	.string	"xcopyin: kaddr argument below kernelbase"
.copyout_panic_msg:
	.string "copyout: kaddr argument below kernelbase"
.xcopyout_panic_msg:
	.string	"xcopyout: kaddr argument below kernelbase"
.copystr_panic_msg:
	.string	"copystr: arguments in user space"
.copyinstr_panic_msg:
	.string	"copyinstr: kaddr argument not in kernel address space"
.copyoutstr_panic_msg:
	.string	"copyoutstr: kaddr argument not in kernel address space"
.cpyin_ne_pmsg:
	.string "copyin_noerr: argument not in kernel address space"
.cpyout_ne_pmsg:
	.string "copyout_noerr: argument not in kernel address space"
#endif

#endif	/* __lint */
2217