xref: /titanic_44/usr/src/uts/intel/ia32/ml/copy.s (revision 28167c24ba5be8b7c1d05e02d053f4a55cd21cc9)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License").  You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22/*
23 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24 * Use is subject to license terms.
25 */
26
27/*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
28/*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
29/*         All Rights Reserved						*/
30
31/*       Copyright (c) 1987, 1988 Microsoft Corporation			*/
32/*         All Rights Reserved						*/
33
34#pragma ident	"%Z%%M%	%I%	%E% SMI"
35
36#include <sys/errno.h>
37#include <sys/asm_linkage.h>
38
39#if defined(__lint)
40#include <sys/types.h>
41#include <sys/systm.h>
42#else	/* __lint */
43#include "assym.h"
44#endif	/* __lint */
45
46#define	KCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
47#define	XCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
48/*
49 * Non-temporal access (NTA) alignment requirement
50 */
51#define	NTA_ALIGN_SIZE	4	/* Must be at least 4-byte aligned */
52#define	NTA_ALIGN_MASK	_CONST(NTA_ALIGN_SIZE-1)
53#define	COUNT_ALIGN_SIZE	16	/* Must be at least 16-byte aligned */
54#define	COUNT_ALIGN_MASK	_CONST(COUNT_ALIGN_SIZE-1)
55
56/*
57 * Copy a block of storage, returning an error code if `from' or
58 * `to' takes a kernel pagefault which cannot be resolved.
59 * Returns errno value on pagefault error, 0 if all ok
60 */
61
62#if defined(__lint)
63
64/* ARGSUSED */
65int
66kcopy(const void *from, void *to, size_t count)
67{ return (0); }
68
69#else	/* __lint */
70
71	.globl	kernelbase
72
73#if defined(__amd64)
74
/*
 * int kcopy(const void *from, void *to, size_t count) -- amd64 version.
 * In:  %rdi = from, %rsi = to, %rdx = count.
 * Out: %rax = 0 on success; on a kernel pagefault the trap handler
 *      loads an errno into %rax and iretq's to _kcopy_copyerr.
 * do_copy_fault is a shared entry: other routines (copyin, copyout,
 * *_nta flavors) jump here with %rcx = lofault handler address and
 * %r9 = current thread pointer already set up.
 */
75	ENTRY(kcopy)
76	pushq	%rbp
77	movq	%rsp, %rbp
78#ifdef DEBUG
	/* DEBUG kernels: both addresses must be kernel addresses, else panic */
79	movq	kernelbase(%rip), %rax
80	cmpq	%rax, %rdi 		/* %rdi = from */
81	jb	0f
82	cmpq	%rax, %rsi		/* %rsi = to */
83	jnb	1f
840:	leaq	.kcopy_panic_msg(%rip), %rdi
85	xorl	%eax, %eax
86	call	panic
871:
88#endif
89	/*
90	 * pass lofault value as 4th argument to do_copy_fault
91	 */
92	leaq	_kcopy_copyerr(%rip), %rcx
93	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
94
95do_copy_fault:
96	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
97	movq	%rcx, T_LOFAULT(%r9)	/* new lofault */
98
	/* bulk copy: 8 bytes at a time, then the 0-7 byte remainder */
99	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
100	movq	%rdx, %rcx		/* %rcx = count */
101	shrq	$3, %rcx		/* 8-byte word count */
102	rep
103	  smovq
104
105	movq	%rdx, %rcx
106	andq	$7, %rcx		/* bytes left over */
107	rep
108	  smovb
109	xorl	%eax, %eax		/* return 0 (success) */
110
111	/*
112	 * A fault during do_copy_fault is indicated through an errno value
113	 * in %rax and we iretq from the trap handler to here.
114	 */
115_kcopy_copyerr:
116	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
117	leave
118	ret
119	SET_SIZE(kcopy)
120
121#elif defined(__i386)
122
/*
 * int kcopy(const void *from, void *to, size_t count) -- i386 version.
 * Args on the stack, addressed relative to %ebp once the frame in
 * do_copy_fault is built.  Returns 0 in %eax on success; on a fault
 * the trap handler places an errno in %eax and irets to
 * _kcopy_copyerr.  do_copy_fault is a shared entry: callers arrive
 * with %eax = lofault handler and %edx = current thread pointer.
 */
123#define	ARG_FROM	8
124#define	ARG_TO		12
125#define	ARG_COUNT	16
126
127	ENTRY(kcopy)
128#ifdef DEBUG
	/* DEBUG kernels: both addresses must be kernel addresses, else panic */
129	pushl	%ebp
130	movl	%esp, %ebp
131	movl	kernelbase, %eax
132	cmpl	%eax, ARG_FROM(%ebp)
133	jb	0f
134	cmpl	%eax, ARG_TO(%ebp)
135	jnb	1f
1360:	pushl	$.kcopy_panic_msg
137	call	panic
1381:	popl	%ebp
139#endif
140	lea	_kcopy_copyerr, %eax	/* lofault value */
141	movl	%gs:CPU_THREAD, %edx
142
143do_copy_fault:
144	pushl	%ebp
145	movl	%esp, %ebp		/* setup stack frame */
146	pushl	%esi
147	pushl	%edi			/* save registers */
148
149	movl	T_LOFAULT(%edx), %edi
150	pushl	%edi			/* save the current lofault */
151	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
152
	/* bulk copy: 4 bytes at a time, then the 0-3 byte remainder */
153	movl	ARG_COUNT(%ebp), %ecx
154	movl	ARG_FROM(%ebp), %esi
155	movl	ARG_TO(%ebp), %edi
156	shrl	$2, %ecx		/* word count */
157	rep
158	  smovl
159	movl	ARG_COUNT(%ebp), %ecx
160	andl	$3, %ecx		/* bytes left over */
161	rep
162	  smovb
163	xorl	%eax, %eax
164
165	/*
166	 * A fault during do_copy_fault is indicated through an errno value
167	 * in %eax and we iret from the trap handler to here.
168	 */
169_kcopy_copyerr:
	/* unwind in reverse push order: lofault, %edi, %esi, %ebp */
170	popl	%ecx
171	popl	%edi
172	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
173	popl	%esi
174	popl	%ebp
175	ret
176	SET_SIZE(kcopy)
177
178#undef	ARG_FROM
179#undef	ARG_TO
180#undef	ARG_COUNT
181
182#endif	/* __i386 */
183#endif	/* __lint */
184
185#if defined(__lint)
186
187/*
188 * Copy a block of storage.  Similar to kcopy but uses non-temporal
189 * instructions.
190 */
191
192/* ARGSUSED */
193int
194kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
195{ return (0); }
196
197#else	/* __lint */
198
199#if defined(__amd64)
200
/*
 * int kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
 * amd64 version.  %rdi = from, %rsi = to, %rdx = count, %rcx = copy_cached.
 * Like kcopy, but uses non-temporal (cache-bypassing) stores when
 * copy_cached is 0, the count is at least KCOPY_MIN_SIZE, both
 * addresses are NTA_ALIGN_SIZE aligned and the count is
 * COUNT_ALIGN_SIZE aligned; otherwise falls back to do_copy_fault.
 *
 * COPY_LOOP_INIT biases src/dst by count and converts cnt to a
 * negative 8-byte word index so the loop can count up toward zero.
 */
201#define	COPY_LOOP_INIT(src, dst, cnt)	\
202	addq	cnt, src;			\
203	addq	cnt, dst;			\
204	shrq	$3, cnt;			\
205	neg	cnt
206
207	/* Copy 16 bytes per loop.  Uses %rax and %r8 */
208#define	COPY_LOOP_BODY(src, dst, cnt)	\
209	prefetchnta	0x100(src, cnt, 8);	\
210	movq	(src, cnt, 8), %rax;		\
211	movq	0x8(src, cnt, 8), %r8;		\
212	movnti	%rax, (dst, cnt, 8);		\
213	movnti	%r8, 0x8(dst, cnt, 8);		\
214	addq	$2, cnt
215
216	ENTRY(kcopy_nta)
217	pushq	%rbp
218	movq	%rsp, %rbp
219#ifdef DEBUG
220	movq	kernelbase(%rip), %rax
221	cmpq	%rax, %rdi 		/* %rdi = from */
222	jb	0f
223	cmpq	%rax, %rsi		/* %rsi = to */
224	jnb	1f
2250:	leaq	.kcopy_panic_msg(%rip), %rdi
226	xorl	%eax, %eax
227	call	panic
2281:
229#endif
230
231	movq	%gs:CPU_THREAD, %r9
232	cmpq	$0, %rcx		/* No non-temporal access? */
233	/*
234	 * pass lofault value as 4th argument to do_copy_fault
235	 */
236	leaq	_kcopy_nta_copyerr(%rip), %rcx	/* doesn't set rflags */
237	jnz	do_copy_fault		/* use regular access */
238	/*
239	 * Make sure cnt is >= KCOPY_MIN_SIZE
240	 */
241	cmpq	$KCOPY_MIN_SIZE, %rdx
242	jb	do_copy_fault
243
244	/*
245	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
246	 * count is COUNT_ALIGN_SIZE aligned.
247	 */
248	movq	%rdi, %r10
249	orq	%rsi, %r10
250	andq	$NTA_ALIGN_MASK, %r10
251	orq	%rdx, %r10
252	andq	$COUNT_ALIGN_MASK, %r10
253	jnz	do_copy_fault
254
	/*
	 * Shared NTA copy entry: callers arrive with %rcx = lofault
	 * handler, %rdi = src, %rsi = dst, %rdx = count.
	 */
255	ALTENTRY(do_copy_fault_nta)
256	movq    %gs:CPU_THREAD, %r9     /* %r9 = thread addr */
257	movq    T_LOFAULT(%r9), %r11    /* save the current lofault */
258	movq    %rcx, T_LOFAULT(%r9)    /* new lofault */
259
260	/*
261	 * COPY_LOOP_BODY uses %rax and %r8
262	 */
263	COPY_LOOP_INIT(%rdi, %rsi, %rdx)
2642:	COPY_LOOP_BODY(%rdi, %rsi, %rdx)
265	jnz	2b
266
	/* drain the non-temporal (weakly-ordered) stores before returning */
267	mfence
268	xorl	%eax, %eax		/* return 0 (success) */
269
270_kcopy_nta_copyerr:
271	movq	%r11, T_LOFAULT(%r9)    /* restore original lofault */
272	leave
273	ret
274	SET_SIZE(do_copy_fault_nta)
275	SET_SIZE(kcopy_nta)
276
277#elif defined(__i386)
278
/*
 * int kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
 * i386 version.  kcopy_nta itself just tail-jumps to kcopy (no win was
 * measured from NTA on 32-bit); the code after the jmp is unreachable
 * from this entry but provides do_copy_fault_nta, the shared
 * non-temporal copy path used by xcopyin_nta/xcopyout_nta.  Callers of
 * do_copy_fault_nta arrive with %eax = lofault handler address.
 *
 * COPY_LOOP_INIT biases src/dst by count and converts cnt to a
 * negative index (in 8-byte units); COPY_LOOP_BODY copies 16 bytes
 * per iteration with movnti, clobbering %esi as scratch.
 */
279#define	ARG_FROM	8
280#define	ARG_TO		12
281#define	ARG_COUNT	16
282
283#define	COPY_LOOP_INIT(src, dst, cnt)	\
284	addl	cnt, src;			\
285	addl	cnt, dst;			\
286	shrl	$3, cnt;			\
287	neg	cnt
288
289#define	COPY_LOOP_BODY(src, dst, cnt)	\
290	prefetchnta	0x100(src, cnt, 8);	\
291	movl	(src, cnt, 8), %esi;		\
292	movnti	%esi, (dst, cnt, 8);		\
293	movl	0x4(src, cnt, 8), %esi;		\
294	movnti	%esi, 0x4(dst, cnt, 8);		\
295	movl	0x8(src, cnt, 8), %esi;		\
296	movnti	%esi, 0x8(dst, cnt, 8);		\
297	movl	0xc(src, cnt, 8), %esi;		\
298	movnti	%esi, 0xc(dst, cnt, 8);		\
299	addl	$2, cnt
300
301	/*
302	 * kcopy_nta is not implemented for 32-bit as no performance
303	 * improvement was shown.  We simply jump directly to kcopy
304	 * and discard the 4 arguments.
305	 */
306	ENTRY(kcopy_nta)
307	jmp	kcopy
308
	/* NOTE(review): this lea is unreachable (after jmp, before the
	 * ALTENTRY); do_copy_fault_nta callers supply %eax themselves. */
309	lea	_kcopy_nta_copyerr, %eax	/* lofault value */
310	ALTENTRY(do_copy_fault_nta)
311	pushl	%ebp
312	movl	%esp, %ebp		/* setup stack frame */
313	pushl	%esi
314	pushl	%edi
315
316	movl	%gs:CPU_THREAD, %edx
317	movl	T_LOFAULT(%edx), %edi
318	pushl	%edi			/* save the current lofault */
319	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
320
321	/* COPY_LOOP_BODY needs to use %esi */
322	movl	ARG_COUNT(%ebp), %ecx
323	movl	ARG_FROM(%ebp), %edi
324	movl	ARG_TO(%ebp), %eax
325	COPY_LOOP_INIT(%edi, %eax, %ecx)
3261:	COPY_LOOP_BODY(%edi, %eax, %ecx)
327	jnz	1b
	/* drain the non-temporal (weakly-ordered) stores before returning */
328	mfence
329
330	xorl	%eax, %eax
331_kcopy_nta_copyerr:
332	popl	%ecx
333	popl	%edi
334	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
335	popl	%esi
336	leave
337	ret
338	SET_SIZE(do_copy_fault_nta)
339	SET_SIZE(kcopy_nta)
340
341#undef	ARG_FROM
342#undef	ARG_TO
343#undef	ARG_COUNT
344
345#endif	/* __i386 */
346#endif	/* __lint */
347
348#if defined(__lint)
349
350/* ARGSUSED */
351void
352bcopy(const void *from, void *to, size_t count)
353{}
354
355#else	/* __lint */
356
357#if defined(__amd64)
358
/*
 * void bcopy(const void *from, void *to, size_t count) -- amd64 version.
 * %rdi = from, %rsi = to, %rdx = count.  No fault recovery: both
 * addresses must be resolvable kernel addresses.  do_copy is a shared
 * label; call_panic is the common DEBUG panic trampoline also used by
 * kzero/bzero.
 */
359	ENTRY(bcopy)
360#ifdef DEBUG
	/* DEBUG kernels: skip checks for count == 0, else both
	 * addresses must be kernel addresses or we panic */
361	orq	%rdx, %rdx		/* %rdx = count */
362	jz	1f
363	movq	kernelbase(%rip), %rax
364	cmpq	%rax, %rdi		/* %rdi = from */
365	jb	0f
366	cmpq	%rax, %rsi		/* %rsi = to */
367	jnb	1f
3680:	leaq	.bcopy_panic_msg(%rip), %rdi
369	jmp	call_panic		/* setup stack and call panic */
3701:
371#endif
372do_copy:
	/* bulk copy: 8 bytes at a time, then the 0-7 byte remainder */
373	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
374	movq	%rdx, %rcx		/* %rcx = count */
375	shrq	$3, %rcx		/* 8-byte word count */
376	rep
377	  smovq
378
379	movq	%rdx, %rcx
380	andq	$7, %rcx		/* bytes left over */
381	rep
382	  smovb
383	ret
384
385#ifdef DEBUG
386	/*
387	 * Setup frame on the run-time stack. The end of the input argument
388	 * area must be aligned on a 16 byte boundary. The stack pointer %rsp,
389	 * always points to the end of the latest allocated stack frame.
390	 * panic(const char *format, ...) is a varargs function. When a
391	 * function taking variable arguments is called, %rax must be set
392	 * to eight times the number of floating point parameters passed
393	 * to the function in SSE registers.
394	 */
395call_panic:
396	pushq	%rbp			/* align stack properly */
397	movq	%rsp, %rbp
398	xorl	%eax, %eax		/* no variable arguments */
399	call	panic			/* %rdi = format string */
400#endif
401	SET_SIZE(bcopy)
402
403#elif defined(__i386)
404
/*
 * void bcopy(const void *from, void *to, size_t count) -- i386 version.
 * Args on the stack relative to %esp (no frame is built on the normal
 * path).  No fault recovery.  %esi/%edi are preserved by stashing them
 * in %eax/%edx rather than on the stack, keeping ARG_* offsets valid.
 */
405#define	ARG_FROM	4
406#define	ARG_TO		8
407#define	ARG_COUNT	12
408
409	ENTRY(bcopy)
410#ifdef DEBUG
	/* DEBUG kernels: skip checks for count == 0, else both
	 * addresses must be kernel addresses or we panic */
411	movl	ARG_COUNT(%esp), %eax
412	orl	%eax, %eax
413	jz	1f
414	movl	kernelbase, %eax
415	cmpl	%eax, ARG_FROM(%esp)
416	jb	0f
417	cmpl	%eax, ARG_TO(%esp)
418	jnb	1f
4190:	pushl	%ebp
420	movl	%esp, %ebp
421	pushl	$.bcopy_panic_msg
422	call	panic
4231:
424#endif
425do_copy:
426	movl	%esi, %eax		/* save registers */
427	movl	%edi, %edx
428	movl	ARG_COUNT(%esp), %ecx
429	movl	ARG_FROM(%esp), %esi
430	movl	ARG_TO(%esp), %edi
431
	/* bulk copy: 4 bytes at a time, then the 0-3 byte remainder */
432	shrl	$2, %ecx		/* word count */
433	rep
434	  smovl
435	movl	ARG_COUNT(%esp), %ecx
436	andl	$3, %ecx		/* bytes left over */
437	rep
438	  smovb
439	movl	%eax, %esi		/* restore registers */
440	movl	%edx, %edi
441	ret
442	SET_SIZE(bcopy)
443
444#undef	ARG_COUNT
445#undef	ARG_FROM
446#undef	ARG_TO
447
448#endif	/* __i386 */
449#endif	/* __lint */
450
451
452/*
453 * Zero a block of storage, returning an error code if we
454 * take a kernel pagefault which cannot be resolved.
455 * Returns errno value on pagefault error, 0 if all ok
456 */
457
458#if defined(__lint)
459
460/* ARGSUSED */
461int
462kzero(void *addr, size_t count)
463{ return (0); }
464
465#else	/* __lint */
466
467#if defined(__amd64)
468
/*
 * int kzero(void *addr, size_t count) -- amd64 version.
 * %rdi = addr, %rsi = count.  Returns 0 in %rax on success; on a
 * kernel pagefault the trap handler places an errno in %rax and
 * iretq's to _kzeroerr.
 */
469	ENTRY(kzero)
470#ifdef DEBUG
471        cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
472        jnb	0f
473        leaq	.kzero_panic_msg(%rip), %rdi
474	jmp	call_panic		/* setup stack and call panic */
4750:
476#endif
477	/*
478	 * pass lofault value as 3rd argument to do_zero_fault
479	 */
480	leaq	_kzeroerr(%rip), %rdx
481
482do_zero_fault:
483	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
484	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
485	movq	%rdx, T_LOFAULT(%r9)	/* new lofault */
486
	/* zero 8 bytes at a time, then the 0-7 byte remainder */
487	movq	%rsi, %rcx		/* get size in bytes */
488	shrq	$3, %rcx		/* count of 8-byte words to zero */
489	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
490	rep
491	  sstoq				/* %rcx = words to clear (%rax=0) */
492
493	movq	%rsi, %rcx
494	andq	$7, %rcx		/* bytes left over */
495	rep
496	  sstob				/* %rcx = residual bytes to clear */
497
498	/*
499	 * A fault during do_zero_fault is indicated through an errno value
500	 * in %rax when we iretq to here.
501	 */
502_kzeroerr:
503	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
504	ret
505	SET_SIZE(kzero)
506
507#elif defined(__i386)
508
/*
 * int kzero(void *addr, size_t count) -- i386 version.
 * Args on the stack, addressed via %ebp after the do_zero_fault frame
 * is built.  Returns 0 in %eax on success; on a fault the trap handler
 * places an errno in %eax and irets to _kzeroerr.
 */
509#define	ARG_ADDR	8
510#define	ARG_COUNT	12
511
512	ENTRY(kzero)
513#ifdef DEBUG
514	pushl	%ebp
515	movl	%esp, %ebp
516	movl	kernelbase, %eax
517        cmpl	%eax, ARG_ADDR(%ebp)
518        jnb	0f
519        pushl   $.kzero_panic_msg
520        call    panic
5210:	popl	%ebp
522#endif
523	lea	_kzeroerr, %eax		/* kzeroerr is lofault value */
524
525do_zero_fault:
526	pushl	%ebp			/* save stack base */
527	movl	%esp, %ebp		/* set new stack base */
528	pushl	%edi			/* save %edi */
529
530	mov	%gs:CPU_THREAD, %edx
531	movl	T_LOFAULT(%edx), %edi
532	pushl	%edi			/* save the current lofault */
533	movl	%eax, T_LOFAULT(%edx)	/* new lofault */
534
	/* zero 4 bytes at a time, then the 0-3 byte remainder */
535	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
536	movl	ARG_ADDR(%ebp), %edi	/* %edi <- address of bytes to clear */
537	shrl	$2, %ecx		/* Count of double words to zero */
538	xorl	%eax, %eax		/* sstol val */
539	rep
540	  sstol			/* %ecx contains words to clear (%eax=0) */
541
542	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
543	andl	$3, %ecx		/* do mod 4 */
544	rep
545	  sstob			/* %ecx contains residual bytes to clear */
546
547	/*
548	 * A fault during do_zero_fault is indicated through an errno value
549	 * in %eax when we iret to here.
550	 */
551_kzeroerr:
	/* first pop is the saved lofault value (reused %edi slot) */
552	popl	%edi
553	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
554	popl	%edi
555	popl	%ebp
556	ret
557	SET_SIZE(kzero)
558
559#undef	ARG_ADDR
560#undef	ARG_COUNT
561
562#endif	/* __i386 */
563#endif	/* __lint */
564
565/*
566 * Zero a block of storage.
567 */
568
569#if defined(__lint)
570
571/* ARGSUSED */
572void
573bzero(void *addr, size_t count)
574{}
575
576#else	/* __lint */
577
578#if defined(__amd64)
579
/*
 * void bzero(void *addr, size_t count) -- amd64 version.
 * %rdi = addr, %rsi = count.  No fault recovery: addr must be a
 * resolvable kernel address.
 */
580	ENTRY(bzero)
581#ifdef DEBUG
582	cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
583	jnb	0f
584	leaq	.bzero_panic_msg(%rip), %rdi
585	jmp	call_panic		/* setup stack and call panic */
5860:
587#endif
588do_zero:
	/* zero 8 bytes at a time, then the 0-7 byte remainder */
589	movq	%rsi, %rcx		/* get size in bytes */
590	shrq	$3, %rcx		/* count of 8-byte words to zero */
591	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
592	rep
593	  sstoq				/* %rcx = words to clear (%rax=0) */
594
595	movq	%rsi, %rcx
596	andq	$7, %rcx		/* bytes left over */
597	rep
598	  sstob				/* %rcx = residual bytes to clear */
599	ret
600	SET_SIZE(bzero)
601
602#elif defined(__i386)
603
/*
 * void bzero(void *addr, size_t count) -- i386 version.
 * Args on the stack relative to %esp (no frame on the normal path).
 * No fault recovery.  %edi is preserved in %edx rather than on the
 * stack so the ARG_* offsets stay valid.
 */
604#define	ARG_ADDR	4
605#define	ARG_COUNT	8
606
607	ENTRY(bzero)
608#ifdef DEBUG
609	movl	kernelbase, %eax
610	cmpl	%eax, ARG_ADDR(%esp)
611	jnb	0f
612	pushl	%ebp
613	movl	%esp, %ebp
614	pushl	$.bzero_panic_msg
615	call	panic
6160:
617#endif
618do_zero:
	/* zero 4 bytes at a time, then the 0-3 byte remainder */
619	movl	%edi, %edx
620	movl	ARG_COUNT(%esp), %ecx
621	movl	ARG_ADDR(%esp), %edi
622	shrl	$2, %ecx
623	xorl	%eax, %eax
624	rep
625	  sstol
626	movl	ARG_COUNT(%esp), %ecx
627	andl	$3, %ecx
628	rep
629	  sstob
630	movl	%edx, %edi
631	ret
632	SET_SIZE(bzero)
633
634#undef	ARG_ADDR
635#undef	ARG_COUNT
636
637#endif	/* __i386 */
638#endif	/* __lint */
639
640/*
641 * Transfer data to and from user space -
642 * Note that these routines can cause faults
643 * It is assumed that the kernel has nothing at
644 * less than KERNELBASE in the virtual address space.
645 *
646 * Note that copyin(9F) and copyout(9F) are part of the
647 * DDI/DKI which specifies that they return '-1' on "errors."
648 *
649 * Sigh.
650 *
651 * So there's two extremely similar routines - xcopyin_nta() and
652 * xcopyout_nta() which return the errno that we've faithfully computed.
653 * This allows other callers (e.g. uiomove(9F)) to work correctly.
654 * Given that these are used pretty heavily, we expand the calling
655 * sequences inline for all flavours (rather than making wrappers).
656 */
657
658/*
659 * Copy user data to kernel space.
660 */
661
662#if defined(__lint)
663
664/* ARGSUSED */
665int
666copyin(const void *uaddr, void *kaddr, size_t count)
667{ return (0); }
668
669#else	/* lint */
670
671#if defined(__amd64)
672
/*
 * int copyin(const void *uaddr, void *kaddr, size_t count) -- amd64.
 * %rdi = uaddr, %rsi = kaddr, %rdx = count.  Returns 0 on success,
 * -1 (per copyin(9F)) on failure.  Faults are recovered via the
 * shared do_copy_fault path; if the thread has T_COPYOPS set, the
 * operation is retried through the copyop vector with the original
 * arguments saved on the stack.
 */
673	ENTRY(copyin)
674	pushq	%rbp
675	movq	%rsp, %rbp
676	subq	$32, %rsp
677
678	/*
679	 * save args in case we trap and need to rerun as a copyop
680	 */
681	movq	%rdi, (%rsp)
682	movq	%rsi, 0x8(%rsp)
683	movq	%rdx, 0x10(%rsp)
684
685	movq	kernelbase(%rip), %rax
686#ifdef DEBUG
687	cmpq	%rax, %rsi		/* %rsi = kaddr */
688	jnb	1f
689	leaq	.copyin_panic_msg(%rip), %rdi
690	xorl	%eax, %eax
691	call	panic
6921:
693#endif
694	/*
695	 * pass lofault value as 4th argument to do_copy_fault
696	 */
697	leaq	_copyin_err(%rip), %rcx
698
699	movq	%gs:CPU_THREAD, %r9
700	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
701	jb	do_copy_fault
702	jmp	3f
703
704_copyin_err:
705	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
7063:
707	movq	T_COPYOPS(%r9), %rax
708	cmpq	$0, %rax
709	jz	2f
710	/*
711	 * reload args for the copyop
712	 */
713	movq	(%rsp), %rdi
714	movq	0x8(%rsp), %rsi
715	movq	0x10(%rsp), %rdx
716	leave
	/* tail-call the copyop; it supplies the return value */
717	jmp	*CP_COPYIN(%rax)
718
7192:	movl	$-1, %eax
720	leave
721	ret
722	SET_SIZE(copyin)
723
724#elif defined(__i386)
725
/*
 * int copyin(const void *uaddr, void *kaddr, size_t count) -- i386.
 * Args on the stack.  Returns 0 on success, -1 on failure.  Faults are
 * recovered via the shared do_copy_fault path (which builds the frame,
 * so _copyin_err must unwind its pushes); with T_COPYOPS set, the
 * operation is retried through the copyop vector (args still intact
 * on the caller's stack).
 */
726#define	ARG_UADDR	4
727#define	ARG_KADDR	8
728
729	ENTRY(copyin)
730	movl	kernelbase, %ecx
731#ifdef DEBUG
732	cmpl	%ecx, ARG_KADDR(%esp)
733	jnb	1f
734	pushl	%ebp
735	movl	%esp, %ebp
736	pushl	$.copyin_panic_msg
737	call	panic
7381:
739#endif
740	lea	_copyin_err, %eax
741
742	movl	%gs:CPU_THREAD, %edx
743	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
744	jb	do_copy_fault
745	jmp	3f
746
747_copyin_err:
	/* unwind do_copy_fault's frame: lofault, %edi, %esi, %ebp */
748	popl	%ecx
749	popl	%edi
750	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
751	popl	%esi
752	popl	%ebp
7533:
754	movl	T_COPYOPS(%edx), %eax
755	cmpl	$0, %eax
756	jz	2f
	/* tail-call the copyop; it supplies the return value */
757	jmp	*CP_COPYIN(%eax)
758
7592:	movl	$-1, %eax
760	ret
761	SET_SIZE(copyin)
762
763#undef	ARG_UADDR
764#undef	ARG_KADDR
765
766#endif	/* __i386 */
767#endif	/* __lint */
768
769#if defined(__lint)
770
771/* ARGSUSED */
772int
773xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
774{ return (0); }
775
776#else	/* __lint */
777
778#if defined(__amd64)
779
/*
 * int xcopyin_nta(const void *uaddr, void *kaddr, size_t count,
 *     int copy_cached) -- amd64.
 * Like copyin but returns a real errno (EFAULT for a bad uaddr, or
 * the fault errno) instead of -1, and may use the non-temporal copy
 * path when copy_cached == 0, count >= XCOPY_MIN_SIZE, and the
 * alignment constraints hold.  With T_COPYOPS set, the operation is
 * retried through the copyop vector.
 */
780	ENTRY(xcopyin_nta)
781	pushq	%rbp
782	movq	%rsp, %rbp
783	subq	$32, %rsp
784
785	/*
786	 * save args in case we trap and need to rerun as a copyop
787	 * %rcx is consumed in this routine so we don't need to save
788	 * it.
789	 */
790	movq	%rdi, (%rsp)
791	movq	%rsi, 0x8(%rsp)
792	movq	%rdx, 0x10(%rsp)
793
794	movq	kernelbase(%rip), %rax
795#ifdef DEBUG
796	cmpq	%rax, %rsi		/* %rsi = kaddr */
797	jnb	1f
798	leaq	.xcopyin_panic_msg(%rip), %rdi
799	xorl	%eax, %eax
800	call	panic
8011:
802#endif
803	movq	%gs:CPU_THREAD, %r9
804	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
805	jae	4f
806	cmpq	$0, %rcx		/* No non-temporal access? */
807	/*
808	 * pass lofault value as 4th argument to do_copy_fault
809	 */
810	leaq	_xcopyin_err(%rip), %rcx	/* doesn't set rflags */
811	jnz	do_copy_fault		/* use regular access */
812	/*
813	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
814	 */
815	cmpq	$XCOPY_MIN_SIZE, %rdx
816	jb	do_copy_fault
817
818	/*
819	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
820	 * count is COUNT_ALIGN_SIZE aligned.
821	 */
822	movq	%rdi, %r10
823	orq	%rsi, %r10
824	andq	$NTA_ALIGN_MASK, %r10
825	orq	%rdx, %r10
826	andq	$COUNT_ALIGN_MASK, %r10
827	jnz	do_copy_fault
828	jmp	do_copy_fault_nta	/* use non-temporal access */
829
	/* uaddr was not a user address: fail with EFAULT (maybe via copyop) */
8304:
831	movl	$EFAULT, %eax
832	jmp	3f
833
834	/*
835	 * A fault during do_copy_fault or do_copy_fault_nta is
836	 * indicated through an errno value in %rax and we iret from the
837	 * trap handler to here.
838	 */
839_xcopyin_err:
840	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
8413:
842	movq	T_COPYOPS(%r9), %r8
843	cmpq	$0, %r8
844	jz	2f
845
846	/*
847	 * reload args for the copyop
848	 */
849	movq	(%rsp), %rdi
850	movq	0x8(%rsp), %rsi
851	movq	0x10(%rsp), %rdx
852	leave
853	jmp	*CP_XCOPYIN(%r8)
854
	/* no copyops: return the errno already in %rax */
8552:	leave
856	ret
857	SET_SIZE(xcopyin_nta)
858
859#elif defined(__i386)
860
/*
 * int xcopyin_nta(const void *uaddr, void *kaddr, size_t count,
 *     int copy_cached) -- i386.
 * Like copyin but returns a real errno (EFAULT for a bad uaddr, or
 * the fault errno).  Uses do_copy_fault_nta when the CPU supports SSE
 * (use_sse_copy != 0), copy_cached == 0, count >= XCOPY_MIN_SIZE, and
 * the alignment constraints hold; otherwise falls back to the regular
 * do_copy_fault path.
 */
861#define	ARG_UADDR	4
862#define	ARG_KADDR	8
863#define	ARG_COUNT	12
864#define	ARG_CACHED	16
865
866	.globl	use_sse_copy
867
868	ENTRY(xcopyin_nta)
869	movl	kernelbase, %ecx
870	lea	_xcopyin_err, %eax
871	movl	%gs:CPU_THREAD, %edx
872	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
873	jae	4f
874
875	cmpl	$0, use_sse_copy	/* no sse support */
876	jz	do_copy_fault
877
878	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
879	jnz	do_copy_fault
880
881	/*
882	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
883	 */
884	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
885	jb	do_copy_fault
886
887	/*
888	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
889	 * count is COUNT_ALIGN_SIZE aligned.
890	 */
891	movl	ARG_UADDR(%esp), %ecx
892	orl	ARG_KADDR(%esp), %ecx
893	andl	$NTA_ALIGN_MASK, %ecx
894	orl	ARG_COUNT(%esp), %ecx
895	andl	$COUNT_ALIGN_MASK, %ecx
896	jnz	do_copy_fault
897
898	jmp	do_copy_fault_nta	/* use non-temporal access */
899
	/* uaddr was not a user address: fail with EFAULT (maybe via copyop) */
9004:
901	movl	$EFAULT, %eax
902	jmp	3f
903
904	/*
905	 * A fault during do_copy_fault or do_copy_fault_nta is
906	 * indicated through an errno value in %eax and we iret from the
907	 * trap handler to here.
908	 */
909_xcopyin_err:
	/* unwind the copy path's frame: lofault, %edi, %esi, %ebp */
910	popl	%ecx
911	popl	%edi
912	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
913	popl	%esi
914	popl	%ebp
9153:
916	cmpl	$0, T_COPYOPS(%edx)
917	jz	2f
918	movl	T_COPYOPS(%edx), %eax
919	jmp	*CP_XCOPYIN(%eax)
920
9212:	rep; 	ret	/* use 2 byte return instruction when branch target */
922			/* AMD Software Optimization Guide - Section 6.2 */
923	SET_SIZE(xcopyin_nta)
924
925#undef	ARG_UADDR
926#undef	ARG_KADDR
927#undef	ARG_COUNT
928#undef	ARG_CACHED
929
930#endif	/* __i386 */
931#endif	/* __lint */
932
933/*
934 * Copy kernel data to user space.
935 */
936
937#if defined(__lint)
938
939/* ARGSUSED */
940int
941copyout(const void *kaddr, void *uaddr, size_t count)
942{ return (0); }
943
944#else	/* __lint */
945
946#if defined(__amd64)
947
/*
 * int copyout(const void *kaddr, void *uaddr, size_t count) -- amd64.
 * %rdi = kaddr, %rsi = uaddr, %rdx = count.  Returns 0 on success,
 * -1 (per copyout(9F)) on failure.  Faults are recovered via the
 * shared do_copy_fault path; with T_COPYOPS set, the operation is
 * retried through the copyop vector with the saved arguments.
 */
948	ENTRY(copyout)
949	pushq	%rbp
950	movq	%rsp, %rbp
951	subq	$32, %rsp
952
953	/*
954	 * save args in case we trap and need to rerun as a copyop
955	 */
956	movq	%rdi, (%rsp)
957	movq	%rsi, 0x8(%rsp)
958	movq	%rdx, 0x10(%rsp)
959
960	movq	kernelbase(%rip), %rax
961#ifdef DEBUG
962	cmpq	%rax, %rdi		/* %rdi = kaddr */
963	jnb	1f
964	leaq	.copyout_panic_msg(%rip), %rdi
965	xorl	%eax, %eax
966	call	panic
9671:
968#endif
969	/*
970	 * pass lofault value as 4th argument to do_copy_fault
971	 */
972	leaq	_copyout_err(%rip), %rcx
973
974	movq	%gs:CPU_THREAD, %r9
975	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
976	jb	do_copy_fault
977	jmp	3f
978
979_copyout_err:
980	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
9813:
982	movq	T_COPYOPS(%r9), %rax
983	cmpq	$0, %rax
984	jz	2f
985
986	/*
987	 * reload args for the copyop
988	 */
989	movq	(%rsp), %rdi
990	movq	0x8(%rsp), %rsi
991	movq	0x10(%rsp), %rdx
992	leave
	/* tail-call the copyop; it supplies the return value */
993	jmp	*CP_COPYOUT(%rax)
994
9952:	movl	$-1, %eax
996	leave
997	ret
998	SET_SIZE(copyout)
999
1000#elif defined(__i386)
1001
/*
 * int copyout(const void *kaddr, void *uaddr, size_t count) -- i386.
 * Args on the stack.  Returns 0 on success, -1 on failure.  Faults are
 * recovered via the shared do_copy_fault path (which builds the frame,
 * so _copyout_err must unwind its pushes); with T_COPYOPS set, the
 * operation is retried through the copyop vector.
 */
1002#define	ARG_KADDR	4
1003#define	ARG_UADDR	8
1004
1005	ENTRY(copyout)
1006	movl	kernelbase, %ecx
1007#ifdef DEBUG
1008	cmpl	%ecx, ARG_KADDR(%esp)
1009	jnb	1f
1010	pushl	%ebp
1011	movl	%esp, %ebp
1012	pushl	$.copyout_panic_msg
1013	call	panic
10141:
1015#endif
1016	lea	_copyout_err, %eax
1017	movl	%gs:CPU_THREAD, %edx
1018	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1019	jb	do_copy_fault
1020	jmp	3f
1021
1022_copyout_err:
	/* unwind do_copy_fault's frame: lofault, %edi, %esi, %ebp */
1023	popl	%ecx
1024	popl	%edi
1025	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
1026	popl	%esi
1027	popl	%ebp
10283:
1029	movl	T_COPYOPS(%edx), %eax
1030	cmpl	$0, %eax
1031	jz	2f
	/* tail-call the copyop; it supplies the return value */
1032	jmp	*CP_COPYOUT(%eax)
1033
10342:	movl	$-1, %eax
1035	ret
1036	SET_SIZE(copyout)
1037
1038#undef	ARG_UADDR
1039#undef	ARG_KADDR
1040
1041#endif	/* __i386 */
1042#endif	/* __lint */
1043
1044#if defined(__lint)
1045
1046/* ARGSUSED */
1047int
1048xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
1049{ return (0); }
1050
1051#else	/* __lint */
1052
1053#if defined(__amd64)
1054
/*
 * int xcopyout_nta(const void *kaddr, void *uaddr, size_t count,
 *     int copy_cached) -- amd64.
 * Like copyout but returns a real errno (EFAULT for a bad uaddr, or
 * the fault errno) instead of -1, and may use the non-temporal copy
 * path when copy_cached == 0, count >= XCOPY_MIN_SIZE, and the
 * alignment constraints hold.  With T_COPYOPS set, the operation is
 * retried through the copyop vector.
 */
1055	ENTRY(xcopyout_nta)
1056	pushq	%rbp
1057	movq	%rsp, %rbp
1058	subq	$32, %rsp
1059
1060	/*
1061	 * save args in case we trap and need to rerun as a copyop
1062	 */
1063	movq	%rdi, (%rsp)
1064	movq	%rsi, 0x8(%rsp)
1065	movq	%rdx, 0x10(%rsp)
1066
1067	movq	kernelbase(%rip), %rax
1068#ifdef DEBUG
1069	cmpq	%rax, %rdi		/* %rdi = kaddr */
1070	jnb	1f
1071	leaq	.xcopyout_panic_msg(%rip), %rdi
1072	xorl	%eax, %eax
1073	call	panic
10741:
1075#endif
1076	movq	%gs:CPU_THREAD, %r9
1077	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
1078	jae	4f
1079
1080	cmpq	$0, %rcx		/* No non-temporal access? */
1081	/*
1082	 * pass lofault value as 4th argument to do_copy_fault
1083	 */
1084	leaq	_xcopyout_err(%rip), %rcx
1085	jnz	do_copy_fault
1086	/*
1087	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
1088	 */
1089	cmpq	$XCOPY_MIN_SIZE, %rdx
1090	jb	do_copy_fault
1091
1092	/*
1093	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
1094	 * count is COUNT_ALIGN_SIZE aligned.
1095	 */
1096	movq	%rdi, %r10
1097	orq	%rsi, %r10
1098	andq	$NTA_ALIGN_MASK, %r10
1099	orq	%rdx, %r10
1100	andq	$COUNT_ALIGN_MASK, %r10
1101	jnz	do_copy_fault
1102	jmp	do_copy_fault_nta
1103
	/* uaddr was not a user address: fail with EFAULT (maybe via copyop) */
11044:
1105	movl	$EFAULT, %eax
1106	jmp	3f
1107
1108	/*
1109	 * A fault during do_copy_fault or do_copy_fault_nta is
1110	 * indicated through an errno value in %rax and we iret from the
1111	 * trap handler to here.
1112	 */
1113_xcopyout_err:
1114	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
11153:
1116	movq	T_COPYOPS(%r9), %r8
1117	cmpq	$0, %r8
1118	jz	2f
1119
1120	/*
1121	 * reload args for the copyop
1122	 */
1123	movq	(%rsp), %rdi
1124	movq	0x8(%rsp), %rsi
1125	movq	0x10(%rsp), %rdx
1126	leave
1127	jmp	*CP_XCOPYOUT(%r8)
1128
	/* no copyops: return the errno already in %rax */
11292:	leave
1130	ret
1131	SET_SIZE(xcopyout_nta)
1132
1133#elif defined(__i386)
1134
/*
 * int xcopyout_nta(const void *kaddr, void *uaddr, size_t count,
 *     int copy_cached) -- i386.
 * Like copyout but returns a real errno (EFAULT for a bad uaddr, or
 * the fault errno).  Uses do_copy_fault_nta when SSE is available
 * (use_sse_copy != 0), copy_cached == 0, count >= XCOPY_MIN_SIZE, and
 * the alignment constraints hold; otherwise the regular do_copy_fault
 * path.
 *
 * BUG FIX: the minimum-size test previously compared XCOPY_MIN_SIZE
 * against %edx, which at that point holds the thread pointer (loaded
 * from %gs:CPU_THREAD above), not the count.  Since a kernel thread
 * pointer is always >= XCOPY_MIN_SIZE, the check never rejected small
 * copies, letting small aligned copies take the NTA path.  Compare
 * against ARG_COUNT(%esp) instead, matching xcopyin_nta and the
 * amd64 version (which tests count in %rdx).
 */
1135#define	ARG_KADDR	4
1136#define	ARG_UADDR	8
1137#define	ARG_COUNT	12
1138#define	ARG_CACHED	16
1139
1140	ENTRY(xcopyout_nta)
1141	movl	kernelbase, %ecx
1142	lea	_xcopyout_err, %eax
1143	movl	%gs:CPU_THREAD, %edx
1144	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
1145	jae	4f
1146
1147	cmpl	$0, use_sse_copy	/* no sse support */
1148	jz	do_copy_fault
1149
1150	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
1151	jnz	do_copy_fault
1152
1153	/*
1154	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
1155	 */
1156	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
1157	jb	do_copy_fault
1158
1159	/*
1160	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
1161	 * count is COUNT_ALIGN_SIZE aligned.
1162	 */
1163	movl	ARG_UADDR(%esp), %ecx
1164	orl	ARG_KADDR(%esp), %ecx
1165	andl	$NTA_ALIGN_MASK, %ecx
1166	orl	ARG_COUNT(%esp), %ecx
1167	andl	$COUNT_ALIGN_MASK, %ecx
1168	jnz	do_copy_fault
1169	jmp	do_copy_fault_nta
1170
	/* uaddr was not a user address: fail with EFAULT (maybe via copyop) */
11714:
1172	movl	$EFAULT, %eax
1173	jmp	3f
1174
1175	/*
1176	 * A fault during do_copy_fault or do_copy_fault_nta is
1177	 * indicated through an errno value in %eax and we iret from the
1178	 * trap handler to here.
1179	 */
1180_xcopyout_err:
1181	/ restore the original lofault
1182	popl	%ecx
1183	popl	%edi
1184	movl	%ecx, T_LOFAULT(%edx)	/ original lofault
1185	popl	%esi
1186	popl	%ebp
11873:
1188	cmpl	$0, T_COPYOPS(%edx)
1189	jz	2f
1190	movl	T_COPYOPS(%edx), %eax
1191	jmp	*CP_XCOPYOUT(%eax)
1192
11932:	rep;	ret	/* use 2 byte return instruction when branch target */
1194			/* AMD Software Optimization Guide - Section 6.2 */
1195	SET_SIZE(xcopyout_nta)
1196
1197#undef	ARG_UADDR
1198#undef	ARG_KADDR
1199#undef	ARG_COUNT
1200#undef	ARG_CACHED
1201
1202#endif	/* __i386 */
1203#endif	/* __lint */
1204
1205/*
1206 * Copy a null terminated string from one point to another in
1207 * the kernel address space.
1208 */
1209
1210#if defined(__lint)
1211
1212/* ARGSUSED */
1213int
1214copystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
1215{ return (0); }
1216
1217#else	/* __lint */
1218
1219#if defined(__amd64)
1220
/*
 * int copystr(const char *from, char *to, size_t maxlength,
 *     size_t *lencopied) -- amd64.
 * %rdi = from, %rsi = to, %rdx = maxlength, %rcx = lencopied (or NULL).
 * Copies bytes up to and including the NUL terminator.  Returns 0 on
 * success, ENAMETOOLONG if maxlength is exhausted first (including
 * maxlength == 0).  If lencopied is non-NULL, stores the number of
 * bytes copied.  do_copystr is a shared entry: callers (copyinstr et
 * al.) arrive with %r8 = lofault handler address.
 */
1221	ENTRY(copystr)
1222	pushq	%rbp
1223	movq	%rsp, %rbp
1224#ifdef DEBUG
1225	movq	kernelbase(%rip), %rax
1226	cmpq	%rax, %rdi		/* %rdi = from */
1227	jb	0f
1228	cmpq	%rax, %rsi		/* %rsi = to */
1229	jnb	1f
12300:	leaq	.copystr_panic_msg(%rip), %rdi
1231	xorl	%eax, %eax
1232	call	panic
12331:
1234#endif
1235	movq	%gs:CPU_THREAD, %r9
1236	movq	T_LOFAULT(%r9), %r8	/* pass current lofault value as */
1237					/* 5th argument to do_copystr */
1238do_copystr:
1239	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
1240	movq    T_LOFAULT(%r9), %r11	/* save the current lofault */
1241	movq	%r8, T_LOFAULT(%r9)	/* new lofault */
1242
1243	movq	%rdx, %r8		/* save maxlength */
1244
1245	cmpq	$0, %rdx		/* %rdx = maxlength */
1246	je	copystr_enametoolong	/* maxlength == 0 */
1247
	/* byte-at-a-time copy; %r8 counts down the remaining budget */
1248copystr_loop:
1249	decq	%r8
1250	movb	(%rdi), %al
1251	incq	%rdi
1252	movb	%al, (%rsi)
1253	incq	%rsi
1254	cmpb	$0, %al
1255	je	copystr_null		/* null char */
1256	cmpq	$0, %r8
1257	jne	copystr_loop
1258
1259copystr_enametoolong:
1260	movl	$ENAMETOOLONG, %eax
1261	jmp	copystr_out
1262
1263copystr_null:
1264	xorl	%eax, %eax		/* no error */
1265
1266copystr_out:
1267	cmpq	$0, %rcx		/* want length? */
1268	je	copystr_done		/* no */
1269	subq	%r8, %rdx		/* compute length and store it */
1270	movq	%rdx, (%rcx)
1271
1272copystr_done:
1273	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
1274	leave
1275	ret
1276	SET_SIZE(copystr)
1277
1278#elif defined(__i386)
1279
/*
 * int copystr(const char *from, char *to, size_t maxlength,
 *     size_t *lencopied) -- i386.
 * Args on the stack.  Copies bytes up to and including the NUL
 * terminator.  Returns 0 on success, ENAMETOOLONG if maxlength is
 * exhausted first (including maxlength == 0).  If lencopied is
 * non-NULL, stores the number of bytes copied.  do_copystr is a
 * shared entry: callers arrive with %eax = lofault handler address.
 */
1280#define	ARG_FROM	8
1281#define	ARG_TO		12
1282#define	ARG_MAXLEN	16
1283#define	ARG_LENCOPIED	20
1284
1285	ENTRY(copystr)
1286#ifdef DEBUG
1287	pushl	%ebp
1288	movl	%esp, %ebp
1289	movl	kernelbase, %eax
1290	cmpl	%eax, ARG_FROM(%esp)
1291	jb	0f
1292	cmpl	%eax, ARG_TO(%esp)
1293	jnb	1f
12940:	pushl	$.copystr_panic_msg
1295	call	panic
12961:	popl	%ebp
1297#endif
1298	/* get the current lofault address */
1299	movl	%gs:CPU_THREAD, %eax
1300	movl	T_LOFAULT(%eax), %eax
1301do_copystr:
1302	pushl	%ebp			/* setup stack frame */
1303	movl	%esp, %ebp
1304	pushl	%ebx			/* save registers */
1305	pushl	%edi
1306
1307	movl	%gs:CPU_THREAD, %ebx
1308	movl	T_LOFAULT(%ebx), %edi
1309	pushl	%edi			/* save the current lofault */
1310	movl	%eax, T_LOFAULT(%ebx)	/* new lofault */
1311
1312	movl	ARG_MAXLEN(%ebp), %ecx
1313	cmpl	$0, %ecx
1314	je	copystr_enametoolong	/* maxlength == 0 */
1315
1316	movl	ARG_FROM(%ebp), %ebx	/* source address */
1317	movl	ARG_TO(%ebp), %edx	/* destination address */
1318
	/* byte-at-a-time copy; %ecx counts down the remaining budget */
1319copystr_loop:
1320	decl	%ecx
1321	movb	(%ebx), %al
1322	incl	%ebx
1323	movb	%al, (%edx)
1324	incl	%edx
1325	cmpb	$0, %al
1326	je	copystr_null		/* null char */
1327	cmpl	$0, %ecx
1328	jne	copystr_loop
1329
1330copystr_enametoolong:
1331	movl	$ENAMETOOLONG, %eax
1332	jmp	copystr_out
1333
1334copystr_null:
1335	xorl	%eax, %eax		/* no error */
1336
1337copystr_out:
1338	cmpl	$0, ARG_LENCOPIED(%ebp)	/* want length? */
1339	je	copystr_done		/* no */
1340	movl	ARG_MAXLEN(%ebp), %edx
1341	subl	%ecx, %edx		/* compute length and store it */
1342	movl	ARG_LENCOPIED(%ebp), %ecx
1343	movl	%edx, (%ecx)
1344
1345copystr_done:
	/* first pop is the saved lofault (reload %ebx: loop clobbered it) */
1346	popl	%edi
1347	movl	%gs:CPU_THREAD, %ebx
1348	movl	%edi, T_LOFAULT(%ebx)	/* restore the original lofault */
1349
1350	popl	%edi
1351	popl	%ebx
1352	popl	%ebp
1353	ret
1354	SET_SIZE(copystr)
1355
1356#undef	ARG_FROM
1357#undef	ARG_TO
1358#undef	ARG_MAXLEN
1359#undef	ARG_LENCOPIED
1360
1361#endif	/* __i386 */
1362#endif	/* __lint */
1363
1364/*
1365 * Copy a null terminated string from the user address space into
1366 * the kernel address space.
1367 */
1368
1369#if defined(__lint)
1370
/* lint-only stub; the real copyinstr is implemented in assembly below */
/* ARGSUSED */
int
copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
    size_t *lencopied)
{ return (0); }
1376
1377#else	/* __lint */
1378
1379#if defined(__amd64)
1380
	/*
	 * int copyinstr(const char *uaddr, char *kaddr, size_t maxlen,
	 *     size_t *lencopied)
	 *
	 * amd64: copy a null-terminated string from user space into the
	 * kernel.  Delegates the copy to the shared do_copystr tail
	 * (amd64 variant defined earlier in this file), passing the
	 * fault-recovery label _copyinstr_error in %r8.  If uaddr is not
	 * a user address, or a fault occurs, the call is retried through
	 * the thread's copyops vector when one is installed (T_COPYOPS),
	 * otherwise EFAULT is returned.
	 */
	ENTRY(copyinstr)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* DEBUG: kaddr must be a kernel address (>= kernelbase) */
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.copyinstr_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyinstr_error(%rip), %r8

	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	do_copystr
	movq	%gs:CPU_THREAD, %r9
	jmp	3f

	/*
	 * Fault recovery target installed as lofault by do_copystr.
	 * NOTE(review): this assumes the amd64 do_copystr (defined above
	 * this view) leaves curthread in %r9 and the previous lofault in
	 * %r11 when a fault transfers control here — confirm against
	 * that code.
	 */
_copyinstr_error:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYINSTR(%rax)	/* tail call into the copyop */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyinstr)
1434
1435#elif defined(__i386)
1436
#define	ARG_UADDR	4
#define	ARG_KADDR	8

/*
 * int copyinstr(const char *uaddr, char *kaddr, size_t maxlen,
 *     size_t *lencopied)
 *
 * i386: copy a null-terminated string from user space into the kernel.
 * Jumps into the shared do_copystr tail above with %eax holding the
 * fault-recovery label _copyinstr_error to install as lofault.  If
 * uaddr is not below kernelbase, or a fault occurs during the copy,
 * control goes to the thread's copyops handler when present, else
 * EFAULT is returned.
 */
	ENTRY(copyinstr)
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* DEBUG: kaddr must be a kernel address (>= kernelbase) */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyinstr_panic_msg
	call	panic
1:
#endif
	lea	_copyinstr_error, %eax
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

	/*
	 * Fault recovery: unwind the frame built by do_copystr
	 * (pushed %ebp, %ebx, %edi, then the saved lofault) and
	 * reinstall the original lofault before dispatching.
	 */
_copyinstr_error:
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYINSTR(%eax)	/* tail call into the copyop */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyinstr)

#undef	ARG_UADDR
#undef	ARG_KADDR
1477
1478#endif	/* __i386 */
1479#endif	/* __lint */
1480
1481/*
1482 * Copy a null terminated string from the kernel
1483 * address space to the user address space.
1484 */
1485
1486#if defined(__lint)
1487
/* lint-only stub; the real copyoutstr is implemented in assembly below */
/* ARGSUSED */
int
copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
    size_t *lencopied)
{ return (0); }
1493
1494#else	/* __lint */
1495
1496#if defined(__amd64)
1497
	/*
	 * int copyoutstr(const char *kaddr, char *uaddr, size_t maxlen,
	 *     size_t *lencopied)
	 *
	 * amd64: copy a null-terminated string from the kernel out to
	 * user space.  Mirror image of copyinstr above: delegates to the
	 * shared do_copystr tail with _copyoutstr_error as the lofault
	 * handler (%r8), falling back to the thread's copyops vector or
	 * EFAULT on a bad user address or fault.
	 */
	ENTRY(copyoutstr)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* DEBUG: kaddr must be a kernel address (>= kernelbase) */
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.copyoutstr_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyoutstr_error(%rip), %r8

	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jb	do_copystr
	movq	%gs:CPU_THREAD, %r9
	jmp	3f

	/*
	 * Fault recovery target installed as lofault by do_copystr.
	 * NOTE(review): assumes do_copystr leaves curthread in %r9 and
	 * the previous lofault in %r11 on a fault — confirm against the
	 * amd64 do_copystr above this view.
	 */
_copyoutstr_error:
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYOUTSTR(%rax)	/* tail call into the copyop */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyoutstr)
1550
1551#elif defined(__i386)
1552
#define	ARG_KADDR	4
#define	ARG_UADDR	8

/*
 * int copyoutstr(const char *kaddr, char *uaddr, size_t maxlen,
 *     size_t *lencopied)
 *
 * i386: copy a null-terminated string from the kernel out to user
 * space.  Mirror image of the i386 copyinstr above; uses the shared
 * do_copystr tail with _copyoutstr_error installed as lofault, and
 * falls back to the thread's copyops handler or EFAULT.
 */
	ENTRY(copyoutstr)
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* DEBUG: kaddr must be a kernel address (>= kernelbase) */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyoutstr_panic_msg
	call	panic
1:
#endif
	lea	_copyoutstr_error, %eax
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

	/*
	 * Fault recovery: unwind the do_copystr frame (saved lofault,
	 * %edi, %ebx, %ebp) and reinstall the original lofault.
	 */
_copyoutstr_error:
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYOUTSTR(%eax)	/* tail call into the copyop */

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyoutstr)

#undef	ARG_KADDR
#undef	ARG_UADDR
1593
1594#endif	/* __i386 */
1595#endif	/* __lint */
1596
1597/*
1598 * Since all of the fuword() variants are so similar, we have a macro to spit
1599 * them out.  This allows us to create DTrace-unobservable functions easily.
1600 */
1601
1602#if defined(__lint)
1603
1604#if defined(__amd64)
1605
/* lint-only stubs; the real fuwordN() routines are generated by FUWORD below */
/* ARGSUSED */
int
fuword64(const void *addr, uint64_t *dst)
{ return (0); }

#endif

/* ARGSUSED */
int
fuword32(const void *addr, uint32_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword16(const void *addr, uint16_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword8(const void *addr, uint8_t *dst)
{ return (0); }
1627
1628#else	/* __lint */
1629
1630#if defined(__amd64)
1631
1632/*
1633 * (Note that we don't save and reload the arguments here
1634 * because their values are not altered in the copy path)
1635 */
1636
/*
 * FUWORD(name, load-insn, reg, copyops-offset) generates the amd64
 * fuwordN() routines: fetch one word from the user address in %rdi and
 * store it through the kernel pointer in %rsi, returning 0 on success.
 * A lofault handler (_flt_name) is installed around the user-side load
 * only; it is cleared before the store to the kernel destination.  If
 * the address is not a user address, or the load faults, control is
 * redirected to the thread's copyops handler when one is installed
 * (T_COPYOPS), otherwise -1 is returned.
 *
 * NOTE(review): `leaq _flt_NAME, %rdx' uses absolute (not RIP-relative)
 * addressing; fine for statically-linked kernel text, but not PIC.
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	(%rdi), REG;			\
	movq	$0, T_LOFAULT(%r9);		\
	INSTR	REG, (%rsi);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	2f;				\
	jmp	*COPYOP(%rax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD(fuword64, movq, %rax, CP_FUWORD64)
	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1665
1666#elif defined(__i386)
1667
/*
 * i386 variant of FUWORD: args are on the stack (4(%esp) = user addr,
 * 8(%esp) = kernel dst).  Same contract as the amd64 version: 0 on
 * success, lofault protection around the user load only, dispatch to
 * the thread's copyops handler or return -1 on a bad address/fault.
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	movl	$0, T_LOFAULT(%ecx);		\
	INSTR	REG, (%edx);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	2f;				\
	jmp	*COPYOP(%eax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1698
1699#endif	/* __i386 */
1700
1701#undef	FUWORD
1702
1703#endif	/* __lint */
1704
1705/*
1706 * Set user word.
1707 */
1708
1709#if defined(__lint)
1710
1711#if defined(__amd64)
1712
/* lint-only stubs; the real suwordN() routines are generated by SUWORD below */
/* ARGSUSED */
int
suword64(void *addr, uint64_t value)
{ return (0); }

#endif

/* ARGSUSED */
int
suword32(void *addr, uint32_t value)
{ return (0); }

/* ARGSUSED */
int
suword16(void *addr, uint16_t value)
{ return (0); }

/* ARGSUSED */
int
suword8(void *addr, uint8_t value)
{ return (0); }
1734
1735#else	/* lint */
1736
1737#if defined(__amd64)
1738
1739/*
1740 * (Note that we don't save and reload the arguments here
1741 * because their values are not altered in the copy path)
1742 */
1743
/*
 * SUWORD(name, store-insn, reg, copyops-offset) generates the amd64
 * suwordN() routines: store the value in arg2 (%rsi, width-appropriate
 * subregister) at the user address in %rdi.  Returns 0 on success;
 * the user-side store is protected by the _flt_name lofault handler,
 * and failures dispatch to the thread's copyops handler or return -1.
 *
 * NOTE(review): `leaq _flt_NAME, %rdx' uses absolute (not RIP-relative)
 * addressing; fine for statically-linked kernel text, but not PIC.
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	REG, (%rdi);			\
	movq	$0, T_LOFAULT(%r9);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	3f;				\
	jmp	*COPYOP(%rax);			\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD(suword64, movq, %rsi, CP_SUWORD64)
	SUWORD(suword32, movl, %esi, CP_SUWORD32)
	SUWORD(suword16, movw, %si, CP_SUWORD16)
	SUWORD(suword8, movb, %sil, CP_SUWORD8)
1771
1772#elif defined(__i386)
1773
/*
 * i386 variant of SUWORD: args on the stack (4(%esp) = user addr,
 * 8(%esp) = value).  Same contract as the amd64 version.  The copyops
 * dispatch loads the handler into %ecx before the indirect jump
 * (rather than `jmp *COPYOP(%eax)' as FUWORD does) — behaviorally
 * equivalent.
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	movl	$0, T_LOFAULT(%ecx);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	3f;				\
	movl	COPYOP(%eax), %ecx;		\
	jmp	*%ecx;				\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD(suword32, movl, %edx, CP_SUWORD32)
	SUWORD(suword16, movw, %dx, CP_SUWORD16)
	SUWORD(suword8, movb, %dl, CP_SUWORD8)
1804
1805#endif	/* __i386 */
1806
1807#undef	SUWORD
1808
1809#endif	/* __lint */
1810
1811#if defined(__lint)
1812
1813#if defined(__amd64)
1814
/* lint-only stubs; the real fuwordN_noerr() routines are generated below */
/*ARGSUSED*/
void
fuword64_noerr(const void *addr, uint64_t *dst)
{}

#endif

/*ARGSUSED*/
void
fuword32_noerr(const void *addr, uint32_t *dst)
{}

/*ARGSUSED*/
void
fuword8_noerr(const void *addr, uint8_t *dst)
{}

/*ARGSUSED*/
void
fuword16_noerr(const void *addr, uint16_t *dst)
{}
1836
1837#else   /* __lint */
1838
1839#if defined(__amd64)
1840
/*
 * FUWORD_NOERR: fuwordN_noerr() — fetch a word from user address %rdi
 * into kernel pointer %rsi with NO internal fault handler; the caller
 * is expected to provide protection (e.g. on_fault()).  If the address
 * is not a user address it is clamped to kernelbase (cmovnbq), so the
 * access faults there under the caller's handler instead of silently
 * reading kernel memory.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	(%rdi), REG;			\
	INSTR	REG, (%rsi);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword64_noerr, movq, %rax)
	FUWORD_NOERR(fuword32_noerr, movl, %eax)
	FUWORD_NOERR(fuword16_noerr, movw, %ax)
	FUWORD_NOERR(fuword8_noerr, movb, %al)
1854
1855#elif defined(__i386)
1856
/*
 * i386 variant of FUWORD_NOERR: args on the stack (4(%esp) = user addr,
 * 8(%esp) = kernel dst).  No cmov (pre-686 compatibility); a compare
 * and branch clamps a non-user address to kernelbase so the access
 * faults under the caller's fault handler.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	INSTR	REG, (%edx);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword32_noerr, movl, %ecx)
	FUWORD_NOERR(fuword16_noerr, movw, %cx)
	FUWORD_NOERR(fuword8_noerr, movb, %cl)
1872
1873#endif	/* __i386 */
1874
1875#undef	FUWORD_NOERR
1876
1877#endif	/* __lint */
1878
1879#if defined(__lint)
1880
1881#if defined(__amd64)
1882
/* lint-only stubs; the real suwordN_noerr() routines are generated below */
/*ARGSUSED*/
void
suword64_noerr(void *addr, uint64_t value)
{}

#endif

/*ARGSUSED*/
void
suword32_noerr(void *addr, uint32_t value)
{}

/*ARGSUSED*/
void
suword16_noerr(void *addr, uint16_t value)
{}

/*ARGSUSED*/
void
suword8_noerr(void *addr, uint8_t value)
{}
1904
1905#else	/* lint */
1906
1907#if defined(__amd64)
1908
/*
 * SUWORD_NOERR: suwordN_noerr() — store the value in arg2 (%rsi) at
 * user address %rdi with NO internal fault handler; the caller provides
 * protection.  Non-user addresses are clamped to kernelbase so the
 * store faults under the caller's handler.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	REG, (%rdi);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword64_noerr, movq, %rsi)
	SUWORD_NOERR(suword32_noerr, movl, %esi)
	SUWORD_NOERR(suword16_noerr, movw, %si)
	SUWORD_NOERR(suword8_noerr, movb, %sil)
1921
1922#elif defined(__i386)
1923
/*
 * i386 variant of SUWORD_NOERR: args on the stack (4(%esp) = user addr,
 * 8(%esp) = value).  Compare/branch clamp instead of cmov; the store is
 * unprotected here and relies on the caller's fault handler.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:						\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword32_noerr, movl, %edx)
	SUWORD_NOERR(suword16_noerr, movw, %dx)
	SUWORD_NOERR(suword8_noerr, movb, %dl)
1939
1940#endif	/* __i386 */
1941
1942#undef	SUWORD_NOERR
1943
1944#endif	/* lint */
1945
1946
1947#if defined(__lint)
1948
/* lint-only stubs; these are weak aliases of su/fuword routines below */
/*ARGSUSED*/
int
subyte(void *addr, uchar_t value)
{ return (0); }

/*ARGSUSED*/
void
subyte_noerr(void *addr, uchar_t value)
{}

/*ARGSUSED*/
int
fulword(const void *addr, ulong_t *valuep)
{ return (0); }

/*ARGSUSED*/
void
fulword_noerr(const void *addr, ulong_t *valuep)
{}

/*ARGSUSED*/
int
sulword(void *addr, ulong_t valuep)
{ return (0); }

/*ARGSUSED*/
void
sulword_noerr(void *addr, ulong_t valuep)
{}
1978
1979#else
1980
	/*
	 * subyte()/subyte_noerr() are weak aliases for the 8-bit suword
	 * routines; fulword/sulword (and their _noerr forms) alias the
	 * native-word-size routines: 64-bit on amd64, 32-bit on i386.
	 */
	.weak	subyte
	subyte=suword8
	.weak	subyte_noerr
	subyte_noerr=suword8_noerr

#if defined(__amd64)

	.weak	fulword
	fulword=fuword64
	.weak	fulword_noerr
	fulword_noerr=fuword64_noerr
	.weak	sulword
	sulword=suword64
	.weak	sulword_noerr
	sulword_noerr=suword64_noerr

#elif defined(__i386)

	.weak	fulword
	fulword=fuword32
	.weak	fulword_noerr
	fulword_noerr=fuword32_noerr
	.weak	sulword
	sulword=suword32
	.weak	sulword_noerr
	sulword_noerr=suword32_noerr
2007
2008#endif /* __i386 */
2009
2010#endif /* __lint */
2011
2012#if defined(__lint)
2013
2014/*
2015 * Copy a block of storage - must not overlap (from + len <= to).
2016 * No fault handler installed (to be called under on_fault())
2017 */
2018
/* lint-only stubs; the real routines are implemented in assembly below */
/* ARGSUSED */
void
copyout_noerr(const void *kfrom, void *uto, size_t count)
{}

/* ARGSUSED */
void
copyin_noerr(const void *ufrom, void *kto, size_t count)
{}

/*
 * Zero a block of storage in user space
 */

/* ARGSUSED */
void
uzero(void *addr, size_t count)
{}

/*
 * copy a block of storage in user space
 */

/* ARGSUSED */
void
ucopy(const void *ufrom, void *uto, size_t ulength)
{}
2046
2047#else /* __lint */
2048
2049#if defined(__amd64)
2050
	/*
	 * copyin_noerr(ufrom %rdi, kto %rsi, count %rdx): user-to-kernel
	 * block copy with no internal fault handler — per the comment
	 * above, to be called under on_fault().  A non-user source is
	 * replaced with kernelbase so the copy faults immediately under
	 * the caller's handler.  Tail-jumps into the shared do_copy
	 * (defined earlier in this file).
	 */
	ENTRY(copyin_noerr)
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kto */
	jae	1f
	leaq	.cpyin_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rdi		/* ufrom < kernelbase */
	jb	do_copy
	movq	%rax, %rdi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2065
	/*
	 * copyout_noerr(kfrom %rdi, uto %rsi, count %rdx): kernel-to-user
	 * block copy, no internal fault handler (caller uses on_fault()).
	 * A non-user destination is replaced with kernelbase to force a
	 * fault under the caller's handler.
	 */
	ENTRY(copyout_noerr)
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kfrom */
	jae	1f
	leaq	.cpyout_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rsi		/* uto < kernelbase */
	jb	do_copy
	movq	%rax, %rsi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2080
	/*
	 * uzero(addr %rdi, count %rsi): zero user memory via the shared
	 * do_zero tail (defined earlier).  Non-user addresses are
	 * clamped to kernelbase to force a fault there.
	 */
	ENTRY(uzero)
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	do_zero
	movq	%rax, %rdi	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2088
	/*
	 * ucopy(ufrom %rdi, uto %rsi, len %rdx): user-to-user block copy
	 * via do_copy.  BOTH addresses are clamped to kernelbase when
	 * not user addresses, forcing a fault under the caller's
	 * protection.
	 */
	ENTRY(ucopy)
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	1f
	movq	%rax, %rdi
1:
	cmpq	%rax, %rsi
	jb	do_copy
	movq	%rax, %rsi
	jmp	do_copy
	SET_SIZE(ucopy)
2100
2101#elif defined(__i386)
2102
	/*
	 * i386 copyin_noerr(ufrom 4(%esp), kto 8(%esp), count): as the
	 * amd64 version — no internal fault handler; a non-user source
	 * argument is overwritten in place on the stack with kernelbase
	 * to force a fault under the caller's on_fault() protection.
	 */
	ENTRY(copyin_noerr)
	movl	kernelbase, %eax
#ifdef DEBUG
	cmpl	%eax, 8(%esp)
	jae	1f
	pushl	$.cpyin_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 4(%esp)
	jb	do_copy
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2117
	/*
	 * i386 copyout_noerr(kfrom 4(%esp), uto 8(%esp), count): no
	 * internal fault handler; a non-user destination argument is
	 * overwritten on the stack with kernelbase to force a fault.
	 */
	ENTRY(copyout_noerr)
	movl	kernelbase, %eax
#ifdef DEBUG
	cmpl	%eax, 4(%esp)
	jae	1f
	pushl	$.cpyout_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2132
	/*
	 * i386 uzero(addr 4(%esp), count): zero user memory via do_zero;
	 * a non-user address argument is clamped to kernelbase in place.
	 */
	ENTRY(uzero)
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	do_zero
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2140
	/*
	 * i386 ucopy(ufrom 4(%esp), uto 8(%esp), len): user-to-user
	 * copy via do_copy; BOTH address arguments are clamped in place
	 * to kernelbase when not user addresses.
	 */
	ENTRY(ucopy)
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	1f
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
1:
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)
2152
2153#endif	/* __i386 */
2154
#ifdef DEBUG
	/*
	 * Panic message strings used by the DEBUG-only argument checks
	 * in the routines above.  Only assembled in DEBUG kernels.
	 */
	.data
.kcopy_panic_msg:
	.string "kcopy: arguments below kernelbase"
.bcopy_panic_msg:
	.string "bcopy: arguments below kernelbase"
.kzero_panic_msg:
        .string "kzero: arguments below kernelbase"
.bzero_panic_msg:
	.string	"bzero: arguments below kernelbase"
.copyin_panic_msg:
	.string "copyin: kaddr argument below kernelbase"
.xcopyin_panic_msg:
	.string	"xcopyin: kaddr argument below kernelbase"
.copyout_panic_msg:
	.string "copyout: kaddr argument below kernelbase"
.xcopyout_panic_msg:
	.string	"xcopyout: kaddr argument below kernelbase"
.copystr_panic_msg:
	.string	"copystr: arguments in user space"
.copyinstr_panic_msg:
	.string	"copyinstr: kaddr argument not in kernel address space"
.copyoutstr_panic_msg:
	.string	"copyoutstr: kaddr argument not in kernel address space"
.cpyin_ne_pmsg:
	.string "copyin_noerr: argument not in kernel address space"
.cpyout_ne_pmsg:
	.string "copyout_noerr: argument not in kernel address space"
#endif
2184
2185#endif	/* __lint */
2186