xref: /titanic_44/usr/src/uts/intel/ia32/ml/copy.s (revision 9dd0f810214fdc8e1af881a9a5c4b6927629ff9e)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*       Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
27/*       Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T		*/
28/*         All Rights Reserved						*/
29
30/*       Copyright (c) 1987, 1988 Microsoft Corporation			*/
31/*         All Rights Reserved						*/
32
33#pragma ident	"%Z%%M%	%I%	%E% SMI"
34
35#include <sys/errno.h>
36#include <sys/asm_linkage.h>
37
38#if defined(__lint)
39#include <sys/types.h>
40#include <sys/systm.h>
41#else	/* __lint */
42#include "assym.h"
43#endif	/* __lint */
44
45#define	KCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
46#define	XCOPY_MIN_SIZE	128	/* Must be >= 16 bytes */
47/*
 * Non-temporal access (NTA) alignment requirement
49 */
50#define	NTA_ALIGN_SIZE	4	/* Must be at least 4-byte aligned */
51#define	NTA_ALIGN_MASK	_CONST(NTA_ALIGN_SIZE-1)
52#define	COUNT_ALIGN_SIZE	16	/* Must be at least 16-byte aligned */
53#define	COUNT_ALIGN_MASK	_CONST(COUNT_ALIGN_SIZE-1)
54
55/*
56 * Copy a block of storage, returning an error code if `from' or
57 * `to' takes a kernel pagefault which cannot be resolved.
58 * Returns errno value on pagefault error, 0 if all ok
59 */
60
61#if defined(__lint)
62
/* Lint prototype only; the real kcopy is the assembly implementation below. */
/* ARGSUSED */
int
kcopy(const void *from, void *to, size_t count)
{ return (0); }
67
68#else	/* __lint */
69
70	.globl	kernelbase
71
72#if defined(__amd64)
73
	ENTRY(kcopy)
	/*
	 * kcopy(from=%rdi, to=%rsi, count=%rdx)
	 * bcopy-style copy protected by the thread's lofault handler:
	 * returns 0 on success, or the errno value placed in %rax by
	 * the trap handler if either address takes an unresolvable
	 * kernel pagefault.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	/* DEBUG kernels panic if either address is below kernelbase */
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi 		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.kcopy_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_kcopy_copyerr(%rip), %rcx
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */

	/*
	 * Common copy body, also entered from copyin/copyout/xcopy*:
	 * expects %rdi/%rsi/%rdx = from/to/count, %rcx = lofault
	 * value to install, %r9 = curthread.
	 */
do_copy_fault:
	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%rcx, T_LOFAULT(%r9)	/* new lofault */

	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
	movq	%rdx, %rcx		/* %rcx = count */
	shrq	$3, %rcx		/* 8-byte word count */
	rep
	  smovq

	movq	%rdx, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  smovb
	xorl	%eax, %eax		/* return 0 (success) */

	/*
	 * A fault during do_copy_fault is indicated through an errno value
	 * in %rax and we iretq from the trap handler to here.
	 */
_kcopy_copyerr:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
	leave
	ret
	SET_SIZE(kcopy)
119
120#elif defined(__i386)
121
122#define	ARG_FROM	8
123#define	ARG_TO		12
124#define	ARG_COUNT	16
125
	ENTRY(kcopy)
	/*
	 * kcopy(from, to, count) -- lofault-protected bcopy.
	 * Returns 0 on success, or the errno value placed in %eax by
	 * the trap handler if a kernel pagefault cannot be resolved.
	 */
#ifdef DEBUG
	/* DEBUG kernels panic if either address is below kernelbase */
	pushl	%ebp
	movl	%esp, %ebp
	movl	kernelbase, %eax
	cmpl	%eax, ARG_FROM(%ebp)
	jb	0f
	cmpl	%eax, ARG_TO(%ebp)
	jnb	1f
0:	pushl	$.kcopy_panic_msg
	call	panic
1:	popl	%ebp
#endif
	lea	_kcopy_copyerr, %eax	/* lofault value */
	movl	%gs:CPU_THREAD, %edx

	/*
	 * Common copy body, also entered from copyin/copyout/xcopy*:
	 * expects %eax = lofault value, %edx = curthread, and
	 * from/to/count on the stack at ARG_* offsets.
	 */
do_copy_fault:
	pushl	%ebp
	movl	%esp, %ebp		/* setup stack frame */
	pushl	%esi
	pushl	%edi			/* save registers */

	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	movl	ARG_COUNT(%ebp), %ecx
	movl	ARG_FROM(%ebp), %esi
	movl	ARG_TO(%ebp), %edi
	shrl	$2, %ecx		/* word count */
	rep
	  smovl
	movl	ARG_COUNT(%ebp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  smovb
	xorl	%eax, %eax

	/*
	 * A fault during do_copy_fault is indicated through an errno value
	 * in %eax and we iret from the trap handler to here.
	 */
_kcopy_copyerr:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	popl	%ebp
	ret
	SET_SIZE(kcopy)
176
177#undef	ARG_FROM
178#undef	ARG_TO
179#undef	ARG_COUNT
180
181#endif	/* __i386 */
182#endif	/* __lint */
183
184#if defined(__lint)
185
186/*
187 * Copy a block of storage.  Similar to kcopy but uses non-temporal
188 * instructions.
189 */
190
/* Lint prototype only; the real kcopy_nta is the assembly implementation below. */
/* ARGSUSED */
int
kcopy_nta(const void *from, void *to, size_t count, int copy_cached)
{ return (0); }
195
196#else	/* __lint */
197
198#if defined(__amd64)
199
/*
 * Bias src/dst by count and convert count to a negative 8-byte-word
 * index, so the copy loop counts up toward zero (ZF set on completion).
 */
#define	COPY_LOOP_INIT(src, dst, cnt)	\
	addq	cnt, src;			\
	addq	cnt, dst;			\
	shrq	$3, cnt;			\
	neg	cnt

	/* Copy 16 bytes per loop.  Uses %rax and %r8 */
#define	COPY_LOOP_BODY(src, dst, cnt)	\
	prefetchnta	0x100(src, cnt, 8);	\
	movq	(src, cnt, 8), %rax;		\
	movq	0x8(src, cnt, 8), %r8;		\
	movnti	%rax, (dst, cnt, 8);		\
	movnti	%r8, 0x8(dst, cnt, 8);		\
	addq	$2, cnt

	ENTRY(kcopy_nta)
	/*
	 * kcopy_nta(from=%rdi, to=%rsi, count=%rdx, copy_cached=%rcx)
	 * Lofault-protected copy like kcopy, but uses non-temporal
	 * (cache-bypassing movnti) stores when copy_cached is zero and
	 * the size/alignment requirements below are met; otherwise it
	 * falls back to the ordinary do_copy_fault path.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi 		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.kcopy_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif

	movq	%gs:CPU_THREAD, %r9
	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_kcopy_nta_copyerr(%rip), %rcx	/* doesn't set rflags */
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= KCOPY_MIN_SIZE
	 */
	cmpq	$KCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault

	/*
	 * Non-temporal copy body, also entered from xcopyin_nta and
	 * xcopyout_nta; expects %rcx = lofault value to install.
	 */
	ALTENTRY(do_copy_fault_nta)
	movq    %gs:CPU_THREAD, %r9     /* %r9 = thread addr */
	movq    T_LOFAULT(%r9), %r11    /* save the current lofault */
	movq    %rcx, T_LOFAULT(%r9)    /* new lofault */

	/*
	 * COPY_LOOP_BODY uses %rax and %r8
	 */
	COPY_LOOP_INIT(%rdi, %rsi, %rdx)
2:	COPY_LOOP_BODY(%rdi, %rsi, %rdx)
	jnz	2b

	mfence				/* order movnti stores before returning */
	xorl	%eax, %eax		/* return 0 (success) */

	/*
	 * A fault is indicated through an errno value in %rax; the trap
	 * handler iretqs to here with the new lofault already consumed.
	 */
_kcopy_nta_copyerr:
	movq	%r11, T_LOFAULT(%r9)    /* restore original lofault */
	leave
	ret
	SET_SIZE(do_copy_fault_nta)
	SET_SIZE(kcopy_nta)
275
276#elif defined(__i386)
277
278#define	ARG_FROM	8
279#define	ARG_TO		12
280#define	ARG_COUNT	16
281
/*
 * Bias src/dst by count and convert count to a negative 8-byte index,
 * so the copy loop counts up toward zero (ZF set on completion).
 */
#define	COPY_LOOP_INIT(src, dst, cnt)	\
	addl	cnt, src;			\
	addl	cnt, dst;			\
	shrl	$3, cnt;			\
	neg	cnt

	/* Copy 16 bytes per loop iteration.  Uses %esi as scratch */
#define	COPY_LOOP_BODY(src, dst, cnt)	\
	prefetchnta	0x100(src, cnt, 8);	\
	movl	(src, cnt, 8), %esi;		\
	movnti	%esi, (dst, cnt, 8);		\
	movl	0x4(src, cnt, 8), %esi;		\
	movnti	%esi, 0x4(dst, cnt, 8);	\
	movl	0x8(src, cnt, 8), %esi;		\
	movnti	%esi, 0x8(dst, cnt, 8);	\
	movl	0xc(src, cnt, 8), %esi;		\
	movnti	%esi, 0xc(dst, cnt, 8);	\
	addl	$2, cnt

	/*
	 * kcopy_nta is not implemented for 32-bit as no performance
	 * improvement was shown.  We simply jump directly to kcopy
	 * and discard the 4 arguments.
	 */
	ENTRY(kcopy_nta)
	jmp	kcopy

	/*
	 * NOTE(review): this lea follows an unconditional jmp with no
	 * label in between, so it appears unreachable; do_copy_fault_nta
	 * is entered from xcopyin_nta/xcopyout_nta, which load their own
	 * lofault value into %eax first.
	 */
	lea	_kcopy_nta_copyerr, %eax	/* lofault value */
	ALTENTRY(do_copy_fault_nta)
	pushl	%ebp
	movl	%esp, %ebp		/* setup stack frame */
	pushl	%esi
	pushl	%edi

	movl	%gs:CPU_THREAD, %edx
	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	/* COPY_LOOP_BODY needs to use %esi */
	movl	ARG_COUNT(%ebp), %ecx
	movl	ARG_FROM(%ebp), %edi
	movl	ARG_TO(%ebp), %eax
	COPY_LOOP_INIT(%edi, %eax, %ecx)
1:	COPY_LOOP_BODY(%edi, %eax, %ecx)
	jnz	1b
	mfence				/* order movnti stores before returning */

	xorl	%eax, %eax		/* return 0 (success) */
_kcopy_nta_copyerr:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%esi
	leave
	ret
	SET_SIZE(do_copy_fault_nta)
	SET_SIZE(kcopy_nta)
339
340#undef	ARG_FROM
341#undef	ARG_TO
342#undef	ARG_COUNT
343
344#endif	/* __i386 */
345#endif	/* __lint */
346
347#if defined(__lint)
348
/* Lint prototype only; the real bcopy is the assembly implementation below. */
/* ARGSUSED */
void
bcopy(const void *from, void *to, size_t count)
{}
353
354#else	/* __lint */
355
356#if defined(__amd64)
357
	ENTRY(bcopy)
	/*
	 * bcopy(from=%rdi, to=%rsi, count=%rdx)
	 * Unprotected kernel-to-kernel copy: 8-byte rep smovq for the
	 * bulk, rep smovb for the residual bytes.  No lofault handling.
	 */
#ifdef DEBUG
	/* DEBUG: panic on user addresses, but allow a zero-length copy */
	orq	%rdx, %rdx		/* %rdx = count */
	jz	1f
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.bcopy_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
do_copy:
	xchgq	%rdi, %rsi		/* %rsi = source, %rdi = destination */
	movq	%rdx, %rcx		/* %rcx = count */
	shrq	$3, %rcx		/* 8-byte word count */
	rep
	  smovq

	movq	%rdx, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  smovb
	ret

#ifdef DEBUG
	/*
	 * Setup frame on the run-time stack. The end of the input argument
	 * area must be aligned on a 16 byte boundary. The stack pointer %rsp,
	 * always points to the end of the latest allocated stack frame.
	 * panic(const char *format, ...) is a varargs function. When a
	 * function taking variable arguments is called, %rax must be set
	 * to eight times the number of floating point parameters passed
	 * to the function in SSE registers.
	 *
	 * call_panic is shared with the DEBUG paths of kzero and bzero.
	 */
call_panic:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* no variable arguments */
	call	panic			/* %rdi = format string */
#endif
	SET_SIZE(bcopy)
401
402#elif defined(__i386)
403
404#define	ARG_FROM	4
405#define	ARG_TO		8
406#define	ARG_COUNT	12
407
	ENTRY(bcopy)
	/*
	 * bcopy(from, to, count) -- unprotected kernel-to-kernel copy.
	 * Word copy via rep smovl, residual bytes via rep smovb.
	 * %esi/%edi are preserved in %eax/%edx across the copy.
	 */
#ifdef DEBUG
	/* DEBUG: panic on user addresses, but allow a zero-length copy */
	movl	ARG_COUNT(%esp), %eax
	orl	%eax, %eax
	jz	1f
	movl	kernelbase, %eax
	cmpl	%eax, ARG_FROM(%esp)
	jb	0f
	cmpl	%eax, ARG_TO(%esp)
	jnb	1f
0:	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.bcopy_panic_msg
	call	panic
1:
#endif
do_copy:
	movl	%esi, %eax		/* save registers */
	movl	%edi, %edx
	movl	ARG_COUNT(%esp), %ecx
	movl	ARG_FROM(%esp), %esi
	movl	ARG_TO(%esp), %edi

	shrl	$2, %ecx		/* word count */
	rep
	  smovl
	movl	ARG_COUNT(%esp), %ecx
	andl	$3, %ecx		/* bytes left over */
	rep
	  smovb
	movl	%eax, %esi		/* restore registers */
	movl	%edx, %edi
	ret
	SET_SIZE(bcopy)
442
443#undef	ARG_COUNT
444#undef	ARG_FROM
445#undef	ARG_TO
446
447#endif	/* __i386 */
448#endif	/* __lint */
449
450
451/*
452 * Zero a block of storage, returning an error code if we
453 * take a kernel pagefault which cannot be resolved.
454 * Returns errno value on pagefault error, 0 if all ok
455 */
456
457#if defined(__lint)
458
/* Lint prototype only; the real kzero is the assembly implementation below. */
/* ARGSUSED */
int
kzero(void *addr, size_t count)
{ return (0); }
463
464#else	/* __lint */
465
466#if defined(__amd64)
467
	ENTRY(kzero)
	/*
	 * kzero(addr=%rdi, count=%rsi)
	 * Lofault-protected zeroing: returns 0 on success, or the
	 * errno value placed in %rax by the trap handler on a fault.
	 */
#ifdef DEBUG
        cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
        jnb	0f
        leaq	.kzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
	/*
	 * pass lofault value as 3rd argument to do_zero_fault
	 */
	leaq	_kzeroerr(%rip), %rdx

do_zero_fault:
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
	movq	T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%rdx, T_LOFAULT(%r9)	/* new lofault */

	movq	%rsi, %rcx		/* get size in bytes */
	shrq	$3, %rcx		/* count of 8-byte words to zero */
	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
	rep
	  sstoq				/* %rcx = words to clear (%rax=0) */

	movq	%rsi, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  sstob				/* %rcx = residual bytes to clear */

	/*
	 * A fault during do_zero_fault is indicated through an errno value
	 * in %rax when we iretq to here.
	 */
_kzeroerr:
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	ret
	SET_SIZE(kzero)
505
506#elif defined(__i386)
507
508#define	ARG_ADDR	8
509#define	ARG_COUNT	12
510
	ENTRY(kzero)
	/*
	 * kzero(addr, count) -- lofault-protected zeroing.
	 * Returns 0 on success or the errno value placed in %eax by
	 * the trap handler on an unresolvable kernel pagefault.
	 */
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	kernelbase, %eax
        cmpl	%eax, ARG_ADDR(%ebp)
        jnb	0f
        pushl   $.kzero_panic_msg
        call    panic
0:	popl	%ebp
#endif
	lea	_kzeroerr, %eax		/* kzeroerr is lofault value */

do_zero_fault:
	pushl	%ebp			/* save stack base */
	movl	%esp, %ebp		/* set new stack base */
	pushl	%edi			/* save %edi */

	mov	%gs:CPU_THREAD, %edx
	movl	T_LOFAULT(%edx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%edx)	/* new lofault */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	movl	ARG_ADDR(%ebp), %edi	/* %edi <- address of bytes to clear */
	shrl	$2, %ecx		/* Count of double words to zero */
	xorl	%eax, %eax		/* sstol val */
	rep
	  sstol			/* %ecx contains words to clear (%eax=0) */

	movl	ARG_COUNT(%ebp), %ecx	/* get size in bytes */
	andl	$3, %ecx		/* do mod 4 */
	rep
	  sstob			/* %ecx contains residual bytes to clear */

	/*
	 * A fault during do_zero_fault is indicated through an errno value
	 * in %eax when we iret to here.
	 */
_kzeroerr:
	popl	%edi			/* first pop = saved lofault value */
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */
	popl	%edi			/* second pop = caller's %edi */
	popl	%ebp
	ret
	SET_SIZE(kzero)
557
558#undef	ARG_ADDR
559#undef	ARG_COUNT
560
561#endif	/* __i386 */
562#endif	/* __lint */
563
564/*
565 * Zero a block of storage.
566 */
567
568#if defined(__lint)
569
/* Lint prototype only; the real bzero is the assembly implementation below. */
/* ARGSUSED */
void
bzero(void *addr, size_t count)
{}
574
575#else	/* __lint */
576
577#if defined(__amd64)
578
	ENTRY(bzero)
	/*
	 * bzero(addr=%rdi, count=%rsi) -- unprotected zeroing:
	 * 8-byte rep sstoq for the bulk, rep sstob for the residue.
	 */
#ifdef DEBUG
	cmpq	kernelbase(%rip), %rdi	/* %rdi = addr */
	jnb	0f
	leaq	.bzero_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
0:
#endif
do_zero:
	movq	%rsi, %rcx		/* get size in bytes */
	shrq	$3, %rcx		/* count of 8-byte words to zero */
	xorl	%eax, %eax		/* clear %rax; used in sstoq / sstob */
	rep
	  sstoq				/* %rcx = words to clear (%rax=0) */

	movq	%rsi, %rcx
	andq	$7, %rcx		/* bytes left over */
	rep
	  sstob				/* %rcx = residual bytes to clear */
	ret
	SET_SIZE(bzero)
600
601#elif defined(__i386)
602
603#define	ARG_ADDR	4
604#define	ARG_COUNT	8
605
	ENTRY(bzero)
	/*
	 * bzero(addr, count) -- unprotected zeroing.
	 * %edi is preserved in %edx across the rep sstol/sstob.
	 */
#ifdef DEBUG
	movl	kernelbase, %eax
	cmpl	%eax, ARG_ADDR(%esp)
	jnb	0f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.bzero_panic_msg
	call	panic
0:
#endif
do_zero:
	movl	%edi, %edx		/* save caller's %edi */
	movl	ARG_COUNT(%esp), %ecx
	movl	ARG_ADDR(%esp), %edi
	shrl	$2, %ecx		/* count of words to zero */
	xorl	%eax, %eax		/* sstol/sstob store value */
	rep
	  sstol
	movl	ARG_COUNT(%esp), %ecx
	andl	$3, %ecx		/* residual bytes */
	rep
	  sstob
	movl	%edx, %edi		/* restore caller's %edi */
	ret
	SET_SIZE(bzero)
632
633#undef	ARG_ADDR
634#undef	ARG_COUNT
635
636#endif	/* __i386 */
637#endif	/* __lint */
638
639/*
640 * Transfer data to and from user space -
641 * Note that these routines can cause faults
642 * It is assumed that the kernel has nothing at
643 * less than KERNELBASE in the virtual address space.
644 *
645 * Note that copyin(9F) and copyout(9F) are part of the
646 * DDI/DKI which specifies that they return '-1' on "errors."
647 *
648 * Sigh.
649 *
650 * So there's two extremely similar routines - xcopyin_nta() and
651 * xcopyout_nta() which return the errno that we've faithfully computed.
652 * This allows other callers (e.g. uiomove(9F)) to work correctly.
653 * Given that these are used pretty heavily, we expand the calling
654 * sequences inline for all flavours (rather than making wrappers).
655 */
656
657/*
658 * Copy user data to kernel space.
659 */
660
661#if defined(__lint)
662
/* Lint prototype only; the real copyin is the assembly implementation below. */
/* ARGSUSED */
int
copyin(const void *uaddr, void *kaddr, size_t count)
{ return (0); }
667
668#else	/* lint */
669
670#if defined(__amd64)
671
	ENTRY(copyin)
	/*
	 * copyin(uaddr=%rdi, kaddr=%rsi, count=%rdx)
	 * Copy user data to kernel space via the shared do_copy_fault
	 * body.  On a fault (or a bad uaddr) the thread's T_COPYOPS
	 * copyin routine is tried; with no copyops installed, returns
	 * -1 per the DDI copyin(9F) contract.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.copyin_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_copyin_err(%rip), %rcx

	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f

	/*
	 * Fault recovery: the trap handler leaves an errno in %rax and
	 * iretqs to here; fall through into the copyops fallback.
	 */
_copyin_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f
	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_COPYIN(%rax)

2:	movl	$-1, %eax		/* DDI: copyin returns -1 on error */
	leave
	ret
	SET_SIZE(copyin)
722
723#elif defined(__i386)
724
725#define	ARG_UADDR	4
726#define	ARG_KADDR	8
727
	ENTRY(copyin)
	/*
	 * copyin(uaddr, kaddr, count) via the shared do_copy_fault
	 * body; falls back to the thread's T_COPYOPS copyin on fault,
	 * otherwise returns -1 per the DDI copyin(9F) contract.
	 */
	movl	kernelbase, %ecx
#ifdef DEBUG
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyin_panic_msg
	call	panic
1:
#endif
	lea	_copyin_err, %eax	/* lofault value */

	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f

	/* fault recovery: unwind the do_copy_fault frame */
_copyin_err:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYIN(%eax)

2:	movl	$-1, %eax		/* DDI: copyin returns -1 on error */
	ret
	SET_SIZE(copyin)
761
762#undef	ARG_UADDR
763#undef	ARG_KADDR
764
765#endif	/* __i386 */
766#endif	/* __lint */
767
768#if defined(__lint)
769
/* Lint prototype only; the real xcopyin_nta is the assembly implementation below. */
/* ARGSUSED */
int
xcopyin_nta(const void *uaddr, void *kaddr, size_t count, int copy_cached)
{ return (0); }
774
775#else	/* __lint */
776
777#if defined(__amd64)
778
	ENTRY(xcopyin_nta)
	/*
	 * xcopyin_nta(uaddr=%rdi, kaddr=%rsi, count=%rdx, copy_cached=%rcx)
	 * Like copyin but returns the real errno (EFAULT for a bad
	 * uaddr) and may use the non-temporal copy path when
	 * copy_cached is zero and size/alignment requirements are met.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 * %rcx is consumed in this routine so we don't need to save
	 * it.
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.xcopyin_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jae	4f
	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_xcopyin_err(%rip), %rcx	/* doesn't set rflags */
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpq	$XCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr not a user address */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
_xcopyin_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %r8
	cmpq	$0, %r8
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_XCOPYIN(%r8)

2:	leave				/* no copyops: return errno in %rax */
	ret
	SET_SIZE(xcopyin_nta)
857
858#elif defined(__i386)
859
860#define	ARG_UADDR	4
861#define	ARG_KADDR	8
862#define	ARG_COUNT	12
863#define	ARG_CACHED	16
864
865	.globl	use_sse_copy
866
	ENTRY(xcopyin_nta)
	/*
	 * xcopyin_nta(uaddr, kaddr, count, copy_cached)
	 * Like copyin but returns the real errno (EFAULT for a bad
	 * uaddr).  Uses the non-temporal path only when the CPU has
	 * SSE support, copy_cached is zero, and the size/alignment
	 * requirements below are met.
	 */
	movl	kernelbase, %ecx
	lea	_xcopyin_err, %eax
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jae	4f

	cmpl	$0, use_sse_copy	/* no sse support */
	jz	do_copy_fault

	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
	jnz	do_copy_fault

	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpl	$XCOPY_MIN_SIZE, ARG_COUNT(%esp)
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movl	ARG_UADDR(%esp), %ecx
	orl	ARG_KADDR(%esp), %ecx
	andl	$NTA_ALIGN_MASK, %ecx
	orl	ARG_COUNT(%esp), %ecx
	andl	$COUNT_ALIGN_MASK, %ecx
	jnz	do_copy_fault

	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr not a user address */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
_xcopyin_err:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	cmpl	$0, T_COPYOPS(%edx)
	jz	2f
	movl	T_COPYOPS(%edx), %eax
	jmp	*CP_XCOPYIN(%eax)

2:	rep; 	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(xcopyin_nta)
923
924#undef	ARG_UADDR
925#undef	ARG_KADDR
926#undef	ARG_COUNT
927#undef	ARG_CACHED
928
929#endif	/* __i386 */
930#endif	/* __lint */
931
932/*
933 * Copy kernel data to user space.
934 */
935
936#if defined(__lint)
937
/* Lint prototype only; the real copyout is the assembly implementation below. */
/* ARGSUSED */
int
copyout(const void *kaddr, void *uaddr, size_t count)
{ return (0); }
942
943#else	/* __lint */
944
945#if defined(__amd64)
946
	ENTRY(copyout)
	/*
	 * copyout(kaddr=%rdi, uaddr=%rsi, count=%rdx)
	 * Copy kernel data to user space via the shared do_copy_fault
	 * body.  On a fault (or a bad uaddr) the thread's T_COPYOPS
	 * copyout routine is tried; with no copyops installed, returns
	 * -1 per the DDI copyout(9F) contract.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.copyout_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_copyout_err(%rip), %rcx

	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f

	/*
	 * Fault recovery: the trap handler leaves an errno in %rax and
	 * iretqs to here; fall through into the copyops fallback.
	 */
_copyout_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_COPYOUT(%rax)

2:	movl	$-1, %eax		/* DDI: copyout returns -1 on error */
	leave
	ret
	SET_SIZE(copyout)
998
999#elif defined(__i386)
1000
1001#define	ARG_KADDR	4
1002#define	ARG_UADDR	8
1003
	ENTRY(copyout)
	/*
	 * copyout(kaddr, uaddr, count) via the shared do_copy_fault
	 * body; falls back to the thread's T_COPYOPS copyout on fault,
	 * otherwise returns -1 per the DDI copyout(9F) contract.
	 */
	movl	kernelbase, %ecx
#ifdef DEBUG
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyout_panic_msg
	call	panic
1:
#endif
	lea	_copyout_err, %eax	/* lofault value */
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copy_fault
	jmp	3f

	/* fault recovery: unwind the do_copy_fault frame */
_copyout_err:
	popl	%ecx			/* saved lofault */
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/* restore original lofault */
	popl	%esi
	popl	%ebp
3:
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYOUT(%eax)

2:	movl	$-1, %eax		/* DDI: copyout returns -1 on error */
	ret
	SET_SIZE(copyout)
1036
1037#undef	ARG_UADDR
1038#undef	ARG_KADDR
1039
1040#endif	/* __i386 */
1041#endif	/* __lint */
1042
1043#if defined(__lint)
1044
/* Lint prototype only; the real xcopyout_nta is the assembly implementation below. */
/* ARGSUSED */
int
xcopyout_nta(const void *kaddr, void *uaddr, size_t count, int copy_cached)
{ return (0); }
1049
1050#else	/* __lint */
1051
1052#if defined(__amd64)
1053
	ENTRY(xcopyout_nta)
	/*
	 * xcopyout_nta(kaddr=%rdi, uaddr=%rsi, count=%rdx, copy_cached=%rcx)
	 * Like copyout but returns the real errno (EFAULT for a bad
	 * uaddr) and may use the non-temporal copy path when
	 * copy_cached is zero and size/alignment requirements are met.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.xcopyout_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jae	4f

	cmpq	$0, %rcx		/* No non-temporal access? */
	/*
	 * pass lofault value as 4th argument to do_copy_fault
	 */
	leaq	_xcopyout_err(%rip), %rcx
	jnz	do_copy_fault		/* use regular access */
	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpq	$XCOPY_MIN_SIZE, %rdx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movq	%rdi, %r10
	orq	%rsi, %r10
	andq	$NTA_ALIGN_MASK, %r10
	orq	%rdx, %r10
	andq	$COUNT_ALIGN_MASK, %r10
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr not a user address */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %rax and we iret from the
	 * trap handler to here.
	 */
_xcopyout_err:
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	movq	T_COPYOPS(%r9), %r8
	cmpq	$0, %r8
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	leave
	jmp	*CP_XCOPYOUT(%r8)

2:	leave				/* no copyops: return errno in %rax */
	ret
	SET_SIZE(xcopyout_nta)
1131
1132#elif defined(__i386)
1133
1134#define	ARG_KADDR	4
1135#define	ARG_UADDR	8
1136#define	ARG_COUNT	12
1137#define	ARG_CACHED	16
1138
	ENTRY(xcopyout_nta)
	/*
	 * xcopyout_nta(kaddr, uaddr, count, copy_cached)
	 * Like copyout but returns the real errno (EFAULT for a bad
	 * uaddr).  Uses the non-temporal path only when the CPU has
	 * SSE support, copy_cached is zero, and the size/alignment
	 * requirements below are met.
	 */
	movl	kernelbase, %ecx
	lea	_xcopyout_err, %eax
	movl	%gs:CPU_THREAD, %edx
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jae	4f

	cmpl	$0, use_sse_copy	/* no sse support */
	jz	do_copy_fault

	cmpl	$0, ARG_CACHED(%esp)	/* copy_cached hint set? */
	jnz	do_copy_fault

	/*
	 * Make sure cnt is >= XCOPY_MIN_SIZE bytes
	 */
	cmpl	$XCOPY_MIN_SIZE, %edx
	jb	do_copy_fault

	/*
	 * Make sure src and dst are NTA_ALIGN_SIZE aligned,
	 * count is COUNT_ALIGN_SIZE aligned.
	 */
	movl	ARG_UADDR(%esp), %ecx
	orl	ARG_KADDR(%esp), %ecx
	andl	$NTA_ALIGN_MASK, %ecx
	orl	ARG_COUNT(%esp), %ecx
	andl	$COUNT_ALIGN_MASK, %ecx
	jnz	do_copy_fault
	jmp	do_copy_fault_nta	/* use non-temporal access */

4:
	movl	$EFAULT, %eax		/* uaddr not a user address */
	jmp	3f

	/*
	 * A fault during do_copy_fault or do_copy_fault_nta is
	 * indicated through an errno value in %eax and we iret from the
	 * trap handler to here.
	 */
_xcopyout_err:
	/ restore the original lofault
	popl	%ecx
	popl	%edi
	movl	%ecx, T_LOFAULT(%edx)	/ original lofault
	popl	%esi
	popl	%ebp
3:
	cmpl	$0, T_COPYOPS(%edx)
	jz	2f
	movl	T_COPYOPS(%edx), %eax
	jmp	*CP_XCOPYOUT(%eax)

2:	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
	SET_SIZE(xcopyout_nta)
1195
1196#undef	ARG_UADDR
1197#undef	ARG_KADDR
1198#undef	ARG_COUNT
1199#undef	ARG_CACHED
1200
1201#endif	/* __i386 */
1202#endif	/* __lint */
1203
1204/*
1205 * Copy a null terminated string from one point to another in
1206 * the kernel address space.
1207 */
1208
1209#if defined(__lint)
1210
/* Lint prototype only; the real copystr is the assembly implementation below. */
/* ARGSUSED */
int
copystr(const char *from, char *to, size_t maxlength, size_t *lencopied)
{ return (0); }
1215
1216#else	/* __lint */
1217
1218#if defined(__amd64)
1219
	ENTRY(copystr)
	/*
	 * copystr(from=%rdi, to=%rsi, maxlength=%rdx, lencopied=%rcx)
	 * Copy a NUL-terminated string within the kernel address
	 * space.  Returns 0 on success or ENAMETOOLONG if maxlength
	 * is exhausted first; if lencopied is non-NULL, stores the
	 * number of bytes copied (including the NUL).
	 */
	pushq	%rbp
	movq	%rsp, %rbp
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* %rdi = from */
	jb	0f
	cmpq	%rax, %rsi		/* %rsi = to */
	jnb	1f
0:	leaq	.copystr_panic_msg(%rip), %rdi
	xorl	%eax, %eax
	call	panic
1:
#endif
	movq	%gs:CPU_THREAD, %r9
	movq	T_LOFAULT(%r9), %r8	/* pass current lofault value as */
					/* 5th argument to do_copystr */
	/*
	 * Shared string-copy body, also entered from copyinstr with a
	 * fault-recovery lofault value in %r8.  After the lofault is
	 * installed, %r8 is reused as the remaining-byte countdown.
	 */
do_copystr:
	movq	%gs:CPU_THREAD, %r9	/* %r9 = thread addr */
	movq    T_LOFAULT(%r9), %r11	/* save the current lofault */
	movq	%r8, T_LOFAULT(%r9)	/* new lofault */

	movq	%rdx, %r8		/* save maxlength */

	cmpq	$0, %rdx		/* %rdx = maxlength */
	je	copystr_enametoolong	/* maxlength == 0 */

copystr_loop:
	decq	%r8
	movb	(%rdi), %al
	incq	%rdi
	movb	%al, (%rsi)
	incq	%rsi
	cmpb	$0, %al
	je	copystr_null		/* null char */
	cmpq	$0, %r8
	jne	copystr_loop

copystr_enametoolong:
	movl	$ENAMETOOLONG, %eax
	jmp	copystr_out

copystr_null:
	xorl	%eax, %eax		/* no error */

copystr_out:
	cmpq	$0, %rcx		/* want length? */
	je	copystr_done		/* no */
	subq	%r8, %rdx		/* compute length and store it */
	movq	%rdx, (%rcx)

copystr_done:
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
	leave
	ret
	SET_SIZE(copystr)
1276
1277#elif defined(__i386)
1278
1279#define	ARG_FROM	8
1280#define	ARG_TO		12
1281#define	ARG_MAXLEN	16
1282#define	ARG_LENCOPIED	20
1283
	ENTRY(copystr)
	/*
	 * copystr(from, to, maxlength, lencopied)
	 * Copy a NUL-terminated string within the kernel address
	 * space.  Returns 0 on success or ENAMETOOLONG if maxlength
	 * is exhausted first; if lencopied is non-NULL, stores the
	 * number of bytes copied (including the NUL).
	 */
#ifdef DEBUG
	pushl	%ebp
	movl	%esp, %ebp
	movl	kernelbase, %eax
	cmpl	%eax, ARG_FROM(%esp)
	jb	0f
	cmpl	%eax, ARG_TO(%esp)
	jnb	1f
0:	pushl	$.copystr_panic_msg
	call	panic
1:	popl	%ebp
#endif
	/* get the current lofault address */
	movl	%gs:CPU_THREAD, %eax
	movl	T_LOFAULT(%eax), %eax
	/*
	 * Shared string-copy body, also entered from copyinstr with a
	 * fault-recovery lofault value in %eax.
	 */
do_copystr:
	pushl	%ebp			/* setup stack frame */
	movl	%esp, %ebp
	pushl	%ebx			/* save registers */
	pushl	%edi

	movl	%gs:CPU_THREAD, %ebx
	movl	T_LOFAULT(%ebx), %edi
	pushl	%edi			/* save the current lofault */
	movl	%eax, T_LOFAULT(%ebx)	/* new lofault */

	movl	ARG_MAXLEN(%ebp), %ecx
	cmpl	$0, %ecx
	je	copystr_enametoolong	/* maxlength == 0 */

	movl	ARG_FROM(%ebp), %ebx	/* source address */
	movl	ARG_TO(%ebp), %edx	/* destination address */

copystr_loop:
	decl	%ecx
	movb	(%ebx), %al
	incl	%ebx
	movb	%al, (%edx)
	incl	%edx
	cmpb	$0, %al
	je	copystr_null		/* null char */
	cmpl	$0, %ecx
	jne	copystr_loop

copystr_enametoolong:
	movl	$ENAMETOOLONG, %eax
	jmp	copystr_out

copystr_null:
	xorl	%eax, %eax		/* no error */

copystr_out:
	cmpl	$0, ARG_LENCOPIED(%ebp)	/* want length? */
	je	copystr_done		/* no */
	movl	ARG_MAXLEN(%ebp), %edx
	subl	%ecx, %edx		/* compute length and store it */
	movl	ARG_LENCOPIED(%ebp), %ecx
	movl	%edx, (%ecx)

copystr_done:
	popl	%edi			/* saved lofault */
	movl	%gs:CPU_THREAD, %ebx
	movl	%edi, T_LOFAULT(%ebx)	/* restore the original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
	ret
	SET_SIZE(copystr)
1354
1355#undef	ARG_FROM
1356#undef	ARG_TO
1357#undef	ARG_MAXLEN
1358#undef	ARG_LENCOPIED
1359
1360#endif	/* __i386 */
1361#endif	/* __lint */
1362
1363/*
1364 * Copy a null terminated string from the user address space into
1365 * the kernel address space.
1366 */
1367
1368#if defined(__lint)
1369
/* lint-only stub; the real implementation is the assembly below */
/* ARGSUSED */
int
copyinstr(const char *uaddr, char *kaddr, size_t maxlength,
    size_t *lencopied)
{ return (0); }
1375
1376#else	/* __lint */
1377
1378#if defined(__amd64)
1379
	ENTRY(copyinstr)
	/*
	 * copyinstr(9F): copy a null-terminated string from user space
	 * (%rdi = uaddr) into the kernel (%rsi = kaddr); %rdx = maxlength,
	 * %rcx = lencopied.  The actual copy is done by do_copystr;
	 * returns 0, ENAMETOOLONG, or EFAULT.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kaddr must be a kernel address (>= kernelbase), else panic */
	cmpq	%rax, %rsi		/* %rsi = kaddr */
	jnb	1f
	leaq	.copyinstr_panic_msg(%rip), %rdi
	xorl	%eax, %eax		/* panic() is variadic: no SSE args */
	call	panic
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyinstr_error(%rip), %r8

	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	do_copystr
	movq	%gs:CPU_THREAD, %r9
	jmp	3f

_copyinstr_error:
	/*
	 * Fault path, entered via lofault from inside do_copystr.
	 * NOTE(review): assumes do_copystr leaves %r9 = curthread and
	 * %r11 = the saved original lofault -- confirm against
	 * do_copystr (defined earlier in this file).
	 */
	movq	%r11, T_LOFAULT(%r9)	/* restore original lofault */
3:
	/* retry through the installed copyops vector, if any */
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYINSTR(%rax)

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyinstr)
1433
1434#elif defined(__i386)
1435
#define	ARG_UADDR	4
#define	ARG_KADDR	8

	ENTRY(copyinstr)
	/*
	 * copyinstr(9F), i386: copy a null-terminated string from user
	 * space into the kernel.  Stack args: uaddr, kaddr (the copy
	 * itself and the remaining args are handled by do_copystr).
	 * Returns 0, ENAMETOOLONG, or EFAULT.
	 */
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* kaddr must be a kernel address (>= kernelbase), else panic */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyinstr_panic_msg
	call	panic
1:
#endif
	lea	_copyinstr_error, %eax	/* lofault handler for do_copystr */
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

_copyinstr_error:
	/*
	 * Fault during the copy: unwind the do_copystr frame (saved
	 * lofault, %edi, %ebx, %ebp -- the reverse of its pushes) and
	 * restore the original lofault before trying the copyops vector.
	 */
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	/* retry through the installed copyops vector, if any */
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYINSTR(%eax)

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyinstr)

#undef	ARG_UADDR
#undef	ARG_KADDR
1476
1477#endif	/* __i386 */
1478#endif	/* __lint */
1479
1480/*
1481 * Copy a null terminated string from the kernel
1482 * address space to the user address space.
1483 */
1484
1485#if defined(__lint)
1486
/* lint-only stub; the real implementation is the assembly below */
/* ARGSUSED */
int
copyoutstr(const char *kaddr, char *uaddr, size_t maxlength,
    size_t *lencopied)
{ return (0); }
1492
1493#else	/* __lint */
1494
1495#if defined(__amd64)
1496
	ENTRY(copyoutstr)
	/*
	 * copyoutstr(9F): copy a null-terminated string from the kernel
	 * (%rdi = kaddr) out to user space (%rsi = uaddr); %rdx =
	 * maxlength, %rcx = lencopied.  The actual copy is done by
	 * do_copystr; returns 0, ENAMETOOLONG, or EFAULT.
	 */
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp

	/*
	 * save args in case we trap and need to rerun as a copyop
	 */
	movq	%rdi, (%rsp)
	movq	%rsi, 0x8(%rsp)
	movq	%rdx, 0x10(%rsp)
	movq	%rcx, 0x18(%rsp)

	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kaddr must be a kernel address (>= kernelbase), else panic */
	cmpq	%rax, %rdi		/* %rdi = kaddr */
	jnb	1f
	leaq	.copyoutstr_panic_msg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	/*
	 * pass lofault value as 5th argument to do_copystr
	 */
	leaq	_copyoutstr_error(%rip), %r8

	cmpq	%rax, %rsi		/* test uaddr < kernelbase */
	jb	do_copystr
	movq	%gs:CPU_THREAD, %r9
	jmp	3f

_copyoutstr_error:
	/*
	 * Fault path, entered via lofault from inside do_copystr.
	 * NOTE(review): assumes do_copystr leaves %r9 = curthread and
	 * %r11 = the saved original lofault -- confirm against
	 * do_copystr (defined earlier in this file).
	 */
	movq	%r11, T_LOFAULT(%r9)	/* restore the original lofault */
3:
	/* retry through the installed copyops vector, if any */
	movq	T_COPYOPS(%r9), %rax
	cmpq	$0, %rax
	jz	2f

	/*
	 * reload args for the copyop
	 */
	movq	(%rsp), %rdi
	movq	0x8(%rsp), %rsi
	movq	0x10(%rsp), %rdx
	movq	0x18(%rsp), %rcx
	leave
	jmp	*CP_COPYOUTSTR(%rax)

2:	movl	$EFAULT, %eax		/* return EFAULT */
	leave
	ret
	SET_SIZE(copyoutstr)
1549
1550#elif defined(__i386)
1551
#define	ARG_KADDR	4
#define	ARG_UADDR	8

	ENTRY(copyoutstr)
	/*
	 * copyoutstr(9F), i386: copy a null-terminated string from the
	 * kernel out to user space.  Stack args: kaddr, uaddr (the copy
	 * itself and the remaining args are handled by do_copystr).
	 * Returns 0, ENAMETOOLONG, or EFAULT.
	 */
	movl	kernelbase, %ecx
#ifdef DEBUG
	/* kaddr must be a kernel address (>= kernelbase), else panic */
	cmpl	%ecx, ARG_KADDR(%esp)
	jnb	1f
	pushl	%ebp
	movl	%esp, %ebp
	pushl	$.copyoutstr_panic_msg
	call	panic
1:
#endif
	lea	_copyoutstr_error, %eax	/* lofault handler for do_copystr */
	cmpl	%ecx, ARG_UADDR(%esp)	/* test uaddr < kernelbase */
	jb	do_copystr
	movl	%gs:CPU_THREAD, %edx
	jmp	3f

_copyoutstr_error:
	/*
	 * Fault during the copy: unwind the do_copystr frame (saved
	 * lofault, %edi, %ebx, %ebp -- the reverse of its pushes) and
	 * restore the original lofault before trying the copyops vector.
	 */
	popl	%edi
	movl	%gs:CPU_THREAD, %edx
	movl	%edi, T_LOFAULT(%edx)	/* restore the original lofault */

	popl	%edi
	popl	%ebx
	popl	%ebp
3:
	/* retry through the installed copyops vector, if any */
	movl	T_COPYOPS(%edx), %eax
	cmpl	$0, %eax
	jz	2f
	jmp	*CP_COPYOUTSTR(%eax)

2:	movl	$EFAULT, %eax		/* return EFAULT */
	ret
	SET_SIZE(copyoutstr)

#undef	ARG_KADDR
#undef	ARG_UADDR
1592
1593#endif	/* __i386 */
1594#endif	/* __lint */
1595
1596/*
1597 * Since all of the fuword() variants are so similar, we have a macro to spit
1598 * them out.  This allows us to create DTrace-unobservable functions easily.
1599 */
1600
1601#if defined(__lint)
1602
1603#if defined(__amd64)
1604
/* lint-only stubs; the real implementations are the FUWORD macros below */
/* ARGSUSED */
int
fuword64(const void *addr, uint64_t *dst)
{ return (0); }

#endif

/* ARGSUSED */
int
fuword32(const void *addr, uint32_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword16(const void *addr, uint16_t *dst)
{ return (0); }

/* ARGSUSED */
int
fuword8(const void *addr, uint8_t *dst)
{ return (0); }
1626
1627#else	/* __lint */
1628
1629#if defined(__amd64)
1630
1631/*
1632 * (Note that we don't save and reload the arguments here
1633 * because their values are not altered in the copy path)
1634 */
1635
/*
 * FUWORD(NAME, INSTR, REG, COPYOP) generates fuwordN():
 * fetch one value of the given width from user address %rdi and store
 * it at kernel address %rsi.  T_LOFAULT is set around the user access
 * so a fault vectors to _flt_<NAME>.  Returns 0 on success; on a
 * faulted access or a kernel-range (>= kernelbase) source address the
 * call is redirected through the thread's copyops vector if one is
 * installed, otherwise -1 is returned.
 * (Comments are kept outside the macro body because this file is run
 * through cpp, which also handles the /**/ token-paste idiom below.)
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	(%rdi), REG;			\
	movq	$0, T_LOFAULT(%r9);		\
	INSTR	REG, (%rsi);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	2f;				\
	jmp	*COPYOP(%rax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD(fuword64, movq, %rax, CP_FUWORD64)
	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1664
1665#elif defined(__i386)
1666
/*
 * FUWORD(NAME, INSTR, REG, COPYOP), i386 version: generates fuwordN().
 * Stack args: 4(%esp) = user source address, 8(%esp) = kernel
 * destination.  T_LOFAULT is set around the user access so a fault
 * vectors to _flt_<NAME>.  Returns 0 on success; on a faulted access
 * or a kernel-range source address the call is redirected through the
 * thread's copyops vector if installed, otherwise -1.
 */
#define	FUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	movl	$0, T_LOFAULT(%ecx);		\
	INSTR	REG, (%edx);			\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	2f;				\
	jmp	*COPYOP(%eax);			\
2:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD(fuword32, movl, %eax, CP_FUWORD32)
	FUWORD(fuword16, movw, %ax, CP_FUWORD16)
	FUWORD(fuword8, movb, %al, CP_FUWORD8)
1697
1698#endif	/* __i386 */
1699
1700#undef	FUWORD
1701
1702#endif	/* __lint */
1703
1704/*
1705 * Set user word.
1706 */
1707
1708#if defined(__lint)
1709
1710#if defined(__amd64)
1711
/* lint-only stubs; the real implementations are the SUWORD macros below */
/* ARGSUSED */
int
suword64(void *addr, uint64_t value)
{ return (0); }

#endif

/* ARGSUSED */
int
suword32(void *addr, uint32_t value)
{ return (0); }

/* ARGSUSED */
int
suword16(void *addr, uint16_t value)
{ return (0); }

/* ARGSUSED */
int
suword8(void *addr, uint8_t value)
{ return (0); }
1733
1734#else	/* lint */
1735
1736#if defined(__amd64)
1737
1738/*
1739 * (Note that we don't save and reload the arguments here
1740 * because their values are not altered in the copy path)
1741 */
1742
/*
 * SUWORD(NAME, INSTR, REG, COPYOP) generates suwordN():
 * store the value passed in %rsi (accessed via REG at the given width)
 * at user address %rdi.  T_LOFAULT is set around the user store so a
 * fault vectors to _flt_<NAME>.  Returns 0 on success; on a faulted
 * store or a kernel-range (>= kernelbase) address the call is
 * redirected through the thread's copyops vector if installed,
 * otherwise -1.
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movq	%gs:CPU_THREAD, %r9;		\
	cmpq	kernelbase(%rip), %rdi;		\
	jae	1f;				\
	leaq	_flt_/**/NAME, %rdx;		\
	movq	%rdx, T_LOFAULT(%r9);		\
	INSTR	REG, (%rdi);			\
	movq	$0, T_LOFAULT(%r9);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movq	$0, T_LOFAULT(%r9);		\
1:						\
	movq	T_COPYOPS(%r9), %rax;		\
	cmpq	$0, %rax;			\
	jz	3f;				\
	jmp	*COPYOP(%rax);			\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD(suword64, movq, %rsi, CP_SUWORD64)
	SUWORD(suword32, movl, %esi, CP_SUWORD32)
	SUWORD(suword16, movw, %si, CP_SUWORD16)
	SUWORD(suword8, movb, %sil, CP_SUWORD8)
1770
1771#elif defined(__i386)
1772
/*
 * SUWORD(NAME, INSTR, REG, COPYOP), i386 version: generates suwordN().
 * Stack args: 4(%esp) = user destination address, 8(%esp) = value.
 * T_LOFAULT is set around the user store so a fault vectors to
 * _flt_<NAME>.  Returns 0 on success; on a faulted store or a
 * kernel-range address the call is redirected through the thread's
 * copyops vector if installed, otherwise -1.  (The copyop pointer is
 * staged in %ecx here because %eax holds the copyops base.)
 */
#define	SUWORD(NAME, INSTR, REG, COPYOP)	\
	ENTRY(NAME)				\
	movl	%gs:CPU_THREAD, %ecx;		\
	movl	kernelbase, %eax;		\
	cmpl	%eax, 4(%esp);			\
	jae	1f;				\
	lea	_flt_/**/NAME, %edx;		\
	movl	%edx, T_LOFAULT(%ecx);		\
	movl	4(%esp), %eax;			\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	movl	$0, T_LOFAULT(%ecx);		\
	xorl	%eax, %eax;			\
	ret;					\
_flt_/**/NAME:					\
	movl	$0, T_LOFAULT(%ecx);		\
1:						\
	movl	T_COPYOPS(%ecx), %eax;		\
	cmpl	$0, %eax;			\
	jz	3f;				\
	movl	COPYOP(%eax), %ecx;		\
	jmp	*%ecx;				\
3:						\
	movl	$-1, %eax;			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD(suword32, movl, %edx, CP_SUWORD32)
	SUWORD(suword16, movw, %dx, CP_SUWORD16)
	SUWORD(suword8, movb, %dl, CP_SUWORD8)
1803
1804#endif	/* __i386 */
1805
1806#undef	SUWORD
1807
1808#endif	/* __lint */
1809
1810#if defined(__lint)
1811
1812#if defined(__amd64)
1813
/* lint-only stubs; the real implementations are FUWORD_NOERR below */
/*ARGSUSED*/
void
fuword64_noerr(const void *addr, uint64_t *dst)
{}

#endif

/*ARGSUSED*/
void
fuword32_noerr(const void *addr, uint32_t *dst)
{}

/*ARGSUSED*/
void
fuword8_noerr(const void *addr, uint8_t *dst)
{}

/*ARGSUSED*/
void
fuword16_noerr(const void *addr, uint16_t *dst)
{}
1835
1836#else   /* __lint */
1837
1838#if defined(__amd64)
1839
/*
 * FUWORD_NOERR(NAME, INSTR, REG) generates fuwordN_noerr():
 * like fuwordN() but installs no fault handler of its own -- the
 * caller is expected to have one in place (e.g. via on_fault()).
 * A source address >= kernelbase is clamped to kernelbase with
 * cmovnbq so the access faults there instead of reading kernel data.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	(%rdi), REG;			\
	INSTR	REG, (%rsi);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword64_noerr, movq, %rax)
	FUWORD_NOERR(fuword32_noerr, movl, %eax)
	FUWORD_NOERR(fuword16_noerr, movw, %ax)
	FUWORD_NOERR(fuword8_noerr, movb, %al)
1853
1854#elif defined(__i386)
1855
/*
 * FUWORD_NOERR(NAME, INSTR, REG), i386 version: stack args are
 * 4(%esp) = user source address, 8(%esp) = kernel destination.
 * No fault handler is installed here; a source address >= kernelbase
 * is replaced with kernelbase so the access faults there.
 */
#define	FUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:	movl	8(%esp), %edx;			\
	INSTR	(%eax), REG;			\
	INSTR	REG, (%edx);			\
	ret;					\
	SET_SIZE(NAME)

	FUWORD_NOERR(fuword32_noerr, movl, %ecx)
	FUWORD_NOERR(fuword16_noerr, movw, %cx)
	FUWORD_NOERR(fuword8_noerr, movb, %cl)
1871
1872#endif	/* __i386 */
1873
1874#undef	FUWORD_NOERR
1875
1876#endif	/* __lint */
1877
1878#if defined(__lint)
1879
1880#if defined(__amd64)
1881
/* lint-only stubs; the real implementations are SUWORD_NOERR below */
/*ARGSUSED*/
void
suword64_noerr(void *addr, uint64_t value)
{}

#endif

/*ARGSUSED*/
void
suword32_noerr(void *addr, uint32_t value)
{}

/*ARGSUSED*/
void
suword16_noerr(void *addr, uint16_t value)
{}

/*ARGSUSED*/
void
suword8_noerr(void *addr, uint8_t value)
{}
1903
1904#else	/* lint */
1905
1906#if defined(__amd64)
1907
/*
 * SUWORD_NOERR(NAME, INSTR, REG) generates suwordN_noerr():
 * like suwordN() but installs no fault handler of its own -- the
 * caller is expected to have one in place (e.g. via on_fault()).
 * A destination address >= kernelbase is clamped to kernelbase with
 * cmovnbq so the store faults there instead of writing kernel data.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	cmpq	kernelbase(%rip), %rdi;		\
	cmovnbq	kernelbase(%rip), %rdi;		\
	INSTR	REG, (%rdi);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword64_noerr, movq, %rsi)
	SUWORD_NOERR(suword32_noerr, movl, %esi)
	SUWORD_NOERR(suword16_noerr, movw, %si)
	SUWORD_NOERR(suword8_noerr, movb, %sil)
1920
1921#elif defined(__i386)
1922
/*
 * SUWORD_NOERR(NAME, INSTR, REG), i386 version: stack args are
 * 4(%esp) = user destination address, 8(%esp) = value.  No fault
 * handler is installed here; a destination address >= kernelbase is
 * replaced with kernelbase so the store faults there.
 */
#define	SUWORD_NOERR(NAME, INSTR, REG)		\
	ENTRY(NAME)				\
	movl	4(%esp), %eax;			\
	cmpl	kernelbase, %eax;		\
	jb	1f;				\
	movl	kernelbase, %eax;		\
1:						\
	movl	8(%esp), %edx;			\
	INSTR	REG, (%eax);			\
	ret;					\
	SET_SIZE(NAME)

	SUWORD_NOERR(suword32_noerr, movl, %edx)
	SUWORD_NOERR(suword16_noerr, movw, %dx)
	SUWORD_NOERR(suword8_noerr, movb, %dl)
1938
1939#endif	/* __i386 */
1940
1941#undef	SUWORD_NOERR
1942
1943#endif	/* lint */
1944
1945
1946#if defined(__lint)
1947
/* lint-only stubs; the real symbols are weak aliases defined below */
/*ARGSUSED*/
int
subyte(void *addr, uchar_t value)
{ return (0); }

/*ARGSUSED*/
void
subyte_noerr(void *addr, uchar_t value)
{}

/*ARGSUSED*/
int
fulword(const void *addr, ulong_t *valuep)
{ return (0); }

/*ARGSUSED*/
void
fulword_noerr(const void *addr, ulong_t *valuep)
{}

/*ARGSUSED*/
int
sulword(void *addr, ulong_t valuep)
{ return (0); }

/*ARGSUSED*/
void
sulword_noerr(void *addr, ulong_t valuep)
{}
1977
1978#else
1979
	/*
	 * subyte and the natural-long-width fulword/sulword entry points
	 * are weak aliases for the appropriately sized fuword/suword
	 * routines: 64-bit on amd64, 32-bit on i386.
	 */
	.weak	subyte
	subyte=suword8
	.weak	subyte_noerr
	subyte_noerr=suword8_noerr

#if defined(__amd64)

	.weak	fulword
	fulword=fuword64
	.weak	fulword_noerr
	fulword_noerr=fuword64_noerr
	.weak	sulword
	sulword=suword64
	.weak	sulword_noerr
	sulword_noerr=suword64_noerr

#elif defined(__i386)

	.weak	fulword
	fulword=fuword32
	.weak	fulword_noerr
	fulword_noerr=fuword32_noerr
	.weak	sulword
	sulword=suword32
	.weak	sulword_noerr
	sulword_noerr=suword32_noerr

#endif /* __i386 */
2008
2009#endif /* __lint */
2010
2011#if defined(__lint)
2012
2013/*
2014 * Copy a block of storage - must not overlap (from + len <= to).
2015 * No fault handler installed (to be called under on_fault())
2016 */
2017
/* lint-only stubs; the real implementations are the assembly below */
/* ARGSUSED */
void
copyout_noerr(const void *kfrom, void *uto, size_t count)
{}

/* ARGSUSED */
void
copyin_noerr(const void *ufrom, void *kto, size_t count)
{}

/*
 * Zero a block of storage in user space
 */

/* ARGSUSED */
void
uzero(void *addr, size_t count)
{}

/*
 * copy a block of storage in user space
 */

/* ARGSUSED */
void
ucopy(const void *ufrom, void *uto, size_t ulength)
{}

/*
 * copy a string in user space
 */

/* ARGSUSED */
void
ucopystr(const char *ufrom, char *uto, size_t umaxlength, size_t *lencopied)
{}
2054
2055#else /* __lint */
2056
2057#if defined(__amd64)
2058
	ENTRY(copyin_noerr)
	/*
	 * copyin_noerr(ufrom, kto, count): copy user data into the kernel
	 * with no fault handler of its own (to be called under on_fault()).
	 * A user address >= kernelbase is replaced with kernelbase so the
	 * access faults there; the copy itself is done by do_copy.
	 */
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kto must be a kernel address, else panic */
	cmpq	%rax, %rsi		/* %rsi = kto */
	jae	1f
	leaq	.cpyin_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rdi		/* ufrom < kernelbase */
	jb	do_copy
	movq	%rax, %rdi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2073
	ENTRY(copyout_noerr)
	/*
	 * copyout_noerr(kfrom, uto, count): copy kernel data out to user
	 * space with no fault handler of its own (to be called under
	 * on_fault()).  A user address >= kernelbase is replaced with
	 * kernelbase so the access faults there; the copy is done by
	 * do_copy.
	 */
	movq	kernelbase(%rip), %rax
#ifdef DEBUG
	/* kfrom must be a kernel address, else panic */
	cmpq	%rax, %rdi		/* %rdi = kfrom */
	jae	1f
	leaq	.cpyout_ne_pmsg(%rip), %rdi
	jmp	call_panic		/* setup stack and call panic */
1:
#endif
	cmpq	%rax, %rsi		/* uto < kernelbase */
	jb	do_copy
	movq	%rax, %rsi		/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2088
	ENTRY(uzero)
	/*
	 * uzero(addr, count): zero a block of user memory; the work is
	 * done by do_zero.  An address >= kernelbase is replaced with
	 * kernelbase so the access faults there.
	 */
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	jb	do_zero
	movq	%rax, %rdi	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2096
	ENTRY(ucopy)
	/*
	 * ucopy(ufrom, uto, ulength): copy between two user addresses;
	 * the work is done by do_copy.  Either address >= kernelbase is
	 * clamped to kernelbase so the access faults there.
	 */
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	cmovaeq	%rax, %rdi	/* force fault at kernelbase */
	cmpq	%rax, %rsi
	cmovaeq	%rax, %rsi	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)
2105
	ENTRY(ucopystr)
	/*
	 * ucopystr(ufrom, uto, umaxlength, lencopied): copy a string
	 * between two user addresses via do_copystr.  Either address
	 * >= kernelbase is clamped to kernelbase so the access faults
	 * there.  The caller's current lofault handler is passed through
	 * unchanged (no handler of our own is installed).
	 */
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi
	cmovaeq	%rax, %rdi	/* force fault at kernelbase */
	cmpq	%rax, %rsi
	cmovaeq	%rax, %rsi	/* force fault at kernelbase */
	/* do_copystr expects lofault address in %r8 */
	movq	%gs:CPU_THREAD, %r8
	movq	T_LOFAULT(%r8), %r8
	jmp	do_copystr
	SET_SIZE(ucopystr)
2117
2118#elif defined(__i386)
2119
	ENTRY(copyin_noerr)
	/*
	 * copyin_noerr, i386: stack args 4(%esp) = ufrom, 8(%esp) = kto.
	 * No fault handler of its own (to be called under on_fault()).
	 * A user address >= kernelbase is rewritten in the arg slot so
	 * do_copy faults at kernelbase.
	 */
	movl	kernelbase, %eax
#ifdef DEBUG
	/* kto must be a kernel address, else panic */
	cmpl	%eax, 8(%esp)
	jae	1f
	pushl	$.cpyin_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 4(%esp)
	jb	do_copy
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyin_noerr)
2134
	ENTRY(copyout_noerr)
	/*
	 * copyout_noerr, i386: stack args 4(%esp) = kfrom, 8(%esp) = uto.
	 * No fault handler of its own (to be called under on_fault()).
	 * A user address >= kernelbase is rewritten in the arg slot so
	 * do_copy faults at kernelbase.
	 */
	movl	kernelbase, %eax
#ifdef DEBUG
	/* kfrom must be a kernel address, else panic */
	cmpl	%eax, 4(%esp)
	jae	1f
	pushl	$.cpyout_ne_pmsg
	call	panic
1:
#endif
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(copyout_noerr)
2149
	ENTRY(uzero)
	/*
	 * uzero, i386: stack args 4(%esp) = addr, 8(%esp) = count.
	 * An address >= kernelbase is rewritten in the arg slot so
	 * do_zero faults at kernelbase.
	 */
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	do_zero
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
	jmp	do_zero
	SET_SIZE(uzero)
2157
	ENTRY(ucopy)
	/*
	 * ucopy, i386: stack args 4(%esp) = ufrom, 8(%esp) = uto.
	 * Either address >= kernelbase is rewritten in its arg slot so
	 * do_copy faults at kernelbase.
	 */
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	1f
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
1:
	cmpl	%eax, 8(%esp)
	jb	do_copy
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
	jmp	do_copy
	SET_SIZE(ucopy)
2169
	ENTRY(ucopystr)
	/*
	 * ucopystr, i386: stack args 4(%esp) = ufrom, 8(%esp) = uto.
	 * Either address >= kernelbase is rewritten in its arg slot so
	 * do_copystr faults at kernelbase.  The caller's current lofault
	 * handler is passed through unchanged (no handler of our own).
	 */
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)
	jb	1f
	movl	%eax, 4(%esp)	/* force fault at kernelbase */
1:
	cmpl	%eax, 8(%esp)
	jb	2f
	movl	%eax, 8(%esp)	/* force fault at kernelbase */
2:
	/* do_copystr expects the lofault address in %eax */
	movl	%gs:CPU_THREAD, %eax
	movl	T_LOFAULT(%eax), %eax
	jmp	do_copystr
	SET_SIZE(ucopystr)
2185
2186#endif	/* __i386 */
2187
#ifdef DEBUG
	/*
	 * Panic message strings used by the DEBUG-only argument sanity
	 * checks in the routines above.
	 */
	.data
.kcopy_panic_msg:
	.string "kcopy: arguments below kernelbase"
.bcopy_panic_msg:
	.string "bcopy: arguments below kernelbase"
.kzero_panic_msg:
        .string "kzero: arguments below kernelbase"
.bzero_panic_msg:
	.string	"bzero: arguments below kernelbase"
.copyin_panic_msg:
	.string "copyin: kaddr argument below kernelbase"
.xcopyin_panic_msg:
	.string	"xcopyin: kaddr argument below kernelbase"
.copyout_panic_msg:
	.string "copyout: kaddr argument below kernelbase"
.xcopyout_panic_msg:
	.string	"xcopyout: kaddr argument below kernelbase"
.copystr_panic_msg:
	.string	"copystr: arguments in user space"
.copyinstr_panic_msg:
	.string	"copyinstr: kaddr argument not in kernel address space"
.copyoutstr_panic_msg:
	.string	"copyoutstr: kaddr argument not in kernel address space"
.cpyin_ne_pmsg:
	.string "copyin_noerr: argument not in kernel address space"
.cpyout_ne_pmsg:
	.string "copyout_noerr: argument not in kernel address space"
#endif
2217
2218#endif	/* __lint */
2219