xref: /freebsd/sys/i386/linux/linux_machdep.c (revision 10b59a9b4add0320d52c15ce057dd697261e7dfc)
1 /*-
2  * Copyright (c) 2000 Marcel Moolenaar
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer
10  *    in this position and unchanged.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/capability.h>
35 #include <sys/file.h>
36 #include <sys/fcntl.h>
37 #include <sys/imgact.h>
38 #include <sys/lock.h>
39 #include <sys/malloc.h>
40 #include <sys/mman.h>
41 #include <sys/mutex.h>
42 #include <sys/sx.h>
43 #include <sys/priv.h>
44 #include <sys/proc.h>
45 #include <sys/queue.h>
46 #include <sys/resource.h>
47 #include <sys/resourcevar.h>
48 #include <sys/signalvar.h>
49 #include <sys/syscallsubr.h>
50 #include <sys/sysproto.h>
51 #include <sys/unistd.h>
52 #include <sys/wait.h>
53 #include <sys/sched.h>
54 
55 #include <machine/frame.h>
56 #include <machine/psl.h>
57 #include <machine/segments.h>
58 #include <machine/sysarch.h>
59 
60 #include <vm/vm.h>
61 #include <vm/pmap.h>
62 #include <vm/vm_map.h>
63 
64 #include <i386/linux/linux.h>
65 #include <i386/linux/linux_proto.h>
66 #include <compat/linux/linux_ipc.h>
67 #include <compat/linux/linux_misc.h>
68 #include <compat/linux/linux_signal.h>
69 #include <compat/linux/linux_util.h>
70 #include <compat/linux/linux_emul.h>
71 
72 #include <i386/include/pcb.h>			/* needed for pcb definition in linux_set_thread_area */
73 
74 #include "opt_posix.h"
75 
76 extern struct sysentvec elf32_freebsd_sysvec;	/* defined in i386/i386/elf_machdep.c */
77 
78 struct l_descriptor {
79 	l_uint		entry_number;
80 	l_ulong		base_addr;
81 	l_uint		limit;
82 	l_uint		seg_32bit:1;
83 	l_uint		contents:2;
84 	l_uint		read_exec_only:1;
85 	l_uint		limit_in_pages:1;
86 	l_uint		seg_not_present:1;
87 	l_uint		useable:1;
88 };
89 
90 struct l_old_select_argv {
91 	l_int		nfds;
92 	l_fd_set	*readfds;
93 	l_fd_set	*writefds;
94 	l_fd_set	*exceptfds;
95 	struct l_timeval	*timeout;
96 };
97 
98 static int	linux_mmap_common(struct thread *td, l_uintptr_t addr,
99 		    l_size_t len, l_int prot, l_int flags, l_int fd,
100 		    l_loff_t pos);
101 
102 int
103 linux_to_bsd_sigaltstack(int lsa)
104 {
105 	int bsa = 0;
106 
107 	if (lsa & LINUX_SS_DISABLE)
108 		bsa |= SS_DISABLE;
109 	if (lsa & LINUX_SS_ONSTACK)
110 		bsa |= SS_ONSTACK;
111 	return (bsa);
112 }
113 
114 int
115 bsd_to_linux_sigaltstack(int bsa)
116 {
117 	int lsa = 0;
118 
119 	if (bsa & SS_DISABLE)
120 		lsa |= LINUX_SS_DISABLE;
121 	if (bsa & SS_ONSTACK)
122 		lsa |= LINUX_SS_ONSTACK;
123 	return (lsa);
124 }
125 
126 int
127 linux_execve(struct thread *td, struct linux_execve_args *args)
128 {
129 	int error;
130 	char *newpath;
131 	struct image_args eargs;
132 
133 	LCONVPATHEXIST(td, args->path, &newpath);
134 
135 #ifdef DEBUG
136 	if (ldebug(execve))
137 		printf(ARGS(execve, "%s"), newpath);
138 #endif
139 
140 	error = exec_copyin_args(&eargs, newpath, UIO_SYSSPACE,
141 	    args->argp, args->envp);
142 	free(newpath, M_TEMP);
143 	if (error == 0)
144 		error = kern_execve(td, &eargs, NULL);
145 	if (error == 0)
146 		/* A Linux process can exec a FreeBSD one; don't attempt
147 		 * to create emuldata for such a process using
148 		 * linux_proc_init(), since this leads to a KASSERT panic
149 		 * because such a process has p->p_emuldata == NULL.
150 		 */
151 		if (SV_PROC_ABI(td->td_proc) == SV_ABI_LINUX)
152 			error = linux_proc_init(td, 0, 0);
153 	return (error);
154 }
155 
156 struct l_ipc_kludge {
157 	struct l_msgbuf *msgp;
158 	l_long msgtyp;
159 };
160 
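/*
 * Linux multiplexes the System V IPC calls through a single ipc(2)
 * syscall.  The low 16 bits of args->what select the operation; the
 * high 16 bits carry the calling-convention version, which matters for
 * MSGRCV below, where version 0 passes a struct l_ipc_kludge pointer
 * instead of separate msgp/msgtyp arguments.
 */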
161 int
162 linux_ipc(struct thread *td, struct linux_ipc_args *args)
163 {
164 
165 	switch (args->what & 0xFFFF) {
166 	case LINUX_SEMOP: {
167 		struct linux_semop_args a;
168 
169 		a.semid = args->arg1;
170 		a.tsops = args->ptr;
171 		a.nsops = args->arg2;
172 		return (linux_semop(td, &a));
173 	}
174 	case LINUX_SEMGET: {
175 		struct linux_semget_args a;
176 
177 		a.key = args->arg1;
178 		a.nsems = args->arg2;
179 		a.semflg = args->arg3;
180 		return (linux_semget(td, &a));
181 	}
182 	case LINUX_SEMCTL: {
183 		struct linux_semctl_args a;
184 		int error;
185 
186 		a.semid = args->arg1;
187 		a.semnum = args->arg2;
188 		a.cmd = args->arg3;
189 		error = copyin(args->ptr, &a.arg, sizeof(a.arg));
190 		if (error)
191 			return (error);
192 		return (linux_semctl(td, &a));
193 	}
194 	case LINUX_MSGSND: {
195 		struct linux_msgsnd_args a;
196 
197 		a.msqid = args->arg1;
198 		a.msgp = args->ptr;
199 		a.msgsz = args->arg2;
200 		a.msgflg = args->arg3;
201 		return (linux_msgsnd(td, &a));
202 	}
203 	case LINUX_MSGRCV: {
204 		struct linux_msgrcv_args a;
205 
206 		a.msqid = args->arg1;
207 		a.msgsz = args->arg2;
208 		a.msgflg = args->arg3;
209 		if ((args->what >> 16) == 0) {
210 			struct l_ipc_kludge tmp;
211 			int error;
212 
213 			if (args->ptr == NULL)
214 				return (EINVAL);
215 			error = copyin(args->ptr, &tmp, sizeof(tmp));
216 			if (error)
217 				return (error);
218 			a.msgp = tmp.msgp;
219 			a.msgtyp = tmp.msgtyp;
220 		} else {
221 			a.msgp = args->ptr;
222 			a.msgtyp = args->arg5;
223 		}
224 		return (linux_msgrcv(td, &a));
225 	}
226 	case LINUX_MSGGET: {
227 		struct linux_msgget_args a;
228 
229 		a.key = args->arg1;
230 		a.msgflg = args->arg2;
231 		return (linux_msgget(td, &a));
232 	}
233 	case LINUX_MSGCTL: {
234 		struct linux_msgctl_args a;
235 
236 		a.msqid = args->arg1;
237 		a.cmd = args->arg2;
238 		a.buf = args->ptr;
239 		return (linux_msgctl(td, &a));
240 	}
241 	case LINUX_SHMAT: {
242 		struct linux_shmat_args a;
243 
244 		a.shmid = args->arg1;
245 		a.shmaddr = args->ptr;
246 		a.shmflg = args->arg2;
247 		a.raddr = (l_ulong *)args->arg3;
248 		return (linux_shmat(td, &a));
249 	}
250 	case LINUX_SHMDT: {
251 		struct linux_shmdt_args a;
252 
253 		a.shmaddr = args->ptr;
254 		return (linux_shmdt(td, &a));
255 	}
256 	case LINUX_SHMGET: {
257 		struct linux_shmget_args a;
258 
259 		a.key = args->arg1;
260 		a.size = args->arg2;
261 		a.shmflg = args->arg3;
262 		return (linux_shmget(td, &a));
263 	}
264 	case LINUX_SHMCTL: {
265 		struct linux_shmctl_args a;
266 
267 		a.shmid = args->arg1;
268 		a.cmd = args->arg2;
269 		a.buf = args->ptr;
270 		return (linux_shmctl(td, &a));
271 	}
272 	default:
273 		break;
274 	}
275 
276 	return (EINVAL);
277 }
278 
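/*
 * The old select(2) entry point takes a single pointer to a block of
 * arguments; unpack it and hand the call off to linux_select().
 */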
279 int
280 linux_old_select(struct thread *td, struct linux_old_select_args *args)
281 {
282 	struct l_old_select_argv linux_args;
283 	struct linux_select_args newsel;
284 	int error;
285 
286 #ifdef DEBUG
287 	if (ldebug(old_select))
288 		printf(ARGS(old_select, "%p"), args->ptr);
289 #endif
290 
291 	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
292 	if (error)
293 		return (error);
294 
295 	newsel.nfds = linux_args.nfds;
296 	newsel.readfds = linux_args.readfds;
297 	newsel.writefds = linux_args.writefds;
298 	newsel.exceptfds = linux_args.exceptfds;
299 	newsel.timeout = linux_args.timeout;
300 	return (linux_select(td, &newsel));
301 }
302 
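/*
 * Install the TLS descriptor passed via clone(2) with CLONE_SETTLS:
 * copy in the struct l_user_desc, build an i386 segment descriptor
 * from it and load it into the thread's %gs.
 */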
303 int
304 linux_set_cloned_tls(struct thread *td, void *desc)
305 {
306 	struct segment_descriptor sd;
307 	struct l_user_desc info;
308 	int idx, error;
309 	int a[2];
310 
311 	error = copyin(desc, &info, sizeof(struct l_user_desc));
312 	if (error) {
313 		printf(LMSG("copyin failed!"));
314 	} else {
315 		idx = info.entry_number;
316 
317 		/*
318 		 * It looks like we are getting the idx we returned
319 		 * in the set_thread_area() syscall.
320 		 */
321 		if (idx != 6 && idx != 3) {
322 			printf(LMSG("resetting idx!"));
323 			idx = 3;
324 		}
325 
326 		/* This does not happen in practice. */
327 		if (idx == 6) {
328 			/* We might copy out the entry_number as 3. */
329 			info.entry_number = 3;
330 			error = copyout(&info, desc, sizeof(struct l_user_desc));
331 			if (error)
332 				printf(LMSG("copyout failed!"));
333 		}
334 
335 		a[0] = LINUX_LDT_entry_a(&info);
336 		a[1] = LINUX_LDT_entry_b(&info);
337 
338 		memcpy(&sd, &a, sizeof(a));
339 #ifdef DEBUG
340 		if (ldebug(clone))
341 			printf("Segment created in clone with "
342 			"CLONE_SETTLS: lobase: %x, hibase: %x, "
343 			"lolimit: %x, hilimit: %x, type: %i, "
344 			"dpl: %i, p: %i, xx: %i, def32: %i, "
345 			"gran: %i\n", sd.sd_lobase, sd.sd_hibase,
346 			sd.sd_lolimit, sd.sd_hilimit, sd.sd_type,
347 			sd.sd_dpl, sd.sd_p, sd.sd_xx,
348 			sd.sd_def32, sd.sd_gran);
349 #endif
350 
351 		/* set %gs */
352 		td->td_pcb->pcb_gsd = sd;
353 		td->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
354 	}
355 
356 	return (error);
357 }
358 
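/*
 * Set the user stack pointer (%esp) in the thread's trap frame to the
 * given stack address.
 */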
359 int
360 linux_set_upcall_kse(struct thread *td, register_t stack)
361 {
362 
363 	td->td_frame->tf_esp = stack;
364 
365 	return (0);
366 }
367 
368 #define STACK_SIZE  (2 * 1024 * 1024)
369 #define GUARD_SIZE  (4 * PAGE_SIZE)
370 
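/*
 * mmap2(2) passes its file offset in units of pages (pgoff), while the
 * old mmap(2) below takes a pointer to a block of arguments whose
 * offset field is already in bytes; both are funneled into
 * linux_mmap_common(), which expects a byte offset.
 */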
371 int
372 linux_mmap2(struct thread *td, struct linux_mmap2_args *args)
373 {
374 
375 #ifdef DEBUG
376 	if (ldebug(mmap2))
377 		printf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
378 		    (void *)args->addr, args->len, args->prot,
379 		    args->flags, args->fd, args->pgoff);
380 #endif
381 
382 	return (linux_mmap_common(td, args->addr, args->len, args->prot,
383 		args->flags, args->fd, (uint64_t)(uint32_t)args->pgoff *
384 		PAGE_SIZE));
385 }
386 
387 int
388 linux_mmap(struct thread *td, struct linux_mmap_args *args)
389 {
390 	int error;
391 	struct l_mmap_argv linux_args;
392 
393 	error = copyin(args->ptr, &linux_args, sizeof(linux_args));
394 	if (error)
395 		return (error);
396 
397 #ifdef DEBUG
398 	if (ldebug(mmap))
399 		printf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
400 		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
401 		    linux_args.flags, linux_args.fd, linux_args.pgoff);
402 #endif
403 
404 	return (linux_mmap_common(td, linux_args.addr, linux_args.len,
405 	    linux_args.prot, linux_args.flags, linux_args.fd,
406 	    (uint32_t)linux_args.pgoff));
407 }
408 
409 static int
410 linux_mmap_common(struct thread *td, l_uintptr_t addr, l_size_t len, l_int prot,
411     l_int flags, l_int fd, l_loff_t pos)
412 {
413 	struct proc *p = td->td_proc;
414 	struct mmap_args /* {
415 		caddr_t addr;
416 		size_t len;
417 		int prot;
418 		int flags;
419 		int fd;
420 		long pad;
421 		off_t pos;
422 	} */ bsd_args;
423 	int error;
424 	struct file *fp;
425 
426 	error = 0;
427 	bsd_args.flags = 0;
428 	fp = NULL;
429 
430 	/*
431 	 * Linux mmap(2):
432 	 * You must specify exactly one of MAP_SHARED and MAP_PRIVATE
433 	 */
434 	if (!((flags & LINUX_MAP_SHARED) ^ (flags & LINUX_MAP_PRIVATE)))
435 		return (EINVAL);
436 
437 	if (flags & LINUX_MAP_SHARED)
438 		bsd_args.flags |= MAP_SHARED;
439 	if (flags & LINUX_MAP_PRIVATE)
440 		bsd_args.flags |= MAP_PRIVATE;
441 	if (flags & LINUX_MAP_FIXED)
442 		bsd_args.flags |= MAP_FIXED;
443 	if (flags & LINUX_MAP_ANON) {
444 		/* Enforce pos to be on a page boundary, then ignore. */
445 		if ((pos & PAGE_MASK) != 0)
446 			return (EINVAL);
447 		pos = 0;
448 		bsd_args.flags |= MAP_ANON;
449 	} else
450 		bsd_args.flags |= MAP_NOSYNC;
451 	if (flags & LINUX_MAP_GROWSDOWN)
452 		bsd_args.flags |= MAP_STACK;
453 
454 	/*
455 	 * PROT_READ, PROT_WRITE, or PROT_EXEC implies PROT_READ and PROT_EXEC
456 	 * on Linux/i386. We do this to ensure maximum compatibility.
457 	 * Linux/ia64 does the same in i386 emulation mode.
458 	 */
459 	bsd_args.prot = prot;
460 	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
461 		bsd_args.prot |= PROT_READ | PROT_EXEC;
462 
463 	/* Linux does not check file descriptor when MAP_ANONYMOUS is set. */
464 	bsd_args.fd = (bsd_args.flags & MAP_ANON) ? -1 : fd;
465 	if (bsd_args.fd != -1) {
466 		/*
467 		 * Linux follows Solaris mmap(2) description:
468 		 * The file descriptor fildes is opened with
469 		 * read permission, regardless of the
470 		 * protection options specified.
471 		 *
472 		 * Checking just CAP_MMAP is fine here, since the real work
473 		 * is done in the FreeBSD mmap().
474 		 */
475 
476 		if ((error = fget(td, bsd_args.fd, CAP_MMAP, &fp)) != 0)
477 			return (error);
478 		if (fp->f_type != DTYPE_VNODE) {
479 			fdrop(fp, td);
480 			return (EINVAL);
481 		}
482 
483 		/* Linux mmap() just fails for O_WRONLY files */
484 		if (!(fp->f_flag & FREAD)) {
485 			fdrop(fp, td);
486 			return (EACCES);
487 		}
488 
489 		fdrop(fp, td);
490 	}
491 
492 	if (flags & LINUX_MAP_GROWSDOWN) {
493 		/*
494 		 * The Linux MAP_GROWSDOWN option does not limit auto
495 		 * growth of the region.  Linux mmap with this option
496 		 * takes as addr the initial BOS, and as len, the initial
497 		 * region size.  It can then grow down from addr without
498 		 * limit.  However, Linux threads have an implicit internal
499 		 * stack size limit of STACK_SIZE.  It's just not
500 		 * enforced explicitly in Linux.  But here we impose
501 		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
502 		 * region, since we can do this with our mmap.
503 		 *
504 		 * Our mmap with MAP_STACK takes addr as the maximum
505 		 * downsize limit on BOS, and as len the max size of
506 		 * the region.  It then maps the top SGROWSIZ bytes,
507 		 * and auto grows the region down, up to the limit
508 		 * in addr.
509 		 *
510 		 * If we don't use the MAP_STACK option, the effect
511 		 * of this code is to allocate a stack region of a
512 		 * fixed size of (STACK_SIZE - GUARD_SIZE).
513 		 */
514 
515 		if ((caddr_t)PTRIN(addr) + len > p->p_vmspace->vm_maxsaddr) {
516 			/*
517 			 * Some linux apps will attempt to mmap
518 			 * thread stacks near the top of their
519 			 * address space.  If their TOS is greater
520 			 * than vm_maxsaddr, vm_map_growstack()
521 			 * will confuse the thread stack with the
522 			 * process stack and deliver a SEGV if they
523 			 * attempt to grow the thread stack past their
524 			 * current stacksize rlimit.  To avoid this,
525 			 * adjust vm_maxsaddr upwards to reflect
526 			 * the current stacksize rlimit rather
527 			 * than the maximum possible stacksize.
528 			 * It would be better to adjust the
529 			 * mmap'ed region, but some apps do not check
530 			 * mmap's return value.
531 			 */
532 			PROC_LOCK(p);
533 			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
534 			    lim_cur(p, RLIMIT_STACK);
535 			PROC_UNLOCK(p);
536 		}
537 
538 		/*
539 		 * This gives us our maximum stack size and a new BOS.
540 		 * If we're using VM_STACK, then mmap will just map
541 		 * the top SGROWSIZ bytes, and let the stack grow down
542 		 * to the limit at BOS.  If we're not using VM_STACK
543 		 * we map the full stack, since we don't have a way
544 		 * to autogrow it.
545 		 */
546 		if (len > STACK_SIZE - GUARD_SIZE) {
547 			bsd_args.addr = (caddr_t)PTRIN(addr);
548 			bsd_args.len = len;
549 		} else {
550 			bsd_args.addr = (caddr_t)PTRIN(addr) -
551 			    (STACK_SIZE - GUARD_SIZE - len);
552 			bsd_args.len = STACK_SIZE - GUARD_SIZE;
553 		}
554 	} else {
555 		bsd_args.addr = (caddr_t)PTRIN(addr);
556 		bsd_args.len  = len;
557 	}
558 	bsd_args.pos = pos;
559 
560 #ifdef DEBUG
561 	if (ldebug(mmap))
562 		printf("-> %s(%p, %d, %d, 0x%08x, %d, 0x%x)\n",
563 		    __func__,
564 		    (void *)bsd_args.addr, bsd_args.len, bsd_args.prot,
565 		    bsd_args.flags, bsd_args.fd, (int)bsd_args.pos);
566 #endif
567 	error = sys_mmap(td, &bsd_args);
568 #ifdef DEBUG
569 	if (ldebug(mmap))
570 		printf("-> %s() return: 0x%x (0x%08x)\n",
571 			__func__, error, (u_int)td->td_retval[0]);
572 #endif
573 	return (error);
574 }
575 
576 int
577 linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
578 {
579 	struct mprotect_args bsd_args;
580 
581 	bsd_args.addr = uap->addr;
582 	bsd_args.len = uap->len;
583 	bsd_args.prot = uap->prot;
584 	if (bsd_args.prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
585 		bsd_args.prot |= PROT_READ | PROT_EXEC;
586 	return (sys_mprotect(td, &bsd_args));
587 }
588 
589 int
590 linux_pipe(struct thread *td, struct linux_pipe_args *args)
591 {
592 	int error;
593 	int fildes[2];
594 
595 #ifdef DEBUG
596 	if (ldebug(pipe))
597 		printf(ARGS(pipe, "*"));
598 #endif
599 
600 	error = kern_pipe(td, fildes);
601 	if (error)
602 		return (error);
603 
604 	/* XXX: Close descriptors on error. */
605 	return (copyout(fildes, args->pipefds, sizeof fildes));
606 }
607 
608 int
609 linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
610 {
611 	int error;
612 	struct i386_ioperm_args iia;
613 
614 	iia.start = args->start;
615 	iia.length = args->length;
616 	iia.enable = args->enable;
617 	error = i386_set_ioperm(td, &iia);
618 	return (error);
619 }
620 
621 int
622 linux_iopl(struct thread *td, struct linux_iopl_args *args)
623 {
624 	int error;
625 
626 	if (args->level < 0 || args->level > 3)
627 		return (EINVAL);
628 	if ((error = priv_check(td, PRIV_IO)) != 0)
629 		return (error);
630 	if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
631 		return (error);
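	/*
	 * PSL_IOPL is the two-bit I/O privilege level field in %eflags;
	 * PSL_IOPL / 3 equals one IOPL step, so this stores args->level
	 * (0-3) into that field.
	 */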
632 	td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
633 	    (args->level * (PSL_IOPL / 3));
634 	return (0);
635 }
636 
637 int
638 linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
639 {
640 	int error;
641 	struct i386_ldt_args ldt;
642 	struct l_descriptor ld;
643 	union descriptor desc;
644 	int size, written;
645 
646 	switch (uap->func) {
647 	case 0x00: /* read_ldt */
648 		ldt.start = 0;
649 		ldt.descs = uap->ptr;
650 		ldt.num = uap->bytecount / sizeof(union descriptor);
651 		error = i386_get_ldt(td, &ldt);
652 		td->td_retval[0] *= sizeof(union descriptor);
653 		break;
654 	case 0x02: /* read_default_ldt = 0 */
655 		size = 5*sizeof(struct l_desc_struct);
656 		if (size > uap->bytecount)
657 			size = uap->bytecount;
658 		for (written = error = 0; written < size && error == 0; written++)
659 			error = subyte((char *)uap->ptr + written, 0);
660 		td->td_retval[0] = written;
661 		break;
662 	case 0x01: /* write_ldt */
663 	case 0x11: /* write_ldt */
664 		if (uap->bytecount != sizeof(ld))
665 			return (EINVAL);
666 
667 		error = copyin(uap->ptr, &ld, sizeof(ld));
668 		if (error)
669 			return (error);
670 
671 		ldt.start = ld.entry_number;
672 		ldt.descs = &desc;
673 		ldt.num = 1;
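		/*
		 * Split the Linux descriptor into the i386 hardware layout:
		 * limit and base are broken into low/high parts, and the
		 * read_exec_only and seg_not_present bits are inverted to
		 * the hardware "writable" and "present" senses.
		 */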
674 		desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
675 		desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
676 		desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
677 		desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
678 		desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
679 			(ld.contents << 2);
680 		desc.sd.sd_dpl = 3;
681 		desc.sd.sd_p = (ld.seg_not_present ^ 1);
682 		desc.sd.sd_xx = 0;
683 		desc.sd.sd_def32 = ld.seg_32bit;
684 		desc.sd.sd_gran = ld.limit_in_pages;
685 		error = i386_set_ldt(td, &ldt, &desc);
686 		break;
687 	default:
688 		error = ENOSYS;
689 		break;
690 	}
691 
692 	if (error == EOPNOTSUPP) {
693 		printf("linux: modify_ldt needs kernel option USER_LDT\n");
694 		error = ENOSYS;
695 	}
696 
697 	return (error);
698 }
699 
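/*
 * Old-style sigaction(2): the userland structure (l_osigaction_t) holds
 * a single-word signal mask, so widen it into the full l_sigaction_t
 * before calling linux_do_sigaction() and narrow the result on the way
 * back out.
 */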
700 int
701 linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
702 {
703 	l_osigaction_t osa;
704 	l_sigaction_t act, oact;
705 	int error;
706 
707 #ifdef DEBUG
708 	if (ldebug(sigaction))
709 		printf(ARGS(sigaction, "%d, %p, %p"),
710 		    args->sig, (void *)args->nsa, (void *)args->osa);
711 #endif
712 
713 	if (args->nsa != NULL) {
714 		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
715 		if (error)
716 			return (error);
717 		act.lsa_handler = osa.lsa_handler;
718 		act.lsa_flags = osa.lsa_flags;
719 		act.lsa_restorer = osa.lsa_restorer;
720 		LINUX_SIGEMPTYSET(act.lsa_mask);
721 		act.lsa_mask.__bits[0] = osa.lsa_mask;
722 	}
723 
724 	error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
725 	    args->osa ? &oact : NULL);
726 
727 	if (args->osa != NULL && !error) {
728 		osa.lsa_handler = oact.lsa_handler;
729 		osa.lsa_flags = oact.lsa_flags;
730 		osa.lsa_restorer = oact.lsa_restorer;
731 		osa.lsa_mask = oact.lsa_mask.__bits[0];
732 		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
733 	}
734 
735 	return (error);
736 }
737 
738 /*
739  * Linux has two extra args, restart and oldmask.  We don't use these,
740  * but it seems that "restart" is actually a context pointer that
741  * enables the signal to happen with a different register set.
742  */
743 int
744 linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
745 {
746 	sigset_t sigmask;
747 	l_sigset_t mask;
748 
749 #ifdef DEBUG
750 	if (ldebug(sigsuspend))
751 		printf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
752 #endif
753 
754 	LINUX_SIGEMPTYSET(mask);
755 	mask.__bits[0] = args->mask;
756 	linux_to_bsd_sigset(&mask, &sigmask);
757 	return (kern_sigsuspend(td, sigmask));
758 }
759 
760 int
761 linux_rt_sigsuspend(struct thread *td, struct linux_rt_sigsuspend_args *uap)
762 {
763 	l_sigset_t lmask;
764 	sigset_t sigmask;
765 	int error;
766 
767 #ifdef DEBUG
768 	if (ldebug(rt_sigsuspend))
769 		printf(ARGS(rt_sigsuspend, "%p, %d"),
770 		    (void *)uap->newset, uap->sigsetsize);
771 #endif
772 
773 	if (uap->sigsetsize != sizeof(l_sigset_t))
774 		return (EINVAL);
775 
776 	error = copyin(uap->newset, &lmask, sizeof(l_sigset_t));
777 	if (error)
778 		return (error);
779 
780 	linux_to_bsd_sigset(&lmask, &sigmask);
781 	return (kern_sigsuspend(td, sigmask));
782 }
783 
784 int
785 linux_pause(struct thread *td, struct linux_pause_args *args)
786 {
787 	struct proc *p = td->td_proc;
788 	sigset_t sigmask;
789 
790 #ifdef DEBUG
791 	if (ldebug(pause))
792 		printf(ARGS(pause, ""));
793 #endif
794 
795 	PROC_LOCK(p);
796 	sigmask = td->td_sigmask;
797 	PROC_UNLOCK(p);
798 	return (kern_sigsuspend(td, sigmask));
799 }
800 
801 int
802 linux_sigaltstack(struct thread *td, struct linux_sigaltstack_args *uap)
803 {
804 	stack_t ss, oss;
805 	l_stack_t lss;
806 	int error;
807 
808 #ifdef DEBUG
809 	if (ldebug(sigaltstack))
810 		printf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
811 #endif
812 
813 	if (uap->uss != NULL) {
814 		error = copyin(uap->uss, &lss, sizeof(l_stack_t));
815 		if (error)
816 			return (error);
817 
818 		ss.ss_sp = lss.ss_sp;
819 		ss.ss_size = lss.ss_size;
820 		ss.ss_flags = linux_to_bsd_sigaltstack(lss.ss_flags);
821 	}
822 	error = kern_sigaltstack(td, (uap->uss != NULL) ? &ss : NULL,
823 	    (uap->uoss != NULL) ? &oss : NULL);
824 	if (!error && uap->uoss != NULL) {
825 		lss.ss_sp = oss.ss_sp;
826 		lss.ss_size = oss.ss_size;
827 		lss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
828 		error = copyout(&lss, uap->uoss, sizeof(l_stack_t));
829 	}
830 
831 	return (error);
832 }
833 
834 int
835 linux_ftruncate64(struct thread *td, struct linux_ftruncate64_args *args)
836 {
837 	struct ftruncate_args sa;
838 
839 #ifdef DEBUG
840 	if (ldebug(ftruncate64))
841 		printf(ARGS(ftruncate64, "%u, %jd"), args->fd,
842 		    (intmax_t)args->length);
843 #endif
844 
845 	sa.fd = args->fd;
846 	sa.length = args->length;
847 	return (sys_ftruncate(td, &sa));
848 }
849 
850 int
851 linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
852 {
853 	struct l_user_desc info;
854 	int error;
855 	int idx;
856 	int a[2];
857 	struct segment_descriptor sd;
858 
859 	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
860 	if (error)
861 		return (error);
862 
863 #ifdef DEBUG
864 	if (ldebug(set_thread_area))
865 	   	printf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
866 		      info.entry_number,
867       		      info.base_addr,
868       		      info.limit,
869       		      info.seg_32bit,
870 		      info.contents,
871       		      info.read_exec_only,
872       		      info.limit_in_pages,
873       		      info.seg_not_present,
874       		      info.useable);
875 #endif
876 
877 	idx = info.entry_number;
878 	/*
879 	 * Semantics of the Linux version: every thread in the system has an
880 	 * array of 3 TLS descriptors.  The 1st is GLIBC TLS, the 2nd is WINE,
881 	 * the 3rd is unknown.  This syscall loads the selected TLS descriptor
882 	 * with a value and also loads GDT descriptors 6, 7 and 8 with the
883 	 * content of the per-thread descriptors.
884 	 *
885 	 * Semantics of the FreeBSD version: I think we can ignore the fact
886 	 * that Linux has 3 per-thread descriptors and use just the 1st one.
887 	 * The tls_array[] is used only in the set/get_thread_area() syscalls
888 	 * and for loading the GDT descriptors.  In FreeBSD we use just one
889 	 * GDT descriptor for TLS, so we will load just one.
890 	 *
891 	 * XXX: this doesn't work when a userspace process tries to use more
892 	 * than 1 TLS segment.  A comment in the Linux sources says wine might
893 	 * do this.
894 	 */
895 
896 	/*
897 	 * We support just GLIBC TLS for now.  We should let 3 proceed as
898 	 * well, because we use that segment, so if code makes two
899 	 * subsequent calls it should succeed.
900 	 */
901 	if (idx != 6 && idx != -1 && idx != 3)
902 		return (EINVAL);
903 
904 	/*
905 	 * We have to copy out the GDT entry we use.  FreeBSD uses GDT
906 	 * entry #3 for storing %gs, so load that.
907 	 *
908 	 * XXX: what if a user space program doesn't check this value and tries
909 	 * to use 6, 7 or 8?
910 	 */
911 	idx = info.entry_number = 3;
912 	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
913 	if (error)
914 		return (error);
915 
916 	if (LINUX_LDT_empty(&info)) {
917 		a[0] = 0;
918 		a[1] = 0;
919 	} else {
920 		a[0] = LINUX_LDT_entry_a(&info);
921 		a[1] = LINUX_LDT_entry_b(&info);
922 	}
923 
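	/*
	 * LINUX_LDT_entry_a()/LINUX_LDT_entry_b() produce the low and high
	 * 32-bit words of an i386 segment descriptor, so the two words can
	 * be copied directly into a struct segment_descriptor.
	 */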
924 	memcpy(&sd, &a, sizeof(a));
925 #ifdef DEBUG
926 	if (ldebug(set_thread_area))
927 	   	printf("Segment created in set_thread_area: lobase: %x, hibase: %x, lolimit: %x, hilimit: %x, type: %i, dpl: %i, p: %i, xx: %i, def32: %i, gran: %i\n", sd.sd_lobase,
928 			sd.sd_hibase,
929 			sd.sd_lolimit,
930 			sd.sd_hilimit,
931 			sd.sd_type,
932 			sd.sd_dpl,
933 			sd.sd_p,
934 			sd.sd_xx,
935 			sd.sd_def32,
936 			sd.sd_gran);
937 #endif
938 
939 	/* This is taken from the i386 version of cpu_set_user_tls(). */
940 	critical_enter();
941 	/* set %gs */
942 	td->td_pcb->pcb_gsd = sd;
943 	PCPU_GET(fsgs_gdt)[1] = sd;
944 	load_gs(GSEL(GUGS_SEL, SEL_UPL));
945 	critical_exit();
946 
947 	return (0);
948 }
949 
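/*
 * Read back the %gs descriptor from the per-CPU GDT slot (fsgs_gdt[1])
 * and convert it into a Linux struct l_user_desc for userland.
 */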
950 int
951 linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
952 {
953 
954 	struct l_user_desc info;
955 	int error;
956 	int idx;
957 	struct l_desc_struct desc;
958 	struct segment_descriptor sd;
959 
960 #ifdef DEBUG
961 	if (ldebug(get_thread_area))
962 		printf(ARGS(get_thread_area, "%p"), args->desc);
963 #endif
964 
965 	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
966 	if (error)
967 		return (error);
968 
969 	idx = info.entry_number;
970 	/* XXX: I am not sure if we want 3 to be allowed too. */
971 	if (idx != 6 && idx != 3)
972 		return (EINVAL);
973 
974 	idx = 3;
975 
976 	memset(&info, 0, sizeof(info));
977 
978 	sd = PCPU_GET(fsgs_gdt)[1];
979 
980 	memcpy(&desc, &sd, sizeof(desc));
981 
982 	info.entry_number = idx;
983 	info.base_addr = LINUX_GET_BASE(&desc);
984 	info.limit = LINUX_GET_LIMIT(&desc);
985 	info.seg_32bit = LINUX_GET_32BIT(&desc);
986 	info.contents = LINUX_GET_CONTENTS(&desc);
987 	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
988 	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
989 	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
990 	info.useable = LINUX_GET_USEABLE(&desc);
991 
992 	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
993 	if (error)
994 	   	return (EFAULT);
995 
996 	return (0);
997 }
998 
999 /* copied from kern/kern_time.c */
1000 int
1001 linux_timer_create(struct thread *td, struct linux_timer_create_args *args)
1002 {
1003 	return (sys_ktimer_create(td, (struct ktimer_create_args *)args));
1004 }
1005 
1006 int
1007 linux_timer_settime(struct thread *td, struct linux_timer_settime_args *args)
1008 {
1009 	return (sys_ktimer_settime(td, (struct ktimer_settime_args *)args));
1010 }
1011 
1012 int
1013 linux_timer_gettime(struct thread *td, struct linux_timer_gettime_args *args)
1014 {
1015 	return (sys_ktimer_gettime(td, (struct ktimer_gettime_args *)args));
1016 }
1017 
1018 int
1019 linux_timer_getoverrun(struct thread *td, struct linux_timer_getoverrun_args *args)
1020 {
1021 	return (sys_ktimer_getoverrun(td, (struct ktimer_getoverrun_args *)args));
1022 }
1023 
1024 int
1025 linux_timer_delete(struct thread *td, struct linux_timer_delete_args *args)
1026 {
1027 	return (sys_ktimer_delete(td, (struct ktimer_delete_args *)args));
1028 }
1029 
1030 /* XXX: this won't work with a module - convert it */
1031 int
1032 linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
1033 {
1034 #ifdef P1003_1B_MQUEUE
1035 	return (sys_kmq_open(td, (struct kmq_open_args *)args));
1036 #else
1037 	return (ENOSYS);
1038 #endif
1039 }
1040 
1041 int
1042 linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
1043 {
1044 #ifdef P1003_1B_MQUEUE
1045 	return (sys_kmq_unlink(td, (struct kmq_unlink_args *)args));
1046 #else
1047 	return (ENOSYS);
1048 #endif
1049 }
1050 
1051 int
1052 linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
1053 {
1054 #ifdef P1003_1B_MQUEUE
1055 	return (sys_kmq_timedsend(td, (struct kmq_timedsend_args *)args));
1056 #else
1057 	return (ENOSYS);
1058 #endif
1059 }
1060 
1061 int
1062 linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
1063 {
1064 #ifdef P1003_1B_MQUEUE
1065 	return (sys_kmq_timedreceive(td, (struct kmq_timedreceive_args *)args));
1066 #else
1067 	return (ENOSYS);
1068 #endif
1069 }
1070 
1071 int
1072 linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
1073 {
1074 #ifdef P1003_1B_MQUEUE
1075 	return (sys_kmq_notify(td, (struct kmq_notify_args *)args));
1076 #else
1077 	return (ENOSYS);
1078 #endif
1079 }
1080 
1081 int
1082 linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
1083 {
1084 #ifdef P1003_1B_MQUEUE
1085 	return (sys_kmq_setattr(td, (struct kmq_setattr_args *)args));
1086 #else
1087 	return (ENOSYS);
1088 #endif
1089 }
1090 
1091 int
1092 linux_wait4(struct thread *td, struct linux_wait4_args *args)
1093 {
1094 	int error, options;
1095 	struct rusage ru, *rup;
1096 
1097 #ifdef DEBUG
1098 	if (ldebug(wait4))
1099 		printf(ARGS(wait4, "%d, %p, %d, %p"),
1100 		    args->pid, (void *)args->status, args->options,
1101 		    (void *)args->rusage);
1102 #endif
1103 
1104 	options = (args->options & (WNOHANG | WUNTRACED));
1105 	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
1106 	if (args->options & __WCLONE)
1107 		options |= WLINUXCLONE;
1108 
1109 	if (args->rusage != NULL)
1110 		rup = &ru;
1111 	else
1112 		rup = NULL;
1113 	error = linux_common_wait(td, args->pid, args->status, options, rup);
1114 	if (error)
1115 		return (error);
1116 	if (args->rusage != NULL)
1117 		error = copyout(&ru, args->rusage, sizeof(ru));
1118 
1119 	return (error);
1120 }
1121