xref: /freebsd/sys/compat/linux/linux_misc.c (revision 67ca7330cf34a789afbbff9ae7e4cdc4a4917ae3)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2002 Doug Rabson
5  * Copyright (c) 1994-1995 Søren Schmidt
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer
13  *    in this position and unchanged.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_compat.h"
36 
37 #include <sys/param.h>
38 #include <sys/blist.h>
39 #include <sys/fcntl.h>
40 #if defined(__i386__)
41 #include <sys/imgact_aout.h>
42 #endif
43 #include <sys/jail.h>
44 #include <sys/kernel.h>
45 #include <sys/limits.h>
46 #include <sys/lock.h>
47 #include <sys/malloc.h>
48 #include <sys/mman.h>
49 #include <sys/mount.h>
50 #include <sys/mutex.h>
51 #include <sys/namei.h>
52 #include <sys/priv.h>
53 #include <sys/proc.h>
54 #include <sys/procctl.h>
55 #include <sys/reboot.h>
56 #include <sys/racct.h>
57 #include <sys/random.h>
58 #include <sys/resourcevar.h>
59 #include <sys/sched.h>
60 #include <sys/sdt.h>
61 #include <sys/signalvar.h>
62 #include <sys/stat.h>
63 #include <sys/syscallsubr.h>
64 #include <sys/sysctl.h>
65 #include <sys/sysproto.h>
66 #include <sys/systm.h>
67 #include <sys/time.h>
68 #include <sys/vmmeter.h>
69 #include <sys/vnode.h>
70 #include <sys/wait.h>
71 #include <sys/cpuset.h>
72 #include <sys/uio.h>
73 
74 #include <security/mac/mac_framework.h>
75 
76 #include <vm/vm.h>
77 #include <vm/pmap.h>
78 #include <vm/vm_kern.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_extern.h>
81 #include <vm/vm_object.h>
82 #include <vm/swap_pager.h>
83 
84 #ifdef COMPAT_LINUX32
85 #include <machine/../linux32/linux.h>
86 #include <machine/../linux32/linux32_proto.h>
87 #else
88 #include <machine/../linux/linux.h>
89 #include <machine/../linux/linux_proto.h>
90 #endif
91 
92 #include <compat/linux/linux_dtrace.h>
93 #include <compat/linux/linux_file.h>
94 #include <compat/linux/linux_mib.h>
95 #include <compat/linux/linux_signal.h>
96 #include <compat/linux/linux_timer.h>
97 #include <compat/linux/linux_util.h>
98 #include <compat/linux/linux_sysproto.h>
99 #include <compat/linux/linux_emul.h>
100 #include <compat/linux/linux_misc.h>
101 
102 /**
103  * Special DTrace provider for the linuxulator.
104  *
105  * In this file we define the provider for the entire linuxulator. All
106  * modules (= files of the linuxulator) use it.
107  *
108  * We define a different name depending on the emulated bitsize, see
109  * ../../<ARCH>/linux{,32}/linux.h, e.g.:
110  *      native bitsize          = linuxulator
111  *      amd64, 32bit emulation  = linuxulator32
112  */
113 LIN_SDT_PROVIDER_DEFINE(LINUX_DTRACE);
114 
int stclohz;				/* Statistics clock frequency */

/*
 * Translation table from Linux RLIMIT_* indices to the FreeBSD
 * resource numbers; indexed by the Linux resource-limit constant.
 */
static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};
122 
/*
 * Layout of the buffer filled in by Linux sysinfo(2); field widths
 * follow the emulated ABI via the l_* types.
 */
struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;		/* Explicit structure padding */
	l_ulong		totalbig;	/* Newer-kernel field; zeroed here */
	l_ulong		freebig;	/* Newer-kernel field; zeroed here */
	l_uint		mem_unit;	/* Size of a memory unit in bytes */
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};
140 
/*
 * Sixth argument of Linux pselect6(2).  Presumably a user pointer to a
 * signal set plus its size -- confirm against the pselect6 caller.
 */
struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};
145 
146 static int	linux_utimensat_nsec_valid(l_long);
147 
148 
/*
 * Linux sysinfo(2): fill in uptime, load averages, memory/swap usage
 * and the process count, then copy the structure out to userspace.
 */
int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	vm_object_t object;
	int i, j;
	struct timespec ts;

	bzero(&sysinfo, sizeof(sysinfo));
	getnanouptime(&ts);
	/* Round a partial second of uptime up to a whole second. */
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = sysinfo.totalram - vm_wire_count() * PAGE_SIZE;

	/*
	 * Approximate sharedram by summing resident pages of VM objects
	 * that are shadowed more than once.
	 */
	sysinfo.sharedram = 0;
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(object, &vm_object_list, object_list)
		if (object->shadow_count > 1)
			sysinfo.sharedram += object->resident_page_count;
	mtx_unlock(&vm_object_list_mtx);

	sysinfo.sharedram *= PAGE_SIZE;
	sysinfo.bufferram = 0;

	/* The computation below treats i as total and j as used pages. */
	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/* The following are only present in newer Linux kernels. */
	sysinfo.totalbig = 0;
	sysinfo.freebig = 0;
	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}
194 
195 #ifdef LINUX_LEGACY_SYSCALLS
196 int
197 linux_alarm(struct thread *td, struct linux_alarm_args *args)
198 {
199 	struct itimerval it, old_it;
200 	u_int secs;
201 	int error;
202 
203 	secs = args->secs;
204 	/*
205 	 * Linux alarm() is always successful. Limit secs to INT32_MAX / 2
206 	 * to match kern_setitimer()'s limit to avoid error from it.
207 	 *
208 	 * XXX. Linux limit secs to INT_MAX on 32 and does not limit on 64-bit
209 	 * platforms.
210 	 */
211 	if (secs > INT32_MAX / 2)
212 		secs = INT32_MAX / 2;
213 
214 	it.it_value.tv_sec = secs;
215 	it.it_value.tv_usec = 0;
216 	timevalclear(&it.it_interval);
217 	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
218 	KASSERT(error == 0, ("kern_setitimer returns %d", error));
219 
220 	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
221 	    old_it.it_value.tv_usec >= 500000)
222 		old_it.it_value.tv_sec++;
223 	td->td_retval[0] = old_it.it_value.tv_sec;
224 	return (0);
225 }
226 #endif
227 
228 int
229 linux_brk(struct thread *td, struct linux_brk_args *args)
230 {
231 	struct vmspace *vm = td->td_proc->p_vmspace;
232 	uintptr_t new, old;
233 
234 	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
235 	new = (uintptr_t)args->dsend;
236 	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
237 		td->td_retval[0] = (register_t)new;
238 	else
239 		td->td_retval[0] = (register_t)old;
240 
241 	return (0);
242 }
243 
244 #if defined(__i386__)
245 /* XXX: what about amd64/linux32? */
246 
/*
 * Linux uselib(2): map a Linux a.out shared library into the calling
 * process.  Only ZMAGIC and QMAGIC images with the Linux magic byte
 * (0x64) are accepted.  Text+data is mapped (or read) at the address
 * encoded in the header; BSS, if any, gets fresh anonymous memory.
 * All failure paths funnel through the "cleanup" label, which unwinds
 * based on the locked/opened/textset state flags.
 */
int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
	struct nameidata ni;
	struct vnode *vp;
	struct exec *a_out;
	vm_map_t map;
	vm_map_entry_t entry;
	struct vattr attr;
	vm_offset_t vmaddr;
	unsigned long file_offset;
	unsigned long bss_size;
	char *library;
	ssize_t aresid;
	int error;
	bool locked, opened, textset;

	/* Translate the user path into the Linux emulation tree. */
	LCONVPATHEXIST(td, args->library, &library);

	a_out = NULL;
	vp = NULL;
	locked = false;
	textset = false;
	opened = false;

	NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, library, td);
	error = namei(&ni);
	LFREEPATH(library);
	if (error)
		goto cleanup;

	vp = ni.ni_vp;
	NDFREE(&ni, NDF_ONLY_PNBUF);

	/*
	 * From here on down, we have a locked vnode that must be unlocked.
	 * XXX: The code below largely duplicates exec_check_permissions().
	 */
	locked = true;

	/* Executable? */
	error = VOP_GETATTR(vp, &attr, td->td_ucred);
	if (error)
		goto cleanup;

	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
		/* EACCESS is what exec(2) returns. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* Sensible size? */
	if (attr.va_size == 0) {
		error = ENOEXEC;
		goto cleanup;
	}

	/* Can we access it? */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		goto cleanup;

	/*
	 * XXX: This should use vn_open() so that it is properly authorized,
	 * and to reduce code redundancy all over the place here.
	 * XXX: Not really, it duplicates far more of exec_check_permissions()
	 * than vn_open().
	 */
#ifdef MAC
	error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
	if (error)
		goto cleanup;
#endif
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error)
		goto cleanup;
	opened = true;

	/* Pull in executable header into exec_map */
	error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
	    VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
	if (error)
		goto cleanup;

	/* Is it a Linux binary ? */
	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
		error = ENOEXEC;
		goto cleanup;
	}

	/*
	 * While we are here, we should REALLY do some more checks
	 */

	/* Set file/virtual offset based on a.out variant. */
	switch ((int)(a_out->a_magic & 0xffff)) {
	case 0413:			/* ZMAGIC */
		file_offset = 1024;
		break;
	case 0314:			/* QMAGIC */
		file_offset = 0;
		break;
	default:
		error = ENOEXEC;
		goto cleanup;
	}

	bss_size = round_page(a_out->a_bss);

	/* Check various fields in header for validity/bounds. */
	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
		/* Text and data segments must be page aligned. */
		error = ENOEXEC;
		goto cleanup;
	}

	/* text + data can't exceed file size */
	if (a_out->a_data + a_out->a_text > attr.va_size) {
		error = EFAULT;
		goto cleanup;
	}

	/*
	 * text/data/bss must not exceed limits
	 * XXX - this is not complete. it should check current usage PLUS
	 * the resources needed by this library.
	 */
	PROC_LOCK(td->td_proc);
	if (a_out->a_text > maxtsiz ||
	    a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
	    racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
	    bss_size) != 0) {
		PROC_UNLOCK(td->td_proc);
		error = ENOMEM;
		goto cleanup;
	}
	PROC_UNLOCK(td->td_proc);

	/*
	 * Prevent more writers.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		goto cleanup;
	textset = true;

	/*
	 * Lock no longer needed
	 */
	locked = false;
	VOP_UNLOCK(vp, 0);

	/*
	 * Check if file_offset page aligned. Currently we cannot handle
	 * misalinged file offsets, and so we read in the entire image
	 * (what a waste).
	 */
	if (file_offset & PAGE_MASK) {
		/* Map text+data read/write/execute */

		/* a_entry is the load address and is page aligned */
		vmaddr = trunc_page(a_out->a_entry);

		/* get anon user mapping, read+write+execute */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
		    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error)
			goto cleanup;

		/* Copy the image in by hand instead of mapping the file. */
		error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
		    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
		    td->td_ucred, NOCRED, &aresid, td);
		if (error != 0)
			goto cleanup;
		if (aresid != 0) {
			/* Short read: the file is smaller than advertised. */
			error = ENOEXEC;
			goto cleanup;
		}
	} else {
		/*
		 * for QMAGIC, a_entry is 20 bytes beyond the load address
		 * to skip the executable header
		 */
		vmaddr = trunc_page(a_out->a_entry);

		/*
		 * Map it all into the process's space as a single
		 * copy-on-write "data" segment.
		 */
		map = &td->td_proc->p_vmspace->vm_map;
		error = vm_mmap(map, &vmaddr,
		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
		    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
		if (error)
			goto cleanup;
		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
			vm_map_unlock(map);
			error = EDOOFUS;
			goto cleanup;
		}
		entry->eflags |= MAP_ENTRY_VN_EXEC;
		vm_map_unlock(map);
		/*
		 * The MAP_ENTRY_VN_EXEC entry now accounts for the text
		 * reference, so cleanup must not unset it here.
		 */
		textset = false;
	}

	if (bss_size != 0) {
		/* Calculate BSS start address */
		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
		    a_out->a_data;

		/* allocate some 'anon' space */
		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
		    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
		    VM_PROT_ALL, 0);
		if (error)
			goto cleanup;
	}

cleanup:
	/* Unwind in reverse order of acquisition, driven by the flags. */
	if (opened) {
		if (locked)
			VOP_UNLOCK(vp, 0);
		locked = false;
		VOP_CLOSE(vp, FREAD, td->td_ucred, td);
	}
	if (textset)
		VOP_UNSET_TEXT_CHECKED(vp);
	if (locked)
		VOP_UNLOCK(vp, 0);

	/* Release the temporary mapping. */
	if (a_out)
		kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

	return (error);
}
486 
487 #endif	/* __i386__ */
488 
489 #ifdef LINUX_LEGACY_SYSCALLS
/*
 * Linux select(2).  Wraps kern_select() and additionally emulates the
 * Linux behaviour of writing the unconsumed portion of the timeout
 * back to the user's timeval.
 */
int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, LINUX_NFDBITS);
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
		/* Copy the remaining time back out in the Linux layout. */
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
	return (error);
}
555 #endif
556 
557 int
558 linux_mremap(struct thread *td, struct linux_mremap_args *args)
559 {
560 	uintptr_t addr;
561 	size_t len;
562 	int error = 0;
563 
564 	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
565 		td->td_retval[0] = 0;
566 		return (EINVAL);
567 	}
568 
569 	/*
570 	 * Check for the page alignment.
571 	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
572 	 */
573 	if (args->addr & PAGE_MASK) {
574 		td->td_retval[0] = 0;
575 		return (EINVAL);
576 	}
577 
578 	args->new_len = round_page(args->new_len);
579 	args->old_len = round_page(args->old_len);
580 
581 	if (args->new_len > args->old_len) {
582 		td->td_retval[0] = 0;
583 		return (ENOMEM);
584 	}
585 
586 	if (args->new_len < args->old_len) {
587 		addr = args->addr + args->new_len;
588 		len = args->old_len - args->new_len;
589 		error = kern_munmap(td, addr, len);
590 	}
591 
592 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
593 	return (error);
594 }
595 
/* Linux msync(2) flag values. */
#define LINUX_MS_ASYNC       0x0001
#define LINUX_MS_INVALIDATE  0x0002
#define LINUX_MS_SYNC        0x0004

/*
 * Linux msync(2).  LINUX_MS_SYNC is masked off before calling
 * kern_msync(); the remaining flag bits are passed through unchanged.
 * NOTE(review): presumably this relies on the other flag values
 * matching FreeBSD's and on synchronous flushing being the default
 * behaviour -- confirm against sys/mman.h.
 */
int
linux_msync(struct thread *td, struct linux_msync_args *args)
{

	return (kern_msync(td, args->addr, args->len,
	    args->fl & ~LINUX_MS_SYNC));
}
607 
608 #ifdef LINUX_LEGACY_SYSCALLS
609 int
610 linux_time(struct thread *td, struct linux_time_args *args)
611 {
612 	struct timeval tv;
613 	l_time_t tm;
614 	int error;
615 
616 	microtime(&tv);
617 	tm = tv.tv_sec;
618 	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
619 		return (error);
620 	td->td_retval[0] = tm;
621 	return (0);
622 }
623 #endif
624 
/* Layout of the Linux times(2) output buffer; values in clock ticks. */
struct l_times_argv {
	l_clock_t	tms_utime;	/* user CPU time */
	l_clock_t	tms_stime;	/* system CPU time */
	l_clock_t	tms_cutime;	/* user CPU time of reaped children */
	l_clock_t	tms_cstime;	/* system CPU time of reaped children */
};
631 
632 
633 /*
634  * Glibc versions prior to 2.2.1 always use hard-coded CLK_TCK value.
635  * Since 2.2.1 Glibc uses value exported from kernel via AT_CLKTCK
636  * auxiliary vector entry.
637  */
638 #define	CLK_TCK		100
639 
640 #define	CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
641 #define	CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))
642 
643 #define	CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER_2004000 ?		\
644 			    CONVNTCK(r) : CONVOTCK(r))
645 
/*
 * Linux times(2): report accumulated CPU times of the process and its
 * reaped children (converted to clock ticks via CONVTCK), and return
 * the system uptime in ticks.
 */
int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

	/* The buffer is optional; a NULL buf skips the copyout. */
	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		/* calcru() requires the per-process statistics lock. */
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

	/* The syscall's return value is the uptime in clock ticks. */
	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}
677 
678 int
679 linux_newuname(struct thread *td, struct linux_newuname_args *args)
680 {
681 	struct l_new_utsname utsname;
682 	char osname[LINUX_MAX_UTSNAME];
683 	char osrelease[LINUX_MAX_UTSNAME];
684 	char *p;
685 
686 	linux_get_osname(td, osname);
687 	linux_get_osrelease(td, osrelease);
688 
689 	bzero(&utsname, sizeof(utsname));
690 	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
691 	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
692 	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
693 	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
694 	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
695 	for (p = utsname.version; *p != '\0'; ++p)
696 		if (*p == '\n') {
697 			*p = '\0';
698 			break;
699 		}
700 	strlcpy(utsname.machine, linux_kplatform, LINUX_MAX_UTSNAME);
701 
702 	return (copyout(&utsname, args->buf, sizeof(utsname)));
703 }
704 
/* Linux utime(2) buffer: timestamps in whole seconds. */
struct l_utimbuf {
	l_time_t l_actime;	/* access time */
	l_time_t l_modtime;	/* modification time */
};
709 
710 #ifdef LINUX_LEGACY_SYSCALLS
711 int
712 linux_utime(struct thread *td, struct linux_utime_args *args)
713 {
714 	struct timeval tv[2], *tvp;
715 	struct l_utimbuf lut;
716 	char *fname;
717 	int error;
718 
719 	LCONVPATHEXIST(td, args->fname, &fname);
720 
721 	if (args->times) {
722 		if ((error = copyin(args->times, &lut, sizeof lut))) {
723 			LFREEPATH(fname);
724 			return (error);
725 		}
726 		tv[0].tv_sec = lut.l_actime;
727 		tv[0].tv_usec = 0;
728 		tv[1].tv_sec = lut.l_modtime;
729 		tv[1].tv_usec = 0;
730 		tvp = tv;
731 	} else
732 		tvp = NULL;
733 
734 	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE, tvp,
735 	    UIO_SYSSPACE);
736 	LFREEPATH(fname);
737 	return (error);
738 }
739 #endif
740 
741 #ifdef LINUX_LEGACY_SYSCALLS
742 int
743 linux_utimes(struct thread *td, struct linux_utimes_args *args)
744 {
745 	l_timeval ltv[2];
746 	struct timeval tv[2], *tvp = NULL;
747 	char *fname;
748 	int error;
749 
750 	LCONVPATHEXIST(td, args->fname, &fname);
751 
752 	if (args->tptr != NULL) {
753 		if ((error = copyin(args->tptr, ltv, sizeof ltv))) {
754 			LFREEPATH(fname);
755 			return (error);
756 		}
757 		tv[0].tv_sec = ltv[0].tv_sec;
758 		tv[0].tv_usec = ltv[0].tv_usec;
759 		tv[1].tv_sec = ltv[1].tv_sec;
760 		tv[1].tv_usec = ltv[1].tv_usec;
761 		tvp = tv;
762 	}
763 
764 	error = kern_utimesat(td, AT_FDCWD, fname, UIO_SYSSPACE,
765 	    tvp, UIO_SYSSPACE);
766 	LFREEPATH(fname);
767 	return (error);
768 }
769 #endif
770 
771 static int
772 linux_utimensat_nsec_valid(l_long nsec)
773 {
774 
775 	if (nsec == LINUX_UTIME_OMIT || nsec == LINUX_UTIME_NOW)
776 		return (0);
777 	if (nsec >= 0 && nsec <= 999999999)
778 		return (0);
779 	return (1);
780 }
781 
/*
 * Linux utimensat(2).  Translates the Linux timespec pair -- including
 * the LINUX_UTIME_NOW / LINUX_UTIME_OMIT markers -- to the native
 * representation and dispatches to kern_futimens() (NULL pathname) or
 * kern_utimensat() otherwise.
 */
int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp = NULL;
	char *path = NULL;
	int error, dfd, flags = 0;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	/* AT_SYMLINK_NOFOLLOW is the only flag accepted. */
	if (args->flags & ~LINUX_AT_SYMLINK_NOFOLLOW)
		return (EINVAL);

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		if (linux_utimensat_nsec_valid(l_times[0].tv_nsec) != 0 ||
		    linux_utimensat_nsec_valid(l_times[1].tv_nsec) != 0)
			return (EINVAL);

		/* Map the Linux special nsec markers to the native ones. */
		times[0].tv_sec = l_times[0].tv_sec;
		switch (l_times[0].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[0].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[0].tv_nsec = UTIME_NOW;
			break;
		default:
			times[0].tv_nsec = l_times[0].tv_nsec;
		}

		times[1].tv_sec = l_times[1].tv_sec;
		switch (l_times[1].tv_nsec)
		{
		case LINUX_UTIME_OMIT:
			times[1].tv_nsec = UTIME_OMIT;
			break;
		case LINUX_UTIME_NOW:
			times[1].tv_nsec = UTIME_NOW;
			break;
		default:
			times[1].tv_nsec = l_times[1].tv_nsec;
			break;
		}
		timesp = times;

		/* This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour. */
		if (times[0].tv_nsec == UTIME_OMIT &&
		    times[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (args->pathname != NULL)
		LCONVPATHEXIST_AT(td, args->pathname, &path, dfd);
	else if (args->flags != 0)
		/* A NULL pathname targets dfd itself; flags need a path. */
		return (EINVAL);

	if (args->flags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;

	if (path == NULL)
		error = kern_futimens(td, dfd, timesp, UIO_SYSSPACE);
	else {
		error = kern_utimensat(td, dfd, path, UIO_SYSSPACE, timesp,
			UIO_SYSSPACE, flags);
		LFREEPATH(path);
	}

	return (error);
}
858 
859 #ifdef LINUX_LEGACY_SYSCALLS
860 int
861 linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
862 {
863 	l_timeval ltv[2];
864 	struct timeval tv[2], *tvp = NULL;
865 	char *fname;
866 	int error, dfd;
867 
868 	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
869 	LCONVPATHEXIST_AT(td, args->filename, &fname, dfd);
870 
871 	if (args->utimes != NULL) {
872 		if ((error = copyin(args->utimes, ltv, sizeof ltv))) {
873 			LFREEPATH(fname);
874 			return (error);
875 		}
876 		tv[0].tv_sec = ltv[0].tv_sec;
877 		tv[0].tv_usec = ltv[0].tv_usec;
878 		tv[1].tv_sec = ltv[1].tv_sec;
879 		tv[1].tv_usec = ltv[1].tv_usec;
880 		tvp = tv;
881 	}
882 
883 	error = kern_utimesat(td, dfd, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
884 	LFREEPATH(fname);
885 	return (error);
886 }
887 #endif
888 
/*
 * Common back end for the Linux wait-family syscalls.  Translates the
 * Linux pid argument into a kern_wait6() idtype/id pair, performs the
 * wait, then rewrites the native status word into the Linux encoding
 * before copying it out through statusp (when non-NULL).
 */
static int
linux_common_wait(struct thread *td, int pid, int *statusp,
    int options, struct __wrusage *wrup)
{
	siginfo_t siginfo;
	idtype_t idtype;
	id_t id;
	int error, status, tmpstat;

	/* Linux pid semantics: -1 = any child, < -1 = process group. */
	if (pid == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (pid < 0) {
		idtype = P_PGID;
		id = (id_t)-pid;
	} else {
		idtype = P_PID;
		id = (id_t)pid;
	}

	/*
	 * For backward compatibility we implicitly add flags WEXITED
	 * and WTRAPPED here.
	 */
	options |= WEXITED | WTRAPPED;
	error = kern_wait6(td, idtype, id, &status, options, wrup, &siginfo);
	if (error)
		return (error);

	if (statusp) {
		tmpstat = status & 0xffff;
		if (WIFSIGNALED(tmpstat)) {
			/* Substitute the Linux termination signal number. */
			tmpstat = (tmpstat & 0xffffff80) |
			    bsd_to_linux_signal(WTERMSIG(tmpstat));
		} else if (WIFSTOPPED(tmpstat)) {
			/* Substitute the Linux stop signal number. */
			tmpstat = (tmpstat & 0xffff00ff) |
			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
#if defined(__amd64__) && !defined(COMPAT_LINUX32)
			/* Let the ptrace emulation refine a SIGTRAP stop. */
			if (WSTOPSIG(status) == SIGTRAP) {
				tmpstat = linux_ptrace_status(td,
				    siginfo.si_pid, tmpstat);
			}
#endif
		} else if (WIFCONTINUED(tmpstat)) {
			/* Linux encodes "continued" as 0xffff. */
			tmpstat = 0xffff;
		}
		error = copyout(&tmpstat, statusp, sizeof(int));
	}

	return (error);
}
940 
941 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
942 int
943 linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
944 {
945 	struct linux_wait4_args wait4_args;
946 
947 	wait4_args.pid = args->pid;
948 	wait4_args.status = args->status;
949 	wait4_args.options = args->options;
950 	wait4_args.rusage = NULL;
951 
952 	return (linux_wait4(td, &wait4_args));
953 }
954 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
955 
956 int
957 linux_wait4(struct thread *td, struct linux_wait4_args *args)
958 {
959 	int error, options;
960 	struct __wrusage wru, *wrup;
961 
962 	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
963 	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
964 		return (EINVAL);
965 
966 	options = WEXITED;
967 	linux_to_bsd_waitopts(args->options, &options);
968 
969 	if (args->rusage != NULL)
970 		wrup = &wru;
971 	else
972 		wrup = NULL;
973 	error = linux_common_wait(td, args->pid, args->status, options, wrup);
974 	if (error != 0)
975 		return (error);
976 	if (args->rusage != NULL)
977 		error = linux_copyout_rusage(&wru.wru_self, args->rusage);
978 	return (error);
979 }
980 
/*
 * Linux waitid(2).  Maps the Linux idtype to the native one, performs
 * kern_wait6(), and optionally copies out the children's rusage and a
 * Linux-format siginfo describing the state change.
 */
int
linux_waitid(struct thread *td, struct linux_waitid_args *args)
{
	int status, options, sig;
	struct __wrusage wru;
	siginfo_t siginfo;
	l_siginfo_t lsi;
	idtype_t idtype;
	struct proc *p;
	int error;

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	/* At least one of WEXITED/WUNTRACED/WCONTINUED must be set. */
	if (options & ~(WNOHANG | WNOWAIT | WEXITED | WUNTRACED | WCONTINUED))
		return (EINVAL);
	if (!(options & (WEXITED | WUNTRACED | WCONTINUED)))
		return (EINVAL);

	switch (args->idtype) {
	case LINUX_P_ALL:
		idtype = P_ALL;
		break;
	case LINUX_P_PID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PID;
		break;
	case LINUX_P_PGID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PGID;
		break;
	default:
		return (EINVAL);
	}

	error = kern_wait6(td, idtype, args->id, &status, options,
	    &wru, &siginfo);
	if (error != 0)
		return (error);
	if (args->rusage != NULL) {
		error = linux_copyout_rusage(&wru.wru_children,
		    args->rusage);
		if (error != 0)
			return (error);
	}
	if (args->info != NULL) {
		p = td->td_proc;
		bzero(&lsi, sizeof(lsi));
		/* td_retval[0] is nonzero when a child changed state. */
		if (td->td_retval[0] != 0) {
			sig = bsd_to_linux_signal(siginfo.si_signo);
			siginfo_to_lsiginfo(&siginfo, &lsi, sig);
		}
		error = copyout(&lsi, args->info, sizeof(lsi));
	}
	/* waitid() returns 0 on success, not a pid. */
	td->td_retval[0] = 0;

	return (error);
}
1041 
1042 #ifdef LINUX_LEGACY_SYSCALLS
/*
 * Linux mknod(2).  FIFOs -- and sockets, which are created as FIFOs
 * here -- go through kern_mkfifoat(); device nodes through
 * kern_mknodat(); regular files are created by an open/close pair;
 * directories are rejected with EPERM.
 */
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	char *path;
	int error;

	LCONVPATHCREAT(td, args->path, &path);

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    args->mode, args->dev);
		break;

	case S_IFDIR:
		/* Directories cannot be made with mknod(); use mkdir(2). */
		error = EPERM;
		break;

	case 0:
		/* No type bits means a regular file. */
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, AT_FDCWD, path, UIO_SYSSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	LFREEPATH(path);
	return (error);
}
1085 #endif
1086 
1087 int
1088 linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
1089 {
1090 	char *path;
1091 	int error, dfd;
1092 
1093 	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
1094 	LCONVPATHCREAT_AT(td, args->filename, &path, dfd);
1095 
1096 	switch (args->mode & S_IFMT) {
1097 	case S_IFIFO:
1098 	case S_IFSOCK:
1099 		error = kern_mkfifoat(td, dfd, path, UIO_SYSSPACE, args->mode);
1100 		break;
1101 
1102 	case S_IFCHR:
1103 	case S_IFBLK:
1104 		error = kern_mknodat(td, dfd, path, UIO_SYSSPACE, args->mode,
1105 		    args->dev);
1106 		break;
1107 
1108 	case S_IFDIR:
1109 		error = EPERM;
1110 		break;
1111 
1112 	case 0:
1113 		args->mode |= S_IFREG;
1114 		/* FALLTHROUGH */
1115 	case S_IFREG:
1116 		error = kern_openat(td, dfd, path, UIO_SYSSPACE,
1117 		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
1118 		if (error == 0)
1119 			kern_close(td, td->td_retval[0]);
1120 		break;
1121 
1122 	default:
1123 		error = EINVAL;
1124 		break;
1125 	}
1126 	LFREEPATH(path);
1127 	return (error);
1128 }
1129 
1130 /*
1131  * UGH! This is just about the dumbest idea I've ever heard!!
1132  */
1133 int
1134 linux_personality(struct thread *td, struct linux_personality_args *args)
1135 {
1136 	struct linux_pemuldata *pem;
1137 	struct proc *p = td->td_proc;
1138 	uint32_t old;
1139 
1140 	PROC_LOCK(p);
1141 	pem = pem_find(p);
1142 	old = pem->persona;
1143 	if (args->per != 0xffffffff)
1144 		pem->persona = args->per;
1145 	PROC_UNLOCK(p);
1146 
1147 	td->td_retval[0] = old;
1148 	return (0);
1149 }
1150 
/* Linux layout of struct itimerval, used by setitimer(2)/getitimer(2). */
struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};
1155 
/*
 * Copy an itimerval between the native and Linux representations, field
 * by field ((bip) is the destination, (lip) the source; the layouts are
 * member-wise compatible in both directions).  Wrapped in do { } while (0)
 * so the macro expands to a single statement and is safe inside unbraced
 * if/else bodies.
 */
#define	B2L_ITIMERVAL(bip, lip)	do {					\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;		\
} while (0)
1161 
/*
 * setitimer(2): arm or modify an interval timer.  A NULL new-value
 * pointer degenerates into getitimer(2) on the old-value pointer,
 * matching Linux behaviour.
 */
int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

	if (uap->itv == NULL) {
		/* The two argument structures share a compatible layout. */
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	/* Hand the previous timer value back to the caller. */
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}
1185 
/*
 * getitimer(2): fetch the current value of an interval timer and
 * convert it to the Linux itimerval layout.
 */
int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}
1199 
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
/*
 * nice(2): adjust the calling process' scheduling priority by the given
 * increment, implemented directly on top of native setpriority(2).
 */
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{
	struct setpriority_args np = {
		.which = PRIO_PROCESS,
		.who = 0,		/* 0 selects the current process */
		.prio = args->inc
	};

	return (sys_setpriority(td, &np));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1212 
/*
 * setgroups(2): replace the supplementary group set of the calling
 * process.  Unlike the native call, Linux's supplementary set does not
 * include the effective gid, so cr_groups[0] (which holds the egid on
 * FreeBSD) is preserved and the Linux set is stored starting at
 * cr_groups[1].
 */
int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max + 1)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	/* Reserve room for the egid slot plus the new supplementary set. */
	crextend(newcred, ngrp + 1);
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;
	crcopy(newcred, oldcred);

	/*
	 * cr_groups[0] holds egid. Setting the whole set from
	 * the supplied set will cause egid to be changed too.
	 * Keep cr_groups[0] unchanged to prevent that.
	 */

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	if (ngrp > 0) {
		newcred->cr_ngroups = ngrp + 1;

		/* Copy the Linux set in, offset by one to skip the egid. */
		bsd_gidset = newcred->cr_groups;
		ngrp--;
		while (ngrp >= 0) {
			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
			ngrp--;
		}
	} else
		newcred->cr_ngroups = 1;

	setsugid(p);
	proc_set_cred(p, newcred);
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_LINUX);
	return (error);
}
1269 
/*
 * getgroups(2): return the supplementary group set.  The egid stored in
 * cr_groups[0] is excluded (see linux_setgroups() above); a gidsetsize
 * of zero only reports the number of groups, per Linux semantics.
 */
int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups - 1;

	/*
	 * cr_groups[0] holds egid. Returning the whole set
	 * here will cause a duplicate. Exclude cr_groups[0]
	 * to prevent that.
	 */

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	/* The user buffer must be large enough for the whole set. */
	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_LINUX, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_LINUX);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}
1312 
/*
 * setrlimit(2): set a resource limit, translating the Linux resource
 * index to its native counterpart first.
 */
int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* -1 marks Linux resources with no native equivalent. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}
1336 
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
/*
 * Legacy getrlimit(2) used by old Linux binaries.  Infinite native
 * limits (all-ones after narrowing) are clamped to the signed maximum,
 * which is what the old ABI expects for RLIM_INFINITY.
 */
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* -1 marks Linux resources with no native equivalent. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1372 
1373 int
1374 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
1375 {
1376 	struct l_rlimit rlim;
1377 	struct rlimit bsd_rlim;
1378 	u_int which;
1379 
1380 	if (args->resource >= LINUX_RLIM_NLIMITS)
1381 		return (EINVAL);
1382 
1383 	which = linux_to_bsd_resource[args->resource];
1384 	if (which == -1)
1385 		return (EINVAL);
1386 
1387 	lim_rlimit(td, which, &bsd_rlim);
1388 
1389 	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
1390 	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
1391 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1392 }
1393 
/*
 * sched_setscheduler(2): set the scheduling policy and parameters for
 * the thread identified by the Linux tid in args->pid.
 */
int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	/* Map the Linux policy constant to the native one. */
	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	/* linux_tdfind() returns with the target's proc locked. */
	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}
1428 
/*
 * sched_getscheduler(2): return the scheduling policy of the thread
 * identified by the Linux tid in args->pid, mapped back to the Linux
 * policy constants.  Policies other than the three mapped ones leave
 * td_retval[0] unmodified.
 */
int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

	/* linux_tdfind() returns with the target's proc locked. */
	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}
1456 
1457 int
1458 linux_sched_get_priority_max(struct thread *td,
1459     struct linux_sched_get_priority_max_args *args)
1460 {
1461 	struct sched_get_priority_max_args bsd;
1462 
1463 	switch (args->policy) {
1464 	case LINUX_SCHED_OTHER:
1465 		bsd.policy = SCHED_OTHER;
1466 		break;
1467 	case LINUX_SCHED_FIFO:
1468 		bsd.policy = SCHED_FIFO;
1469 		break;
1470 	case LINUX_SCHED_RR:
1471 		bsd.policy = SCHED_RR;
1472 		break;
1473 	default:
1474 		return (EINVAL);
1475 	}
1476 	return (sys_sched_get_priority_max(td, &bsd));
1477 }
1478 
1479 int
1480 linux_sched_get_priority_min(struct thread *td,
1481     struct linux_sched_get_priority_min_args *args)
1482 {
1483 	struct sched_get_priority_min_args bsd;
1484 
1485 	switch (args->policy) {
1486 	case LINUX_SCHED_OTHER:
1487 		bsd.policy = SCHED_OTHER;
1488 		break;
1489 	case LINUX_SCHED_FIFO:
1490 		bsd.policy = SCHED_FIFO;
1491 		break;
1492 	case LINUX_SCHED_RR:
1493 		bsd.policy = SCHED_RR;
1494 		break;
1495 	default:
1496 		return (EINVAL);
1497 	}
1498 	return (sys_sched_get_priority_min(td, &bsd));
1499 }
1500 
/* Linux reboot(2) magic numbers and commands (from the Linux ABI). */
#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

/*
 * reboot(2): validate the Linux magic values, map the Linux command to
 * the native reboot flags, and forward to the native syscall.  The
 * Ctrl-Alt-Del toggles are accepted as privileged no-ops.
 */
int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		/* Only the privilege check; the toggle itself is a no-op. */
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}
1548 
1549 
1550 /*
1551  * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
1552  * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers that
 * are assumed to be preserved. The following lightweight syscalls fix
1554  * this. See also linux_getgid16() and linux_getuid16() in linux_uid16.c
1555  *
1556  * linux_getpid() - MP SAFE
1557  * linux_getgid() - MP SAFE
1558  * linux_getuid() - MP SAFE
1559  */
1560 
/* getpid(2): return the process id without touching td_retval[1]. */
int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}
1569 
/*
 * gettid(2): return the calling thread's Linux tid, which is stored in
 * the per-thread emulation data.
 */
int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

	/* Every Linuxulator thread must have emulation data attached. */
	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}
1582 
1583 
1584 int
1585 linux_getppid(struct thread *td, struct linux_getppid_args *args)
1586 {
1587 
1588 	td->td_retval[0] = kern_getppid(td);
1589 	return (0);
1590 }
1591 
/* getgid(2): return the real group id without touching td_retval[1]. */
int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}
1599 
/* getuid(2): return the real user id without touching td_retval[1]. */
int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}
1607 
1608 
1609 int
1610 linux_getsid(struct thread *td, struct linux_getsid_args *args)
1611 {
1612 	struct getsid_args bsd;
1613 
1614 	bsd.pid = args->pid;
1615 	return (sys_getsid(td, &bsd));
1616 }
1617 
/* Stub for unimplemented Linux syscalls. */
int
linux_nosys(struct thread *td, struct nosys_args *ignore)
{

	return (ENOSYS);
}
1624 
/*
 * getpriority(2): Linux returns 20 - nice (a value in [1..40]) instead
 * of the raw nice value, so the native result is rebased accordingly.
 */
int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	struct getpriority_args bsd_args;
	int error;

	bsd_args.which = args->which;
	bsd_args.who = args->who;
	error = sys_getpriority(td, &bsd_args);
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}
1637 
/*
 * sethostname(2): write the new hostname through the kern.hostname
 * sysctl (write-only use: no old value is requested).
 */
int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}
1648 
/*
 * setdomainname(2): write the new NIS domain name through the
 * kern.domainname sysctl (write-only use: no old value is requested).
 */
int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}
1659 
/*
 * exit_group(2): terminate the whole process with the given exit code.
 * exit1() does not return.
 */
int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
		/* NOTREACHED */
}
1675 
/* Linux capability ABI versions (see the Linux capget(2) interface). */
#define _LINUX_CAPABILITY_VERSION_1  0x19980330
#define _LINUX_CAPABILITY_VERSION_2  0x20071026
#define _LINUX_CAPABILITY_VERSION_3  0x20080522

/* User-space header passed to capget(2)/capset(2). */
struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

/* One 32-bit slice of the capability sets (v2/v3 use two slices). */
struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};
1690 
/*
 * capget(2): stub implementation.  Reports all-zero capability sets for
 * the current process; querying another pid returns EPERM.  On an
 * unknown ABI version the preferred version is written back, as Linux
 * does.
 */
int
linux_capget(struct thread *td, struct linux_capget_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, u32s;

	if (uap->hdrp == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	/* Version selects how many 32-bit data slices are exchanged. */
	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (uap->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		memset(&lucd, 0, u32s * sizeof(lucd[0]));
		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
	}

	return (error);
}
1737 
/*
 * capset(2): stub implementation.  Only the all-zero capability sets
 * are accepted; any attempt to actually grant a capability is logged
 * and rejected with EPERM.
 */
int
linux_capset(struct thread *td, struct linux_capset_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, i, u32s;

	if (uap->hdrp == NULL || uap->datap == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	/* Version selects how many 32-bit data slices are exchanged. */
	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		/* Unknown ABI version: report the preferred one back. */
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	for (i = 0; i < u32s; i++) {
		if (lucd[i].effective || lucd[i].permitted ||
		    lucd[i].inheritable) {
			linux_msg(td,
			    "capset[%d] effective=0x%x, permitted=0x%x, "
			    "inheritable=0x%x is not implemented", i,
			    (int)lucd[i].effective, (int)lucd[i].permitted,
			    (int)lucd[i].inheritable);
			return (EPERM);
		}
	}

	return (0);
}
1790 
/*
 * prctl(2): process control multiplexer.  Only a subset of the Linux
 * options is implemented; unhandled options return EINVAL.
 */
int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	int pdeath_signal;

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		/* Deliver the given signal when the parent dies. */
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		pdeath_signal = linux_to_bsd_signal(args->arg2);
		return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL,
		    &pdeath_signal));
	case LINUX_PR_GET_PDEATHSIG:
		/* Copy the current parent-death signal out to arg2. */
		error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS,
		    &pdeath_signal);
		if (error != 0)
			return (error);
		pdeath_signal = bsd_to_linux_signal(pdeath_signal);
		return (copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal)));
		break;
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure to not
		 * overflow the size a Linux program expects. We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side. This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		/* Return the process name, including the terminating NUL. */
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
1875 
1876 int
1877 linux_sched_setparam(struct thread *td,
1878     struct linux_sched_setparam_args *uap)
1879 {
1880 	struct sched_param sched_param;
1881 	struct thread *tdt;
1882 	int error;
1883 
1884 	error = copyin(uap->param, &sched_param, sizeof(sched_param));
1885 	if (error)
1886 		return (error);
1887 
1888 	tdt = linux_tdfind(td, uap->pid, -1);
1889 	if (tdt == NULL)
1890 		return (ESRCH);
1891 
1892 	error = kern_sched_setparam(td, tdt, &sched_param);
1893 	PROC_UNLOCK(tdt->td_proc);
1894 	return (error);
1895 }
1896 
/*
 * sched_getparam(2): fetch the scheduling parameters of the thread
 * identified by the Linux tid in uap->pid.
 */
int
linux_sched_getparam(struct thread *td,
    struct linux_sched_getparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error;

	/* linux_tdfind() returns with the target's proc locked. */
	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getparam(td, tdt, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	if (error == 0)
		error = copyout(&sched_param, uap->param,
		    sizeof(sched_param));
	return (error);
}
1916 
1917 /*
1918  * Get affinity of a process.
1919  */
1920 int
1921 linux_sched_getaffinity(struct thread *td,
1922     struct linux_sched_getaffinity_args *args)
1923 {
1924 	int error;
1925 	struct thread *tdt;
1926 
1927 	if (args->len < sizeof(cpuset_t))
1928 		return (EINVAL);
1929 
1930 	tdt = linux_tdfind(td, args->pid, -1);
1931 	if (tdt == NULL)
1932 		return (ESRCH);
1933 
1934 	PROC_UNLOCK(tdt->td_proc);
1935 
1936 	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
1937 	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *)args->user_mask_ptr);
1938 	if (error == 0)
1939 		td->td_retval[0] = sizeof(cpuset_t);
1940 
1941 	return (error);
1942 }
1943 
1944 /*
1945  *  Set affinity of a process.
1946  */
1947 int
1948 linux_sched_setaffinity(struct thread *td,
1949     struct linux_sched_setaffinity_args *args)
1950 {
1951 	struct thread *tdt;
1952 
1953 	if (args->len < sizeof(cpuset_t))
1954 		return (EINVAL);
1955 
1956 	tdt = linux_tdfind(td, args->pid, -1);
1957 	if (tdt == NULL)
1958 		return (ESRCH);
1959 
1960 	PROC_UNLOCK(tdt->td_proc);
1961 
1962 	return (kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
1963 	    tdt->td_tid, sizeof(cpuset_t), (cpuset_t *) args->user_mask_ptr));
1964 }
1965 
/* Linux layout of struct rlimit64, used by prlimit64(2). */
struct linux_rlimit64 {
	uint64_t	rlim_cur;
	uint64_t	rlim_max;
};
1970 
/*
 * prlimit64(2): get and/or set a resource limit of an arbitrary
 * process.  The target is held via pget() for the duration; setting a
 * limit requires debug rights on the target, reading only visibility.
 */
int
linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
{
	struct rlimit rlim, nrlim;
	struct linux_rlimit64 lrlim;
	struct proc *p;
	u_int which;
	int flags;
	int error;

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	/* -1 marks Linux resources with no native equivalent. */
	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	if (args->new != NULL) {
		/*
		 * Note. Unlike FreeBSD where rlim is signed 64-bit Linux
		 * rlim is unsigned 64-bit. FreeBSD treats negative limits
		 * as INFINITY so we do not need a conversion even.
		 */
		error = copyin(args->new, &nrlim, sizeof(nrlim));
		if (error != 0)
			return (error);
	}

	flags = PGET_HOLD | PGET_NOTWEXIT;
	if (args->new != NULL)
		flags |= PGET_CANDEBUG;
	else
		flags |= PGET_CANSEE;
	error = pget(args->pid, flags, &p);
	if (error != 0)
		return (error);

	if (args->old != NULL) {
		PROC_LOCK(p);
		lim_rlimit_proc(p, which, &rlim);
		PROC_UNLOCK(p);
		/* Map the native infinity value to the Linux one. */
		if (rlim.rlim_cur == RLIM_INFINITY)
			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_cur = rlim.rlim_cur;
		if (rlim.rlim_max == RLIM_INFINITY)
			lrlim.rlim_max = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_max = rlim.rlim_max;
		error = copyout(&lrlim, args->old, sizeof(lrlim));
		if (error != 0)
			goto out;
	}

	if (args->new != NULL)
		error = kern_proc_setrlimit(td, p, which, &nrlim);

 out:
	/* Release the hold taken by pget(). */
	PRELE(p);
	return (error);
}
2032 
/*
 * pselect6(2): select(2) with a signal mask and nanosecond timeout.
 * Unlike POSIX pselect(), Linux writes the remaining time back into
 * *tsp, which is recomputed here from wall-clock samples around the
 * kern_pselect() call.
 */
int
linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
{
	struct timeval utv, tv0, tv1, *tvp;
	struct l_pselect6arg lpse6;
	struct l_timespec lts;
	struct timespec uts;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	ssp = NULL;
	if (args->sig != NULL) {
		/* The sixth argument is a (sigset pointer, size) pair. */
		error = copyin(args->sig, &lpse6, sizeof(lpse6));
		if (error != 0)
			return (error);
		if (lpse6.ss_len != sizeof(l_ss))
			return (EINVAL);
		if (lpse6.ss != 0) {
			error = copyin(PTRIN(lpse6.ss), &l_ss,
			    sizeof(l_ss));
			if (error != 0)
				return (error);
			linux_to_bsd_sigset(&l_ss, &ss);
			ssp = &ss;
		}
	}

	/*
	 * Currently glibc changes nanosecond number to microsecond.
	 * This mean losing precision but for now it is hardly seen.
	 */
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error != 0)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		TIMESPEC_TO_TIMEVAL(&utv, &uts);
		if (itimerfix(&utv))
			return (EINVAL);

		/* Remember when we started, to compute the time left. */
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, ssp, LINUX_NFDBITS);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0] != 0) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */

			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);

		TIMEVAL_TO_TIMESPEC(&utv, &uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}
2112 
/*
 * ppoll(2): poll(2) with a signal mask and nanosecond timeout.  As with
 * pselect6, Linux writes the remaining time back into *tsp, recomputed
 * here from nanotime() samples around the kern_poll() call.
 */
int
linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
{
	struct timespec ts0, ts1;
	struct l_timespec lts;
	struct timespec uts, *tsp;
	l_sigset_t l_ss;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	if (args->sset != NULL) {
		if (args->ssize != sizeof(l_ss))
			return (EINVAL);
		error = copyin(args->sset, &l_ss, sizeof(l_ss));
		if (error)
			return (error);
		linux_to_bsd_sigset(&l_ss, &ss);
		ssp = &ss;
	} else
		ssp = NULL;
	if (args->tsp != NULL) {
		error = copyin(args->tsp, &lts, sizeof(lts));
		if (error)
			return (error);
		error = linux_to_native_timespec(&uts, &lts);
		if (error != 0)
			return (error);

		/* Remember when we started, to compute the time left. */
		nanotime(&ts0);
		tsp = &uts;
	} else
		tsp = NULL;

	error = kern_poll(td, args->fds, args->nfds, tsp, ssp);

	if (error == 0 && args->tsp != NULL) {
		if (td->td_retval[0]) {
			/* Subtract the elapsed time from the user timeout. */
			nanotime(&ts1);
			timespecsub(&ts1, &ts0, &ts1);
			timespecsub(&uts, &ts1, &uts);
			if (uts.tv_sec < 0)
				timespecclear(&uts);
		} else
			timespecclear(&uts);

		error = native_to_linux_timespec(&lts, &uts);
		if (error == 0)
			error = copyout(&lts, args->tsp, sizeof(lts));
	}

	return (error);
}
2166 
/*
 * sched_rr_get_interval(2): return the round-robin time quantum of the
 * thread identified by the Linux tid in uap->pid.
 */
int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	struct l_timespec lts;
	struct thread *tdt;
	int error;

	/*
	 * According to man in case the invalid pid specified
	 * EINVAL should be returned.
	 */
	if (uap->pid < 0)
		return (EINVAL);

	/* linux_tdfind() returns with the target's proc locked. */
	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, &ts);
	PROC_UNLOCK(tdt->td_proc);
	if (error != 0)
		return (error);
	error = native_to_linux_timespec(&lts, &ts);
	if (error != 0)
		return (error);
	return (copyout(&lts, uap->interval, sizeof(lts)));
}
2196 
2197 /*
2198  * In case when the Linux thread is the initial thread in
2199  * the thread group thread id is equal to the process id.
2200  * Glibc depends on this magic (assert in pthread_getattr_np.c).
2201  */
2202 struct thread *
2203 linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
2204 {
2205 	struct linux_emuldata *em;
2206 	struct thread *tdt;
2207 	struct proc *p;
2208 
2209 	tdt = NULL;
2210 	if (tid == 0 || tid == td->td_tid) {
2211 		tdt = td;
2212 		PROC_LOCK(tdt->td_proc);
2213 	} else if (tid > PID_MAX)
2214 		tdt = tdfind(tid, pid);
2215 	else {
2216 		/*
2217 		 * Initial thread where the tid equal to the pid.
2218 		 */
2219 		p = pfind(tid);
2220 		if (p != NULL) {
2221 			if (SV_PROC_ABI(p) != SV_ABI_LINUX) {
2222 				/*
2223 				 * p is not a Linuxulator process.
2224 				 */
2225 				PROC_UNLOCK(p);
2226 				return (NULL);
2227 			}
2228 			FOREACH_THREAD_IN_PROC(p, tdt) {
2229 				em = em_find(tdt);
2230 				if (tid == em->em_tid)
2231 					return (tdt);
2232 			}
2233 			PROC_UNLOCK(p);
2234 		}
2235 		return (NULL);
2236 	}
2237 
2238 	return (tdt);
2239 }
2240 
2241 void
2242 linux_to_bsd_waitopts(int options, int *bsdopts)
2243 {
2244 
2245 	if (options & LINUX_WNOHANG)
2246 		*bsdopts |= WNOHANG;
2247 	if (options & LINUX_WUNTRACED)
2248 		*bsdopts |= WUNTRACED;
2249 	if (options & LINUX_WEXITED)
2250 		*bsdopts |= WEXITED;
2251 	if (options & LINUX_WCONTINUED)
2252 		*bsdopts |= WCONTINUED;
2253 	if (options & LINUX_WNOWAIT)
2254 		*bsdopts |= WNOWAIT;
2255 
2256 	if (options & __WCLONE)
2257 		*bsdopts |= WLINUXCLONE;
2258 }
2259 
/*
 * getrandom(2): fill the user buffer with random bytes via the kernel
 * entropy source.  The request is clamped to INT_MAX and the number of
 * bytes actually produced is returned.
 */
int
linux_getrandom(struct thread *td, struct linux_getrandom_args *args)
{
	struct uio uio;
	struct iovec iov;
	int error;

	/* Only GRND_NONBLOCK and GRND_RANDOM are recognized. */
	if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM))
		return (EINVAL);
	if (args->count > INT_MAX)
		args->count = INT_MAX;

	iov.iov_base = args->buf;
	iov.iov_len = args->count;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK);
	if (error == 0)
		/* Report how many bytes were actually written. */
		td->td_retval[0] = args->count - uio.uio_resid;
	return (error);
}
2287 
/*
 * mincore(2): report residency of the pages in a range.  Linux requires
 * the start address to be page-aligned; the length need not be.
 */
int
linux_mincore(struct thread *td, struct linux_mincore_args *args)
{

	/* Needs to be page-aligned */
	if (args->start & PAGE_MASK)
		return (EINVAL);
	return (kern_mincore(td, args->start, args->len, args->vec));
}
2297