xref: /freebsd/sys/compat/linux/linux_misc.c (revision 6c7216df785592bb069469113f556dfd1946ed5d)
1 /*-
2  * Copyright (c) 2002 Doug Rabson
3  * Copyright (c) 1994-1995 Søren Schmidt
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer
11  *    in this position and unchanged.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the author may not be used to endorse or promote products
16  *    derived from this software without specific prior written permission
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_mac.h"
34 
35 #include <sys/param.h>
36 #include <sys/blist.h>
37 #include <sys/fcntl.h>
38 #if defined(__i386__) || defined(__alpha__)
39 #include <sys/imgact_aout.h>
40 #endif
41 #include <sys/jail.h>
42 #include <sys/kernel.h>
43 #include <sys/limits.h>
44 #include <sys/lock.h>
45 #include <sys/mac.h>
46 #include <sys/malloc.h>
47 #include <sys/mman.h>
48 #include <sys/mount.h>
49 #include <sys/mutex.h>
50 #include <sys/namei.h>
51 #include <sys/proc.h>
52 #include <sys/reboot.h>
53 #include <sys/resourcevar.h>
54 #include <sys/signalvar.h>
55 #include <sys/stat.h>
56 #include <sys/syscallsubr.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysproto.h>
59 #include <sys/systm.h>
60 #include <sys/time.h>
61 #include <sys/vmmeter.h>
62 #include <sys/vnode.h>
63 #include <sys/wait.h>
64 
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67 #include <vm/vm_kern.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_extern.h>
70 #include <vm/vm_object.h>
71 #include <vm/swap_pager.h>
72 
73 #include <posix4/sched.h>
74 
75 #include "opt_compat.h"
76 
77 #ifdef COMPAT_LINUX32
78 #include <machine/../linux32/linux.h>
79 #include <machine/../linux32/linux32_proto.h>
80 #else
81 #include <machine/../linux/linux.h>
82 #include <machine/../linux/linux_proto.h>
83 #endif
84 
85 #include <compat/linux/linux_mib.h>
86 #include <compat/linux/linux_util.h>
87 
88 #ifdef __i386__
89 #include <machine/cputypes.h>
90 #endif
91 
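/*
 * BSD_TO_LINUX_SIGNAL() translates a FreeBSD signal number into the Linux
 * numbering via the bsd_to_linux_signal[] table; on the Alpha the two
 * numberings already agree, so the macro is the identity there.
 */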
92 #ifdef __alpha__
93 #define BSD_TO_LINUX_SIGNAL(sig)       (sig)
94 #else
95 #define BSD_TO_LINUX_SIGNAL(sig)	\
96 	(((sig) <= LINUX_SIGTBLSZ) ? bsd_to_linux_signal[_SIG_IDX(sig)] : sig)
97 #endif
98 
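/*
 * Table mapping Linux RLIMIT_* indices to their FreeBSD counterparts; an
 * entry of -1 marks a Linux resource limit with no direct FreeBSD
 * equivalent, which the rlimit syscalls below reject with EINVAL.
 */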
99 #ifndef __alpha__
100 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
101 	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
102 	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
103 	RLIMIT_MEMLOCK, -1
104 };
105 #endif /*!__alpha__*/
106 
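/*
 * Layout of the buffer filled in by Linux sysinfo(2).  linux_sysinfo()
 * below populates it from the FreeBSD VM and swap statistics and copies
 * it out to user space.
 */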
107 struct l_sysinfo {
108 	l_long		uptime;		/* Seconds since boot */
109 	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
110 #define LINUX_SYSINFO_LOADS_SCALE 65536
111 	l_ulong		totalram;	/* Total usable main memory size */
112 	l_ulong		freeram;	/* Available memory size */
113 	l_ulong		sharedram;	/* Amount of shared memory */
114 	l_ulong		bufferram;	/* Memory used by buffers */
115 	l_ulong		totalswap;	/* Total swap space size */
116 	l_ulong		freeswap;	/* swap space still available */
117 	l_ushort	procs;		/* Number of current processes */
118 	l_ulong		totalbig;	/* Total high memory size */
119 	l_ulong		freebig;	/* Available high memory size */
120 	l_uint		mem_unit;	/* Memory unit size in bytes */
121 	char		_f[6];		/* Pads structure to 64 bytes */
122 };
123 #ifndef __alpha__
124 int
125 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
126 {
127 	struct l_sysinfo sysinfo;
128 	vm_object_t object;
129 	int i, j;
130 	struct timespec ts;
131 
132 	/* Uptime in seconds since boot, which is what Linux sysinfo(2) reports. */
133 	getnanouptime(&ts);
134 	sysinfo.uptime = ts.tv_sec;
148 
149 	/* Use the information from the mib to get our load averages */
150 	for (i = 0; i < 3; i++)
151 		sysinfo.loads[i] = averunnable.ldavg[i] *
152 		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
153 
154 	sysinfo.totalram = physmem * PAGE_SIZE;
155 	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;
156 
157 	sysinfo.sharedram = 0;
158 	mtx_lock(&vm_object_list_mtx);
159 	TAILQ_FOREACH(object, &vm_object_list, object_list)
160 		if (object->shadow_count > 1)
161 			sysinfo.sharedram += object->resident_page_count;
162 	mtx_unlock(&vm_object_list_mtx);
163 
164 	sysinfo.sharedram *= PAGE_SIZE;
165 	sysinfo.bufferram = 0;
166 
167 	swap_pager_status(&i, &j);
168 	sysinfo.totalswap = i * PAGE_SIZE;
169 	sysinfo.freeswap = (i - j) * PAGE_SIZE;
170 
171 	sysinfo.procs = nprocs;
172 
173 	/* The following are only present in newer Linux kernels. */
174 	sysinfo.totalbig = 0;
175 	sysinfo.freebig = 0;
176 	sysinfo.mem_unit = 1;
177 
178 	return copyout(&sysinfo, args->info, sizeof(sysinfo));
179 }
180 #endif /*!__alpha__*/
181 
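/*
 * alarm(2) emulation: arm the process's ITIMER_REAL callout directly, the
 * way the native setitimer() does, and return the number of seconds that
 * were left on any previously pending alarm (rounded up), which is what
 * Linux reports.
 */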
182 #ifndef __alpha__
183 int
184 linux_alarm(struct thread *td, struct linux_alarm_args *args)
185 {
186 	struct itimerval it, old_it;
187 	struct timeval tv;
188 	struct proc *p;
189 
190 #ifdef DEBUG
191 	if (ldebug(alarm))
192 		printf(ARGS(alarm, "%u"), args->secs);
193 #endif
194 
195 	if (args->secs > 100000000)
196 		return EINVAL;
197 
198 	it.it_value.tv_sec = (long)args->secs;
199 	it.it_value.tv_usec = 0;
200 	it.it_interval.tv_sec = 0;
201 	it.it_interval.tv_usec = 0;
202 	p = td->td_proc;
203 	PROC_LOCK(p);
204 	old_it = p->p_realtimer;
205 	getmicrouptime(&tv);
206 	if (timevalisset(&old_it.it_value))
207 		callout_stop(&p->p_itcallout);
208 	if (it.it_value.tv_sec != 0) {
209 		callout_reset(&p->p_itcallout, tvtohz(&it.it_value),
210 		    realitexpire, p);
211 		timevaladd(&it.it_value, &tv);
212 	}
213 	p->p_realtimer = it;
214 	PROC_UNLOCK(p);
215 	if (timevalcmp(&old_it.it_value, &tv, >)) {
216 		timevalsub(&old_it.it_value, &tv);
217 		if (old_it.it_value.tv_usec != 0)
218 			old_it.it_value.tv_sec++;
219 		td->td_retval[0] = old_it.it_value.tv_sec;
220 	}
221 	return 0;
222 }
223 #endif /*!__alpha__*/
224 
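/*
 * brk(2) emulation: Linux expects the new break address back on success
 * and the unchanged old break on failure, never an error, so wrap the
 * native obreak() and pick the appropriate value for the return register.
 */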
225 int
226 linux_brk(struct thread *td, struct linux_brk_args *args)
227 {
228 	struct vmspace *vm = td->td_proc->p_vmspace;
229 	vm_offset_t new, old;
230 	struct obreak_args /* {
231 		char * nsize;
232 	} */ tmp;
233 
234 #ifdef DEBUG
235 	if (ldebug(brk))
236 		printf(ARGS(brk, "%p"), (void *)(uintptr_t)args->dsend);
237 #endif
238 	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
239 	new = (vm_offset_t)args->dsend;
240 	tmp.nsize = (char *) new;
241 	if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp))
242 		td->td_retval[0] = (long)new;
243 	else
244 		td->td_retval[0] = (long)old;
245 
246 	return 0;
247 }
248 
249 #if defined(__i386__) || defined(__alpha__)
250 
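/*
 * uselib(2): map an a.out format Linux shared library directly into the
 * calling process.  The header is inspected for the ZMAGIC/QMAGIC variants,
 * text and data are mapped (or copied, if the file offset is not page
 * aligned), and a zero-filled BSS region is appended.
 */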
251 int
252 linux_uselib(struct thread *td, struct linux_uselib_args *args)
253 {
254 	struct nameidata ni;
255 	struct vnode *vp;
256 	struct exec *a_out;
257 	struct vattr attr;
258 	vm_offset_t vmaddr;
259 	unsigned long file_offset;
260 	vm_offset_t buffer;
261 	unsigned long bss_size;
262 	char *library;
263 	int error;
264 	int locked;
265 
266 	LCONVPATHEXIST(td, args->library, &library);
267 
268 #ifdef DEBUG
269 	if (ldebug(uselib))
270 		printf(ARGS(uselib, "%s"), library);
271 #endif
272 
273 	a_out = NULL;
274 	locked = 0;
275 	vp = NULL;
276 
277 	/*
278 	 * XXX: This code should make use of vn_open(), rather than doing
279 	 * all this stuff itself.
280 	 */
281 	NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, library, td);
282 	error = namei(&ni);
283 	LFREEPATH(library);
284 	if (error)
285 		goto cleanup;
286 
287 	vp = ni.ni_vp;
288 	/*
289 	 * XXX - This looks like a bogus check. A LOCKLEAF namei should not
290 	 * succeed without returning a vnode.
291 	 */
292 	if (vp == NULL) {
293 		error = ENOEXEC;	/* ?? */
294 		goto cleanup;
295 	}
296 	NDFREE(&ni, NDF_ONLY_PNBUF);
297 
298 	/*
299 	 * From here on down, we have a locked vnode that must be unlocked.
300 	 */
301 	locked++;
302 
303 	/* Writable? */
304 	if (vp->v_writecount) {
305 		error = ETXTBSY;
306 		goto cleanup;
307 	}
308 
309 	/* Executable? */
310 	error = VOP_GETATTR(vp, &attr, td->td_ucred, td);
311 	if (error)
312 		goto cleanup;
313 
314 	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
315 	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
316 		error = ENOEXEC;
317 		goto cleanup;
318 	}
319 
320 	/* Sensible size? */
321 	if (attr.va_size == 0) {
322 		error = ENOEXEC;
323 		goto cleanup;
324 	}
325 
326 	/* Can we access it? */
327 	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
328 	if (error)
329 		goto cleanup;
330 
331 	/*
332 	 * XXX: This should use vn_open() so that it is properly authorized,
333 	 * and to reduce code redundancy all over the place here.
334 	 */
335 #ifdef MAC
336 	error = mac_check_vnode_open(td->td_ucred, vp, FREAD);
337 	if (error)
338 		goto cleanup;
339 #endif
340 	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
341 	if (error)
342 		goto cleanup;
343 
344 	/* Pull the executable header into kernel_map */
345 	error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
346 	    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0);
347 	/*
348 	 * Lock no longer needed
349 	 */
350 	locked = 0;
351 	VOP_UNLOCK(vp, 0, td);
352 
353 	if (error)
354 		goto cleanup;
355 
356 	/* Is it a Linux binary? */
357 	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
358 		error = ENOEXEC;
359 		goto cleanup;
360 	}
361 
362 	/*
363 	 * While we are here, we should REALLY do some more checks
364 	 */
365 
366 	/* Set file/virtual offset based on a.out variant. */
367 	switch ((int)(a_out->a_magic & 0xffff)) {
368 	case 0413:	/* ZMAGIC */
369 		file_offset = 1024;
370 		break;
371 	case 0314:	/* QMAGIC */
372 		file_offset = 0;
373 		break;
374 	default:
375 		error = ENOEXEC;
376 		goto cleanup;
377 	}
378 
379 	bss_size = round_page(a_out->a_bss);
380 
381 	/* Check various fields in header for validity/bounds. */
382 	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
383 		error = ENOEXEC;
384 		goto cleanup;
385 	}
386 
387 	/* text + data can't exceed file size */
388 	if (a_out->a_data + a_out->a_text > attr.va_size) {
389 		error = EFAULT;
390 		goto cleanup;
391 	}
392 
393 	/*
394 	 * text/data/bss must not exceed limits
395 	 * XXX - this is not complete. It should check current usage PLUS
396 	 * the resources needed by this library.
397 	 */
398 	PROC_LOCK(td->td_proc);
399 	if (a_out->a_text > maxtsiz ||
400 	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA)) {
401 		PROC_UNLOCK(td->td_proc);
402 		error = ENOMEM;
403 		goto cleanup;
404 	}
405 	PROC_UNLOCK(td->td_proc);
406 
407 	mp_fixme("Unlocked vflags access.");
408 	/* prevent more writers */
409 	vp->v_vflag |= VV_TEXT;
410 
411 	/*
412 	 * Check if file_offset is page aligned. Currently we cannot handle
413 	 * misaligned file offsets, and so we read in the entire image
414 	 * (what a waste).
415 	 */
416 	if (file_offset & PAGE_MASK) {
417 #ifdef DEBUG
418 		printf("uselib: Non page aligned binary %lu\n", file_offset);
419 #endif
420 		/* Map text+data read/write/execute */
421 
422 		/* a_entry is the load address and is page aligned */
423 		vmaddr = trunc_page(a_out->a_entry);
424 
425 		/* get anon user mapping, read+write+execute */
426 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
427 		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
428 		    VM_PROT_ALL, 0);
429 		if (error)
430 			goto cleanup;
431 
432 		/* map file into kernel_map */
433 		error = vm_mmap(kernel_map, &buffer,
434 		    round_page(a_out->a_text + a_out->a_data + file_offset),
435 		    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp,
436 		    trunc_page(file_offset));
437 		if (error)
438 			goto cleanup;
439 
440 		/* copy from kernel VM space to user space */
441 		error = copyout(PTRIN(buffer + file_offset),
442 		    (void *)vmaddr, a_out->a_text + a_out->a_data);
443 
444 		/* release temporary kernel space */
445 		vm_map_remove(kernel_map, buffer, buffer +
446 		    round_page(a_out->a_text + a_out->a_data + file_offset));
447 
448 		if (error)
449 			goto cleanup;
450 	} else {
451 #ifdef DEBUG
452 		printf("uselib: Page aligned binary %lu\n", file_offset);
453 #endif
454 		/*
455 		 * for QMAGIC, a_entry is 20 bytes beyond the load address
456 		 * to skip the executable header
457 		 */
458 		vmaddr = trunc_page(a_out->a_entry);
459 
460 		/*
461 		 * Map it all into the process's space as a single
462 		 * copy-on-write "data" segment.
463 		 */
464 		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
465 		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
466 		    MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset);
467 		if (error)
468 			goto cleanup;
469 	}
470 #ifdef DEBUG
471 	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long*)vmaddr)[0],
472 	    ((long*)vmaddr)[1]);
473 #endif
474 	if (bss_size != 0) {
475 		/* Calculate BSS start address */
476 		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
477 		    a_out->a_data;
478 
479 		/* allocate some 'anon' space */
480 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
481 		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
482 		if (error)
483 			goto cleanup;
484 	}
485 
486 cleanup:
487 	/* Unlock vnode if needed */
488 	if (locked)
489 		VOP_UNLOCK(vp, 0, td);
490 
491 	/* Release the kernel mapping. */
492 	if (a_out)
493 		vm_map_remove(kernel_map, (vm_offset_t)a_out,
494 		    (vm_offset_t)a_out + PAGE_SIZE);
495 
496 	return error;
497 }
498 
499 #endif	/* __i386__ || __alpha__ */
500 
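/*
 * select(2) emulation.  Linux differs from FreeBSD in two ways that matter
 * here: an interrupted call must return EINTR rather than being restarted,
 * and the timeout argument is rewritten with the time that was left when
 * the call returned.  Both are handled around the call to kern_select().
 */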
501 int
502 linux_select(struct thread *td, struct linux_select_args *args)
503 {
504 	l_timeval ltv;
505 	struct timeval tv0, tv1, utv, *tvp;
506 	int error;
507 
508 #ifdef DEBUG
509 	if (ldebug(select))
510 		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
511 		    (void *)args->readfds, (void *)args->writefds,
512 		    (void *)args->exceptfds, (void *)args->timeout);
513 #endif
514 
515 	/*
516 	 * Store current time for computation of the amount of
517 	 * time left.
518 	 */
519 	if (args->timeout) {
520 		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
521 			goto select_out;
522 		utv.tv_sec = ltv.tv_sec;
523 		utv.tv_usec = ltv.tv_usec;
524 #ifdef DEBUG
525 		if (ldebug(select))
526 			printf(LMSG("incoming timeout (%ld/%ld)"),
527 			    utv.tv_sec, utv.tv_usec);
528 #endif
529 
530 		if (itimerfix(&utv)) {
531 			/*
532 			 * The timeval was invalid.  Convert it to something
533 			 * valid that will act as it does under Linux.
534 			 */
535 			utv.tv_sec += utv.tv_usec / 1000000;
536 			utv.tv_usec %= 1000000;
537 			if (utv.tv_usec < 0) {
538 				utv.tv_sec -= 1;
539 				utv.tv_usec += 1000000;
540 			}
541 			if (utv.tv_sec < 0)
542 				timevalclear(&utv);
543 		}
544 		microtime(&tv0);
545 		tvp = &utv;
546 	} else
547 		tvp = NULL;
548 
549 	error = kern_select(td, args->nfds, args->readfds, args->writefds,
550 	    args->exceptfds, tvp);
551 
552 #ifdef DEBUG
553 	if (ldebug(select))
554 		printf(LMSG("real select returns %d"), error);
555 #endif
556 	if (error) {
557 		/*
558 		 * See fs/select.c in the Linux kernel.  Without this,
559 		 * Maelstrom doesn't work.
560 		 */
561 		if (error == ERESTART)
562 			error = EINTR;
563 		goto select_out;
564 	}
565 
566 	if (args->timeout) {
567 		if (td->td_retval[0]) {
568 			/*
569 			 * Compute how much time was left of the timeout,
570 			 * by subtracting the current time and the time
571 			 * before we started the call, and subtracting
572 			 * that result from the user-supplied value.
573 			 */
574 			microtime(&tv1);
575 			timevalsub(&tv1, &tv0);
576 			timevalsub(&utv, &tv1);
577 			if (utv.tv_sec < 0)
578 				timevalclear(&utv);
579 		} else
580 			timevalclear(&utv);
581 #ifdef DEBUG
582 		if (ldebug(select))
583 			printf(LMSG("outgoing timeout (%ld/%ld)"),
584 			    utv.tv_sec, utv.tv_usec);
585 #endif
586 		ltv.tv_sec = utv.tv_sec;
587 		ltv.tv_usec = utv.tv_usec;
588 		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
589 			goto select_out;
590 	}
591 
592 select_out:
593 #ifdef DEBUG
594 	if (ldebug(select))
595 		printf(LMSG("select_out -> %d"), error);
596 #endif
597 	return error;
598 }
599 
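/*
 * mremap(2) emulation: only shrinking a mapping in place is supported.
 * Growing a region fails with ENOMEM; shrinking is done by munmap()ing
 * the tail of the old mapping.
 */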
600 int
601 linux_mremap(struct thread *td, struct linux_mremap_args *args)
602 {
603 	struct munmap_args /* {
604 		void *addr;
605 		size_t len;
606 	} */ bsd_args;
607 	int error = 0;
608 
609 #ifdef DEBUG
610 	if (ldebug(mremap))
611 		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
612 		    (void *)(uintptr_t)args->addr,
613 		    (unsigned long)args->old_len,
614 		    (unsigned long)args->new_len,
615 		    (unsigned long)args->flags);
616 #endif
617 	args->new_len = round_page(args->new_len);
618 	args->old_len = round_page(args->old_len);
619 
620 	if (args->new_len > args->old_len) {
621 		td->td_retval[0] = 0;
622 		return ENOMEM;
623 	}
624 
625 	if (args->new_len < args->old_len) {
626 		bsd_args.addr =
627 		    (caddr_t)((uintptr_t)args->addr + args->new_len);
628 		bsd_args.len = args->old_len - args->new_len;
629 		error = munmap(td, &bsd_args);
630 	}
631 
632 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
633 	return error;
634 }
635 
636 #define LINUX_MS_ASYNC       0x0001
637 #define LINUX_MS_INVALIDATE  0x0002
638 #define LINUX_MS_SYNC        0x0004
639 
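/*
 * msync(2): hand the request to the native msync(), stripping the
 * Linux-specific MS_SYNC bit from the flags first.
 */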
640 int
641 linux_msync(struct thread *td, struct linux_msync_args *args)
642 {
643 	struct msync_args bsd_args;
644 
645 	bsd_args.addr = (caddr_t)(uintptr_t)args->addr;
646 	bsd_args.len = (uintptr_t)args->len;
647 	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;
648 
649 	return msync(td, &bsd_args);
650 }
651 
652 #ifndef __alpha__
653 int
654 linux_time(struct thread *td, struct linux_time_args *args)
655 {
656 	struct timeval tv;
657 	l_time_t tm;
658 	int error;
659 
660 #ifdef DEBUG
661 	if (ldebug(time))
662 		printf(ARGS(time, "*"));
663 #endif
664 
665 	microtime(&tv);
666 	tm = tv.tv_sec;
667 	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
668 		return error;
669 	td->td_retval[0] = tm;
670 	return 0;
671 }
672 #endif	/*!__alpha__*/
673 
674 struct l_times_argv {
675 	l_long		tms_utime;
676 	l_long		tms_stime;
677 	l_long		tms_cutime;
678 	l_long		tms_cstime;
679 };
680 
681 #ifdef __alpha__
682 #define CLK_TCK 1024	/* Linux uses 1024 on alpha */
683 #else
684 #define CLK_TCK 100	/* Linux uses 100 */
685 #endif
686 
687 #define CONVTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
688 
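/*
 * times(2): report the process and child CPU times in Linux clock ticks
 * (CLK_TCK above) and return the current uptime, also in ticks, as the
 * syscall's value.  CONVTCK() converts a timeval, e.g. with CLK_TCK == 100
 * a value of 1.5 seconds becomes 150 ticks.
 */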
689 int
690 linux_times(struct thread *td, struct linux_times_args *args)
691 {
692 	struct timeval tv, utime, stime, cutime, cstime;
693 	struct l_times_argv tms;
694 	struct proc *p;
695 	int error;
696 
697 #ifdef DEBUG
698 	if (ldebug(times))
699 		printf(ARGS(times, "*"));
700 #endif
701 
702 	p = td->td_proc;
703 	PROC_LOCK(p);
704 	calcru(p, &utime, &stime);
705 	calccru(p, &cutime, &cstime);
706 	PROC_UNLOCK(p);
707 
708 	tms.tms_utime = CONVTCK(utime);
709 	tms.tms_stime = CONVTCK(stime);
710 
711 	tms.tms_cutime = CONVTCK(cutime);
712 	tms.tms_cstime = CONVTCK(cstime);
713 
714 	if ((error = copyout(&tms, args->buf, sizeof(tms))))
715 		return error;
716 
717 	microuptime(&tv);
718 	td->td_retval[0] = (int)CONVTCK(tv);
719 	return 0;
720 }
721 
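/*
 * uname(2): fill in a Linux struct new_utsname using the emulated OS name
 * and release from the Linux MIB, the credential's host and domain names,
 * the FreeBSD version string truncated at its first newline, and a machine
 * string chosen to match what Linux would report on this architecture.
 */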
722 int
723 linux_newuname(struct thread *td, struct linux_newuname_args *args)
724 {
725 	struct l_new_utsname utsname;
726 	char osname[LINUX_MAX_UTSNAME];
727 	char osrelease[LINUX_MAX_UTSNAME];
728 	char *p;
729 
730 #ifdef DEBUG
731 	if (ldebug(newuname))
732 		printf(ARGS(newuname, "*"));
733 #endif
734 
735 	linux_get_osname(td, osname);
736 	linux_get_osrelease(td, osrelease);
737 
738 	bzero(&utsname, sizeof(utsname));
739 	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
740 	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
741 	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
742 	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
743 	for (p = utsname.version; *p != '\0'; ++p)
744 		if (*p == '\n') {
745 			*p = '\0';
746 			break;
747 		}
748 #ifdef __i386__
749 	{
750 		const char *class;
751 		switch (cpu_class) {
752 		case CPUCLASS_686:
753 			class = "i686";
754 			break;
755 		case CPUCLASS_586:
756 			class = "i586";
757 			break;
758 		case CPUCLASS_486:
759 			class = "i486";
760 			break;
761 		default:
762 			class = "i386";
763 		}
764 		strlcpy(utsname.machine, class, LINUX_MAX_UTSNAME);
765 	}
766 #elif defined(__amd64__)	/* XXX: Linux can change 'personality'. */
767 #ifdef COMPAT_LINUX32
768 	strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
769 #else
770 	strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME);
771 #endif /* COMPAT_LINUX32 */
772 #else /* something other than i386 or amd64 - assume we and Linux agree */
773 	strlcpy(utsname.machine, machine, LINUX_MAX_UTSNAME);
774 #endif /* __i386__ */
775 	strlcpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME);
776 
777 	return (copyout(&utsname, args->buf, sizeof(utsname)));
778 }
779 
780 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
781 struct l_utimbuf {
782 	l_time_t l_actime;
783 	l_time_t l_modtime;
784 };
785 
786 int
787 linux_utime(struct thread *td, struct linux_utime_args *args)
788 {
789 	struct timeval tv[2], *tvp;
790 	struct l_utimbuf lut;
791 	char *fname;
792 	int error;
793 
794 	LCONVPATHEXIST(td, args->fname, &fname);
795 
796 #ifdef DEBUG
797 	if (ldebug(utime))
798 		printf(ARGS(utime, "%s, *"), fname);
799 #endif
800 
801 	if (args->times) {
802 		if ((error = copyin(args->times, &lut, sizeof lut))) {
803 			LFREEPATH(fname);
804 			return error;
805 		}
806 		tv[0].tv_sec = lut.l_actime;
807 		tv[0].tv_usec = 0;
808 		tv[1].tv_sec = lut.l_modtime;
809 		tv[1].tv_usec = 0;
810 		tvp = tv;
811 	} else
812 		tvp = NULL;
813 
814 	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
815 	LFREEPATH(fname);
816 	return (error);
817 }
818 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
819 
820 #define __WCLONE 0x80000000
821 
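/*
 * waitpid(2)/wait4(2): the status word layout is shared between Linux and
 * FreeBSD, but signal numbers are not, so WTERMSIG/WSTOPSIG values are
 * remapped with BSD_TO_LINUX_SIGNAL() before the status is copied out.
 * The Linux-only __WCLONE option is translated to WLINUXCLONE.
 */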
822 #ifndef __alpha__
823 int
824 linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
825 {
826 	int error, options, tmpstat;
827 
828 #ifdef DEBUG
829 	if (ldebug(waitpid))
830 		printf(ARGS(waitpid, "%d, %p, %d"),
831 		    args->pid, (void *)args->status, args->options);
832 #endif
833 
834 	options = (args->options & (WNOHANG | WUNTRACED));
835 	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
836 	if (args->options & __WCLONE)
837 		options |= WLINUXCLONE;
838 
839 	error = kern_wait(td, args->pid, &tmpstat, options, NULL);
840 	if (error)
841 		return error;
842 
843 	if (args->status) {
844 		tmpstat &= 0xffff;
845 		if (WIFSIGNALED(tmpstat))
846 			tmpstat = (tmpstat & 0xffffff80) |
847 			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
848 		else if (WIFSTOPPED(tmpstat))
849 			tmpstat = (tmpstat & 0xffff00ff) |
850 			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
851 		return copyout(&tmpstat, args->status, sizeof(int));
852 	}
853 
854 	return 0;
855 }
856 #endif	/*!__alpha__*/
857 
858 int
859 linux_wait4(struct thread *td, struct linux_wait4_args *args)
860 {
861 	int error, options, tmpstat;
862 	struct rusage ru, *rup;
863 	struct proc *p;
864 
865 #ifdef DEBUG
866 	if (ldebug(wait4))
867 		printf(ARGS(wait4, "%d, %p, %d, %p"),
868 		    args->pid, (void *)args->status, args->options,
869 		    (void *)args->rusage);
870 #endif
871 
872 	options = (args->options & (WNOHANG | WUNTRACED));
873 	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
874 	if (args->options & __WCLONE)
875 		options |= WLINUXCLONE;
876 
877 	if (args->rusage != NULL)
878 		rup = &ru;
879 	else
880 		rup = NULL;
881 	error = kern_wait(td, args->pid, &tmpstat, options, rup);
882 	if (error)
883 		return error;
884 
885 	p = td->td_proc;
886 	PROC_LOCK(p);
887 	SIGDELSET(p->p_siglist, SIGCHLD);
888 	PROC_UNLOCK(p);
889 
890 	if (args->status) {
891 		tmpstat &= 0xffff;
892 		if (WIFSIGNALED(tmpstat))
893 			tmpstat = (tmpstat & 0xffffff80) |
894 			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
895 		else if (WIFSTOPPED(tmpstat))
896 			tmpstat = (tmpstat & 0xffff00ff) |
897 			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
898 		error = copyout(&tmpstat, args->status, sizeof(int));
899 	}
900 	if (args->rusage != NULL && error == 0)
901 		error = copyout(&ru, args->rusage, sizeof(ru));
902 
903 	return (error);
904 }
905 
906 int
907 linux_mknod(struct thread *td, struct linux_mknod_args *args)
908 {
909 	char *path;
910 	int error;
911 
912 	LCONVPATHCREAT(td, args->path, &path);
913 
914 #ifdef DEBUG
915 	if (ldebug(mknod))
916 		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
917 #endif
918 
919 	if (args->mode & S_IFIFO)
920 		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
921 	else
922 		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
923 		    args->dev);
924 	LFREEPATH(path);
925 	return (error);
926 }
927 
928 /*
929  * UGH! This is just about the dumbest idea I've ever heard!!
930  */
931 int
932 linux_personality(struct thread *td, struct linux_personality_args *args)
933 {
934 #ifdef DEBUG
935 	if (ldebug(personality))
936 		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
937 #endif
938 #ifndef __alpha__
939 	if (args->per != 0)
940 		return EINVAL;
941 #endif
942 
943 	/* Yes Jim, it's still a Linux... */
944 	td->td_retval[0] = 0;
945 	return 0;
946 }
947 
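/*
 * setitimer(2)/getitimer(2): the Linux and native struct itimerval layouts
 * may differ, so the arguments are converted through temporary copies
 * placed in the stack gap, which gives the native syscalls user-space
 * addresses to work with.
 */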
948 struct l_itimerval {
949 	l_timeval it_interval;
950 	l_timeval it_value;
951 };
952 
953 int
954 linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
955 {
956 	int error;
957 	caddr_t sg;
958 	struct l_itimerval *lp, *lop, ls;
959 	struct itimerval *p = NULL, *op = NULL, s;
960 
961 #ifdef DEBUG
962 	if (ldebug(setitimer))
963 		printf(ARGS(setitimer, "%p, %p"),
964 		    (void *)uap->itv, (void *)uap->oitv);
965 #endif
966 	lp = uap->itv;
967 	if (lp != NULL) {
968 		sg = stackgap_init();
969 		p = stackgap_alloc(&sg, sizeof(struct itimerval));
970 		uap->itv = (struct l_itimerval *)p;
971 		error = copyin(lp, &ls, sizeof(ls));
972 		if (error != 0)
973 			return (error);
974 		s.it_interval.tv_sec = ls.it_interval.tv_sec;
975 		s.it_interval.tv_usec = ls.it_interval.tv_usec;
976 		s.it_value.tv_sec = ls.it_value.tv_sec;
977 		s.it_value.tv_usec = ls.it_value.tv_usec;
978 		error = copyout(&s, p, sizeof(s));
979 		if (error != 0)
980 			return (error);
981 #ifdef DEBUG
982 		if (ldebug(setitimer)) {
983 			printf("setitimer: value: sec: %ld, usec: %ld\n",
984 			    s.it_value.tv_sec, s.it_value.tv_usec);
985 			printf("setitimer: interval: sec: %ld, usec: %ld\n",
986 			    s.it_interval.tv_sec, s.it_interval.tv_usec);
987 		}
988 #endif
989 	}
990 	lop = uap->oitv;
991 	if (lop != NULL) {
992 		sg = stackgap_init();
993 		op = stackgap_alloc(&sg, sizeof(struct itimerval));
994 		uap->oitv = (struct l_itimerval *)op;
995 	}
996 	error = setitimer(td, (struct setitimer_args *) uap);
997 	if (error != 0)
998 		return (error);
999 	if (lop != NULL) {
1000 		error = copyin(op, &s, sizeof(s));
1001 		if (error != 0)
1002 			return (error);
1003 		ls.it_interval.tv_sec = s.it_interval.tv_sec;
1004 		ls.it_interval.tv_usec = s.it_interval.tv_usec;
1005 		ls.it_value.tv_sec = s.it_value.tv_sec;
1006 		ls.it_value.tv_usec = s.it_value.tv_usec;
1007 		error = copyout(&ls, lop, sizeof(ls));
1008 	}
1009 	return (error);
1010 }
1011 
1012 int
1013 linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
1014 {
1015 	int error;
1016 	caddr_t sg;
1017 	struct l_itimerval *lp, ls;
1018 	struct itimerval *p = NULL, s;
1019 
1020 #ifdef DEBUG
1021 	if (ldebug(getitimer))
1022 		printf(ARGS(getitimer, "%p"), (void *)uap->itv);
1023 #endif
1024 	lp = uap->itv;
1025 	if (lp != NULL) {
1026 		sg = stackgap_init();
1027 		p = stackgap_alloc(&sg, sizeof(struct itimerval));
1028 		uap->itv = (struct l_itimerval *)p;
1029 	}
1030 	error = getitimer(td, (struct getitimer_args *) uap);
1031 	if (error != 0)
1032 		return (error);
1033 	if (lp != NULL) {
1034 		error = copyin(p, &s, sizeof(s));
1035 		if (error != 0)
1036 			return (error);
1037 		ls.it_interval.tv_sec = s.it_interval.tv_sec;
1038 		ls.it_interval.tv_usec = s.it_interval.tv_usec;
1039 		ls.it_value.tv_sec = s.it_value.tv_sec;
1040 		ls.it_value.tv_usec = s.it_value.tv_usec;
1041 		error = copyout(&ls, lp, sizeof(ls));
1042 	}
1043 	return (error);
1044 }
1045 
1046 #ifndef __alpha__
1047 int
1048 linux_nice(struct thread *td, struct linux_nice_args *args)
1049 {
1050 	struct setpriority_args	bsd_args;
1051 
1052 	bsd_args.which = PRIO_PROCESS;
1053 	bsd_args.who = 0;	/* current process */
1054 	bsd_args.prio = args->inc;
1055 	return setpriority(td, &bsd_args);
1056 }
1057 #endif	/*!__alpha__*/
1058 
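/*
 * setgroups(2)/getgroups(2): Linux treats the group set as purely
 * supplementary groups, while FreeBSD keeps the effective GID in
 * cr_groups[0].  Both wrappers therefore shift the set by one slot so
 * that cr_groups[0] is neither overwritten nor reported.
 */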
1059 int
1060 linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
1061 {
1062 	struct ucred *newcred, *oldcred;
1063 	l_gid_t linux_gidset[NGROUPS];
1064 	gid_t *bsd_gidset;
1065 	int ngrp, error;
1066 	struct proc *p;
1067 
1068 	ngrp = args->gidsetsize;
1069 	if (ngrp < 0 || ngrp >= NGROUPS)
1070 		return (EINVAL);
1071 	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
1072 	if (error)
1073 		return (error);
1074 	newcred = crget();
1075 	p = td->td_proc;
1076 	PROC_LOCK(p);
1077 	oldcred = p->p_ucred;
1078 
1079 	/*
1080 	 * cr_groups[0] holds egid. Setting the whole set from
1081 	 * the supplied set will cause egid to be changed too.
1082 	 * Keep cr_groups[0] unchanged to prevent that.
1083 	 */
1084 
1085 	if ((error = suser_cred(oldcred, SUSER_ALLOWJAIL)) != 0) {
1086 		PROC_UNLOCK(p);
1087 		crfree(newcred);
1088 		return (error);
1089 	}
1090 
1091 	crcopy(newcred, oldcred);
1092 	if (ngrp > 0) {
1093 		newcred->cr_ngroups = ngrp + 1;
1094 
1095 		bsd_gidset = newcred->cr_groups;
1096 		ngrp--;
1097 		while (ngrp >= 0) {
1098 			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
1099 			ngrp--;
1100 		}
1101 	}
1102 	else
1103 		newcred->cr_ngroups = 1;
1104 
1105 	setsugid(p);
1106 	p->p_ucred = newcred;
1107 	PROC_UNLOCK(p);
1108 	crfree(oldcred);
1109 	return (0);
1110 }
1111 
1112 int
1113 linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
1114 {
1115 	struct ucred *cred;
1116 	l_gid_t linux_gidset[NGROUPS];
1117 	gid_t *bsd_gidset;
1118 	int bsd_gidsetsz, ngrp, error;
1119 
1120 	cred = td->td_ucred;
1121 	bsd_gidset = cred->cr_groups;
1122 	bsd_gidsetsz = cred->cr_ngroups - 1;
1123 
1124 	/*
1125 	 * cr_groups[0] holds egid. Returning the whole set
1126 	 * here will cause a duplicate. Exclude cr_groups[0]
1127 	 * to prevent that.
1128 	 */
1129 
1130 	if ((ngrp = args->gidsetsize) == 0) {
1131 		td->td_retval[0] = bsd_gidsetsz;
1132 		return (0);
1133 	}
1134 
1135 	if (ngrp < bsd_gidsetsz)
1136 		return (EINVAL);
1137 
1138 	ngrp = 0;
1139 	while (ngrp < bsd_gidsetsz) {
1140 		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
1141 		ngrp++;
1142 	}
1143 
1144 	if ((error = copyout(linux_gidset, args->grouplist,
1145 	    ngrp * sizeof(l_gid_t))))
1146 		return (error);
1147 
1148 	td->td_retval[0] = ngrp;
1149 	return (0);
1150 }
1151 
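/*
 * Resource limit syscalls: the Linux resource index is translated through
 * linux_to_bsd_resource[] (defined near the top of this file).  The old
 * getrlimit variant additionally caps limits that would read back as
 * all-ones at the corresponding signed maximum for the legacy Linux
 * struct rlimit.
 */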
1152 #ifndef __alpha__
1153 int
1154 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
1155 {
1156 	struct rlimit bsd_rlim;
1157 	struct l_rlimit rlim;
1158 	u_int which;
1159 	int error;
1160 
1161 #ifdef DEBUG
1162 	if (ldebug(setrlimit))
1163 		printf(ARGS(setrlimit, "%d, %p"),
1164 		    args->resource, (void *)args->rlim);
1165 #endif
1166 
1167 	if (args->resource >= LINUX_RLIM_NLIMITS)
1168 		return (EINVAL);
1169 
1170 	which = linux_to_bsd_resource[args->resource];
1171 	if (which == -1)
1172 		return (EINVAL);
1173 
1174 	error = copyin(args->rlim, &rlim, sizeof(rlim));
1175 	if (error)
1176 		return (error);
1177 
1178 	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
1179 	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
1180 	return (kern_setrlimit(td, which, &bsd_rlim));
1181 }
1182 
1183 int
1184 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
1185 {
1186 	struct l_rlimit rlim;
1187 	struct proc *p = td->td_proc;
1188 	struct rlimit bsd_rlim;
1189 	u_int which;
1190 
1191 #ifdef DEBUG
1192 	if (ldebug(old_getrlimit))
1193 		printf(ARGS(old_getrlimit, "%d, %p"),
1194 		    args->resource, (void *)args->rlim);
1195 #endif
1196 
1197 	if (args->resource >= LINUX_RLIM_NLIMITS)
1198 		return (EINVAL);
1199 
1200 	which = linux_to_bsd_resource[args->resource];
1201 	if (which == -1)
1202 		return (EINVAL);
1203 
1204 	PROC_LOCK(p);
1205 	lim_rlimit(p, which, &bsd_rlim);
1206 	PROC_UNLOCK(p);
1207 
1208 #ifdef COMPAT_LINUX32
1209 	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
1210 	if (rlim.rlim_cur == UINT_MAX)
1211 		rlim.rlim_cur = INT_MAX;
1212 	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
1213 	if (rlim.rlim_max == UINT_MAX)
1214 		rlim.rlim_max = INT_MAX;
1215 #else
1216 	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
1217 	if (rlim.rlim_cur == ULONG_MAX)
1218 		rlim.rlim_cur = LONG_MAX;
1219 	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
1220 	if (rlim.rlim_max == ULONG_MAX)
1221 		rlim.rlim_max = LONG_MAX;
1222 #endif
1223 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1224 }
1225 
1226 int
1227 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
1228 {
1229 	struct l_rlimit rlim;
1230 	struct proc *p = td->td_proc;
1231 	struct rlimit bsd_rlim;
1232 	u_int which;
1233 
1234 #ifdef DEBUG
1235 	if (ldebug(getrlimit))
1236 		printf(ARGS(getrlimit, "%d, %p"),
1237 		    args->resource, (void *)args->rlim);
1238 #endif
1239 
1240 	if (args->resource >= LINUX_RLIM_NLIMITS)
1241 		return (EINVAL);
1242 
1243 	which = linux_to_bsd_resource[args->resource];
1244 	if (which == -1)
1245 		return (EINVAL);
1246 
1247 	PROC_LOCK(p);
1248 	lim_rlimit(p, which, &bsd_rlim);
1249 	PROC_UNLOCK(p);
1250 
1251 	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
1252 	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
1253 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1254 }
1255 #endif /*!__alpha__*/
1256 
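/*
 * Scheduling syscalls: translate the Linux SCHED_* policy constants to
 * their POSIX counterparts (and back for sched_getscheduler()) and defer
 * the real work to the native sched_*() implementations.
 */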
1257 int
1258 linux_sched_setscheduler(struct thread *td,
1259     struct linux_sched_setscheduler_args *args)
1260 {
1261 	struct sched_setscheduler_args bsd;
1262 
1263 #ifdef DEBUG
1264 	if (ldebug(sched_setscheduler))
1265 		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
1266 		    args->pid, args->policy, (const void *)args->param);
1267 #endif
1268 
1269 	switch (args->policy) {
1270 	case LINUX_SCHED_OTHER:
1271 		bsd.policy = SCHED_OTHER;
1272 		break;
1273 	case LINUX_SCHED_FIFO:
1274 		bsd.policy = SCHED_FIFO;
1275 		break;
1276 	case LINUX_SCHED_RR:
1277 		bsd.policy = SCHED_RR;
1278 		break;
1279 	default:
1280 		return EINVAL;
1281 	}
1282 
1283 	bsd.pid = args->pid;
1284 	bsd.param = (struct sched_param *)args->param;
1285 	return sched_setscheduler(td, &bsd);
1286 }
1287 
1288 int
1289 linux_sched_getscheduler(struct thread *td,
1290     struct linux_sched_getscheduler_args *args)
1291 {
1292 	struct sched_getscheduler_args bsd;
1293 	int error;
1294 
1295 #ifdef DEBUG
1296 	if (ldebug(sched_getscheduler))
1297 		printf(ARGS(sched_getscheduler, "%d"), args->pid);
1298 #endif
1299 
1300 	bsd.pid = args->pid;
1301 	error = sched_getscheduler(td, &bsd);
1302 
1303 	switch (td->td_retval[0]) {
1304 	case SCHED_OTHER:
1305 		td->td_retval[0] = LINUX_SCHED_OTHER;
1306 		break;
1307 	case SCHED_FIFO:
1308 		td->td_retval[0] = LINUX_SCHED_FIFO;
1309 		break;
1310 	case SCHED_RR:
1311 		td->td_retval[0] = LINUX_SCHED_RR;
1312 		break;
1313 	}
1314 
1315 	return error;
1316 }
1317 
1318 int
1319 linux_sched_get_priority_max(struct thread *td,
1320     struct linux_sched_get_priority_max_args *args)
1321 {
1322 	struct sched_get_priority_max_args bsd;
1323 
1324 #ifdef DEBUG
1325 	if (ldebug(sched_get_priority_max))
1326 		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
1327 #endif
1328 
1329 	switch (args->policy) {
1330 	case LINUX_SCHED_OTHER:
1331 		bsd.policy = SCHED_OTHER;
1332 		break;
1333 	case LINUX_SCHED_FIFO:
1334 		bsd.policy = SCHED_FIFO;
1335 		break;
1336 	case LINUX_SCHED_RR:
1337 		bsd.policy = SCHED_RR;
1338 		break;
1339 	default:
1340 		return EINVAL;
1341 	}
1342 	return sched_get_priority_max(td, &bsd);
1343 }
1344 
1345 int
1346 linux_sched_get_priority_min(struct thread *td,
1347     struct linux_sched_get_priority_min_args *args)
1348 {
1349 	struct sched_get_priority_min_args bsd;
1350 
1351 #ifdef DEBUG
1352 	if (ldebug(sched_get_priority_min))
1353 		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
1354 #endif
1355 
1356 	switch (args->policy) {
1357 	case LINUX_SCHED_OTHER:
1358 		bsd.policy = SCHED_OTHER;
1359 		break;
1360 	case LINUX_SCHED_FIFO:
1361 		bsd.policy = SCHED_FIFO;
1362 		break;
1363 	case LINUX_SCHED_RR:
1364 		bsd.policy = SCHED_RR;
1365 		break;
1366 	default:
1367 		return EINVAL;
1368 	}
1369 	return sched_get_priority_min(td, &bsd);
1370 }
1371 
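/*
 * Magic command values accepted by Linux reboot(2).  Toggling Ctrl-Alt-Del
 * handling is accepted and ignored, REBOOT_HALT maps to RB_HALT, and any
 * other command performs a normal reboot.
 */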
1372 #define REBOOT_CAD_ON	0x89abcdef
1373 #define REBOOT_CAD_OFF	0
1374 #define REBOOT_HALT	0xcdef0123
1375 
1376 int
1377 linux_reboot(struct thread *td, struct linux_reboot_args *args)
1378 {
1379 	struct reboot_args bsd_args;
1380 
1381 #ifdef DEBUG
1382 	if (ldebug(reboot))
1383 		printf(ARGS(reboot, "0x%x"), args->cmd);
1384 #endif
1385 	if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF)
1386 		return (0);
1387 	bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0;
1388 	return (reboot(td, &bsd_args));
1389 }
1390 
1391 #ifndef __alpha__
1392 
1393 /*
1394  * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
1395  * td->td_retval[1] when COMPAT_43 is defined. This
1396  * clobbers registers that are assumed to be preserved. The following
1397  * lightweight syscalls fix this. See also linux_getgid16() and
1398  * linux_getuid16() in linux_uid16.c.
1399  *
1400  * linux_getpid() - MP SAFE
1401  * linux_getgid() - MP SAFE
1402  * linux_getuid() - MP SAFE
1403  */
1404 
1405 int
1406 linux_getpid(struct thread *td, struct linux_getpid_args *args)
1407 {
1408 
1409 	td->td_retval[0] = td->td_proc->p_pid;
1410 	return (0);
1411 }
1412 
1413 int
1414 linux_getgid(struct thread *td, struct linux_getgid_args *args)
1415 {
1416 
1417 	td->td_retval[0] = td->td_ucred->cr_rgid;
1418 	return (0);
1419 }
1420 
1421 int
1422 linux_getuid(struct thread *td, struct linux_getuid_args *args)
1423 {
1424 
1425 	td->td_retval[0] = td->td_ucred->cr_ruid;
1426 	return (0);
1427 }
1428 
1429 #endif /*!__alpha__*/
1430 
1431 int
1432 linux_getsid(struct thread *td, struct linux_getsid_args *args)
1433 {
1434 	struct getsid_args bsd;
1435 	bsd.pid = args->pid;
1436 	return getsid(td, &bsd);
1437 }
1438