xref: /freebsd/sys/compat/linux/linux_misc.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
1 /*-
2  * Copyright (c) 1994-1995 Søren Schmidt
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer
10  *    in this position and unchanged.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_mac.h"
33 
34 #include <sys/param.h>
35 #include <sys/blist.h>
36 #include <sys/fcntl.h>
37 #include <sys/imgact_aout.h>
38 #include <sys/jail.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mac.h>
43 #include <sys/malloc.h>
44 #include <sys/mman.h>
45 #include <sys/mount.h>
46 #include <sys/mutex.h>
47 #include <sys/namei.h>
48 #include <sys/proc.h>
49 #include <sys/reboot.h>
50 #include <sys/resourcevar.h>
51 #include <sys/signalvar.h>
52 #include <sys/stat.h>
53 #include <sys/syscallsubr.h>
54 #include <sys/sysctl.h>
55 #include <sys/sysproto.h>
56 #include <sys/systm.h>
57 #include <sys/time.h>
58 #include <sys/vmmeter.h>
59 #include <sys/vnode.h>
60 #include <sys/wait.h>
61 
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_kern.h>
65 #include <vm/vm_map.h>
66 #include <vm/vm_extern.h>
67 #include <vm/vm_object.h>
68 #include <vm/swap_pager.h>
69 
70 #include <posix4/sched.h>
71 
72 #include <machine/../linux/linux.h>
73 #include <machine/../linux/linux_proto.h>
74 
75 #include <compat/linux/linux_mib.h>
76 #include <compat/linux/linux_util.h>
77 
78 #ifdef __i386__
79 #include <machine/cputypes.h>
80 #endif
81 
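/*
 * On alpha, Linux and FreeBSD signal numbers coincide; on other
 * architectures, signals within the translation table are mapped through
 * bsd_to_linux_signal[] and anything above LINUX_SIGTBLSZ is passed
 * through unchanged.
 */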
82 #ifdef __alpha__
83 #define BSD_TO_LINUX_SIGNAL(sig)       (sig)
84 #else
85 #define BSD_TO_LINUX_SIGNAL(sig)	\
86 	(((sig) <= LINUX_SIGTBLSZ) ? bsd_to_linux_signal[_SIG_IDX(sig)] : sig)
87 #endif
88 
89 #ifndef __alpha__
90 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
91 	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
92 	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
93 	RLIMIT_MEMLOCK, -1
94 };
95 #endif /*!__alpha__*/
96 
97 struct l_sysinfo {
98 	l_long		uptime;		/* Seconds since boot */
99 	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
100 	l_ulong		totalram;	/* Total usable main memory size */
101 	l_ulong		freeram;	/* Available memory size */
102 	l_ulong		sharedram;	/* Amount of shared memory */
103 	l_ulong		bufferram;	/* Memory used by buffers */
104 	l_ulong		totalswap;	/* Total swap space size */
105 	l_ulong		freeswap;	/* swap space still available */
106 	l_ushort	procs;		/* Number of current processes */
107 	char		_f[22];		/* Pads structure to 64 bytes */
108 };
109 #ifndef __alpha__
110 int
111 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
112 {
113 	struct l_sysinfo sysinfo;
114 	vm_object_t object;
115 	int i, j;
116 	struct timespec ts;
117 
118 	/* Uptime is copied out of print_uptime() in kern_shutdown.c */
119 	getnanouptime(&ts);
120 	i = 0;
121 	if (ts.tv_sec >= 86400) {
122 		ts.tv_sec %= 86400;
123 		i = 1;
124 	}
125 	if (i || ts.tv_sec >= 3600) {
126 		ts.tv_sec %= 3600;
127 		i = 1;
128 	}
129 	if (i || ts.tv_sec >= 60) {
130 		ts.tv_sec %= 60;
131 		i = 1;
132 	}
133 	sysinfo.uptime = ts.tv_sec;
134 
135 	/* Use the information from the mib to get our load averages */
136 	for (i = 0; i < 3; i++)
137 		sysinfo.loads[i] = averunnable.ldavg[i];
138 
139 	sysinfo.totalram = physmem * PAGE_SIZE;
140 	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;
141 
142 	sysinfo.sharedram = 0;
143 	for (object = TAILQ_FIRST(&vm_object_list); object != NULL;
144 	     object = TAILQ_NEXT(object, object_list))
145 		if (object->shadow_count > 1)
146 			sysinfo.sharedram += object->resident_page_count;
147 
148 	sysinfo.sharedram *= PAGE_SIZE;
149 	sysinfo.bufferram = 0;
150 
151 	swap_pager_status(&i, &j);
152 	sysinfo.totalswap = i * PAGE_SIZE;
153 	sysinfo.freeswap = (i - j) * PAGE_SIZE;
154 
155 	sysinfo.procs = 20; /* Hack */
156 
157 	return copyout(&sysinfo, args->info, sizeof(sysinfo));
158 }
159 #endif /*!__alpha__*/
160 
161 #ifndef __alpha__
162 int
163 linux_alarm(struct thread *td, struct linux_alarm_args *args)
164 {
165 	struct itimerval it, old_it;
166 	struct timeval tv;
167 	struct proc *p;
168 
169 #ifdef DEBUG
170 	if (ldebug(alarm))
171 		printf(ARGS(alarm, "%u"), args->secs);
172 #endif
173 
174 	if (args->secs > 100000000)
175 		return EINVAL;
176 
177 	it.it_value.tv_sec = (long)args->secs;
178 	it.it_value.tv_usec = 0;
179 	it.it_interval.tv_sec = 0;
180 	it.it_interval.tv_usec = 0;
181 	p = td->td_proc;
182 	PROC_LOCK(p);
183 	old_it = p->p_realtimer;
184 	getmicrouptime(&tv);
185 	if (timevalisset(&old_it.it_value))
186 		callout_stop(&p->p_itcallout);
187 	if (it.it_value.tv_sec != 0) {
188 		callout_reset(&p->p_itcallout, tvtohz(&it.it_value),
189 		    realitexpire, p);
190 		timevaladd(&it.it_value, &tv);
191 	}
192 	p->p_realtimer = it;
193 	PROC_UNLOCK(p);
194 	if (timevalcmp(&old_it.it_value, &tv, >)) {
195 		timevalsub(&old_it.it_value, &tv);
196 		if (old_it.it_value.tv_usec != 0)
197 			old_it.it_value.tv_sec++;
198 		td->td_retval[0] = old_it.it_value.tv_sec;
199 	}
200 	return 0;
201 }
202 #endif /*!__alpha__*/
203 
204 int
205 linux_brk(struct thread *td, struct linux_brk_args *args)
206 {
207 	struct vmspace *vm = td->td_proc->p_vmspace;
208 	vm_offset_t new, old;
209 	struct obreak_args /* {
210 		char * nsize;
211 	} */ tmp;
212 
213 #ifdef DEBUG
214 	if (ldebug(brk))
215 		printf(ARGS(brk, "%p"), (void *)args->dsend);
216 #endif
217 	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
218 	new = (vm_offset_t)args->dsend;
219 	tmp.nsize = (char *) new;
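	/*
	 * Linux brk() returns the new break on success and the current
	 * break on failure, so this call never reports an error.
	 */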
220 	if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp))
221 		td->td_retval[0] = (long)new;
222 	else
223 		td->td_retval[0] = (long)old;
224 
225 	return 0;
226 }
227 
228 int
229 linux_uselib(struct thread *td, struct linux_uselib_args *args)
230 {
231 	struct nameidata ni;
232 	struct vnode *vp;
233 	struct exec *a_out;
234 	struct vattr attr;
235 	vm_offset_t vmaddr;
236 	unsigned long file_offset;
237 	vm_offset_t buffer;
238 	unsigned long bss_size;
239 	char *library;
240 	int error;
241 	int locked;
242 
243 	LCONVPATHEXIST(td, args->library, &library);
244 
245 #ifdef DEBUG
246 	if (ldebug(uselib))
247 		printf(ARGS(uselib, "%s"), library);
248 #endif
249 
250 	a_out = NULL;
251 	locked = 0;
252 	vp = NULL;
253 
254 	/*
255 	 * XXX: This code should make use of vn_open(), rather than doing
256 	 * all this stuff itself.
257 	 */
258 	NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, library, td);
259 	error = namei(&ni);
260 	LFREEPATH(library);
261 	if (error)
262 		goto cleanup;
263 
264 	vp = ni.ni_vp;
265 	/*
266 	 * XXX - This looks like a bogus check. A LOCKLEAF namei should not
267 	 * succeed without returning a vnode.
268 	 */
269 	if (vp == NULL) {
270 		error = ENOEXEC;	/* ?? */
271 		goto cleanup;
272 	}
273 	NDFREE(&ni, NDF_ONLY_PNBUF);
274 
275 	/*
276 	 * From here on down, we have a locked vnode that must be unlocked.
277 	 */
278 	locked++;
279 
280 	/* Writable? */
281 	if (vp->v_writecount) {
282 		error = ETXTBSY;
283 		goto cleanup;
284 	}
285 
286 	/* Executable? */
287 	error = VOP_GETATTR(vp, &attr, td->td_ucred, td);
288 	if (error)
289 		goto cleanup;
290 
291 	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
292 	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
293 		error = ENOEXEC;
294 		goto cleanup;
295 	}
296 
297 	/* Sensible size? */
298 	if (attr.va_size == 0) {
299 		error = ENOEXEC;
300 		goto cleanup;
301 	}
302 
303 	/* Can we access it? */
304 	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
305 	if (error)
306 		goto cleanup;
307 
308 	/*
309 	 * XXX: This should use vn_open() so that it is properly authorized,
310 	 * and to reduce code redundancy all over the place here.
311 	 */
312 #ifdef MAC
313 	error = mac_check_vnode_open(td->td_ucred, vp, FREAD);
314 	if (error)
315 		goto cleanup;
316 #endif
317 	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
318 	if (error)
319 		goto cleanup;
320 
321 	/* Pull in executable header into kernel_map */
322 	error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
323 	    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0);
324 	/*
325 	 * Lock no longer needed
326 	 */
327 	locked = 0;
328 	VOP_UNLOCK(vp, 0, td);
329 
330 	if (error)
331 		goto cleanup;
332 
333 	/* Is it a Linux binary? */
334 	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
335 		error = ENOEXEC;
336 		goto cleanup;
337 	}
338 
339 	/*
340 	 * While we are here, we should REALLY do some more checks
341 	 */
342 
343 	/* Set file/virtual offset based on a.out variant. */
344 	switch ((int)(a_out->a_magic & 0xffff)) {
345 	case 0413:	/* ZMAGIC */
346 		file_offset = 1024;
347 		break;
348 	case 0314:	/* QMAGIC */
349 		file_offset = 0;
350 		break;
351 	default:
352 		error = ENOEXEC;
353 		goto cleanup;
354 	}
355 
356 	bss_size = round_page(a_out->a_bss);
357 
358 	/* Check various fields in header for validity/bounds. */
359 	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
360 		error = ENOEXEC;
361 		goto cleanup;
362 	}
363 
364 	/* text + data can't exceed file size */
365 	if (a_out->a_data + a_out->a_text > attr.va_size) {
366 		error = EFAULT;
367 		goto cleanup;
368 	}
369 
370 	/* To protect td->td_proc->p_rlimit in the if condition. */
371 	mtx_assert(&Giant, MA_OWNED);
372 
373 	/*
374 	 * text/data/bss must not exceed limits
375 	 * XXX - this is not complete. it should check current usage PLUS
376 	 * the resources needed by this library.
377 	 */
378 	if (a_out->a_text > maxtsiz ||
379 	    a_out->a_data + bss_size >
380 	    td->td_proc->p_rlimit[RLIMIT_DATA].rlim_cur) {
381 		error = ENOMEM;
382 		goto cleanup;
383 	}
384 
385 	mp_fixme("Unlocked vflags access.");
386 	/* prevent more writers */
387 	vp->v_vflag |= VV_TEXT;
388 
389 	/*
390 	 * Check if file_offset is page aligned. Currently we cannot handle
391 	 * misaligned file offsets, and so we read in the entire image
392 	 * (what a waste).
393 	 */
394 	if (file_offset & PAGE_MASK) {
395 #ifdef DEBUG
396 		printf("uselib: Non page aligned binary %lu\n", file_offset);
397 #endif
398 		/* Map text+data read/write/execute */
399 
400 		/* a_entry is the load address and is page aligned */
401 		vmaddr = trunc_page(a_out->a_entry);
402 
403 		/* get anon user mapping, read+write+execute */
404 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
405 		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
406 		    VM_PROT_ALL, 0);
407 		if (error)
408 			goto cleanup;
409 
410 		/* map file into kernel_map */
411 		error = vm_mmap(kernel_map, &buffer,
412 		    round_page(a_out->a_text + a_out->a_data + file_offset),
413 		    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp,
414 		    trunc_page(file_offset));
415 		if (error)
416 			goto cleanup;
417 
418 		/* copy from kernel VM space to user space */
419 		error = copyout((void *)(uintptr_t)(buffer + file_offset),
420 		    (void *)vmaddr, a_out->a_text + a_out->a_data);
421 
422 		/* release temporary kernel space */
423 		vm_map_remove(kernel_map, buffer, buffer +
424 		    round_page(a_out->a_text + a_out->a_data + file_offset));
425 
426 		if (error)
427 			goto cleanup;
428 	} else {
429 #ifdef DEBUG
430 		printf("uselib: Page aligned binary %lu\n", file_offset);
431 #endif
432 		/*
433 		 * for QMAGIC, a_entry is 20 bytes beyond the load address
434 		 * to skip the executable header
435 		 */
436 		vmaddr = trunc_page(a_out->a_entry);
437 
438 		/*
439 		 * Map it all into the process's space as a single
440 		 * copy-on-write "data" segment.
441 		 */
442 		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
443 		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
444 		    MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset);
445 		if (error)
446 			goto cleanup;
447 	}
448 #ifdef DEBUG
449 	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long*)vmaddr)[0],
450 	    ((long*)vmaddr)[1]);
451 #endif
452 	if (bss_size != 0) {
453 		/* Calculate BSS start address */
454 		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
455 		    a_out->a_data;
456 
457 		/* allocate some 'anon' space */
458 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
459 		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
460 		if (error)
461 			goto cleanup;
462 	}
463 
464 cleanup:
465 	/* Unlock vnode if needed */
466 	if (locked)
467 		VOP_UNLOCK(vp, 0, td);
468 
469 	/* Release the kernel mapping. */
470 	if (a_out)
471 		vm_map_remove(kernel_map, (vm_offset_t)a_out,
472 		    (vm_offset_t)a_out + PAGE_SIZE);
473 
474 	return error;
475 }
476 
477 int
478 linux_select(struct thread *td, struct linux_select_args *args)
479 {
480 	struct timeval tv0, tv1, utv, *tvp;
481 	int error;
482 
483 #ifdef DEBUG
484 	if (ldebug(select))
485 		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
486 		    (void *)args->readfds, (void *)args->writefds,
487 		    (void *)args->exceptfds, (void *)args->timeout);
488 #endif
489 
490 	/*
491 	 * Store current time for computation of the amount of
492 	 * time left.
493 	 */
494 	if (args->timeout) {
495 		if ((error = copyin(args->timeout, &utv, sizeof(utv))))
496 			goto select_out;
497 #ifdef DEBUG
498 		if (ldebug(select))
499 			printf(LMSG("incoming timeout (%ld/%ld)"),
500 			    utv.tv_sec, utv.tv_usec);
501 #endif
502 
503 		if (itimerfix(&utv)) {
504 			/*
505 			 * The timeval was invalid.  Convert it to something
506 			 * valid that will act as it does under Linux.
507 			 */
508 			utv.tv_sec += utv.tv_usec / 1000000;
509 			utv.tv_usec %= 1000000;
510 			if (utv.tv_usec < 0) {
511 				utv.tv_sec -= 1;
512 				utv.tv_usec += 1000000;
513 			}
514 			if (utv.tv_sec < 0)
515 				timevalclear(&utv);
516 		}
517 		microtime(&tv0);
518 		tvp = &utv;
519 	} else
520 		tvp = NULL;
521 
522 	error = kern_select(td, args->nfds, args->readfds, args->writefds,
523 	    args->exceptfds, tvp);
524 
525 #ifdef DEBUG
526 	if (ldebug(select))
527 		printf(LMSG("real select returns %d"), error);
528 #endif
529 	if (error) {
530 		/*
531 		 * See fs/select.c in the Linux kernel.  Without this,
532 		 * Maelstrom doesn't work.
533 		 */
534 		if (error == ERESTART)
535 			error = EINTR;
536 		goto select_out;
537 	}
538 
539 	if (args->timeout) {
540 		if (td->td_retval[0]) {
541 			/*
542 			 * Compute how much time was left of the timeout,
543 			 * by subtracting the current time and the time
544 			 * before we started the call, and subtracting
545 			 * that result from the user-supplied value.
546 			 */
547 			microtime(&tv1);
548 			timevalsub(&tv1, &tv0);
549 			timevalsub(&utv, &tv1);
550 			if (utv.tv_sec < 0)
551 				timevalclear(&utv);
552 		} else
553 			timevalclear(&utv);
554 #ifdef DEBUG
555 		if (ldebug(select))
556 			printf(LMSG("outgoing timeout (%ld/%ld)"),
557 			    utv.tv_sec, utv.tv_usec);
558 #endif
559 		if ((error = copyout(&utv, args->timeout, sizeof(utv))))
560 			goto select_out;
561 	}
562 
563 select_out:
564 #ifdef DEBUG
565 	if (ldebug(select))
566 		printf(LMSG("select_out -> %d"), error);
567 #endif
568 	return error;
569 }
570 
571 int
572 linux_mremap(struct thread *td, struct linux_mremap_args *args)
573 {
574 	struct munmap_args /* {
575 		void *addr;
576 		size_t len;
577 	} */ bsd_args;
578 	int error = 0;
579 
580 #ifdef DEBUG
581 	if (ldebug(mremap))
582 		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
583 		    (void *)args->addr,
584 		    (unsigned long)args->old_len,
585 		    (unsigned long)args->new_len,
586 		    (unsigned long)args->flags);
587 #endif
588 	args->new_len = round_page(args->new_len);
589 	args->old_len = round_page(args->old_len);
590 
591 	if (args->new_len > args->old_len) {
592 		td->td_retval[0] = 0;
593 		return ENOMEM;
594 	}
595 
596 	if (args->new_len < args->old_len) {
597 		bsd_args.addr = (caddr_t)(args->addr + args->new_len);
598 		bsd_args.len = args->old_len - args->new_len;
599 		error = munmap(td, &bsd_args);
600 	}
601 
602 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
603 	return error;
604 }
605 
606 #define LINUX_MS_ASYNC       0x0001
607 #define LINUX_MS_INVALIDATE  0x0002
608 #define LINUX_MS_SYNC        0x0004
609 
610 int
611 linux_msync(struct thread *td, struct linux_msync_args *args)
612 {
613 	struct msync_args bsd_args;
614 
615 	bsd_args.addr = (caddr_t)args->addr;
616 	bsd_args.len = args->len;
617 	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;
618 
619 	return msync(td, &bsd_args);
620 }
621 
622 #ifndef __alpha__
623 int
624 linux_time(struct thread *td, struct linux_time_args *args)
625 {
626 	struct timeval tv;
627 	l_time_t tm;
628 	int error;
629 
630 #ifdef DEBUG
631 	if (ldebug(time))
632 		printf(ARGS(time, "*"));
633 #endif
634 
635 	microtime(&tv);
636 	tm = tv.tv_sec;
637 	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
638 		return error;
639 	td->td_retval[0] = tm;
640 	return 0;
641 }
642 #endif	/*!__alpha__*/
643 
644 struct l_times_argv {
645 	l_long		tms_utime;
646 	l_long		tms_stime;
647 	l_long		tms_cutime;
648 	l_long		tms_cstime;
649 };
650 
651 #ifdef __alpha__
652 #define CLK_TCK 1024	/* Linux uses 1024 on alpha */
653 #else
654 #define CLK_TCK 100	/* Linux uses 100 */
655 #endif
656 
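/* Convert a struct timeval into Linux clock ticks (CLK_TCK ticks per second). */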
657 #define CONVTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
658 
659 int
660 linux_times(struct thread *td, struct linux_times_args *args)
661 {
662 	struct timeval tv;
663 	struct l_times_argv tms;
664 	struct rusage ru;
665 	int error;
666 
667 #ifdef DEBUG
668 	if (ldebug(times))
669 		printf(ARGS(times, "*"));
670 #endif
671 
672 	mtx_lock_spin(&sched_lock);
673 	calcru(td->td_proc, &ru.ru_utime, &ru.ru_stime, NULL);
674 	mtx_unlock_spin(&sched_lock);
675 
676 	tms.tms_utime = CONVTCK(ru.ru_utime);
677 	tms.tms_stime = CONVTCK(ru.ru_stime);
678 
679 	tms.tms_cutime = CONVTCK(td->td_proc->p_stats->p_cru.ru_utime);
680 	tms.tms_cstime = CONVTCK(td->td_proc->p_stats->p_cru.ru_stime);
681 
682 	if ((error = copyout(&tms, args->buf, sizeof(tms))))
683 		return error;
684 
685 	microuptime(&tv);
686 	td->td_retval[0] = (int)CONVTCK(tv);
687 	return 0;
688 }
689 
690 int
691 linux_newuname(struct thread *td, struct linux_newuname_args *args)
692 {
693 	struct l_new_utsname utsname;
694 	char osname[LINUX_MAX_UTSNAME];
695 	char osrelease[LINUX_MAX_UTSNAME];
696 	char *p;
697 
698 #ifdef DEBUG
699 	if (ldebug(newuname))
700 		printf(ARGS(newuname, "*"));
701 #endif
702 
703 	linux_get_osname(td, osname);
704 	linux_get_osrelease(td, osrelease);
705 
706 	bzero(&utsname, sizeof(utsname));
707 	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
708 	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
709 	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
710 	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
711 	for (p = utsname.version; *p != '\0'; ++p)
712 		if (*p == '\n') {
713 			*p = '\0';
714 			break;
715 		}
716 #ifdef __i386__
717 	{
718 		const char *class;
719 		switch (cpu_class) {
720 		case CPUCLASS_686:
721 			class = "i686";
722 			break;
723 		case CPUCLASS_586:
724 			class = "i586";
725 			break;
726 		case CPUCLASS_486:
727 			class = "i486";
728 			break;
729 		default:
730 			class = "i386";
731 		}
732 		strlcpy(utsname.machine, class, LINUX_MAX_UTSNAME);
733 	}
734 #else
735 	strlcpy(utsname.machine, machine, LINUX_MAX_UTSNAME);
736 #endif
737 	strlcpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME);
738 
739 	return (copyout(&utsname, args->buf, sizeof(utsname)));
740 }
741 
742 #if defined(__i386__)
743 struct l_utimbuf {
744 	l_time_t l_actime;
745 	l_time_t l_modtime;
746 };
747 
748 int
749 linux_utime(struct thread *td, struct linux_utime_args *args)
750 {
751 	struct timeval tv[2], *tvp;
752 	struct l_utimbuf lut;
753 	char *fname;
754 	int error;
755 
756 	LCONVPATHEXIST(td, args->fname, &fname);
757 
758 #ifdef DEBUG
759 	if (ldebug(utime))
760 		printf(ARGS(utime, "%s, *"), fname);
761 #endif
762 
763 	if (args->times) {
764 		if ((error = copyin(args->times, &lut, sizeof lut))) {
765 			LFREEPATH(fname);
766 			return error;
767 		}
768 		tv[0].tv_sec = lut.l_actime;
769 		tv[0].tv_usec = 0;
770 		tv[1].tv_sec = lut.l_modtime;
771 		tv[1].tv_usec = 0;
772 		tvp = tv;
773 	} else
774 		tvp = NULL;
775 
776 	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
777 	LFREEPATH(fname);
778 	return (error);
779 }
780 #endif /* __i386__ */
781 
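/* Linux wait option: wait only for "clone" children; mapped to WLINUXCLONE below. */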
782 #define __WCLONE 0x80000000
783 
784 #ifndef __alpha__
785 int
786 linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
787 {
788 	struct wait_args /* {
789 		int pid;
790 		int *status;
791 		int options;
792 		struct	rusage *rusage;
793 	} */ tmp;
794 	int error, tmpstat;
795 
796 #ifdef DEBUG
797 	if (ldebug(waitpid))
798 		printf(ARGS(waitpid, "%d, %p, %d"),
799 		    args->pid, (void *)args->status, args->options);
800 #endif
801 
802 	tmp.pid = args->pid;
803 	tmp.status = args->status;
804 	tmp.options = (args->options & (WNOHANG | WUNTRACED));
805 	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
806 	if (args->options & __WCLONE)
807 		tmp.options |= WLINUXCLONE;
808 	tmp.rusage = NULL;
809 
810 	if ((error = wait4(td, &tmp)) != 0)
811 		return error;
812 
813 	if (args->status) {
814 		if ((error = copyin(args->status, &tmpstat, sizeof(int))) != 0)
815 			return error;
816 		tmpstat &= 0xffff;
817 		if (WIFSIGNALED(tmpstat))
818 			tmpstat = (tmpstat & 0xffffff80) |
819 			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
820 		else if (WIFSTOPPED(tmpstat))
821 			tmpstat = (tmpstat & 0xffff00ff) |
822 			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
823 		return copyout(&tmpstat, args->status, sizeof(int));
824 	}
825 
826 	return 0;
827 }
828 #endif	/*!__alpha__*/
829 
830 int
831 linux_wait4(struct thread *td, struct linux_wait4_args *args)
832 {
833 	struct wait_args /* {
834 		int pid;
835 		int *status;
836 		int options;
837 		struct	rusage *rusage;
838 	} */ tmp;
839 	int error, tmpstat;
840 	struct proc *p;
841 
842 #ifdef DEBUG
843 	if (ldebug(wait4))
844 		printf(ARGS(wait4, "%d, %p, %d, %p"),
845 		    args->pid, (void *)args->status, args->options,
846 		    (void *)args->rusage);
847 #endif
848 
849 	tmp.pid = args->pid;
850 	tmp.status = args->status;
851 	tmp.options = (args->options & (WNOHANG | WUNTRACED));
852 	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
853 	if (args->options & __WCLONE)
854 		tmp.options |= WLINUXCLONE;
855 	tmp.rusage = (struct rusage *)args->rusage;
856 
857 	if ((error = wait4(td, &tmp)) != 0)
858 		return error;
859 
860 	p = td->td_proc;
861 	PROC_LOCK(p);
862 	SIGDELSET(p->p_siglist, SIGCHLD);
863 	PROC_UNLOCK(p);
864 
865 	if (args->status) {
866 		if ((error = copyin(args->status, &tmpstat, sizeof(int))) != 0)
867 			return error;
868 		tmpstat &= 0xffff;
869 		if (WIFSIGNALED(tmpstat))
870 			tmpstat = (tmpstat & 0xffffff80) |
871 			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
872 		else if (WIFSTOPPED(tmpstat))
873 			tmpstat = (tmpstat & 0xffff00ff) |
874 			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
875 		return copyout(&tmpstat, args->status, sizeof(int));
876 	}
877 
878 	return 0;
879 }
880 
881 int
882 linux_mknod(struct thread *td, struct linux_mknod_args *args)
883 {
884 	char *path;
885 	int error;
886 
887 	LCONVPATHCREAT(td, args->path, &path);
888 
889 #ifdef DEBUG
890 	if (ldebug(mknod))
891 		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
892 #endif
893 
894 	if (args->mode & S_IFIFO)
895 		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
896 	else
897 		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
898 		    args->dev);
899 	LFREEPATH(path);
900 	return (error);
901 }
902 
903 /*
904  * UGH! This is just about the dumbest idea I've ever heard!!
905  */
906 int
907 linux_personality(struct thread *td, struct linux_personality_args *args)
908 {
909 #ifdef DEBUG
910 	if (ldebug(personality))
911 		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
912 #endif
913 #ifndef __alpha__
914 	if (args->per != 0)
915 		return EINVAL;
916 #endif
917 
918 	/* Yes Jim, it's still a Linux... */
919 	td->td_retval[0] = 0;
920 	return 0;
921 }
922 
923 /*
924  * Wrappers for get/setitimer for debugging.
925  */
926 int
927 linux_setitimer(struct thread *td, struct linux_setitimer_args *args)
928 {
929 	struct setitimer_args bsa;
930 	struct itimerval foo;
931 	int error;
932 
933 #ifdef DEBUG
934 	if (ldebug(setitimer))
935 		printf(ARGS(setitimer, "%p, %p"),
936 		    (void *)args->itv, (void *)args->oitv);
937 #endif
938 	bsa.which = args->which;
939 	bsa.itv = (struct itimerval *)args->itv;
940 	bsa.oitv = (struct itimerval *)args->oitv;
941 	if (args->itv) {
942 	    if ((error = copyin(args->itv, &foo, sizeof(foo))))
943 		return error;
944 #ifdef DEBUG
945 	    if (ldebug(setitimer)) {
946 		printf("setitimer: value: sec: %ld, usec: %ld\n",
947 		    foo.it_value.tv_sec, foo.it_value.tv_usec);
948 		printf("setitimer: interval: sec: %ld, usec: %ld\n",
949 		    foo.it_interval.tv_sec, foo.it_interval.tv_usec);
950 	    }
951 #endif
952 	}
953 	return setitimer(td, &bsa);
954 }
955 
956 int
957 linux_getitimer(struct thread *td, struct linux_getitimer_args *args)
958 {
959 	struct getitimer_args bsa;
960 #ifdef DEBUG
961 	if (ldebug(getitimer))
962 		printf(ARGS(getitimer, "%p"), (void *)args->itv);
963 #endif
964 	bsa.which = args->which;
965 	bsa.itv = (struct itimerval *)args->itv;
966 	return getitimer(td, &bsa);
967 }
968 
969 #ifndef __alpha__
970 int
971 linux_nice(struct thread *td, struct linux_nice_args *args)
972 {
973 	struct setpriority_args	bsd_args;
974 
975 	bsd_args.which = PRIO_PROCESS;
976 	bsd_args.who = 0;	/* current process */
977 	bsd_args.prio = args->inc;
978 	return setpriority(td, &bsd_args);
979 }
980 #endif	/*!__alpha__*/
981 
982 int
983 linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
984 {
985 	struct ucred *newcred, *oldcred;
986 	l_gid_t linux_gidset[NGROUPS];
987 	gid_t *bsd_gidset;
988 	int ngrp, error;
989 	struct proc *p;
990 
991 	ngrp = args->gidsetsize;
992 	if (ngrp >= NGROUPS)
993 		return (EINVAL);
994 	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
995 	if (error)
996 		return (error);
997 	newcred = crget();
998 	p = td->td_proc;
999 	PROC_LOCK(p);
1000 	oldcred = p->p_ucred;
1001 
1002 	/*
1003 	 * cr_groups[0] holds egid. Setting the whole set from
1004 	 * the supplied set will cause egid to be changed too.
1005 	 * Keep cr_groups[0] unchanged to prevent that.
1006 	 */
1007 
1008 	if ((error = suser_cred(oldcred, PRISON_ROOT)) != 0) {
1009 		PROC_UNLOCK(p);
1010 		crfree(newcred);
1011 		return (error);
1012 	}
1013 
1014 	crcopy(newcred, oldcred);
1015 	if (ngrp > 0) {
1016 		newcred->cr_ngroups = ngrp + 1;
1017 
1018 		bsd_gidset = newcred->cr_groups;
1019 		ngrp--;
1020 		while (ngrp >= 0) {
1021 			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
1022 			ngrp--;
1023 		}
1024 	}
1025 	else
1026 		newcred->cr_ngroups = 1;
1027 
1028 	setsugid(p);
1029 	p->p_ucred = newcred;
1030 	PROC_UNLOCK(p);
1031 	crfree(oldcred);
1032 	return (0);
1033 }
1034 
1035 int
1036 linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
1037 {
1038 	struct ucred *cred;
1039 	l_gid_t linux_gidset[NGROUPS];
1040 	gid_t *bsd_gidset;
1041 	int bsd_gidsetsz, ngrp, error;
1042 
1043 	cred = td->td_ucred;
1044 	bsd_gidset = cred->cr_groups;
1045 	bsd_gidsetsz = cred->cr_ngroups - 1;
1046 
1047 	/*
1048 	 * cr_groups[0] holds egid. Returning the whole set
1049 	 * here will cause a duplicate. Exclude cr_groups[0]
1050 	 * to prevent that.
1051 	 */
1052 
1053 	if ((ngrp = args->gidsetsize) == 0) {
1054 		td->td_retval[0] = bsd_gidsetsz;
1055 		return (0);
1056 	}
1057 
1058 	if (ngrp < bsd_gidsetsz)
1059 		return (EINVAL);
1060 
1061 	ngrp = 0;
1062 	while (ngrp < bsd_gidsetsz) {
1063 		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
1064 		ngrp++;
1065 	}
1066 
1067 	if ((error = copyout(linux_gidset, args->grouplist,
1068 	    ngrp * sizeof(l_gid_t))))
1069 		return (error);
1070 
1071 	td->td_retval[0] = ngrp;
1072 	return (0);
1073 }
1074 
1075 #ifndef __alpha__
1076 int
1077 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
1078 {
1079 	struct rlimit bsd_rlim;
1080 	struct l_rlimit rlim;
1081 	u_int which;
1082 	int error;
1083 
1084 #ifdef DEBUG
1085 	if (ldebug(setrlimit))
1086 		printf(ARGS(setrlimit, "%d, %p"),
1087 		    args->resource, (void *)args->rlim);
1088 #endif
1089 
1090 	if (args->resource >= LINUX_RLIM_NLIMITS)
1091 		return (EINVAL);
1092 
1093 	which = linux_to_bsd_resource[args->resource];
1094 	if (which == -1)
1095 		return (EINVAL);
1096 
1097 	error = copyin(args->rlim, &rlim, sizeof(rlim));
1098 	if (error)
1099 		return (error);
1100 
1101 	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
1102 	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
1103 	return (dosetrlimit(td, which, &bsd_rlim));
1104 }
1105 
1106 int
1107 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
1108 {
1109 	struct l_rlimit rlim;
1110 	struct proc *p = td->td_proc;
1111 	struct rlimit *bsd_rlp;
1112 	u_int which;
1113 
1114 #ifdef DEBUG
1115 	if (ldebug(old_getrlimit))
1116 		printf(ARGS(old_getrlimit, "%d, %p"),
1117 		    args->resource, (void *)args->rlim);
1118 #endif
1119 
1120 	if (args->resource >= LINUX_RLIM_NLIMITS)
1121 		return (EINVAL);
1122 
1123 	which = linux_to_bsd_resource[args->resource];
1124 	if (which == -1)
1125 		return (EINVAL);
1126 	bsd_rlp = &p->p_rlimit[which];
1127 
1128 	rlim.rlim_cur = (unsigned long)bsd_rlp->rlim_cur;
1129 	if (rlim.rlim_cur == ULONG_MAX)
1130 		rlim.rlim_cur = LONG_MAX;
1131 	rlim.rlim_max = (unsigned long)bsd_rlp->rlim_max;
1132 	if (rlim.rlim_max == ULONG_MAX)
1133 		rlim.rlim_max = LONG_MAX;
1134 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1135 }
1136 
1137 int
1138 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
1139 {
1140 	struct l_rlimit rlim;
1141 	struct proc *p = td->td_proc;
1142 	struct rlimit *bsd_rlp;
1143 	u_int which;
1144 
1145 #ifdef DEBUG
1146 	if (ldebug(getrlimit))
1147 		printf(ARGS(getrlimit, "%d, %p"),
1148 		    args->resource, (void *)args->rlim);
1149 #endif
1150 
1151 	if (args->resource >= LINUX_RLIM_NLIMITS)
1152 		return (EINVAL);
1153 
1154 	which = linux_to_bsd_resource[args->resource];
1155 	if (which == -1)
1156 		return (EINVAL);
1157 	bsd_rlp = &p->p_rlimit[which];
1158 
1159 	rlim.rlim_cur = (l_ulong)bsd_rlp->rlim_cur;
1160 	rlim.rlim_max = (l_ulong)bsd_rlp->rlim_max;
1161 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1162 }
1163 #endif /*!__alpha__*/
1164 
1165 int
1166 linux_sched_setscheduler(struct thread *td,
1167     struct linux_sched_setscheduler_args *args)
1168 {
1169 	struct sched_setscheduler_args bsd;
1170 
1171 #ifdef DEBUG
1172 	if (ldebug(sched_setscheduler))
1173 		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
1174 		    args->pid, args->policy, (const void *)args->param);
1175 #endif
1176 
1177 	switch (args->policy) {
1178 	case LINUX_SCHED_OTHER:
1179 		bsd.policy = SCHED_OTHER;
1180 		break;
1181 	case LINUX_SCHED_FIFO:
1182 		bsd.policy = SCHED_FIFO;
1183 		break;
1184 	case LINUX_SCHED_RR:
1185 		bsd.policy = SCHED_RR;
1186 		break;
1187 	default:
1188 		return EINVAL;
1189 	}
1190 
1191 	bsd.pid = args->pid;
1192 	bsd.param = (struct sched_param *)args->param;
1193 	return sched_setscheduler(td, &bsd);
1194 }
1195 
1196 int
1197 linux_sched_getscheduler(struct thread *td,
1198     struct linux_sched_getscheduler_args *args)
1199 {
1200 	struct sched_getscheduler_args bsd;
1201 	int error;
1202 
1203 #ifdef DEBUG
1204 	if (ldebug(sched_getscheduler))
1205 		printf(ARGS(sched_getscheduler, "%d"), args->pid);
1206 #endif
1207 
1208 	bsd.pid = args->pid;
1209 	error = sched_getscheduler(td, &bsd);
1210 
1211 	switch (td->td_retval[0]) {
1212 	case SCHED_OTHER:
1213 		td->td_retval[0] = LINUX_SCHED_OTHER;
1214 		break;
1215 	case SCHED_FIFO:
1216 		td->td_retval[0] = LINUX_SCHED_FIFO;
1217 		break;
1218 	case SCHED_RR:
1219 		td->td_retval[0] = LINUX_SCHED_RR;
1220 		break;
1221 	}
1222 
1223 	return error;
1224 }
1225 
1226 int
1227 linux_sched_get_priority_max(struct thread *td,
1228     struct linux_sched_get_priority_max_args *args)
1229 {
1230 	struct sched_get_priority_max_args bsd;
1231 
1232 #ifdef DEBUG
1233 	if (ldebug(sched_get_priority_max))
1234 		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
1235 #endif
1236 
1237 	switch (args->policy) {
1238 	case LINUX_SCHED_OTHER:
1239 		bsd.policy = SCHED_OTHER;
1240 		break;
1241 	case LINUX_SCHED_FIFO:
1242 		bsd.policy = SCHED_FIFO;
1243 		break;
1244 	case LINUX_SCHED_RR:
1245 		bsd.policy = SCHED_RR;
1246 		break;
1247 	default:
1248 		return EINVAL;
1249 	}
1250 	return sched_get_priority_max(td, &bsd);
1251 }
1252 
1253 int
1254 linux_sched_get_priority_min(struct thread *td,
1255     struct linux_sched_get_priority_min_args *args)
1256 {
1257 	struct sched_get_priority_min_args bsd;
1258 
1259 #ifdef DEBUG
1260 	if (ldebug(sched_get_priority_min))
1261 		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
1262 #endif
1263 
1264 	switch (args->policy) {
1265 	case LINUX_SCHED_OTHER:
1266 		bsd.policy = SCHED_OTHER;
1267 		break;
1268 	case LINUX_SCHED_FIFO:
1269 		bsd.policy = SCHED_FIFO;
1270 		break;
1271 	case LINUX_SCHED_RR:
1272 		bsd.policy = SCHED_RR;
1273 		break;
1274 	default:
1275 		return EINVAL;
1276 	}
1277 	return sched_get_priority_min(td, &bsd);
1278 }
1279 
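/* Linux reboot(2) magic command values recognized below. */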
1280 #define REBOOT_CAD_ON	0x89abcdef
1281 #define REBOOT_CAD_OFF	0
1282 #define REBOOT_HALT	0xcdef0123
1283 
1284 int
1285 linux_reboot(struct thread *td, struct linux_reboot_args *args)
1286 {
1287 	struct reboot_args bsd_args;
1288 
1289 #ifdef DEBUG
1290 	if (ldebug(reboot))
1291 		printf(ARGS(reboot, "0x%x"), args->cmd);
1292 #endif
1293 	if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF)
1294 		return (0);
1295 	bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0;
1296 	return (reboot(td, &bsd_args));
1297 }
1298 
1299 #ifndef __alpha__
1300 
1301 /*
1302  * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
1303  * td->td_retval[1] when COMPAT_43 or COMPAT_SUNOS is defined. This
1304  * clobbers registers that are assumed to be preserved. The following
1305  * lightweight syscalls fix this. See also linux_getgid16() and
1306  * linux_getuid16() in linux_uid16.c.
1307  *
1308  * linux_getpid() - MP SAFE
1309  * linux_getgid() - MP SAFE
1310  * linux_getuid() - MP SAFE
1311  */
1312 
1313 int
1314 linux_getpid(struct thread *td, struct linux_getpid_args *args)
1315 {
1316 
1317 	td->td_retval[0] = td->td_proc->p_pid;
1318 	return (0);
1319 }
1320 
1321 int
1322 linux_getgid(struct thread *td, struct linux_getgid_args *args)
1323 {
1324 
1325 	td->td_retval[0] = td->td_ucred->cr_rgid;
1326 	return (0);
1327 }
1328 
1329 int
1330 linux_getuid(struct thread *td, struct linux_getuid_args *args)
1331 {
1332 
1333 	td->td_retval[0] = td->td_ucred->cr_ruid;
1334 	return (0);
1335 }
1336 
1337 #endif /*!__alpha__*/
1338 
1339 int
1340 linux_getsid(struct thread *td, struct linux_getsid_args *args)
1341 {
1342 	struct getsid_args bsd;
1343 	bsd.pid = args->pid;
1344 	return getsid(td, &bsd);
1345 }
1346