xref: /freebsd/sys/compat/linux/linux_misc.c (revision c98323078dede7579020518ec84cdcb478e5c142)
1 /*-
2  * Copyright (c) 1994-1995 Søren Schmidt
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer
10  *    in this position and unchanged.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_mac.h"
33 
34 #include <sys/param.h>
35 #include <sys/blist.h>
36 #include <sys/fcntl.h>
37 #include <sys/imgact_aout.h>
38 #include <sys/jail.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mac.h>
43 #include <sys/malloc.h>
44 #include <sys/mman.h>
45 #include <sys/mount.h>
46 #include <sys/mutex.h>
47 #include <sys/namei.h>
48 #include <sys/proc.h>
49 #include <sys/reboot.h>
50 #include <sys/resourcevar.h>
51 #include <sys/signalvar.h>
52 #include <sys/stat.h>
53 #include <sys/syscallsubr.h>
54 #include <sys/sysctl.h>
55 #include <sys/sysproto.h>
56 #include <sys/systm.h>
57 #include <sys/time.h>
58 #include <sys/vmmeter.h>
59 #include <sys/vnode.h>
60 #include <sys/wait.h>
61 
62 #include <vm/vm.h>
63 #include <vm/pmap.h>
64 #include <vm/vm_kern.h>
65 #include <vm/vm_map.h>
66 #include <vm/vm_extern.h>
67 #include <vm/vm_object.h>
68 #include <vm/swap_pager.h>
69 
70 #include <posix4/sched.h>
71 
72 #include <machine/../linux/linux.h>
73 #include <machine/../linux/linux_proto.h>
74 
75 #include <compat/linux/linux_mib.h>
76 #include <compat/linux/linux_util.h>
77 
78 #ifdef __i386__
79 #include <machine/cputypes.h>
80 #endif
81 
82 #ifdef __alpha__
83 #define BSD_TO_LINUX_SIGNAL(sig)       (sig)
84 #else
85 #define BSD_TO_LINUX_SIGNAL(sig)	\
86 	(((sig) <= LINUX_SIGTBLSZ) ? bsd_to_linux_signal[_SIG_IDX(sig)] : sig)
87 #endif
88 
89 #ifndef __alpha__
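/*
 * Translate Linux RLIMIT_* indices to their FreeBSD counterparts;
 * -1 marks a Linux resource with no FreeBSD equivalent.
 */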
90 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
91 	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
92 	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
93 	RLIMIT_MEMLOCK, -1
94 };
95 #endif /*!__alpha__*/
96 
97 struct l_sysinfo {
98 	l_long		uptime;		/* Seconds since boot */
99 	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
100 #define LINUX_SYSINFO_LOADS_SCALE 65536
101 	l_ulong		totalram;	/* Total usable main memory size */
102 	l_ulong		freeram;	/* Available memory size */
103 	l_ulong		sharedram;	/* Amount of shared memory */
104 	l_ulong		bufferram;	/* Memory used by buffers */
105 	l_ulong		totalswap;	/* Total swap space size */
106 	l_ulong		freeswap;	/* swap space still available */
107 	l_ushort	procs;		/* Number of current processes */
108 	l_ulong		totalbig;
109 	l_ulong		freebig;
110 	l_uint		mem_unit;
111 	char		_f[6];		/* Pads structure to 64 bytes */
112 };
113 #ifndef __alpha__
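/*
 * Linux sysinfo(2): fill a struct l_sysinfo from the native VM, swap and
 * process counters and copy it out to user space.
 */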
114 int
115 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
116 {
117 	struct l_sysinfo sysinfo;
118 	vm_object_t object;
119 	int i, j;
120 	struct timespec ts;
121 
122 	/* Linux sysinfo(2) wants uptime as total seconds since boot. */
123 	getnanouptime(&ts);
124 	sysinfo.uptime = ts.tv_sec;
138 
139 	/* Use the information from the mib to get our load averages */
140 	for (i = 0; i < 3; i++)
141 		sysinfo.loads[i] = averunnable.ldavg[i] *
142 		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
143 
144 	sysinfo.totalram = physmem * PAGE_SIZE;
145 	sysinfo.freeram = sysinfo.totalram - cnt.v_wire_count * PAGE_SIZE;
146 
147 	sysinfo.sharedram = 0;
148 	mtx_lock(&vm_object_list_mtx);
149 	TAILQ_FOREACH(object, &vm_object_list, object_list)
150 		if (object->shadow_count > 1)
151 			sysinfo.sharedram += object->resident_page_count;
152 	mtx_unlock(&vm_object_list_mtx);
153 
154 	sysinfo.sharedram *= PAGE_SIZE;
155 	sysinfo.bufferram = 0;
156 
157 	swap_pager_status(&i, &j);
158 	sysinfo.totalswap = i * PAGE_SIZE;
159 	sysinfo.freeswap = (i - j) * PAGE_SIZE;
160 
161 	sysinfo.procs = nprocs;
162 
163 	/* The following are only present in newer Linux kernels. */
164 	sysinfo.totalbig = 0;
165 	sysinfo.freebig = 0;
166 	sysinfo.mem_unit = 1;
167 
168 	return copyout(&sysinfo, args->info, sizeof(sysinfo));
169 }
170 #endif /*!__alpha__*/
171 
172 #ifndef __alpha__
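/*
 * Linux alarm(2): arm the process real-time interval timer directly and
 * return the number of whole seconds remaining on any previous alarm,
 * rounded up.
 */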
173 int
174 linux_alarm(struct thread *td, struct linux_alarm_args *args)
175 {
176 	struct itimerval it, old_it;
177 	struct timeval tv;
178 	struct proc *p;
179 
180 #ifdef DEBUG
181 	if (ldebug(alarm))
182 		printf(ARGS(alarm, "%u"), args->secs);
183 #endif
184 
185 	if (args->secs > 100000000)
186 		return EINVAL;
187 
188 	it.it_value.tv_sec = (long)args->secs;
189 	it.it_value.tv_usec = 0;
190 	it.it_interval.tv_sec = 0;
191 	it.it_interval.tv_usec = 0;
192 	p = td->td_proc;
193 	PROC_LOCK(p);
194 	old_it = p->p_realtimer;
195 	getmicrouptime(&tv);
196 	if (timevalisset(&old_it.it_value))
197 		callout_stop(&p->p_itcallout);
198 	if (it.it_value.tv_sec != 0) {
199 		callout_reset(&p->p_itcallout, tvtohz(&it.it_value),
200 		    realitexpire, p);
201 		timevaladd(&it.it_value, &tv);
202 	}
203 	p->p_realtimer = it;
204 	PROC_UNLOCK(p);
205 	if (timevalcmp(&old_it.it_value, &tv, >)) {
206 		timevalsub(&old_it.it_value, &tv);
207 		if (old_it.it_value.tv_usec != 0)
208 			old_it.it_value.tv_sec++;
209 		td->td_retval[0] = old_it.it_value.tv_sec;
210 	}
211 	return 0;
212 }
213 #endif /*!__alpha__*/
214 
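/*
 * Linux brk(2): on failure return the current break instead of an error,
 * which is what Linux binaries expect.
 */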
215 int
216 linux_brk(struct thread *td, struct linux_brk_args *args)
217 {
218 	struct vmspace *vm = td->td_proc->p_vmspace;
219 	vm_offset_t new, old;
220 	struct obreak_args /* {
221 		char * nsize;
222 	} */ tmp;
223 
224 #ifdef DEBUG
225 	if (ldebug(brk))
226 		printf(ARGS(brk, "%p"), (void *)args->dsend);
227 #endif
228 	old = (vm_offset_t)vm->vm_daddr + ctob(vm->vm_dsize);
229 	new = (vm_offset_t)args->dsend;
230 	tmp.nsize = (char *) new;
231 	if (((caddr_t)new > vm->vm_daddr) && !obreak(td, &tmp))
232 		td->td_retval[0] = (long)new;
233 	else
234 		td->td_retval[0] = (long)old;
235 
236 	return 0;
237 }
238 
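/*
 * Linux uselib(2): map an a.out shared library into the process.  Text
 * and data are mapped copy-on-write when the file offset is page aligned
 * and copied in by hand otherwise; bss is backed by anonymous memory.
 */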
239 int
240 linux_uselib(struct thread *td, struct linux_uselib_args *args)
241 {
242 	struct nameidata ni;
243 	struct vnode *vp;
244 	struct exec *a_out;
245 	struct vattr attr;
246 	vm_offset_t vmaddr;
247 	unsigned long file_offset;
248 	vm_offset_t buffer;
249 	unsigned long bss_size;
250 	char *library;
251 	int error;
252 	int locked;
253 
254 	LCONVPATHEXIST(td, args->library, &library);
255 
256 #ifdef DEBUG
257 	if (ldebug(uselib))
258 		printf(ARGS(uselib, "%s"), library);
259 #endif
260 
261 	a_out = NULL;
262 	locked = 0;
263 	vp = NULL;
264 
265 	/*
266 	 * XXX: This code should make use of vn_open(), rather than doing
267 	 * all this stuff itself.
268 	 */
269 	NDINIT(&ni, LOOKUP, FOLLOW|LOCKLEAF, UIO_SYSSPACE, library, td);
270 	error = namei(&ni);
271 	LFREEPATH(library);
272 	if (error)
273 		goto cleanup;
274 
275 	vp = ni.ni_vp;
276 	/*
277 	 * XXX - This looks like a bogus check. A LOCKLEAF namei should not
278 	 * succeed without returning a vnode.
279 	 */
280 	if (vp == NULL) {
281 		error = ENOEXEC;	/* ?? */
282 		goto cleanup;
283 	}
284 	NDFREE(&ni, NDF_ONLY_PNBUF);
285 
286 	/*
287 	 * From here on down, we have a locked vnode that must be unlocked.
288 	 */
289 	locked++;
290 
291 	/* Writable? */
292 	if (vp->v_writecount) {
293 		error = ETXTBSY;
294 		goto cleanup;
295 	}
296 
297 	/* Executable? */
298 	error = VOP_GETATTR(vp, &attr, td->td_ucred, td);
299 	if (error)
300 		goto cleanup;
301 
302 	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
303 	    ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
304 		error = ENOEXEC;
305 		goto cleanup;
306 	}
307 
308 	/* Sensible size? */
309 	if (attr.va_size == 0) {
310 		error = ENOEXEC;
311 		goto cleanup;
312 	}
313 
314 	/* Can we access it? */
315 	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
316 	if (error)
317 		goto cleanup;
318 
319 	/*
320 	 * XXX: This should use vn_open() so that it is properly authorized,
321 	 * and to reduce code redundancy all over the place here.
322 	 */
323 #ifdef MAC
324 	error = mac_check_vnode_open(td->td_ucred, vp, FREAD);
325 	if (error)
326 		goto cleanup;
327 #endif
328 	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
329 	if (error)
330 		goto cleanup;
331 
332 	/* Pull in executable header into kernel_map */
333 	error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE,
334 	    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0);
335 	/*
336 	 * Lock no longer needed
337 	 */
338 	locked = 0;
339 	VOP_UNLOCK(vp, 0, td);
340 
341 	if (error)
342 		goto cleanup;
343 
344 	/* Is it a Linux binary? */
345 	if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
346 		error = ENOEXEC;
347 		goto cleanup;
348 	}
349 
350 	/*
351 	 * While we are here, we should REALLY do some more checks
352 	 */
353 
354 	/* Set file/virtual offset based on a.out variant. */
355 	switch ((int)(a_out->a_magic & 0xffff)) {
356 	case 0413:	/* ZMAGIC */
357 		file_offset = 1024;
358 		break;
359 	case 0314:	/* QMAGIC */
360 		file_offset = 0;
361 		break;
362 	default:
363 		error = ENOEXEC;
364 		goto cleanup;
365 	}
366 
367 	bss_size = round_page(a_out->a_bss);
368 
369 	/* Check various fields in header for validity/bounds. */
370 	if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
371 		error = ENOEXEC;
372 		goto cleanup;
373 	}
374 
375 	/* text + data can't exceed file size */
376 	if (a_out->a_data + a_out->a_text > attr.va_size) {
377 		error = EFAULT;
378 		goto cleanup;
379 	}
380 
381 	/*
382 	 * text/data/bss must not exceed limits
383 	 * XXX - this is not complete. It should check current usage PLUS
384 	 * the resources needed by this library.
385 	 */
386 	PROC_LOCK(td->td_proc);
387 	if (a_out->a_text > maxtsiz ||
388 	    a_out->a_data + bss_size > lim_cur(td->td_proc, RLIMIT_DATA)) {
389 		PROC_UNLOCK(td->td_proc);
390 		error = ENOMEM;
391 		goto cleanup;
392 	}
393 	PROC_UNLOCK(td->td_proc);
394 
395 	mp_fixme("Unlocked vflags access.");
396 	/* prevent more writers */
397 	vp->v_vflag |= VV_TEXT;
398 
399 	/*
400 	 * Check whether file_offset is page aligned. Currently we cannot
401 	 * handle misaligned file offsets, so we read in the entire image
402 	 * (what a waste).
403 	 */
404 	if (file_offset & PAGE_MASK) {
405 #ifdef DEBUG
406 		printf("uselib: Non page aligned binary %lu\n", file_offset);
407 #endif
408 		/* Map text+data read/write/execute */
409 
410 		/* a_entry is the load address and is page aligned */
411 		vmaddr = trunc_page(a_out->a_entry);
412 
413 		/* get anon user mapping, read+write+execute */
414 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
415 		    &vmaddr, a_out->a_text + a_out->a_data, FALSE, VM_PROT_ALL,
416 		    VM_PROT_ALL, 0);
417 		if (error)
418 			goto cleanup;
419 
420 		/* map file into kernel_map */
421 		error = vm_mmap(kernel_map, &buffer,
422 		    round_page(a_out->a_text + a_out->a_data + file_offset),
423 		    VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp,
424 		    trunc_page(file_offset));
425 		if (error)
426 			goto cleanup;
427 
428 		/* copy from kernel VM space to user space */
429 		error = copyout((void *)(uintptr_t)(buffer + file_offset),
430 		    (void *)vmaddr, a_out->a_text + a_out->a_data);
431 
432 		/* release temporary kernel space */
433 		vm_map_remove(kernel_map, buffer, buffer +
434 		    round_page(a_out->a_text + a_out->a_data + file_offset));
435 
436 		if (error)
437 			goto cleanup;
438 	} else {
439 #ifdef DEBUG
440 		printf("uselib: Page aligned binary %lu\n", file_offset);
441 #endif
442 		/*
443 		 * for QMAGIC, a_entry is 20 bytes beyond the load address
444 		 * to skip the executable header
445 		 */
446 		vmaddr = trunc_page(a_out->a_entry);
447 
448 		/*
449 		 * Map it all into the process's space as a single
450 		 * copy-on-write "data" segment.
451 		 */
452 		error = vm_mmap(&td->td_proc->p_vmspace->vm_map, &vmaddr,
453 		    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
454 		    MAP_PRIVATE | MAP_FIXED, (caddr_t)vp, file_offset);
455 		if (error)
456 			goto cleanup;
457 	}
458 #ifdef DEBUG
459 	printf("mem=%08lx = %08lx %08lx\n", (long)vmaddr, ((long*)vmaddr)[0],
460 	    ((long*)vmaddr)[1]);
461 #endif
462 	if (bss_size != 0) {
463 		/* Calculate BSS start address */
464 		vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
465 		    a_out->a_data;
466 
467 		/* allocate some 'anon' space */
468 		error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
469 		    &vmaddr, bss_size, FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
470 		if (error)
471 			goto cleanup;
472 	}
473 
474 cleanup:
475 	/* Unlock vnode if needed */
476 	if (locked)
477 		VOP_UNLOCK(vp, 0, td);
478 
479 	/* Release the kernel mapping. */
480 	if (a_out)
481 		vm_map_remove(kernel_map, (vm_offset_t)a_out,
482 		    (vm_offset_t)a_out + PAGE_SIZE);
483 
484 	return error;
485 }
486 
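/*
 * Linux select(2): normalize an invalid timeout the way Linux does, run
 * the native select, and write the remaining time back to user space.
 */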
487 int
488 linux_select(struct thread *td, struct linux_select_args *args)
489 {
490 	struct timeval tv0, tv1, utv, *tvp;
491 	int error;
492 
493 #ifdef DEBUG
494 	if (ldebug(select))
495 		printf(ARGS(select, "%d, %p, %p, %p, %p"), args->nfds,
496 		    (void *)args->readfds, (void *)args->writefds,
497 		    (void *)args->exceptfds, (void *)args->timeout);
498 #endif
499 
500 	/*
501 	 * Store current time for computation of the amount of
502 	 * time left.
503 	 */
504 	if (args->timeout) {
505 		if ((error = copyin(args->timeout, &utv, sizeof(utv))))
506 			goto select_out;
507 #ifdef DEBUG
508 		if (ldebug(select))
509 			printf(LMSG("incoming timeout (%ld/%ld)"),
510 			    utv.tv_sec, utv.tv_usec);
511 #endif
512 
513 		if (itimerfix(&utv)) {
514 			/*
515 			 * The timeval was invalid.  Convert it to something
516 			 * valid that will act as it does under Linux.
517 			 */
518 			utv.tv_sec += utv.tv_usec / 1000000;
519 			utv.tv_usec %= 1000000;
520 			if (utv.tv_usec < 0) {
521 				utv.tv_sec -= 1;
522 				utv.tv_usec += 1000000;
523 			}
524 			if (utv.tv_sec < 0)
525 				timevalclear(&utv);
526 		}
527 		microtime(&tv0);
528 		tvp = &utv;
529 	} else
530 		tvp = NULL;
531 
532 	error = kern_select(td, args->nfds, args->readfds, args->writefds,
533 	    args->exceptfds, tvp);
534 
535 #ifdef DEBUG
536 	if (ldebug(select))
537 		printf(LMSG("real select returns %d"), error);
538 #endif
539 	if (error) {
540 		/*
541 		 * See fs/select.c in the Linux kernel.  Without this,
542 		 * Maelstrom doesn't work.
543 		 */
544 		if (error == ERESTART)
545 			error = EINTR;
546 		goto select_out;
547 	}
548 
549 	if (args->timeout) {
550 		if (td->td_retval[0]) {
551 			/*
552 			 * Compute how much time was left of the timeout,
553 			 * by subtracting the current time and the time
554 			 * before we started the call, and subtracting
555 			 * that result from the user-supplied value.
556 			 */
557 			microtime(&tv1);
558 			timevalsub(&tv1, &tv0);
559 			timevalsub(&utv, &tv1);
560 			if (utv.tv_sec < 0)
561 				timevalclear(&utv);
562 		} else
563 			timevalclear(&utv);
564 #ifdef DEBUG
565 		if (ldebug(select))
566 			printf(LMSG("outgoing timeout (%ld/%ld)"),
567 			    utv.tv_sec, utv.tv_usec);
568 #endif
569 		if ((error = copyout(&utv, args->timeout, sizeof(utv))))
570 			goto select_out;
571 	}
572 
573 select_out:
574 #ifdef DEBUG
575 	if (ldebug(select))
576 		printf(LMSG("select_out -> %d"), error);
577 #endif
578 	return error;
579 }
580 
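/*
 * Linux mremap(2): only shrinking a mapping in place is supported; any
 * request to grow a region fails with ENOMEM.
 */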
581 int
582 linux_mremap(struct thread *td, struct linux_mremap_args *args)
583 {
584 	struct munmap_args /* {
585 		void *addr;
586 		size_t len;
587 	} */ bsd_args;
588 	int error = 0;
589 
590 #ifdef DEBUG
591 	if (ldebug(mremap))
592 		printf(ARGS(mremap, "%p, %08lx, %08lx, %08lx"),
593 		    (void *)args->addr,
594 		    (unsigned long)args->old_len,
595 		    (unsigned long)args->new_len,
596 		    (unsigned long)args->flags);
597 #endif
598 	args->new_len = round_page(args->new_len);
599 	args->old_len = round_page(args->old_len);
600 
601 	if (args->new_len > args->old_len) {
602 		td->td_retval[0] = 0;
603 		return ENOMEM;
604 	}
605 
606 	if (args->new_len < args->old_len) {
607 		bsd_args.addr = (caddr_t)(args->addr + args->new_len);
608 		bsd_args.len = args->old_len - args->new_len;
609 		error = munmap(td, &bsd_args);
610 	}
611 
612 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
613 	return error;
614 }
615 
616 #define LINUX_MS_ASYNC       0x0001
617 #define LINUX_MS_INVALIDATE  0x0002
618 #define LINUX_MS_SYNC        0x0004
619 
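/*
 * Linux msync(2): the MS_ASYNC and MS_INVALIDATE bits have the same
 * values as their BSD counterparts; the Linux-only MS_SYNC bit is masked
 * off before calling the native msync().
 */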
620 int
621 linux_msync(struct thread *td, struct linux_msync_args *args)
622 {
623 	struct msync_args bsd_args;
624 
625 	bsd_args.addr = (caddr_t)args->addr;
626 	bsd_args.len = args->len;
627 	bsd_args.flags = args->fl & ~LINUX_MS_SYNC;
628 
629 	return msync(td, &bsd_args);
630 }
631 
632 #ifndef __alpha__
633 int
634 linux_time(struct thread *td, struct linux_time_args *args)
635 {
636 	struct timeval tv;
637 	l_time_t tm;
638 	int error;
639 
640 #ifdef DEBUG
641 	if (ldebug(time))
642 		printf(ARGS(time, "*"));
643 #endif
644 
645 	microtime(&tv);
646 	tm = tv.tv_sec;
647 	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
648 		return error;
649 	td->td_retval[0] = tm;
650 	return 0;
651 }
652 #endif	/*!__alpha__*/
653 
654 struct l_times_argv {
655 	l_long		tms_utime;
656 	l_long		tms_stime;
657 	l_long		tms_cutime;
658 	l_long		tms_cstime;
659 };
660 
661 #ifdef __alpha__
662 #define CLK_TCK 1024	/* Linux uses 1024 on alpha */
663 #else
664 #define CLK_TCK 100	/* Linux uses 100 */
665 #endif
666 
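/* Convert a struct timeval into Linux clock ticks (CLK_TCK per second). */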
667 #define CONVTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
668 
669 int
670 linux_times(struct thread *td, struct linux_times_args *args)
671 {
672 	struct timeval tv;
673 	struct l_times_argv tms;
674 	struct rusage ru;
675 	int error;
676 
677 #ifdef DEBUG
678 	if (ldebug(times))
679 		printf(ARGS(times, "*"));
680 #endif
681 
682 	mtx_lock_spin(&sched_lock);
683 	calcru(td->td_proc, &ru.ru_utime, &ru.ru_stime, NULL);
684 	mtx_unlock_spin(&sched_lock);
685 
686 	tms.tms_utime = CONVTCK(ru.ru_utime);
687 	tms.tms_stime = CONVTCK(ru.ru_stime);
688 
689 	tms.tms_cutime = CONVTCK(td->td_proc->p_stats->p_cru.ru_utime);
690 	tms.tms_cstime = CONVTCK(td->td_proc->p_stats->p_cru.ru_stime);
691 
692 	if ((error = copyout(&tms, args->buf, sizeof(tms))))
693 		return error;
694 
695 	microuptime(&tv);
696 	td->td_retval[0] = (int)CONVTCK(tv);
697 	return 0;
698 }
699 
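/*
 * Linux uname(2): report the emulated OS name and release along with the
 * host's node name, kernel version string and machine class.
 */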
700 int
701 linux_newuname(struct thread *td, struct linux_newuname_args *args)
702 {
703 	struct l_new_utsname utsname;
704 	char osname[LINUX_MAX_UTSNAME];
705 	char osrelease[LINUX_MAX_UTSNAME];
706 	char *p;
707 
708 #ifdef DEBUG
709 	if (ldebug(newuname))
710 		printf(ARGS(newuname, "*"));
711 #endif
712 
713 	linux_get_osname(td, osname);
714 	linux_get_osrelease(td, osrelease);
715 
716 	bzero(&utsname, sizeof(utsname));
717 	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
718 	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
719 	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
720 	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
721 	for (p = utsname.version; *p != '\0'; ++p)
722 		if (*p == '\n') {
723 			*p = '\0';
724 			break;
725 		}
726 #ifdef __i386__
727 	{
728 		const char *class;
729 		switch (cpu_class) {
730 		case CPUCLASS_686:
731 			class = "i686";
732 			break;
733 		case CPUCLASS_586:
734 			class = "i586";
735 			break;
736 		case CPUCLASS_486:
737 			class = "i486";
738 			break;
739 		default:
740 			class = "i386";
741 		}
742 		strlcpy(utsname.machine, class, LINUX_MAX_UTSNAME);
743 	}
744 #else
745 	strlcpy(utsname.machine, machine, LINUX_MAX_UTSNAME);
746 #endif
747 	strlcpy(utsname.domainname, domainname, LINUX_MAX_UTSNAME);
748 
749 	return (copyout(&utsname, args->buf, sizeof(utsname)));
750 }
751 
752 #if defined(__i386__)
753 struct l_utimbuf {
754 	l_time_t l_actime;
755 	l_time_t l_modtime;
756 };
757 
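/*
 * Linux utime(2): convert the optional utimbuf into a pair of timevals
 * and hand it to kern_utimes().
 */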
758 int
759 linux_utime(struct thread *td, struct linux_utime_args *args)
760 {
761 	struct timeval tv[2], *tvp;
762 	struct l_utimbuf lut;
763 	char *fname;
764 	int error;
765 
766 	LCONVPATHEXIST(td, args->fname, &fname);
767 
768 #ifdef DEBUG
769 	if (ldebug(utime))
770 		printf(ARGS(utime, "%s, *"), fname);
771 #endif
772 
773 	if (args->times) {
774 		if ((error = copyin(args->times, &lut, sizeof lut))) {
775 			LFREEPATH(fname);
776 			return error;
777 		}
778 		tv[0].tv_sec = lut.l_actime;
779 		tv[0].tv_usec = 0;
780 		tv[1].tv_sec = lut.l_modtime;
781 		tv[1].tv_usec = 0;
782 		tvp = tv;
783 	} else
784 		tvp = NULL;
785 
786 	error = kern_utimes(td, fname, UIO_SYSSPACE, tvp, UIO_SYSSPACE);
787 	LFREEPATH(fname);
788 	return (error);
789 }
790 #endif /* __i386__ */
791 
792 #define __WCLONE 0x80000000
793 
794 #ifndef __alpha__
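/*
 * Linux waitpid(2): wait via kern_wait() and rewrite the signal number
 * embedded in the status word from its BSD value to the Linux one.
 */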
795 int
796 linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
797 {
798 	int error, options, tmpstat;
799 
800 #ifdef DEBUG
801 	if (ldebug(waitpid))
802 		printf(ARGS(waitpid, "%d, %p, %d"),
803 		    args->pid, (void *)args->status, args->options);
804 #endif
805 
806 	options = (args->options & (WNOHANG | WUNTRACED));
807 	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
808 	if (args->options & __WCLONE)
809 		options |= WLINUXCLONE;
810 
811 	error = kern_wait(td, args->pid, &tmpstat, options, NULL);
812 	if (error)
813 		return error;
814 
815 	if (args->status) {
816 		tmpstat &= 0xffff;
817 		if (WIFSIGNALED(tmpstat))
818 			tmpstat = (tmpstat & 0xffffff80) |
819 			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
820 		else if (WIFSTOPPED(tmpstat))
821 			tmpstat = (tmpstat & 0xffff00ff) |
822 			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
823 		return copyout(&tmpstat, args->status, sizeof(int));
824 	}
825 
826 	return 0;
827 }
828 #endif	/*!__alpha__*/
829 
830 int
831 linux_wait4(struct thread *td, struct linux_wait4_args *args)
832 {
833 	int error, options, tmpstat;
834 	struct rusage ru;
835 	struct proc *p;
836 
837 #ifdef DEBUG
838 	if (ldebug(wait4))
839 		printf(ARGS(wait4, "%d, %p, %d, %p"),
840 		    args->pid, (void *)args->status, args->options,
841 		    (void *)args->rusage);
842 #endif
843 
844 	options = (args->options & (WNOHANG | WUNTRACED));
845 	/* WLINUXCLONE should be equal to __WCLONE, but we make sure */
846 	if (args->options & __WCLONE)
847 		options |= WLINUXCLONE;
848 
849 	error = kern_wait(td, args->pid, &tmpstat, options, &ru);
850 	if (error)
851 		return error;
852 
853 	p = td->td_proc;
854 	PROC_LOCK(p);
855 	SIGDELSET(p->p_siglist, SIGCHLD);
856 	PROC_UNLOCK(p);
857 
858 	if (args->status) {
859 		tmpstat &= 0xffff;
860 		if (WIFSIGNALED(tmpstat))
861 			tmpstat = (tmpstat & 0xffffff80) |
862 			    BSD_TO_LINUX_SIGNAL(WTERMSIG(tmpstat));
863 		else if (WIFSTOPPED(tmpstat))
864 			tmpstat = (tmpstat & 0xffff00ff) |
865 			    (BSD_TO_LINUX_SIGNAL(WSTOPSIG(tmpstat)) << 8);
866 		error = copyout(&tmpstat, args->status, sizeof(int));
867 	}
868 	if (args->rusage != NULL && error == 0)
869 		error = copyout(&ru, args->rusage, sizeof(ru));
870 
871 	return (error);
872 }
873 
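/*
 * Linux mknod(2): FIFO requests are routed to kern_mkfifo(); everything
 * else goes through kern_mknod().
 */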
874 int
875 linux_mknod(struct thread *td, struct linux_mknod_args *args)
876 {
877 	char *path;
878 	int error;
879 
880 	LCONVPATHCREAT(td, args->path, &path);
881 
882 #ifdef DEBUG
883 	if (ldebug(mknod))
884 		printf(ARGS(mknod, "%s, %d, %d"), path, args->mode, args->dev);
885 #endif
886 
887 	if (args->mode & S_IFIFO)
888 		error = kern_mkfifo(td, path, UIO_SYSSPACE, args->mode);
889 	else
890 		error = kern_mknod(td, path, UIO_SYSSPACE, args->mode,
891 		    args->dev);
892 	LFREEPATH(path);
893 	return (error);
894 }
895 
896 /*
897  * UGH! This is just about the dumbest idea I've ever heard!!
898  */
899 int
900 linux_personality(struct thread *td, struct linux_personality_args *args)
901 {
902 #ifdef DEBUG
903 	if (ldebug(personality))
904 		printf(ARGS(personality, "%lu"), (unsigned long)args->per);
905 #endif
906 #ifndef __alpha__
907 	if (args->per != 0)
908 		return EINVAL;
909 #endif
910 
911 	/* Yes Jim, it's still a Linux... */
912 	td->td_retval[0] = 0;
913 	return 0;
914 }
915 
916 /*
917  * Wrappers for get/setitimer for debugging.
918  */
919 int
920 linux_setitimer(struct thread *td, struct linux_setitimer_args *args)
921 {
922 	struct setitimer_args bsa;
923 	struct itimerval foo;
924 	int error;
925 
926 #ifdef DEBUG
927 	if (ldebug(setitimer))
928 		printf(ARGS(setitimer, "%p, %p"),
929 		    (void *)args->itv, (void *)args->oitv);
930 #endif
931 	bsa.which = args->which;
932 	bsa.itv = (struct itimerval *)args->itv;
933 	bsa.oitv = (struct itimerval *)args->oitv;
934 	if (args->itv) {
935 	    if ((error = copyin(args->itv, &foo, sizeof(foo))))
936 		return error;
937 #ifdef DEBUG
938 	    if (ldebug(setitimer)) {
939 		printf("setitimer: value: sec: %ld, usec: %ld\n",
940 		    foo.it_value.tv_sec, foo.it_value.tv_usec);
941 		printf("setitimer: interval: sec: %ld, usec: %ld\n",
942 		    foo.it_interval.tv_sec, foo.it_interval.tv_usec);
943 	    }
944 #endif
945 	}
946 	return setitimer(td, &bsa);
947 }
948 
949 int
950 linux_getitimer(struct thread *td, struct linux_getitimer_args *args)
951 {
952 	struct getitimer_args bsa;
953 #ifdef DEBUG
954 	if (ldebug(getitimer))
955 		printf(ARGS(getitimer, "%p"), (void *)args->itv);
956 #endif
957 	bsa.which = args->which;
958 	bsa.itv = (struct itimerval *)args->itv;
959 	return getitimer(td, &bsa);
960 }
961 
962 #ifndef __alpha__
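/* Linux nice(2): apply the increment through the native setpriority(2). */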
963 int
964 linux_nice(struct thread *td, struct linux_nice_args *args)
965 {
966 	struct setpriority_args	bsd_args;
967 
968 	bsd_args.which = PRIO_PROCESS;
969 	bsd_args.who = 0;	/* current process */
970 	bsd_args.prio = args->inc;
971 	return setpriority(td, &bsd_args);
972 }
973 #endif	/*!__alpha__*/
974 
975 int
976 linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
977 {
978 	struct ucred *newcred, *oldcred;
979 	l_gid_t linux_gidset[NGROUPS];
980 	gid_t *bsd_gidset;
981 	int ngrp, error;
982 	struct proc *p;
983 
984 	ngrp = args->gidsetsize;
985 	if (ngrp < 0 || ngrp >= NGROUPS)
986 		return (EINVAL);
987 	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
988 	if (error)
989 		return (error);
990 	newcred = crget();
991 	p = td->td_proc;
992 	PROC_LOCK(p);
993 	oldcred = p->p_ucred;
994 
995 	/*
996 	 * cr_groups[0] holds egid. Setting the whole set from
997 	 * the supplied set will cause egid to be changed too.
998 	 * Keep cr_groups[0] unchanged to prevent that.
999 	 */
1000 
1001 	if ((error = suser_cred(oldcred, PRISON_ROOT)) != 0) {
1002 		PROC_UNLOCK(p);
1003 		crfree(newcred);
1004 		return (error);
1005 	}
1006 
1007 	crcopy(newcred, oldcred);
1008 	if (ngrp > 0) {
1009 		newcred->cr_ngroups = ngrp + 1;
1010 
1011 		bsd_gidset = newcred->cr_groups;
1012 		ngrp--;
1013 		while (ngrp >= 0) {
1014 			bsd_gidset[ngrp + 1] = linux_gidset[ngrp];
1015 			ngrp--;
1016 		}
1017 	} else
1019 		newcred->cr_ngroups = 1;
1020 
1021 	setsugid(p);
1022 	p->p_ucred = newcred;
1023 	PROC_UNLOCK(p);
1024 	crfree(oldcred);
1025 	return (0);
1026 }
1027 
1028 int
1029 linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
1030 {
1031 	struct ucred *cred;
1032 	l_gid_t linux_gidset[NGROUPS];
1033 	gid_t *bsd_gidset;
1034 	int bsd_gidsetsz, ngrp, error;
1035 
1036 	cred = td->td_ucred;
1037 	bsd_gidset = cred->cr_groups;
1038 	bsd_gidsetsz = cred->cr_ngroups - 1;
1039 
1040 	/*
1041 	 * cr_groups[0] holds egid. Returning the whole set
1042 	 * here will cause a duplicate. Exclude cr_groups[0]
1043 	 * to prevent that.
1044 	 */
1045 
1046 	if ((ngrp = args->gidsetsize) == 0) {
1047 		td->td_retval[0] = bsd_gidsetsz;
1048 		return (0);
1049 	}
1050 
1051 	if (ngrp < bsd_gidsetsz)
1052 		return (EINVAL);
1053 
1054 	ngrp = 0;
1055 	while (ngrp < bsd_gidsetsz) {
1056 		linux_gidset[ngrp] = bsd_gidset[ngrp + 1];
1057 		ngrp++;
1058 	}
1059 
1060 	if ((error = copyout(linux_gidset, args->grouplist,
1061 	    ngrp * sizeof(l_gid_t))))
1062 		return (error);
1063 
1064 	td->td_retval[0] = ngrp;
1065 	return (0);
1066 }
1067 
1068 #ifndef __alpha__
1069 int
1070 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
1071 {
1072 	struct rlimit bsd_rlim;
1073 	struct l_rlimit rlim;
1074 	u_int which;
1075 	int error;
1076 
1077 #ifdef DEBUG
1078 	if (ldebug(setrlimit))
1079 		printf(ARGS(setrlimit, "%d, %p"),
1080 		    args->resource, (void *)args->rlim);
1081 #endif
1082 
1083 	if (args->resource >= LINUX_RLIM_NLIMITS)
1084 		return (EINVAL);
1085 
1086 	which = linux_to_bsd_resource[args->resource];
1087 	if (which == -1)
1088 		return (EINVAL);
1089 
1090 	error = copyin(args->rlim, &rlim, sizeof(rlim));
1091 	if (error)
1092 		return (error);
1093 
1094 	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
1095 	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
1096 	return (kern_setrlimit(td, which, &bsd_rlim));
1097 }
1098 
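/*
 * The old Linux getrlimit(2) returns limits as longs; clamp values that
 * truncate to ULONG_MAX (i.e. RLIM_INFINITY) down to LONG_MAX.
 */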
1099 int
1100 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
1101 {
1102 	struct l_rlimit rlim;
1103 	struct proc *p = td->td_proc;
1104 	struct rlimit bsd_rlim;
1105 	u_int which;
1106 
1107 #ifdef DEBUG
1108 	if (ldebug(old_getrlimit))
1109 		printf(ARGS(old_getrlimit, "%d, %p"),
1110 		    args->resource, (void *)args->rlim);
1111 #endif
1112 
1113 	if (args->resource >= LINUX_RLIM_NLIMITS)
1114 		return (EINVAL);
1115 
1116 	which = linux_to_bsd_resource[args->resource];
1117 	if (which == -1)
1118 		return (EINVAL);
1119 
1120 	PROC_LOCK(p);
1121 	lim_rlimit(p, which, &bsd_rlim);
1122 	PROC_UNLOCK(p);
1123 
1124 	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
1125 	if (rlim.rlim_cur == ULONG_MAX)
1126 		rlim.rlim_cur = LONG_MAX;
1127 	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
1128 	if (rlim.rlim_max == ULONG_MAX)
1129 		rlim.rlim_max = LONG_MAX;
1130 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1131 }
1132 
1133 int
1134 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
1135 {
1136 	struct l_rlimit rlim;
1137 	struct proc *p = td->td_proc;
1138 	struct rlimit bsd_rlim;
1139 	u_int which;
1140 
1141 #ifdef DEBUG
1142 	if (ldebug(getrlimit))
1143 		printf(ARGS(getrlimit, "%d, %p"),
1144 		    args->resource, (void *)args->rlim);
1145 #endif
1146 
1147 	if (args->resource >= LINUX_RLIM_NLIMITS)
1148 		return (EINVAL);
1149 
1150 	which = linux_to_bsd_resource[args->resource];
1151 	if (which == -1)
1152 		return (EINVAL);
1153 
1154 	PROC_LOCK(p);
1155 	lim_rlimit(p, which, &bsd_rlim);
1156 	PROC_UNLOCK(p);
1157 
1158 	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
1159 	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
1160 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1161 }
1162 #endif /*!__alpha__*/
1163 
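/*
 * The sched_*() wrappers below translate Linux scheduling policy
 * constants to their POSIX counterparts before calling the native
 * syscalls, and back again for values returned to the caller.
 */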
1164 int
1165 linux_sched_setscheduler(struct thread *td,
1166     struct linux_sched_setscheduler_args *args)
1167 {
1168 	struct sched_setscheduler_args bsd;
1169 
1170 #ifdef DEBUG
1171 	if (ldebug(sched_setscheduler))
1172 		printf(ARGS(sched_setscheduler, "%d, %d, %p"),
1173 		    args->pid, args->policy, (const void *)args->param);
1174 #endif
1175 
1176 	switch (args->policy) {
1177 	case LINUX_SCHED_OTHER:
1178 		bsd.policy = SCHED_OTHER;
1179 		break;
1180 	case LINUX_SCHED_FIFO:
1181 		bsd.policy = SCHED_FIFO;
1182 		break;
1183 	case LINUX_SCHED_RR:
1184 		bsd.policy = SCHED_RR;
1185 		break;
1186 	default:
1187 		return EINVAL;
1188 	}
1189 
1190 	bsd.pid = args->pid;
1191 	bsd.param = (struct sched_param *)args->param;
1192 	return sched_setscheduler(td, &bsd);
1193 }
1194 
1195 int
1196 linux_sched_getscheduler(struct thread *td,
1197     struct linux_sched_getscheduler_args *args)
1198 {
1199 	struct sched_getscheduler_args bsd;
1200 	int error;
1201 
1202 #ifdef DEBUG
1203 	if (ldebug(sched_getscheduler))
1204 		printf(ARGS(sched_getscheduler, "%d"), args->pid);
1205 #endif
1206 
1207 	bsd.pid = args->pid;
1208 	error = sched_getscheduler(td, &bsd);
1209 
1210 	switch (td->td_retval[0]) {
1211 	case SCHED_OTHER:
1212 		td->td_retval[0] = LINUX_SCHED_OTHER;
1213 		break;
1214 	case SCHED_FIFO:
1215 		td->td_retval[0] = LINUX_SCHED_FIFO;
1216 		break;
1217 	case SCHED_RR:
1218 		td->td_retval[0] = LINUX_SCHED_RR;
1219 		break;
1220 	}
1221 
1222 	return error;
1223 }
1224 
1225 int
1226 linux_sched_get_priority_max(struct thread *td,
1227     struct linux_sched_get_priority_max_args *args)
1228 {
1229 	struct sched_get_priority_max_args bsd;
1230 
1231 #ifdef DEBUG
1232 	if (ldebug(sched_get_priority_max))
1233 		printf(ARGS(sched_get_priority_max, "%d"), args->policy);
1234 #endif
1235 
1236 	switch (args->policy) {
1237 	case LINUX_SCHED_OTHER:
1238 		bsd.policy = SCHED_OTHER;
1239 		break;
1240 	case LINUX_SCHED_FIFO:
1241 		bsd.policy = SCHED_FIFO;
1242 		break;
1243 	case LINUX_SCHED_RR:
1244 		bsd.policy = SCHED_RR;
1245 		break;
1246 	default:
1247 		return EINVAL;
1248 	}
1249 	return sched_get_priority_max(td, &bsd);
1250 }
1251 
1252 int
1253 linux_sched_get_priority_min(struct thread *td,
1254     struct linux_sched_get_priority_min_args *args)
1255 {
1256 	struct sched_get_priority_min_args bsd;
1257 
1258 #ifdef DEBUG
1259 	if (ldebug(sched_get_priority_min))
1260 		printf(ARGS(sched_get_priority_min, "%d"), args->policy);
1261 #endif
1262 
1263 	switch (args->policy) {
1264 	case LINUX_SCHED_OTHER:
1265 		bsd.policy = SCHED_OTHER;
1266 		break;
1267 	case LINUX_SCHED_FIFO:
1268 		bsd.policy = SCHED_FIFO;
1269 		break;
1270 	case LINUX_SCHED_RR:
1271 		bsd.policy = SCHED_RR;
1272 		break;
1273 	default:
1274 		return EINVAL;
1275 	}
1276 	return sched_get_priority_min(td, &bsd);
1277 }
1278 
1279 #define REBOOT_CAD_ON	0x89abcdef
1280 #define REBOOT_CAD_OFF	0
1281 #define REBOOT_HALT	0xcdef0123
1282 
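/*
 * Linux reboot(2): Ctrl-Alt-Del enable/disable requests are accepted and
 * ignored; REBOOT_HALT maps to RB_HALT and anything else reboots.
 */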
1283 int
1284 linux_reboot(struct thread *td, struct linux_reboot_args *args)
1285 {
1286 	struct reboot_args bsd_args;
1287 
1288 #ifdef DEBUG
1289 	if (ldebug(reboot))
1290 		printf(ARGS(reboot, "0x%x"), args->cmd);
1291 #endif
1292 	if (args->cmd == REBOOT_CAD_ON || args->cmd == REBOOT_CAD_OFF)
1293 		return (0);
1294 	bsd_args.opt = (args->cmd == REBOOT_HALT) ? RB_HALT : 0;
1295 	return (reboot(td, &bsd_args));
1296 }
1297 
1298 #ifndef __alpha__
1299 
1300 /*
1301  * The FreeBSD native getpid(2), getgid(2) and getuid(2) also modify
1302  * td->td_retval[1] when COMPAT_43 is defined. This clobbers registers
1303  * that are assumed to be preserved. The following lightweight syscalls
1304  * fix this. See also linux_getgid16() and
1305  * linux_getuid16() in linux_uid16.c.
1306  *
1307  * linux_getpid() - MP SAFE
1308  * linux_getgid() - MP SAFE
1309  * linux_getuid() - MP SAFE
1310  */
1311 
1312 int
1313 linux_getpid(struct thread *td, struct linux_getpid_args *args)
1314 {
1315 
1316 	td->td_retval[0] = td->td_proc->p_pid;
1317 	return (0);
1318 }
1319 
1320 int
1321 linux_getgid(struct thread *td, struct linux_getgid_args *args)
1322 {
1323 
1324 	td->td_retval[0] = td->td_ucred->cr_rgid;
1325 	return (0);
1326 }
1327 
1328 int
1329 linux_getuid(struct thread *td, struct linux_getuid_args *args)
1330 {
1331 
1332 	td->td_retval[0] = td->td_ucred->cr_ruid;
1333 	return (0);
1334 }
1335 
1336 #endif /*!__alpha__*/
1337 
1338 int
1339 linux_getsid(struct thread *td, struct linux_getsid_args *args)
1340 {
1341 	struct getsid_args bsd;
1342 	bsd.pid = args->pid;
1343 	return getsid(td, &bsd);
1344 }
1345