xref: /freebsd/sys/compat/linux/linux_misc.c (revision b7a87d7c0dd19016e82f5d33f2c5ec90479c31fe)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2002 Doug Rabson
5  * Copyright (c) 1994-1995 Søren Schmidt
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer
13  *    in this position and unchanged.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/param.h>
33 #include <sys/fcntl.h>
34 #include <sys/jail.h>
35 #include <sys/imgact.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/msgbuf.h>
39 #include <sys/mqueue.h>
40 #include <sys/mutex.h>
41 #include <sys/poll.h>
42 #include <sys/priv.h>
43 #include <sys/proc.h>
44 #include <sys/procctl.h>
45 #include <sys/reboot.h>
46 #include <sys/random.h>
47 #include <sys/resourcevar.h>
48 #include <sys/rtprio.h>
49 #include <sys/sched.h>
50 #include <sys/smp.h>
51 #include <sys/stat.h>
52 #include <sys/syscallsubr.h>
53 #include <sys/sysctl.h>
54 #include <sys/sysent.h>
55 #include <sys/sysproto.h>
56 #include <sys/time.h>
57 #include <sys/vmmeter.h>
58 #include <sys/vnode.h>
59 
60 #include <security/audit/audit.h>
61 #include <security/mac/mac_framework.h>
62 
63 #include <vm/pmap.h>
64 #include <vm/vm_map.h>
65 #include <vm/swap_pager.h>
66 
67 #ifdef COMPAT_LINUX32
68 #include <machine/../linux32/linux.h>
69 #include <machine/../linux32/linux32_proto.h>
70 #else
71 #include <machine/../linux/linux.h>
72 #include <machine/../linux/linux_proto.h>
73 #endif
74 
75 #include <compat/linux/linux_common.h>
76 #include <compat/linux/linux_dtrace.h>
77 #include <compat/linux/linux_file.h>
78 #include <compat/linux/linux_mib.h>
79 #include <compat/linux/linux_mmap.h>
80 #include <compat/linux/linux_signal.h>
81 #include <compat/linux/linux_time.h>
82 #include <compat/linux/linux_util.h>
83 #include <compat/linux/linux_emul.h>
84 #include <compat/linux/linux_misc.h>
85 
86 int stclohz;				/* Statistics clock frequency */
87 
88 static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
89 	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
90 	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
91 	RLIMIT_MEMLOCK, RLIMIT_AS
92 };
93 
94 struct l_sysinfo {
95 	l_long		uptime;		/* Seconds since boot */
96 	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
97 #define LINUX_SYSINFO_LOADS_SCALE 65536
98 	l_ulong		totalram;	/* Total usable main memory size */
99 	l_ulong		freeram;	/* Available memory size */
100 	l_ulong		sharedram;	/* Amount of shared memory */
101 	l_ulong		bufferram;	/* Memory used by buffers */
102 	l_ulong		totalswap;	/* Total swap space size */
103 	l_ulong		freeswap;	/* swap space still available */
104 	l_ushort	procs;		/* Number of current processes */
105 	l_ushort	pads;
106 	l_ulong		totalhigh;
107 	l_ulong		freehigh;
108 	l_uint		mem_unit;
109 	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
110 };
111 
112 struct l_pselect6arg {
113 	l_uintptr_t	ss;
114 	l_size_t	ss_len;
115 };
116 
117 static int	linux_utimensat_lts_to_ts(struct l_timespec *,
118 			struct timespec *);
119 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
120 static int	linux_utimensat_lts64_to_ts(struct l_timespec64 *,
121 			struct timespec *);
122 #endif
123 static int	linux_common_utimensat(struct thread *, int,
124 			const char *, struct timespec *, int);
125 static int	linux_common_pselect6(struct thread *, l_int,
126 			l_fd_set *, l_fd_set *, l_fd_set *,
127 			struct timespec *, l_uintptr_t *);
128 static int	linux_common_ppoll(struct thread *, struct pollfd *,
129 			uint32_t, struct timespec *, l_sigset_t *,
130 			l_size_t);
131 static int	linux_pollin(struct thread *, struct pollfd *,
132 			struct pollfd *, u_int);
133 static int	linux_pollout(struct thread *, struct pollfd *,
134 			struct pollfd *, u_int);
135 
136 int
137 linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
138 {
139 	struct l_sysinfo sysinfo;
140 	int i, j;
141 	struct timespec ts;
142 
143 	bzero(&sysinfo, sizeof(sysinfo));
144 	getnanouptime(&ts);
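	/* Round a fractional second up so uptime is reported in whole seconds. */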
145 	if (ts.tv_nsec != 0)
146 		ts.tv_sec++;
147 	sysinfo.uptime = ts.tv_sec;
148 
149 	/* Use the information from the mib to get our load averages */
150 	for (i = 0; i < 3; i++)
151 		sysinfo.loads[i] = averunnable.ldavg[i] *
152 		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;
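	/*
	 * Illustrative: a load average of 1.00 (ldavg == fscale) is thus
	 * encoded as 65536, which is Linux's fixed-point scale.
	 */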
153 
154 	sysinfo.totalram = physmem * PAGE_SIZE;
155 	sysinfo.freeram = (u_long)vm_free_count() * PAGE_SIZE;
156 
157 	/*
158 	 * sharedram counts pages allocated to named, swap-backed objects such
159 	 * as shared memory segments and tmpfs files.  There is no cheap way to
160 	 * compute this, so just leave the field unpopulated.  Linux itself only
161 	 * started setting this field in the 3.x timeframe.
162 	 */
163 	sysinfo.sharedram = 0;
164 	sysinfo.bufferram = 0;
165 
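	/* i receives the total number of swap pages, j the number in use. */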
166 	swap_pager_status(&i, &j);
167 	sysinfo.totalswap = i * PAGE_SIZE;
168 	sysinfo.freeswap = (i - j) * PAGE_SIZE;
169 
170 	sysinfo.procs = nprocs;
171 
172 	/*
173 	 * Platforms supported by the emulation layer do not have a notion of
174 	 * high memory.
175 	 */
176 	sysinfo.totalhigh = 0;
177 	sysinfo.freehigh = 0;
178 
179 	sysinfo.mem_unit = 1;
180 
181 	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
182 }
183 
184 #ifdef LINUX_LEGACY_SYSCALLS
185 int
186 linux_alarm(struct thread *td, struct linux_alarm_args *args)
187 {
188 	struct itimerval it, old_it;
189 	u_int secs;
190 	int error __diagused;
191 
192 	secs = args->secs;
193 	/*
194 	 * Linux alarm() is always successful. Limit secs to INT32_MAX / 2
195 	 * to match kern_setitimer()'s limit and avoid an error from it.
196 	 *
197 	 * XXX. Linux limits secs to INT_MAX on 32-bit platforms and does
198 	 * not limit it at all on 64-bit ones.
199 	 */
200 	if (secs > INT32_MAX / 2)
201 		secs = INT32_MAX / 2;
202 
203 	it.it_value.tv_sec = secs;
204 	it.it_value.tv_usec = 0;
205 	timevalclear(&it.it_interval);
206 	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
207 	KASSERT(error == 0, ("kern_setitimer returns %d", error));
208 
209 	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
210 	    old_it.it_value.tv_usec >= 500000)
211 		old_it.it_value.tv_sec++;
212 	td->td_retval[0] = old_it.it_value.tv_sec;
213 	return (0);
214 }
215 #endif
216 
217 int
218 linux_brk(struct thread *td, struct linux_brk_args *args)
219 {
220 	struct vmspace *vm = td->td_proc->p_vmspace;
221 	uintptr_t new, old;
222 
223 	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
224 	new = (uintptr_t)args->dsend;
225 	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
226 		td->td_retval[0] = (register_t)new;
227 	else
228 		td->td_retval[0] = (register_t)old;
229 
230 	return (0);
231 }
232 
233 #ifdef LINUX_LEGACY_SYSCALLS
234 int
235 linux_select(struct thread *td, struct linux_select_args *args)
236 {
237 	l_timeval ltv;
238 	struct timeval tv0, tv1, utv, *tvp;
239 	int error;
240 
241 	/*
242 	 * Store current time for computation of the amount of
243 	 * time left.
244 	 */
245 	if (args->timeout) {
246 		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
247 			goto select_out;
248 		utv.tv_sec = ltv.tv_sec;
249 		utv.tv_usec = ltv.tv_usec;
250 
251 		if (itimerfix(&utv)) {
252 			/*
253 			 * The timeval was invalid.  Convert it to something
254 			 * valid that will act as it does under Linux.
255 			 */
256 			utv.tv_sec += utv.tv_usec / 1000000;
257 			utv.tv_usec %= 1000000;
258 			if (utv.tv_usec < 0) {
259 				utv.tv_sec -= 1;
260 				utv.tv_usec += 1000000;
261 			}
262 			if (utv.tv_sec < 0)
263 				timevalclear(&utv);
264 		}
265 		microtime(&tv0);
266 		tvp = &utv;
267 	} else
268 		tvp = NULL;
269 
270 	error = kern_select(td, args->nfds, args->readfds, args->writefds,
271 	    args->exceptfds, tvp, LINUX_NFDBITS);
272 	if (error)
273 		goto select_out;
274 
275 	if (args->timeout) {
276 		if (td->td_retval[0]) {
277 			/*
278 			 * Compute how much time was left of the timeout,
279 			 * by subtracting the current time and the time
280 			 * before we started the call, and subtracting
281 			 * that result from the user-supplied value.
282 			 */
283 			microtime(&tv1);
284 			timevalsub(&tv1, &tv0);
285 			timevalsub(&utv, &tv1);
286 			if (utv.tv_sec < 0)
287 				timevalclear(&utv);
288 		} else
289 			timevalclear(&utv);
290 		ltv.tv_sec = utv.tv_sec;
291 		ltv.tv_usec = utv.tv_usec;
292 		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
293 			goto select_out;
294 	}
295 
296 select_out:
297 	return (error);
298 }
299 #endif
300 
301 int
302 linux_mremap(struct thread *td, struct linux_mremap_args *args)
303 {
304 	uintptr_t addr;
305 	size_t len;
306 	int error = 0;
307 
308 	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
309 		td->td_retval[0] = 0;
310 		return (EINVAL);
311 	}
312 
313 	/*
314 	 * Check for page alignment.
315 	 * Note that Linux defines PAGE_MASK as the FreeBSD ~PAGE_MASK.
316 	 */
317 	if (args->addr & PAGE_MASK) {
318 		td->td_retval[0] = 0;
319 		return (EINVAL);
320 	}
321 
322 	args->new_len = round_page(args->new_len);
323 	args->old_len = round_page(args->old_len);
324 
325 	if (args->new_len > args->old_len) {
326 		td->td_retval[0] = 0;
327 		return (ENOMEM);
328 	}
329 
330 	if (args->new_len < args->old_len) {
331 		addr = args->addr + args->new_len;
332 		len = args->old_len - args->new_len;
333 		error = kern_munmap(td, addr, len);
334 	}
335 
336 	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
337 	return (error);
338 }
339 
340 #define LINUX_MS_ASYNC       0x0001
341 #define LINUX_MS_INVALIDATE  0x0002
342 #define LINUX_MS_SYNC        0x0004
343 
344 int
345 linux_msync(struct thread *td, struct linux_msync_args *args)
346 {
347 
348 	return (kern_msync(td, args->addr, args->len,
349 	    args->fl & ~LINUX_MS_SYNC));
350 }
351 
352 int
353 linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
354 {
355 
356 	return (linux_mprotect_common(td, PTROUT(uap->addr), uap->len,
357 	    uap->prot));
358 }
359 
360 int
361 linux_madvise(struct thread *td, struct linux_madvise_args *uap)
362 {
363 
364 	return (linux_madvise_common(td, PTROUT(uap->addr), uap->len,
365 	    uap->behav));
366 }
367 
368 int
369 linux_mmap2(struct thread *td, struct linux_mmap2_args *uap)
370 {
371 #if defined(LINUX_ARCHWANT_MMAP2PGOFF)
372 	/*
373 	 * On architectures where sizeof(off_t) < sizeof(loff_t), mmap is
374 	 * implemented with the mmap2 syscall and the offset is represented
375 	 * in multiples of the page size.
376 	 */
377 	return (linux_mmap_common(td, PTROUT(uap->addr), uap->len, uap->prot,
378 	    uap->flags, uap->fd, (uint64_t)(uint32_t)uap->pgoff * PAGE_SIZE));
379 #else
380 	return (linux_mmap_common(td, PTROUT(uap->addr), uap->len, uap->prot,
381 	    uap->flags, uap->fd, uap->pgoff));
382 #endif
383 }
384 
385 #ifdef LINUX_LEGACY_SYSCALLS
386 int
387 linux_time(struct thread *td, struct linux_time_args *args)
388 {
389 	struct timeval tv;
390 	l_time_t tm;
391 	int error;
392 
393 	microtime(&tv);
394 	tm = tv.tv_sec;
395 	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
396 		return (error);
397 	td->td_retval[0] = tm;
398 	return (0);
399 }
400 #endif
401 
402 struct l_times_argv {
403 	l_clock_t	tms_utime;
404 	l_clock_t	tms_stime;
405 	l_clock_t	tms_cutime;
406 	l_clock_t	tms_cstime;
407 };
408 
409 /*
410  * Glibc versions prior to 2.2.1 always use a hard-coded CLK_TCK value.
411  * Since 2.2.1, glibc uses the value exported from the kernel via the
412  * AT_CLKTCK auxiliary vector entry.
413  */
414 #define	CLK_TCK		100
415 
416 #define	CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
417 #define	CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))
418 
419 #define	CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER(2,4,0) ?	\
420 			    CONVNTCK(r) : CONVOTCK(r))
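/*
 * Illustrative: converting r = { .tv_sec = 2, .tv_usec = 500000 } with the
 * legacy CLK_TCK of 100 yields 2 * 100 + 500000 / 10000 = 250 ticks; with a
 * statistics clock of, say, stclohz = 128 it yields 2 * 128 + 64 = 320.
 */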
421 
422 int
423 linux_times(struct thread *td, struct linux_times_args *args)
424 {
425 	struct timeval tv, utime, stime, cutime, cstime;
426 	struct l_times_argv tms;
427 	struct proc *p;
428 	int error;
429 
430 	if (args->buf != NULL) {
431 		p = td->td_proc;
432 		PROC_LOCK(p);
433 		PROC_STATLOCK(p);
434 		calcru(p, &utime, &stime);
435 		PROC_STATUNLOCK(p);
436 		calccru(p, &cutime, &cstime);
437 		PROC_UNLOCK(p);
438 
439 		tms.tms_utime = CONVTCK(utime);
440 		tms.tms_stime = CONVTCK(stime);
441 
442 		tms.tms_cutime = CONVTCK(cutime);
443 		tms.tms_cstime = CONVTCK(cstime);
444 
445 		if ((error = copyout(&tms, args->buf, sizeof(tms))))
446 			return (error);
447 	}
448 
449 	microuptime(&tv);
450 	td->td_retval[0] = (int)CONVTCK(tv);
451 	return (0);
452 }
453 
454 int
455 linux_newuname(struct thread *td, struct linux_newuname_args *args)
456 {
457 	struct l_new_utsname utsname;
458 	char osname[LINUX_MAX_UTSNAME];
459 	char osrelease[LINUX_MAX_UTSNAME];
460 	char *p;
461 
462 	linux_get_osname(td, osname);
463 	linux_get_osrelease(td, osrelease);
464 
465 	bzero(&utsname, sizeof(utsname));
466 	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
467 	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
468 	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
469 	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
470 	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
471 	for (p = utsname.version; *p != '\0'; ++p)
472 		if (*p == '\n') {
473 			*p = '\0';
474 			break;
475 		}
476 #if defined(__amd64__)
477 	/*
478 	 * On amd64, Linux uname(2) needs to return "x86_64"
479 	 * for both 64-bit and 32-bit applications.  On 32-bit,
480 	 * the string returned by getauxval(AT_PLATFORM) needs
481 	 * to remain "i686", though.
482 	 */
483 #if defined(COMPAT_LINUX32)
484 	if (linux32_emulate_i386)
485 		strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
486 	else
487 #endif
488 	strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME);
489 #elif defined(__aarch64__)
490 	strlcpy(utsname.machine, "aarch64", LINUX_MAX_UTSNAME);
491 #elif defined(__i386__)
492 	strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
493 #endif
494 
495 	return (copyout(&utsname, args->buf, sizeof(utsname)));
496 }
497 
498 struct l_utimbuf {
499 	l_time_t l_actime;
500 	l_time_t l_modtime;
501 };
502 
503 #ifdef LINUX_LEGACY_SYSCALLS
504 int
505 linux_utime(struct thread *td, struct linux_utime_args *args)
506 {
507 	struct timeval tv[2], *tvp;
508 	struct l_utimbuf lut;
509 	int error;
510 
511 	if (args->times) {
512 		if ((error = copyin(args->times, &lut, sizeof lut)) != 0)
513 			return (error);
514 		tv[0].tv_sec = lut.l_actime;
515 		tv[0].tv_usec = 0;
516 		tv[1].tv_sec = lut.l_modtime;
517 		tv[1].tv_usec = 0;
518 		tvp = tv;
519 	} else
520 		tvp = NULL;
521 
522 	return (kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
523 	    tvp, UIO_SYSSPACE));
524 }
525 #endif
526 
527 #ifdef LINUX_LEGACY_SYSCALLS
528 int
529 linux_utimes(struct thread *td, struct linux_utimes_args *args)
530 {
531 	l_timeval ltv[2];
532 	struct timeval tv[2], *tvp = NULL;
533 	int error;
534 
535 	if (args->tptr != NULL) {
536 		if ((error = copyin(args->tptr, ltv, sizeof ltv)) != 0)
537 			return (error);
538 		tv[0].tv_sec = ltv[0].tv_sec;
539 		tv[0].tv_usec = ltv[0].tv_usec;
540 		tv[1].tv_sec = ltv[1].tv_sec;
541 		tv[1].tv_usec = ltv[1].tv_usec;
542 		tvp = tv;
543 	}
544 
545 	return (kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
546 	    tvp, UIO_SYSSPACE));
547 }
548 #endif
549 
550 static int
551 linux_utimensat_lts_to_ts(struct l_timespec *l_times, struct timespec *times)
552 {
553 
554 	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
555 	    l_times->tv_nsec != LINUX_UTIME_NOW &&
556 	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
557 		return (EINVAL);
558 
559 	times->tv_sec = l_times->tv_sec;
560 	switch (l_times->tv_nsec)
561 	{
562 	case LINUX_UTIME_OMIT:
563 		times->tv_nsec = UTIME_OMIT;
564 		break;
565 	case LINUX_UTIME_NOW:
566 		times->tv_nsec = UTIME_NOW;
567 		break;
568 	default:
569 		times->tv_nsec = l_times->tv_nsec;
570 	}
571 
572 	return (0);
573 }
574 
575 static int
576 linux_common_utimensat(struct thread *td, int ldfd, const char *pathname,
577     struct timespec *timesp, int lflags)
578 {
579 	int dfd, flags = 0;
580 
581 	dfd = (ldfd == LINUX_AT_FDCWD) ? AT_FDCWD : ldfd;
582 
583 	if (lflags & ~(LINUX_AT_SYMLINK_NOFOLLOW | LINUX_AT_EMPTY_PATH))
584 		return (EINVAL);
585 
586 	if (timesp != NULL) {
587 		/* This breaks POSIX, but is what the Linux kernel does
588 		 * _on purpose_ (documented in the man page for utimensat(2)),
589 		 * so we must follow that behaviour. */
590 		if (timesp[0].tv_nsec == UTIME_OMIT &&
591 		    timesp[1].tv_nsec == UTIME_OMIT)
592 			return (0);
593 	}
594 
595 	if (lflags & LINUX_AT_SYMLINK_NOFOLLOW)
596 		flags |= AT_SYMLINK_NOFOLLOW;
597 	if (lflags & LINUX_AT_EMPTY_PATH)
598 		flags |= AT_EMPTY_PATH;
599 
600 	if (pathname != NULL)
601 		return (kern_utimensat(td, dfd, pathname,
602 		    UIO_USERSPACE, timesp, UIO_SYSSPACE, flags));
603 
604 	if (lflags != 0)
605 		return (EINVAL);
606 
607 	return (kern_futimens(td, dfd, timesp, UIO_SYSSPACE));
608 }
609 
610 int
611 linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
612 {
613 	struct l_timespec l_times[2];
614 	struct timespec times[2], *timesp;
615 	int error;
616 
617 	if (args->times != NULL) {
618 		error = copyin(args->times, l_times, sizeof(l_times));
619 		if (error != 0)
620 			return (error);
621 
622 		error = linux_utimensat_lts_to_ts(&l_times[0], &times[0]);
623 		if (error != 0)
624 			return (error);
625 		error = linux_utimensat_lts_to_ts(&l_times[1], &times[1]);
626 		if (error != 0)
627 			return (error);
628 		timesp = times;
629 	} else
630 		timesp = NULL;
631 
632 	return (linux_common_utimensat(td, args->dfd, args->pathname,
633 	    timesp, args->flags));
634 }
635 
636 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
637 static int
638 linux_utimensat_lts64_to_ts(struct l_timespec64 *l_times, struct timespec *times)
639 {
640 
641 	/* Zero out the padding in compat mode. */
642 	l_times->tv_nsec &= 0xFFFFFFFFUL;
643 
644 	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
645 	    l_times->tv_nsec != LINUX_UTIME_NOW &&
646 	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
647 		return (EINVAL);
648 
649 	times->tv_sec = l_times->tv_sec;
650 	switch (l_times->tv_nsec)
651 	{
652 	case LINUX_UTIME_OMIT:
653 		times->tv_nsec = UTIME_OMIT;
654 		break;
655 	case LINUX_UTIME_NOW:
656 		times->tv_nsec = UTIME_NOW;
657 		break;
658 	default:
659 		times->tv_nsec = l_times->tv_nsec;
660 	}
661 
662 	return (0);
663 }
664 
665 int
666 linux_utimensat_time64(struct thread *td, struct linux_utimensat_time64_args *args)
667 {
668 	struct l_timespec64 l_times[2];
669 	struct timespec times[2], *timesp;
670 	int error;
671 
672 	if (args->times64 != NULL) {
673 		error = copyin(args->times64, l_times, sizeof(l_times));
674 		if (error != 0)
675 			return (error);
676 
677 		error = linux_utimensat_lts64_to_ts(&l_times[0], &times[0]);
678 		if (error != 0)
679 			return (error);
680 		error = linux_utimensat_lts64_to_ts(&l_times[1], &times[1]);
681 		if (error != 0)
682 			return (error);
683 		timesp = times;
684 	} else
685 		timesp = NULL;
686 
687 	return (linux_common_utimensat(td, args->dfd, args->pathname,
688 	    timesp, args->flags));
689 }
690 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
691 
692 #ifdef LINUX_LEGACY_SYSCALLS
693 int
694 linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
695 {
696 	l_timeval ltv[2];
697 	struct timeval tv[2], *tvp = NULL;
698 	int error, dfd;
699 
700 	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
701 
702 	if (args->utimes != NULL) {
703 		if ((error = copyin(args->utimes, ltv, sizeof ltv)) != 0)
704 			return (error);
705 		tv[0].tv_sec = ltv[0].tv_sec;
706 		tv[0].tv_usec = ltv[0].tv_usec;
707 		tv[1].tv_sec = ltv[1].tv_sec;
708 		tv[1].tv_usec = ltv[1].tv_usec;
709 		tvp = tv;
710 	}
711 
712 	return (kern_utimesat(td, dfd, args->filename, UIO_USERSPACE,
713 	    tvp, UIO_SYSSPACE));
714 }
715 #endif
716 
717 static int
718 linux_common_wait(struct thread *td, idtype_t idtype, int id, int *statusp,
719     int options, void *rup, l_siginfo_t *infop)
720 {
721 	l_siginfo_t lsi;
722 	siginfo_t siginfo;
723 	struct __wrusage wru;
724 	int error, status, tmpstat, sig;
725 
726 	error = kern_wait6(td, idtype, id, &status, options,
727 	    rup != NULL ? &wru : NULL, &siginfo);
728 
729 	if (error == 0 && statusp) {
730 		tmpstat = status & 0xffff;
731 		if (WIFSIGNALED(tmpstat)) {
732 			tmpstat = (tmpstat & 0xffffff80) |
733 			    bsd_to_linux_signal(WTERMSIG(tmpstat));
734 		} else if (WIFSTOPPED(tmpstat)) {
735 			tmpstat = (tmpstat & 0xffff00ff) |
736 			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
737 #if defined(__aarch64__) || (defined(__amd64__) && !defined(COMPAT_LINUX32))
738 			if (WSTOPSIG(status) == SIGTRAP) {
739 				tmpstat = linux_ptrace_status(td,
740 				    siginfo.si_pid, tmpstat);
741 			}
742 #endif
743 		} else if (WIFCONTINUED(tmpstat)) {
744 			tmpstat = 0xffff;
745 		}
746 		error = copyout(&tmpstat, statusp, sizeof(int));
747 	}
748 	if (error == 0 && rup != NULL)
749 		error = linux_copyout_rusage(&wru.wru_self, rup);
750 	if (error == 0 && infop != NULL && td->td_retval[0] != 0) {
751 		sig = bsd_to_linux_signal(siginfo.si_signo);
752 		siginfo_to_lsiginfo(&siginfo, &lsi, sig);
753 		error = copyout(&lsi, infop, sizeof(lsi));
754 	}
755 
756 	return (error);
757 }
758 
759 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
760 int
761 linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
762 {
763 	struct linux_wait4_args wait4_args = {
764 		.pid = args->pid,
765 		.status = args->status,
766 		.options = args->options,
767 		.rusage = NULL,
768 	};
769 
770 	return (linux_wait4(td, &wait4_args));
771 }
772 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
773 
774 int
775 linux_wait4(struct thread *td, struct linux_wait4_args *args)
776 {
777 	struct proc *p;
778 	int options, id, idtype;
779 
780 	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
781 	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
782 		return (EINVAL);
783 
784 	/* -INT_MIN is not defined. */
785 	if (args->pid == INT_MIN)
786 		return (ESRCH);
787 
788 	options = 0;
789 	linux_to_bsd_waitopts(args->options, &options);
790 
791 	/*
792 	 * For backward compatibility we implicitly add flags WEXITED
793 	 * and WTRAPPED here.
794 	 */
795 	options |= WEXITED | WTRAPPED;
796 
797 	if (args->pid == WAIT_ANY) {
798 		idtype = P_ALL;
799 		id = 0;
800 	} else if (args->pid < 0) {
801 		idtype = P_PGID;
802 		id = (id_t)-args->pid;
803 	} else if (args->pid == 0) {
804 		idtype = P_PGID;
805 		p = td->td_proc;
806 		PROC_LOCK(p);
807 		id = p->p_pgid;
808 		PROC_UNLOCK(p);
809 	} else {
810 		idtype = P_PID;
811 		id = (id_t)args->pid;
812 	}
813 
814 	return (linux_common_wait(td, idtype, id, args->status, options,
815 	    args->rusage, NULL));
816 }
817 
818 int
819 linux_waitid(struct thread *td, struct linux_waitid_args *args)
820 {
821 	idtype_t idtype;
822 	int error, options;
823 	struct proc *p;
824 	pid_t id;
825 
826 	if (args->options & ~(LINUX_WNOHANG | LINUX_WNOWAIT | LINUX_WEXITED |
827 	    LINUX_WSTOPPED | LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
828 		return (EINVAL);
829 
830 	options = 0;
831 	linux_to_bsd_waitopts(args->options, &options);
832 
833 	id = args->id;
834 	switch (args->idtype) {
835 	case LINUX_P_ALL:
836 		idtype = P_ALL;
837 		break;
838 	case LINUX_P_PID:
839 		if (args->id <= 0)
840 			return (EINVAL);
841 		idtype = P_PID;
842 		break;
843 	case LINUX_P_PGID:
844 		if (linux_kernver(td) >= LINUX_KERNVER(5,4,0) && args->id == 0) {
845 			p = td->td_proc;
846 			PROC_LOCK(p);
847 			id = p->p_pgid;
848 			PROC_UNLOCK(p);
849 		} else if (args->id <= 0)
850 			return (EINVAL);
851 		idtype = P_PGID;
852 		break;
853 	case LINUX_P_PIDFD:
854 		LINUX_RATELIMIT_MSG("unsupported waitid P_PIDFD idtype");
855 		return (ENOSYS);
856 	default:
857 		return (EINVAL);
858 	}
859 
860 	error = linux_common_wait(td, idtype, id, NULL, options,
861 	    args->rusage, args->info);
862 	td->td_retval[0] = 0;
863 
864 	return (error);
865 }
866 
867 #ifdef LINUX_LEGACY_SYSCALLS
868 int
869 linux_mknod(struct thread *td, struct linux_mknod_args *args)
870 {
871 	int error;
872 
873 	switch (args->mode & S_IFMT) {
874 	case S_IFIFO:
875 	case S_IFSOCK:
876 		error = kern_mkfifoat(td, AT_FDCWD, args->path, UIO_USERSPACE,
877 		    args->mode);
878 		break;
879 
880 	case S_IFCHR:
881 	case S_IFBLK:
882 		error = kern_mknodat(td, AT_FDCWD, args->path, UIO_USERSPACE,
883 		    args->mode, linux_decode_dev(args->dev));
884 		break;
885 
886 	case S_IFDIR:
887 		error = EPERM;
888 		break;
889 
890 	case 0:
891 		args->mode |= S_IFREG;
892 		/* FALLTHROUGH */
893 	case S_IFREG:
894 		error = kern_openat(td, AT_FDCWD, args->path, UIO_USERSPACE,
895 		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
896 		if (error == 0)
897 			kern_close(td, td->td_retval[0]);
898 		break;
899 
900 	default:
901 		error = EINVAL;
902 		break;
903 	}
904 	return (error);
905 }
906 #endif
907 
908 int
909 linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
910 {
911 	int error, dfd;
912 
913 	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;
914 
915 	switch (args->mode & S_IFMT) {
916 	case S_IFIFO:
917 	case S_IFSOCK:
918 		error = kern_mkfifoat(td, dfd, args->filename, UIO_USERSPACE,
919 		    args->mode);
920 		break;
921 
922 	case S_IFCHR:
923 	case S_IFBLK:
924 		error = kern_mknodat(td, dfd, args->filename, UIO_USERSPACE,
925 		    args->mode, linux_decode_dev(args->dev));
926 		break;
927 
928 	case S_IFDIR:
929 		error = EPERM;
930 		break;
931 
932 	case 0:
933 		args->mode |= S_IFREG;
934 		/* FALLTHROUGH */
935 	case S_IFREG:
936 		error = kern_openat(td, dfd, args->filename, UIO_USERSPACE,
937 		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
938 		if (error == 0)
939 			kern_close(td, td->td_retval[0]);
940 		break;
941 
942 	default:
943 		error = EINVAL;
944 		break;
945 	}
946 	return (error);
947 }
948 
949 /*
950  * UGH! This is just about the dumbest idea I've ever heard!!
951  */
952 int
953 linux_personality(struct thread *td, struct linux_personality_args *args)
954 {
955 	struct linux_pemuldata *pem;
956 	struct proc *p = td->td_proc;
957 	uint32_t old;
958 
959 	PROC_LOCK(p);
960 	pem = pem_find(p);
961 	old = pem->persona;
962 	if (args->per != 0xffffffff)
963 		pem->persona = args->per;
964 	PROC_UNLOCK(p);
965 
966 	td->td_retval[0] = old;
967 	return (0);
968 }
969 
970 struct l_itimerval {
971 	l_timeval it_interval;
972 	l_timeval it_value;
973 };
974 
975 #define	B2L_ITIMERVAL(bip, lip)						\
976 	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
977 	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
978 	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
979 	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;
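/*
 * Note: B2L_ITIMERVAL expands to four separate statements, so it must not
 * be used as the lone body of an unbraced if or loop; the uses below are
 * all at plain statement level.
 */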
980 
981 int
982 linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
983 {
984 	int error;
985 	struct l_itimerval ls;
986 	struct itimerval aitv, oitv;
987 
988 	if (uap->itv == NULL) {
989 		uap->itv = uap->oitv;
990 		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
991 	}
992 
993 	error = copyin(uap->itv, &ls, sizeof(ls));
994 	if (error != 0)
995 		return (error);
996 	B2L_ITIMERVAL(&aitv, &ls);
997 	error = kern_setitimer(td, uap->which, &aitv, &oitv);
998 	if (error != 0 || uap->oitv == NULL)
999 		return (error);
1000 	B2L_ITIMERVAL(&ls, &oitv);
1001 
1002 	return (copyout(&ls, uap->oitv, sizeof(ls)));
1003 }
1004 
1005 int
1006 linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
1007 {
1008 	int error;
1009 	struct l_itimerval ls;
1010 	struct itimerval aitv;
1011 
1012 	error = kern_getitimer(td, uap->which, &aitv);
1013 	if (error != 0)
1014 		return (error);
1015 	B2L_ITIMERVAL(&ls, &aitv);
1016 	return (copyout(&ls, uap->itv, sizeof(ls)));
1017 }
1018 
1019 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
1020 int
1021 linux_nice(struct thread *td, struct linux_nice_args *args)
1022 {
1023 
1024 	return (kern_setpriority(td, PRIO_PROCESS, 0, args->inc));
1025 }
1026 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1027 
1028 int
1029 linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
1030 {
1031 	const int ngrp = args->gidsetsize;
1032 	struct ucred *newcred, *oldcred;
1033 	l_gid_t *linux_gidset;
1034 	int error;
1035 	struct proc *p;
1036 
1037 	if (ngrp < 0 || ngrp > ngroups_max)
1038 		return (EINVAL);
1039 	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
1040 	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
1041 	if (error)
1042 		goto out;
1043 
1044 	newcred = crget();
1045 	crextend(newcred, ngrp);
1046 	p = td->td_proc;
1047 	PROC_LOCK(p);
1048 	oldcred = crcopysafe(p, newcred);
1049 
1050 	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
1051 		PROC_UNLOCK(p);
1052 		crfree(newcred);
1053 		goto out;
1054 	}
1055 
1056 	newcred->cr_ngroups = ngrp;
1057 	for (int i = 0; i < ngrp; i++)
1058 		newcred->cr_groups[i] = linux_gidset[i];
1059 	newcred->cr_flags |= CRED_FLAG_GROUPSET;
1060 
1061 	setsugid(p);
1062 	proc_set_cred(p, newcred);
1063 	PROC_UNLOCK(p);
1064 	crfree(oldcred);
1065 	error = 0;
1066 out:
1067 	free(linux_gidset, M_LINUX);
1068 	return (error);
1069 }
1070 
1071 int
1072 linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
1073 {
1074 	const struct ucred *const cred = td->td_ucred;
1075 	l_gid_t *linux_gidset;
1076 	int ngrp, error;
1077 
1078 	ngrp = args->gidsetsize;
1079 
1080 	if (ngrp == 0) {
1081 		td->td_retval[0] = cred->cr_ngroups;
1082 		return (0);
1083 	}
1084 	if (ngrp < cred->cr_ngroups)
1085 		return (EINVAL);
1086 
1087 	ngrp = cred->cr_ngroups;
1088 
1089 	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
1090 	for (int i = 0; i < ngrp; ++i)
1091 		linux_gidset[i] = cred->cr_groups[i];
1092 
1093 	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
1094 	free(linux_gidset, M_LINUX);
1095 
1096 	if (error != 0)
1097 		return (error);
1098 
1099 	td->td_retval[0] = ngrp;
1100 	return (0);
1101 }
1102 
1103 static bool
1104 linux_get_dummy_limit(struct thread *td, l_uint resource, struct rlimit *rlim)
1105 {
1106 	ssize_t size;
1107 	int res, error;
1108 
1109 	if (linux_dummy_rlimits == 0)
1110 		return (false);
1111 
1112 	switch (resource) {
1113 	case LINUX_RLIMIT_LOCKS:
1114 	case LINUX_RLIMIT_RTTIME:
1115 		rlim->rlim_cur = LINUX_RLIM_INFINITY;
1116 		rlim->rlim_max = LINUX_RLIM_INFINITY;
1117 		return (true);
1118 	case LINUX_RLIMIT_NICE:
1119 	case LINUX_RLIMIT_RTPRIO:
1120 		rlim->rlim_cur = 0;
1121 		rlim->rlim_max = 0;
1122 		return (true);
1123 	case LINUX_RLIMIT_SIGPENDING:
1124 		error = kernel_sysctlbyname(td,
1125 		    "kern.sigqueue.max_pending_per_proc",
1126 		    &res, &size, 0, 0, 0, 0);
1127 		if (error != 0)
1128 			return (false);
1129 		rlim->rlim_cur = res;
1130 		rlim->rlim_max = res;
1131 		return (true);
1132 	case LINUX_RLIMIT_MSGQUEUE:
1133 		error = kernel_sysctlbyname(td,
1134 		    "kern.ipc.msgmnb", &res, &size, 0, 0, 0, 0);
1135 		if (error != 0)
1136 			return (false);
1137 		rlim->rlim_cur = res;
1138 		rlim->rlim_max = res;
1139 		return (true);
1140 	default:
1141 		return (false);
1142 	}
1143 }
1144 
1145 int
1146 linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
1147 {
1148 	struct rlimit bsd_rlim;
1149 	struct l_rlimit rlim;
1150 	u_int which;
1151 	int error;
1152 
1153 	if (args->resource >= LINUX_RLIM_NLIMITS)
1154 		return (EINVAL);
1155 
1156 	which = linux_to_bsd_resource[args->resource];
1157 	if (which == -1)
1158 		return (EINVAL);
1159 
1160 	error = copyin(args->rlim, &rlim, sizeof(rlim));
1161 	if (error)
1162 		return (error);
1163 
1164 	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
1165 	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
1166 	return (kern_setrlimit(td, which, &bsd_rlim));
1167 }
1168 
1169 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
1170 int
1171 linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
1172 {
1173 	struct l_rlimit rlim;
1174 	struct rlimit bsd_rlim;
1175 	u_int which;
1176 
1177 	if (linux_get_dummy_limit(td, args->resource, &bsd_rlim)) {
1178 		rlim.rlim_cur = bsd_rlim.rlim_cur;
1179 		rlim.rlim_max = bsd_rlim.rlim_max;
1180 		return (copyout(&rlim, args->rlim, sizeof(rlim)));
1181 	}
1182 
1183 	if (args->resource >= LINUX_RLIM_NLIMITS)
1184 		return (EINVAL);
1185 
1186 	which = linux_to_bsd_resource[args->resource];
1187 	if (which == -1)
1188 		return (EINVAL);
1189 
1190 	lim_rlimit(td, which, &bsd_rlim);
1191 
1192 #ifdef COMPAT_LINUX32
1193 	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
1194 	if (rlim.rlim_cur == UINT_MAX)
1195 		rlim.rlim_cur = INT_MAX;
1196 	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
1197 	if (rlim.rlim_max == UINT_MAX)
1198 		rlim.rlim_max = INT_MAX;
1199 #else
1200 	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
1201 	if (rlim.rlim_cur == ULONG_MAX)
1202 		rlim.rlim_cur = LONG_MAX;
1203 	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
1204 	if (rlim.rlim_max == ULONG_MAX)
1205 		rlim.rlim_max = LONG_MAX;
1206 #endif
1207 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1208 }
1209 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
1210 
1211 int
1212 linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
1213 {
1214 	struct l_rlimit rlim;
1215 	struct rlimit bsd_rlim;
1216 	u_int which;
1217 
1218 	if (linux_get_dummy_limit(td, args->resource, &bsd_rlim)) {
1219 		rlim.rlim_cur = bsd_rlim.rlim_cur;
1220 		rlim.rlim_max = bsd_rlim.rlim_max;
1221 		return (copyout(&rlim, args->rlim, sizeof(rlim)));
1222 	}
1223 
1224 	if (args->resource >= LINUX_RLIM_NLIMITS)
1225 		return (EINVAL);
1226 
1227 	which = linux_to_bsd_resource[args->resource];
1228 	if (which == -1)
1229 		return (EINVAL);
1230 
1231 	lim_rlimit(td, which, &bsd_rlim);
1232 
1233 	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
1234 	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
1235 	return (copyout(&rlim, args->rlim, sizeof(rlim)));
1236 }
1237 
1238 int
1239 linux_sched_setscheduler(struct thread *td,
1240     struct linux_sched_setscheduler_args *args)
1241 {
1242 	struct sched_param sched_param;
1243 	struct thread *tdt;
1244 	int error, policy;
1245 
1246 	switch (args->policy) {
1247 	case LINUX_SCHED_OTHER:
1248 		policy = SCHED_OTHER;
1249 		break;
1250 	case LINUX_SCHED_FIFO:
1251 		policy = SCHED_FIFO;
1252 		break;
1253 	case LINUX_SCHED_RR:
1254 		policy = SCHED_RR;
1255 		break;
1256 	default:
1257 		return (EINVAL);
1258 	}
1259 
1260 	error = copyin(args->param, &sched_param, sizeof(sched_param));
1261 	if (error)
1262 		return (error);
1263 
1264 	if (linux_map_sched_prio) {
1265 		switch (policy) {
1266 		case SCHED_OTHER:
1267 			if (sched_param.sched_priority != 0)
1268 				return (EINVAL);
1269 
1270 			sched_param.sched_priority =
1271 			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
1272 			break;
1273 		case SCHED_FIFO:
1274 		case SCHED_RR:
1275 			if (sched_param.sched_priority < 1 ||
1276 			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO)
1277 				return (EINVAL);
1278 
1279 			/*
1280 			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
1281 			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
1282 			 */
1283 			sched_param.sched_priority =
1284 			    (sched_param.sched_priority - 1) *
1285 			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
1286 			    (LINUX_MAX_RT_PRIO - 1);
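			/*
			 * Illustrative, assuming the stock RTP_PRIO_MIN == 0,
			 * RTP_PRIO_MAX == 31 and LINUX_MAX_RT_PRIO == 100:
			 * Linux priority 1 maps to 0, 50 to 15, and 99 to 31.
			 */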
1287 			break;
1288 		}
1289 	}
1290 
1291 	tdt = linux_tdfind(td, args->pid, -1);
1292 	if (tdt == NULL)
1293 		return (ESRCH);
1294 
1295 	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
1296 	PROC_UNLOCK(tdt->td_proc);
1297 	return (error);
1298 }
1299 
1300 int
1301 linux_sched_getscheduler(struct thread *td,
1302     struct linux_sched_getscheduler_args *args)
1303 {
1304 	struct thread *tdt;
1305 	int error, policy;
1306 
1307 	tdt = linux_tdfind(td, args->pid, -1);
1308 	if (tdt == NULL)
1309 		return (ESRCH);
1310 
1311 	error = kern_sched_getscheduler(td, tdt, &policy);
1312 	PROC_UNLOCK(tdt->td_proc);
1313 
1314 	switch (policy) {
1315 	case SCHED_OTHER:
1316 		td->td_retval[0] = LINUX_SCHED_OTHER;
1317 		break;
1318 	case SCHED_FIFO:
1319 		td->td_retval[0] = LINUX_SCHED_FIFO;
1320 		break;
1321 	case SCHED_RR:
1322 		td->td_retval[0] = LINUX_SCHED_RR;
1323 		break;
1324 	}
1325 	return (error);
1326 }
1327 
1328 int
1329 linux_sched_get_priority_max(struct thread *td,
1330     struct linux_sched_get_priority_max_args *args)
1331 {
1332 	struct sched_get_priority_max_args bsd;
1333 
1334 	if (linux_map_sched_prio) {
1335 		switch (args->policy) {
1336 		case LINUX_SCHED_OTHER:
1337 			td->td_retval[0] = 0;
1338 			return (0);
1339 		case LINUX_SCHED_FIFO:
1340 		case LINUX_SCHED_RR:
1341 			td->td_retval[0] = LINUX_MAX_RT_PRIO - 1;
1342 			return (0);
1343 		default:
1344 			return (EINVAL);
1345 		}
1346 	}
1347 
1348 	switch (args->policy) {
1349 	case LINUX_SCHED_OTHER:
1350 		bsd.policy = SCHED_OTHER;
1351 		break;
1352 	case LINUX_SCHED_FIFO:
1353 		bsd.policy = SCHED_FIFO;
1354 		break;
1355 	case LINUX_SCHED_RR:
1356 		bsd.policy = SCHED_RR;
1357 		break;
1358 	default:
1359 		return (EINVAL);
1360 	}
1361 	return (sys_sched_get_priority_max(td, &bsd));
1362 }
1363 
1364 int
1365 linux_sched_get_priority_min(struct thread *td,
1366     struct linux_sched_get_priority_min_args *args)
1367 {
1368 	struct sched_get_priority_min_args bsd;
1369 
1370 	if (linux_map_sched_prio) {
1371 		switch (args->policy) {
1372 		case LINUX_SCHED_OTHER:
1373 			td->td_retval[0] = 0;
1374 			return (0);
1375 		case LINUX_SCHED_FIFO:
1376 		case LINUX_SCHED_RR:
1377 			td->td_retval[0] = 1;
1378 			return (0);
1379 		default:
1380 			return (EINVAL);
1381 		}
1382 	}
1383 
1384 	switch (args->policy) {
1385 	case LINUX_SCHED_OTHER:
1386 		bsd.policy = SCHED_OTHER;
1387 		break;
1388 	case LINUX_SCHED_FIFO:
1389 		bsd.policy = SCHED_FIFO;
1390 		break;
1391 	case LINUX_SCHED_RR:
1392 		bsd.policy = SCHED_RR;
1393 		break;
1394 	default:
1395 		return (EINVAL);
1396 	}
1397 	return (sys_sched_get_priority_min(td, &bsd));
1398 }
1399 
1400 #define REBOOT_CAD_ON	0x89abcdef
1401 #define REBOOT_CAD_OFF	0
1402 #define REBOOT_HALT	0xcdef0123
1403 #define REBOOT_RESTART	0x01234567
1404 #define REBOOT_RESTART2	0xA1B2C3D4
1405 #define REBOOT_POWEROFF	0x4321FEDC
1406 #define REBOOT_MAGIC1	0xfee1dead
1407 #define REBOOT_MAGIC2	0x28121969
1408 #define REBOOT_MAGIC2A	0x05121996
1409 #define REBOOT_MAGIC2B	0x16041998
1410 
1411 int
1412 linux_reboot(struct thread *td, struct linux_reboot_args *args)
1413 {
1414 	struct reboot_args bsd_args;
1415 
1416 	if (args->magic1 != REBOOT_MAGIC1)
1417 		return (EINVAL);
1418 
1419 	switch (args->magic2) {
1420 	case REBOOT_MAGIC2:
1421 	case REBOOT_MAGIC2A:
1422 	case REBOOT_MAGIC2B:
1423 		break;
1424 	default:
1425 		return (EINVAL);
1426 	}
1427 
1428 	switch (args->cmd) {
1429 	case REBOOT_CAD_ON:
1430 	case REBOOT_CAD_OFF:
1431 		return (priv_check(td, PRIV_REBOOT));
1432 	case REBOOT_HALT:
1433 		bsd_args.opt = RB_HALT;
1434 		break;
1435 	case REBOOT_RESTART:
1436 	case REBOOT_RESTART2:
1437 		bsd_args.opt = 0;
1438 		break;
1439 	case REBOOT_POWEROFF:
1440 		bsd_args.opt = RB_POWEROFF;
1441 		break;
1442 	default:
1443 		return (EINVAL);
1444 	}
1445 	return (sys_reboot(td, &bsd_args));
1446 }
1447 
1448 int
1449 linux_getpid(struct thread *td, struct linux_getpid_args *args)
1450 {
1451 
1452 	td->td_retval[0] = td->td_proc->p_pid;
1453 
1454 	return (0);
1455 }
1456 
1457 int
1458 linux_gettid(struct thread *td, struct linux_gettid_args *args)
1459 {
1460 	struct linux_emuldata *em;
1461 
1462 	em = em_find(td);
1463 	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));
1464 
1465 	td->td_retval[0] = em->em_tid;
1466 
1467 	return (0);
1468 }
1469 
1470 int
1471 linux_getppid(struct thread *td, struct linux_getppid_args *args)
1472 {
1473 
1474 	td->td_retval[0] = kern_getppid(td);
1475 	return (0);
1476 }
1477 
1478 int
1479 linux_getgid(struct thread *td, struct linux_getgid_args *args)
1480 {
1481 
1482 	td->td_retval[0] = td->td_ucred->cr_rgid;
1483 	return (0);
1484 }
1485 
1486 int
1487 linux_getuid(struct thread *td, struct linux_getuid_args *args)
1488 {
1489 
1490 	td->td_retval[0] = td->td_ucred->cr_ruid;
1491 	return (0);
1492 }
1493 
1494 int
1495 linux_getsid(struct thread *td, struct linux_getsid_args *args)
1496 {
1497 
1498 	return (kern_getsid(td, args->pid));
1499 }
1500 
1501 int
1502 linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
1503 {
1504 	int error;
1505 
1506 	error = kern_getpriority(td, args->which, args->who);
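	/* The raw Linux syscall returns 20 - nice rather than nice itself. */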
1507 	td->td_retval[0] = 20 - td->td_retval[0];
1508 	return (error);
1509 }
1510 
1511 int
1512 linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
1513 {
1514 	int name[2];
1515 
1516 	name[0] = CTL_KERN;
1517 	name[1] = KERN_HOSTNAME;
1518 	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
1519 	    args->len, 0, 0));
1520 }
1521 
1522 int
1523 linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
1524 {
1525 	int name[2];
1526 
1527 	name[0] = CTL_KERN;
1528 	name[1] = KERN_NISDOMAINNAME;
1529 	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
1530 	    args->len, 0, 0));
1531 }
1532 
1533 int
1534 linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
1535 {
1536 
1537 	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
1538 	    args->error_code);
1539 
1540 	/*
1541 	 * XXX: we should send a signal to the parent if
1542 	 * SIGNAL_EXIT_GROUP is set. We ignore that (temporarily?)
1543 	 * as it doesn't occur often.
1544 	 */
1545 	exit1(td, args->error_code, 0);
1546 		/* NOTREACHED */
1547 }
1548 
1549 #define _LINUX_CAPABILITY_VERSION_1  0x19980330
1550 #define _LINUX_CAPABILITY_VERSION_2  0x20071026
1551 #define _LINUX_CAPABILITY_VERSION_3  0x20080522
1552 
1553 struct l_user_cap_header {
1554 	l_int	version;
1555 	l_int	pid;
1556 };
1557 
1558 struct l_user_cap_data {
1559 	l_int	effective;
1560 	l_int	permitted;
1561 	l_int	inheritable;
1562 };
1563 
1564 int
1565 linux_capget(struct thread *td, struct linux_capget_args *uap)
1566 {
1567 	struct l_user_cap_header luch;
1568 	struct l_user_cap_data lucd[2];
1569 	int error, u32s;
1570 
1571 	if (uap->hdrp == NULL)
1572 		return (EFAULT);
1573 
1574 	error = copyin(uap->hdrp, &luch, sizeof(luch));
1575 	if (error != 0)
1576 		return (error);
1577 
1578 	switch (luch.version) {
1579 	case _LINUX_CAPABILITY_VERSION_1:
1580 		u32s = 1;
1581 		break;
1582 	case _LINUX_CAPABILITY_VERSION_2:
1583 	case _LINUX_CAPABILITY_VERSION_3:
1584 		u32s = 2;
1585 		break;
1586 	default:
1587 		luch.version = _LINUX_CAPABILITY_VERSION_1;
1588 		error = copyout(&luch, uap->hdrp, sizeof(luch));
1589 		if (error)
1590 			return (error);
1591 		return (EINVAL);
1592 	}
1593 
1594 	if (luch.pid)
1595 		return (EPERM);
1596 
1597 	if (uap->datap) {
1598 		/*
1599 		 * The current implementation doesn't support setting
1600 		 * a capability (it's essentially a stub) so indicate
1601 		 * that no capabilities are currently set or available
1602 		 * to request.
1603 		 */
1604 		memset(&lucd, 0, u32s * sizeof(lucd[0]));
1605 		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
1606 	}
1607 
1608 	return (error);
1609 }
1610 
1611 int
1612 linux_capset(struct thread *td, struct linux_capset_args *uap)
1613 {
1614 	struct l_user_cap_header luch;
1615 	struct l_user_cap_data lucd[2];
1616 	int error, i, u32s;
1617 
1618 	if (uap->hdrp == NULL || uap->datap == NULL)
1619 		return (EFAULT);
1620 
1621 	error = copyin(uap->hdrp, &luch, sizeof(luch));
1622 	if (error != 0)
1623 		return (error);
1624 
1625 	switch (luch.version) {
1626 	case _LINUX_CAPABILITY_VERSION_1:
1627 		u32s = 1;
1628 		break;
1629 	case _LINUX_CAPABILITY_VERSION_2:
1630 	case _LINUX_CAPABILITY_VERSION_3:
1631 		u32s = 2;
1632 		break;
1633 	default:
1634 		luch.version = _LINUX_CAPABILITY_VERSION_1;
1635 		error = copyout(&luch, uap->hdrp, sizeof(luch));
1636 		if (error)
1637 			return (error);
1638 		return (EINVAL);
1639 	}
1640 
1641 	if (luch.pid)
1642 		return (EPERM);
1643 
1644 	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
1645 	if (error != 0)
1646 		return (error);
1647 
1648 	/* We currently don't support setting any capabilities. */
1649 	for (i = 0; i < u32s; i++) {
1650 		if (lucd[i].effective || lucd[i].permitted ||
1651 		    lucd[i].inheritable) {
1652 			linux_msg(td,
1653 			    "capset[%d] effective=0x%x, permitted=0x%x, "
1654 			    "inheritable=0x%x is not implemented", i,
1655 			    (int)lucd[i].effective, (int)lucd[i].permitted,
1656 			    (int)lucd[i].inheritable);
1657 			return (EPERM);
1658 		}
1659 	}
1660 
1661 	return (0);
1662 }
1663 
1664 int
1665 linux_prctl(struct thread *td, struct linux_prctl_args *args)
1666 {
1667 	int error = 0, max_size, arg;
1668 	struct proc *p = td->td_proc;
1669 	char comm[LINUX_MAX_COMM_LEN];
1670 	int pdeath_signal, trace_state;
1671 
1672 	switch (args->option) {
1673 	case LINUX_PR_SET_PDEATHSIG:
1674 		if (!LINUX_SIG_VALID(args->arg2))
1675 			return (EINVAL);
1676 		pdeath_signal = linux_to_bsd_signal(args->arg2);
1677 		return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL,
1678 		    &pdeath_signal));
1679 	case LINUX_PR_GET_PDEATHSIG:
1680 		error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS,
1681 		    &pdeath_signal);
1682 		if (error != 0)
1683 			return (error);
1684 		pdeath_signal = bsd_to_linux_signal(pdeath_signal);
1685 		return (copyout(&pdeath_signal,
1686 		    (void *)(register_t)args->arg2,
1687 		    sizeof(pdeath_signal)));
1688 	/*
1689 	 * In Linux, this flag controls if set[gu]id processes can coredump.
1690 	 * There are additional semantics imposed on processes that cannot
1691 	 * coredump:
1692 	 * - Such processes cannot be ptraced.
1693 	 * - There are some semantics around ownership of process-related files
1694 	 *   in the /proc namespace.
1695 	 *
1696 	 * In FreeBSD, we can (and by default, do) disable setuid coredump
1697 	 * system-wide with 'sugid_coredump.'  We control traceability on a
1698 	 * per-process basis with the procctl PROC_TRACE (=> P2_NOTRACE flag).
1699 	 * By happy coincidence, P2_NOTRACE also prevents coredumping.  So the
1700 	 * procctl is roughly analogous to Linux's DUMPABLE.
1701 	 *
1702 	 * So, proxy these knobs to the corresponding PROC_TRACE setting.
1703 	 */
1704 	case LINUX_PR_GET_DUMPABLE:
1705 		error = kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_STATUS,
1706 		    &trace_state);
1707 		if (error != 0)
1708 			return (error);
1709 		td->td_retval[0] = (trace_state != -1);
1710 		return (0);
1711 	case LINUX_PR_SET_DUMPABLE:
1712 		/*
1713 		 * It is only valid for userspace to set one of these two
1714 		 * flags, and only one at a time.
1715 		 */
1716 		switch (args->arg2) {
1717 		case LINUX_SUID_DUMP_DISABLE:
1718 			trace_state = PROC_TRACE_CTL_DISABLE_EXEC;
1719 			break;
1720 		case LINUX_SUID_DUMP_USER:
1721 			trace_state = PROC_TRACE_CTL_ENABLE;
1722 			break;
1723 		default:
1724 			return (EINVAL);
1725 		}
1726 		return (kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_CTL,
1727 		    &trace_state));
1728 	case LINUX_PR_GET_KEEPCAPS:
1729 		/*
1730 		 * Indicate that we always clear the effective and
1731 		 * permitted capability sets when the user id becomes
1732 		 * non-zero (actually the capability sets are simply
1733 		 * always zero in the current implementation).
1734 		 */
1735 		td->td_retval[0] = 0;
1736 		break;
1737 	case LINUX_PR_SET_KEEPCAPS:
1738 		/*
1739 		 * Ignore requests to keep the effective and permitted
1740 		 * capability sets when the user id becomes non-zero.
1741 		 */
1742 		break;
1743 	case LINUX_PR_SET_NAME:
1744 		/*
1745 		 * To be on the safe side we need to make sure to not
1746 		 * overflow the size a Linux program expects. We already
1747 		 * do this here in the copyin, so that we don't need to
1748 		 * check on copyout.
1749 		 */
1750 		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
1751 		error = copyinstr((void *)(register_t)args->arg2, comm,
1752 		    max_size, NULL);
1753 
1754 		/* Linux silently truncates the name if it is too long. */
1755 		if (error == ENAMETOOLONG) {
1756 			/*
1757 			 * XXX: copyinstr() isn't documented to populate the
1758 			 * array completely, so do a copyin() to be on the
1759 			 * safe side. This should be changed in case
1760 			 * copyinstr() is changed to guarantee this.
1761 			 */
1762 			error = copyin((void *)(register_t)args->arg2, comm,
1763 			    max_size - 1);
1764 			comm[max_size - 1] = '\0';
1765 		}
1766 		if (error)
1767 			return (error);
1768 
1769 		PROC_LOCK(p);
1770 		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
1771 		PROC_UNLOCK(p);
1772 		break;
1773 	case LINUX_PR_GET_NAME:
1774 		PROC_LOCK(p);
1775 		strlcpy(comm, p->p_comm, sizeof(comm));
1776 		PROC_UNLOCK(p);
1777 		error = copyout(comm, (void *)(register_t)args->arg2,
1778 		    strlen(comm) + 1);
1779 		break;
1780 	case LINUX_PR_GET_SECCOMP:
1781 	case LINUX_PR_SET_SECCOMP:
1782 		/*
1783 		 * Same as returned by Linux without CONFIG_SECCOMP enabled.
1784 		 */
1785 		error = EINVAL;
1786 		break;
1787 	case LINUX_PR_CAPBSET_READ:
1788 #if 0
1789 		/*
1790 		 * This makes too much noise with Ubuntu Focal.
1791 		 */
1792 		linux_msg(td, "unsupported prctl PR_CAPBSET_READ %d",
1793 		    (int)args->arg2);
1794 #endif
1795 		error = EINVAL;
1796 		break;
1797 	case LINUX_PR_SET_CHILD_SUBREAPER:
1798 		if (args->arg2 == 0) {
1799 			return (kern_procctl(td, P_PID, 0, PROC_REAP_RELEASE,
1800 			    NULL));
1801 		}
1802 
1803 		return (kern_procctl(td, P_PID, 0, PROC_REAP_ACQUIRE,
1804 		    NULL));
1805 	case LINUX_PR_SET_NO_NEW_PRIVS:
1806 		arg = args->arg2 == 1 ?
1807 		    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
1808 		error = kern_procctl(td, P_PID, p->p_pid,
1809 		    PROC_NO_NEW_PRIVS_CTL, &arg);
1810 		break;
1811 	case LINUX_PR_SET_PTRACER:
1812 		linux_msg(td, "unsupported prctl PR_SET_PTRACER");
1813 		error = EINVAL;
1814 		break;
1815 	default:
1816 		linux_msg(td, "unsupported prctl option %d", args->option);
1817 		error = EINVAL;
1818 		break;
1819 	}
1820 
1821 	return (error);
1822 }
1823 
1824 int
1825 linux_sched_setparam(struct thread *td,
1826     struct linux_sched_setparam_args *uap)
1827 {
1828 	struct sched_param sched_param;
1829 	struct thread *tdt;
1830 	int error, policy;
1831 
1832 	error = copyin(uap->param, &sched_param, sizeof(sched_param));
1833 	if (error)
1834 		return (error);
1835 
1836 	tdt = linux_tdfind(td, uap->pid, -1);
1837 	if (tdt == NULL)
1838 		return (ESRCH);
1839 
1840 	if (linux_map_sched_prio) {
1841 		error = kern_sched_getscheduler(td, tdt, &policy);
1842 		if (error)
1843 			goto out;
1844 
1845 		switch (policy) {
1846 		case SCHED_OTHER:
1847 			if (sched_param.sched_priority != 0) {
1848 				error = EINVAL;
1849 				goto out;
1850 			}
1851 			sched_param.sched_priority =
1852 			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
1853 			break;
1854 		case SCHED_FIFO:
1855 		case SCHED_RR:
1856 			if (sched_param.sched_priority < 1 ||
1857 			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO) {
1858 				error = EINVAL;
1859 				goto out;
1860 			}
1861 			/*
1862 			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
1863 			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
1864 			 */
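			/*
			 * Worked example, assuming LINUX_MAX_RT_PRIO == 100
			 * and an rtprio range of [0, 31]: Linux priority 1
			 * maps to (0 * 32) / 99 == 0, and Linux priority 99
			 * maps to (98 * 32) / 99 == 31.
			 */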
1865 			sched_param.sched_priority =
1866 			    (sched_param.sched_priority - 1) *
1867 			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
1868 			    (LINUX_MAX_RT_PRIO - 1);
1869 			break;
1870 		}
1871 	}
1872 
1873 	error = kern_sched_setparam(td, tdt, &sched_param);
1874 out:	PROC_UNLOCK(tdt->td_proc);
1875 	return (error);
1876 }
1877 
1878 int
1879 linux_sched_getparam(struct thread *td,
1880     struct linux_sched_getparam_args *uap)
1881 {
1882 	struct sched_param sched_param;
1883 	struct thread *tdt;
1884 	int error, policy;
1885 
1886 	tdt = linux_tdfind(td, uap->pid, -1);
1887 	if (tdt == NULL)
1888 		return (ESRCH);
1889 
1890 	error = kern_sched_getparam(td, tdt, &sched_param);
1891 	if (error) {
1892 		PROC_UNLOCK(tdt->td_proc);
1893 		return (error);
1894 	}
1895 
1896 	if (linux_map_sched_prio) {
1897 		error = kern_sched_getscheduler(td, tdt, &policy);
1898 		PROC_UNLOCK(tdt->td_proc);
1899 		if (error)
1900 			return (error);
1901 
1902 		switch (policy) {
1903 		case SCHED_OTHER:
1904 			sched_param.sched_priority = 0;
1905 			break;
1906 		case SCHED_FIFO:
1907 		case SCHED_RR:
1908 			/*
1909 			 * Map [0, RTP_PRIO_MAX - RTP_PRIO_MIN] to
1910 			 * [1, LINUX_MAX_RT_PRIO - 1] (rounding up).
1911 			 */
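			/*
			 * Under the same assumed ranges, FreeBSD priority 0
			 * maps back to (0 * 99 + 30) / 31 + 1 == 1, the
			 * lowest Linux realtime priority.
			 */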
1912 			sched_param.sched_priority =
1913 			    (sched_param.sched_priority *
1914 			    (LINUX_MAX_RT_PRIO - 1) +
1915 			    (RTP_PRIO_MAX - RTP_PRIO_MIN - 1)) /
1916 			    (RTP_PRIO_MAX - RTP_PRIO_MIN) + 1;
1917 			break;
1918 		}
1919 	} else
1920 		PROC_UNLOCK(tdt->td_proc);
1921 
1922 	error = copyout(&sched_param, uap->param, sizeof(sched_param));
1923 	return (error);
1924 }
1925 
1926 /*
1927  * Get affinity of a process.
1928  */
1929 int
1930 linux_sched_getaffinity(struct thread *td,
1931     struct linux_sched_getaffinity_args *args)
1932 {
1933 	struct thread *tdt;
1934 	cpuset_t *mask;
1935 	size_t size;
1936 	int error;
1937 	id_t tid;
1938 
1939 	tdt = linux_tdfind(td, args->pid, -1);
1940 	if (tdt == NULL)
1941 		return (ESRCH);
1942 	tid = tdt->td_tid;
1943 	PROC_UNLOCK(tdt->td_proc);
1944 
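	/*
	 * The raw Linux syscall returns the number of bytes written into
	 * the user mask, so clamp the requested length to the kernel
	 * cpuset size and report the clamped value on success.
	 */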
1945 	mask = malloc(sizeof(cpuset_t), M_LINUX, M_WAITOK | M_ZERO);
1946 	size = min(args->len, sizeof(cpuset_t));
1947 	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
1948 	    tid, size, mask);
1949 	if (error == ERANGE)
1950 		error = EINVAL;
1951 	if (error == 0)
1952 		error = copyout(mask, args->user_mask_ptr, size);
1953 	if (error == 0)
1954 		td->td_retval[0] = size;
1955 	free(mask, M_LINUX);
1956 	return (error);
1957 }
1958 
1959 /*
1960  *  Set affinity of a process.
1961  */
1962 int
1963 linux_sched_setaffinity(struct thread *td,
1964     struct linux_sched_setaffinity_args *args)
1965 {
1966 	struct thread *tdt;
1967 	cpuset_t *mask;
1968 	int cpu, error;
1969 	size_t len;
1970 	id_t tid;
1971 
1972 	tdt = linux_tdfind(td, args->pid, -1);
1973 	if (tdt == NULL)
1974 		return (ESRCH);
1975 	tid = tdt->td_tid;
1976 	PROC_UNLOCK(tdt->td_proc);
1977 
1978 	len = min(args->len, sizeof(cpuset_t));
1979 	mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
1980 	error = copyin(args->user_mask_ptr, mask, len);
1981 	if (error != 0)
1982 		goto out;
1983 	/* Linux ignores the high bits, so clear any CPUs above mp_maxid. */
1984 	CPU_FOREACH_ISSET(cpu, mask)
1985 		if (cpu > mp_maxid)
1986 			CPU_CLR(cpu, mask);
1987 
1988 	error = kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
1989 	    tid, mask);
1990 	if (error == EDEADLK)
1991 		error = EINVAL;
1992 out:
1993 	free(mask, M_TEMP);
1994 	return (error);
1995 }
1996 
1997 struct linux_rlimit64 {
1998 	uint64_t	rlim_cur;
1999 	uint64_t	rlim_max;
2000 };
2001 
2002 int
2003 linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
2004 {
2005 	struct rlimit rlim, nrlim;
2006 	struct linux_rlimit64 lrlim;
2007 	struct proc *p;
2008 	u_int which;
2009 	int flags;
2010 	int error;
2011 
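	/*
	 * A pure read of a limit that the Linuxulator only emulates can
	 * be satisfied from the dummy limits without touching the target
	 * process.
	 */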
2012 	if (args->new == NULL && args->old != NULL) {
2013 		if (linux_get_dummy_limit(td, args->resource, &rlim)) {
2014 			lrlim.rlim_cur = rlim.rlim_cur;
2015 			lrlim.rlim_max = rlim.rlim_max;
2016 			return (copyout(&lrlim, args->old, sizeof(lrlim)));
2017 		}
2018 	}
2019 
2020 	if (args->resource >= LINUX_RLIM_NLIMITS)
2021 		return (EINVAL);
2022 
2023 	which = linux_to_bsd_resource[args->resource];
2024 	if (which == -1)
2025 		return (EINVAL);
2026 
2027 	if (args->new != NULL) {
2028 		/*
2029 		 * Note: unlike FreeBSD, where rlim is signed 64-bit, the
2030 		 * Linux rlim is unsigned 64-bit. FreeBSD treats negative
2031 		 * limits as INFINITY, so no conversion is needed.
2032 		 */
2033 		error = copyin(args->new, &nrlim, sizeof(nrlim));
2034 		if (error != 0)
2035 			return (error);
2036 	}
2037 
2038 	flags = PGET_HOLD | PGET_NOTWEXIT;
2039 	if (args->new != NULL)
2040 		flags |= PGET_CANDEBUG;
2041 	else
2042 		flags |= PGET_CANSEE;
2043 	if (args->pid == 0) {
2044 		p = td->td_proc;
2045 		PHOLD(p);
2046 	} else {
2047 		error = pget(args->pid, flags, &p);
2048 		if (error != 0)
2049 			return (error);
2050 	}
2051 	if (args->old != NULL) {
2052 		PROC_LOCK(p);
2053 		lim_rlimit_proc(p, which, &rlim);
2054 		PROC_UNLOCK(p);
2055 		if (rlim.rlim_cur == RLIM_INFINITY)
2056 			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
2057 		else
2058 			lrlim.rlim_cur = rlim.rlim_cur;
2059 		if (rlim.rlim_max == RLIM_INFINITY)
2060 			lrlim.rlim_max = LINUX_RLIM_INFINITY;
2061 		else
2062 			lrlim.rlim_max = rlim.rlim_max;
2063 		error = copyout(&lrlim, args->old, sizeof(lrlim));
2064 		if (error != 0)
2065 			goto out;
2066 	}
2067 
2068 	if (args->new != NULL)
2069 		error = kern_proc_setrlimit(td, p, which, &nrlim);
2070 
2071 out:
2072 	PRELE(p);
2073 	return (error);
2074 }
2075 
2076 int
2077 linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
2078 {
2079 	struct timespec ts, *tsp;
2080 	int error;
2081 
2082 	if (args->tsp != NULL) {
2083 		error = linux_get_timespec(&ts, args->tsp);
2084 		if (error != 0)
2085 			return (error);
2086 		tsp = &ts;
2087 	} else
2088 		tsp = NULL;
2089 
2090 	error = linux_common_pselect6(td, args->nfds, args->readfds,
2091 	    args->writefds, args->exceptfds, tsp, args->sig);
2092 
2093 	if (args->tsp != NULL)
2094 		linux_put_timespec(&ts, args->tsp);
2095 	return (error);
2096 }
2097 
2098 static int
2099 linux_common_pselect6(struct thread *td, l_int nfds, l_fd_set *readfds,
2100     l_fd_set *writefds, l_fd_set *exceptfds, struct timespec *tsp,
2101     l_uintptr_t *sig)
2102 {
2103 	struct timeval utv, tv0, tv1, *tvp;
2104 	struct l_pselect6arg lpse6;
2105 	sigset_t *ssp;
2106 	sigset_t ss;
2107 	int error;
2108 
2109 	ssp = NULL;
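	/*
	 * The sixth pselect6() argument is not a plain sigset_t pointer
	 * but a pointer to a { sigset pointer, sigset size } pair, which
	 * l_pselect6arg mirrors; unpack it before copying in the mask
	 * itself.
	 */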
2110 	if (sig != NULL) {
2111 		error = copyin(sig, &lpse6, sizeof(lpse6));
2112 		if (error != 0)
2113 			return (error);
2114 		error = linux_copyin_sigset(td, PTRIN(lpse6.ss),
2115 		    lpse6.ss_len, &ss, &ssp);
2116 		if (error != 0)
2117 			return (error);
2118 	}
2120 
2121 	/*
2122 	 * Currently glibc converts the nanosecond value to microseconds.
2123 	 * This means losing precision, but for now that is hardly visible.
2124 	 */
2125 	if (tsp != NULL) {
2126 		TIMESPEC_TO_TIMEVAL(&utv, tsp);
2127 		if (itimerfix(&utv))
2128 			return (EINVAL);
2129 
2130 		microtime(&tv0);
2131 		tvp = &utv;
2132 	} else
2133 		tvp = NULL;
2134 
2135 	error = kern_pselect(td, nfds, readfds, writefds,
2136 	    exceptfds, tvp, ssp, LINUX_NFDBITS);
2137 
2138 	if (tsp != NULL) {
2139 		/*
2140 		 * Compute how much of the timeout is left by taking the
2141 		 * difference between the current time and the time when
2142 		 * we started the call, and subtracting that elapsed time
2143 		 * from the user-supplied value.
2144 		 */
2145 		microtime(&tv1);
2146 		timevalsub(&tv1, &tv0);
2147 		timevalsub(&utv, &tv1);
2148 		if (utv.tv_sec < 0)
2149 			timevalclear(&utv);
2150 		TIMEVAL_TO_TIMESPEC(&utv, tsp);
2151 	}
2152 	return (error);
2153 }
2154 
2155 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
2156 int
2157 linux_pselect6_time64(struct thread *td,
2158     struct linux_pselect6_time64_args *args)
2159 {
2160 	struct timespec ts, *tsp;
2161 	int error;
2162 
2163 	if (args->tsp != NULL) {
2164 		error = linux_get_timespec64(&ts, args->tsp);
2165 		if (error != 0)
2166 			return (error);
2167 		tsp = &ts;
2168 	} else
2169 		tsp = NULL;
2170 
2171 	error = linux_common_pselect6(td, args->nfds, args->readfds,
2172 	    args->writefds, args->exceptfds, tsp, args->sig);
2173 
2174 	if (args->tsp != NULL)
2175 		linux_put_timespec64(&ts, args->tsp);
2176 	return (error);
2177 }
2178 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
2179 
2180 int
2181 linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
2182 {
2183 	struct timespec uts, *tsp;
2184 	int error;
2185 
2186 	if (args->tsp != NULL) {
2187 		error = linux_get_timespec(&uts, args->tsp);
2188 		if (error != 0)
2189 			return (error);
2190 		tsp = &uts;
2191 	} else
2192 		tsp = NULL;
2193 
2194 	error = linux_common_ppoll(td, args->fds, args->nfds, tsp,
2195 	    args->sset, args->ssize);
2196 	if (error == 0 && args->tsp != NULL)
2197 		error = linux_put_timespec(&uts, args->tsp);
2198 	return (error);
2199 }
2200 
2201 static int
2202 linux_common_ppoll(struct thread *td, struct pollfd *fds, uint32_t nfds,
2203     struct timespec *tsp, l_sigset_t *sset, l_size_t ssize)
2204 {
2205 	struct timespec ts0, ts1;
2206 	struct pollfd stackfds[32];
2207 	struct pollfd *kfds;
2208 	sigset_t *ssp;
2209 	sigset_t ss;
2210 	int error;
2211 
2212 	if (kern_poll_maxfds(nfds))
2213 		return (EINVAL);
2214 	if (sset != NULL) {
2215 		error = linux_copyin_sigset(td, sset, ssize, &ss, &ssp);
2216 		if (error != 0)
2217 			return (error);
2218 	} else
2219 		ssp = NULL;
2220 	if (tsp != NULL)
2221 		nanotime(&ts0);
2222 
2223 	if (nfds > nitems(stackfds))
2224 		kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
2225 	else
2226 		kfds = stackfds;
2227 	error = linux_pollin(td, kfds, fds, nfds);
2228 	if (error != 0)
2229 		goto out;
2230 
2231 	error = kern_poll_kfds(td, kfds, nfds, tsp, ssp);
2232 	if (error == 0)
2233 		error = linux_pollout(td, kfds, fds, nfds);
2234 
2235 	if (error == 0 && tsp != NULL) {
2236 		if (td->td_retval[0]) {
2237 			nanotime(&ts1);
2238 			timespecsub(&ts1, &ts0, &ts1);
2239 			timespecsub(tsp, &ts1, tsp);
2240 			if (tsp->tv_sec < 0)
2241 				timespecclear(tsp);
2242 		} else
2243 			timespecclear(tsp);
2244 	}
2245 
2246 out:
2247 	if (nfds > nitems(stackfds))
2248 		free(kfds, M_TEMP);
2249 	return (error);
2250 }
2251 
2252 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
2253 int
2254 linux_ppoll_time64(struct thread *td, struct linux_ppoll_time64_args *args)
2255 {
2256 	struct timespec uts, *tsp;
2257 	int error;
2258 
2259 	if (args->tsp != NULL) {
2260 		error = linux_get_timespec64(&uts, args->tsp);
2261 		if (error != 0)
2262 			return (error);
2263 		tsp = &uts;
2264 	} else
2265 		tsp = NULL;
2266 	error = linux_common_ppoll(td, args->fds, args->nfds, tsp,
2267 	    args->sset, args->ssize);
2268 	if (error == 0 && args->tsp != NULL)
2269 		error = linux_put_timespec64(&uts, args->tsp);
2270 	return (error);
2271 }
2272 #endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */
2273 
2274 static int
2275 linux_pollin(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
2276 {
2277 	int error;
2278 	u_int i;
2279 
2280 	error = copyin(ufds, fds, nfd * sizeof(*fds));
2281 	if (error != 0)
2282 		return (error);
2283 
2284 	for (i = 0; i < nfd; i++) {
2285 		if (fds->events != 0)
2286 			linux_to_bsd_poll_events(td, fds->fd,
2287 			    fds->events, &fds->events);
2288 		fds++;
2289 	}
2290 	return (0);
2291 }
2292 
2293 static int
2294 linux_pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
2295 {
2296 	int error = 0;
2297 	u_int i, n = 0;
2298 
2299 	for (i = 0; i < nfd; i++) {
2300 		if (fds->revents != 0) {
2301 			bsd_to_linux_poll_events(fds->revents,
2302 			    &fds->revents);
2303 			n++;
2304 		}
2305 		error = copyout(&fds->revents, &ufds->revents,
2306 		    sizeof(ufds->revents));
2307 		if (error)
2308 			return (error);
2309 		fds++;
2310 		ufds++;
2311 	}
2312 	td->td_retval[0] = n;
2313 	return (0);
2314 }
2315 
2316 static int
2317 linux_sched_rr_get_interval_common(struct thread *td, pid_t pid,
2318     struct timespec *ts)
2319 {
2320 	struct thread *tdt;
2321 	int error;
2322 
2323 	/*
2324 	 * According to the manual page, EINVAL should be returned
2325 	 * when an invalid pid is specified.
2326 	 */
2327 	if (pid < 0)
2328 		return (EINVAL);
2329 
2330 	tdt = linux_tdfind(td, pid, -1);
2331 	if (tdt == NULL)
2332 		return (ESRCH);
2333 
2334 	error = kern_sched_rr_get_interval_td(td, tdt, ts);
2335 	PROC_UNLOCK(tdt->td_proc);
2336 	return (error);
2337 }
2338 
2339 int
2340 linux_sched_rr_get_interval(struct thread *td,
2341     struct linux_sched_rr_get_interval_args *uap)
2342 {
2343 	struct timespec ts;
2344 	int error;
2345 
2346 	error = linux_sched_rr_get_interval_common(td, uap->pid, &ts);
2347 	if (error != 0)
2348 		return (error);
2349 	return (linux_put_timespec(&ts, uap->interval));
2350 }
2351 
2352 #if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
2353 int
2354 linux_sched_rr_get_interval_time64(struct thread *td,
2355     struct linux_sched_rr_get_interval_time64_args *uap)
2356 {
2357 	struct timespec ts;
2358 	int error;
2359 
2360 	error = linux_sched_rr_get_interval_common(td, uap->pid, &ts);
2361 	if (error != 0)
2362 		return (error);
2363 	return (linux_put_timespec64(&ts, uap->interval));
2364 }
2365 #endif
2366 
2367 /*
2368  * When the Linux thread is the initial thread in the thread
2369  * group, the thread id is equal to the process id.
2370  * Glibc depends on this magic (assert in pthread_getattr_np.c).
2371  */
2372 struct thread *
2373 linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
2374 {
2375 	struct linux_emuldata *em;
2376 	struct thread *tdt;
2377 	struct proc *p;
2378 
2379 	tdt = NULL;
2380 	if (tid == 0 || tid == td->td_tid) {
2381 		if (pid != -1 && td->td_proc->p_pid != pid)
2382 			return (NULL);
2383 		PROC_LOCK(td->td_proc);
2384 		return (td);
2385 	} else if (tid > PID_MAX)
2386 		return (tdfind(tid, pid));
2387 
2388 	/*
2389 	 * The initial thread, where the tid is equal to the pid.
2390 	 */
2391 	p = pfind(tid);
2392 	if (p != NULL) {
2393 		if (SV_PROC_ABI(p) != SV_ABI_LINUX ||
2394 		    (pid != -1 && tid != pid)) {
2395 			/*
2396 			 * p is not a Linuxulator process.
2397 			 */
2398 			PROC_UNLOCK(p);
2399 			return (NULL);
2400 		}
2401 		FOREACH_THREAD_IN_PROC(p, tdt) {
2402 			em = em_find(tdt);
2403 			if (tid == em->em_tid)
2404 				return (tdt);
2405 		}
2406 		PROC_UNLOCK(p);
2407 	}
2408 	return (NULL);
2409 }
2410 
2411 void
2412 linux_to_bsd_waitopts(int options, int *bsdopts)
2413 {
2414 
2415 	if (options & LINUX_WNOHANG)
2416 		*bsdopts |= WNOHANG;
2417 	if (options & LINUX_WUNTRACED)
2418 		*bsdopts |= WUNTRACED;
2419 	if (options & LINUX_WEXITED)
2420 		*bsdopts |= WEXITED;
2421 	if (options & LINUX_WCONTINUED)
2422 		*bsdopts |= WCONTINUED;
2423 	if (options & LINUX_WNOWAIT)
2424 		*bsdopts |= WNOWAIT;
2425 
2426 	if (options & __WCLONE)
2427 		*bsdopts |= WLINUXCLONE;
2428 }
2429 
2430 int
2431 linux_getrandom(struct thread *td, struct linux_getrandom_args *args)
2432 {
2433 	struct uio uio;
2434 	struct iovec iov;
2435 	int error;
2436 
2437 	if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM))
2438 		return (EINVAL);
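	/*
	 * LINUX_GRND_RANDOM is accepted but not treated specially; both
	 * request flavors are served from the kernel's single entropy
	 * source.
	 */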
2439 	if (args->count > INT_MAX)
2440 		args->count = INT_MAX;
2441 
2442 	iov.iov_base = args->buf;
2443 	iov.iov_len = args->count;
2444 
2445 	uio.uio_iov = &iov;
2446 	uio.uio_iovcnt = 1;
2447 	uio.uio_resid = iov.iov_len;
2448 	uio.uio_segflg = UIO_USERSPACE;
2449 	uio.uio_rw = UIO_READ;
2450 	uio.uio_td = td;
2451 
2452 	error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK);
2453 	if (error == 0)
2454 		td->td_retval[0] = args->count - uio.uio_resid;
2455 	return (error);
2456 }
2457 
2458 int
2459 linux_mincore(struct thread *td, struct linux_mincore_args *args)
2460 {
2461 
2462 	/* Needs to be page-aligned */
2463 	if (args->start & PAGE_MASK)
2464 		return (EINVAL);
2465 	return (kern_mincore(td, args->start, args->len, args->vec));
2466 }
2467 
2468 #define	SYSLOG_TAG	"<6>"
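/*
 * "<6>" is the Linux log-level prefix for KERN_INFO; the read loop below
 * re-inserts it after each newline so the copied-out buffer resembles
 * native Linux dmesg output.
 */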
2469 
2470 int
2471 linux_syslog(struct thread *td, struct linux_syslog_args *args)
2472 {
2473 	char buf[128], *src, *dst;
2474 	u_int seq;
2475 	int buflen, error;
2476 
2477 	if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) {
2478 		linux_msg(td, "syslog unsupported type 0x%x", args->type);
2479 		return (EINVAL);
2480 	}
2481 
2482 	if (args->len < 6) {
2483 		td->td_retval[0] = 0;
2484 		return (0);
2485 	}
2486 
2487 	error = priv_check(td, PRIV_MSGBUF);
2488 	if (error)
2489 		return (error);
2490 
2491 	mtx_lock(&msgbuf_lock);
2492 	msgbuf_peekbytes(msgbufp, NULL, 0, &seq);
2493 	mtx_unlock(&msgbuf_lock);
2494 
2495 	dst = args->buf;
2496 	error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG));
2497 	/* The -1 is to skip the trailing '\0'. */
2498 	dst += sizeof(SYSLOG_TAG) - 1;
2499 
2500 	while (error == 0) {
2501 		mtx_lock(&msgbuf_lock);
2502 		buflen = msgbuf_peekbytes(msgbufp, buf, sizeof(buf), &seq);
2503 		mtx_unlock(&msgbuf_lock);
2504 
2505 		if (buflen == 0)
2506 			break;
2507 
2508 		for (src = buf; src < buf + buflen && error == 0; src++) {
2509 			if (*src == '\0')
2510 				continue;
2511 
2512 			if (dst >= args->buf + args->len)
2513 				goto out;
2514 
2515 			error = copyout(src, dst, 1);
2516 			dst++;
2517 
2518 			if (*src == '\n' && *(src + 1) != '<' &&
2519 			    dst + sizeof(SYSLOG_TAG) < args->buf + args->len) {
2520 				error = copyout(&SYSLOG_TAG,
2521 				    dst, sizeof(SYSLOG_TAG));
2522 				dst += sizeof(SYSLOG_TAG) - 1;
2523 			}
2524 		}
2525 	}
2526 out:
2527 	td->td_retval[0] = dst - args->buf;
2528 	return (error);
2529 }
2530 
2531 int
2532 linux_getcpu(struct thread *td, struct linux_getcpu_args *args)
2533 {
2534 	int cpu, error, node;
2535 
2536 	cpu = td->td_oncpu; /* Make sure it doesn't change during copyout(9) */
2537 	error = 0;
2538 	node = cpuid_to_pcpu[cpu]->pc_domain;
2539 
2540 	if (args->cpu != NULL)
2541 		error = copyout(&cpu, args->cpu, sizeof(l_int));
2542 	if (args->node != NULL && error == 0)
2543 		error = copyout(&node, args->node, sizeof(l_int));
2544 	return (error);
2545 }
2546 
2547 #if defined(__i386__) || defined(__amd64__)
2548 int
2549 linux_poll(struct thread *td, struct linux_poll_args *args)
2550 {
2551 	struct timespec ts, *tsp;
2552 
2553 	if (args->timeout != INFTIM) {
2554 		if (args->timeout < 0)
2555 			return (EINVAL);
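		/* E.g. a 1500 ms timeout becomes ts = { 1 s, 500000000 ns }. */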
2556 		ts.tv_sec = args->timeout / 1000;
2557 		ts.tv_nsec = (args->timeout % 1000) * 1000000;
2558 		tsp = &ts;
2559 	} else
2560 		tsp = NULL;
2561 
2562 	return (linux_common_ppoll(td, args->fds, args->nfds,
2563 	    tsp, NULL, 0));
2564 }
2565 #endif /* __i386__ || __amd64__ */
2566 
2567 int
2568 linux_seccomp(struct thread *td, struct linux_seccomp_args *args)
2569 {
2570 
2571 	switch (args->op) {
2572 	case LINUX_SECCOMP_GET_ACTION_AVAIL:
2573 		return (EOPNOTSUPP);
2574 	default:
2575 		/*
2576 		 * Ignore unknown operations, just like a Linux kernel
2577 		 * built without CONFIG_SECCOMP.
2578 		 */
2579 		return (EINVAL);
2580 	}
2581 }
2582 
2583 /*
2584  * Custom version of exec_copyin_args(), to copy out argument and environment
2585  * strings from the old process address space into the temporary string buffer.
2586  * Based on freebsd32_exec_copyin_args.
2587  */
2588 static int
2589 linux_exec_copyin_args(struct image_args *args, const char *fname,
2590     l_uintptr_t *argv, l_uintptr_t *envv)
2591 {
2592 	char *argp, *envp;
2593 	l_uintptr_t *ptr, arg;
2594 	int error;
2595 
2596 	bzero(args, sizeof(*args));
2597 	if (argv == NULL)
2598 		return (EFAULT);
2599 
2600 	/*
2601 	 * Allocate demand-paged memory for the file name, argument, and
2602 	 * environment strings.
2603 	 */
2604 	error = exec_alloc_args(args);
2605 	if (error != 0)
2606 		return (error);
2607 
2608 	/*
2609 	 * Copy the file name.
2610 	 */
2611 	error = exec_args_add_fname(args, fname, UIO_USERSPACE);
2612 	if (error != 0)
2613 		goto err_exit;
2614 
2615 	/*
2616 	 * extract arguments first
2617 	 */
2618 	ptr = argv;
2619 	for (;;) {
2620 		error = copyin(ptr++, &arg, sizeof(arg));
2621 		if (error)
2622 			goto err_exit;
2623 		if (arg == 0)
2624 			break;
2625 		argp = PTRIN(arg);
2626 		error = exec_args_add_arg(args, argp, UIO_USERSPACE);
2627 		if (error != 0)
2628 			goto err_exit;
2629 	}
2630 
2631 	/*
2632 	 * This comment is from Linux do_execveat_common:
2633 	 * When argv is empty, add an empty string ("") as argv[0] to
2634 	 * ensure confused userspace programs that start processing
2635 	 * from argv[1] won't end up walking envp.
2636 	 */
2637 	if (args->argc == 0 &&
2638 	    (error = exec_args_add_arg(args, "", UIO_SYSSPACE)) != 0)
2639 		goto err_exit;
2640 
2641 	/*
2642 	 * extract environment strings
2643 	 */
2644 	if (envv) {
2645 		ptr = envv;
2646 		for (;;) {
2647 			error = copyin(ptr++, &arg, sizeof(arg));
2648 			if (error)
2649 				goto err_exit;
2650 			if (arg == 0)
2651 				break;
2652 			envp = PTRIN(arg);
2653 			error = exec_args_add_env(args, envp, UIO_USERSPACE);
2654 			if (error != 0)
2655 				goto err_exit;
2656 		}
2657 	}
2658 
2659 	return (0);
2660 
2661 err_exit:
2662 	exec_free_args(args);
2663 	return (error);
2664 }
2665 
2666 int
2667 linux_execve(struct thread *td, struct linux_execve_args *args)
2668 {
2669 	struct image_args eargs;
2670 	int error;
2671 
2672 	LINUX_CTR(execve);
2673 
2674 	error = linux_exec_copyin_args(&eargs, args->path, args->argp,
2675 	    args->envp);
2676 	if (error == 0)
2677 		error = linux_common_execve(td, &eargs);
2678 	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
2679 	return (error);
2680 }
2681 
2682 static void
2683 linux_up_rtprio_if(struct thread *td1, struct rtprio *rtp)
2684 {
2685 	struct rtprio rtp2;
2686 
2687 	pri_to_rtp(td1, &rtp2);
2688 	if (rtp2.type <  rtp->type ||
2689 	    (rtp2.type == rtp->type &&
2690 	    rtp2.prio < rtp->prio)) {
2691 		rtp->type = rtp2.type;
2692 		rtp->prio = rtp2.prio;
2693 	}
2694 }
2695 
2696 #define	LINUX_PRIO_DIVIDER	(RTP_PRIO_MAX / LINUX_IOPRIO_MAX)
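/*
 * Worked example, assuming RTP_PRIO_MAX == 31 and LINUX_IOPRIO_MAX == 8:
 * the divider is 3, so FreeBSD best-effort priority 21 maps to Linux
 * best-effort level 21 / 3 == 7, and level 7 maps back to 7 * 3 == 21.
 */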
2697 
2698 static int
2699 linux_rtprio2ioprio(struct rtprio *rtp)
2700 {
2701 	int ioprio, prio;
2702 
2703 	switch (rtp->type) {
2704 	case RTP_PRIO_IDLE:
2705 		prio = RTP_PRIO_MIN;
2706 		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_IDLE, prio);
2707 		break;
2708 	case RTP_PRIO_NORMAL:
2709 		prio = rtp->prio / LINUX_PRIO_DIVIDER;
2710 		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_BE, prio);
2711 		break;
2712 	case RTP_PRIO_REALTIME:
2713 		prio = rtp->prio / LINUX_PRIO_DIVIDER;
2714 		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_RT, prio);
2715 		break;
2716 	default:
2717 		prio = RTP_PRIO_MIN;
2718 		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_NONE, prio);
2719 		break;
2720 	}
2721 	return (ioprio);
2722 }
2723 
2724 static int
2725 linux_ioprio2rtprio(int ioprio, struct rtprio *rtp)
2726 {
2727 
2728 	switch (LINUX_IOPRIO_PRIO_CLASS(ioprio)) {
2729 	case LINUX_IOPRIO_CLASS_IDLE:
2730 		rtp->prio = RTP_PRIO_MIN;
2731 		rtp->type = RTP_PRIO_IDLE;
2732 		break;
2733 	case LINUX_IOPRIO_CLASS_BE:
2734 		rtp->prio = LINUX_IOPRIO_PRIO_DATA(ioprio) * LINUX_PRIO_DIVIDER;
2735 		rtp->type = RTP_PRIO_NORMAL;
2736 		break;
2737 	case LINUX_IOPRIO_CLASS_RT:
2738 		rtp->prio = LINUX_IOPRIO_PRIO_DATA(ioprio) * LINUX_PRIO_DIVIDER;
2739 		rtp->type = RTP_PRIO_REALTIME;
2740 		break;
2741 	default:
2742 		return (EINVAL);
2743 	}
2744 	return (0);
2745 }
2746 #undef LINUX_PRIO_DIVIDER
2747 
2748 int
2749 linux_ioprio_get(struct thread *td, struct linux_ioprio_get_args *args)
2750 {
2751 	struct thread *td1;
2752 	struct rtprio rtp;
2753 	struct pgrp *pg;
2754 	struct proc *p;
2755 	int error, found;
2756 
2757 	p = NULL;
2758 	td1 = NULL;
2759 	error = 0;
2760 	found = 0;
2761 	rtp.type = RTP_PRIO_IDLE;
2762 	rtp.prio = RTP_PRIO_MAX;
2763 	switch (args->which) {
2764 	case LINUX_IOPRIO_WHO_PROCESS:
2765 		if (args->who == 0) {
2766 			td1 = td;
2767 			p = td1->td_proc;
2768 			PROC_LOCK(p);
2769 		} else if (args->who > PID_MAX) {
2770 			td1 = linux_tdfind(td, args->who, -1);
2771 			if (td1 != NULL)
2772 				p = td1->td_proc;
2773 		} else
2774 			p = pfind(args->who);
2775 		if (p == NULL)
2776 			return (ESRCH);
2777 		if ((error = p_cansee(td, p))) {
2778 			PROC_UNLOCK(p);
2779 			break;
2780 		}
2781 		if (td1 != NULL) {
2782 			pri_to_rtp(td1, &rtp);
2783 		} else {
2784 			FOREACH_THREAD_IN_PROC(p, td1) {
2785 				linux_up_rtprio_if(td1, &rtp);
2786 			}
2787 		}
2788 		found++;
2789 		PROC_UNLOCK(p);
2790 		break;
2791 	case LINUX_IOPRIO_WHO_PGRP:
2792 		sx_slock(&proctree_lock);
2793 		if (args->who == 0) {
2794 			pg = td->td_proc->p_pgrp;
2795 			PGRP_LOCK(pg);
2796 		} else {
2797 			pg = pgfind(args->who);
2798 			if (pg == NULL) {
2799 				sx_sunlock(&proctree_lock);
2800 				error = ESRCH;
2801 				break;
2802 			}
2803 		}
2804 		sx_sunlock(&proctree_lock);
2805 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
2806 			PROC_LOCK(p);
2807 			if (p->p_state == PRS_NORMAL &&
2808 			    p_cansee(td, p) == 0) {
2809 				FOREACH_THREAD_IN_PROC(p, td1) {
2810 					linux_up_rtprio_if(td1, &rtp);
2811 					found++;
2812 				}
2813 			}
2814 			PROC_UNLOCK(p);
2815 		}
2816 		PGRP_UNLOCK(pg);
2817 		break;
2818 	case LINUX_IOPRIO_WHO_USER:
2819 		if (args->who == 0)
2820 			args->who = td->td_ucred->cr_uid;
2821 		sx_slock(&allproc_lock);
2822 		FOREACH_PROC_IN_SYSTEM(p) {
2823 			PROC_LOCK(p);
2824 			if (p->p_state == PRS_NORMAL &&
2825 			    p->p_ucred->cr_uid == args->who &&
2826 			    p_cansee(td, p) == 0) {
2827 				FOREACH_THREAD_IN_PROC(p, td1) {
2828 					linux_up_rtprio_if(td1, &rtp);
2829 					found++;
2830 				}
2831 			}
2832 			PROC_UNLOCK(p);
2833 		}
2834 		sx_sunlock(&allproc_lock);
2835 		break;
2836 	default:
2837 		error = EINVAL;
2838 		break;
2839 	}
2840 	if (error == 0) {
2841 		if (found != 0)
2842 			td->td_retval[0] = linux_rtprio2ioprio(&rtp);
2843 		else
2844 			error = ESRCH;
2845 	}
2846 	return (error);
2847 }
2848 
2849 int
2850 linux_ioprio_set(struct thread *td, struct linux_ioprio_set_args *args)
2851 {
2852 	struct thread *td1;
2853 	struct rtprio rtp;
2854 	struct pgrp *pg;
2855 	struct proc *p;
2856 	int error;
2857 
2858 	if ((error = linux_ioprio2rtprio(args->ioprio, &rtp)) != 0)
2859 		return (error);
2860 	/* Setting high (REALTIME) priorities requires superuser privileges. */
2861 	if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
2862 	    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
2863 		return (error);
2864 
2865 	p = NULL;
2866 	td1 = NULL;
2867 	switch (args->which) {
2868 	case LINUX_IOPRIO_WHO_PROCESS:
2869 		if (args->who == 0) {
2870 			td1 = td;
2871 			p = td1->td_proc;
2872 			PROC_LOCK(p);
2873 		} else if (args->who > PID_MAX) {
2874 			td1 = linux_tdfind(td, args->who, -1);
2875 			if (td1 != NULL)
2876 				p = td1->td_proc;
2877 		} else
2878 			p = pfind(args->who);
2879 		if (p == NULL)
2880 			return (ESRCH);
2881 		if ((error = p_cansched(td, p))) {
2882 			PROC_UNLOCK(p);
2883 			break;
2884 		}
2885 		if (td1 != NULL) {
2886 			error = rtp_to_pri(&rtp, td1);
2887 		} else {
2888 			FOREACH_THREAD_IN_PROC(p, td1) {
2889 				if ((error = rtp_to_pri(&rtp, td1)) != 0)
2890 					break;
2891 			}
2892 		}
2893 		PROC_UNLOCK(p);
2894 		break;
2895 	case LINUX_IOPRIO_WHO_PGRP:
2896 		sx_slock(&proctree_lock);
2897 		if (args->who == 0) {
2898 			pg = td->td_proc->p_pgrp;
2899 			PGRP_LOCK(pg);
2900 		} else {
2901 			pg = pgfind(args->who);
2902 			if (pg == NULL) {
2903 				sx_sunlock(&proctree_lock);
2904 				error = ESRCH;
2905 				break;
2906 			}
2907 		}
2908 		sx_sunlock(&proctree_lock);
2909 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
2910 			PROC_LOCK(p);
2911 			if (p->p_state == PRS_NORMAL &&
2912 			    p_cansched(td, p) == 0) {
2913 				FOREACH_THREAD_IN_PROC(p, td1) {
2914 					if ((error = rtp_to_pri(&rtp, td1)) != 0)
2915 						break;
2916 				}
2917 			}
2918 			PROC_UNLOCK(p);
2919 			if (error != 0)
2920 				break;
2921 		}
2922 		PGRP_UNLOCK(pg);
2923 		break;
2924 	case LINUX_IOPRIO_WHO_USER:
2925 		if (args->who == 0)
2926 			args->who = td->td_ucred->cr_uid;
2927 		sx_slock(&allproc_lock);
2928 		FOREACH_PROC_IN_SYSTEM(p) {
2929 			PROC_LOCK(p);
2930 			if (p->p_state == PRS_NORMAL &&
2931 			    p->p_ucred->cr_uid == args->who &&
2932 			    p_cansched(td, p) == 0) {
2933 				FOREACH_THREAD_IN_PROC(p, td1) {
2934 					if ((error = rtp_to_pri(&rtp, td1)) != 0)
2935 						break;
2936 				}
2937 			}
2938 			PROC_UNLOCK(p);
2939 			if (error != 0)
2940 				break;
2941 		}
2942 		sx_sunlock(&allproc_lock);
2943 		break;
2944 	default:
2945 		error = EINVAL;
2946 		break;
2947 	}
2948 	return (error);
2949 }
2950 
2951 /* The only flag is O_NONBLOCK */
2952 #define B2L_MQ_FLAGS(bflags)	((bflags) != 0 ? LINUX_O_NONBLOCK : 0)
2953 #define L2B_MQ_FLAGS(lflags)	((lflags) != 0 ? O_NONBLOCK : 0)
2954 
2955 int
2956 linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
2957 {
2958 	struct mq_attr attr;
2959 	int error, flags;
2960 
2961 	flags = linux_common_openflags(args->oflag);
2962 	if ((flags & O_ACCMODE) == O_ACCMODE || (flags & O_EXEC) != 0)
2963 		return (EINVAL);
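	/* FFLAGS() converts the open(2)-style flags into kernel fflags. */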
2964 	flags = FFLAGS(flags);
2965 	if ((flags & O_CREAT) != 0 && args->attr != NULL) {
2966 		error = copyin(args->attr, &attr, sizeof(attr));
2967 		if (error != 0)
2968 			return (error);
2969 		attr.mq_flags = L2B_MQ_FLAGS(attr.mq_flags);
2970 	}
2971 
2972 	return (kern_kmq_open(td, args->name, flags, args->mode,
2973 	    args->attr != NULL ? &attr : NULL));
2974 }
2975 
2976 int
2977 linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
2978 {
2979 	struct kmq_unlink_args bsd_args = {
2980 		.path = PTRIN(args->name)
2981 	};
2982 
2983 	return (sys_kmq_unlink(td, &bsd_args));
2984 }
2985 
2986 int
2987 linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
2988 {
2989 	struct timespec ts, *abs_timeout;
2990 	int error;
2991 
2992 	if (args->abs_timeout == NULL)
2993 		abs_timeout = NULL;
2994 	else {
2995 		error = linux_get_timespec(&ts, args->abs_timeout);
2996 		if (error != 0)
2997 			return (error);
2998 		abs_timeout = &ts;
2999 	}
3000 
3001 	return (kern_kmq_timedsend(td, args->mqd, PTRIN(args->msg_ptr),
3002 		args->msg_len, args->msg_prio, abs_timeout));
3003 }
3004 
3005 int
3006 linux_mq_timedreceive(struct thread *td, struct linux_mq_timedreceive_args *args)
3007 {
3008 	struct timespec ts, *abs_timeout;
3009 	int error;
3010 
3011 	if (args->abs_timeout == NULL)
3012 		abs_timeout = NULL;
3013 	else {
3014 		error = linux_get_timespec(&ts, args->abs_timeout);
3015 		if (error != 0)
3016 			return (error);
3017 		abs_timeout = &ts;
3018 	}
3019 
3020 	return (kern_kmq_timedreceive(td, args->mqd, PTRIN(args->msg_ptr),
3021 		args->msg_len, args->msg_prio, abs_timeout));
3022 }
3023 
3024 int
3025 linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
3026 {
3027 	struct sigevent ev, *evp;
3028 	struct l_sigevent l_ev;
3029 	int error;
3030 
3031 	if (args->sevp == NULL)
3032 		evp = NULL;
3033 	else {
3034 		error = copyin(args->sevp, &l_ev, sizeof(l_ev));
3035 		if (error != 0)
3036 			return (error);
3037 		error = linux_convert_l_sigevent(&l_ev, &ev);
3038 		if (error != 0)
3039 			return (error);
3040 		evp = &ev;
3041 	}
3042 
3043 	return (kern_kmq_notify(td, args->mqd, evp));
3044 }
3045 
3046 int
3047 linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
3048 {
3049 	struct mq_attr attr, oattr;
3050 	int error;
3051 
3052 	if (args->attr != NULL) {
3053 		error = copyin(args->attr, &attr, sizeof(attr));
3054 		if (error != 0)
3055 			return (error);
3056 		attr.mq_flags = L2B_MQ_FLAGS(attr.mq_flags);
3057 	}
3058 
3059 	error = kern_kmq_setattr(td, args->mqd, args->attr != NULL ? &attr : NULL,
3060 	    &oattr);
3061 	if (error == 0 && args->oattr != NULL) {
3062 		oattr.mq_flags = B2L_MQ_FLAGS(oattr.mq_flags);
3063 		bzero(oattr.__reserved, sizeof(oattr.__reserved));
3064 		error = copyout(&oattr, args->oattr, sizeof(oattr));
3065 	}
3066 
3067 	return (error);
3068 }
3069 
3070 MODULE_DEPEND(linux, mqueuefs, 1, 1, 1);
3071