/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2002 Doug Rabson
 * Copyright (c) 1994-1995 Søren Schmidt
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/imgact.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mqueue.h>
#include <sys/mutex.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/procctl.h>
#include <sys/reboot.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/rtprio.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/swap_pager.h>

#ifdef COMPAT_LINUX32
#include <machine/../linux32/linux.h>
#include <machine/../linux32/linux32_proto.h>
#else
#include <machine/../linux/linux.h>
#include <machine/../linux/linux_proto.h>
#endif

#include <compat/linux/linux_common.h>
#include <compat/linux/linux_dtrace.h>
#include <compat/linux/linux_file.h>
#include <compat/linux/linux_mib.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_time.h>
#include <compat/linux/linux_util.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_misc.h>

int stclohz;				/* Statistics clock frequency */

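/*
 * Map Linux resource limit numbers to their FreeBSD counterparts,
 * indexed by the Linux RLIMIT_* value.
 */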
static unsigned int linux_to_bsd_resource[LINUX_RLIM_NLIMITS] = {
	RLIMIT_CPU, RLIMIT_FSIZE, RLIMIT_DATA, RLIMIT_STACK,
	RLIMIT_CORE, RLIMIT_RSS, RLIMIT_NPROC, RLIMIT_NOFILE,
	RLIMIT_MEMLOCK, RLIMIT_AS
};

struct l_sysinfo {
	l_long		uptime;		/* Seconds since boot */
	l_ulong		loads[3];	/* 1, 5, and 15 minute load averages */
#define LINUX_SYSINFO_LOADS_SCALE 65536
	l_ulong		totalram;	/* Total usable main memory size */
	l_ulong		freeram;	/* Available memory size */
	l_ulong		sharedram;	/* Amount of shared memory */
	l_ulong		bufferram;	/* Memory used by buffers */
	l_ulong		totalswap;	/* Total swap space size */
	l_ulong		freeswap;	/* Swap space still available */
	l_ushort	procs;		/* Number of current processes */
	l_ushort	pads;
	l_ulong		totalhigh;
	l_ulong		freehigh;
	l_uint		mem_unit;
	char		_f[20-2*sizeof(l_long)-sizeof(l_int)];	/* padding */
};

struct l_pselect6arg {
	l_uintptr_t	ss;
	l_size_t	ss_len;
};

static int	linux_utimensat_lts_to_ts(struct l_timespec *,
			struct timespec *);
#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
static int	linux_utimensat_lts64_to_ts(struct l_timespec64 *,
			struct timespec *);
#endif
static int	linux_common_utimensat(struct thread *, int,
			const char *, struct timespec *, int);
static int	linux_common_pselect6(struct thread *, l_int,
			l_fd_set *, l_fd_set *, l_fd_set *,
			struct timespec *, l_uintptr_t *);
static int	linux_common_ppoll(struct thread *, struct pollfd *,
			uint32_t, struct timespec *, l_sigset_t *,
			l_size_t);
static int	linux_pollin(struct thread *, struct pollfd *,
			struct pollfd *, u_int);
static int	linux_pollout(struct thread *, struct pollfd *,
			struct pollfd *, u_int);

int
linux_sysinfo(struct thread *td, struct linux_sysinfo_args *args)
{
	struct l_sysinfo sysinfo;
	int i, j;
	struct timespec ts;

	bzero(&sysinfo, sizeof(sysinfo));
	getnanouptime(&ts);
	if (ts.tv_nsec != 0)
		ts.tv_sec++;
	sysinfo.uptime = ts.tv_sec;

	/* Use the information from the mib to get our load averages. */
	for (i = 0; i < 3; i++)
		sysinfo.loads[i] = averunnable.ldavg[i] *
		    LINUX_SYSINFO_LOADS_SCALE / averunnable.fscale;

	sysinfo.totalram = physmem * PAGE_SIZE;
	sysinfo.freeram = (u_long)vm_free_count() * PAGE_SIZE;

	/*
	 * sharedram counts pages allocated to named, swap-backed objects such
	 * as shared memory segments and tmpfs files.  There is no cheap way to
	 * compute this, so just leave the field unpopulated.  Linux itself only
	 * started setting this field in the 3.x timeframe.
	 */
	sysinfo.sharedram = 0;
	sysinfo.bufferram = 0;

	swap_pager_status(&i, &j);
	sysinfo.totalswap = i * PAGE_SIZE;
	sysinfo.freeswap = (i - j) * PAGE_SIZE;

	sysinfo.procs = nprocs;

	/*
	 * Platforms supported by the emulation layer do not have a notion of
	 * high memory.
	 */
	sysinfo.totalhigh = 0;
	sysinfo.freehigh = 0;

	sysinfo.mem_unit = 1;

	return (copyout(&sysinfo, args->info, sizeof(sysinfo)));
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_alarm(struct thread *td, struct linux_alarm_args *args)
{
	struct itimerval it, old_it;
	u_int secs;
	int error __diagused;

	secs = args->secs;
	/*
	 * Linux alarm() is always successful.  Limit secs to INT32_MAX / 2
	 * to match kern_setitimer()'s limit and avoid an error from it.
	 *
	 * XXX: Linux limits secs to INT_MAX on 32-bit platforms and does
	 * not limit it at all on 64-bit ones.
	 */
	if (secs > INT32_MAX / 2)
		secs = INT32_MAX / 2;

	it.it_value.tv_sec = secs;
	it.it_value.tv_usec = 0;
	timevalclear(&it.it_interval);
	error = kern_setitimer(td, ITIMER_REAL, &it, &old_it);
	KASSERT(error == 0, ("kern_setitimer returns %d", error));

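	/*
	 * Round the remaining time to whole seconds the way Linux
	 * alarm(2) reports it: half a second or more rounds up, and a
	 * nonzero remainder below one second still yields 1.
	 */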
	if ((old_it.it_value.tv_sec == 0 && old_it.it_value.tv_usec > 0) ||
	    old_it.it_value.tv_usec >= 500000)
		old_it.it_value.tv_sec++;
	td->td_retval[0] = old_it.it_value.tv_sec;
	return (0);
}
#endif

int
linux_brk(struct thread *td, struct linux_brk_args *args)
{
	struct vmspace *vm = td->td_proc->p_vmspace;
	uintptr_t new, old;

	old = (uintptr_t)vm->vm_daddr + ctob(vm->vm_dsize);
	new = (uintptr_t)args->dsend;
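	/*
	 * Linux brk(2) returns the new break on success and the current
	 * break on failure; it never returns the usual -1/errno pair.
	 */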
	if ((caddr_t)new > vm->vm_daddr && !kern_break(td, &new))
		td->td_retval[0] = (register_t)new;
	else
		td->td_retval[0] = (register_t)old;

	return (0);
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_select(struct thread *td, struct linux_select_args *args)
{
	l_timeval ltv;
	struct timeval tv0, tv1, utv, *tvp;
	int error;

	/*
	 * Store current time for computation of the amount of
	 * time left.
	 */
	if (args->timeout) {
		if ((error = copyin(args->timeout, &ltv, sizeof(ltv))))
			goto select_out;
		utv.tv_sec = ltv.tv_sec;
		utv.tv_usec = ltv.tv_usec;

		if (itimerfix(&utv)) {
			/*
			 * The timeval was invalid.  Convert it to something
			 * valid that will act as it does under Linux.
			 */
			utv.tv_sec += utv.tv_usec / 1000000;
			utv.tv_usec %= 1000000;
			if (utv.tv_usec < 0) {
				utv.tv_sec -= 1;
				utv.tv_usec += 1000000;
			}
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		}
		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_select(td, args->nfds, args->readfds, args->writefds,
	    args->exceptfds, tvp, LINUX_NFDBITS);
	if (error)
		goto select_out;

	if (args->timeout) {
		if (td->td_retval[0]) {
			/*
			 * Compute how much time was left of the timeout,
			 * by subtracting the current time and the time
			 * before we started the call, and subtracting
			 * that result from the user-supplied value.
			 */
			microtime(&tv1);
			timevalsub(&tv1, &tv0);
			timevalsub(&utv, &tv1);
			if (utv.tv_sec < 0)
				timevalclear(&utv);
		} else
			timevalclear(&utv);
		ltv.tv_sec = utv.tv_sec;
		ltv.tv_usec = utv.tv_usec;
		if ((error = copyout(&ltv, args->timeout, sizeof(ltv))))
			goto select_out;
	}

select_out:
	return (error);
}
#endif

int
linux_mremap(struct thread *td, struct linux_mremap_args *args)
{
	uintptr_t addr;
	size_t len;
	int error = 0;

	if (args->flags & ~(LINUX_MREMAP_FIXED | LINUX_MREMAP_MAYMOVE)) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	/*
	 * Check for the page alignment.
	 * Linux defines PAGE_MASK to be FreeBSD ~PAGE_MASK.
	 */
	if (args->addr & PAGE_MASK) {
		td->td_retval[0] = 0;
		return (EINVAL);
	}

	args->new_len = round_page(args->new_len);
	args->old_len = round_page(args->old_len);

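	/*
	 * Growing the region in place is not supported; only shrinking,
	 * which is done below by unmapping the tail of the mapping.
	 */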
	if (args->new_len > args->old_len) {
		td->td_retval[0] = 0;
		return (ENOMEM);
	}

	if (args->new_len < args->old_len) {
		addr = args->addr + args->new_len;
		len = args->old_len - args->new_len;
		error = kern_munmap(td, addr, len);
	}

	td->td_retval[0] = error ? 0 : (uintptr_t)args->addr;
	return (error);
}

#define LINUX_MS_ASYNC		0x0001
#define LINUX_MS_INVALIDATE	0x0002
#define LINUX_MS_SYNC		0x0004

int
linux_msync(struct thread *td, struct linux_msync_args *args)
{

	return (kern_msync(td, args->addr, args->len,
	    args->fl & ~LINUX_MS_SYNC));
}

int
linux_mprotect(struct thread *td, struct linux_mprotect_args *uap)
{

	return (linux_mprotect_common(td, PTROUT(uap->addr), uap->len,
	    uap->prot));
}

int
linux_madvise(struct thread *td, struct linux_madvise_args *uap)
{

	return (linux_madvise_common(td, PTROUT(uap->addr), uap->len,
	    uap->behav));
}

int
linux_mmap2(struct thread *td, struct linux_mmap2_args *uap)
{
#if defined(LINUX_ARCHWANT_MMAP2PGOFF)
	/*
	 * On architectures where sizeof(off_t) < sizeof(loff_t), mmap is
	 * implemented via the mmap2 syscall and the offset is expressed in
	 * multiples of the page size.
	 */
	return (linux_mmap_common(td, PTROUT(uap->addr), uap->len, uap->prot,
	    uap->flags, uap->fd, (uint64_t)(uint32_t)uap->pgoff * PAGE_SIZE));
#else
	return (linux_mmap_common(td, PTROUT(uap->addr), uap->len, uap->prot,
	    uap->flags, uap->fd, uap->pgoff));
#endif
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_time(struct thread *td, struct linux_time_args *args)
{
	struct timeval tv;
	l_time_t tm;
	int error;

	microtime(&tv);
	tm = tv.tv_sec;
	if (args->tm && (error = copyout(&tm, args->tm, sizeof(tm))))
		return (error);
	td->td_retval[0] = tm;
	return (0);
}
#endif

struct l_times_argv {
	l_clock_t	tms_utime;
	l_clock_t	tms_stime;
	l_clock_t	tms_cutime;
	l_clock_t	tms_cstime;
};

/*
 * Glibc versions prior to 2.2.1 always use a hard-coded CLK_TCK value.
 * Since 2.2.1 Glibc uses the value exported from the kernel via the
 * AT_CLKTCK auxiliary vector entry.
 */
#define CLK_TCK 100

#define CONVOTCK(r)	(r.tv_sec * CLK_TCK + r.tv_usec / (1000000 / CLK_TCK))
#define CONVNTCK(r)	(r.tv_sec * stclohz + r.tv_usec / (1000000 / stclohz))

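/*
 * CONVOTCK() converts using the hard-coded CLK_TCK above, as expected by
 * binaries built for pre-2.4 kernels; CONVNTCK() uses the statistics
 * clock frequency instead.
 */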
#define CONVTCK(r)	(linux_kernver(td) >= LINUX_KERNVER(2,4,0) ?	\
			    CONVNTCK(r) : CONVOTCK(r))

int
linux_times(struct thread *td, struct linux_times_args *args)
{
	struct timeval tv, utime, stime, cutime, cstime;
	struct l_times_argv tms;
	struct proc *p;
	int error;

	if (args->buf != NULL) {
		p = td->td_proc;
		PROC_LOCK(p);
		PROC_STATLOCK(p);
		calcru(p, &utime, &stime);
		PROC_STATUNLOCK(p);
		calccru(p, &cutime, &cstime);
		PROC_UNLOCK(p);

		tms.tms_utime = CONVTCK(utime);
		tms.tms_stime = CONVTCK(stime);

		tms.tms_cutime = CONVTCK(cutime);
		tms.tms_cstime = CONVTCK(cstime);

		if ((error = copyout(&tms, args->buf, sizeof(tms))))
			return (error);
	}

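	/* The return value of times(2) is the system uptime in ticks. */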
	microuptime(&tv);
	td->td_retval[0] = (int)CONVTCK(tv);
	return (0);
}

int
linux_newuname(struct thread *td, struct linux_newuname_args *args)
{
	struct l_new_utsname utsname;
	char osname[LINUX_MAX_UTSNAME];
	char osrelease[LINUX_MAX_UTSNAME];
	char *p;

	linux_get_osname(td, osname);
	linux_get_osrelease(td, osrelease);

	bzero(&utsname, sizeof(utsname));
	strlcpy(utsname.sysname, osname, LINUX_MAX_UTSNAME);
	getcredhostname(td->td_ucred, utsname.nodename, LINUX_MAX_UTSNAME);
	getcreddomainname(td->td_ucred, utsname.domainname, LINUX_MAX_UTSNAME);
	strlcpy(utsname.release, osrelease, LINUX_MAX_UTSNAME);
	strlcpy(utsname.version, version, LINUX_MAX_UTSNAME);
	for (p = utsname.version; *p != '\0'; ++p)
		if (*p == '\n') {
			*p = '\0';
			break;
		}
#if defined(__amd64__)
	/*
	 * On amd64, Linux uname(2) needs to return "x86_64"
	 * for both 64-bit and 32-bit applications.  On 32-bit,
	 * the string returned by getauxval(AT_PLATFORM) needs
	 * to remain "i686", though.
	 */
#if defined(COMPAT_LINUX32)
	if (linux32_emulate_i386)
		strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
	else
#endif
	strlcpy(utsname.machine, "x86_64", LINUX_MAX_UTSNAME);
#elif defined(__aarch64__)
	strlcpy(utsname.machine, "aarch64", LINUX_MAX_UTSNAME);
#elif defined(__i386__)
	strlcpy(utsname.machine, "i686", LINUX_MAX_UTSNAME);
#endif

	return (copyout(&utsname, args->buf, sizeof(utsname)));
}

struct l_utimbuf {
	l_time_t l_actime;
	l_time_t l_modtime;
};

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utime(struct thread *td, struct linux_utime_args *args)
{
	struct timeval tv[2], *tvp;
	struct l_utimbuf lut;
	int error;

	if (args->times) {
		if ((error = copyin(args->times, &lut, sizeof lut)) != 0)
			return (error);
		tv[0].tv_sec = lut.l_actime;
		tv[0].tv_usec = 0;
		tv[1].tv_sec = lut.l_modtime;
		tv[1].tv_usec = 0;
		tvp = tv;
	} else
		tvp = NULL;

	return (kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
	    tvp, UIO_SYSSPACE));
}
#endif

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_utimes(struct thread *td, struct linux_utimes_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	int error;

	if (args->tptr != NULL) {
		if ((error = copyin(args->tptr, ltv, sizeof ltv)) != 0)
			return (error);
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	return (kern_utimesat(td, AT_FDCWD, args->fname, UIO_USERSPACE,
	    tvp, UIO_SYSSPACE));
}
#endif

static int
linux_utimensat_lts_to_ts(struct l_timespec *l_times, struct timespec *times)
{

	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
	    l_times->tv_nsec != LINUX_UTIME_NOW &&
	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
		return (EINVAL);

	times->tv_sec = l_times->tv_sec;
	switch (l_times->tv_nsec)
	{
	case LINUX_UTIME_OMIT:
		times->tv_nsec = UTIME_OMIT;
		break;
	case LINUX_UTIME_NOW:
		times->tv_nsec = UTIME_NOW;
		break;
	default:
		times->tv_nsec = l_times->tv_nsec;
	}

	return (0);
}

static int
linux_common_utimensat(struct thread *td, int ldfd, const char *pathname,
    struct timespec *timesp, int lflags)
{
	int dfd, flags = 0;

	dfd = (ldfd == LINUX_AT_FDCWD) ? AT_FDCWD : ldfd;

	if (lflags & ~(LINUX_AT_SYMLINK_NOFOLLOW | LINUX_AT_EMPTY_PATH))
		return (EINVAL);

	if (timesp != NULL) {
		/*
		 * This breaks POSIX, but is what the Linux kernel does
		 * _on purpose_ (documented in the man page for utimensat(2)),
		 * so we must follow that behaviour.
		 */
		if (timesp[0].tv_nsec == UTIME_OMIT &&
		    timesp[1].tv_nsec == UTIME_OMIT)
			return (0);
	}

	if (lflags & LINUX_AT_SYMLINK_NOFOLLOW)
		flags |= AT_SYMLINK_NOFOLLOW;
	if (lflags & LINUX_AT_EMPTY_PATH)
		flags |= AT_EMPTY_PATH;

	if (pathname != NULL)
		return (kern_utimensat(td, dfd, pathname,
		    UIO_USERSPACE, timesp, UIO_SYSSPACE, flags));

	if (lflags != 0)
		return (EINVAL);

	return (kern_futimens(td, dfd, timesp, UIO_SYSSPACE));
}

int
linux_utimensat(struct thread *td, struct linux_utimensat_args *args)
{
	struct l_timespec l_times[2];
	struct timespec times[2], *timesp;
	int error;

	if (args->times != NULL) {
		error = copyin(args->times, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		error = linux_utimensat_lts_to_ts(&l_times[0], &times[0]);
		if (error != 0)
			return (error);
		error = linux_utimensat_lts_to_ts(&l_times[1], &times[1]);
		if (error != 0)
			return (error);
		timesp = times;
	} else
		timesp = NULL;

	return (linux_common_utimensat(td, args->dfd, args->pathname,
	    timesp, args->flags));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
static int
linux_utimensat_lts64_to_ts(struct l_timespec64 *l_times, struct timespec *times)
{

	/* Zero out the padding in compat mode. */
	l_times->tv_nsec &= 0xFFFFFFFFUL;

	if (l_times->tv_nsec != LINUX_UTIME_OMIT &&
	    l_times->tv_nsec != LINUX_UTIME_NOW &&
	    (l_times->tv_nsec < 0 || l_times->tv_nsec > 999999999))
		return (EINVAL);

	times->tv_sec = l_times->tv_sec;
	switch (l_times->tv_nsec)
	{
	case LINUX_UTIME_OMIT:
		times->tv_nsec = UTIME_OMIT;
		break;
	case LINUX_UTIME_NOW:
		times->tv_nsec = UTIME_NOW;
		break;
	default:
		times->tv_nsec = l_times->tv_nsec;
	}

	return (0);
}

int
linux_utimensat_time64(struct thread *td, struct linux_utimensat_time64_args *args)
{
	struct l_timespec64 l_times[2];
	struct timespec times[2], *timesp;
	int error;

	if (args->times64 != NULL) {
		error = copyin(args->times64, l_times, sizeof(l_times));
		if (error != 0)
			return (error);

		error = linux_utimensat_lts64_to_ts(&l_times[0], &times[0]);
		if (error != 0)
			return (error);
		error = linux_utimensat_lts64_to_ts(&l_times[1], &times[1]);
		if (error != 0)
			return (error);
		timesp = times;
	} else
		timesp = NULL;

	return (linux_common_utimensat(td, args->dfd, args->pathname,
	    timesp, args->flags));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_futimesat(struct thread *td, struct linux_futimesat_args *args)
{
	l_timeval ltv[2];
	struct timeval tv[2], *tvp = NULL;
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	if (args->utimes != NULL) {
		if ((error = copyin(args->utimes, ltv, sizeof ltv)) != 0)
			return (error);
		tv[0].tv_sec = ltv[0].tv_sec;
		tv[0].tv_usec = ltv[0].tv_usec;
		tv[1].tv_sec = ltv[1].tv_sec;
		tv[1].tv_usec = ltv[1].tv_usec;
		tvp = tv;
	}

	return (kern_utimesat(td, dfd, args->filename, UIO_USERSPACE,
	    tvp, UIO_SYSSPACE));
}
#endif

static int
linux_common_wait(struct thread *td, idtype_t idtype, int id, int *statusp,
    int options, void *rup, l_siginfo_t *infop)
{
	l_siginfo_t lsi;
	siginfo_t siginfo;
	struct __wrusage wru;
	int error, status, tmpstat, sig;

	error = kern_wait6(td, idtype, id, &status, options,
	    rup != NULL ? &wru : NULL, &siginfo);

	if (error == 0 && statusp) {
		tmpstat = status & 0xffff;
		if (WIFSIGNALED(tmpstat)) {
			tmpstat = (tmpstat & 0xffffff80) |
			    bsd_to_linux_signal(WTERMSIG(tmpstat));
		} else if (WIFSTOPPED(tmpstat)) {
			tmpstat = (tmpstat & 0xffff00ff) |
			    (bsd_to_linux_signal(WSTOPSIG(tmpstat)) << 8);
#if defined(__aarch64__) || (defined(__amd64__) && !defined(COMPAT_LINUX32))
			if (WSTOPSIG(status) == SIGTRAP) {
				tmpstat = linux_ptrace_status(td,
				    siginfo.si_pid, tmpstat);
			}
#endif
		} else if (WIFCONTINUED(tmpstat)) {
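			/* Linux encodes a continued child as 0xffff. */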
			tmpstat = 0xffff;
		}
		error = copyout(&tmpstat, statusp, sizeof(int));
	}
	if (error == 0 && rup != NULL)
		error = linux_copyout_rusage(&wru.wru_self, rup);
	if (error == 0 && infop != NULL && td->td_retval[0] != 0) {
		sig = bsd_to_linux_signal(siginfo.si_signo);
		siginfo_to_lsiginfo(&siginfo, &lsi, sig);
		error = copyout(&lsi, infop, sizeof(lsi));
	}

	return (error);
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_waitpid(struct thread *td, struct linux_waitpid_args *args)
{
	struct linux_wait4_args wait4_args = {
		.pid = args->pid,
		.status = args->status,
		.options = args->options,
		.rusage = NULL,
	};

	return (linux_wait4(td, &wait4_args));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_wait4(struct thread *td, struct linux_wait4_args *args)
{
	struct proc *p;
	int options, id, idtype;

	if (args->options & ~(LINUX_WUNTRACED | LINUX_WNOHANG |
	    LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
		return (EINVAL);

	/* -INT_MIN is not defined. */
	if (args->pid == INT_MIN)
		return (ESRCH);

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	/*
	 * For backward compatibility we implicitly add flags WEXITED
	 * and WTRAPPED here.
	 */
	options |= WEXITED | WTRAPPED;

	if (args->pid == WAIT_ANY) {
		idtype = P_ALL;
		id = 0;
	} else if (args->pid < 0) {
		idtype = P_PGID;
		id = (id_t)-args->pid;
	} else if (args->pid == 0) {
		idtype = P_PGID;
		p = td->td_proc;
		PROC_LOCK(p);
		id = p->p_pgid;
		PROC_UNLOCK(p);
	} else {
		idtype = P_PID;
		id = (id_t)args->pid;
	}

	return (linux_common_wait(td, idtype, id, args->status, options,
	    args->rusage, NULL));
}

int
linux_waitid(struct thread *td, struct linux_waitid_args *args)
{
	idtype_t idtype;
	int error, options;
	struct proc *p;
	pid_t id;

	if (args->options & ~(LINUX_WNOHANG | LINUX_WNOWAIT | LINUX_WEXITED |
	    LINUX_WSTOPPED | LINUX_WCONTINUED | __WCLONE | __WNOTHREAD | __WALL))
		return (EINVAL);

	options = 0;
	linux_to_bsd_waitopts(args->options, &options);

	id = args->id;
	switch (args->idtype) {
	case LINUX_P_ALL:
		idtype = P_ALL;
		break;
	case LINUX_P_PID:
		if (args->id <= 0)
			return (EINVAL);
		idtype = P_PID;
		break;
	case LINUX_P_PGID:
		if (linux_kernver(td) >= LINUX_KERNVER(5,4,0) && args->id == 0) {
			p = td->td_proc;
			PROC_LOCK(p);
			id = p->p_pgid;
			PROC_UNLOCK(p);
		} else if (args->id <= 0)
			return (EINVAL);
		idtype = P_PGID;
		break;
	case LINUX_P_PIDFD:
		LINUX_RATELIMIT_MSG("unsupported waitid P_PIDFD idtype");
		return (ENOSYS);
	default:
		return (EINVAL);
	}

	error = linux_common_wait(td, idtype, id, NULL, options,
	    args->rusage, args->info);
	td->td_retval[0] = 0;

	return (error);
}

#ifdef LINUX_LEGACY_SYSCALLS
int
linux_mknod(struct thread *td, struct linux_mknod_args *args)
{
	int error;

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, AT_FDCWD, args->path, UIO_USERSPACE,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, AT_FDCWD, args->path, UIO_USERSPACE,
		    args->mode, linux_decode_dev(args->dev));
		break;

	case S_IFDIR:
		error = EPERM;
		break;

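	/* A zero file type is treated as S_IFREG, as Linux does. */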
	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, AT_FDCWD, args->path, UIO_USERSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}
#endif

int
linux_mknodat(struct thread *td, struct linux_mknodat_args *args)
{
	int error, dfd;

	dfd = (args->dfd == LINUX_AT_FDCWD) ? AT_FDCWD : args->dfd;

	switch (args->mode & S_IFMT) {
	case S_IFIFO:
	case S_IFSOCK:
		error = kern_mkfifoat(td, dfd, args->filename, UIO_USERSPACE,
		    args->mode);
		break;

	case S_IFCHR:
	case S_IFBLK:
		error = kern_mknodat(td, dfd, args->filename, UIO_USERSPACE,
		    args->mode, linux_decode_dev(args->dev));
		break;

	case S_IFDIR:
		error = EPERM;
		break;

	case 0:
		args->mode |= S_IFREG;
		/* FALLTHROUGH */
	case S_IFREG:
		error = kern_openat(td, dfd, args->filename, UIO_USERSPACE,
		    O_WRONLY | O_CREAT | O_TRUNC, args->mode);
		if (error == 0)
			kern_close(td, td->td_retval[0]);
		break;

	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/*
 * UGH! This is just about the dumbest idea I've ever heard!!
 */
int
linux_personality(struct thread *td, struct linux_personality_args *args)
{
	struct linux_pemuldata *pem;
	struct proc *p = td->td_proc;
	uint32_t old;

	PROC_LOCK(p);
	pem = pem_find(p);
	old = pem->persona;
	if (args->per != 0xffffffff)
		pem->persona = args->per;
	PROC_UNLOCK(p);

	td->td_retval[0] = old;
	return (0);
}

struct l_itimerval {
	l_timeval it_interval;
	l_timeval it_value;
};

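/*
 * Field-by-field itimerval copy.  Despite the name it is used for
 * conversion in both directions, since the two structures differ only
 * in the width of their fields.
 */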
#define B2L_ITIMERVAL(bip, lip)						\
	(bip)->it_interval.tv_sec = (lip)->it_interval.tv_sec;		\
	(bip)->it_interval.tv_usec = (lip)->it_interval.tv_usec;	\
	(bip)->it_value.tv_sec = (lip)->it_value.tv_sec;		\
	(bip)->it_value.tv_usec = (lip)->it_value.tv_usec;

int
linux_setitimer(struct thread *td, struct linux_setitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv, oitv;

	if (uap->itv == NULL) {
		uap->itv = uap->oitv;
		return (linux_getitimer(td, (struct linux_getitimer_args *)uap));
	}

	error = copyin(uap->itv, &ls, sizeof(ls));
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&aitv, &ls);
	error = kern_setitimer(td, uap->which, &aitv, &oitv);
	if (error != 0 || uap->oitv == NULL)
		return (error);
	B2L_ITIMERVAL(&ls, &oitv);

	return (copyout(&ls, uap->oitv, sizeof(ls)));
}

int
linux_getitimer(struct thread *td, struct linux_getitimer_args *uap)
{
	int error;
	struct l_itimerval ls;
	struct itimerval aitv;

	error = kern_getitimer(td, uap->which, &aitv);
	if (error != 0)
		return (error);
	B2L_ITIMERVAL(&ls, &aitv);
	return (copyout(&ls, uap->itv, sizeof(ls)));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_nice(struct thread *td, struct linux_nice_args *args)
{

	return (kern_setpriority(td, PRIO_PROCESS, 0, args->inc));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_setgroups(struct thread *td, struct linux_setgroups_args *args)
{
	struct ucred *newcred, *oldcred;
	l_gid_t *linux_gidset;
	int ngrp, error;
	struct proc *p;

	ngrp = args->gidsetsize;
	if (ngrp < 0 || ngrp >= ngroups_max)
		return (EINVAL);
	linux_gidset = malloc(ngrp * sizeof(*linux_gidset), M_LINUX, M_WAITOK);
	error = copyin(args->grouplist, linux_gidset, ngrp * sizeof(l_gid_t));
	if (error)
		goto out;
	newcred = crget();
	crextend(newcred, ngrp);
	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;
	crcopy(newcred, oldcred);

	if ((error = priv_check_cred(oldcred, PRIV_CRED_SETGROUPS)) != 0) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	newcred->cr_ngroups = ngrp;
	for (int i = 0; i < ngrp; i++)
		newcred->cr_groups[i] = linux_gidset[i];
	newcred->cr_flags |= CRED_FLAG_GROUPSET;

	setsugid(p);
	proc_set_cred(p, newcred);
	PROC_UNLOCK(p);
	crfree(oldcred);
	error = 0;
out:
	free(linux_gidset, M_LINUX);
	return (error);
}

int
linux_getgroups(struct thread *td, struct linux_getgroups_args *args)
{
	struct ucred *cred;
	l_gid_t *linux_gidset;
	gid_t *bsd_gidset;
	int bsd_gidsetsz, ngrp, error;

	cred = td->td_ucred;
	bsd_gidset = cred->cr_groups;
	bsd_gidsetsz = cred->cr_ngroups;

	if ((ngrp = args->gidsetsize) == 0) {
		td->td_retval[0] = bsd_gidsetsz;
		return (0);
	}

	if (ngrp < bsd_gidsetsz)
		return (EINVAL);

	ngrp = 0;
	linux_gidset = malloc(bsd_gidsetsz * sizeof(*linux_gidset),
	    M_LINUX, M_WAITOK);
	while (ngrp < bsd_gidsetsz) {
		linux_gidset[ngrp] = bsd_gidset[ngrp];
		ngrp++;
	}

	error = copyout(linux_gidset, args->grouplist, ngrp * sizeof(l_gid_t));
	free(linux_gidset, M_LINUX);
	if (error)
		return (error);

	td->td_retval[0] = ngrp;
	return (0);
}

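/*
 * Report fabricated values for Linux resource limits that have no exact
 * FreeBSD counterpart.  Only active when the linux_dummy_rlimits knob
 * is enabled.
 */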
static bool
linux_get_dummy_limit(struct thread *td, l_uint resource, struct rlimit *rlim)
{
	ssize_t size;
	int res, error;

	if (linux_dummy_rlimits == 0)
		return (false);

	size = sizeof(res);
	switch (resource) {
	case LINUX_RLIMIT_LOCKS:
	case LINUX_RLIMIT_RTTIME:
		rlim->rlim_cur = LINUX_RLIM_INFINITY;
		rlim->rlim_max = LINUX_RLIM_INFINITY;
		return (true);
	case LINUX_RLIMIT_NICE:
	case LINUX_RLIMIT_RTPRIO:
		rlim->rlim_cur = 0;
		rlim->rlim_max = 0;
		return (true);
	case LINUX_RLIMIT_SIGPENDING:
		error = kernel_sysctlbyname(td,
		    "kern.sigqueue.max_pending_per_proc",
		    &res, &size, 0, 0, 0, 0);
		if (error != 0)
			return (false);
		rlim->rlim_cur = res;
		rlim->rlim_max = res;
		return (true);
	case LINUX_RLIMIT_MSGQUEUE:
		error = kernel_sysctlbyname(td,
		    "kern.ipc.msgmnb", &res, &size, 0, 0, 0, 0);
		if (error != 0)
			return (false);
		rlim->rlim_cur = res;
		rlim->rlim_max = res;
		return (true);
	default:
		return (false);
	}
}

int
linux_setrlimit(struct thread *td, struct linux_setrlimit_args *args)
{
	struct rlimit bsd_rlim;
	struct l_rlimit rlim;
	u_int which;
	int error;

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	error = copyin(args->rlim, &rlim, sizeof(rlim));
	if (error)
		return (error);

	bsd_rlim.rlim_cur = (rlim_t)rlim.rlim_cur;
	bsd_rlim.rlim_max = (rlim_t)rlim.rlim_max;
	return (kern_setrlimit(td, which, &bsd_rlim));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_old_getrlimit(struct thread *td, struct linux_old_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(td, args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

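	/*
	 * Old Linux binaries treat rlim_t as signed, so clamp values
	 * that would appear negative to the maximum positive value,
	 * the way Linux's old_getrlimit() does.
	 */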
#ifdef COMPAT_LINUX32
	rlim.rlim_cur = (unsigned int)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == UINT_MAX)
		rlim.rlim_cur = INT_MAX;
	rlim.rlim_max = (unsigned int)bsd_rlim.rlim_max;
	if (rlim.rlim_max == UINT_MAX)
		rlim.rlim_max = INT_MAX;
#else
	rlim.rlim_cur = (unsigned long)bsd_rlim.rlim_cur;
	if (rlim.rlim_cur == ULONG_MAX)
		rlim.rlim_cur = LONG_MAX;
	rlim.rlim_max = (unsigned long)bsd_rlim.rlim_max;
	if (rlim.rlim_max == ULONG_MAX)
		rlim.rlim_max = LONG_MAX;
#endif
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_getrlimit(struct thread *td, struct linux_getrlimit_args *args)
{
	struct l_rlimit rlim;
	struct rlimit bsd_rlim;
	u_int which;

	if (linux_get_dummy_limit(td, args->resource, &bsd_rlim)) {
		rlim.rlim_cur = bsd_rlim.rlim_cur;
		rlim.rlim_max = bsd_rlim.rlim_max;
		return (copyout(&rlim, args->rlim, sizeof(rlim)));
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	lim_rlimit(td, which, &bsd_rlim);

	rlim.rlim_cur = (l_ulong)bsd_rlim.rlim_cur;
	rlim.rlim_max = (l_ulong)bsd_rlim.rlim_max;
	return (copyout(&rlim, args->rlim, sizeof(rlim)));
}

int
linux_sched_setscheduler(struct thread *td,
    struct linux_sched_setscheduler_args *args)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}

	error = copyin(args->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	if (linux_map_sched_prio) {
		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0)
				return (EINVAL);

			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO)
				return (EINVAL);

			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_setscheduler(td, tdt, policy, &sched_param);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_getscheduler(struct thread *td,
    struct linux_sched_getscheduler_args *args)
{
	struct thread *tdt;
	int error, policy;

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getscheduler(td, tdt, &policy);
	PROC_UNLOCK(tdt->td_proc);

	switch (policy) {
	case SCHED_OTHER:
		td->td_retval[0] = LINUX_SCHED_OTHER;
		break;
	case SCHED_FIFO:
		td->td_retval[0] = LINUX_SCHED_FIFO;
		break;
	case SCHED_RR:
		td->td_retval[0] = LINUX_SCHED_RR;
		break;
	}
	return (error);
}

int
linux_sched_get_priority_max(struct thread *td,
    struct linux_sched_get_priority_max_args *args)
{
	struct sched_get_priority_max_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = LINUX_MAX_RT_PRIO - 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_max(td, &bsd));
}

int
linux_sched_get_priority_min(struct thread *td,
    struct linux_sched_get_priority_min_args *args)
{
	struct sched_get_priority_min_args bsd;

	if (linux_map_sched_prio) {
		switch (args->policy) {
		case LINUX_SCHED_OTHER:
			td->td_retval[0] = 0;
			return (0);
		case LINUX_SCHED_FIFO:
		case LINUX_SCHED_RR:
			td->td_retval[0] = 1;
			return (0);
		default:
			return (EINVAL);
		}
	}

	switch (args->policy) {
	case LINUX_SCHED_OTHER:
		bsd.policy = SCHED_OTHER;
		break;
	case LINUX_SCHED_FIFO:
		bsd.policy = SCHED_FIFO;
		break;
	case LINUX_SCHED_RR:
		bsd.policy = SCHED_RR;
		break;
	default:
		return (EINVAL);
	}
	return (sys_sched_get_priority_min(td, &bsd));
}

#define REBOOT_CAD_ON	0x89abcdef
#define REBOOT_CAD_OFF	0
#define REBOOT_HALT	0xcdef0123
#define REBOOT_RESTART	0x01234567
#define REBOOT_RESTART2	0xA1B2C3D4
#define REBOOT_POWEROFF	0x4321FEDC
#define REBOOT_MAGIC1	0xfee1dead
#define REBOOT_MAGIC2	0x28121969
#define REBOOT_MAGIC2A	0x05121996
#define REBOOT_MAGIC2B	0x16041998

int
linux_reboot(struct thread *td, struct linux_reboot_args *args)
{
	struct reboot_args bsd_args;

	if (args->magic1 != REBOOT_MAGIC1)
		return (EINVAL);

	switch (args->magic2) {
	case REBOOT_MAGIC2:
	case REBOOT_MAGIC2A:
	case REBOOT_MAGIC2B:
		break;
	default:
		return (EINVAL);
	}

	switch (args->cmd) {
	case REBOOT_CAD_ON:
	case REBOOT_CAD_OFF:
		return (priv_check(td, PRIV_REBOOT));
	case REBOOT_HALT:
		bsd_args.opt = RB_HALT;
		break;
	case REBOOT_RESTART:
	case REBOOT_RESTART2:
		bsd_args.opt = 0;
		break;
	case REBOOT_POWEROFF:
		bsd_args.opt = RB_POWEROFF;
		break;
	default:
		return (EINVAL);
	}
	return (sys_reboot(td, &bsd_args));
}

int
linux_getpid(struct thread *td, struct linux_getpid_args *args)
{

	td->td_retval[0] = td->td_proc->p_pid;

	return (0);
}

int
linux_gettid(struct thread *td, struct linux_gettid_args *args)
{
	struct linux_emuldata *em;

	em = em_find(td);
	KASSERT(em != NULL, ("gettid: emuldata not found.\n"));

	td->td_retval[0] = em->em_tid;

	return (0);
}

int
linux_getppid(struct thread *td, struct linux_getppid_args *args)
{

	td->td_retval[0] = kern_getppid(td);
	return (0);
}

int
linux_getgid(struct thread *td, struct linux_getgid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_rgid;
	return (0);
}

int
linux_getuid(struct thread *td, struct linux_getuid_args *args)
{

	td->td_retval[0] = td->td_ucred->cr_ruid;
	return (0);
}

int
linux_getsid(struct thread *td, struct linux_getsid_args *args)
{

	return (kern_getsid(td, args->pid));
}

int
linux_getpriority(struct thread *td, struct linux_getpriority_args *args)
{
	int error;

	error = kern_getpriority(td, args->which, args->who);
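	/*
	 * The Linux syscall returns 20 - nice so that the result is
	 * never negative; the C library converts it back.
	 */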
	td->td_retval[0] = 20 - td->td_retval[0];
	return (error);
}

int
linux_sethostname(struct thread *td, struct linux_sethostname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_HOSTNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->hostname,
	    args->len, 0, 0));
}

int
linux_setdomainname(struct thread *td, struct linux_setdomainname_args *args)
{
	int name[2];

	name[0] = CTL_KERN;
	name[1] = KERN_NISDOMAINNAME;
	return (userland_sysctl(td, name, 2, 0, 0, 0, args->name,
	    args->len, 0, 0));
}

int
linux_exit_group(struct thread *td, struct linux_exit_group_args *args)
{

	LINUX_CTR2(exit_group, "thread(%d) (%d)", td->td_tid,
	    args->error_code);

	/*
	 * XXX: we should send a signal to the parent if
	 * SIGNAL_EXIT_GROUP is set.  We ignore that (temporarily?)
	 * as it doesn't occur often.
	 */
	exit1(td, args->error_code, 0);
	/* NOTREACHED */
}

#define _LINUX_CAPABILITY_VERSION_1	0x19980330
#define _LINUX_CAPABILITY_VERSION_2	0x20071026
#define _LINUX_CAPABILITY_VERSION_3	0x20080522

struct l_user_cap_header {
	l_int	version;
	l_int	pid;
};

struct l_user_cap_data {
	l_int	effective;
	l_int	permitted;
	l_int	inheritable;
};

int
linux_capget(struct thread *td, struct linux_capget_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, u32s;

	if (uap->hdrp == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

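	/*
	 * Determine how many 32-bit capability words the caller's data
	 * structure carries: one for the v1 ABI, two for v2 and v3.
	 */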
	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	if (uap->datap) {
		/*
		 * The current implementation doesn't support setting
		 * a capability (it's essentially a stub) so indicate
		 * that no capabilities are currently set or available
		 * to request.
		 */
		memset(&lucd, 0, u32s * sizeof(lucd[0]));
		error = copyout(&lucd, uap->datap, u32s * sizeof(lucd[0]));
	}

	return (error);
}

int
linux_capset(struct thread *td, struct linux_capset_args *uap)
{
	struct l_user_cap_header luch;
	struct l_user_cap_data lucd[2];
	int error, i, u32s;

	if (uap->hdrp == NULL || uap->datap == NULL)
		return (EFAULT);

	error = copyin(uap->hdrp, &luch, sizeof(luch));
	if (error != 0)
		return (error);

	switch (luch.version) {
	case _LINUX_CAPABILITY_VERSION_1:
		u32s = 1;
		break;
	case _LINUX_CAPABILITY_VERSION_2:
	case _LINUX_CAPABILITY_VERSION_3:
		u32s = 2;
		break;
	default:
		luch.version = _LINUX_CAPABILITY_VERSION_1;
		error = copyout(&luch, uap->hdrp, sizeof(luch));
		if (error)
			return (error);
		return (EINVAL);
	}

	if (luch.pid)
		return (EPERM);

	error = copyin(uap->datap, &lucd, u32s * sizeof(lucd[0]));
	if (error != 0)
		return (error);

	/* We currently don't support setting any capabilities. */
	for (i = 0; i < u32s; i++) {
		if (lucd[i].effective || lucd[i].permitted ||
		    lucd[i].inheritable) {
			linux_msg(td,
			    "capset[%d] effective=0x%x, permitted=0x%x, "
			    "inheritable=0x%x is not implemented", i,
			    (int)lucd[i].effective, (int)lucd[i].permitted,
			    (int)lucd[i].inheritable);
			return (EPERM);
		}
	}

	return (0);
}

int
linux_prctl(struct thread *td, struct linux_prctl_args *args)
{
	int error = 0, max_size, arg;
	struct proc *p = td->td_proc;
	char comm[LINUX_MAX_COMM_LEN];
	int pdeath_signal, trace_state;

	switch (args->option) {
	case LINUX_PR_SET_PDEATHSIG:
		if (!LINUX_SIG_VALID(args->arg2))
			return (EINVAL);
		pdeath_signal = linux_to_bsd_signal(args->arg2);
		return (kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_CTL,
		    &pdeath_signal));
	case LINUX_PR_GET_PDEATHSIG:
		error = kern_procctl(td, P_PID, 0, PROC_PDEATHSIG_STATUS,
		    &pdeath_signal);
		if (error != 0)
			return (error);
		pdeath_signal = bsd_to_linux_signal(pdeath_signal);
		return (copyout(&pdeath_signal,
		    (void *)(register_t)args->arg2,
		    sizeof(pdeath_signal)));
	/*
	 * In Linux, this flag controls if set[gu]id processes can coredump.
	 * There are additional semantics imposed on processes that cannot
	 * coredump:
	 * - Such processes can not be ptraced.
	 * - There are some semantics around ownership of process-related files
	 *   in the /proc namespace.
	 *
	 * In FreeBSD, we can (and by default, do) disable setuid coredump
	 * system-wide with 'sugid_coredump.'  We control traceability on a
	 * per-process basis with the procctl PROC_TRACE (=> P2_NOTRACE flag).
	 * By happy coincidence, P2_NOTRACE also prevents coredumping.  So the
	 * procctl is roughly analogous to Linux's DUMPABLE.
	 *
	 * So, proxy these knobs to the corresponding PROC_TRACE setting.
	 */
	case LINUX_PR_GET_DUMPABLE:
		error = kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_STATUS,
		    &trace_state);
		if (error != 0)
			return (error);
		td->td_retval[0] = (trace_state != -1);
		return (0);
	case LINUX_PR_SET_DUMPABLE:
		/*
		 * It is only valid for userspace to set one of these two
		 * flags, and only one at a time.
		 */
		switch (args->arg2) {
		case LINUX_SUID_DUMP_DISABLE:
			trace_state = PROC_TRACE_CTL_DISABLE_EXEC;
			break;
		case LINUX_SUID_DUMP_USER:
			trace_state = PROC_TRACE_CTL_ENABLE;
			break;
		default:
			return (EINVAL);
		}
		return (kern_procctl(td, P_PID, p->p_pid, PROC_TRACE_CTL,
		    &trace_state));
	case LINUX_PR_GET_KEEPCAPS:
		/*
		 * Indicate that we always clear the effective and
		 * permitted capability sets when the user id becomes
		 * non-zero (actually the capability sets are simply
		 * always zero in the current implementation).
		 */
		td->td_retval[0] = 0;
		break;
	case LINUX_PR_SET_KEEPCAPS:
		/*
		 * Ignore requests to keep the effective and permitted
		 * capability sets when the user id becomes non-zero.
		 */
		break;
	case LINUX_PR_SET_NAME:
		/*
		 * To be on the safe side we need to make sure not to
		 * overflow the size a Linux program expects.  We already
		 * do this here in the copyin, so that we don't need to
		 * check on copyout.
		 */
		max_size = MIN(sizeof(comm), sizeof(p->p_comm));
		error = copyinstr((void *)(register_t)args->arg2, comm,
		    max_size, NULL);

		/* Linux silently truncates the name if it is too long. */
		if (error == ENAMETOOLONG) {
			/*
			 * XXX: copyinstr() isn't documented to populate the
			 * array completely, so do a copyin() to be on the
			 * safe side.  This should be changed in case
			 * copyinstr() is changed to guarantee this.
			 */
			error = copyin((void *)(register_t)args->arg2, comm,
			    max_size - 1);
			comm[max_size - 1] = '\0';
		}
		if (error)
			return (error);

		PROC_LOCK(p);
		strlcpy(p->p_comm, comm, sizeof(p->p_comm));
		PROC_UNLOCK(p);
		break;
	case LINUX_PR_GET_NAME:
		PROC_LOCK(p);
		strlcpy(comm, p->p_comm, sizeof(comm));
		PROC_UNLOCK(p);
		error = copyout(comm, (void *)(register_t)args->arg2,
		    strlen(comm) + 1);
		break;
	case LINUX_PR_GET_SECCOMP:
	case LINUX_PR_SET_SECCOMP:
		/*
		 * Same as returned by Linux without CONFIG_SECCOMP enabled.
		 */
		error = EINVAL;
		break;
	case LINUX_PR_CAPBSET_READ:
#if 0
		/*
		 * This makes too much noise with Ubuntu Focal.
		 */
		linux_msg(td, "unsupported prctl PR_CAPBSET_READ %d",
		    (int)args->arg2);
#endif
		error = EINVAL;
		break;
	case LINUX_PR_SET_CHILD_SUBREAPER:
		if (args->arg2 == 0) {
			return (kern_procctl(td, P_PID, 0, PROC_REAP_RELEASE,
			    NULL));
		}

		return (kern_procctl(td, P_PID, 0, PROC_REAP_ACQUIRE,
		    NULL));
	case LINUX_PR_SET_NO_NEW_PRIVS:
		arg = args->arg2 == 1 ?
		    PROC_NO_NEW_PRIVS_ENABLE : PROC_NO_NEW_PRIVS_DISABLE;
		error = kern_procctl(td, P_PID, p->p_pid,
		    PROC_NO_NEW_PRIVS_CTL, &arg);
		break;
	case LINUX_PR_SET_PTRACER:
		linux_msg(td, "unsupported prctl PR_SET_PTRACER");
		error = EINVAL;
		break;
	default:
		linux_msg(td, "unsupported prctl option %d", args->option);
		error = EINVAL;
		break;
	}

	return (error);
}

int
linux_sched_setparam(struct thread *td,
    struct linux_sched_setparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	error = copyin(uap->param, &sched_param, sizeof(sched_param));
	if (error)
		return (error);

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	if (linux_map_sched_prio) {
		error = kern_sched_getscheduler(td, tdt, &policy);
		if (error)
			goto out;

		switch (policy) {
		case SCHED_OTHER:
			if (sched_param.sched_priority != 0) {
				error = EINVAL;
				goto out;
			}
			sched_param.sched_priority =
			    PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			if (sched_param.sched_priority < 1 ||
			    sched_param.sched_priority >= LINUX_MAX_RT_PRIO) {
				error = EINVAL;
				goto out;
			}
			/*
			 * Map [1, LINUX_MAX_RT_PRIO - 1] to
			 * [0, RTP_PRIO_MAX - RTP_PRIO_MIN] (rounding down).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority - 1) *
			    (RTP_PRIO_MAX - RTP_PRIO_MIN + 1) /
			    (LINUX_MAX_RT_PRIO - 1);
			break;
		}
	}

	error = kern_sched_setparam(td, tdt, &sched_param);
out:
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_getparam(struct thread *td,
    struct linux_sched_getparam_args *uap)
{
	struct sched_param sched_param;
	struct thread *tdt;
	int error, policy;

	tdt = linux_tdfind(td, uap->pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_getparam(td, tdt, &sched_param);
	if (error) {
		PROC_UNLOCK(tdt->td_proc);
		return (error);
	}

	if (linux_map_sched_prio) {
		error = kern_sched_getscheduler(td, tdt, &policy);
		PROC_UNLOCK(tdt->td_proc);
		if (error)
			return (error);

		switch (policy) {
		case SCHED_OTHER:
			sched_param.sched_priority = 0;
			break;
		case SCHED_FIFO:
		case SCHED_RR:
			/*
			 * Map [0, RTP_PRIO_MAX - RTP_PRIO_MIN] to
			 * [1, LINUX_MAX_RT_PRIO - 1] (rounding up).
			 */
			sched_param.sched_priority =
			    (sched_param.sched_priority *
			    (LINUX_MAX_RT_PRIO - 1) +
			    (RTP_PRIO_MAX - RTP_PRIO_MIN - 1)) /
			    (RTP_PRIO_MAX - RTP_PRIO_MIN) + 1;
			break;
		}
	} else
		PROC_UNLOCK(tdt->td_proc);

	error = copyout(&sched_param, uap->param, sizeof(sched_param));
	return (error);
}

/*
 * Get affinity of a process.
 */
int
linux_sched_getaffinity(struct thread *td,
    struct linux_sched_getaffinity_args *args)
{
	struct thread *tdt;
	cpuset_t *mask;
	size_t size;
	int error;
	id_t tid;

	tdt = linux_tdfind(td, args->pid, -1);
	if (tdt == NULL)
		return (ESRCH);
	tid = tdt->td_tid;
	PROC_UNLOCK(tdt->td_proc);

	mask = malloc(sizeof(cpuset_t), M_LINUX, M_WAITOK | M_ZERO);
	size = min(args->len, sizeof(cpuset_t));
	error = kern_cpuset_getaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tid, size, mask);
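	/* Linux reports a cpuset buffer that is too small as EINVAL. */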
1954 if (error == ERANGE)
1955 error = EINVAL;
1956 if (error == 0)
1957 error = copyout(mask, args->user_mask_ptr, size);
1958 if (error == 0)
1959 td->td_retval[0] = size;
1960 free(mask, M_LINUX);
1961 return (error);
1962 }
1963
1964 /*
1965 * Set affinity of a process.
1966 */
1967 int
linux_sched_setaffinity(struct thread * td,struct linux_sched_setaffinity_args * args)1968 linux_sched_setaffinity(struct thread *td,
1969 struct linux_sched_setaffinity_args *args)
1970 {
1971 struct thread *tdt;
1972 cpuset_t *mask;
1973 int cpu, error;
1974 size_t len;
1975 id_t tid;
1976
1977 tdt = linux_tdfind(td, args->pid, -1);
1978 if (tdt == NULL)
1979 return (ESRCH);
1980 tid = tdt->td_tid;
1981 PROC_UNLOCK(tdt->td_proc);
1982
1983 len = min(args->len, sizeof(cpuset_t));
1984 mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
1985 error = copyin(args->user_mask_ptr, mask, len);
1986 if (error != 0)
1987 goto out;
	/* Linux ignores the high bits. */
	CPU_FOREACH_ISSET(cpu, mask)
		if (cpu > mp_maxid)
			CPU_CLR(cpu, mask);

	error = kern_cpuset_setaffinity(td, CPU_LEVEL_WHICH, CPU_WHICH_TID,
	    tid, mask);
	if (error == EDEADLK)
		error = EINVAL;
out:
	free(mask, M_TEMP);
	return (error);
}

struct linux_rlimit64 {
	uint64_t rlim_cur;
	uint64_t rlim_max;
};

int
linux_prlimit64(struct thread *td, struct linux_prlimit64_args *args)
{
	struct rlimit rlim, nrlim;
	struct linux_rlimit64 lrlim;
	struct proc *p;
	u_int which;
	int flags;
	int error;

	if (args->new == NULL && args->old != NULL) {
		if (linux_get_dummy_limit(td, args->resource, &rlim)) {
			lrlim.rlim_cur = rlim.rlim_cur;
			lrlim.rlim_max = rlim.rlim_max;
			return (copyout(&lrlim, args->old, sizeof(lrlim)));
		}
	}

	if (args->resource >= LINUX_RLIM_NLIMITS)
		return (EINVAL);

	which = linux_to_bsd_resource[args->resource];
	if (which == -1)
		return (EINVAL);

	if (args->new != NULL) {
		/*
		 * Note: unlike FreeBSD, where rlim is a signed 64-bit
		 * value, the Linux rlim is unsigned 64-bit.  FreeBSD
		 * treats negative limits as INFINITY, so no conversion
		 * is needed.
		 */
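		/*
		 * For example, LINUX_RLIM_INFINITY (~0ULL) reinterpreted
		 * as a signed rlim_t reads as -1; the setrlimit path is
		 * expected to clamp such negative values to RLIM_INFINITY,
		 * so the bitwise copy below is safe.
		 */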
		error = copyin(args->new, &nrlim, sizeof(nrlim));
		if (error != 0)
			return (error);
	}

	flags = PGET_HOLD | PGET_NOTWEXIT;
	if (args->new != NULL)
		flags |= PGET_CANDEBUG;
	else
		flags |= PGET_CANSEE;
	if (args->pid == 0) {
		p = td->td_proc;
		PHOLD(p);
	} else {
		error = pget(args->pid, flags, &p);
		if (error != 0)
			return (error);
	}
	if (args->old != NULL) {
		PROC_LOCK(p);
		lim_rlimit_proc(p, which, &rlim);
		PROC_UNLOCK(p);
		if (rlim.rlim_cur == RLIM_INFINITY)
			lrlim.rlim_cur = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_cur = rlim.rlim_cur;
		if (rlim.rlim_max == RLIM_INFINITY)
			lrlim.rlim_max = LINUX_RLIM_INFINITY;
		else
			lrlim.rlim_max = rlim.rlim_max;
		error = copyout(&lrlim, args->old, sizeof(lrlim));
		if (error != 0)
			goto out;
	}

	if (args->new != NULL)
		error = kern_proc_setrlimit(td, p, which, &nrlim);

out:
	PRELE(p);
	return (error);
}

int
linux_pselect6(struct thread *td, struct linux_pselect6_args *args)
{
	struct timespec ts, *tsp;
	int error;

	if (args->tsp != NULL) {
		error = linux_get_timespec(&ts, args->tsp);
		if (error != 0)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

	error = linux_common_pselect6(td, args->nfds, args->readfds,
	    args->writefds, args->exceptfds, tsp, args->sig);

	if (args->tsp != NULL)
		linux_put_timespec(&ts, args->tsp);
	return (error);
}

static int
linux_common_pselect6(struct thread *td, l_int nfds, l_fd_set *readfds,
    l_fd_set *writefds, l_fd_set *exceptfds, struct timespec *tsp,
    l_uintptr_t *sig)
{
	struct timeval utv, tv0, tv1, *tvp;
	struct l_pselect6arg lpse6;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	ssp = NULL;
	if (sig != NULL) {
		error = copyin(sig, &lpse6, sizeof(lpse6));
		if (error != 0)
			return (error);
		error = linux_copyin_sigset(td, PTRIN(lpse6.ss),
		    lpse6.ss_len, &ss, &ssp);
		if (error != 0)
			return (error);
	}

	/*
	 * glibc currently converts the nanosecond value to microseconds.
	 * This loses precision, but in practice the difference is rarely
	 * noticeable.
	 */
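	/*
	 * For example, TIMESPEC_TO_TIMEVAL() turns a timeout of
	 * { .tv_sec = 0, .tv_nsec = 1500 } into 1 microsecond,
	 * discarding the remaining 500 nanoseconds.
	 */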
	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&utv, tsp);
		if (itimerfix(&utv))
			return (EINVAL);

		microtime(&tv0);
		tvp = &utv;
	} else
		tvp = NULL;

	error = kern_pselect(td, nfds, readfds, writefds,
	    exceptfds, tvp, ssp, LINUX_NFDBITS);

	if (tsp != NULL) {
		/*
		 * Compute how much of the timeout is left by
		 * subtracting the time elapsed during the call
		 * (current time minus the time recorded before
		 * the call) from the user-supplied value.
		 */
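		/*
		 * E.g. a 5 s timeout that returns after 1.2 s leaves
		 * 3.8 s; if more time elapsed than was requested, the
		 * remainder is clamped to zero below.
		 */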
		microtime(&tv1);
		timevalsub(&tv1, &tv0);
		timevalsub(&utv, &tv1);
		if (utv.tv_sec < 0)
			timevalclear(&utv);
		TIMEVAL_TO_TIMESPEC(&utv, tsp);
	}
	return (error);
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_pselect6_time64(struct thread *td,
    struct linux_pselect6_time64_args *args)
{
	struct timespec ts, *tsp;
	int error;

	if (args->tsp != NULL) {
		error = linux_get_timespec64(&ts, args->tsp);
		if (error != 0)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;

	error = linux_common_pselect6(td, args->nfds, args->readfds,
	    args->writefds, args->exceptfds, tsp, args->sig);

	if (args->tsp != NULL)
		linux_put_timespec64(&ts, args->tsp);
	return (error);
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

int
linux_ppoll(struct thread *td, struct linux_ppoll_args *args)
{
	struct timespec uts, *tsp;
	int error;

	if (args->tsp != NULL) {
		error = linux_get_timespec(&uts, args->tsp);
		if (error != 0)
			return (error);
		tsp = &uts;
	} else
		tsp = NULL;

	error = linux_common_ppoll(td, args->fds, args->nfds, tsp,
	    args->sset, args->ssize);
	if (error == 0 && args->tsp != NULL)
		error = linux_put_timespec(&uts, args->tsp);
	return (error);
}

static int
linux_common_ppoll(struct thread *td, struct pollfd *fds, uint32_t nfds,
    struct timespec *tsp, l_sigset_t *sset, l_size_t ssize)
{
	struct timespec ts0, ts1;
	struct pollfd stackfds[32];
	struct pollfd *kfds;
	sigset_t *ssp;
	sigset_t ss;
	int error;

	if (kern_poll_maxfds(nfds))
		return (EINVAL);
	if (sset != NULL) {
		error = linux_copyin_sigset(td, sset, ssize, &ss, &ssp);
		if (error != 0)
			return (error);
	} else
		ssp = NULL;
	if (tsp != NULL)
		nanotime(&ts0);

	if (nfds > nitems(stackfds))
		kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
	else
		kfds = stackfds;
	error = linux_pollin(td, kfds, fds, nfds);
	if (error != 0)
		goto out;

	error = kern_poll_kfds(td, kfds, nfds, tsp, ssp);
	if (error == 0)
		error = linux_pollout(td, kfds, fds, nfds);

	if (error == 0 && tsp != NULL) {
		if (td->td_retval[0]) {
			nanotime(&ts1);
			timespecsub(&ts1, &ts0, &ts1);
			timespecsub(tsp, &ts1, tsp);
			if (tsp->tv_sec < 0)
				timespecclear(tsp);
		} else
			timespecclear(tsp);
	}

out:
	if (nfds > nitems(stackfds))
		free(kfds, M_TEMP);
	return (error);
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_ppoll_time64(struct thread *td, struct linux_ppoll_time64_args *args)
{
	struct timespec uts, *tsp;
	int error;

	if (args->tsp != NULL) {
		error = linux_get_timespec64(&uts, args->tsp);
		if (error != 0)
			return (error);
		tsp = &uts;
	} else
		tsp = NULL;
	error = linux_common_ppoll(td, args->fds, args->nfds, tsp,
	    args->sset, args->ssize);
	if (error == 0 && args->tsp != NULL)
		error = linux_put_timespec64(&uts, args->tsp);
	return (error);
}
#endif /* __i386__ || (__amd64__ && COMPAT_LINUX32) */

static int
linux_pollin(struct thread *td, struct pollfd *fds, struct pollfd *ufds,
    u_int nfd)
{
	int error;
	u_int i;

	error = copyin(ufds, fds, nfd * sizeof(*fds));
	if (error != 0)
		return (error);

	for (i = 0; i < nfd; i++) {
		if (fds->events != 0)
			linux_to_bsd_poll_events(td, fds->fd,
			    fds->events, &fds->events);
		fds++;
	}
	return (0);
}

static int
linux_pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds,
    u_int nfd)
{
	int error = 0;
	u_int i, n = 0;

	for (i = 0; i < nfd; i++) {
		if (fds->revents != 0) {
			bsd_to_linux_poll_events(fds->revents,
			    &fds->revents);
			n++;
		}
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
linux_sched_rr_get_interval_common(struct thread *td, pid_t pid,
    struct timespec *ts)
{
	struct thread *tdt;
	int error;

	/*
	 * According to the man page, EINVAL should be returned
	 * when an invalid pid is specified.
	 */
	if (pid < 0)
		return (EINVAL);

	tdt = linux_tdfind(td, pid, -1);
	if (tdt == NULL)
		return (ESRCH);

	error = kern_sched_rr_get_interval_td(td, tdt, ts);
	PROC_UNLOCK(tdt->td_proc);
	return (error);
}

int
linux_sched_rr_get_interval(struct thread *td,
    struct linux_sched_rr_get_interval_args *uap)
{
	struct timespec ts;
	int error;

	error = linux_sched_rr_get_interval_common(td, uap->pid, &ts);
	if (error != 0)
		return (error);
	return (linux_put_timespec(&ts, uap->interval));
}

#if defined(__i386__) || (defined(__amd64__) && defined(COMPAT_LINUX32))
int
linux_sched_rr_get_interval_time64(struct thread *td,
    struct linux_sched_rr_get_interval_time64_args *uap)
{
	struct timespec ts;
	int error;

	error = linux_sched_rr_get_interval_common(td, uap->pid, &ts);
	if (error != 0)
		return (error);
	return (linux_put_timespec64(&ts, uap->interval));
}
#endif

/*
 * When the Linux thread is the initial thread in the thread group,
 * its thread id is equal to the process id.  glibc depends on
 * this magic (there is an assert in pthread_getattr_np.c).
 */
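/*
 * For example, in a single-threaded Linux process with pid 1234,
 * gettid() also returns 1234, so a lookup by tid 1234 must resolve
 * to that process's initial thread even though its native FreeBSD
 * tid is different.
 */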
struct thread *
linux_tdfind(struct thread *td, lwpid_t tid, pid_t pid)
{
	struct linux_emuldata *em;
	struct thread *tdt;
	struct proc *p;

	tdt = NULL;
	if (tid == 0 || tid == td->td_tid) {
		if (pid != -1 && td->td_proc->p_pid != pid)
			return (NULL);
		PROC_LOCK(td->td_proc);
		return (td);
	} else if (tid > PID_MAX)
		return (tdfind(tid, pid));

	/*
	 * The initial thread, whose tid is equal to the pid.
	 */
	p = pfind(tid);
	if (p != NULL) {
		if (SV_PROC_ABI(p) != SV_ABI_LINUX ||
		    (pid != -1 && tid != pid)) {
			/*
			 * p is not a Linuxulator process.
			 */
			PROC_UNLOCK(p);
			return (NULL);
		}
		FOREACH_THREAD_IN_PROC(p, tdt) {
			em = em_find(tdt);
			if (tid == em->em_tid)
				return (tdt);
		}
		PROC_UNLOCK(p);
	}
	return (NULL);
}

void
linux_to_bsd_waitopts(int options, int *bsdopts)
{

	if (options & LINUX_WNOHANG)
		*bsdopts |= WNOHANG;
	if (options & LINUX_WUNTRACED)
		*bsdopts |= WUNTRACED;
	if (options & LINUX_WEXITED)
		*bsdopts |= WEXITED;
	if (options & LINUX_WCONTINUED)
		*bsdopts |= WCONTINUED;
	if (options & LINUX_WNOWAIT)
		*bsdopts |= WNOWAIT;

	if (options & __WCLONE)
		*bsdopts |= WLINUXCLONE;
}

int
linux_getrandom(struct thread *td, struct linux_getrandom_args *args)
{
	struct uio uio;
	struct iovec iov;
	int error;

	if (args->flags & ~(LINUX_GRND_NONBLOCK|LINUX_GRND_RANDOM))
		return (EINVAL);
	if (args->count > INT_MAX)
		args->count = INT_MAX;

	iov.iov_base = args->buf;
	iov.iov_len = args->count;

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = iov.iov_len;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = read_random_uio(&uio, args->flags & LINUX_GRND_NONBLOCK);
	if (error == 0)
		td->td_retval[0] = args->count - uio.uio_resid;
	return (error);
}

int
linux_mincore(struct thread *td, struct linux_mincore_args *args)
{

	/* Needs to be page-aligned. */
	if (args->start & PAGE_MASK)
		return (EINVAL);
	return (kern_mincore(td, args->start, args->len, args->vec));
}

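/*
 * "<6>" is the Linux log-level prefix for KERN_INFO; prepending it to
 * each line below makes the output resemble the Linux kernel log
 * buffer format that dmesg(1) and glibc's syslog readers expect.
 */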
#define	SYSLOG_TAG	"<6>"

int
linux_syslog(struct thread *td, struct linux_syslog_args *args)
{
	char buf[128], *src, *dst;
	u_int seq;
	int buflen, error;

	if (args->type != LINUX_SYSLOG_ACTION_READ_ALL) {
		linux_msg(td, "syslog unsupported type 0x%x", args->type);
		return (EINVAL);
	}

	if (args->len < 6) {
		td->td_retval[0] = 0;
		return (0);
	}

	error = priv_check(td, PRIV_MSGBUF);
	if (error)
		return (error);

	mtx_lock(&msgbuf_lock);
	msgbuf_peekbytes(msgbufp, NULL, 0, &seq);
	mtx_unlock(&msgbuf_lock);

	dst = args->buf;
	error = copyout(&SYSLOG_TAG, dst, sizeof(SYSLOG_TAG));
	/* The -1 is to skip the trailing '\0'. */
	dst += sizeof(SYSLOG_TAG) - 1;

	while (error == 0) {
		mtx_lock(&msgbuf_lock);
		buflen = msgbuf_peekbytes(msgbufp, buf, sizeof(buf), &seq);
		mtx_unlock(&msgbuf_lock);

		if (buflen == 0)
			break;

		for (src = buf; src < buf + buflen && error == 0; src++) {
			if (*src == '\0')
				continue;

			if (dst >= args->buf + args->len)
				goto out;

			error = copyout(src, dst, 1);
			dst++;

			if (*src == '\n' && *(src + 1) != '<' &&
			    dst + sizeof(SYSLOG_TAG) < args->buf + args->len) {
				error = copyout(&SYSLOG_TAG,
				    dst, sizeof(SYSLOG_TAG));
				dst += sizeof(SYSLOG_TAG) - 1;
			}
		}
	}
out:
	td->td_retval[0] = dst - args->buf;
	return (error);
}

int
linux_getcpu(struct thread *td, struct linux_getcpu_args *args)
{
	int cpu, error, node;

	cpu = td->td_oncpu;	/* Make sure it doesn't change during copyout(9). */
	error = 0;
	node = cpuid_to_pcpu[cpu]->pc_domain;

	if (args->cpu != NULL)
		error = copyout(&cpu, args->cpu, sizeof(l_int));
	if (args->node != NULL)
		error = copyout(&node, args->node, sizeof(l_int));
	return (error);
}

#if defined(__i386__) || defined(__amd64__)
int
linux_poll(struct thread *td, struct linux_poll_args *args)
{
	struct timespec ts, *tsp;

	if (args->timeout != INFTIM) {
		if (args->timeout < 0)
			return (EINVAL);
		ts.tv_sec = args->timeout / 1000;
		ts.tv_nsec = (args->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (linux_common_ppoll(td, args->fds, args->nfds,
	    tsp, NULL, 0));
}
#endif /* __i386__ || __amd64__ */

int
linux_seccomp(struct thread *td, struct linux_seccomp_args *args)
{

	switch (args->op) {
	case LINUX_SECCOMP_GET_ACTION_AVAIL:
		return (EOPNOTSUPP);
	default:
		/*
		 * Ignore unknown operations, just like a Linux kernel
		 * built without CONFIG_SECCOMP.
		 */
		return (EINVAL);
	}
}

/*
 * Custom version of exec_copyin_args(), to copy out argument and environment
 * strings from the old process address space into the temporary string buffer.
 * Based on freebsd32_exec_copyin_args().
 */
static int
linux_exec_copyin_args(struct image_args *args, const char *fname,
    l_uintptr_t *argv, l_uintptr_t *envv)
{
	char *argp, *envp;
	l_uintptr_t *ptr, arg;
	int error;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);

	/*
	 * Allocate demand-paged memory for the file name, argument, and
	 * environment strings.
	 */
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	/*
	 * Copy the file name.
	 */
	error = exec_args_add_fname(args, fname, UIO_USERSPACE);
	if (error != 0)
		goto err_exit;

	/*
	 * Extract the arguments first.
	 */
	ptr = argv;
	for (;;) {
		error = copyin(ptr++, &arg, sizeof(arg));
		if (error)
			goto err_exit;
		if (arg == 0)
			break;
		argp = PTRIN(arg);
		error = exec_args_add_arg(args, argp, UIO_USERSPACE);
		if (error != 0)
			goto err_exit;
	}

	/*
	 * This comment is from Linux do_execveat_common:
	 * When argv is empty, add an empty string ("") as argv[0] to
	 * ensure confused userspace programs that start processing
	 * from argv[1] won't end up walking envp.
	 */
	if (args->argc == 0 &&
	    (error = exec_args_add_arg(args, "", UIO_SYSSPACE)) != 0)
		goto err_exit;

	/*
	 * Extract the environment strings.
	 */
	if (envv) {
		ptr = envv;
		for (;;) {
			error = copyin(ptr++, &arg, sizeof(arg));
			if (error)
				goto err_exit;
			if (arg == 0)
				break;
			envp = PTRIN(arg);
			error = exec_args_add_env(args, envp, UIO_USERSPACE);
			if (error != 0)
				goto err_exit;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}

int
linux_execve(struct thread *td, struct linux_execve_args *args)
{
	struct image_args eargs;
	int error;

	LINUX_CTR(execve);

	error = linux_exec_copyin_args(&eargs, args->path, args->argp,
	    args->envp);
	if (error == 0)
		error = linux_common_execve(td, &eargs);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

static void
linux_up_rtprio_if(struct thread *td1, struct rtprio *rtp)
{
	struct rtprio rtp2;

	pri_to_rtp(td1, &rtp2);
	if (rtp2.type < rtp->type ||
	    (rtp2.type == rtp->type &&
	    rtp2.prio < rtp->prio)) {
		rtp->type = rtp2.type;
		rtp->prio = rtp2.prio;
	}
}

#define	LINUX_PRIO_DIVIDER	(RTP_PRIO_MAX / LINUX_IOPRIO_MAX)
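/*
 * For illustration (values assumed here, not authoritative): with
 * RTP_PRIO_MAX == 31 and LINUX_IOPRIO_MAX == 8 the divider is 3, so a
 * best-effort ioprio level of 4 maps to rtprio 12, and rtprio 12 maps
 * back to level 4.
 */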

static int
linux_rtprio2ioprio(struct rtprio *rtp)
{
	int ioprio, prio;

	switch (rtp->type) {
	case RTP_PRIO_IDLE:
		prio = RTP_PRIO_MIN;
		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_IDLE, prio);
		break;
	case RTP_PRIO_NORMAL:
		prio = rtp->prio / LINUX_PRIO_DIVIDER;
		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_BE, prio);
		break;
	case RTP_PRIO_REALTIME:
		prio = rtp->prio / LINUX_PRIO_DIVIDER;
		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_RT, prio);
		break;
	default:
		prio = RTP_PRIO_MIN;
		ioprio = LINUX_IOPRIO_PRIO(LINUX_IOPRIO_CLASS_NONE, prio);
		break;
	}
	return (ioprio);
}

static int
linux_ioprio2rtprio(int ioprio, struct rtprio *rtp)
{

	switch (LINUX_IOPRIO_PRIO_CLASS(ioprio)) {
	case LINUX_IOPRIO_CLASS_IDLE:
		rtp->prio = RTP_PRIO_MIN;
		rtp->type = RTP_PRIO_IDLE;
		break;
	case LINUX_IOPRIO_CLASS_BE:
		rtp->prio = LINUX_IOPRIO_PRIO_DATA(ioprio) * LINUX_PRIO_DIVIDER;
		rtp->type = RTP_PRIO_NORMAL;
		break;
	case LINUX_IOPRIO_CLASS_RT:
		rtp->prio = LINUX_IOPRIO_PRIO_DATA(ioprio) * LINUX_PRIO_DIVIDER;
		rtp->type = RTP_PRIO_REALTIME;
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
#undef LINUX_PRIO_DIVIDER

int
linux_ioprio_get(struct thread *td, struct linux_ioprio_get_args *args)
{
	struct thread *td1;
	struct rtprio rtp;
	struct pgrp *pg;
	struct proc *p;
	int error, found;

	p = NULL;
	td1 = NULL;
	error = 0;
	found = 0;
	rtp.type = RTP_PRIO_IDLE;
	rtp.prio = RTP_PRIO_MAX;
	switch (args->which) {
	case LINUX_IOPRIO_WHO_PROCESS:
		if (args->who == 0) {
			td1 = td;
			p = td1->td_proc;
			PROC_LOCK(p);
		} else if (args->who > PID_MAX) {
			td1 = linux_tdfind(td, args->who, -1);
			if (td1 != NULL)
				p = td1->td_proc;
		} else
			p = pfind(args->who);
		if (p == NULL)
			return (ESRCH);
		if ((error = p_cansee(td, p))) {
			PROC_UNLOCK(p);
			break;
		}
		if (td1 != NULL) {
			pri_to_rtp(td1, &rtp);
		} else {
			FOREACH_THREAD_IN_PROC(p, td1) {
				linux_up_rtprio_if(td1, &rtp);
			}
		}
		found++;
		PROC_UNLOCK(p);
		break;
	case LINUX_IOPRIO_WHO_PGRP:
		sx_slock(&proctree_lock);
		if (args->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(args->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				error = ESRCH;
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				FOREACH_THREAD_IN_PROC(p, td1) {
					linux_up_rtprio_if(td1, &rtp);
					found++;
				}
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;
	case LINUX_IOPRIO_WHO_USER:
		if (args->who == 0)
			args->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == args->who &&
			    p_cansee(td, p) == 0) {
				FOREACH_THREAD_IN_PROC(p, td1) {
					linux_up_rtprio_if(td1, &rtp);
					found++;
				}
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0) {
		if (found != 0)
			td->td_retval[0] = linux_rtprio2ioprio(&rtp);
		else
			error = ESRCH;
	}
	return (error);
}

int
linux_ioprio_set(struct thread *td, struct linux_ioprio_set_args *args)
{
	struct thread *td1;
	struct rtprio rtp;
	struct pgrp *pg;
	struct proc *p;
	int error;

	if ((error = linux_ioprio2rtprio(args->ioprio, &rtp)) != 0)
		return (error);
	/* Attempts to set high (REALTIME) priorities require superuser privileges. */
	if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME &&
	    (error = priv_check(td, PRIV_SCHED_RTPRIO)) != 0)
		return (error);

	p = NULL;
	td1 = NULL;
	switch (args->which) {
	case LINUX_IOPRIO_WHO_PROCESS:
		if (args->who == 0) {
			td1 = td;
			p = td1->td_proc;
			PROC_LOCK(p);
		} else if (args->who > PID_MAX) {
			td1 = linux_tdfind(td, args->who, -1);
			if (td1 != NULL)
				p = td1->td_proc;
		} else
			p = pfind(args->who);
		if (p == NULL)
			return (ESRCH);
		if ((error = p_cansched(td, p))) {
			PROC_UNLOCK(p);
			break;
		}
		if (td1 != NULL) {
			error = rtp_to_pri(&rtp, td1);
		} else {
			FOREACH_THREAD_IN_PROC(p, td1) {
				if ((error = rtp_to_pri(&rtp, td1)) != 0)
					break;
			}
		}
		PROC_UNLOCK(p);
		break;
	case LINUX_IOPRIO_WHO_PGRP:
		sx_slock(&proctree_lock);
		if (args->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(args->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				error = ESRCH;
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansched(td, p) == 0) {
				FOREACH_THREAD_IN_PROC(p, td1) {
					if ((error = rtp_to_pri(&rtp, td1)) != 0)
						break;
				}
			}
			PROC_UNLOCK(p);
			if (error != 0)
				break;
		}
		PGRP_UNLOCK(pg);
		break;
	case LINUX_IOPRIO_WHO_USER:
		if (args->who == 0)
			args->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == args->who &&
			    p_cansched(td, p) == 0) {
				FOREACH_THREAD_IN_PROC(p, td1) {
					if ((error = rtp_to_pri(&rtp, td1)) != 0)
						break;
				}
			}
			PROC_UNLOCK(p);
			if (error != 0)
				break;
		}
		sx_sunlock(&allproc_lock);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

/* The only flag is O_NONBLOCK. */
#define	B2L_MQ_FLAGS(bflags)	((bflags) != 0 ? LINUX_O_NONBLOCK : 0)
#define	L2B_MQ_FLAGS(lflags)	((lflags) != 0 ? O_NONBLOCK : 0)

int
linux_mq_open(struct thread *td, struct linux_mq_open_args *args)
{
	struct mq_attr attr;
	int error, flags;

	flags = linux_common_openflags(args->oflag);
	if ((flags & O_ACCMODE) == O_ACCMODE || (flags & O_EXEC) != 0)
		return (EINVAL);
	flags = FFLAGS(flags);
	if ((flags & O_CREAT) != 0 && args->attr != NULL) {
		error = copyin(args->attr, &attr, sizeof(attr));
		if (error != 0)
			return (error);
		attr.mq_flags = L2B_MQ_FLAGS(attr.mq_flags);
	}

	return (kern_kmq_open(td, args->name, flags, args->mode,
	    args->attr != NULL ? &attr : NULL));
}

int
linux_mq_unlink(struct thread *td, struct linux_mq_unlink_args *args)
{
	struct kmq_unlink_args bsd_args = {
		.path = PTRIN(args->name)
	};

	return (sys_kmq_unlink(td, &bsd_args));
}

int
linux_mq_timedsend(struct thread *td, struct linux_mq_timedsend_args *args)
{
	struct timespec ts, *abs_timeout;
	int error;

	if (args->abs_timeout == NULL)
		abs_timeout = NULL;
	else {
		error = linux_get_timespec(&ts, args->abs_timeout);
		if (error != 0)
			return (error);
		abs_timeout = &ts;
	}

	return (kern_kmq_timedsend(td, args->mqd, PTRIN(args->msg_ptr),
	    args->msg_len, args->msg_prio, abs_timeout));
}

int
linux_mq_timedreceive(struct thread *td,
    struct linux_mq_timedreceive_args *args)
{
	struct timespec ts, *abs_timeout;
	int error;

	if (args->abs_timeout == NULL)
		abs_timeout = NULL;
	else {
		error = linux_get_timespec(&ts, args->abs_timeout);
		if (error != 0)
			return (error);
		abs_timeout = &ts;
	}

	return (kern_kmq_timedreceive(td, args->mqd, PTRIN(args->msg_ptr),
	    args->msg_len, args->msg_prio, abs_timeout));
}

int
linux_mq_notify(struct thread *td, struct linux_mq_notify_args *args)
{
	struct sigevent ev, *evp;
	struct l_sigevent l_ev;
	int error;

	if (args->sevp == NULL)
		evp = NULL;
	else {
		error = copyin(args->sevp, &l_ev, sizeof(l_ev));
		if (error != 0)
			return (error);
		error = linux_convert_l_sigevent(&l_ev, &ev);
		if (error != 0)
			return (error);
		evp = &ev;
	}

	return (kern_kmq_notify(td, args->mqd, evp));
}

int
linux_mq_getsetattr(struct thread *td, struct linux_mq_getsetattr_args *args)
{
	struct mq_attr attr, oattr;
	int error;

	if (args->attr != NULL) {
		error = copyin(args->attr, &attr, sizeof(attr));
		if (error != 0)
			return (error);
		attr.mq_flags = L2B_MQ_FLAGS(attr.mq_flags);
	}

	error = kern_kmq_setattr(td, args->mqd,
	    args->attr != NULL ? &attr : NULL, &oattr);
	if (error == 0 && args->oattr != NULL) {
		oattr.mq_flags = B2L_MQ_FLAGS(oattr.mq_flags);
		bzero(oattr.__reserved, sizeof(oattr.__reserved));
		error = copyout(&oattr, args->oattr, sizeof(oattr));
	}

	return (error);
}

MODULE_DEPEND(linux, mqueuefs, 1, 1, 1);