/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_capsicum.h"
#include "opt_ktrace.h"

#define	EXTERR_CATEGORY	EXTERR_CAT_FILEDESC
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/capsicum.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/exterrvar.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/protosw.h>
#include <sys/socketvar.h>
#include <sys/uio.h>
#include <sys/eventfd.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/resourcevar.h>
#include <sys/selinfo.h>
#include <sys/sleepqueue.h>
#include <sys/specialfd.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/condvar.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <security/audit/audit.h>

/*
 * The following macro defines how many bytes will be allocated on the
 * stack, instead of via malloc(9), when passing ioctl data structures
 * between userspace and the kernel.  Some ioctls with small data
 * structures are issued very frequently, and this small on-stack
 * buffer gives a significant speedup for those requests.  The value of
 * this define should be greater than or equal to 64 bytes and should
 * be a power of two.  The data is currently hard-aligned to an 8-byte
 * boundary on the stack, which should be sufficient for all supported
 * platforms.
 */
#define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
#define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
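
/*
 * Illustrative userland sketch (not compiled here): FIONREAD carries
 * an int-sized payload, far below SYS_IOCTL_SMALL_SIZE, so sys_ioctl()
 * below serves it from the small on-stack buffer and skips the
 * malloc(9)/free(9) round trip.
 */
#if 0
#include <sys/filio.h>
#include <sys/ioctl.h>

int
bytes_pending(int fd)
{
	int nread;

	/* The payload is sizeof(int); it is copied via "smalldata". */
	if (ioctl(fd, FIONREAD, &nread) == -1)
		return (-1);
	return (nread);
}
#endif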

#ifdef __LP64__
static int iosize_max_clamp = 0;
SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
    &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
static int devfs_iosize_max_clamp = 1;
SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
    &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
#endif

/*
 * Assert that the return value of read(2) and write(2) syscalls fits
 * into a register.  If not, an architecture will need to provide
 * usermode wrappers to reconstruct the result.
 */
CTASSERT(sizeof(register_t) >= sizeof(size_t));

static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
MALLOC_DEFINE(M_IOV, "iov", "large iov's");

static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
		    u_int);
static int	pollscan(struct thread *, struct pollfd *, u_int);
static int	pollrescan(struct thread *);
static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
static int	selrescan(struct thread *, fd_mask **, fd_mask **);
static void	selfdalloc(struct thread *, void *);
static void	selfdfree(struct seltd *, struct selfd *);
static int	dofileread(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
		    off_t, int);
static void	doselwakeup(struct selinfo *, int);
static void	seltdinit(struct thread *);
static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
static void	seltdclear(struct thread *);

/*
 * One seltd per thread, allocated on demand.
 *
 * t - protected by st_mtx
 * k - Only accessed by curthread or read-only
 */
struct seltd {
	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
	struct selfd		*st_free1;	/* (k) free fd for read set. */
	struct selfd		*st_free2;	/* (k) free fd for write set. */
	struct mtx		st_mtx;		/* Protects struct seltd */
	struct cv		st_wait;	/* (t) Wait channel. */
	int			st_flags;	/* (t) SELTD_ flags. */
};

#define	SELTD_PENDING	0x0001		/* We have pending events. */
#define	SELTD_RESCAN	0x0002		/* Doing a rescan. */

/*
 * One selfd allocated per thread, per file descriptor.
 * f - protected by sf_mtx
 */
struct selfd {
	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
	struct seltd		*sf_td;		/* (k) owning seltd. */
	void			*sf_cookie;	/* (k) fd or pollfd. */
};

MALLOC_DEFINE(M_SELFD, "selfd", "selfd");
static struct mtx_pool *mtxpool_select;

#ifdef __LP64__
size_t
devfs_iosize_max(void)
{

	return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}

size_t
iosize_max(void)
{

	return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
	    INT_MAX : SSIZE_MAX);
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};
#endif
int
sys_read(struct thread *td, struct read_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_readv(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pread_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pread(struct thread *td, struct pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_preadv(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
{

	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Scatter read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct readv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_readv(struct thread *td, struct readv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_readv(td, uap->fd, auio);
	freeuio(auio);
	return (error);
}

int
kern_readv(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_read_rights, &fp);
	if (error)
		return (error);
	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}
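
/*
 * Illustrative userland sketch (not compiled here): the scatter read
 * that kern_readv() above services, with the iovec array turned into a
 * single uio by copyinuio() in sys_readv().
 */
#if 0
#include <sys/uio.h>
#include <unistd.h>

ssize_t
read_two_bufs(int fd, void *hdr, size_t hdrlen, void *body, size_t bodylen)
{
	struct iovec iov[2];

	iov[0].iov_base = hdr;
	iov[0].iov_len = hdrlen;
	iov[1].iov_base = body;
	iov[1].iov_len = bodylen;
	/* One system call; the data lands in both buffers in order. */
	return (readv(fd, iov, 2));
}
#endif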

/*
 * Scatter positioned read system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct preadv_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_preadv(struct thread *td, struct preadv_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_preadv(td, uap->fd, auio, uap->offset);
	freeuio(auio);
	return (error);
}

int
kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_read(td, fd, &cap_pread_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for readv and preadv that reads data in
 * from a file using the passed in uio, offset, and flags.
 */
static int
dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);

	/* Finish zero length reads right here */
	if (auio->uio_resid == 0) {
		td->td_retval[0] = 0;
		return (0);
	}
	auio->uio_rw = UIO_READ;
	auio->uio_offset = offset;
	auio->uio_td = td;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_READ, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct write_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
};
#endif
int
sys_write(struct thread *td, struct write_args *uap)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (uap->nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)uap->buf;
	aiov.iov_len = uap->nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = uap->nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_writev(td, uap->fd, &auio);
	return (error);
}

/*
 * Positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwrite_args {
	int	fd;
	const void *buf;
	size_t	nbyte;
	int	pad;
	off_t	offset;
};
#endif
int
sys_pwrite(struct thread *td, struct pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}

int
kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
    off_t offset)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if (nbyte > IOSIZE_MAX)
		return (EINVAL);
	aiov.iov_base = (void *)(uintptr_t)buf;
	aiov.iov_len = nbyte;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbyte;
	auio.uio_segflg = UIO_USERSPACE;
	error = kern_pwritev(td, fd, &auio, offset);
	return (error);
}

#if defined(COMPAT_FREEBSD6)
int
freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
{

	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
}
#endif

/*
 * Gather write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct writev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
};
#endif
int
sys_writev(struct thread *td, struct writev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_writev(td, uap->fd, auio);
	freeuio(auio);
	return (error);
}

int
kern_writev(struct thread *td, int fd, struct uio *auio)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_write_rights, &fp);
	if (error)
		return (error);
	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
	fdrop(fp, td);
	return (error);
}

/*
 * Gather positioned write system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct pwritev_args {
	int	fd;
	struct iovec *iovp;
	u_int	iovcnt;
	off_t	offset;
};
#endif
int
sys_pwritev(struct thread *td, struct pwritev_args *uap)
{
	struct uio *auio;
	int error;

	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
	if (error)
		return (error);
	error = kern_pwritev(td, uap->fd, auio, uap->offset);
	freeuio(auio);
	return (error);
}

int
kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
{
	struct file *fp;
	int error;

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error)
		return (error);
	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
		error = ESPIPE;
	else if (offset < 0 &&
	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
		error = EINVAL;
	else
		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
	fdrop(fp, td);
	return (error);
}

/*
 * Common code for writev and pwritev that writes data to
 * a file using the passed in uio, offset, and flags.
 */
static int
dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
    off_t offset, int flags)
{
	ssize_t cnt;
	int error;
#ifdef KTRACE
	struct uio *ktruio = NULL;
#endif

	AUDIT_ARG_FD(fd);
	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
	auio->uio_offset = offset;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_GENIO))
		ktruio = cloneuio(auio);
#endif
	cnt = auio->uio_resid;
	error = fo_write(fp, auio, td->td_ucred, flags, td);
	/*
	 * Socket layer is responsible for special error handling,
	 * see sousrsend().
	 */
	if (error != 0 && fp->f_type != DTYPE_SOCKET) {
		if (auio->uio_resid != cnt && (error == ERESTART ||
		    error == EINTR || error == EWOULDBLOCK))
			error = 0;
		if (error == EPIPE) {
			PROC_LOCK(td->td_proc);
			tdsignal(td, SIGPIPE);
			PROC_UNLOCK(td->td_proc);
		}
	}
	cnt -= auio->uio_resid;
#ifdef KTRACE
	if (ktruio != NULL) {
		if (error == 0)
			ktruio->uio_resid = cnt;
		ktrgenio(fd, UIO_WRITE, ktruio, error);
	}
#endif
	td->td_retval[0] = cnt;
	return (error);
}

/*
 * Truncate a file given a file descriptor.
 *
 * Can't use fget_write() here, since we must return EINVAL and not
 * EBADF if the descriptor isn't writable.
 */
int
kern_ftruncate(struct thread *td, int fd, off_t length)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (length < 0)
		return (EINVAL);
	error = fget(td, fd, &cap_ftruncate_rights, &fp);
	if (error)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if (!(fp->f_flag & FWRITE)) {
		fdrop(fp, td);
		return (EINVAL);
	}
	error = fo_truncate(fp, length, td->td_ucred, td);
	fdrop(fp, td);
	return (error);
}
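
/*
 * Illustrative userland sketch (not compiled here) of the error
 * convention implemented above: ftruncate(2) on a valid but read-only
 * descriptor fails with EINVAL, not EBADF.
 */
#if 0
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static void
check_ftruncate_readonly(const char *path)
{
	int fd;

	fd = open(path, O_RDONLY);
	assert(fd >= 0);
	assert(ftruncate(fd, 0) == -1 && errno == EINVAL);
	close(fd);
}
#endif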

#ifndef _SYS_SYSPROTO_H_
struct ftruncate_args {
	int	fd;
	int	pad;
	off_t	length;
};
#endif
int
sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct oftruncate_args {
	int	fd;
	long	length;
};
#endif
int
oftruncate(struct thread *td, struct oftruncate_args *uap)
{

	return (kern_ftruncate(td, uap->fd, uap->length));
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct ioctl_args {
	int	fd;
	u_long	com;
	caddr_t	data;
};
#endif
/* ARGSUSED */
int
sys_ioctl(struct thread *td, struct ioctl_args *uap)
{
	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
	uint32_t com;
	int arg, error;
	u_int size;
	caddr_t data;

#ifdef INVARIANTS
	if (uap->com > 0xffffffff) {
		printf(
		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
		    td->td_proc->p_pid, td->td_name, uap->com);
	}
#endif
	com = (uint32_t)uap->com;

	/*
	 * Interpret the high-order word to find the amount of data to be
	 * copied to/from the user's address space.
	 */
	size = IOCPARM_LEN(com);
	if ((size > IOCPARM_MAX) ||
	    ((com & (IOC_VOID | IOC_IN | IOC_OUT)) == 0) ||
#if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
	    ((com & IOC_OUT) && size == 0) ||
#else
	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
#endif
	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
		return (ENOTTY);

	if (size > 0) {
		if (com & IOC_VOID) {
			/* Integer argument. */
			arg = (intptr_t)uap->data;
			data = (void *)&arg;
			size = 0;
		} else {
			if (size > SYS_IOCTL_SMALL_SIZE)
				data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
			else
				data = smalldata;
		}
	} else
		data = (void *)&uap->data;
	if (com & IOC_IN) {
		error = copyin(uap->data, data, (u_int)size);
		if (error != 0)
			goto out;
	} else if (com & IOC_OUT) {
		/*
		 * Zero the buffer so the user always
		 * gets back something deterministic.
		 */
		bzero(data, size);
	}

	error = kern_ioctl(td, uap->fd, com, data);

	if (error == 0 && (com & IOC_OUT))
		error = copyout(data, uap->data, (u_int)size);

out:
	if (size > SYS_IOCTL_SMALL_SIZE)
		free(data, M_IOCTLOPS);
	return (error);
}
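
/*
 * Illustrative sketch (not compiled here) of the command encoding that
 * the size and direction checks above rely on: _IOR()/_IOW() store the
 * parameter length in the high-order word and IOCPARM_LEN() recovers
 * it.  "struct my_args" and the MYIOC_* commands are made up for the
 * example.
 */
#if 0
#include <sys/ioccom.h>

struct my_args {
	int	a;
	int	b;
};
#define	MYIOC_GET	_IOR('M', 1, struct my_args)	/* IOC_OUT */
#define	MYIOC_SET	_IOW('M', 2, struct my_args)	/* IOC_IN */

_Static_assert(IOCPARM_LEN(MYIOC_GET) == sizeof(struct my_args),
    "the parameter length is encoded in the high-order word");
#endif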

int
kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
{
	struct file *fp;
	struct filedesc *fdp;
	int error, tmp, locked;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(com);

	fdp = td->td_proc->p_fd;

	switch (com) {
	case FIONCLEX:
	case FIOCLEX:
		FILEDESC_XLOCK(fdp);
		locked = LA_XLOCKED;
		break;
	default:
#ifdef CAPABILITIES
		FILEDESC_SLOCK(fdp);
		locked = LA_SLOCKED;
#else
		locked = LA_UNLOCKED;
#endif
		break;
	}

#ifdef CAPABILITIES
	if ((fp = fget_noref(fdp, fd)) == NULL) {
		error = EBADF;
		goto out;
	}
	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
		fp = NULL;	/* fhold() was not called yet */
		goto out;
	}
	if (!fhold(fp)) {
		error = EBADF;
		fp = NULL;
		goto out;
	}
	if (locked == LA_SLOCKED) {
		FILEDESC_SUNLOCK(fdp);
		locked = LA_UNLOCKED;
	}
#else
	error = fget(td, fd, &cap_ioctl_rights, &fp);
	if (error != 0) {
		fp = NULL;
		goto out;
	}
#endif
	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
		error = EBADF;
		goto out;
	}

	switch (com) {
	case FIONCLEX:
		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
		goto out;
	case FIOCLEX:
		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
		goto out;
	case FIONBIO:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FNONBLOCK);
		else
			atomic_clear_int(&fp->f_flag, FNONBLOCK);
		data = (void *)&tmp;
		break;
	case FIOASYNC:
		if ((tmp = *(int *)data))
			atomic_set_int(&fp->f_flag, FASYNC);
		else
			atomic_clear_int(&fp->f_flag, FASYNC);
		data = (void *)&tmp;
		break;
	}

	error = fo_ioctl(fp, com, data, td->td_ucred, td);
out:
	switch (locked) {
	case LA_XLOCKED:
		FILEDESC_XUNLOCK(fdp);
		break;
#ifdef CAPABILITIES
	case LA_SLOCKED:
		FILEDESC_SUNLOCK(fdp);
		break;
#endif
	default:
		FILEDESC_UNLOCK_ASSERT(fdp);
		break;
	}
	if (fp != NULL)
		fdrop(fp, td);
	return (error);
}

int
sys_posix_fallocate(struct thread *td, struct posix_fallocate_args *uap)
{
	int error;

	error = kern_posix_fallocate(td, uap->fd, uap->offset, uap->len);
	return (kern_posix_error(td, error));
}

int
kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len)
{
	struct file *fp;
	int error;

	AUDIT_ARG_FD(fd);
	if (offset < 0 || len <= 0)
		return (EINVAL);
	/* Check for wrap. */
	if (offset > OFF_MAX - len)
		return (EFBIG);
	AUDIT_ARG_FD(fd);
	error = fget(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fallocate(fp, offset, len, td);
out:
	fdrop(fp, td);
	return (error);
}

int
sys_fspacectl(struct thread *td, struct fspacectl_args *uap)
{
	struct spacectl_range rqsr, rmsr;
	int error, cerror;

	error = copyin(uap->rqsr, &rqsr, sizeof(rqsr));
	if (error != 0)
		return (error);

	error = kern_fspacectl(td, uap->fd, uap->cmd, &rqsr, uap->flags,
	    &rmsr);
	if (uap->rmsr != NULL) {
		cerror = copyout(&rmsr, uap->rmsr, sizeof(rmsr));
		if (error == 0)
			error = cerror;
	}
	return (error);
}

int
kern_fspacectl(struct thread *td, int fd, int cmd,
    const struct spacectl_range *rqsr, int flags, struct spacectl_range *rmsrp)
{
	struct file *fp;
	struct spacectl_range rmsr;
	int error;

	AUDIT_ARG_FD(fd);
	AUDIT_ARG_CMD(cmd);
	AUDIT_ARG_FFLAGS(flags);

	if (rqsr == NULL)
		return (EINVAL);
	rmsr = *rqsr;
	if (rmsrp != NULL)
		*rmsrp = rmsr;

	if (cmd != SPACECTL_DEALLOC ||
	    rqsr->r_offset < 0 || rqsr->r_len <= 0 ||
	    rqsr->r_offset > OFF_MAX - rqsr->r_len ||
	    (flags & ~SPACECTL_F_SUPPORTED) != 0)
		return (EINVAL);

	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
	if (error != 0)
		return (error);
	AUDIT_ARG_FILE(td->td_proc, fp);
	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
		error = ESPIPE;
		goto out;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EBADF;
		goto out;
	}

	error = fo_fspacectl(fp, cmd, &rmsr.r_offset, &rmsr.r_len, flags,
	    td->td_ucred, td);
	/* fspacectl is not restarted after signals if the file is modified. */
	if (rmsr.r_len != rqsr->r_len && (error == ERESTART ||
	    error == EINTR || error == EWOULDBLOCK))
		error = 0;
	if (rmsrp != NULL)
		*rmsrp = rmsr;
out:
	fdrop(fp, td);
	return (error);
}

int
kern_specialfd(struct thread *td, int type, void *arg)
{
	struct file *fp;
	struct specialfd_eventfd *ae;
	int error, fd, fflags;

	fflags = 0;
	error = falloc_noinstall(td, &fp);
	if (error != 0)
		return (error);

	switch (type) {
	case SPECIALFD_EVENTFD:
		ae = arg;
		if ((ae->flags & EFD_CLOEXEC) != 0)
			fflags |= O_CLOEXEC;
		error = eventfd_create_file(td, fp, ae->initval, ae->flags);
		break;
	default:
		error = EINVAL;
		break;
	}

	if (error == 0)
		error = finstall(td, fp, &fd, fflags, NULL);
	fdrop(fp, td);
	if (error == 0)
		td->td_retval[0] = fd;
	return (error);
}

int
sys___specialfd(struct thread *td, struct __specialfd_args *args)
{
	struct specialfd_eventfd ae;
	int error;

	switch (args->type) {
	case SPECIALFD_EVENTFD:
		if (args->len != sizeof(struct specialfd_eventfd)) {
			error = EINVAL;
			break;
		}
		error = copyin(args->req, &ae, sizeof(ae));
		if (error != 0)
			break;
		if ((ae.flags & ~(EFD_CLOEXEC | EFD_NONBLOCK |
		    EFD_SEMAPHORE)) != 0) {
			error = EINVAL;
			break;
		}
		error = kern_specialfd(td, args->type, &ae);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
poll_no_poll(int events)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
sys_pselect(struct thread *td, struct pselect_args *uap)
{
	struct timespec ts;
	struct timeval tv, *tvp;
	sigset_t set, *uset;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error != 0)
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &ts);
		tvp = &tv;
	} else
		tvp = NULL;
	if (uap->sm != NULL) {
		error = copyin(uap->sm, &set, sizeof(set));
		if (error != 0)
			return (error);
		uset = &set;
	} else
		uset = NULL;
	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    uset, NFDBITS));
}

int
kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
    struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
{
	int error;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error != 0)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
	}
	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
	if (uset != NULL) {
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.  If we didn't get interrupted, then the caller is
		 * likely not expecting a signal to hit that should normally be
		 * blocked by its signal mask, so we restore the mask before
		 * any signals could be delivered.
		 */
		if (error == EINTR) {
			ast_sched(td, TDA_SIGSUSPEND);
		} else {
			/* *select(2) should never restart. */
			MPASS(error != ERESTART);
			ast_sched(td, TDA_PSELECT);
		}
	}

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct select_args {
	int	nd;
	fd_set	*in, *ou, *ex;
	struct	timeval *tv;
};
#endif
int
sys_select(struct thread *td, struct select_args *uap)
{
	struct timeval tv, *tvp;
	int error;

	if (uap->tv != NULL) {
		error = copyin(uap->tv, &tv, sizeof(tv));
		if (error)
			return (error);
		tvp = &tv;
	} else
		tvp = NULL;

	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
	    NFDBITS));
}

/*
 * In the unlikely case when the user specified n greater than the last
 * open file descriptor, check that no bits are set after the last
 * valid fd.  We must return EBADF if any are set.
 *
 * There are applications that rely on this behaviour.
 *
 * nd is fd_nfiles.
 */
static int
select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
{
	char *addr, *oaddr;
	int b, i, res;
	uint8_t bits;

	if (nd >= ndu || fd_in == NULL)
		return (0);

	oaddr = NULL;
	bits = 0;	/* silence gcc */
	for (i = nd; i < ndu; i++) {
		b = i / NBBY;
#if BYTE_ORDER == LITTLE_ENDIAN
		addr = (char *)fd_in + b;
#else
		addr = (char *)fd_in;
		if (abi_nfdbits == NFDBITS) {
			addr += rounddown(b, sizeof(fd_mask)) +
			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
		} else {
			addr += rounddown(b, sizeof(uint32_t)) +
			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
		}
#endif
		if (addr != oaddr) {
			res = fubyte(addr);
			if (res == -1)
				return (EFAULT);
			oaddr = addr;
			bits = res;
		}
		if ((bits & (1 << (i % NBBY))) != 0)
			return (EBADF);
	}
	return (0);
}
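
/*
 * Illustrative userland sketch (not compiled here, and assuming fd
 * 1023 is not open in the process): a bit set on a closed descriptor
 * makes select(2) fail with EBADF, which is the behaviour
 * select_check_badfd() preserves for bits beyond the file table.
 */
#if 0
#include <sys/select.h>

#include <assert.h>
#include <errno.h>

static void
check_select_badfd(void)
{
	fd_set rs;

	FD_ZERO(&rs);
	FD_SET(FD_SETSIZE - 1, &rs);
	assert(select(FD_SETSIZE, &rs, NULL, NULL, NULL) == -1 &&
	    errno == EBADF);
}
#endif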

int
kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
    fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
{
	struct filedesc *fdp;
	/*
	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
	 * infds with the new FD_SETSIZE of 1024, and more than enough for
	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
	 * of 256.
	 */
	fd_mask s_selbits[howmany(2048, NFDBITS)];
	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
	int error, lf, ndu;

	if (nd < 0)
		return (EINVAL);
	fdp = td->td_proc->p_fd;
	ndu = nd;
	lf = fdp->fd_nfiles;
	if (nd > lf)
		nd = lf;

	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);
	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
	if (error != 0)
		return (error);

	/*
	 * Allocate just enough bits for the non-null fd_sets.  Use the
	 * preallocated auto buffer if possible.
	 */
	nfdbits = roundup(nd, NFDBITS);
	ncpbytes = nfdbits / NBBY;
	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
	nbufbytes = 0;
	if (fd_in != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ou != NULL)
		nbufbytes += 2 * ncpbytes;
	if (fd_ex != NULL)
		nbufbytes += 2 * ncpbytes;
	if (nbufbytes <= sizeof s_selbits)
		selbits = &s_selbits[0];
	else
		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);

	/*
	 * Assign pointers into the bit buffers and fetch the input bits.
	 * Put the output buffers together so that they can be bzeroed
	 * together.
	 */
	sbp = selbits;
#define	getbits(name, x) \
	do {								\
		if (name == NULL) {					\
			ibits[x] = NULL;				\
			obits[x] = NULL;				\
		} else {						\
			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
			obits[x] = sbp;					\
			sbp += ncpbytes / sizeof *sbp;			\
			error = copyin(name, ibits[x], ncpubytes);	\
			if (error != 0)					\
				goto done;				\
			if (ncpbytes != ncpubytes)			\
				bzero((char *)ibits[x] + ncpubytes,	\
				    ncpbytes - ncpubytes);		\
		}							\
	} while (0)
	getbits(fd_in, 0);
	getbits(fd_ou, 1);
	getbits(fd_ex, 2);
#undef	getbits

#if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
	/*
	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
	 * we are running under 32-bit emulation.  This should be more
	 * generic.
	 */
#define	swizzle_fdset(bits)						\
	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
		int i;							\
		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
	}
#else
#define	swizzle_fdset(bits)
#endif

	/* Make sure the bit order makes it through an ABI transition */
	swizzle_fdset(ibits[0]);
	swizzle_fdset(ibits[1]);
	swizzle_fdset(ibits[2]);

	if (nbufbytes != 0)
		bzero(selbits, nbufbytes / 2);

	precision = 0;
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000) {
			error = EINVAL;
			goto done;
		}
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = selscan(td, ibits, obits, nd);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
		error = selrescan(td, ibits, obits);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

done:
	/* select is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	/* swizzle bit order back, if necessary */
	swizzle_fdset(obits[0]);
	swizzle_fdset(obits[1]);
	swizzle_fdset(obits[2]);
#undef	swizzle_fdset

#define	putbits(name, x) \
	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
		error = error2;
	if (error == 0) {
		int error2;

		putbits(fd_in, 0);
		putbits(fd_ou, 1);
		putbits(fd_ex, 2);
#undef putbits
	}
	if (selbits != &s_selbits[0])
		free(selbits, M_SELECT);

	return (error);
}
/*
 * Convert a select bit set to poll flags.
 *
 * The backend always returns POLLHUP/POLLERR if appropriate and we
 * return this as a set bit in any set.
 */
static const int select_flags[3] = {
	POLLRDNORM | POLLHUP | POLLERR,
	POLLWRNORM | POLLHUP | POLLERR,
	POLLRDBAND | POLLERR
};

/*
 * Compute the fo_poll flags required for a fd given by the index and
 * bit position in the fd_mask array.
 */
static __inline int
selflags(fd_mask **ibits, int idx, fd_mask bit)
{
	int flags;
	int msk;

	flags = 0;
	for (msk = 0; msk < 3; msk++) {
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		flags |= select_flags[msk];
	}
	return (flags);
}

/*
 * Set the appropriate output bits given a mask of fired events and the
 * input bits originally requested.
 */
static __inline int
selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
{
	int msk;
	int n;

	n = 0;
	for (msk = 0; msk < 3; msk++) {
		if ((events & select_flags[msk]) == 0)
			continue;
		if (ibits[msk] == NULL)
			continue;
		if ((ibits[msk][idx] & bit) == 0)
			continue;
		/*
		 * XXX Check for a duplicate set.  This can occur because a
		 * socket calls selrecord() twice for each poll() call
		 * resulting in two selfds per real fd.  selrescan() will
		 * call selsetbits twice as a result.
		 */
		if ((obits[msk][idx] & bit) != 0)
			continue;
		obits[msk][idx] |= bit;
		n++;
	}

	return (n);
}

/*
 * Traverse the list of fds attached to this thread's seltd and check for
 * completion.
 */
static int
selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
{
	struct filedesc *fdp;
	struct selinfo *si;
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct file *fp;
	fd_mask bit;
	int fd, ev, n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (int)(uintptr_t)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0))
			return (error);
		idx = fd / NFDBITS;
		bit = (fd_mask)1 << (fd % NFDBITS);
		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (ev != 0)
			n += selsetbits(ibits, obits, idx, bit, ev);
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

/*
 * Perform the initial filedescriptor scan and register ourselves with
 * each selinfo.
 */
static int
selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	fd_mask bit;
	int ev, flags, end, fd;
	int n, idx;
	int error;
	bool only_user;

	fdp = td->td_proc->p_fd;
	n = 0;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (idx = 0, fd = 0; fd < nfd; idx++) {
		end = imin(fd + NFDBITS, nfd);
		for (bit = 1; fd < end; bit <<= 1, fd++) {
			/* Compute the list of events we're interested in. */
			flags = selflags(ibits, idx, bit);
			if (flags == 0)
				continue;
			if (only_user)
				error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
			else
				error = fget_unlocked(td, fd, &cap_event_rights, &fp);
			if (__predict_false(error != 0))
				return (error);
			selfdalloc(td, (void *)(uintptr_t)fd);
			ev = fo_poll(fp, flags, td->td_ucred, td);
			if (only_user)
				fput_only_user(fdp, fp);
			else
				fdrop(fp, td);
			if (ev != 0)
				n += selsetbits(ibits, obits, idx, bit, ev);
		}
	}

	td->td_retval[0] = n;
	return (0);
}

int
sys_poll(struct thread *td, struct poll_args *uap)
{
	struct timespec ts, *tsp;

	if (uap->timeout != INFTIM) {
		if (uap->timeout < 0)
			return (EINVAL);
		ts.tv_sec = uap->timeout / 1000;
		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
		tsp = &ts;
	} else
		tsp = NULL;

	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
}

/*
 * kfds points to an array in the kernel.
 */
int
kern_poll_kfds(struct thread *td, struct pollfd *kfds, u_int nfds,
    struct timespec *tsp, sigset_t *uset)
{
	sbintime_t sbt, precision, tmp;
	time_t over;
	struct timespec ts;
	int error;

	precision = 0;
	if (tsp != NULL) {
		if (!timespecvalid_interval(tsp))
			return (EINVAL);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			sbt = 0;
		else {
			ts = *tsp;
			if (ts.tv_sec > INT32_MAX / 2) {
				over = ts.tv_sec - INT32_MAX / 2;
				ts.tv_sec -= over;
			} else
				over = 0;
			tmp = tstosbt(ts);
			precision = tmp;
			precision >>= tc_precexp;
			if (TIMESEL(&sbt, tmp))
				sbt += tc_tick_sbt;
			sbt += tmp;
		}
	} else
		sbt = -1;

	if (uset != NULL) {
		error = kern_sigprocmask(td, SIG_SETMASK, uset,
		    &td->td_oldsigmask, 0);
		if (error)
			return (error);
		td->td_pflags |= TDP_OLDMASK;
	}

	seltdinit(td);
	/* Iterate until the timeout expires or descriptors become ready. */
	for (;;) {
		error = pollscan(td, kfds, nfds);
		if (error || td->td_retval[0] != 0)
			break;
		error = seltdwait(td, sbt, precision);
		if (error)
			break;
		error = pollrescan(td);
		if (error || td->td_retval[0] != 0)
			break;
	}
	seltdclear(td);

	/* poll is not restarted after signals... */
	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	if (uset != NULL) {
		/*
		 * Make sure that ast() is called on return to
		 * usermode and TDP_OLDMASK is cleared, restoring old
		 * sigmask.  If we didn't get interrupted, then the caller is
		 * likely not expecting a signal to hit that should normally be
		 * blocked by its signal mask, so we restore the mask before
		 * any signals could be delivered.
		 */
		if (error == EINTR)
			ast_sched(td, TDA_SIGSUSPEND);
		else
			ast_sched(td, TDA_PSELECT);
	}

	return (error);
}

int
sys_ppoll(struct thread *td, struct ppoll_args *uap)
{
	struct timespec ts, *tsp;
	sigset_t set, *ssp;
	int error;

	if (uap->ts != NULL) {
		error = copyin(uap->ts, &ts, sizeof(ts));
		if (error)
			return (error);
		tsp = &ts;
	} else
		tsp = NULL;
	if (uap->set != NULL) {
		error = copyin(uap->set, &set, sizeof(set));
		if (error)
			return (error);
		ssp = &set;
	} else
		ssp = NULL;
	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
}
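
/*
 * Illustrative userland sketch (not compiled here): the classic
 * ppoll(2) pattern the sigmask plumbing above exists for, atomically
 * unblocking SIGINT only for the duration of the wait.
 */
#if 0
#include <poll.h>
#include <signal.h>

static int
wait_readable_interruptible(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	sigset_t mask;

	sigprocmask(SIG_SETMASK, NULL, &mask);	/* fetch the current mask */
	sigdelset(&mask, SIGINT);		/* let SIGINT interrupt us */
	/* No timeout; fails with EINTR if SIGINT arrives while waiting. */
	return (ppoll(&pfd, 1, NULL, &mask));
}
#endif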

/*
 * ufds points to an array in user space.
 */
int
kern_poll(struct thread *td, struct pollfd *ufds, u_int nfds,
    struct timespec *tsp, sigset_t *set)
{
	struct pollfd *kfds;
	struct pollfd stackfds[32];
	int error;

	if (kern_poll_maxfds(nfds))
		return (EINVAL);
	if (nfds > nitems(stackfds))
		kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
	else
		kfds = stackfds;
	error = copyin(ufds, kfds, nfds * sizeof(*kfds));
	if (error != 0)
		goto out;

	error = kern_poll_kfds(td, kfds, nfds, tsp, set);
	if (error == 0)
		error = pollout(td, kfds, ufds, nfds);
#ifdef KTRACE
	if (error == 0 && KTRPOINT(td, KTR_STRUCT_ARRAY))
		ktrstructarray("pollfd", UIO_USERSPACE, ufds, nfds,
		    sizeof(*ufds));
#endif

out:
	if (nfds > nitems(stackfds))
		free(kfds, M_TEMP);
	return (error);
}

bool
kern_poll_maxfds(u_int nfds)
{

	/*
	 * This is kinda bogus.  We have fd limits, but that is not
	 * really related to the size of the pollfd array.  Make sure
	 * we let the process use at least FD_SETSIZE entries and at
	 * least enough for the system-wide limits.  We want to be reasonably
	 * safe, but not overly restrictive.
	 */
	return (nfds > maxfilesperproc && nfds > FD_SETSIZE);
}

static int
pollrescan(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;
	struct selinfo *si;
	struct filedesc *fdp;
	struct file *fp;
	struct pollfd *fd;
	int n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	stp = td->td_sel;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
		fd = (struct pollfd *)sfp->sf_cookie;
		si = sfp->sf_si;
		selfdfree(stp, sfp);
		/* If the selinfo wasn't cleared the event didn't fire. */
		if (si != NULL)
			continue;
		if (only_user)
			error = fget_only_user(fdp, fd->fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fd->fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0)) {
			fd->revents = POLLNVAL;
			n++;
			continue;
		}
		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		if (fd->revents != 0)
			n++;
	}
	stp->st_flags = 0;
	td->td_retval[0] = n;
	return (0);
}

static int
pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
{
	int error = 0;
	u_int i = 0;
	u_int n = 0;

	for (i = 0; i < nfd; i++) {
		error = copyout(&fds->revents, &ufds->revents,
		    sizeof(ufds->revents));
		if (error)
			return (error);
		if (fds->revents != 0)
			n++;
		fds++;
		ufds++;
	}
	td->td_retval[0] = n;
	return (0);
}

static int
pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
{
	struct filedesc *fdp;
	struct file *fp;
	int i, n, error;
	bool only_user;

	n = 0;
	fdp = td->td_proc->p_fd;
	only_user = FILEDESC_IS_ONLY_USER(fdp);
	for (i = 0; i < nfd; i++, fds++) {
		if (fds->fd < 0) {
			fds->revents = 0;
			continue;
		}
		if (only_user)
			error = fget_only_user(fdp, fds->fd, &cap_event_rights, &fp);
		else
			error = fget_unlocked(td, fds->fd, &cap_event_rights, &fp);
		if (__predict_false(error != 0)) {
			fds->revents = POLLNVAL;
			n++;
			continue;
		}
		/*
		 * Note: backend also returns POLLHUP and
		 * POLLERR if appropriate.
		 */
		selfdalloc(td, fds);
		fds->revents = fo_poll(fp, fds->events,
		    td->td_ucred, td);
		if (only_user)
			fput_only_user(fdp, fp);
		else
			fdrop(fp, td);
		/*
		 * POSIX requires that POLLOUT never be set simultaneously
		 * with POLLHUP.
		 */
		if ((fds->revents & POLLHUP) != 0)
			fds->revents &= ~POLLOUT;

		if (fds->revents != 0)
			n++;
	}
	td->td_retval[0] = n;
	return (0);
}

/*
 * XXX This was created specifically to support netncp and netsmb.  This
 * allows the caller to specify a socket to wait for events on.  It returns
 * 0 if any events matched and an error otherwise.  There is no way to
 * determine which events fired.
 */
int
selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
{
	struct timeval rtv;
	sbintime_t asbt, precision, rsbt;
	int error;

	precision = 0;	/* stupid gcc! */
	if (tvp != NULL) {
		rtv = *tvp;
		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
		    rtv.tv_usec >= 1000000)
			return (EINVAL);
		if (!timevalisset(&rtv))
			asbt = 0;
		else if (rtv.tv_sec <= INT32_MAX) {
			rsbt = tvtosbt(rtv);
			precision = rsbt;
			precision >>= tc_precexp;
			if (TIMESEL(&asbt, rsbt))
				asbt += tc_tick_sbt;
			if (asbt <= SBT_MAX - rsbt)
				asbt += rsbt;
			else
				asbt = -1;
		} else
			asbt = -1;
	} else
		asbt = -1;
	seltdinit(td);
	/*
	 * Iterate until the timeout expires or the socket becomes ready.
	 */
	for (;;) {
		selfdalloc(td, NULL);
		if (so->so_proto->pr_sopoll(so, events, td) != 0) {
			error = 0;
			break;
		}
		error = seltdwait(td, asbt, precision);
		if (error)
			break;
	}
	seltdclear(td);
	/* XXX Duplicates ncp/smb behavior. */
	if (error == ERESTART)
		error = 0;
	return (error);
}

/*
 * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
 * have two select sets, one for read and another for write.
 */
static void
selfdalloc(struct thread *td, void *cookie)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp->st_free1 == NULL)
		stp->st_free1 = malloc(sizeof(*stp->st_free1), M_SELFD, M_WAITOK|M_ZERO);
	stp->st_free1->sf_td = stp;
	stp->st_free1->sf_cookie = cookie;
	if (stp->st_free2 == NULL)
		stp->st_free2 = malloc(sizeof(*stp->st_free2), M_SELFD, M_WAITOK|M_ZERO);
	stp->st_free2->sf_td = stp;
	stp->st_free2->sf_cookie = cookie;
}

static void
selfdfree(struct seltd *stp, struct selfd *sfp)
{
	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
	/*
	 * Paired with doselwakeup.
	 */
	if (atomic_load_acq_ptr((uintptr_t *)&sfp->sf_si) != (uintptr_t)NULL) {
		mtx_lock(sfp->sf_mtx);
		if (sfp->sf_si != NULL) {
			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
		}
		mtx_unlock(sfp->sf_mtx);
	}
	free(sfp, M_SELFD);
}

/* Drain the waiters tied to all the selfd belonging to the specified selinfo. */
void
seldrain(struct selinfo *sip)
{

	/*
	 * This is already handled by doselwakeup(), so it is enough to
	 * call it.  Eventually, the caller should take care to avoid
	 * races between a thread calling select()/poll() and file
	 * descriptor detach, but, again, the races are just the same
	 * as for selwakeup().
	 */
	doselwakeup(sip, -1);
}

/*
 * Record a select request.
 */
void
selrecord(struct thread *selector, struct selinfo *sip)
{
	struct selfd *sfp;
	struct seltd *stp;
	struct mtx *mtxp;

	stp = selector->td_sel;
	/*
	 * Don't record when doing a rescan.
	 */
	if (stp->st_flags & SELTD_RESCAN)
		return;
	/*
	 * Grab one of the preallocated descriptors.
	 */
	sfp = NULL;
	if ((sfp = stp->st_free1) != NULL)
		stp->st_free1 = NULL;
	else if ((sfp = stp->st_free2) != NULL)
		stp->st_free2 = NULL;
	else
		panic("selrecord: No free selfd on selq");
	mtxp = sip->si_mtx;
	if (mtxp == NULL)
		mtxp = mtx_pool_find(mtxpool_select, sip);
	/*
	 * Initialize the sfp and queue it in the thread.
	 */
	sfp->sf_si = sip;
	sfp->sf_mtx = mtxp;
	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
	/*
	 * Now that we've locked the sip, check for initialization.
	 */
	mtx_lock(mtxp);
	if (sip->si_mtx == NULL) {
		sip->si_mtx = mtxp;
		TAILQ_INIT(&sip->si_tdlist);
	}
	/*
	 * Add this thread to the list of selfds listening on this selinfo.
	 */
	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
	mtx_unlock(sip->si_mtx);
}

/* Wake up a selecting thread. */
void
selwakeup(struct selinfo *sip)
{
	doselwakeup(sip, -1);
}

/* Wake up a selecting thread, and set its priority. */
void
selwakeuppri(struct selinfo *sip, int pri)
{
	doselwakeup(sip, pri);
}
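
/*
 * Illustrative sketch (not compiled here) of how a driver typically
 * pairs the two halves: its d_poll method calls selrecord() when no
 * data is ready, and its interrupt path calls selwakeup() once data
 * arrives.  "struct mydev_softc" and its fields are made up for the
 * example.
 */
#if 0
static int
mydev_poll(struct cdev *dev, int events, struct thread *td)
{
	struct mydev_softc *sc = dev->si_drv1;
	int revents = 0;

	mtx_lock(&sc->sc_mtx);
	if ((events & (POLLIN | POLLRDNORM)) != 0) {
		if (sc->sc_ready)
			revents = events & (POLLIN | POLLRDNORM);
		else
			selrecord(td, &sc->sc_rsel);	/* queue this thread */
	}
	mtx_unlock(&sc->sc_mtx);
	return (revents);
}

static void
mydev_intr(struct mydev_softc *sc)
{

	mtx_lock(&sc->sc_mtx);
	sc->sc_ready = 1;
	selwakeup(&sc->sc_rsel);	/* unblock the threads queued above */
	mtx_unlock(&sc->sc_mtx);
}
#endif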

/*
 * Do a wakeup when a selectable event occurs.
 */
static void
doselwakeup(struct selinfo *sip, int pri)
{
	struct selfd *sfp;
	struct selfd *sfn;
	struct seltd *stp;

	/* If it's not initialized there can't be any waiters. */
	if (sip->si_mtx == NULL)
		return;
	/*
	 * Locking the selinfo locks all selfds associated with it.
	 */
	mtx_lock(sip->si_mtx);
	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
		/*
		 * Once we remove this sfp from the list and clear the
		 * sf_si, seltdclear will know to ignore this si.
		 */
		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
		stp = sfp->sf_td;
		mtx_lock(&stp->st_mtx);
		stp->st_flags |= SELTD_PENDING;
		cv_broadcastpri(&stp->st_wait, pri);
		mtx_unlock(&stp->st_mtx);
		/*
		 * Paired with selfdfree.
		 *
		 * Storing this only after the wakeup provides an invariant that
		 * stp is not used after selfdfree returns.
		 */
		atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si, (uintptr_t)NULL);
	}
	mtx_unlock(sip->si_mtx);
}

static void
seltdinit(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp != NULL) {
		MPASS(stp->st_flags == 0);
		MPASS(STAILQ_EMPTY(&stp->st_selq));
		return;
	}
	stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
	cv_init(&stp->st_wait, "select");
	stp->st_flags = 0;
	STAILQ_INIT(&stp->st_selq);
	td->td_sel = stp;
}

static int
seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
{
	struct seltd *stp;
	int error;

	stp = td->td_sel;
	/*
	 * An event of interest may occur while we do not hold the seltd
	 * locked so check the pending flag before we sleep.
	 */
	mtx_lock(&stp->st_mtx);
	/*
	 * Any further calls to selrecord will be a rescan.
	 */
	stp->st_flags |= SELTD_RESCAN;
	if (stp->st_flags & SELTD_PENDING) {
		mtx_unlock(&stp->st_mtx);
		return (0);
	}
	if (sbt == 0)
		error = EWOULDBLOCK;
	else if (sbt != -1)
		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
		    sbt, precision, C_ABSOLUTE);
	else
		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
	mtx_unlock(&stp->st_mtx);

	return (error);
}

void
seltdfini(struct thread *td)
{
	struct seltd *stp;

	stp = td->td_sel;
	if (stp == NULL)
		return;
	MPASS(stp->st_flags == 0);
	MPASS(STAILQ_EMPTY(&stp->st_selq));
	if (stp->st_free1)
		free(stp->st_free1, M_SELFD);
	if (stp->st_free2)
		free(stp->st_free2, M_SELFD);
	td->td_sel = NULL;
	cv_destroy(&stp->st_wait);
	mtx_destroy(&stp->st_mtx);
	free(stp, M_SELECT);
}

/*
 * Remove the references to the thread from all of the objects we were
 * polling.
 */
static void
seltdclear(struct thread *td)
{
	struct seltd *stp;
	struct selfd *sfp;
	struct selfd *sfn;

	stp = td->td_sel;
	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
		selfdfree(stp, sfp);
	stp->st_flags = 0;
}

static void selectinit(void *);
SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
static void
selectinit(void *dummy __unused)
{

	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
}

/*
 * Set up a syscall return value that follows the convention specified for
 * posix_* functions.
 */
int
kern_posix_error(struct thread *td, int error)
{

	if (error <= 0)
		return (error);
	td->td_errno = error;
	td->td_pflags |= TDP_NERRNO;
	td->td_retval[0] = error;
	return (0);
}
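
/*
 * Illustrative userland sketch (not compiled here) of the convention
 * implemented above: posix_fallocate(2) returns the error number
 * directly instead of -1 with errno.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

static int
reserve(int fd, off_t len)
{
	int error;

	error = posix_fallocate(fd, 0, len);
	if (error != 0)		/* no errno check; the result is the error */
		fprintf(stderr, "posix_fallocate: %s\n", strerror(error));
	return (error);
}
#endif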

int
kcmp_cmp(uintptr_t a, uintptr_t b)
{
	if (a == b)
		return (0);
	else if (a < b)
		return (1);
	return (2);
}

static int
kcmp_pget(struct thread *td, pid_t pid, struct proc **pp)
{
	int error;

	if (pid == td->td_proc->p_pid) {
		*pp = td->td_proc;
		return (0);
	}
	error = pget(pid, PGET_NOTID | PGET_CANDEBUG | PGET_NOTWEXIT |
	    PGET_HOLD, pp);
	MPASS(*pp != td->td_proc);
	return (error);
}

int
kern_kcmp(struct thread *td, pid_t pid1, pid_t pid2, int type,
    uintptr_t idx1, uintptr_t idx2)
{
	struct proc *p1, *p2;
	struct file *fp1, *fp2;
	int error, res;

	res = -1;
	p1 = p2 = NULL;
	error = kcmp_pget(td, pid1, &p1);
	if (error == 0)
		error = kcmp_pget(td, pid2, &p2);
	if (error != 0)
		goto out;

	switch (type) {
	case KCMP_FILE:
	case KCMP_FILEOBJ:
		error = fget_remote(td, p1, idx1, &fp1);
		if (error == 0) {
			error = fget_remote(td, p2, idx2, &fp2);
			if (error == 0) {
				if (type == KCMP_FILEOBJ)
					res = fo_cmp(fp1, fp2, td);
				else
					res = kcmp_cmp((uintptr_t)fp1,
					    (uintptr_t)fp2);
				fdrop(fp2, td);
			}
			fdrop(fp1, td);
		}
		break;
	case KCMP_FILES:
		res = kcmp_cmp((uintptr_t)p1->p_fd, (uintptr_t)p2->p_fd);
		break;
	case KCMP_SIGHAND:
		res = kcmp_cmp((uintptr_t)p1->p_sigacts,
		    (uintptr_t)p2->p_sigacts);
		break;
	case KCMP_VM:
		res = kcmp_cmp((uintptr_t)p1->p_vmspace,
		    (uintptr_t)p2->p_vmspace);
		break;
	default:
		error = EINVAL;
		break;
	}

out:
	if (p1 != NULL && p1 != td->td_proc)
		PRELE(p1);
	if (p2 != NULL && p2 != td->td_proc)
		PRELE(p2);

	td->td_retval[0] = res;
	return (error);
}

int
sys_kcmp(struct thread *td, struct kcmp_args *uap)
{
	return (kern_kcmp(td, uap->pid1, uap->pid2, uap->type,
	    uap->idx1, uap->idx2));
}
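
/*
 * Illustrative userland sketch (not compiled here; the wrapper
 * prototype and the header providing KCMP_FILE are assumptions):
 * decide whether two descriptors of one process name the same open
 * file, e.g. after dup(2).
 */
#if 0
#include <sys/types.h>
#include <unistd.h>

static int
same_open_file(pid_t pid, int fd1, int fd2)
{

	/* 0 means both indices resolved to the same struct file. */
	return (kcmp(pid, pid, KCMP_FILE, fd1, fd2) == 0);
}
#endif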

int
file_kcmp_generic(struct file *fp1, struct file *fp2, struct thread *td)
{
	if (fp1->f_type != fp2->f_type)
		return (3);
	return (kcmp_cmp((uintptr_t)fp1->f_data, (uintptr_t)fp2->f_data));
}

int
exterr_to_ue(struct thread *td, struct uexterror *ue)
{
	if ((td->td_pflags2 & TDP2_EXTERR) == 0)
		return (ENOENT);

	memset(ue, 0, sizeof(*ue));
	ue->error = td->td_kexterr.error;
	ue->cat = td->td_kexterr.cat;
	ue->src_line = td->td_kexterr.src_line;
	ue->p1 = td->td_kexterr.p1;
	ue->p2 = td->td_kexterr.p2;
	if (td->td_kexterr.msg != NULL)
		strlcpy(ue->msg, td->td_kexterr.msg, sizeof(ue->msg));
	return (0);
}

void
exterr_copyout(struct thread *td)
{
	struct uexterror ue;
	ksiginfo_t ksi;
	void *uloc;
	size_t sz;
	int error;

	MPASS((td->td_pflags2 & TDP2_UEXTERR) != 0);

	uloc = (char *)td->td_exterr_ptr + __offsetof(struct uexterror,
	    error);
	error = exterr_to_ue(td, &ue);
	if (error != 0) {
		ue.error = 0;
		sz = sizeof(ue.error);
	} else {
		sz = sizeof(ue) - __offsetof(struct uexterror, error);
	}
	error = copyout(&ue.error, uloc, sz);
	if (error != 0) {
		td->td_pflags2 &= ~TDP2_UEXTERR;
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_code = SEGV_ACCERR;
		ksi.ksi_addr = uloc;
		trapsignal(td, &ksi);
	}
}

int
sys_exterrctl(struct thread *td, struct exterrctl_args *uap)
{
	uint32_t ver;
	int error;

	if ((uap->flags & ~(EXTERRCTLF_FORCE)) != 0)
		return (EINVAL);
	switch (uap->op) {
	case EXTERRCTL_ENABLE:
		if ((td->td_pflags2 & TDP2_UEXTERR) != 0 &&
		    (uap->flags & EXTERRCTLF_FORCE) == 0)
			return (EBUSY);
		td->td_pflags2 &= ~TDP2_UEXTERR;
		error = copyin(uap->ptr, &ver, sizeof(ver));
		if (error != 0)
			return (error);
		if (ver != UEXTERROR_VER)
			return (EINVAL);
		td->td_pflags2 |= TDP2_UEXTERR;
		td->td_exterr_ptr = uap->ptr;
		return (0);
	case EXTERRCTL_DISABLE:
		if ((td->td_pflags2 & TDP2_UEXTERR) == 0)
			return (EINVAL);
		td->td_pflags2 &= ~TDP2_UEXTERR;
		return (0);
	default:
		return (EINVAL);
	}
}
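
/*
 * Illustrative userland sketch (not compiled here).  The header name,
 * the wrapper prototype, and the "ver" field name are assumptions; the
 * copyin() of the version word above only implies that the structure
 * registered with EXTERRCTL_ENABLE starts with UEXTERROR_VER.
 */
#if 0
#include <exterr.h>		/* header name assumed */

static struct uexterror ue = { .ver = UEXTERROR_VER };

static int
enable_exterr(void)
{

	/* The kernel validates the version before latching the pointer. */
	return (exterrctl(EXTERRCTL_ENABLE, 0, &ue));
}
#endif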

int
exterr_set(int eerror, int category, const char *mmsg, uintptr_t pp1,
    uintptr_t pp2, int line)
{
	struct thread *td;

	td = curthread;
	if ((td->td_pflags2 & TDP2_UEXTERR) != 0) {
		td->td_pflags2 |= TDP2_EXTERR;
		td->td_kexterr.error = eerror;
		td->td_kexterr.cat = category;
		td->td_kexterr.msg = mmsg;
		td->td_kexterr.p1 = pp1;
		td->td_kexterr.p2 = pp2;
		td->td_kexterr.src_line = line;
		ktrexterr(td);
	}
	return (eerror);
}