/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2013, 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disk.h>
#include <sys/fail.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_rdwr_t	vn_io_fault;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;
static fo_mmap_t	vn_mmap;

struct fileops vnops = {
	.fo_read = vn_io_fault,
	.fo_write = vn_io_fault,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = vn_seek,
	.fo_fill_kinfo = vn_fill_kinfo,
	.fo_mmap = vn_mmap,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static const int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static int vn_io_fault_prefault = 0;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RW,
    &vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");

/*
 * Returns true if vn_io_fault mode of handling the i/o request should
 * be used.
 */
static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}

/*
 * Structure used to pass arguments to vn_io_fault1(), to do either
 * file- or vnode-based I/O calls.
 */
struct vn_io_fault_args {
	enum {
		VN_IO_FAULT_FOP,
		VN_IO_FAULT_VOP
	} kind;
	struct ucred *cred;
	int flags;
	union {
		struct fop_args_tag {
			struct file *fp;
			fo_rdwr_t *doio;
		} fop_args;
		struct vop_args_tag {
			struct vnode *vp;
		} vop_args;
	} args;
};

static int	vn_io_fault1(struct vnode *vp, struct uio *uio,
    struct vn_io_fault_args *args, struct thread *td);

int
vn_open(ndp, flagp, cmode, fp)
	struct nameidata *ndp;
	int *flagp, cmode;
	struct file *fp;
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

/*
 * Common code for vnode open operations via a name lookup.
 * Lookup the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;

restart:
	fmode = *flagp;
	if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT |
	    O_EXCL | O_DIRECTORY))
		return (EINVAL);
	else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		/*
		 * Set NOCACHE to avoid flushing the cache when
		 * rolling in many files at once.
		 */
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF | NOCACHE;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0)
				ndp->ni_cnd.cn_flags |= MAKEENTRY;
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if (!(fmode & FWRITE))
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	error = vn_open_vnode(vp, fmode, cred, td, fp);
	if (error)
		goto bad;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * Common code for vnode open operations once a vnode is located.
 * Check permissions, and call the VOP_OPEN routine.
 */
int
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp)
{
	accmode_t accmode;
	struct flock lf;
	int error, lock_flags, type;

	if (vp->v_type == VLNK)
		return (EMLINK);
	if (vp->v_type == VSOCK)
		return (EOPNOTSUPP);
	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
		return (ENOTDIR);
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	if (fmode & O_CREAT)
		accmode |= VCREAT;
	if (fmode & O_VERIFY)
		accmode |= VVERIFY;
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		return (error);

	accmode &= ~(VCREAT | VVERIFY);
#endif
	if ((fmode & O_CREAT) == 0) {
		if (accmode & VWRITE) {
			error = vn_writechk(vp);
			if (error)
				return (error);
		}
		if (accmode) {
			error = VOP_ACCESS(vp, accmode, cred, td);
			if (error)
				return (error);
		}
	}
	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0)
		return (error);

	if (fmode & (O_EXLOCK | O_SHLOCK)) {
		KASSERT(fp != NULL, ("open with flock requires fp"));
		lock_flags = VOP_ISLOCKED(vp);
		VOP_UNLOCK(vp, 0);
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (fmode & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		type = F_FLOCK;
		if ((fmode & FNONBLOCK) == 0)
			type |= F_WAIT;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
		if (error == 0)
			fp->f_flag |= FHASLOCK;
		vn_lock(vp, lock_flags | LK_RETRY);
		if (error == 0 && vp->v_iflag & VI_DOOMED)
			error = ENOENT;

		/*
		 * Another thread might have used this vnode as an
		 * executable while the vnode lock was dropped.
		 * Ensure the vnode is still able to be opened for
		 * writing after the lock has been obtained.
		 */
		if (error == 0 && accmode & VWRITE)
			error = vn_writechk(vp);

		if (error != 0) {
			fp->f_flag |= FOPENFAILED;
			fp->f_vnode = vp;
			if (fp->f_ops == &badfileops) {
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
			}
			vref(vp);
		}
	}
	if (error == 0 && fmode & FWRITE) {
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	}
	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 */
int
vn_writechk(vp)
	register struct vnode *vp;
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (VOP_IS_TEXT(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
int
vn_close(vp, flags, file_cred, td)
	register struct vnode *vp;
	int flags;
	struct ucred *file_cred;
	struct thread *td;
{
	struct mount *mp;
	int error, lock_flags;

	if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
	    MNT_EXTENDED_SHARED(vp->v_mount))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	if ((flags & (FWRITE | FOPENFAILED)) == FWRITE) {
		VNASSERT(vp->v_writecount > 0, vp,
		    ("vn_close: negative writecount"));
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	vput(vp);
	vn_finished_write(mp);
	return (error);
}

/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
	if (fp->f_flag & FRDAHEAD)
		return (fp->f_seqcount << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		fp->f_seqcount += howmany(uio->uio_resid, 16384);
		if (fp->f_seqcount > IO_SEQMAX)
			fp->f_seqcount = IO_SEQMAX;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw-down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

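/*
 * Editor's note (illustrative arithmetic, not part of the original
 * sources): a single 64KB read starting at the previous f_nextoff
 * advances f_seqcount by howmany(65536, 16384) == 4, and the value
 * handed back to the caller is f_seqcount << IO_SEQSHIFT, i.e. a
 * read-ahead hint that grows with the amount of sequential I/O until
 * it saturates at IO_SEQMAX << IO_SEQSHIFT.
 */
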
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error, lock_flags;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((ioflg & IO_RANGELOCKED) == 0) {
			if (rw == UIO_READ) {
				rl_cookie = vn_rangelock_rlock(vp, offset,
				    offset + len);
			} else {
				rl_cookie = vn_rangelock_wlock(vp, offset,
				    offset + len);
			}
		} else
			rl_cookie = NULL;
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (do_vn_io_fault(vp, &auio)) {
			args.kind = VN_IO_FAULT_VOP;
			args.cred = cred;
			args.flags = ioflg;
			args.args.vop_args.vp = vp;
			error = vn_io_fault1(vp, &auio, &args, td);
		} else if (rw == UIO_READ) {
			error = VOP_READ(vp, &auio, ioflg, cred);
		} else /* if (rw == UIO_WRITE) */ {
			error = VOP_WRITE(vp, &auio, ioflg, cred);
		}
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
		if (mp != NULL)
			vn_finished_write(mp);
	}
out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

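/*
 * Editor's sketch (not compiled in; the buffer and its size are
 * illustrative): a typical in-kernel caller reads a small header into
 * a stack buffer and treats a short transfer as an error.  With ioflg
 * 0, vn_rdwr() takes the range lock and the vnode lock itself.
 */
#if 0
	char hdr[512];
	ssize_t resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, hdr, sizeof(hdr), (off_t)0,
	    UIO_SYSSPACE, 0, td->td_ucred, NOCRED, &resid, td);
	if (error == 0 && resid != 0)
		error = EIO;		/* short read */
#endif
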
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call kern_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred,
    file_cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	void *base;
	size_t len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *active_cred;
	struct ucred *file_cred;
	size_t *aresid;
	struct thread *td;
{
	int error = 0;
	ssize_t iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/*
	 * Caller only wants the current f_offset value.  Assume that
	 * reads of the long and shorter integer types are atomic.
	 */
	if ((flags & FOF_NOLOCK) != 0)
		return (fp->f_offset);
#endif

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	if ((flags & FOF_NOLOCK) != 0) {
		if ((flags & FOF_NOUPDATE) == 0)
			fp->f_offset = val;
		if ((flags & FOF_NEXTOFF) != 0)
			fp->f_nextoff = val;
		return;
	}
#endif

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF) != 0)
		fp->f_nextoff = val;
	if ((flags & FOF_NOLOCK) == 0) {
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}

void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		foffset_unlock(fp, uio->uio_offset, flags);
}

static int
get_advice(struct file *fp, struct uio *uio)
{
	struct mtx *mtxp;
	int ret;

	ret = POSIX_FADV_NORMAL;
	if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG)
		return (ret);

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if (fp->f_advice != NULL &&
	    uio->uio_offset >= fp->f_advice->fa_start &&
	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
		ret = fp->f_advice->fa_advice;
	mtx_unlock(mtxp);
	return (ret);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	off_t orig_offset;
	int error, ioflag;
	int advice;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	advice = get_advice(fp, uio);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	orig_offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    orig_offset != uio->uio_offset)
		/*
		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
		 * for the backing file after a POSIX_FADV_NOREUSE
		 * read(2).
		 */
		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
		    POSIX_FADV_DONTNEED);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mount *mp;
	off_t orig_offset;
	int error, ioflag, lock_flags;
	int advice;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	orig_offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    orig_offset != uio->uio_offset)
		/*
		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
		 * for the backing file after a POSIX_FADV_NOREUSE
		 * write(2).
		 */
		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
		    POSIX_FADV_DONTNEED);
unlock:
	return (error);
}

/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that the thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * VOP will succeed (ignoring the genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
 * i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in the curthread->td_ma,
 * instead of doing uiomove().  A helper function
 * vn_io_fault_uiomove() converts uiomove request into
 * uiomove_fromphys() over td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */

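/*
 * Editor's sketch (abbreviated; the names bp, blkoffset and xfersize
 * are illustrative): a filesystem that sets MNTK_NO_IOPF replaces the
 * plain uiomove() in its VOP_READ()/VOP_WRITE() copy loop with
 * vn_io_fault_uiomove(), so that when the request is executed with
 * pagefaults disabled the copy is satisfied from the pages held in
 * td_ma rather than by touching the user-space iov_base.
 */
#if 0
	error = vn_io_fault_uiomove((char *)bp->b_data + blkoffset,
	    (int)xfersize, uio);
#endif
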
/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{

	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		return ((args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td));
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			return (VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		} else if (uio->uio_rw == UIO_WRITE) {
			return (VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		}
		break;
	}
	panic("vn_io_fault_doio: unknown kind of io %d %d", args->kind,
	    uio->uio_rw);
}

static int
vn_io_fault_touch(char *base, const struct uio *uio)
{
	int r;

	r = fubyte(base);
	if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1))
		return (EFAULT);
	return (0);
}

static int
vn_io_fault_prefault_user(const struct uio *uio)
{
	char *base;
	const struct iovec *iov;
	size_t len;
	ssize_t resid;
	int error, i;

	KASSERT(uio->uio_segflg == UIO_USERSPACE,
	    ("vn_io_fault_prefault userspace"));

	error = i = 0;
	iov = uio->uio_iov;
	resid = uio->uio_resid;
	base = iov->iov_base;
	len = iov->iov_len;
	while (resid > 0) {
		error = vn_io_fault_touch(base, uio);
		if (error != 0)
			break;
		if (len < PAGE_SIZE) {
			if (len != 0) {
				error = vn_io_fault_touch(base + len - 1, uio);
				if (error != 0)
					break;
				resid -= len;
			}
			if (++i >= uio->uio_iovcnt)
				break;
			iov = uio->uio_iov + i;
			base = iov->iov_base;
			len = iov->iov_len;
		} else {
			len -= PAGE_SIZE;
			base += PAGE_SIZE;
			resid -= PAGE_SIZE;
		}
	}
	return (error);
}

/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
 */
static int
vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
    struct thread *td)
{
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	vm_page_t *prev_td_ma;
	vm_prot_t prot;
	vm_offset_t addr, end;
	size_t len, resid;
	ssize_t adv;
	int error, cnt, save, saveheld, prev_td_ma_cnt;

	if (vn_io_fault_prefault) {
		error = vn_io_fault_prefault_user(uio);
		if (error != 0)
			return (error); /* Or ignore ? */
	}

	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;

	/*
	 * The UFS follows IO_UNIT directive and replays back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	save = vm_fault_disable_pagefaults();
	error = vn_io_fault_doio(args, uio, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}
		if (len > io_hold_cnt * PAGE_SIZE)
			len = io_hold_cnt * PAGE_SIZE;
		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		if (end < addr) {
			error = EFAULT;
			break;
		}
		cnt = atop(end - trunc_page(addr));
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use partial
		 * page.  +2 accounts for such a situation.
		 */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			error = EFAULT;
			break;
		}
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = vn_io_fault_doio(args, &short_uio, td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	vm_fault_enable_pagefaults(save);
	free(uio_clone, M_IOV);
	return (error);
}

static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	fo_rdwr_t *doio;
	struct vnode *vp;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error;

	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
	vp = fp->f_vnode;
	foffset_lock_uio(fp, uio, flags);
	if (do_vn_io_fault(vp, uio)) {
		args.kind = VN_IO_FAULT_FOP;
		args.args.fop_args.fp = fp;
		args.args.fop_args.doio = doio;
		args.cred = active_cred;
		args.flags = flags | FOF_OFFSET;
		if (uio->uio_rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		} else if ((fp->f_flag & O_APPEND) != 0 ||
		    (flags & FOF_OFFSET) == 0) {
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		}
		error = vn_io_fault1(vp, uio, &args, td);
		vn_rangelock_unlock(vp, rl_cookie);
	} else {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	}
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for io->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems that set MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;
	int error, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}

int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
	struct thread *td;
	vm_offset_t iov_base;
	int cnt, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove_fromphys(ma, offset, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
	switch (uio->uio_rw) {
	case UIO_WRITE:
		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
		    offset, cnt);
		break;
	case UIO_READ:
		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
		    cnt);
		break;
	}
	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
	uio->uio_iov->iov_len -= cnt;
	uio->uio_resid -= cnt;
	uio->uio_offset += cnt;
	return (0);
}

/*
 * File table truncate routine.
 */
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	void *rl_cookie;
	int error;

	vp = fp->f_vnode;

	/*
	 * Lock the whole range for truncation.  Otherwise split i/o
	 * might happen partly before and partly after the truncation.
	 */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		goto out1;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_writechk(vp);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(fp, sb, active_cred, td)
	struct file *fp;
	struct stat *sb;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);

	return (error);
}

/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(vp, sb, active_cred, file_cred, td)
	struct vnode *vp;
	register struct stat *sb;
	struct ucred *active_cred;
	struct ucred *file_cred;
	struct thread *td;
{
	struct vattr vattr;
	register struct vattr *vap;
	int error;
	u_short mode;

#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atim = vap->va_atime;
	sb->st_mtim = vap->va_mtime;
	sb->st_ctim = vap->va_ctime;
	sb->st_birthtim = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */
	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(fp, com, data, active_cred, td)
	struct file *fp;
	u_long com;
	void *data;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vattr vattr;
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
	switch (vp->v_type) {
	case VDIR:
	case VREG:
		switch (com) {
		case FIONREAD:
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			return (error);
		case FIONBIO:
		case FIOASYNC:
			return (0);
		default:
			return (VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td));
		}
	default:
		return (ENOTTY);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(fp, events, active_cred, td)
	struct file *fp;
	int events;
	struct ucred *active_cred;
	struct thread *td;
{
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif

	error = VOP_POLL(vp, events, fp->f_cred, td);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock called with no locktype."));
	do {
#ifdef DEBUG_VFS_LOCKS
		KASSERT(vp->v_holdcnt != 0,
		    ("vn_lock %p: zero hold count", vp));
#endif
		error = VOP_LOCK1(vp, flags, file, line);
		flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
		KASSERT((flags & LK_RETRY) == 0 || error == 0,
		    ("LK_RETRY set with incompatible flags (0x%x) or an error occurred (%d)",
		    flags, error));
		/*
		 * Callers specify LK_RETRY if they wish to get dead vnodes.
		 * If RETRY is not set, we return ENOENT instead.
		 */
		if (error == 0 && vp->v_iflag & VI_DOOMED &&
		    (flags & LK_RETRY) == 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
			break;
		}
	} while (flags & LK_RETRY && error != 0);
	return (error);
}

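/*
 * Editor's sketch: a caller that cannot tolerate a doomed vnode omits
 * LK_RETRY and handles the ENOENT return; a caller that can, passes
 * LK_RETRY and the lock attempt never fails.
 */
#if 0
	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		return (error);		/* vnode was reclaimed */
#endif
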
/*
 * File table vnode close routine.
 */
static int
vn_closefile(fp, td)
	struct file *fp;
	struct thread *td;
{
	struct vnode *vp;
	struct flock lf;
	int error;

	vp = fp->f_vnode;
	fp->f_ops = &badfileops;

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK)
		vref(vp);

	error = vn_close(vp, fp->f_flag, fp->f_cred, td);

	if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		vrele(vp);
	}
	return (error);
}

static bool
vn_suspendable(struct mount *mp)
{

	return (mp->mnt_op->vfs_susp_clean != NULL);
}

/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
static int
vn_start_write_locked(struct mount *mp, int flags)
{
	int error, mflags;

	mtx_assert(MNT_MTX(mp), MA_OWNED);
	error = 0;

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
		    (flags & PCATCH) : 0) | (PUSER - 1);
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
			    "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	if (!vn_suspendable(mp)) {
		if (vp != NULL || (flags & V_MNTREF) != 0)
			vfs_rel(mp);
		return (0);
	}

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL && (flags & V_MNTREF) == 0)
		MNT_REF(mp);

	return (vn_start_write_locked(mp, flags));
}

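/*
 * Editor's sketch of the canonical bracket around a metadata update,
 * mirroring vn_truncate() above (error handling abbreviated):
 */
#if 0
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error != 0)
		return (error);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_SETATTR(vp, &vattr, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
#endif
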
/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	if (!vn_suspendable(mp)) {
		if (vp != NULL || (flags & V_MNTREF) != 0)
			vfs_rel(mp);
		return (0);
	}

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL && (flags & V_MNTREF) == 0)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
	    ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
	    "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(mp)
	struct mount *mp;
{
	if (mp == NULL || !vn_suspendable(mp))
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed.  If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(mp)
	struct mount *mp;
{
	if (mp == NULL || !vn_suspendable(mp))
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(struct mount *mp, int flags)
{
	int error;

	MPASS(vn_suspendable(mp));

	MNT_ILOCK(mp);
	if (mp->mnt_susp_owner == curthread) {
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);

	/*
	 * Unmount holds a write reference on the mount point.  If we
	 * own busy reference and drain for writers, we deadlock with
	 * the reference draining in the unmount path.  Callers of
	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if
	 * vfs_busy() reference is owned and caller is not in the
	 * unmount context.
	 */
	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		MNT_IUNLOCK(mp);
		return (EBUSY);
	}

	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
		vfs_write_resume(mp, 0);
	return (error);
}

/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(struct mount *mp, int flags)
{

	MPASS(vn_suspendable(mp));

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		mp->mnt_susp_owner = NULL;
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
		curthread->td_pflags &= ~TDP_IGNSUSP;
		if ((flags & VR_START_WRITE) != 0) {
			MNT_REF(mp);
			mp->mnt_writeopcount++;
		}
		MNT_IUNLOCK(mp);
		if ((flags & VR_NO_SUSPCLR) == 0)
			VFS_SUSP_CLEAN(mp);
	} else if ((flags & VR_START_WRITE) != 0) {
		MNT_REF(mp);
		vn_start_write_locked(mp, 0);
	} else {
		MNT_IUNLOCK(mp);
	}
}

/*
 * Helper loop around vfs_write_suspend() for filesystem unmount VFS
 * methods.
 */
int
vfs_write_suspend_umnt(struct mount *mp)
{
	int error;

	MPASS(vn_suspendable(mp));
	KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
	    ("vfs_write_suspend_umnt: recursed"));

	/* dounmount() already called vn_start_write(). */
	for (;;) {
		vn_finished_write(mp);
		error = vfs_write_suspend(mp, 0);
		if (error != 0) {
			vn_start_write(NULL, &mp, V_WAIT);
			return (error);
		}
		MNT_ILOCK(mp);
		if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			break;
		MNT_IUNLOCK(mp);
		vn_start_write(NULL, &mp, V_WAIT);
	}
	mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
	wakeup(&mp->mnt_flag);
	MNT_IUNLOCK(mp);
	curthread->td_pflags |= TDP_IGNSUSP;
	return (0);
}

/*
 * Implement kqueues for files by translating them to vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}

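/*
 * Editor's sketch (the attribute name and buffer size are
 * illustrative): with the vnode already locked, IO_NODELOCKED is
 * passed; on success, buflen is updated to the number of bytes
 * actually read.
 */
#if 0
	char buf[128];
	int buflen, error;

	buflen = sizeof(buf);
	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
	    "posix1e.acl_access", &buflen, buf, td);
#endif
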
/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

static int
vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags,
    struct vnode **rvp)
{

	return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp));
}

int
vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp)
{

	return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino,
	    lkflags, rvp));
}

int
vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
    int lkflags, struct vnode **rvp)
{
	struct mount *mp;
	int ltype, error;

	ASSERT_VOP_LOCKED(vp, "vn_vget_ino_get");
	mp = vp->v_mount;
	ltype = VOP_ISLOCKED(vp);
	KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED,
	    ("vn_vget_ino: vp not locked"));
	error = vfs_busy(mp, MBF_NOWAIT);
	if (error != 0) {
		vfs_ref(mp);
		VOP_UNLOCK(vp, 0);
		error = vfs_busy(mp, 0);
		vn_lock(vp, ltype | LK_RETRY);
		vfs_rel(mp);
		if (error != 0)
			return (ENOENT);
		if (vp->v_iflag & VI_DOOMED) {
			vfs_unbusy(mp);
			return (ENOENT);
		}
	}
	VOP_UNLOCK(vp, 0);
	error = alloc(mp, alloc_arg, lkflags, rvp);
	vfs_unbusy(mp);
	if (*rvp != vp)
		vn_lock(vp, ltype | LK_RETRY);
	if (vp->v_iflag & VI_DOOMED) {
		if (error == 0) {
			if (*rvp == vp)
				vunref(vp);
			else
				vput(*rvp);
		}
		error = ENOENT;
	}
	return (error);
}

int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    struct thread *td)
{

	if (vp->v_type != VREG || td == NULL)
		return (0);
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    lim_cur(td, RLIMIT_FSIZE)) {
		PROC_LOCK(td->td_proc);
		kern_psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		return (EFBIG);
	}
	return (0);
}

int
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfmode(td, active_cred, vp, mode));
}

int
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfown(td, active_cred, vp, uid, gid));
}

void
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
{
	vm_object_t object;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_page_remove(object, start, end, 0);
	VM_OBJECT_WUNLOCK(object);
}

int
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
{
	struct vattr va;
	daddr_t bn, bnp;
	uint64_t bsize;
	off_t noff;
	int error;

	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
	    ("Wrong command %lu", cmd));

	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);
	if (vp->v_type != VREG) {
		error = ENOTTY;
		goto unlock;
	}
	error = VOP_GETATTR(vp, &va, cred);
	if (error != 0)
		goto unlock;
	noff = *off;
	if (noff >= va.va_size) {
		error = ENXIO;
		goto unlock;
	}
	bsize = vp->v_mount->mnt_stat.f_iosize;
	for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
		error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
		if (error == EOPNOTSUPP) {
			error = ENOTTY;
			goto unlock;
		}
		if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
		    (bnp != -1 && cmd == FIOSEEKDATA)) {
			noff = bn * bsize;
			if (noff < *off)
				noff = *off;
			goto unlock;
		}
	}
	if (noff > va.va_size)
		noff = va.va_size;
	/* noff == va.va_size. There is an implicit hole at the end of file. */
	if (cmd == FIOSEEKDATA)
		error = ENXIO;
unlock:
	VOP_UNLOCK(vp, 0);
	if (error == 0)
		*off = noff;
	return (error);
}
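/*
 * Illustrative sketch (not part of this file): vn_bmap_seekhole() is
 * the generic backend for the FIOSEEKHOLE/FIOSEEKDATA ioctls, which
 * in turn implement lseek(2)'s SEEK_HOLE/SEEK_DATA (see vn_seek()
 * below).  A userland program can walk the data regions of a sparse
 * file as follows; per the implicit-hole rule above, SEEK_DATA past
 * the last data block fails with ENXIO, which terminates the loop.
 */
#if 0
#include <sys/types.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void
walk_data_regions(const char *path)
{
	off_t data, hole;
	int fd;

	if ((fd = open(path, O_RDONLY)) == -1)
		err(1, "open");
	for (data = 0;; data = hole) {
		/* Find the next data region; -1/ENXIO means no more. */
		if ((data = lseek(fd, data, SEEK_DATA)) == -1)
			break;
		/*
		 * Every data region is followed by a hole, possibly
		 * the implicit one at end of file.
		 */
		if ((hole = lseek(fd, data, SEEK_HOLE)) == -1)
			err(1, "SEEK_HOLE");
		printf("data: [%jd, %jd)\n", (intmax_t)data, (intmax_t)hole);
	}
	close(fd);
}
#endif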
2240 */ 2241 if (vattr.va_size == 0 && vp->v_type == VCHR && 2242 fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0) 2243 vattr.va_size = size; 2244 if (noneg && 2245 (vattr.va_size > OFF_MAX || 2246 (offset > 0 && vattr.va_size > OFF_MAX - offset))) { 2247 error = EOVERFLOW; 2248 break; 2249 } 2250 offset += vattr.va_size; 2251 break; 2252 case L_SET: 2253 break; 2254 case SEEK_DATA: 2255 error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td); 2256 break; 2257 case SEEK_HOLE: 2258 error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td); 2259 break; 2260 default: 2261 error = EINVAL; 2262 } 2263 if (error == 0 && noneg && offset < 0) 2264 error = EINVAL; 2265 if (error != 0) 2266 goto drop; 2267 VFS_KNOTE_UNLOCKED(vp, 0); 2268 td->td_uretoff.tdu_off = offset; 2269 drop: 2270 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); 2271 return (error); 2272 } 2273 2274 int 2275 vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred, 2276 struct thread *td) 2277 { 2278 int error; 2279 2280 /* 2281 * Grant permission if the caller is the owner of the file, or 2282 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on 2283 * on the file. If the time pointer is null, then write 2284 * permission on the file is also sufficient. 2285 * 2286 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes: 2287 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES 2288 * will be allowed to set the times [..] to the current 2289 * server time. 2290 */ 2291 error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td); 2292 if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0) 2293 error = VOP_ACCESS(vp, VWRITE, cred, td); 2294 return (error); 2295 } 2296 2297 int 2298 vn_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2299 { 2300 struct vnode *vp; 2301 int error; 2302 2303 if (fp->f_type == DTYPE_FIFO) 2304 kif->kf_type = KF_TYPE_FIFO; 2305 else 2306 kif->kf_type = KF_TYPE_VNODE; 2307 vp = fp->f_vnode; 2308 vref(vp); 2309 FILEDESC_SUNLOCK(fdp); 2310 error = vn_fill_kinfo_vnode(vp, kif); 2311 vrele(vp); 2312 FILEDESC_SLOCK(fdp); 2313 return (error); 2314 } 2315 2316 static inline void 2317 vn_fill_junk(struct kinfo_file *kif) 2318 { 2319 size_t len, olen; 2320 2321 /* 2322 * Simulate vn_fullpath returning changing values for a given 2323 * vp during e.g. coredump. 2324 */ 2325 len = (arc4random() % (sizeof(kif->kf_path) - 2)) + 1; 2326 olen = strlen(kif->kf_path); 2327 if (len < olen) 2328 strcpy(&kif->kf_path[len - 1], "$"); 2329 else 2330 for (; olen < len; olen++) 2331 strcpy(&kif->kf_path[olen], "A"); 2332 } 2333 2334 int 2335 vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif) 2336 { 2337 struct vattr va; 2338 char *fullpath, *freepath; 2339 int error; 2340 2341 kif->kf_vnode_type = vntype_to_kinfo(vp->v_type); 2342 freepath = NULL; 2343 fullpath = "-"; 2344 error = vn_fullpath(curthread, vp, &fullpath, &freepath); 2345 if (error == 0) { 2346 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); 2347 } 2348 if (freepath != NULL) 2349 free(freepath, M_TEMP); 2350 2351 KFAIL_POINT_CODE(DEBUG_FP, fill_kinfo_vnode__random_path, 2352 vn_fill_junk(kif); 2353 ); 2354 2355 /* 2356 * Retrieve vnode attributes. 
2357 */ 2358 va.va_fsid = VNOVAL; 2359 va.va_rdev = NODEV; 2360 vn_lock(vp, LK_SHARED | LK_RETRY); 2361 error = VOP_GETATTR(vp, &va, curthread->td_ucred); 2362 VOP_UNLOCK(vp, 0); 2363 if (error != 0) 2364 return (error); 2365 if (va.va_fsid != VNOVAL) 2366 kif->kf_un.kf_file.kf_file_fsid = va.va_fsid; 2367 else 2368 kif->kf_un.kf_file.kf_file_fsid = 2369 vp->v_mount->mnt_stat.f_fsid.val[0]; 2370 kif->kf_un.kf_file.kf_file_fileid = va.va_fileid; 2371 kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode); 2372 kif->kf_un.kf_file.kf_file_size = va.va_size; 2373 kif->kf_un.kf_file.kf_file_rdev = va.va_rdev; 2374 return (0); 2375 } 2376 2377 int 2378 vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 2379 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 2380 struct thread *td) 2381 { 2382 #ifdef HWPMC_HOOKS 2383 struct pmckern_map_in pkm; 2384 #endif 2385 struct mount *mp; 2386 struct vnode *vp; 2387 vm_object_t object; 2388 vm_prot_t maxprot; 2389 boolean_t writecounted; 2390 int error; 2391 2392 #if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \ 2393 defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) 2394 /* 2395 * POSIX shared-memory objects are defined to have 2396 * kernel persistence, and are not defined to support 2397 * read(2)/write(2) -- or even open(2). Thus, we can 2398 * use MAP_ASYNC to trade on-disk coherence for speed. 2399 * The shm_open(3) library routine turns on the FPOSIXSHM 2400 * flag to request this behavior. 2401 */ 2402 if ((fp->f_flag & FPOSIXSHM) != 0) 2403 flags |= MAP_NOSYNC; 2404 #endif 2405 vp = fp->f_vnode; 2406 2407 /* 2408 * Ensure that file and memory protections are 2409 * compatible. Note that we only worry about 2410 * writability if mapping is shared; in this case, 2411 * current and max prot are dictated by the open file. 2412 * XXX use the vnode instead? Problem is: what 2413 * credentials do we use for determination? What if 2414 * proc does a setuid? 2415 */ 2416 mp = vp->v_mount; 2417 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) 2418 maxprot = VM_PROT_NONE; 2419 else 2420 maxprot = VM_PROT_EXECUTE; 2421 if ((fp->f_flag & FREAD) != 0) 2422 maxprot |= VM_PROT_READ; 2423 else if ((prot & VM_PROT_READ) != 0) 2424 return (EACCES); 2425 2426 /* 2427 * If we are sharing potential changes via MAP_SHARED and we 2428 * are trying to get write permission although we opened it 2429 * without asking for it, bail out. 2430 */ 2431 if ((flags & MAP_SHARED) != 0) { 2432 if ((fp->f_flag & FWRITE) != 0) 2433 maxprot |= VM_PROT_WRITE; 2434 else if ((prot & VM_PROT_WRITE) != 0) 2435 return (EACCES); 2436 } else { 2437 maxprot |= VM_PROT_WRITE; 2438 cap_maxprot |= VM_PROT_WRITE; 2439 } 2440 maxprot &= cap_maxprot; 2441 2442 writecounted = FALSE; 2443 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, vp, 2444 &foff, &object, &writecounted); 2445 if (error != 0) 2446 return (error); 2447 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 2448 foff, writecounted, td); 2449 if (error != 0) { 2450 /* 2451 * If this mapping was accounted for in the vnode's 2452 * writecount, then undo that now. 2453 */ 2454 if (writecounted) 2455 vnode_pager_release_writecount(object, 0, size); 2456 vm_object_deallocate(object); 2457 } 2458 #ifdef HWPMC_HOOKS 2459 /* Inform hwpmc(4) if an executable is being mapped. 
int
vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_in pkm;
#endif
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	boolean_t writecounted;
	int error;

#if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
    defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
	/*
	 * POSIX shared-memory objects are defined to have
	 * kernel persistence, and are not defined to support
	 * read(2)/write(2) -- or even open(2).  Thus, we can
	 * use MAP_ASYNC to trade on-disk coherence for speed.
	 * The shm_open(3) library routine turns on the FPOSIXSHM
	 * flag to request this behavior.
	 */
	if ((fp->f_flag & FPOSIXSHM) != 0)
		flags |= MAP_NOSYNC;
#endif
	vp = fp->f_vnode;

	/*
	 * Ensure that file and memory protections are
	 * compatible.  Note that we only worry about
	 * writability if mapping is shared; in this case,
	 * current and max prot are dictated by the open file.
	 * XXX use the vnode instead?  Problem is: what
	 * credentials do we use for determination?  What if
	 * proc does a setuid?
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0)
		maxprot = VM_PROT_NONE;
	else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	} else {
		maxprot |= VM_PROT_WRITE;
		cap_maxprot |= VM_PROT_WRITE;
	}
	maxprot &= cap_maxprot;

	writecounted = FALSE;
	error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, vp,
	    &foff, &object, &writecounted);
	if (error != 0)
		return (error);
	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, writecounted, td);
	if (error != 0) {
		/*
		 * If this mapping was accounted for in the vnode's
		 * writecount, then undo that now.
		 */
		if (writecounted)
			vnode_pager_release_writecount(object, 0, size);
		vm_object_deallocate(object);
	}
#ifdef HWPMC_HOOKS
	/* Inform hwpmc(4) if an executable is being mapped. */
	if (error == 0 && (prot & VM_PROT_EXECUTE) != 0) {
		pkm.pm_file = vp;
		pkm.pm_address = (uintptr_t) *addr;
		PMC_CALL_HOOK(td, PMC_FN_MMAP, (void *) &pkm);
	}
#endif
	return (error);
}
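/*
 * Illustrative sketch (not part of this file): the maxprot logic in
 * vn_mmap() means a MAP_SHARED, PROT_WRITE mapping of a file opened
 * read-only fails with EACCES, while a MAP_PRIVATE mapping of the
 * same file may be written (copy-on-write; changes never reach the
 * file).
 */
#if 0
#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

static void
example_mmap_prot(const char *path, size_t len)
{
	void *p;
	int fd;

	if ((fd = open(path, O_RDONLY)) == -1)
		err(1, "open");

	/* Rejected by vn_mmap(): the file lacks FWRITE. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		warn("MAP_SHARED fails as expected");

	/* Allowed: private copy-on-write mapping. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		err(1, "MAP_PRIVATE");
	munmap(p, len);
	close(fd);
}
#endif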