1 /*- 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org> 11 * Copyright (c) 2013, 2014 The FreeBSD Foundation 12 * 13 * Portions of this software were developed by Konstantin Belousov 14 * under sponsorship from the FreeBSD Foundation. 15 * 16 * Redistribution and use in source and binary forms, with or without 17 * modification, are permitted provided that the following conditions 18 * are met: 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 
39 * 40 * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/disk.h> 49 #include <sys/fcntl.h> 50 #include <sys/file.h> 51 #include <sys/kdb.h> 52 #include <sys/stat.h> 53 #include <sys/priv.h> 54 #include <sys/proc.h> 55 #include <sys/limits.h> 56 #include <sys/lock.h> 57 #include <sys/mount.h> 58 #include <sys/mutex.h> 59 #include <sys/namei.h> 60 #include <sys/vnode.h> 61 #include <sys/bio.h> 62 #include <sys/buf.h> 63 #include <sys/filio.h> 64 #include <sys/resourcevar.h> 65 #include <sys/rwlock.h> 66 #include <sys/sx.h> 67 #include <sys/sysctl.h> 68 #include <sys/ttycom.h> 69 #include <sys/conf.h> 70 #include <sys/syslog.h> 71 #include <sys/unistd.h> 72 #include <sys/user.h> 73 74 #include <security/audit/audit.h> 75 #include <security/mac/mac_framework.h> 76 77 #include <vm/vm.h> 78 #include <vm/vm_extern.h> 79 #include <vm/pmap.h> 80 #include <vm/vm_map.h> 81 #include <vm/vm_object.h> 82 #include <vm/vm_page.h> 83 84 static fo_rdwr_t vn_read; 85 static fo_rdwr_t vn_write; 86 static fo_rdwr_t vn_io_fault; 87 static fo_truncate_t vn_truncate; 88 static fo_ioctl_t vn_ioctl; 89 static fo_poll_t vn_poll; 90 static fo_kqfilter_t vn_kqfilter; 91 static fo_stat_t vn_statfile; 92 static fo_close_t vn_closefile; 93 94 struct fileops vnops = { 95 .fo_read = vn_io_fault, 96 .fo_write = vn_io_fault, 97 .fo_truncate = vn_truncate, 98 .fo_ioctl = vn_ioctl, 99 .fo_poll = vn_poll, 100 .fo_kqfilter = vn_kqfilter, 101 .fo_stat = vn_statfile, 102 .fo_close = vn_closefile, 103 .fo_chmod = vn_chmod, 104 .fo_chown = vn_chown, 105 .fo_sendfile = vn_sendfile, 106 .fo_seek = vn_seek, 107 .fo_fill_kinfo = vn_fill_kinfo, 108 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE 109 }; 110 111 static const int io_hold_cnt = 16; 112 static int vn_io_fault_enable = 1; 113 SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW, 114 &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance"); 115 static u_long vn_io_faults_cnt; 116 SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD, 117 &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers"); 118 119 /* 120 * Returns true if vn_io_fault mode of handling the i/o request should 121 * be used. 122 */ 123 static bool 124 do_vn_io_fault(struct vnode *vp, struct uio *uio) 125 { 126 struct mount *mp; 127 128 return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG && 129 (mp = vp->v_mount) != NULL && 130 (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable); 131 } 132 133 /* 134 * Structure used to pass arguments to vn_io_fault1(), to do either 135 * file- or vnode-based I/O calls. 136 */ 137 struct vn_io_fault_args { 138 enum { 139 VN_IO_FAULT_FOP, 140 VN_IO_FAULT_VOP 141 } kind; 142 struct ucred *cred; 143 int flags; 144 union { 145 struct fop_args_tag { 146 struct file *fp; 147 fo_rdwr_t *doio; 148 } fop_args; 149 struct vop_args_tag { 150 struct vnode *vp; 151 } vop_args; 152 } args; 153 }; 154 155 static int vn_io_fault1(struct vnode *vp, struct uio *uio, 156 struct vn_io_fault_args *args, struct thread *td); 157 158 int 159 vn_open(ndp, flagp, cmode, fp) 160 struct nameidata *ndp; 161 int *flagp, cmode; 162 struct file *fp; 163 { 164 struct thread *td = ndp->ni_cnd.cn_thread; 165 166 return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp)); 167 } 168 169 /* 170 * Common code for vnode open operations via a name lookup. 171 * Lookup the vnode and invoke VOP_CREATE if needed. 
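 *
 * As an illustration of the vn_open() wrapper above, a sketch of a
 * typical in-kernel caller (the path, flags and mode are made-up
 * values for the example and error handling is trimmed):
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	flags = FREAD | FWRITE;
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, path, td);
 *	error = vn_open(&nd, &flags, 0600, NULL);
 *	if (error == 0) {
 *		NDFREE(&nd, NDF_ONLY_PNBUF);
 *		VOP_UNLOCK(nd.ni_vp, 0);
 *		... do i/o on the referenced, unlocked vnode nd.ni_vp ...
 *		error = vn_close(nd.ni_vp, flags, td->td_ucred, td);
 *	}
 *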
172 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine. 173 * 174 * Note that this does NOT free nameidata for the successful case, 175 * due to the NDINIT being done elsewhere. 176 */ 177 int 178 vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags, 179 struct ucred *cred, struct file *fp) 180 { 181 struct vnode *vp; 182 struct mount *mp; 183 struct thread *td = ndp->ni_cnd.cn_thread; 184 struct vattr vat; 185 struct vattr *vap = &vat; 186 int fmode, error; 187 188 restart: 189 fmode = *flagp; 190 if (fmode & O_CREAT) { 191 ndp->ni_cnd.cn_nameiop = CREATE; 192 /* 193 * Set NOCACHE to avoid flushing the cache when 194 * rolling in many files at once. 195 */ 196 ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF | NOCACHE; 197 if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0) 198 ndp->ni_cnd.cn_flags |= FOLLOW; 199 if (!(vn_open_flags & VN_OPEN_NOAUDIT)) 200 ndp->ni_cnd.cn_flags |= AUDITVNODE1; 201 if (vn_open_flags & VN_OPEN_NOCAPCHECK) 202 ndp->ni_cnd.cn_flags |= NOCAPCHECK; 203 bwillwrite(); 204 if ((error = namei(ndp)) != 0) 205 return (error); 206 if (ndp->ni_vp == NULL) { 207 VATTR_NULL(vap); 208 vap->va_type = VREG; 209 vap->va_mode = cmode; 210 if (fmode & O_EXCL) 211 vap->va_vaflags |= VA_EXCLUSIVE; 212 if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) { 213 NDFREE(ndp, NDF_ONLY_PNBUF); 214 vput(ndp->ni_dvp); 215 if ((error = vn_start_write(NULL, &mp, 216 V_XSLEEP | PCATCH)) != 0) 217 return (error); 218 goto restart; 219 } 220 if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0) 221 ndp->ni_cnd.cn_flags |= MAKEENTRY; 222 #ifdef MAC 223 error = mac_vnode_check_create(cred, ndp->ni_dvp, 224 &ndp->ni_cnd, vap); 225 if (error == 0) 226 #endif 227 error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp, 228 &ndp->ni_cnd, vap); 229 vput(ndp->ni_dvp); 230 vn_finished_write(mp); 231 if (error) { 232 NDFREE(ndp, NDF_ONLY_PNBUF); 233 return (error); 234 } 235 fmode &= ~O_TRUNC; 236 vp = ndp->ni_vp; 237 } else { 238 if (ndp->ni_dvp == ndp->ni_vp) 239 vrele(ndp->ni_dvp); 240 else 241 vput(ndp->ni_dvp); 242 ndp->ni_dvp = NULL; 243 vp = ndp->ni_vp; 244 if (fmode & O_EXCL) { 245 error = EEXIST; 246 goto bad; 247 } 248 fmode &= ~O_CREAT; 249 } 250 } else { 251 ndp->ni_cnd.cn_nameiop = LOOKUP; 252 ndp->ni_cnd.cn_flags = ISOPEN | 253 ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF; 254 if (!(fmode & FWRITE)) 255 ndp->ni_cnd.cn_flags |= LOCKSHARED; 256 if (!(vn_open_flags & VN_OPEN_NOAUDIT)) 257 ndp->ni_cnd.cn_flags |= AUDITVNODE1; 258 if (vn_open_flags & VN_OPEN_NOCAPCHECK) 259 ndp->ni_cnd.cn_flags |= NOCAPCHECK; 260 if ((error = namei(ndp)) != 0) 261 return (error); 262 vp = ndp->ni_vp; 263 } 264 error = vn_open_vnode(vp, fmode, cred, td, fp); 265 if (error) 266 goto bad; 267 *flagp = fmode; 268 return (0); 269 bad: 270 NDFREE(ndp, NDF_ONLY_PNBUF); 271 vput(vp); 272 *flagp = fmode; 273 ndp->ni_vp = NULL; 274 return (error); 275 } 276 277 /* 278 * Common code for vnode open operations once a vnode is located. 279 * Check permissions, and call the VOP_OPEN routine. 
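 *
 * For illustration only: a caller that already holds a locked,
 * referenced vnode (for example one returned by VFS_FHTOVP() in an
 * open-by-file-handle path) can skip the lookup and do, roughly:
 *
 *	error = vn_open_vnode(vp, fmode, td->td_ucred, td, fp);
 *	if (error != 0) {
 *		vput(vp);
 *		return (error);
 *	}
 *	VOP_UNLOCK(vp, 0);
 *
 * where vp, fmode and fp stand for the caller's vnode, its
 * FREAD/FWRITE/O_* open flags and the file being set up.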
280 */ 281 int 282 vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred, 283 struct thread *td, struct file *fp) 284 { 285 struct mount *mp; 286 accmode_t accmode; 287 struct flock lf; 288 int error, have_flock, lock_flags, type; 289 290 if (vp->v_type == VLNK) 291 return (EMLINK); 292 if (vp->v_type == VSOCK) 293 return (EOPNOTSUPP); 294 if (vp->v_type != VDIR && fmode & O_DIRECTORY) 295 return (ENOTDIR); 296 accmode = 0; 297 if (fmode & (FWRITE | O_TRUNC)) { 298 if (vp->v_type == VDIR) 299 return (EISDIR); 300 accmode |= VWRITE; 301 } 302 if (fmode & FREAD) 303 accmode |= VREAD; 304 if (fmode & FEXEC) 305 accmode |= VEXEC; 306 if ((fmode & O_APPEND) && (fmode & FWRITE)) 307 accmode |= VAPPEND; 308 #ifdef MAC 309 if (fmode & O_CREAT) 310 accmode |= VCREAT; 311 if (fmode & O_VERIFY) 312 accmode |= VVERIFY; 313 error = mac_vnode_check_open(cred, vp, accmode); 314 if (error) 315 return (error); 316 317 accmode &= ~(VCREAT | VVERIFY); 318 #endif 319 if ((fmode & O_CREAT) == 0) { 320 if (accmode & VWRITE) { 321 error = vn_writechk(vp); 322 if (error) 323 return (error); 324 } 325 if (accmode) { 326 error = VOP_ACCESS(vp, accmode, cred, td); 327 if (error) 328 return (error); 329 } 330 } 331 if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 332 vn_lock(vp, LK_UPGRADE | LK_RETRY); 333 if ((error = VOP_OPEN(vp, fmode, cred, td, fp)) != 0) 334 return (error); 335 336 if (fmode & (O_EXLOCK | O_SHLOCK)) { 337 KASSERT(fp != NULL, ("open with flock requires fp")); 338 lock_flags = VOP_ISLOCKED(vp); 339 VOP_UNLOCK(vp, 0); 340 lf.l_whence = SEEK_SET; 341 lf.l_start = 0; 342 lf.l_len = 0; 343 if (fmode & O_EXLOCK) 344 lf.l_type = F_WRLCK; 345 else 346 lf.l_type = F_RDLCK; 347 type = F_FLOCK; 348 if ((fmode & FNONBLOCK) == 0) 349 type |= F_WAIT; 350 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type); 351 have_flock = (error == 0); 352 vn_lock(vp, lock_flags | LK_RETRY); 353 if (error == 0 && vp->v_iflag & VI_DOOMED) 354 error = ENOENT; 355 /* 356 * Another thread might have used this vnode as an 357 * executable while the vnode lock was dropped. 358 * Ensure the vnode is still able to be opened for 359 * writing after the lock has been obtained. 360 */ 361 if (error == 0 && accmode & VWRITE) 362 error = vn_writechk(vp); 363 if (error) { 364 VOP_UNLOCK(vp, 0); 365 if (have_flock) { 366 lf.l_whence = SEEK_SET; 367 lf.l_start = 0; 368 lf.l_len = 0; 369 lf.l_type = F_UNLCK; 370 (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, 371 F_FLOCK); 372 } 373 vn_start_write(vp, &mp, V_WAIT); 374 vn_lock(vp, lock_flags | LK_RETRY); 375 (void)VOP_CLOSE(vp, fmode, cred, td); 376 vn_finished_write(mp); 377 /* Prevent second close from fdrop()->vn_close(). */ 378 if (fp != NULL) 379 fp->f_ops= &badfileops; 380 return (error); 381 } 382 fp->f_flag |= FHASLOCK; 383 } 384 if (fmode & FWRITE) { 385 VOP_ADD_WRITECOUNT(vp, 1); 386 CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d", 387 __func__, vp, vp->v_writecount); 388 } 389 ASSERT_VOP_LOCKED(vp, "vn_open_vnode"); 390 return (0); 391 } 392 393 /* 394 * Check for write permissions on the specified vnode. 395 * Prototype text segments cannot be written. 396 */ 397 int 398 vn_writechk(vp) 399 register struct vnode *vp; 400 { 401 402 ASSERT_VOP_LOCKED(vp, "vn_writechk"); 403 /* 404 * If there's shared text associated with 405 * the vnode, try to free it up once. If 406 * we fail, we can't allow writing. 
407 */ 408 if (VOP_IS_TEXT(vp)) 409 return (ETXTBSY); 410 411 return (0); 412 } 413 414 /* 415 * Vnode close call 416 */ 417 int 418 vn_close(vp, flags, file_cred, td) 419 register struct vnode *vp; 420 int flags; 421 struct ucred *file_cred; 422 struct thread *td; 423 { 424 struct mount *mp; 425 int error, lock_flags; 426 427 if (vp->v_type != VFIFO && (flags & FWRITE) == 0 && 428 MNT_EXTENDED_SHARED(vp->v_mount)) 429 lock_flags = LK_SHARED; 430 else 431 lock_flags = LK_EXCLUSIVE; 432 433 vn_start_write(vp, &mp, V_WAIT); 434 vn_lock(vp, lock_flags | LK_RETRY); 435 if (flags & FWRITE) { 436 VNASSERT(vp->v_writecount > 0, vp, 437 ("vn_close: negative writecount")); 438 VOP_ADD_WRITECOUNT(vp, -1); 439 CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d", 440 __func__, vp, vp->v_writecount); 441 } 442 error = VOP_CLOSE(vp, flags, file_cred, td); 443 vput(vp); 444 vn_finished_write(mp); 445 return (error); 446 } 447 448 /* 449 * Heuristic to detect sequential operation. 450 */ 451 static int 452 sequential_heuristic(struct uio *uio, struct file *fp) 453 { 454 455 ASSERT_VOP_LOCKED(fp->f_vnode, __func__); 456 if (fp->f_flag & FRDAHEAD) 457 return (fp->f_seqcount << IO_SEQSHIFT); 458 459 /* 460 * Offset 0 is handled specially. open() sets f_seqcount to 1 so 461 * that the first I/O is normally considered to be slightly 462 * sequential. Seeking to offset 0 doesn't change sequentiality 463 * unless previous seeks have reduced f_seqcount to 0, in which 464 * case offset 0 is not special. 465 */ 466 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) || 467 uio->uio_offset == fp->f_nextoff) { 468 /* 469 * f_seqcount is in units of fixed-size blocks so that it 470 * depends mainly on the amount of sequential I/O and not 471 * much on the number of sequential I/O's. The fixed size 472 * of 16384 is hard-coded here since it is (not quite) just 473 * a magic size that works well here. This size is more 474 * closely related to the best I/O size for real disks than 475 * to any block size used by software. 476 */ 477 fp->f_seqcount += howmany(uio->uio_resid, 16384); 478 if (fp->f_seqcount > IO_SEQMAX) 479 fp->f_seqcount = IO_SEQMAX; 480 return (fp->f_seqcount << IO_SEQSHIFT); 481 } 482 483 /* Not sequential. Quickly draw-down sequentiality. */ 484 if (fp->f_seqcount > 1) 485 fp->f_seqcount = 1; 486 else 487 fp->f_seqcount = 0; 488 return (0); 489 } 490 491 /* 492 * Package up an I/O request on a vnode into a uio and do it. 
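 *
 * For illustration, a sketch of a common calling pattern; "buf" and
 * "len" are placeholders and the vnode is assumed to be referenced
 * but not locked by the caller (hence ioflg of 0):
 *
 *	ssize_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, len, (off_t)0, UIO_SYSSPACE,
 *	    0, td->td_ucred, NOCRED, &resid, td);
 *	if (error == 0 && resid != 0)
 *		error = EIO;
 *
 * Passing a non-NULL aresid, as above, leaves short transfers to the
 * caller; with aresid == NULL a short transfer is turned into EIO
 * here instead.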
493 */ 494 int 495 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset, 496 enum uio_seg segflg, int ioflg, struct ucred *active_cred, 497 struct ucred *file_cred, ssize_t *aresid, struct thread *td) 498 { 499 struct uio auio; 500 struct iovec aiov; 501 struct mount *mp; 502 struct ucred *cred; 503 void *rl_cookie; 504 struct vn_io_fault_args args; 505 int error, lock_flags; 506 507 auio.uio_iov = &aiov; 508 auio.uio_iovcnt = 1; 509 aiov.iov_base = base; 510 aiov.iov_len = len; 511 auio.uio_resid = len; 512 auio.uio_offset = offset; 513 auio.uio_segflg = segflg; 514 auio.uio_rw = rw; 515 auio.uio_td = td; 516 error = 0; 517 518 if ((ioflg & IO_NODELOCKED) == 0) { 519 if ((ioflg & IO_RANGELOCKED) == 0) { 520 if (rw == UIO_READ) { 521 rl_cookie = vn_rangelock_rlock(vp, offset, 522 offset + len); 523 } else { 524 rl_cookie = vn_rangelock_wlock(vp, offset, 525 offset + len); 526 } 527 } else 528 rl_cookie = NULL; 529 mp = NULL; 530 if (rw == UIO_WRITE) { 531 if (vp->v_type != VCHR && 532 (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) 533 != 0) 534 goto out; 535 if (MNT_SHARED_WRITES(mp) || 536 ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) 537 lock_flags = LK_SHARED; 538 else 539 lock_flags = LK_EXCLUSIVE; 540 } else 541 lock_flags = LK_SHARED; 542 vn_lock(vp, lock_flags | LK_RETRY); 543 } else 544 rl_cookie = NULL; 545 546 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); 547 #ifdef MAC 548 if ((ioflg & IO_NOMACCHECK) == 0) { 549 if (rw == UIO_READ) 550 error = mac_vnode_check_read(active_cred, file_cred, 551 vp); 552 else 553 error = mac_vnode_check_write(active_cred, file_cred, 554 vp); 555 } 556 #endif 557 if (error == 0) { 558 if (file_cred != NULL) 559 cred = file_cred; 560 else 561 cred = active_cred; 562 if (do_vn_io_fault(vp, &auio)) { 563 args.kind = VN_IO_FAULT_VOP; 564 args.cred = cred; 565 args.flags = ioflg; 566 args.args.vop_args.vp = vp; 567 error = vn_io_fault1(vp, &auio, &args, td); 568 } else if (rw == UIO_READ) { 569 error = VOP_READ(vp, &auio, ioflg, cred); 570 } else /* if (rw == UIO_WRITE) */ { 571 error = VOP_WRITE(vp, &auio, ioflg, cred); 572 } 573 } 574 if (aresid) 575 *aresid = auio.uio_resid; 576 else 577 if (auio.uio_resid && error == 0) 578 error = EIO; 579 if ((ioflg & IO_NODELOCKED) == 0) { 580 VOP_UNLOCK(vp, 0); 581 if (mp != NULL) 582 vn_finished_write(mp); 583 } 584 out: 585 if (rl_cookie != NULL) 586 vn_rangelock_unlock(vp, rl_cookie); 587 return (error); 588 } 589 590 /* 591 * Package up an I/O request on a vnode into a uio and do it. The I/O 592 * request is split up into smaller chunks and we try to avoid saturating 593 * the buffer cache while potentially holding a vnode locked, so we 594 * check bwillwrite() before calling vn_rdwr(). We also call kern_yield() 595 * to give other processes a chance to lock the vnode (either other processes 596 * core'ing the same binary, or unrelated processes scanning the directory). 
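 *
 * As a worked example of the chunking arithmetic below, assuming the
 * historical MAXBSIZE of 65536: for a starting offset of 70000 the
 * first chunk is 65536 - (70000 % 65536) = 61072 bytes, which advances
 * the offset to 131072; every later chunk is then a full, aligned
 * MAXBSIZE transfer except possibly the final partial one.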
597 */ 598 int 599 vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, active_cred, 600 file_cred, aresid, td) 601 enum uio_rw rw; 602 struct vnode *vp; 603 void *base; 604 size_t len; 605 off_t offset; 606 enum uio_seg segflg; 607 int ioflg; 608 struct ucred *active_cred; 609 struct ucred *file_cred; 610 size_t *aresid; 611 struct thread *td; 612 { 613 int error = 0; 614 ssize_t iaresid; 615 616 do { 617 int chunk; 618 619 /* 620 * Force `offset' to a multiple of MAXBSIZE except possibly 621 * for the first chunk, so that filesystems only need to 622 * write full blocks except possibly for the first and last 623 * chunks. 624 */ 625 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE; 626 627 if (chunk > len) 628 chunk = len; 629 if (rw != UIO_READ && vp->v_type == VREG) 630 bwillwrite(); 631 iaresid = 0; 632 error = vn_rdwr(rw, vp, base, chunk, offset, segflg, 633 ioflg, active_cred, file_cred, &iaresid, td); 634 len -= chunk; /* aresid calc already includes length */ 635 if (error) 636 break; 637 offset += chunk; 638 base = (char *)base + chunk; 639 kern_yield(PRI_USER); 640 } while (len); 641 if (aresid) 642 *aresid = len + iaresid; 643 return (error); 644 } 645 646 off_t 647 foffset_lock(struct file *fp, int flags) 648 { 649 struct mtx *mtxp; 650 off_t res; 651 652 KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed")); 653 654 #if OFF_MAX <= LONG_MAX 655 /* 656 * Caller only wants the current f_offset value. Assume that 657 * the long and shorter integer types reads are atomic. 658 */ 659 if ((flags & FOF_NOLOCK) != 0) 660 return (fp->f_offset); 661 #endif 662 663 /* 664 * According to McKusick the vn lock was protecting f_offset here. 665 * It is now protected by the FOFFSET_LOCKED flag. 666 */ 667 mtxp = mtx_pool_find(mtxpool_sleep, fp); 668 mtx_lock(mtxp); 669 if ((flags & FOF_NOLOCK) == 0) { 670 while (fp->f_vnread_flags & FOFFSET_LOCKED) { 671 fp->f_vnread_flags |= FOFFSET_LOCK_WAITING; 672 msleep(&fp->f_vnread_flags, mtxp, PUSER -1, 673 "vofflock", 0); 674 } 675 fp->f_vnread_flags |= FOFFSET_LOCKED; 676 } 677 res = fp->f_offset; 678 mtx_unlock(mtxp); 679 return (res); 680 } 681 682 void 683 foffset_unlock(struct file *fp, off_t val, int flags) 684 { 685 struct mtx *mtxp; 686 687 KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed")); 688 689 #if OFF_MAX <= LONG_MAX 690 if ((flags & FOF_NOLOCK) != 0) { 691 if ((flags & FOF_NOUPDATE) == 0) 692 fp->f_offset = val; 693 if ((flags & FOF_NEXTOFF) != 0) 694 fp->f_nextoff = val; 695 return; 696 } 697 #endif 698 699 mtxp = mtx_pool_find(mtxpool_sleep, fp); 700 mtx_lock(mtxp); 701 if ((flags & FOF_NOUPDATE) == 0) 702 fp->f_offset = val; 703 if ((flags & FOF_NEXTOFF) != 0) 704 fp->f_nextoff = val; 705 if ((flags & FOF_NOLOCK) == 0) { 706 KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0, 707 ("Lost FOFFSET_LOCKED")); 708 if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING) 709 wakeup(&fp->f_vnread_flags); 710 fp->f_vnread_flags = 0; 711 } 712 mtx_unlock(mtxp); 713 } 714 715 void 716 foffset_lock_uio(struct file *fp, struct uio *uio, int flags) 717 { 718 719 if ((flags & FOF_OFFSET) == 0) 720 uio->uio_offset = foffset_lock(fp, flags); 721 } 722 723 void 724 foffset_unlock_uio(struct file *fp, struct uio *uio, int flags) 725 { 726 727 if ((flags & FOF_OFFSET) == 0) 728 foffset_unlock(fp, uio->uio_offset, flags); 729 } 730 731 static int 732 get_advice(struct file *fp, struct uio *uio) 733 { 734 struct mtx *mtxp; 735 int ret; 736 737 ret = POSIX_FADV_NORMAL; 738 if (fp->f_advice == NULL) 739 return (ret); 740 741 mtxp = 
mtx_pool_find(mtxpool_sleep, fp); 742 mtx_lock(mtxp); 743 if (uio->uio_offset >= fp->f_advice->fa_start && 744 uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end) 745 ret = fp->f_advice->fa_advice; 746 mtx_unlock(mtxp); 747 return (ret); 748 } 749 750 /* 751 * File table vnode read routine. 752 */ 753 static int 754 vn_read(fp, uio, active_cred, flags, td) 755 struct file *fp; 756 struct uio *uio; 757 struct ucred *active_cred; 758 int flags; 759 struct thread *td; 760 { 761 struct vnode *vp; 762 struct mtx *mtxp; 763 int error, ioflag; 764 int advice; 765 off_t offset, start, end; 766 767 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", 768 uio->uio_td, td)); 769 KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET")); 770 vp = fp->f_vnode; 771 ioflag = 0; 772 if (fp->f_flag & FNONBLOCK) 773 ioflag |= IO_NDELAY; 774 if (fp->f_flag & O_DIRECT) 775 ioflag |= IO_DIRECT; 776 advice = get_advice(fp, uio); 777 vn_lock(vp, LK_SHARED | LK_RETRY); 778 779 switch (advice) { 780 case POSIX_FADV_NORMAL: 781 case POSIX_FADV_SEQUENTIAL: 782 case POSIX_FADV_NOREUSE: 783 ioflag |= sequential_heuristic(uio, fp); 784 break; 785 case POSIX_FADV_RANDOM: 786 /* Disable read-ahead for random I/O. */ 787 break; 788 } 789 offset = uio->uio_offset; 790 791 #ifdef MAC 792 error = mac_vnode_check_read(active_cred, fp->f_cred, vp); 793 if (error == 0) 794 #endif 795 error = VOP_READ(vp, uio, ioflag, fp->f_cred); 796 fp->f_nextoff = uio->uio_offset; 797 VOP_UNLOCK(vp, 0); 798 if (error == 0 && advice == POSIX_FADV_NOREUSE && 799 offset != uio->uio_offset) { 800 /* 801 * Use POSIX_FADV_DONTNEED to flush clean pages and 802 * buffers for the backing file after a 803 * POSIX_FADV_NOREUSE read(2). To optimize the common 804 * case of using POSIX_FADV_NOREUSE with sequential 805 * access, track the previous implicit DONTNEED 806 * request and grow this request to include the 807 * current read(2) in addition to the previous 808 * DONTNEED. With purely sequential access this will 809 * cause the DONTNEED requests to continously grow to 810 * cover all of the previously read regions of the 811 * file. This allows filesystem blocks that are 812 * accessed by multiple calls to read(2) to be flushed 813 * once the last read(2) finishes. 814 */ 815 start = offset; 816 end = uio->uio_offset - 1; 817 mtxp = mtx_pool_find(mtxpool_sleep, fp); 818 mtx_lock(mtxp); 819 if (fp->f_advice != NULL && 820 fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) { 821 if (start != 0 && fp->f_advice->fa_prevend + 1 == start) 822 start = fp->f_advice->fa_prevstart; 823 else if (fp->f_advice->fa_prevstart != 0 && 824 fp->f_advice->fa_prevstart == end + 1) 825 end = fp->f_advice->fa_prevend; 826 fp->f_advice->fa_prevstart = start; 827 fp->f_advice->fa_prevend = end; 828 } 829 mtx_unlock(mtxp); 830 error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED); 831 } 832 return (error); 833 } 834 835 /* 836 * File table vnode write routine. 
 */
static int
vn_write(fp, uio, active_cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *active_cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	struct mount *mp;
	struct mtx *mtxp;
	int error, ioflag, lock_flags;
	int advice;
	off_t offset, start, end;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    offset != uio->uio_offset) {
		/*
		 * Use POSIX_FADV_DONTNEED to flush clean pages and
		 * buffers for the backing file after a
		 * POSIX_FADV_NOREUSE write(2).  To optimize the
		 * common case of using POSIX_FADV_NOREUSE with
		 * sequential access, track the previous implicit
		 * DONTNEED request and grow this request to include
		 * the current write(2) in addition to the previous
		 * DONTNEED.  With purely sequential access this will
		 * cause the DONTNEED requests to continuously grow to
		 * cover all of the previously written regions of the
		 * file.
		 *
		 * Note that the blocks just written are almost
		 * certainly still dirty, so this only works when
		 * VOP_ADVISE() calls from subsequent writes push out
		 * the data written by this write(2) once the backing
		 * buffers are clean.  However, as compared to forcing
		 * IO_DIRECT, this gives much saner behavior.  Write
		 * clustering is still allowed, and clean pages are
		 * merely moved to the cache page queue rather than
		 * outright thrown away.  This means a subsequent
		 * read(2) can still avoid hitting the disk if the
		 * pages have not been reclaimed.
		 *
		 * This does make POSIX_FADV_NOREUSE largely useless
		 * with non-sequential access.  However, sequential
		 * access is the more common use case and the flag is
		 * merely advisory.
		 */
		start = offset;
		end = uio->uio_offset - 1;
		mtxp = mtx_pool_find(mtxpool_sleep, fp);
		mtx_lock(mtxp);
		if (fp->f_advice != NULL &&
		    fp->f_advice->fa_advice == POSIX_FADV_NOREUSE) {
			if (start != 0 && fp->f_advice->fa_prevend + 1 == start)
				start = fp->f_advice->fa_prevstart;
			else if (fp->f_advice->fa_prevstart != 0 &&
			    fp->f_advice->fa_prevstart == end + 1)
				end = fp->f_advice->fa_prevend;
			fp->f_advice->fa_prevstart = start;
			fp->f_advice->fa_prevend = end;
		}
		mtx_unlock(mtxp);
		error = VOP_ADVISE(vp, start, end, POSIX_FADV_DONTNEED);
	}

unlock:
	return (error);
}

/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that the thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * VOP will succeed (ignoring the genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to do
 * i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in the curthread->td_ma,
 * instead of doing uiomove().  A helper function
 * vn_io_fault_uiomove() converts uiomove request into
 * uiomove_fromphys() over td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */

/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{

	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		return ((args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td));
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			return (VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		} else if (uio->uio_rw == UIO_WRITE) {
			return (VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred));
		}
		break;
	}
	panic("vn_io_fault_doio: unknown kind of io %d %d", args->kind,
	    uio->uio_rw);
}

/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
1025 */ 1026 static int 1027 vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args, 1028 struct thread *td) 1029 { 1030 vm_page_t ma[io_hold_cnt + 2]; 1031 struct uio *uio_clone, short_uio; 1032 struct iovec short_iovec[1]; 1033 vm_page_t *prev_td_ma; 1034 vm_prot_t prot; 1035 vm_offset_t addr, end; 1036 size_t len, resid; 1037 ssize_t adv; 1038 int error, cnt, save, saveheld, prev_td_ma_cnt; 1039 1040 prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ; 1041 1042 /* 1043 * The UFS follows IO_UNIT directive and replays back both 1044 * uio_offset and uio_resid if an error is encountered during the 1045 * operation. But, since the iovec may be already advanced, 1046 * uio is still in an inconsistent state. 1047 * 1048 * Cache a copy of the original uio, which is advanced to the redo 1049 * point using UIO_NOCOPY below. 1050 */ 1051 uio_clone = cloneuio(uio); 1052 resid = uio->uio_resid; 1053 1054 short_uio.uio_segflg = UIO_USERSPACE; 1055 short_uio.uio_rw = uio->uio_rw; 1056 short_uio.uio_td = uio->uio_td; 1057 1058 save = vm_fault_disable_pagefaults(); 1059 error = vn_io_fault_doio(args, uio, td); 1060 if (error != EFAULT) 1061 goto out; 1062 1063 atomic_add_long(&vn_io_faults_cnt, 1); 1064 uio_clone->uio_segflg = UIO_NOCOPY; 1065 uiomove(NULL, resid - uio->uio_resid, uio_clone); 1066 uio_clone->uio_segflg = uio->uio_segflg; 1067 1068 saveheld = curthread_pflags_set(TDP_UIOHELD); 1069 prev_td_ma = td->td_ma; 1070 prev_td_ma_cnt = td->td_ma_cnt; 1071 1072 while (uio_clone->uio_resid != 0) { 1073 len = uio_clone->uio_iov->iov_len; 1074 if (len == 0) { 1075 KASSERT(uio_clone->uio_iovcnt >= 1, 1076 ("iovcnt underflow")); 1077 uio_clone->uio_iov++; 1078 uio_clone->uio_iovcnt--; 1079 continue; 1080 } 1081 if (len > io_hold_cnt * PAGE_SIZE) 1082 len = io_hold_cnt * PAGE_SIZE; 1083 addr = (uintptr_t)uio_clone->uio_iov->iov_base; 1084 end = round_page(addr + len); 1085 if (end < addr) { 1086 error = EFAULT; 1087 break; 1088 } 1089 cnt = atop(end - trunc_page(addr)); 1090 /* 1091 * A perfectly misaligned address and length could cause 1092 * both the start and the end of the chunk to use partial 1093 * page. +2 accounts for such a situation. 
1094 */ 1095 cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map, 1096 addr, len, prot, ma, io_hold_cnt + 2); 1097 if (cnt == -1) { 1098 error = EFAULT; 1099 break; 1100 } 1101 short_uio.uio_iov = &short_iovec[0]; 1102 short_iovec[0].iov_base = (void *)addr; 1103 short_uio.uio_iovcnt = 1; 1104 short_uio.uio_resid = short_iovec[0].iov_len = len; 1105 short_uio.uio_offset = uio_clone->uio_offset; 1106 td->td_ma = ma; 1107 td->td_ma_cnt = cnt; 1108 1109 error = vn_io_fault_doio(args, &short_uio, td); 1110 vm_page_unhold_pages(ma, cnt); 1111 adv = len - short_uio.uio_resid; 1112 1113 uio_clone->uio_iov->iov_base = 1114 (char *)uio_clone->uio_iov->iov_base + adv; 1115 uio_clone->uio_iov->iov_len -= adv; 1116 uio_clone->uio_resid -= adv; 1117 uio_clone->uio_offset += adv; 1118 1119 uio->uio_resid -= adv; 1120 uio->uio_offset += adv; 1121 1122 if (error != 0 || adv == 0) 1123 break; 1124 } 1125 td->td_ma = prev_td_ma; 1126 td->td_ma_cnt = prev_td_ma_cnt; 1127 curthread_pflags_restore(saveheld); 1128 out: 1129 vm_fault_enable_pagefaults(save); 1130 free(uio_clone, M_IOV); 1131 return (error); 1132 } 1133 1134 static int 1135 vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred, 1136 int flags, struct thread *td) 1137 { 1138 fo_rdwr_t *doio; 1139 struct vnode *vp; 1140 void *rl_cookie; 1141 struct vn_io_fault_args args; 1142 int error; 1143 1144 doio = uio->uio_rw == UIO_READ ? vn_read : vn_write; 1145 vp = fp->f_vnode; 1146 foffset_lock_uio(fp, uio, flags); 1147 if (do_vn_io_fault(vp, uio)) { 1148 args.kind = VN_IO_FAULT_FOP; 1149 args.args.fop_args.fp = fp; 1150 args.args.fop_args.doio = doio; 1151 args.cred = active_cred; 1152 args.flags = flags | FOF_OFFSET; 1153 if (uio->uio_rw == UIO_READ) { 1154 rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset, 1155 uio->uio_offset + uio->uio_resid); 1156 } else if ((fp->f_flag & O_APPEND) != 0 || 1157 (flags & FOF_OFFSET) == 0) { 1158 /* For appenders, punt and lock the whole range. */ 1159 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX); 1160 } else { 1161 rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset, 1162 uio->uio_offset + uio->uio_resid); 1163 } 1164 error = vn_io_fault1(vp, uio, &args, td); 1165 vn_rangelock_unlock(vp, rl_cookie); 1166 } else { 1167 error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td); 1168 } 1169 foffset_unlock_uio(fp, uio, flags); 1170 return (error); 1171 } 1172 1173 /* 1174 * Helper function to perform the requested uiomove operation using 1175 * the held pages for io->uio_iov[0].iov_base buffer instead of 1176 * copyin/copyout. Access to the pages with uiomove_fromphys() 1177 * instead of iov_base prevents page faults that could occur due to 1178 * pmap_collect() invalidating the mapping created by 1179 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or 1180 * object cleanup revoking the write access from page mappings. 1181 * 1182 * Filesystems specified MNTK_NO_IOPF shall use vn_io_fault_uiomove() 1183 * instead of plain uiomove(). 
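 *
 * A minimal sketch of the conversion in a filesystem read path (the
 * buffer pointer, block offset and transfer size names are
 * illustrative, not taken from any particular filesystem); only the
 * copy primitive changes compared to a conventional loop:
 *
 *	xfersize = MIN(uio->uio_resid, bsize - blkoffset);
 *	error = vn_io_fault_uiomove((char *)bp->b_data + blkoffset,
 *	    (int)xfersize, uio);
 *
 * When the calling thread does not run under TDP_UIOHELD, the helper
 * degrades to plain uiomove(), so the same code also works for
 * filesystems mounted without MNTK_NO_IOPF.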
1184 */ 1185 int 1186 vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio) 1187 { 1188 struct uio transp_uio; 1189 struct iovec transp_iov[1]; 1190 struct thread *td; 1191 size_t adv; 1192 int error, pgadv; 1193 1194 td = curthread; 1195 if ((td->td_pflags & TDP_UIOHELD) == 0 || 1196 uio->uio_segflg != UIO_USERSPACE) 1197 return (uiomove(data, xfersize, uio)); 1198 1199 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt)); 1200 transp_iov[0].iov_base = data; 1201 transp_uio.uio_iov = &transp_iov[0]; 1202 transp_uio.uio_iovcnt = 1; 1203 if (xfersize > uio->uio_resid) 1204 xfersize = uio->uio_resid; 1205 transp_uio.uio_resid = transp_iov[0].iov_len = xfersize; 1206 transp_uio.uio_offset = 0; 1207 transp_uio.uio_segflg = UIO_SYSSPACE; 1208 /* 1209 * Since transp_iov points to data, and td_ma page array 1210 * corresponds to original uio->uio_iov, we need to invert the 1211 * direction of the i/o operation as passed to 1212 * uiomove_fromphys(). 1213 */ 1214 switch (uio->uio_rw) { 1215 case UIO_WRITE: 1216 transp_uio.uio_rw = UIO_READ; 1217 break; 1218 case UIO_READ: 1219 transp_uio.uio_rw = UIO_WRITE; 1220 break; 1221 } 1222 transp_uio.uio_td = uio->uio_td; 1223 error = uiomove_fromphys(td->td_ma, 1224 ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK, 1225 xfersize, &transp_uio); 1226 adv = xfersize - transp_uio.uio_resid; 1227 pgadv = 1228 (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) - 1229 (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT); 1230 td->td_ma += pgadv; 1231 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt, 1232 pgadv)); 1233 td->td_ma_cnt -= pgadv; 1234 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv; 1235 uio->uio_iov->iov_len -= adv; 1236 uio->uio_resid -= adv; 1237 uio->uio_offset += adv; 1238 return (error); 1239 } 1240 1241 int 1242 vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize, 1243 struct uio *uio) 1244 { 1245 struct thread *td; 1246 vm_offset_t iov_base; 1247 int cnt, pgadv; 1248 1249 td = curthread; 1250 if ((td->td_pflags & TDP_UIOHELD) == 0 || 1251 uio->uio_segflg != UIO_USERSPACE) 1252 return (uiomove_fromphys(ma, offset, xfersize, uio)); 1253 1254 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt)); 1255 cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize; 1256 iov_base = (vm_offset_t)uio->uio_iov->iov_base; 1257 switch (uio->uio_rw) { 1258 case UIO_WRITE: 1259 pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma, 1260 offset, cnt); 1261 break; 1262 case UIO_READ: 1263 pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK, 1264 cnt); 1265 break; 1266 } 1267 pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT); 1268 td->td_ma += pgadv; 1269 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt, 1270 pgadv)); 1271 td->td_ma_cnt -= pgadv; 1272 uio->uio_iov->iov_base = (char *)(iov_base + cnt); 1273 uio->uio_iov->iov_len -= cnt; 1274 uio->uio_resid -= cnt; 1275 uio->uio_offset += cnt; 1276 return (0); 1277 } 1278 1279 1280 /* 1281 * File table truncate routine. 1282 */ 1283 static int 1284 vn_truncate(struct file *fp, off_t length, struct ucred *active_cred, 1285 struct thread *td) 1286 { 1287 struct vattr vattr; 1288 struct mount *mp; 1289 struct vnode *vp; 1290 void *rl_cookie; 1291 int error; 1292 1293 vp = fp->f_vnode; 1294 1295 /* 1296 * Lock the whole range for truncation. Otherwise split i/o 1297 * might happen partly before and partly after the truncation. 
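 *
 * For example, a concurrent write that vn_io_fault() has split into
 * chunks could otherwise have some chunks applied before the
 * truncation while the remaining chunks re-extend the file
 * afterwards, leaving a mix that neither caller expects.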
1298 */ 1299 rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX); 1300 error = vn_start_write(vp, &mp, V_WAIT | PCATCH); 1301 if (error) 1302 goto out1; 1303 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1304 if (vp->v_type == VDIR) { 1305 error = EISDIR; 1306 goto out; 1307 } 1308 #ifdef MAC 1309 error = mac_vnode_check_write(active_cred, fp->f_cred, vp); 1310 if (error) 1311 goto out; 1312 #endif 1313 error = vn_writechk(vp); 1314 if (error == 0) { 1315 VATTR_NULL(&vattr); 1316 vattr.va_size = length; 1317 error = VOP_SETATTR(vp, &vattr, fp->f_cred); 1318 } 1319 out: 1320 VOP_UNLOCK(vp, 0); 1321 vn_finished_write(mp); 1322 out1: 1323 vn_rangelock_unlock(vp, rl_cookie); 1324 return (error); 1325 } 1326 1327 /* 1328 * File table vnode stat routine. 1329 */ 1330 static int 1331 vn_statfile(fp, sb, active_cred, td) 1332 struct file *fp; 1333 struct stat *sb; 1334 struct ucred *active_cred; 1335 struct thread *td; 1336 { 1337 struct vnode *vp = fp->f_vnode; 1338 int error; 1339 1340 vn_lock(vp, LK_SHARED | LK_RETRY); 1341 error = vn_stat(vp, sb, active_cred, fp->f_cred, td); 1342 VOP_UNLOCK(vp, 0); 1343 1344 return (error); 1345 } 1346 1347 /* 1348 * Stat a vnode; implementation for the stat syscall 1349 */ 1350 int 1351 vn_stat(vp, sb, active_cred, file_cred, td) 1352 struct vnode *vp; 1353 register struct stat *sb; 1354 struct ucred *active_cred; 1355 struct ucred *file_cred; 1356 struct thread *td; 1357 { 1358 struct vattr vattr; 1359 register struct vattr *vap; 1360 int error; 1361 u_short mode; 1362 1363 #ifdef MAC 1364 error = mac_vnode_check_stat(active_cred, file_cred, vp); 1365 if (error) 1366 return (error); 1367 #endif 1368 1369 vap = &vattr; 1370 1371 /* 1372 * Initialize defaults for new and unusual fields, so that file 1373 * systems which don't support these fields don't need to know 1374 * about them. 1375 */ 1376 vap->va_birthtime.tv_sec = -1; 1377 vap->va_birthtime.tv_nsec = 0; 1378 vap->va_fsid = VNOVAL; 1379 vap->va_rdev = NODEV; 1380 1381 error = VOP_GETATTR(vp, vap, active_cred); 1382 if (error) 1383 return (error); 1384 1385 /* 1386 * Zero the spare stat fields 1387 */ 1388 bzero(sb, sizeof *sb); 1389 1390 /* 1391 * Copy from vattr table 1392 */ 1393 if (vap->va_fsid != VNOVAL) 1394 sb->st_dev = vap->va_fsid; 1395 else 1396 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0]; 1397 sb->st_ino = vap->va_fileid; 1398 mode = vap->va_mode; 1399 switch (vap->va_type) { 1400 case VREG: 1401 mode |= S_IFREG; 1402 break; 1403 case VDIR: 1404 mode |= S_IFDIR; 1405 break; 1406 case VBLK: 1407 mode |= S_IFBLK; 1408 break; 1409 case VCHR: 1410 mode |= S_IFCHR; 1411 break; 1412 case VLNK: 1413 mode |= S_IFLNK; 1414 break; 1415 case VSOCK: 1416 mode |= S_IFSOCK; 1417 break; 1418 case VFIFO: 1419 mode |= S_IFIFO; 1420 break; 1421 default: 1422 return (EBADF); 1423 }; 1424 sb->st_mode = mode; 1425 sb->st_nlink = vap->va_nlink; 1426 sb->st_uid = vap->va_uid; 1427 sb->st_gid = vap->va_gid; 1428 sb->st_rdev = vap->va_rdev; 1429 if (vap->va_size > OFF_MAX) 1430 return (EOVERFLOW); 1431 sb->st_size = vap->va_size; 1432 sb->st_atim = vap->va_atime; 1433 sb->st_mtim = vap->va_mtime; 1434 sb->st_ctim = vap->va_ctime; 1435 sb->st_birthtim = vap->va_birthtime; 1436 1437 /* 1438 * According to www.opengroup.org, the meaning of st_blksize is 1439 * "a filesystem-specific preferred I/O block size for this 1440 * object. In some filesystem types, this may vary from file 1441 * to file" 1442 * Use miminum/default of PAGE_SIZE (e.g. for VCHR). 
1443 */ 1444 1445 sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize); 1446 1447 sb->st_flags = vap->va_flags; 1448 if (priv_check(td, PRIV_VFS_GENERATION)) 1449 sb->st_gen = 0; 1450 else 1451 sb->st_gen = vap->va_gen; 1452 1453 sb->st_blocks = vap->va_bytes / S_BLKSIZE; 1454 return (0); 1455 } 1456 1457 /* 1458 * File table vnode ioctl routine. 1459 */ 1460 static int 1461 vn_ioctl(fp, com, data, active_cred, td) 1462 struct file *fp; 1463 u_long com; 1464 void *data; 1465 struct ucred *active_cred; 1466 struct thread *td; 1467 { 1468 struct vattr vattr; 1469 struct vnode *vp; 1470 int error; 1471 1472 vp = fp->f_vnode; 1473 switch (vp->v_type) { 1474 case VDIR: 1475 case VREG: 1476 switch (com) { 1477 case FIONREAD: 1478 vn_lock(vp, LK_SHARED | LK_RETRY); 1479 error = VOP_GETATTR(vp, &vattr, active_cred); 1480 VOP_UNLOCK(vp, 0); 1481 if (error == 0) 1482 *(int *)data = vattr.va_size - fp->f_offset; 1483 return (error); 1484 case FIONBIO: 1485 case FIOASYNC: 1486 return (0); 1487 default: 1488 return (VOP_IOCTL(vp, com, data, fp->f_flag, 1489 active_cred, td)); 1490 } 1491 default: 1492 return (ENOTTY); 1493 } 1494 } 1495 1496 /* 1497 * File table vnode poll routine. 1498 */ 1499 static int 1500 vn_poll(fp, events, active_cred, td) 1501 struct file *fp; 1502 int events; 1503 struct ucred *active_cred; 1504 struct thread *td; 1505 { 1506 struct vnode *vp; 1507 int error; 1508 1509 vp = fp->f_vnode; 1510 #ifdef MAC 1511 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1512 error = mac_vnode_check_poll(active_cred, fp->f_cred, vp); 1513 VOP_UNLOCK(vp, 0); 1514 if (!error) 1515 #endif 1516 1517 error = VOP_POLL(vp, events, fp->f_cred, td); 1518 return (error); 1519 } 1520 1521 /* 1522 * Acquire the requested lock and then check for validity. LK_RETRY 1523 * permits vn_lock to return doomed vnodes. 1524 */ 1525 int 1526 _vn_lock(struct vnode *vp, int flags, char *file, int line) 1527 { 1528 int error; 1529 1530 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 1531 ("vn_lock called with no locktype.")); 1532 do { 1533 #ifdef DEBUG_VFS_LOCKS 1534 KASSERT(vp->v_holdcnt != 0, 1535 ("vn_lock %p: zero hold count", vp)); 1536 #endif 1537 error = VOP_LOCK1(vp, flags, file, line); 1538 flags &= ~LK_INTERLOCK; /* Interlock is always dropped. */ 1539 KASSERT((flags & LK_RETRY) == 0 || error == 0, 1540 ("LK_RETRY set with incompatible flags (0x%x) or an error occured (%d)", 1541 flags, error)); 1542 /* 1543 * Callers specify LK_RETRY if they wish to get dead vnodes. 1544 * If RETRY is not set, we return ENOENT instead. 1545 */ 1546 if (error == 0 && vp->v_iflag & VI_DOOMED && 1547 (flags & LK_RETRY) == 0) { 1548 VOP_UNLOCK(vp, 0); 1549 error = ENOENT; 1550 break; 1551 } 1552 } while (flags & LK_RETRY && error != 0); 1553 return (error); 1554 } 1555 1556 /* 1557 * File table vnode close routine. 
1558 */ 1559 static int 1560 vn_closefile(fp, td) 1561 struct file *fp; 1562 struct thread *td; 1563 { 1564 struct vnode *vp; 1565 struct flock lf; 1566 int error; 1567 1568 vp = fp->f_vnode; 1569 fp->f_ops = &badfileops; 1570 1571 if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) 1572 vref(vp); 1573 1574 error = vn_close(vp, fp->f_flag, fp->f_cred, td); 1575 1576 if (fp->f_type == DTYPE_VNODE && fp->f_flag & FHASLOCK) { 1577 lf.l_whence = SEEK_SET; 1578 lf.l_start = 0; 1579 lf.l_len = 0; 1580 lf.l_type = F_UNLCK; 1581 (void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK); 1582 vrele(vp); 1583 } 1584 return (error); 1585 } 1586 1587 static bool 1588 vn_suspendable_mp(struct mount *mp) 1589 { 1590 1591 return ((mp->mnt_kern_flag & MNTK_SUSPENDABLE) != 0); 1592 } 1593 1594 static bool 1595 vn_suspendable(struct vnode *vp, struct mount **mpp) 1596 { 1597 1598 if (vp != NULL) 1599 *mpp = vp->v_mount; 1600 if (*mpp == NULL) 1601 return (false); 1602 1603 return (vn_suspendable_mp(*mpp)); 1604 } 1605 1606 /* 1607 * Preparing to start a filesystem write operation. If the operation is 1608 * permitted, then we bump the count of operations in progress and 1609 * proceed. If a suspend request is in progress, we wait until the 1610 * suspension is over, and then proceed. 1611 */ 1612 static int 1613 vn_start_write_locked(struct mount *mp, int flags) 1614 { 1615 int error, mflags; 1616 1617 mtx_assert(MNT_MTX(mp), MA_OWNED); 1618 error = 0; 1619 1620 /* 1621 * Check on status of suspension. 1622 */ 1623 if ((curthread->td_pflags & TDP_IGNSUSP) == 0 || 1624 mp->mnt_susp_owner != curthread) { 1625 mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? 1626 (flags & PCATCH) : 0) | (PUSER - 1); 1627 while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { 1628 if (flags & V_NOWAIT) { 1629 error = EWOULDBLOCK; 1630 goto unlock; 1631 } 1632 error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags, 1633 "suspfs", 0); 1634 if (error) 1635 goto unlock; 1636 } 1637 } 1638 if (flags & V_XSLEEP) 1639 goto unlock; 1640 mp->mnt_writeopcount++; 1641 unlock: 1642 if (error != 0 || (flags & V_XSLEEP) != 0) 1643 MNT_REL(mp); 1644 MNT_IUNLOCK(mp); 1645 return (error); 1646 } 1647 1648 int 1649 vn_start_write(vp, mpp, flags) 1650 struct vnode *vp; 1651 struct mount **mpp; 1652 int flags; 1653 { 1654 struct mount *mp; 1655 int error; 1656 1657 if (!vn_suspendable(vp, mpp)) 1658 return (0); 1659 1660 error = 0; 1661 /* 1662 * If a vnode is provided, get and return the mount point that 1663 * to which it will write. 1664 */ 1665 if (vp != NULL) { 1666 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) { 1667 *mpp = NULL; 1668 if (error != EOPNOTSUPP) 1669 return (error); 1670 return (0); 1671 } 1672 } 1673 if ((mp = *mpp) == NULL) 1674 return (0); 1675 1676 /* 1677 * VOP_GETWRITEMOUNT() returns with the mp refcount held through 1678 * a vfs_ref(). 1679 * As long as a vnode is not provided we need to acquire a 1680 * refcount for the provided mountpoint too, in order to 1681 * emulate a vfs_ref(). 1682 */ 1683 MNT_ILOCK(mp); 1684 if (vp == NULL) 1685 MNT_REF(mp); 1686 1687 return (vn_start_write_locked(mp, flags)); 1688 } 1689 1690 /* 1691 * Secondary suspension. Used by operations such as vop_inactive 1692 * routines that are needed by the higher level functions. These 1693 * are allowed to proceed until all the higher level functions have 1694 * completed (indicated by mnt_writeopcount dropping to zero). At that 1695 * time, these operations are halted until the suspension is over. 
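 *
 * The calling convention mirrors vn_start_write(); a sketch of the
 * usual bracket around such a secondary-write update:
 *
 *	if ((error = vn_start_secondary_write(vp, &mp, V_WAIT)) != 0)
 *		return (error);
 *	... perform the metadata update ...
 *	vn_finished_secondary_write(mp);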
1696 */ 1697 int 1698 vn_start_secondary_write(vp, mpp, flags) 1699 struct vnode *vp; 1700 struct mount **mpp; 1701 int flags; 1702 { 1703 struct mount *mp; 1704 int error; 1705 1706 if (!vn_suspendable(vp, mpp)) 1707 return (0); 1708 1709 retry: 1710 if (vp != NULL) { 1711 if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) { 1712 *mpp = NULL; 1713 if (error != EOPNOTSUPP) 1714 return (error); 1715 return (0); 1716 } 1717 } 1718 /* 1719 * If we are not suspended or have not yet reached suspended 1720 * mode, then let the operation proceed. 1721 */ 1722 if ((mp = *mpp) == NULL) 1723 return (0); 1724 1725 /* 1726 * VOP_GETWRITEMOUNT() returns with the mp refcount held through 1727 * a vfs_ref(). 1728 * As long as a vnode is not provided we need to acquire a 1729 * refcount for the provided mountpoint too, in order to 1730 * emulate a vfs_ref(). 1731 */ 1732 MNT_ILOCK(mp); 1733 if (vp == NULL) 1734 MNT_REF(mp); 1735 if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) { 1736 mp->mnt_secondary_writes++; 1737 mp->mnt_secondary_accwrites++; 1738 MNT_IUNLOCK(mp); 1739 return (0); 1740 } 1741 if (flags & V_NOWAIT) { 1742 MNT_REL(mp); 1743 MNT_IUNLOCK(mp); 1744 return (EWOULDBLOCK); 1745 } 1746 /* 1747 * Wait for the suspension to finish. 1748 */ 1749 error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP | 1750 ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0), 1751 "suspfs", 0); 1752 vfs_rel(mp); 1753 if (error == 0) 1754 goto retry; 1755 return (error); 1756 } 1757 1758 /* 1759 * Filesystem write operation has completed. If we are suspending and this 1760 * operation is the last one, notify the suspender that the suspension is 1761 * now in effect. 1762 */ 1763 void 1764 vn_finished_write(mp) 1765 struct mount *mp; 1766 { 1767 if (mp == NULL || !vn_suspendable_mp(mp)) 1768 return; 1769 MNT_ILOCK(mp); 1770 MNT_REL(mp); 1771 mp->mnt_writeopcount--; 1772 if (mp->mnt_writeopcount < 0) 1773 panic("vn_finished_write: neg cnt"); 1774 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && 1775 mp->mnt_writeopcount <= 0) 1776 wakeup(&mp->mnt_writeopcount); 1777 MNT_IUNLOCK(mp); 1778 } 1779 1780 1781 /* 1782 * Filesystem secondary write operation has completed. If we are 1783 * suspending and this operation is the last one, notify the suspender 1784 * that the suspension is now in effect. 1785 */ 1786 void 1787 vn_finished_secondary_write(mp) 1788 struct mount *mp; 1789 { 1790 if (mp == NULL || !vn_suspendable_mp(mp)) 1791 return; 1792 MNT_ILOCK(mp); 1793 MNT_REL(mp); 1794 mp->mnt_secondary_writes--; 1795 if (mp->mnt_secondary_writes < 0) 1796 panic("vn_finished_secondary_write: neg cnt"); 1797 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && 1798 mp->mnt_secondary_writes <= 0) 1799 wakeup(&mp->mnt_secondary_writes); 1800 MNT_IUNLOCK(mp); 1801 } 1802 1803 1804 1805 /* 1806 * Request a filesystem to suspend write operations. 1807 */ 1808 int 1809 vfs_write_suspend(struct mount *mp, int flags) 1810 { 1811 int error; 1812 1813 MPASS(vn_suspendable_mp(mp)); 1814 1815 MNT_ILOCK(mp); 1816 if (mp->mnt_susp_owner == curthread) { 1817 MNT_IUNLOCK(mp); 1818 return (EALREADY); 1819 } 1820 while (mp->mnt_kern_flag & MNTK_SUSPEND) 1821 msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0); 1822 1823 /* 1824 * Unmount holds a write reference on the mount point. If we 1825 * own busy reference and drain for writers, we deadlock with 1826 * the reference draining in the unmount path. 
Callers of 1827 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if 1828 * vfs_busy() reference is owned and caller is not in the 1829 * unmount context. 1830 */ 1831 if ((flags & VS_SKIP_UNMOUNT) != 0 && 1832 (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) { 1833 MNT_IUNLOCK(mp); 1834 return (EBUSY); 1835 } 1836 1837 mp->mnt_kern_flag |= MNTK_SUSPEND; 1838 mp->mnt_susp_owner = curthread; 1839 if (mp->mnt_writeopcount > 0) 1840 (void) msleep(&mp->mnt_writeopcount, 1841 MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0); 1842 else 1843 MNT_IUNLOCK(mp); 1844 if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0) 1845 vfs_write_resume(mp, 0); 1846 return (error); 1847 } 1848 1849 /* 1850 * Request a filesystem to resume write operations. 1851 */ 1852 void 1853 vfs_write_resume(struct mount *mp, int flags) 1854 { 1855 1856 MPASS(vn_suspendable_mp(mp)); 1857 1858 MNT_ILOCK(mp); 1859 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { 1860 KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner")); 1861 mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 | 1862 MNTK_SUSPENDED); 1863 mp->mnt_susp_owner = NULL; 1864 wakeup(&mp->mnt_writeopcount); 1865 wakeup(&mp->mnt_flag); 1866 curthread->td_pflags &= ~TDP_IGNSUSP; 1867 if ((flags & VR_START_WRITE) != 0) { 1868 MNT_REF(mp); 1869 mp->mnt_writeopcount++; 1870 } 1871 MNT_IUNLOCK(mp); 1872 if ((flags & VR_NO_SUSPCLR) == 0) 1873 VFS_SUSP_CLEAN(mp); 1874 } else if ((flags & VR_START_WRITE) != 0) { 1875 MNT_REF(mp); 1876 vn_start_write_locked(mp, 0); 1877 } else { 1878 MNT_IUNLOCK(mp); 1879 } 1880 } 1881 1882 /* 1883 * Helper loop around vfs_write_suspend() for filesystem unmount VFS 1884 * methods. 1885 */ 1886 int 1887 vfs_write_suspend_umnt(struct mount *mp) 1888 { 1889 int error; 1890 1891 MPASS(vn_suspendable_mp(mp)); 1892 KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0, 1893 ("vfs_write_suspend_umnt: recursed")); 1894 1895 /* dounmount() already called vn_start_write(). */ 1896 for (;;) { 1897 vn_finished_write(mp); 1898 error = vfs_write_suspend(mp, 0); 1899 if (error != 0) { 1900 vn_start_write(NULL, &mp, V_WAIT); 1901 return (error); 1902 } 1903 MNT_ILOCK(mp); 1904 if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0) 1905 break; 1906 MNT_IUNLOCK(mp); 1907 vn_start_write(NULL, &mp, V_WAIT); 1908 } 1909 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); 1910 wakeup(&mp->mnt_flag); 1911 MNT_IUNLOCK(mp); 1912 curthread->td_pflags |= TDP_IGNSUSP; 1913 return (0); 1914 } 1915 1916 /* 1917 * Implement kqueues for files by translating it to vnode operation. 1918 */ 1919 static int 1920 vn_kqfilter(struct file *fp, struct knote *kn) 1921 { 1922 1923 return (VOP_KQFILTER(fp->f_vnode, kn)); 1924 } 1925 1926 /* 1927 * Simplified in-kernel wrapper calls for extended attribute access. 1928 * Both calls pass in a NULL credential, authorizing as "kernel" access. 1929 * Set IO_NODELOCKED in ioflg if the vnode is already locked. 
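 *
 * A sketch of fetching one attribute into a caller-supplied buffer;
 * the namespace, attribute name and buffer size are illustrative and
 * the vnode is assumed to be locked already, hence IO_NODELOCKED:
 *
 *	char buf[64];
 *	int buflen, error;
 *
 *	buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED, EXTATTR_NAMESPACE_SYSTEM,
 *	    "example.attr", &buflen, buf, td);
 *
 * On success, buflen is updated to the number of bytes actually
 * returned.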
1930 */ 1931 int 1932 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace, 1933 const char *attrname, int *buflen, char *buf, struct thread *td) 1934 { 1935 struct uio auio; 1936 struct iovec iov; 1937 int error; 1938 1939 iov.iov_len = *buflen; 1940 iov.iov_base = buf; 1941 1942 auio.uio_iov = &iov; 1943 auio.uio_iovcnt = 1; 1944 auio.uio_rw = UIO_READ; 1945 auio.uio_segflg = UIO_SYSSPACE; 1946 auio.uio_td = td; 1947 auio.uio_offset = 0; 1948 auio.uio_resid = *buflen; 1949 1950 if ((ioflg & IO_NODELOCKED) == 0) 1951 vn_lock(vp, LK_SHARED | LK_RETRY); 1952 1953 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); 1954 1955 /* authorize attribute retrieval as kernel */ 1956 error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL, 1957 td); 1958 1959 if ((ioflg & IO_NODELOCKED) == 0) 1960 VOP_UNLOCK(vp, 0); 1961 1962 if (error == 0) { 1963 *buflen = *buflen - auio.uio_resid; 1964 } 1965 1966 return (error); 1967 } 1968 1969 /* 1970 * XXX failure mode if partially written? 1971 */ 1972 int 1973 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace, 1974 const char *attrname, int buflen, char *buf, struct thread *td) 1975 { 1976 struct uio auio; 1977 struct iovec iov; 1978 struct mount *mp; 1979 int error; 1980 1981 iov.iov_len = buflen; 1982 iov.iov_base = buf; 1983 1984 auio.uio_iov = &iov; 1985 auio.uio_iovcnt = 1; 1986 auio.uio_rw = UIO_WRITE; 1987 auio.uio_segflg = UIO_SYSSPACE; 1988 auio.uio_td = td; 1989 auio.uio_offset = 0; 1990 auio.uio_resid = buflen; 1991 1992 if ((ioflg & IO_NODELOCKED) == 0) { 1993 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0) 1994 return (error); 1995 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1996 } 1997 1998 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); 1999 2000 /* authorize attribute setting as kernel */ 2001 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td); 2002 2003 if ((ioflg & IO_NODELOCKED) == 0) { 2004 vn_finished_write(mp); 2005 VOP_UNLOCK(vp, 0); 2006 } 2007 2008 return (error); 2009 } 2010 2011 int 2012 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace, 2013 const char *attrname, struct thread *td) 2014 { 2015 struct mount *mp; 2016 int error; 2017 2018 if ((ioflg & IO_NODELOCKED) == 0) { 2019 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0) 2020 return (error); 2021 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2022 } 2023 2024 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); 2025 2026 /* authorize attribute removal as kernel */ 2027 error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td); 2028 if (error == EOPNOTSUPP) 2029 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, 2030 NULL, td); 2031 2032 if ((ioflg & IO_NODELOCKED) == 0) { 2033 vn_finished_write(mp); 2034 VOP_UNLOCK(vp, 0); 2035 } 2036 2037 return (error); 2038 } 2039 2040 static int 2041 vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags, 2042 struct vnode **rvp) 2043 { 2044 2045 return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp)); 2046 } 2047 2048 int 2049 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp) 2050 { 2051 2052 return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino, 2053 lkflags, rvp)); 2054 } 2055 2056 int 2057 vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg, 2058 int lkflags, struct vnode **rvp) 2059 { 2060 struct mount *mp; 2061 int ltype, error; 2062 2063 ASSERT_VOP_LOCKED(vp, "vn_vget_ino_get"); 2064 mp = vp->v_mount; 2065 ltype = VOP_ISLOCKED(vp); 2066 KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED, 
2067 ("vn_vget_ino: vp not locked")); 2068 error = vfs_busy(mp, MBF_NOWAIT); 2069 if (error != 0) { 2070 vfs_ref(mp); 2071 VOP_UNLOCK(vp, 0); 2072 error = vfs_busy(mp, 0); 2073 vn_lock(vp, ltype | LK_RETRY); 2074 vfs_rel(mp); 2075 if (error != 0) 2076 return (ENOENT); 2077 if (vp->v_iflag & VI_DOOMED) { 2078 vfs_unbusy(mp); 2079 return (ENOENT); 2080 } 2081 } 2082 VOP_UNLOCK(vp, 0); 2083 error = alloc(mp, alloc_arg, lkflags, rvp); 2084 vfs_unbusy(mp); 2085 if (*rvp != vp) 2086 vn_lock(vp, ltype | LK_RETRY); 2087 if (vp->v_iflag & VI_DOOMED) { 2088 if (error == 0) { 2089 if (*rvp == vp) 2090 vunref(vp); 2091 else 2092 vput(*rvp); 2093 } 2094 error = ENOENT; 2095 } 2096 return (error); 2097 } 2098 2099 int 2100 vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio, 2101 const struct thread *td) 2102 { 2103 2104 if (vp->v_type != VREG || td == NULL) 2105 return (0); 2106 PROC_LOCK(td->td_proc); 2107 if ((uoff_t)uio->uio_offset + uio->uio_resid > 2108 lim_cur(td->td_proc, RLIMIT_FSIZE)) { 2109 kern_psignal(td->td_proc, SIGXFSZ); 2110 PROC_UNLOCK(td->td_proc); 2111 return (EFBIG); 2112 } 2113 PROC_UNLOCK(td->td_proc); 2114 return (0); 2115 } 2116 2117 int 2118 vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, 2119 struct thread *td) 2120 { 2121 struct vnode *vp; 2122 2123 vp = fp->f_vnode; 2124 #ifdef AUDIT 2125 vn_lock(vp, LK_SHARED | LK_RETRY); 2126 AUDIT_ARG_VNODE1(vp); 2127 VOP_UNLOCK(vp, 0); 2128 #endif 2129 return (setfmode(td, active_cred, vp, mode)); 2130 } 2131 2132 int 2133 vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, 2134 struct thread *td) 2135 { 2136 struct vnode *vp; 2137 2138 vp = fp->f_vnode; 2139 #ifdef AUDIT 2140 vn_lock(vp, LK_SHARED | LK_RETRY); 2141 AUDIT_ARG_VNODE1(vp); 2142 VOP_UNLOCK(vp, 0); 2143 #endif 2144 return (setfown(td, active_cred, vp, uid, gid)); 2145 } 2146 2147 void 2148 vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end) 2149 { 2150 vm_object_t object; 2151 2152 if ((object = vp->v_object) == NULL) 2153 return; 2154 VM_OBJECT_WLOCK(object); 2155 vm_object_page_remove(object, start, end, 0); 2156 VM_OBJECT_WUNLOCK(object); 2157 } 2158 2159 int 2160 vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred) 2161 { 2162 struct vattr va; 2163 daddr_t bn, bnp; 2164 uint64_t bsize; 2165 off_t noff; 2166 int error; 2167 2168 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA, 2169 ("Wrong command %lu", cmd)); 2170 2171 if (vn_lock(vp, LK_SHARED) != 0) 2172 return (EBADF); 2173 if (vp->v_type != VREG) { 2174 error = ENOTTY; 2175 goto unlock; 2176 } 2177 error = VOP_GETATTR(vp, &va, cred); 2178 if (error != 0) 2179 goto unlock; 2180 noff = *off; 2181 if (noff >= va.va_size) { 2182 error = ENXIO; 2183 goto unlock; 2184 } 2185 bsize = vp->v_mount->mnt_stat.f_iosize; 2186 for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) { 2187 error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL); 2188 if (error == EOPNOTSUPP) { 2189 error = ENOTTY; 2190 goto unlock; 2191 } 2192 if ((bnp == -1 && cmd == FIOSEEKHOLE) || 2193 (bnp != -1 && cmd == FIOSEEKDATA)) { 2194 noff = bn * bsize; 2195 if (noff < *off) 2196 noff = *off; 2197 goto unlock; 2198 } 2199 } 2200 if (noff > va.va_size) 2201 noff = va.va_size; 2202 /* noff == va.va_size. There is an implicit hole at the end of file. 
int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    const struct thread *td)
{

	if (vp->v_type != VREG || td == NULL)
		return (0);
	PROC_LOCK(td->td_proc);
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
		kern_psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		return (EFBIG);
	}
	PROC_UNLOCK(td->td_proc);
	return (0);
}

int
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfmode(td, active_cred, vp, mode));
}

int
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfown(td, active_cred, vp, uid, gid));
}

void
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
{
	vm_object_t object;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_page_remove(object, start, end, 0);
	VM_OBJECT_WUNLOCK(object);
}

int
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
{
	struct vattr va;
	daddr_t bn, bnp;
	uint64_t bsize;
	off_t noff;
	int error;

	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
	    ("Wrong command %lu", cmd));

	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);
	if (vp->v_type != VREG) {
		error = ENOTTY;
		goto unlock;
	}
	error = VOP_GETATTR(vp, &va, cred);
	if (error != 0)
		goto unlock;
	noff = *off;
	if (noff >= va.va_size) {
		error = ENXIO;
		goto unlock;
	}
	bsize = vp->v_mount->mnt_stat.f_iosize;
	for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize) {
		error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
		if (error == EOPNOTSUPP) {
			error = ENOTTY;
			goto unlock;
		}
		if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
		    (bnp != -1 && cmd == FIOSEEKDATA)) {
			noff = bn * bsize;
			if (noff < *off)
				noff = *off;
			goto unlock;
		}
	}
	if (noff > va.va_size)
		noff = va.va_size;
	/* noff == va.va_size. There is an implicit hole at the end of file. */
	if (cmd == FIOSEEKDATA)
		error = ENXIO;
unlock:
	VOP_UNLOCK(vp, 0);
	if (error == 0)
		*off = noff;
	return (error);
}

int
vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct ucred *cred;
	struct vnode *vp;
	struct vattr vattr;
	off_t foffset, size;
	int error, noneg;

	cred = td->td_ucred;
	vp = fp->f_vnode;
	foffset = foffset_lock(fp, 0);
	noneg = (vp->v_type != VCHR);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (noneg &&
		    (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, cred);
		VOP_UNLOCK(vp, 0);
		if (error)
			break;

		/*
		 * If the file references a disk device, then fetch
		 * the media size and use that to determine the ending
		 * offset.
		 */
		if (vattr.va_size == 0 && vp->v_type == VCHR &&
		    fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
			vattr.va_size = size;
		if (noneg &&
		    (vattr.va_size > OFF_MAX ||
		    (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += vattr.va_size;
		break;
	case L_SET:
		break;
	case SEEK_DATA:
		error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
		break;
	case SEEK_HOLE:
		error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
		break;
	default:
		error = EINVAL;
	}
	if (error == 0 && noneg && offset < 0)
		error = EINVAL;
	if (error != 0)
		goto drop;
	VFS_KNOTE_UNLOCKED(vp, 0);
	td->td_uretoff.tdu_off = offset;
drop:
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

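/*
 * Illustrative sketch, not compiled: vn_seek() turns SEEK_HOLE and
 * SEEK_DATA into FIOSEEKHOLE/FIOSEEKDATA ioctls on the file, so a file
 * system whose only notion of holes is unallocated blocks can service
 * them from its VOP_IOCTL method by delegating to vn_bmap_seekhole()
 * above.  The vop_ioctl implementation below is a hypothetical example,
 * not taken from any particular file system.
 */
#if 0
static int
example_ioctl(struct vop_ioctl_args *ap)
{

	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		/* vn_bmap_seekhole() takes the vnode lock itself. */
		return (vn_bmap_seekhole(ap->a_vp, ap->a_command,
		    (off_t *)ap->a_data, ap->a_cred));
	default:
		return (ENOTTY);
	}
}
#endif
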
int
vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	int error;

	/*
	 * Grant permission if the caller is the owner of the file, or
	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
	 * the file.  If the time pointer is null, then write
	 * permission on the file is also sufficient.
	 *
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
	 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
	 * will be allowed to set the times [..] to the current
	 * server time.
	 */
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	return (error);
}

int
vn_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct vnode *vp;
	int error;

	if (fp->f_type == DTYPE_FIFO)
		kif->kf_type = KF_TYPE_FIFO;
	else
		kif->kf_type = KF_TYPE_VNODE;
	vp = fp->f_vnode;
	vref(vp);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fill_kinfo_vnode(vp, kif);
	vrele(vp);
	FILEDESC_SLOCK(fdp);
	return (error);
}

int
vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif)
{
	struct vattr va;
	char *fullpath, *freepath;
	int error;

	kif->kf_vnode_type = vntype_to_kinfo(vp->v_type);
	freepath = NULL;
	fullpath = "-";
	error = vn_fullpath(curthread, vp, &fullpath, &freepath);
	if (error == 0) {
		strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
	}
	if (freepath != NULL)
		free(freepath, M_TEMP);

	/*
	 * Retrieve vnode attributes.
	 */
	va.va_fsid = VNOVAL;
	va.va_rdev = NODEV;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
	VOP_UNLOCK(vp, 0);
	if (error != 0)
		return (error);
	if (va.va_fsid != VNOVAL)
		kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
	else
		kif->kf_un.kf_file.kf_file_fsid =
		    vp->v_mount->mnt_stat.f_fsid.val[0];
	kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
	kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
	kif->kf_un.kf_file.kf_file_size = va.va_size;
	kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
	return (0);
}

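/*
 * Illustrative sketch, not compiled: a file system's VOP_SETATTR
 * implementation would typically call vn_utimes_perm() before honouring
 * a request to change timestamps, so that the utimes(2)-style
 * permission rules above are applied uniformly.  The fragment below is
 * hypothetical and only shows the timestamp-permission step.
 */
#if 0
static int
example_setattr_times(struct vnode *vp, struct vattr *vap,
    struct ucred *cred, struct thread *td)
{
	int error;

	/* Only timestamp updates are handled in this fragment. */
	if (vap->va_atime.tv_sec == VNOVAL &&
	    vap->va_mtime.tv_sec == VNOVAL)
		return (0);
	error = vn_utimes_perm(vp, vap, cred, td);
	if (error != 0)
		return (error);
	/* ...update the on-disk inode timestamps here... */
	return (0);
}
#endif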