/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2013, 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disk.h>
#include <sys/fail.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vnode_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_rdwr_t	vn_io_fault;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;
static fo_mmap_t	vn_mmap;

struct fileops vnops = {
	.fo_read = vn_io_fault,
	.fo_write = vn_io_fault,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = vn_seek,
	.fo_fill_kinfo = vn_fill_kinfo,
	.fo_mmap = vn_mmap,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

static const int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RW,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static int vn_io_fault_prefault = 0;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RW,
    &vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");

/*
 * Returns true if vn_io_fault mode of handling the i/o request should
 * be used.
 */
static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}
/*
 * Structure used to pass arguments to vn_io_fault1(), to do either
 * file- or vnode-based I/O calls.
 */
struct vn_io_fault_args {
	enum {
		VN_IO_FAULT_FOP,
		VN_IO_FAULT_VOP
	} kind;
	struct ucred *cred;
	int flags;
	union {
		struct fop_args_tag {
			struct file *fp;
			fo_rdwr_t *doio;
		} fop_args;
		struct vop_args_tag {
			struct vnode *vp;
		} vop_args;
	} args;
};

static int vn_io_fault1(struct vnode *vp, struct uio *uio,
    struct vn_io_fault_args *args, struct thread *td);

int
vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp)
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

/*
 * Common code for vnode open operations via a name lookup.
 * Look up the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;

restart:
	fmode = *flagp;
	if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT |
	    O_EXCL | O_DIRECTORY))
		return (EINVAL);
	else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		/*
		 * Set NOCACHE to avoid flushing the cache when
		 * rolling in many files at once.
		 */
		ndp->ni_cnd.cn_flags = ISOPEN | LOCKPARENT | LOCKLEAF | NOCACHE;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((fmode & O_BENEATH) != 0)
			ndp->ni_cnd.cn_flags |= BENEATH;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				goto restart;
			}
			if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0)
				ndp->ni_cnd.cn_flags |= MAKEENTRY;
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = ISOPEN |
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if (!(fmode & FWRITE))
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if ((fmode & O_BENEATH) != 0)
			ndp->ni_cnd.cn_flags |= BENEATH;
		if (!(vn_open_flags & VN_OPEN_NOAUDIT))
			ndp->ni_cnd.cn_flags |= AUDITVNODE1;
		if (vn_open_flags & VN_OPEN_NOCAPCHECK)
			ndp->ni_cnd.cn_flags |= NOCAPCHECK;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	error = vn_open_vnode(vp, fmode, cred, td, fp);
	if (error)
		goto bad;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}
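
/*
 * Illustrative sketch (added commentary, not part of this file): a typical
 * in-kernel caller initializes the nameidata itself, pairs vn_open() with
 * NDFREE() and vn_close(), and receives ni_vp locked.  The path and flags
 * below are only an example of the calling convention described above.
 *
 *	struct nameidata nd;
 *	int flags, error;
 *
 *	flags = FREAD;
 *	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path", td);
 *	error = vn_open(&nd, &flags, 0, NULL);
 *	if (error != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);	(vn_open() does not free it)
 *	... use nd.ni_vp ...
 *	VOP_UNLOCK(nd.ni_vp, 0);
 *	error = vn_close(nd.ni_vp, FREAD, td->td_ucred, td);
 */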
static int
vn_open_vnode_advlock(struct vnode *vp, int fmode, struct file *fp)
{
	struct flock lf;
	int error, lock_flags, type;

	ASSERT_VOP_LOCKED(vp, "vn_open_vnode_advlock");
	if ((fmode & (O_EXLOCK | O_SHLOCK)) == 0)
		return (0);
	KASSERT(fp != NULL, ("open with flock requires fp"));
	if (fp->f_type != DTYPE_NONE && fp->f_type != DTYPE_VNODE)
		return (EOPNOTSUPP);

	lock_flags = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);

	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = (fmode & O_EXLOCK) != 0 ? F_WRLCK : F_RDLCK;
	type = F_FLOCK;
	if ((fmode & FNONBLOCK) == 0)
		type |= F_WAIT;
	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
	if (error == 0)
		fp->f_flag |= FHASLOCK;

	vn_lock(vp, lock_flags | LK_RETRY);
	if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
		error = ENOENT;
	return (error);
}

/*
 * Common code for vnode open operations once a vnode is located.
 * Check permissions, and call the VOP_OPEN routine.
 */
int
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp)
{
	accmode_t accmode;
	int error;

	if (vp->v_type == VLNK)
		return (EMLINK);
	if (vp->v_type == VSOCK)
		return (EOPNOTSUPP);
	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
		return (ENOTDIR);
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	if (fmode & O_CREAT)
		accmode |= VCREAT;
	if (fmode & O_VERIFY)
		accmode |= VVERIFY;
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		return (error);

	accmode &= ~(VCREAT | VVERIFY);
#endif
	if ((fmode & O_CREAT) == 0 && accmode != 0) {
		error = VOP_ACCESS(vp, accmode, cred, td);
		if (error != 0)
			return (error);
	}
	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	error = VOP_OPEN(vp, fmode, cred, td, fp);
	if (error != 0)
		return (error);

	error = vn_open_vnode_advlock(vp, fmode, fp);
	if (error == 0 && (fmode & FWRITE) != 0) {
		error = VOP_ADD_WRITECOUNT(vp, 1);
		if (error == 0) {
			CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
			    __func__, vp, vp->v_writecount);
		}
	}

	/*
	 * An error from the advlock or VOP_ADD_WRITECOUNT() calls still
	 * requires calling VOP_CLOSE() to pair with the earlier VOP_OPEN().
	 * Arrange for that by having fdrop() use vn_closefile().
	 */
	if (error != 0) {
		fp->f_flag |= FOPENFAILED;
		fp->f_vnode = vp;
		if (fp->f_ops == &badfileops) {
			fp->f_type = DTYPE_VNODE;
			fp->f_ops = &vnops;
		}
		vref(vp);
	}

	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 * It is racy.
 */
int
vn_writechk(struct vnode *vp)
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (VOP_IS_TEXT(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
static int
vn_close1(struct vnode *vp, int flags, struct ucred *file_cred,
    struct thread *td, bool keep_ref)
{
	struct mount *mp;
	int error, lock_flags;

	if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
	    MNT_EXTENDED_SHARED(vp->v_mount))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	if ((flags & (FWRITE | FOPENFAILED)) == FWRITE) {
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	if (keep_ref)
		VOP_UNLOCK(vp, 0);
	else
		vput(vp);
	vn_finished_write(mp);
	return (error);
}

int
vn_close(struct vnode *vp, int flags, struct ucred *file_cred,
    struct thread *td)
{

	return (vn_close1(vp, flags, file_cred, td, false));
}
/*
 * Heuristic to detect sequential operation.
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{

	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);
	if (fp->f_flag & FRDAHEAD)
		return (fp->f_seqcount << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		fp->f_seqcount += lmin(IO_SEQMAX,
		    howmany(uio->uio_resid, 16384));
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw-down sequentiality. */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}
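
/*
 * Worked example (added commentary, not from the original source): a
 * thread issuing back-to-back 64KB reads advances f_seqcount by
 * howmany(65536, 16384) = 4 per call, saturating at IO_SEQMAX.  The value
 * returned to the caller, f_seqcount << IO_SEQSHIFT, is merged into
 * ioflag, where filesystems use it to scale read-ahead.  A single
 * non-sequential access drops f_seqcount to 1, and a second one to 0.
 */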
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error, lock_flags;

	if (offset < 0 && vp->v_type != VCHR)
		return (EINVAL);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((ioflg & IO_RANGELOCKED) == 0) {
			if (rw == UIO_READ) {
				rl_cookie = vn_rangelock_rlock(vp, offset,
				    offset + len);
			} else {
				rl_cookie = vn_rangelock_wlock(vp, offset,
				    offset + len);
			}
		} else
			rl_cookie = NULL;
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (do_vn_io_fault(vp, &auio)) {
			args.kind = VN_IO_FAULT_VOP;
			args.cred = cred;
			args.flags = ioflg;
			args.args.vop_args.vp = vp;
			error = vn_io_fault1(vp, &auio, &args, td);
		} else if (rw == UIO_READ) {
			error = VOP_READ(vp, &auio, ioflg, cred);
		} else /* if (rw == UIO_WRITE) */ {
			error = VOP_WRITE(vp, &auio, ioflg, cred);
		}
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp, 0);
		if (mp != NULL)
			vn_finished_write(mp);
	}
out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call kern_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len,
    off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, size_t *aresid, struct thread *td)
{
	int error = 0;
	ssize_t iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}
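
/*
 * Worked example (added commentary, not from the original source):
 * assuming the usual MAXBSIZE of 65536, a transfer starting at offset 100
 * uses a first chunk of 65536 - 100 = 65436 bytes, bringing the offset to
 * 65536; every following chunk is then a full MAXBSIZE except possibly
 * the last, so intermediate writes land on block boundaries.
 */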
off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	/*
	 * Caller only wants the current f_offset value.  Assume that
	 * reads of the long and shorter integer types are atomic.
	 */
	if ((flags & FOF_NOLOCK) != 0)
		return (fp->f_offset);
#endif

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
	 */
	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

#if OFF_MAX <= LONG_MAX
	if ((flags & FOF_NOLOCK) != 0) {
		if ((flags & FOF_NOUPDATE) == 0)
			fp->f_offset = val;
		if ((flags & FOF_NEXTOFF) != 0)
			fp->f_nextoff = val;
		return;
	}
#endif

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF) != 0)
		fp->f_nextoff = val;
	if ((flags & FOF_NOLOCK) == 0) {
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}

void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		foffset_unlock(fp, uio->uio_offset, flags);
}
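
/*
 * Usage note (added commentary, not from the original source): every
 * foffset_lock() without FOF_NOLOCK must be paired with a
 * foffset_unlock() on the same file, as vn_io_fault() and vn_seek() do,
 * either directly or via foffset_lock_uio()/foffset_unlock_uio() above.
 * The FOFFSET_LOCKED bit serializes offset users, so an unmatched lock
 * leaves later callers sleeping in "vofflock" forever.
 */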
static int
get_advice(struct file *fp, struct uio *uio)
{
	struct mtx *mtxp;
	int ret;

	ret = POSIX_FADV_NORMAL;
	if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG)
		return (ret);

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if (fp->f_advice != NULL &&
	    uio->uio_offset >= fp->f_advice->fa_start &&
	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
		ret = fp->f_advice->fa_advice;
	mtx_unlock(mtxp);
	return (ret);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
    struct thread *td)
{
	struct vnode *vp;
	off_t orig_offset;
	int error, ioflag;
	int advice;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	advice = get_advice(fp, uio);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	orig_offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    orig_offset != uio->uio_offset)
		/*
		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
		 * for the backing file after a POSIX_FADV_NOREUSE
		 * read(2).
		 */
		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
		    POSIX_FADV_DONTNEED);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
    struct thread *td)
{
	struct vnode *vp;
	struct mount *mp;
	off_t orig_offset;
	int error, ioflag, lock_flags;
	int advice;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	orig_offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    orig_offset != uio->uio_offset)
		/*
		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
		 * for the backing file after a POSIX_FADV_NOREUSE
		 * write(2).
		 */
		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
		    POSIX_FADV_DONTNEED);
unlock:
	return (error);
}
/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that the thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then the system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * the VOP will succeed (ignoring genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to doing
 * the i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in curthread->td_ma,
 * instead of doing uiomove().  A helper function
 * vn_io_fault_uiomove() converts the uiomove request into a
 * uiomove_fromphys() over the td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */
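
/*
 * Illustrative sketch (added commentary, not part of this file): a
 * filesystem opting into this scheme sets MNTK_NO_IOPF at mount time,
 *
 *	MNT_ILOCK(mp);
 *	mp->mnt_kern_flag |= MNTK_NO_IOPF;
 *	MNT_IUNLOCK(mp);
 *
 * and replaces uiomove() in its VOP_READ()/VOP_WRITE() copy loops with,
 * where "bp" and "n" stand for whatever buffer and length the filesystem
 * computed,
 *
 *	error = vn_io_fault_uiomove(bp->b_data + blkoffset, n, uio);
 *
 * vn_io_fault_uiomove() transparently falls back to plain uiomove() when
 * no held-page array is installed, so the same code works on both paths.
 */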
/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{
	int error, save;

	error = 0;
	save = vm_fault_disable_pagefaults();
	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		error = (args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td);
		break;
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			error = VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred);
		} else if (uio->uio_rw == UIO_WRITE) {
			error = VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred);
		}
		break;
	default:
		panic("vn_io_fault_doio: unknown kind of io %d %d",
		    args->kind, uio->uio_rw);
	}
	vm_fault_enable_pagefaults(save);
	return (error);
}

static int
vn_io_fault_touch(char *base, const struct uio *uio)
{
	int r;

	r = fubyte(base);
	if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1))
		return (EFAULT);
	return (0);
}

static int
vn_io_fault_prefault_user(const struct uio *uio)
{
	char *base;
	const struct iovec *iov;
	size_t len;
	ssize_t resid;
	int error, i;

	KASSERT(uio->uio_segflg == UIO_USERSPACE,
	    ("vn_io_fault_prefault userspace"));

	error = i = 0;
	iov = uio->uio_iov;
	resid = uio->uio_resid;
	base = iov->iov_base;
	len = iov->iov_len;
	while (resid > 0) {
		error = vn_io_fault_touch(base, uio);
		if (error != 0)
			break;
		if (len < PAGE_SIZE) {
			if (len != 0) {
				error = vn_io_fault_touch(base + len - 1, uio);
				if (error != 0)
					break;
				resid -= len;
			}
			if (++i >= uio->uio_iovcnt)
				break;
			iov = uio->uio_iov + i;
			base = iov->iov_base;
			len = iov->iov_len;
		} else {
			len -= PAGE_SIZE;
			base += PAGE_SIZE;
			resid -= PAGE_SIZE;
		}
	}
	return (error);
}
/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
 */
static int
vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
    struct thread *td)
{
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	vm_page_t *prev_td_ma;
	vm_prot_t prot;
	vm_offset_t addr, end;
	size_t len, resid;
	ssize_t adv;
	int error, cnt, saveheld, prev_td_ma_cnt;

	if (vn_io_fault_prefault) {
		error = vn_io_fault_prefault_user(uio);
		if (error != 0)
			return (error); /* Or ignore ? */
	}

	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;

	/*
	 * The UFS follows IO_UNIT directive and replays back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	error = vn_io_fault_doio(args, uio, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}
		if (len > io_hold_cnt * PAGE_SIZE)
			len = io_hold_cnt * PAGE_SIZE;
		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		if (end < addr) {
			error = EFAULT;
			break;
		}
		cnt = atop(end - trunc_page(addr));
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use a partial
		 * page.  +2 accounts for such a situation.
		 */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			error = EFAULT;
			break;
		}
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = vn_io_fault_doio(args, &short_uio, td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	free(uio_clone, M_IOV);
	return (error);
}
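
/*
 * Worked example (added commentary, not from the original source): with
 * io_hold_cnt = 16 and 4KB pages, each chunk transfers at most 64KB.  A
 * full 64KB chunk that does not start on a page boundary touches a
 * partial page at each end, i.e. 17 pages rather than 16, which is why
 * ma[] is dimensioned io_hold_cnt + 2: a partial page at both the start
 * and the end of the chunk always fits.
 */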
static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	fo_rdwr_t *doio;
	struct vnode *vp;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error;

	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
	vp = fp->f_vnode;
	foffset_lock_uio(fp, uio, flags);
	if (do_vn_io_fault(vp, uio)) {
		args.kind = VN_IO_FAULT_FOP;
		args.args.fop_args.fp = fp;
		args.args.fop_args.doio = doio;
		args.cred = active_cred;
		args.flags = flags | FOF_OFFSET;
		if (uio->uio_rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		} else if ((fp->f_flag & O_APPEND) != 0 ||
		    (flags & FOF_OFFSET) == 0) {
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		}
		error = vn_io_fault1(vp, uio, &args, td);
		vn_rangelock_unlock(vp, rl_cookie);
	} else {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	}
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for io->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems that specify MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;
	int error, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}
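
/*
 * Usage note (added commentary, not from the original source): callers
 * never need to test TDP_UIOHELD themselves; both vn_io_fault_uiomove()
 * above and vn_io_fault_pgmove() below degrade to the plain uiomove() and
 * uiomove_fromphys() paths when no held-page array is installed, so a
 * filesystem can call them unconditionally from its VOP_READ() and
 * VOP_WRITE() implementations.
 */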
int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
	struct thread *td;
	vm_offset_t iov_base;
	int cnt, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove_fromphys(ma, offset, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
	switch (uio->uio_rw) {
	case UIO_WRITE:
		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
		    offset, cnt);
		break;
	case UIO_READ:
		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
		    cnt);
		break;
	}
	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
	uio->uio_iov->iov_len -= cnt;
	uio->uio_resid -= cnt;
	uio->uio_offset += cnt;
	return (0);
}

/*
 * File table truncate routine.
 */
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct mount *mp;
	struct vnode *vp;
	void *rl_cookie;
	int error;

	vp = fp->f_vnode;

	/*
	 * Lock the whole range for truncation.  Otherwise split i/o
	 * might happen partly before and partly after the truncation.
	 */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		goto out1;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = VOP_ADD_WRITECOUNT(vp, 1);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		if ((fp->f_flag & O_FSYNC) != 0)
			vattr.va_vaflags |= VA_SYNC;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
	}
out:
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = vn_stat(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp, 0);

	return (error);
}
/*
 * Stat a vnode; implementation for the stat syscall
 */
int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *active_cred,
    struct ucred *file_cred, struct thread *td)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;

	AUDIT_ARG_VNODE1(vp);
#ifdef MAC
	error = mac_vnode_check_stat(active_cred, file_cred, vp);
	if (error)
		return (error);
#endif

	vap = &vattr;

	/*
	 * Initialize defaults for new and unusual fields, so that file
	 * systems which don't support these fields don't need to know
	 * about them.
	 */
	vap->va_birthtime.tv_sec = -1;
	vap->va_birthtime.tv_nsec = 0;
	vap->va_fsid = VNOVAL;
	vap->va_rdev = NODEV;

	error = VOP_GETATTR(vp, vap, active_cred);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	bzero(sb, sizeof *sb);

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	if (vap->va_size > OFF_MAX)
		return (EOVERFLOW);
	sb->st_size = vap->va_size;
	sb->st_atim = vap->va_atime;
	sb->st_mtim = vap->va_mtime;
	sb->st_ctim = vap->va_ctime;
	sb->st_birthtim = vap->va_birthtime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 *   "a filesystem-specific preferred I/O block size for this
	 *    object.  In some filesystem types, this may vary from file
	 *    to file"
	 * Use minimum/default of PAGE_SIZE (e.g. for VCHR).
	 */

	sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);

	sb->st_flags = vap->va_flags;
	if (priv_check(td, PRIV_VFS_GENERATION))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct vnode *vp;
	struct fiobmap2_arg *bmarg;
	int error;

	vp = fp->f_vnode;
	switch (vp->v_type) {
	case VDIR:
	case VREG:
		switch (com) {
		case FIONREAD:
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp, 0);
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			return (error);
		case FIOBMAP2:
			bmarg = (struct fiobmap2_arg *)data;
			vn_lock(vp, LK_SHARED | LK_RETRY);
#ifdef MAC
			error = mac_vnode_check_read(active_cred, fp->f_cred,
			    vp);
			if (error == 0)
#endif
				error = VOP_BMAP(vp, bmarg->bn, NULL,
				    &bmarg->bn, &bmarg->runp, &bmarg->runb);
			VOP_UNLOCK(vp, 0);
			return (error);
		case FIONBIO:
		case FIOASYNC:
			return (0);
		default:
			return (VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td));
		}
		break;
	case VCHR:
		return (VOP_IOCTL(vp, com, data, fp->f_flag,
		    active_cred, td));
	default:
		return (ENOTTY);
	}
}
/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
#ifdef MAC
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
	VOP_UNLOCK(vp, 0);
	if (!error)
#endif
		error = VOP_POLL(vp, events, fp->f_cred, td);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
int
_vn_lock(struct vnode *vp, int flags, char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock: no locktype"));
	VNASSERT(vp->v_holdcnt != 0, vp, ("vn_lock: zero hold count"));
retry:
	error = VOP_LOCK1(vp, flags, file, line);
	flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */
	KASSERT((flags & LK_RETRY) == 0 || error == 0,
	    ("vn_lock: error %d incompatible with flags %#x", error, flags));

	if ((flags & LK_RETRY) == 0) {
		if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) {
			VOP_UNLOCK(vp, 0);
			error = ENOENT;
		}
	} else if (error != 0)
		goto retry;
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct thread *td)
{
	struct vnode *vp;
	struct flock lf;
	int error;
	bool ref;

	vp = fp->f_vnode;
	fp->f_ops = &badfileops;
	ref = (fp->f_flag & FHASLOCK) != 0 && fp->f_type == DTYPE_VNODE;

	error = vn_close1(vp, fp->f_flag, fp->f_cred, td, ref);

	if (__predict_false(ref)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		vrele(vp);
	}
	return (error);
}

static bool
vn_suspendable(struct mount *mp)
{

	return (mp->mnt_op->vfs_susp_clean != NULL);
}

/*
 * Preparing to start a filesystem write operation.  If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed.  If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
static int
vn_start_write_locked(struct mount *mp, int flags)
{
	int error, mflags;

	mtx_assert(MNT_MTX(mp), MA_OWNED);
	error = 0;

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
		    (flags & PCATCH) : 0) | (PUSER - 1);
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
			    "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}
int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	if (!vn_suspendable(mp)) {
		if (vp != NULL || (flags & V_MNTREF) != 0)
			vfs_rel(mp);
		return (0);
	}

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL && (flags & V_MNTREF) == 0)
		MNT_REF(mp);

	return (vn_start_write_locked(mp, flags));
}
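
/*
 * Usage sketch (added commentary, not from the original source): write
 * paths in this file bracket the actual modification with this pair, as
 * vn_rdwr() and vn_truncate() do:
 *
 *	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... modify the file ...
 *	VOP_UNLOCK(vp, 0);
 *	vn_finished_write(mp);
 *
 * vn_finished_write() tolerates mp == NULL, so the bracket is safe even
 * when no suspendable mount is involved.
 */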
/*
 * Secondary suspension.  Used by operations such as vop_inactive
 * routines that are needed by the higher level functions.  These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero).  At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	if (!vn_suspendable(mp)) {
		if (vp != NULL || (flags & V_MNTREF) != 0)
			vfs_rel(mp);
		return (0);
	}

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL && (flags & V_MNTREF) == 0)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
	    ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
	    "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed.  If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
 */
void
vn_finished_write(struct mount *mp)
{
	if (mp == NULL || !vn_suspendable(mp))
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_writeopcount--;
	if (mp->mnt_writeopcount < 0)
		panic("vn_finished_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_writeopcount <= 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed.  If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(struct mount *mp)
{
	if (mp == NULL || !vn_suspendable(mp))
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(struct mount *mp, int flags)
{
	int error;

	MPASS(vn_suspendable(mp));

	MNT_ILOCK(mp);
	if (mp->mnt_susp_owner == curthread) {
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);

	/*
	 * Unmount holds a write reference on the mount point.  If we
	 * own busy reference and drain for writers, we deadlock with
	 * the reference draining in the unmount path.  Callers of
	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if
	 * vfs_busy() reference is owned and caller is not in the
	 * unmount context.
	 */
	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		MNT_IUNLOCK(mp);
		return (EBUSY);
	}

	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
		vfs_write_resume(mp, 0);
	return (error);
}
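
/*
 * Pairing sketch (added commentary, not from the original source): a
 * snapshot-style caller that already holds a vfs_busy() reference would
 * suspend and resume around its work roughly as:
 *
 *	error = vfs_write_suspend(mp, VS_SKIP_UNMOUNT);
 *	if (error != 0)
 *		return (error);
 *	... filesystem is quiesced; do the work ...
 *	vfs_write_resume(mp, 0);
 *
 * On success the calling thread is recorded as mnt_susp_owner and must be
 * the one to call vfs_write_resume().
 */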
/*
 * Request a filesystem to resume write operations.
 */
void
vfs_write_resume(struct mount *mp, int flags)
{

	MPASS(vn_suspendable(mp));

	MNT_ILOCK(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
		KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner"));
		mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 |
		    MNTK_SUSPENDED);
		mp->mnt_susp_owner = NULL;
		wakeup(&mp->mnt_writeopcount);
		wakeup(&mp->mnt_flag);
		curthread->td_pflags &= ~TDP_IGNSUSP;
		if ((flags & VR_START_WRITE) != 0) {
			MNT_REF(mp);
			mp->mnt_writeopcount++;
		}
		MNT_IUNLOCK(mp);
		if ((flags & VR_NO_SUSPCLR) == 0)
			VFS_SUSP_CLEAN(mp);
	} else if ((flags & VR_START_WRITE) != 0) {
		MNT_REF(mp);
		vn_start_write_locked(mp, 0);
	} else {
		MNT_IUNLOCK(mp);
	}
}

/*
 * Helper loop around vfs_write_suspend() for filesystem unmount VFS
 * methods.
 */
int
vfs_write_suspend_umnt(struct mount *mp)
{
	int error;

	MPASS(vn_suspendable(mp));
	KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0,
	    ("vfs_write_suspend_umnt: recursed"));

	/* dounmount() already called vn_start_write(). */
	for (;;) {
		vn_finished_write(mp);
		error = vfs_write_suspend(mp, 0);
		if (error != 0) {
			vn_start_write(NULL, &mp, V_WAIT);
			return (error);
		}
		MNT_ILOCK(mp);
		if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			break;
		MNT_IUNLOCK(mp);
		vn_start_write(NULL, &mp, V_WAIT);
	}
	mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
	wakeup(&mp->mnt_flag);
	MNT_IUNLOCK(mp);
	curthread->td_pflags |= TDP_IGNSUSP;
	return (0);
}

/*
 * Implement kqueues for files by translating them to vnode operations.
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{

	return (VOP_KQFILTER(fp->f_vnode, kn));
}

/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing as "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int *buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	int error;

	iov.iov_len = *buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_SHARED | LK_RETRY);

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute retrieval as kernel */
	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL,
	    td);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0);

	if (error == 0) {
		*buflen = *buflen - auio.uio_resid;
	}

	return (error);
}
/*
 * XXX failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, int buflen, char *buf, struct thread *td)
{
	struct uio auio;
	struct iovec iov;
	struct mount *mp;
	int error;

	iov.iov_len = buflen;
	iov.iov_base = buf;

	auio.uio_iov = &iov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = td;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute setting as kernel */
	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}

int
vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct mount *mp;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0)
			return (error);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");

	/* authorize attribute removal as kernel */
	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td);
	if (error == EOPNOTSUPP)
		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
		    NULL, td);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_finished_write(mp);
		VOP_UNLOCK(vp, 0);
	}

	return (error);
}
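
/*
 * Usage sketch (added commentary, not from the original source): reading
 * an extended attribute into a caller-supplied buffer; buflen is in/out,
 * returning the number of bytes actually copied:
 *
 *	char buf[128];
 *	int buflen, error;
 *
 *	buflen = sizeof(buf);
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "some.attr", &buflen, buf, td);
 *
 * with the vnode already locked, as IO_NODELOCKED requires; "some.attr"
 * is a placeholder attribute name.
 */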
int
vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
    struct thread *td)
{

	if (vp->v_type != VREG || td == NULL)
		return (0);
	if ((uoff_t)uio->uio_offset + uio->uio_resid >
	    lim_cur(td, RLIMIT_FSIZE)) {
		PROC_LOCK(td->td_proc);
		kern_psignal(td->td_proc, SIGXFSZ);
		PROC_UNLOCK(td->td_proc);
		return (EFBIG);
	}
	return (0);
}

int
vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfmode(td, active_cred, vp, mode));
}

int
vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;

	vp = fp->f_vnode;
#ifdef AUDIT
	vn_lock(vp, LK_SHARED | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	VOP_UNLOCK(vp, 0);
#endif
	return (setfown(td, active_cred, vp, uid, gid));
}

void
vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end)
{
	vm_object_t object;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_page_remove(object, start, end, 0);
	VM_OBJECT_WUNLOCK(object);
}

int
vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred)
{
	struct vattr va;
	daddr_t bn, bnp;
	uint64_t bsize;
	off_t noff;
	int error;

	KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA,
	    ("Wrong command %lu", cmd));

	if (vn_lock(vp, LK_SHARED) != 0)
		return (EBADF);
	if (vp->v_type != VREG) {
		error = ENOTTY;
		goto unlock;
	}
	error = VOP_GETATTR(vp, &va, cred);
	if (error != 0)
		goto unlock;
	noff = *off;
	if (noff >= va.va_size) {
		error = ENXIO;
		goto unlock;
	}
	bsize = vp->v_mount->mnt_stat.f_iosize;
	for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize -
	    noff % bsize) {
		error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL);
		if (error == EOPNOTSUPP) {
			error = ENOTTY;
			goto unlock;
		}
		if ((bnp == -1 && cmd == FIOSEEKHOLE) ||
		    (bnp != -1 && cmd == FIOSEEKDATA)) {
			noff = bn * bsize;
			if (noff < *off)
				noff = *off;
			goto unlock;
		}
	}
	if (noff > va.va_size)
		noff = va.va_size;
	/* noff == va.va_size. There is an implicit hole at the end of file. */
	if (cmd == FIOSEEKDATA)
		error = ENXIO;
unlock:
	VOP_UNLOCK(vp, 0);
	if (error == 0)
		*off = noff;
	return (error);
}
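
/*
 * Illustrative usage sketch (an assumption for exposition, not part of
 * this file): advancing an offset to the next data region with the
 * helper above.  ENXIO means no data remains before EOF, which is how
 * lseek(2) reports SEEK_DATA at or past the last allocated block.
 */
#if 0
static int
example_next_data(struct vnode *vp, off_t *offp, struct ucred *cred)
{

	return (vn_bmap_seekhole(vp, FIOSEEKDATA, offp, cred));
}
#endif
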
int
vn_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct ucred *cred;
	struct vnode *vp;
	struct vattr vattr;
	off_t foffset, size;
	int error, noneg;

	cred = td->td_ucred;
	vp = fp->f_vnode;
	foffset = foffset_lock(fp, 0);
	noneg = (vp->v_type != VCHR);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (noneg &&
		    (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, cred);
		VOP_UNLOCK(vp, 0);
		if (error)
			break;

		/*
		 * If the file references a disk device, then fetch
		 * the media size and use that to determine the ending
		 * offset.
		 */
		if (vattr.va_size == 0 && vp->v_type == VCHR &&
		    fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0)
			vattr.va_size = size;
		if (noneg &&
		    (vattr.va_size > OFF_MAX ||
		    (offset > 0 && vattr.va_size > OFF_MAX - offset))) {
			error = EOVERFLOW;
			break;
		}
		offset += vattr.va_size;
		break;
	case L_SET:
		break;
	case SEEK_DATA:
		error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td);
		break;
	case SEEK_HOLE:
		error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td);
		break;
	default:
		error = EINVAL;
	}
	if (error == 0 && noneg && offset < 0)
		error = EINVAL;
	if (error != 0)
		goto drop;
	VFS_KNOTE_UNLOCKED(vp, 0);
	td->td_uretoff.tdu_off = offset;
drop:
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

int
vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	int error;

	/*
	 * Grant permission if the caller is the owner of the file, or
	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
	 * the file.  If the time pointer is null, then write
	 * permission on the file is also sufficient.
	 *
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
	 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES
	 * will be allowed to set the times [..] to the current
	 * server time.
	 */
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	return (error);
}
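
/*
 * Illustrative usage sketch (an assumption for exposition, not part of
 * this file): a utimes(2)-style caller validates permission with
 * vn_utimes_perm() before applying the timestamps via VOP_SETATTR().
 * The vnode is assumed to be exclusively locked by the caller.
 */
#if 0
static int
example_set_times(struct vnode *vp, struct vattr *vap, struct ucred *cred,
    struct thread *td)
{
	int error;

	error = vn_utimes_perm(vp, vap, cred, td);
	if (error == 0)
		error = VOP_SETATTR(vp, vap, cred);
	return (error);
}
#endif
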
int
vn_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct vnode *vp;
	int error;

	if (fp->f_type == DTYPE_FIFO)
		kif->kf_type = KF_TYPE_FIFO;
	else
		kif->kf_type = KF_TYPE_VNODE;
	vp = fp->f_vnode;
	vref(vp);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fill_kinfo_vnode(vp, kif);
	vrele(vp);
	FILEDESC_SLOCK(fdp);
	return (error);
}

static inline void
vn_fill_junk(struct kinfo_file *kif)
{
	size_t len, olen;

	/*
	 * Simulate vn_fullpath returning changing values for a given
	 * vp during e.g. coredump.
	 */
	len = (arc4random() % (sizeof(kif->kf_path) - 2)) + 1;
	olen = strlen(kif->kf_path);
	if (len < olen)
		strcpy(&kif->kf_path[len - 1], "$");
	else
		for (; olen < len; olen++)
			strcpy(&kif->kf_path[olen], "A");
}

int
vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif)
{
	struct vattr va;
	char *fullpath, *freepath;
	int error;

	kif->kf_un.kf_file.kf_file_type = vntype_to_kinfo(vp->v_type);
	freepath = NULL;
	fullpath = "-";
	error = vn_fullpath(curthread, vp, &fullpath, &freepath);
	if (error == 0) {
		strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
	}
	if (freepath != NULL)
		free(freepath, M_TEMP);

	KFAIL_POINT_CODE(DEBUG_FP, fill_kinfo_vnode__random_path,
		vn_fill_junk(kif);
	);

	/*
	 * Retrieve vnode attributes.
	 */
	va.va_fsid = VNOVAL;
	va.va_rdev = NODEV;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, curthread->td_ucred);
	VOP_UNLOCK(vp, 0);
	if (error != 0)
		return (error);
	if (va.va_fsid != VNOVAL)
		kif->kf_un.kf_file.kf_file_fsid = va.va_fsid;
	else
		kif->kf_un.kf_file.kf_file_fsid =
		    vp->v_mount->mnt_stat.f_fsid.val[0];
	kif->kf_un.kf_file.kf_file_fsid_freebsd11 =
	    kif->kf_un.kf_file.kf_file_fsid;	/* truncate */
	kif->kf_un.kf_file.kf_file_fileid = va.va_fileid;
	kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode);
	kif->kf_un.kf_file.kf_file_size = va.va_size;
	kif->kf_un.kf_file.kf_file_rdev = va.va_rdev;
	kif->kf_un.kf_file.kf_file_rdev_freebsd11 =
	    kif->kf_un.kf_file.kf_file_rdev;	/* truncate */
	return (0);
}
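
/*
 * Illustrative userland sketch (an assumption for exposition, not part
 * of this file): the records filled above are exported through the
 * kern.proc.filedesc sysctl and are commonly consumed via the libutil
 * wrapper kinfo_getfile(3).
 */
#if 0
#include <sys/types.h>
#include <sys/user.h>
#include <libutil.h>
#include <stdio.h>
#include <stdlib.h>

static void
example_list_files(pid_t pid)
{
	struct kinfo_file *kif;
	int cnt, i;

	kif = kinfo_getfile(pid, &cnt);
	if (kif == NULL)
		return;
	for (i = 0; i < cnt; i++)
		printf("fd %d path %s\n", kif[i].kf_fd, kif[i].kf_path);
	free(kif);
}
#endif
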
int
vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
#ifdef HWPMC_HOOKS
	struct pmckern_map_in pkm;
#endif
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	boolean_t writecounted;
	int error;

#if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \
    defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4)
	/*
	 * POSIX shared-memory objects are defined to have
	 * kernel persistence, and are not defined to support
	 * read(2)/write(2) -- or even open(2).  Thus, we can
	 * use MAP_NOSYNC to trade on-disk coherence for speed.
	 * The shm_open(3) library routine turns on the FPOSIXSHM
	 * flag to request this behavior.
	 */
	if ((fp->f_flag & FPOSIXSHM) != 0)
		flags |= MAP_NOSYNC;
#endif
	vp = fp->f_vnode;

	/*
	 * Ensure that file and memory protections are
	 * compatible.  Note that we only worry about
	 * writability if mapping is shared; in this case,
	 * current and max prot are dictated by the open file.
	 * XXX use the vnode instead?  Problem is: what
	 * credentials do we use for determination?  What if
	 * proc does a setuid?
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission even though the file was
	 * opened without it, bail out.
	 */
	if ((flags & MAP_SHARED) != 0) {
		if ((fp->f_flag & FWRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		else if ((prot & VM_PROT_WRITE) != 0)
			return (EACCES);
	} else {
		maxprot |= VM_PROT_WRITE;
		cap_maxprot |= VM_PROT_WRITE;
	}
	maxprot &= cap_maxprot;

	/*
	 * For regular files and shared memory, POSIX requires that
	 * the value of foff be a legitimate offset within the data
	 * object.  In particular, negative offsets are invalid.
	 * Blocking negative offsets and overflows here avoids
	 * possible wraparound or user-level access into reserved
	 * ranges of the data object later.  In contrast, POSIX does
	 * not dictate how offsets are used by device drivers, so in
	 * the case of a device mapping a negative offset is passed
	 * on.
	 */
	if (
#ifdef _LP64
	    size > OFF_MAX ||
#endif
	    foff < 0 || foff > OFF_MAX - size)
		return (EINVAL);

	writecounted = FALSE;
	error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, vp,
	    &foff, &object, &writecounted);
	if (error != 0)
		return (error);
	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
	    foff, writecounted, td);
	if (error != 0) {
		/*
		 * If this mapping was accounted for in the vnode's
		 * writecount, then undo that now.
		 */
		if (writecounted)
			vnode_pager_release_writecount(object, 0, size);
		vm_object_deallocate(object);
	}
#ifdef HWPMC_HOOKS
	/* Inform hwpmc(4) if an executable is being mapped. */
	if (PMC_HOOK_INSTALLED(PMC_FN_MMAP)) {
		if ((prot & VM_PROT_EXECUTE) != 0 && error == 0) {
			pkm.pm_file = vp;
			pkm.pm_address = (uintptr_t) *addr;
			PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_MMAP, (void *) &pkm);
		}
	}
#endif
	return (error);
}

void
vn_fsid(struct vnode *vp, struct vattr *va)
{
	fsid_t *f;

	f = &vp->v_mount->mnt_stat.f_fsid;
	va->va_fsid = (uint32_t)f->val[1];
	va->va_fsid <<= sizeof(f->val[1]) * NBBY;
	va->va_fsid += (uint32_t)f->val[0];
}
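
/*
 * Worked example (exposition only, not part of this file): vn_fsid()
 * above packs both 32-bit words of the mount's fsid into the single
 * 64-bit va_fsid attribute; val[1] is shifted left by 32 bits
 * (sizeof(f->val[1]) * NBBY) before val[0] is added.
 */
#if 0
	/* With f_fsid.val = { 0x11111111, 0x22222222 }: */
	va_fsid = (uint32_t)0x22222222;		/* 0x0000000022222222 */
	va_fsid <<= 32;				/* 0x2222222200000000 */
	va_fsid += (uint32_t)0x11111111;	/* 0x2222222211111111 */
#endif
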
int
vn_fsync_buf(struct vnode *vp, int waitfor)
{
	struct buf *bp, *nbp;
	struct bufobj *bo;
	struct mount *mp;
	int error, maxretry;

	error = 0;
	maxretry = 10000;     /* large, arbitrarily chosen */
	mp = NULL;
	if (vp->v_type == VCHR) {
		VI_LOCK(vp);
		mp = vp->v_rdev->si_mountpt;
		VI_UNLOCK(vp);
	}
	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (waitfor != MNT_WAIT)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
			    BO_LOCKPTR(bo)) != 0) {
				BO_LOCK(bo);
				goto loop1;
			}
			BO_LOCK(bo);
		}
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		if (maxretry < 1000)
			pause("dirty", hz < 1000 ? 1 : hz / 1000);
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous, the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) != 0)
					break;
			if ((mp != NULL && mp->mnt_secondary_writes > 0) ||
			    (error == 0 && --maxretry >= 0))
				goto loop1;
			if (error == 0)
				error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error != 0)
		vn_printf(vp, "fsync: giving up on dirty (error = %d) ", error);

	return (error);
}
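
/*
 * Illustrative usage sketch (an assumption for exposition, not part of
 * this file): a buffer-cache-based filesystem's VOP_FSYNC method can
 * delegate the dirty-buffer flush to vn_fsync_buf() before writing out
 * any filesystem-specific metadata.
 */
#if 0
static int
example_fsync(struct vop_fsync_args *ap)
{

	return (vn_fsync_buf(ap->a_vp, ap->a_waitfor));
}
#endif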