/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2013, 2014 The FreeBSD Foundation
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/disk.h>
#include <sys/fail.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/stat.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

static fo_rdwr_t	vn_read;
static fo_rdwr_t	vn_write;
static fo_rdwr_t	vn_io_fault;
static fo_truncate_t	vn_truncate;
static fo_ioctl_t	vn_ioctl;
static fo_poll_t	vn_poll;
static fo_kqfilter_t	vn_kqfilter;
static fo_stat_t	vn_statfile;
static fo_close_t	vn_closefile;
static fo_mmap_t	vn_mmap;
static fo_fallocate_t	vn_fallocate;

struct fileops vnops = {
	.fo_read = vn_io_fault,
	.fo_write = vn_io_fault,
	.fo_truncate = vn_truncate,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_chmod = vn_chmod,
	.fo_chown = vn_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = vn_seek,
	.fo_fill_kinfo = vn_fill_kinfo,
	.fo_mmap = vn_mmap,
	.fo_fallocate = vn_fallocate,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE
};

const u_int io_hold_cnt = 16;
static int vn_io_fault_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_enable, CTLFLAG_RWTUN,
    &vn_io_fault_enable, 0, "Enable vn_io_fault lock avoidance");
static int vn_io_fault_prefault = 0;
SYSCTL_INT(_debug, OID_AUTO, vn_io_fault_prefault, CTLFLAG_RWTUN,
    &vn_io_fault_prefault, 0, "Enable vn_io_fault prefaulting");
static int vn_io_pgcache_read_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, vn_io_pgcache_read_enable, CTLFLAG_RWTUN,
    &vn_io_pgcache_read_enable, 0,
    "Enable copying from page cache for reads, avoiding fs");
static u_long vn_io_faults_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_io_faults, CTLFLAG_RD,
    &vn_io_faults_cnt, 0, "Count of vn_io_fault lock avoidance triggers");

static int vfs_allow_read_dir = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, allow_read_dir, CTLFLAG_RW,
    &vfs_allow_read_dir, 0,
    "Enable read(2) of directory by root for filesystems that support it");

/*
 * Returns true if vn_io_fault mode of handling the i/o request should
 * be used.
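 * This requires a regular vnode on a filesystem that sets MNTK_NO_IOPF,
 * an i/o request targeting a userspace buffer, and the
 * debug.vn_io_fault_enable knob being non-zero.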
 */
static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}

/*
 * Structure used to pass arguments to vn_io_fault1(), to do either
 * file- or vnode-based I/O calls.
 */
struct vn_io_fault_args {
	enum {
		VN_IO_FAULT_FOP,
		VN_IO_FAULT_VOP
	} kind;
	struct ucred *cred;
	int flags;
	union {
		struct fop_args_tag {
			struct file *fp;
			fo_rdwr_t *doio;
		} fop_args;
		struct vop_args_tag {
			struct vnode *vp;
		} vop_args;
	} args;
};

static int vn_io_fault1(struct vnode *vp, struct uio *uio,
    struct vn_io_fault_args *args, struct thread *td);

int
vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp)
{
	struct thread *td = ndp->ni_cnd.cn_thread;

	return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp));
}

static uint64_t
open2nameif(int fmode, u_int vn_open_flags)
{
	uint64_t res;

	res = ISOPEN | LOCKLEAF;
	if ((fmode & O_BENEATH) != 0)
		res |= BENEATH;
	if ((fmode & O_RESOLVE_BENEATH) != 0)
		res |= RBENEATH;
	if ((vn_open_flags & VN_OPEN_NOAUDIT) == 0)
		res |= AUDITVNODE1;
	if ((vn_open_flags & VN_OPEN_NOCAPCHECK) != 0)
		res |= NOCAPCHECK;
	return (res);
}

/*
 * Common code for vnode open operations via a name lookup.
 * Lookup the vnode and invoke VOP_CREATE if needed.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
    struct ucred *cred, struct file *fp)
{
	struct vnode *vp;
	struct mount *mp;
	struct thread *td = ndp->ni_cnd.cn_thread;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode, error;

restart:
	fmode = *flagp;
	if ((fmode & (O_CREAT | O_EXCL | O_DIRECTORY)) == (O_CREAT |
	    O_EXCL | O_DIRECTORY))
		return (EINVAL);
	else if ((fmode & (O_CREAT | O_DIRECTORY)) == O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags);
		/*
		 * Set NOCACHE to avoid flushing the cache when
		 * rolling in many files at once.
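		 * LOCKPARENT keeps the directory vnode locked so that
		 * VOP_CREATE() can operate on it below.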
		 */
		ndp->ni_cnd.cn_flags |= LOCKPARENT | NOCACHE;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((vn_open_flags & VN_OPEN_INVFS) == 0)
			bwillwrite();
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				if ((error = vn_start_write(NULL, &mp,
				    V_XSLEEP | PCATCH)) != 0)
					return (error);
				NDREINIT(ndp);
				goto restart;
			}
			if ((vn_open_flags & VN_OPEN_NAMECACHE) != 0)
				ndp->ni_cnd.cn_flags |= MAKEENTRY;
#ifdef MAC
			error = mac_vnode_check_create(cred, ndp->ni_dvp,
			    &ndp->ni_cnd, vap);
			if (error == 0)
#endif
				error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
				    &ndp->ni_cnd, vap);
			vput(ndp->ni_dvp);
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				return (error);
			}
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags);
		ndp->ni_cnd.cn_flags |= (fmode & O_NOFOLLOW) != 0 ? NOFOLLOW :
		    FOLLOW;
		if ((fmode & FWRITE) == 0)
			ndp->ni_cnd.cn_flags |= LOCKSHARED;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	error = vn_open_vnode(vp, fmode, cred, td, fp);
	if (error)
		goto bad;
	*flagp = fmode;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	*flagp = fmode;
	ndp->ni_vp = NULL;
	return (error);
}

static int
vn_open_vnode_advlock(struct vnode *vp, int fmode, struct file *fp)
{
	struct flock lf;
	int error, lock_flags, type;

	ASSERT_VOP_LOCKED(vp, "vn_open_vnode_advlock");
	if ((fmode & (O_EXLOCK | O_SHLOCK)) == 0)
		return (0);
	KASSERT(fp != NULL, ("open with flock requires fp"));
	if (fp->f_type != DTYPE_NONE && fp->f_type != DTYPE_VNODE)
		return (EOPNOTSUPP);

	lock_flags = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp);

	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	lf.l_type = (fmode & O_EXLOCK) != 0 ? F_WRLCK : F_RDLCK;
	type = F_FLOCK;
	if ((fmode & FNONBLOCK) == 0)
		type |= F_WAIT;
	error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type);
	if (error == 0)
		fp->f_flag |= FHASLOCK;

	vn_lock(vp, lock_flags | LK_RETRY);
	if (error == 0 && VN_IS_DOOMED(vp))
		error = ENOENT;
	return (error);
}

/*
 * Common code for vnode open operations once a vnode is located.
 * Check permissions, and call the VOP_OPEN routine.
 */
int
vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
    struct thread *td, struct file *fp)
{
	accmode_t accmode;
	int error;

	if (vp->v_type == VLNK)
		return (EMLINK);
	if (vp->v_type == VSOCK)
		return (EOPNOTSUPP);
	if (vp->v_type != VDIR && fmode & O_DIRECTORY)
		return (ENOTDIR);
	accmode = 0;
	if (fmode & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return (EISDIR);
		accmode |= VWRITE;
	}
	if (fmode & FREAD)
		accmode |= VREAD;
	if (fmode & FEXEC)
		accmode |= VEXEC;
	if ((fmode & O_APPEND) && (fmode & FWRITE))
		accmode |= VAPPEND;
#ifdef MAC
	if (fmode & O_CREAT)
		accmode |= VCREAT;
	if (fmode & O_VERIFY)
		accmode |= VVERIFY;
	error = mac_vnode_check_open(cred, vp, accmode);
	if (error)
		return (error);

	accmode &= ~(VCREAT | VVERIFY);
#endif
	if ((fmode & O_CREAT) == 0 && accmode != 0) {
		error = VOP_ACCESS(vp, accmode, cred, td);
		if (error != 0)
			return (error);
	}
	if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	error = VOP_OPEN(vp, fmode, cred, td, fp);
	if (error != 0)
		return (error);

	error = vn_open_vnode_advlock(vp, fmode, fp);
	if (error == 0 && (fmode & FWRITE) != 0) {
		error = VOP_ADD_WRITECOUNT(vp, 1);
		if (error == 0) {
			CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
			    __func__, vp, vp->v_writecount);
		}
	}

	/*
	 * An error from the advlock or VOP_ADD_WRITECOUNT() calls still
	 * requires a VOP_CLOSE() to pair with the earlier VOP_OPEN().
	 * Arrange for that by having fdrop() use vn_closefile().
	 */
	if (error != 0) {
		fp->f_flag |= FOPENFAILED;
		fp->f_vnode = vp;
		if (fp->f_ops == &badfileops) {
			fp->f_type = DTYPE_VNODE;
			fp->f_ops = &vnops;
		}
		vref(vp);
	}

	ASSERT_VOP_LOCKED(vp, "vn_open_vnode");
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * Prototype text segments cannot be written.
 * It is racy.
 */
int
vn_writechk(struct vnode *vp)
{

	ASSERT_VOP_LOCKED(vp, "vn_writechk");
	/*
	 * If there's shared text associated with
	 * the vnode, try to free it up once.  If
	 * we fail, we can't allow writing.
	 */
	if (VOP_IS_TEXT(vp))
		return (ETXTBSY);

	return (0);
}

/*
 * Vnode close call
 */
static int
vn_close1(struct vnode *vp, int flags, struct ucred *file_cred,
    struct thread *td, bool keep_ref)
{
	struct mount *mp;
	int error, lock_flags;

	if (vp->v_type != VFIFO && (flags & FWRITE) == 0 &&
	    MNT_EXTENDED_SHARED(vp->v_mount))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, lock_flags | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	if ((flags & (FWRITE | FOPENFAILED)) == FWRITE) {
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	error = VOP_CLOSE(vp, flags, file_cred, td);
	if (keep_ref)
		VOP_UNLOCK(vp);
	else
		vput(vp);
	vn_finished_write(mp);
	return (error);
}

int
vn_close(struct vnode *vp, int flags, struct ucred *file_cred,
    struct thread *td)
{

	return (vn_close1(vp, flags, file_cred, td, false));
}

/*
 * Heuristic to detect sequential operation.
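 * The returned value is a read-ahead hint: f_seqcount for the given
 * direction, scaled by IO_SEQSHIFT into the ioflag bits that
 * filesystems use to size clustered i/o.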
 */
static int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	enum uio_rw rw;

	ASSERT_VOP_LOCKED(fp->f_vnode, __func__);

	rw = uio->uio_rw;
	if (fp->f_flag & FRDAHEAD)
		return (fp->f_seqcount[rw] << IO_SEQSHIFT);

	/*
	 * Offset 0 is handled specially.  open() sets f_seqcount to 1 so
	 * that the first I/O is normally considered to be slightly
	 * sequential.  Seeking to offset 0 doesn't change sequentiality
	 * unless previous seeks have reduced f_seqcount to 0, in which
	 * case offset 0 is not special.
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount[rw] > 0) ||
	    uio->uio_offset == fp->f_nextoff[rw]) {
		/*
		 * f_seqcount is in units of fixed-size blocks so that it
		 * depends mainly on the amount of sequential I/O and not
		 * much on the number of sequential I/O's.  The fixed size
		 * of 16384 is hard-coded here since it is (not quite) just
		 * a magic size that works well here.  This size is more
		 * closely related to the best I/O size for real disks than
		 * to any block size used by software.
		 */
		if (uio->uio_resid >= IO_SEQMAX * 16384)
			fp->f_seqcount[rw] = IO_SEQMAX;
		else {
			fp->f_seqcount[rw] += howmany(uio->uio_resid, 16384);
			if (fp->f_seqcount[rw] > IO_SEQMAX)
				fp->f_seqcount[rw] = IO_SEQMAX;
		}
		return (fp->f_seqcount[rw] << IO_SEQSHIFT);
	}

	/* Not sequential.  Quickly draw-down sequentiality. */
	if (fp->f_seqcount[rw] > 1)
		fp->f_seqcount[rw] = 1;
	else
		fp->f_seqcount[rw] = 0;
	return (0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, ssize_t *aresid, struct thread *td)
{
	struct uio auio;
	struct iovec aiov;
	struct mount *mp;
	struct ucred *cred;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error, lock_flags;

	if (offset < 0 && vp->v_type != VCHR)
		return (EINVAL);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	error = 0;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if ((ioflg & IO_RANGELOCKED) == 0) {
			if (rw == UIO_READ) {
				rl_cookie = vn_rangelock_rlock(vp, offset,
				    offset + len);
			} else {
				rl_cookie = vn_rangelock_wlock(vp, offset,
				    offset + len);
			}
		} else
			rl_cookie = NULL;
		mp = NULL;
		if (rw == UIO_WRITE) {
			if (vp->v_type != VCHR &&
			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
			    != 0)
				goto out;
			if (MNT_SHARED_WRITES(mp) ||
			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
				lock_flags = LK_SHARED;
			else
				lock_flags = LK_EXCLUSIVE;
		} else
			lock_flags = LK_SHARED;
		vn_lock(vp, lock_flags | LK_RETRY);
	} else
		rl_cookie = NULL;

	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
#ifdef MAC
	if ((ioflg & IO_NOMACCHECK) == 0) {
		if (rw == UIO_READ)
			error = mac_vnode_check_read(active_cred, file_cred,
			    vp);
		else
			error = mac_vnode_check_write(active_cred, file_cred,
			    vp);
	}
#endif
	if (error == 0) {
		if (file_cred != NULL)
			cred = file_cred;
		else
			cred = active_cred;
		if (do_vn_io_fault(vp, &auio)) {
			args.kind = VN_IO_FAULT_VOP;
			args.cred = cred;
			args.flags = ioflg;
			args.args.vop_args.vp = vp;
			error = vn_io_fault1(vp, &auio, &args, td);
		} else if (rw == UIO_READ) {
			error = VOP_READ(vp, &auio, ioflg, cred);
		} else /* if (rw == UIO_WRITE) */ {
			error = VOP_WRITE(vp, &auio, ioflg, cred);
		}
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
		if (mp != NULL)
			vn_finished_write(mp);
	}
out:
	if (rl_cookie != NULL)
		vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call kern_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base, size_t len,
    off_t offset, enum uio_seg segflg, int ioflg, struct ucred *active_cred,
    struct ucred *file_cred, size_t *aresid, struct thread *td)
{
	int error = 0;
	ssize_t iaresid;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		iaresid = 0;
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
		    ioflg, active_cred, file_cred, &iaresid, td);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base = (char *)base + chunk;
		kern_yield(PRI_USER);
	} while (len);
	if (aresid)
		*aresid = len + iaresid;
	return (error);
}

#if OFF_MAX <= LONG_MAX
off_t
foffset_lock(struct file *fp, int flags)
{
	volatile short *flagsp;
	off_t res;
	short state;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

	if ((flags & FOF_NOLOCK) != 0)
		return (atomic_load_long(&fp->f_offset));

	/*
	 * According to McKusick the vn lock was protecting f_offset here.
	 * It is now protected by the FOFFSET_LOCKED flag.
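	 * The fast path below takes the flag with a single atomic cmpset;
	 * contended threads queue on a sleepqueue keyed on f_vnread_flags.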
	 */
	flagsp = &fp->f_vnread_flags;
	if (atomic_cmpset_acq_16(flagsp, 0, FOFFSET_LOCKED))
		return (atomic_load_long(&fp->f_offset));

	sleepq_lock(&fp->f_vnread_flags);
	state = atomic_load_16(flagsp);
	for (;;) {
		if ((state & FOFFSET_LOCKED) == 0) {
			if (!atomic_fcmpset_acq_16(flagsp, &state,
			    FOFFSET_LOCKED))
				continue;
			break;
		}
		if ((state & FOFFSET_LOCK_WAITING) == 0) {
			if (!atomic_fcmpset_acq_16(flagsp, &state,
			    state | FOFFSET_LOCK_WAITING))
				continue;
		}
		DROP_GIANT();
		sleepq_add(&fp->f_vnread_flags, NULL, "vofflock", 0, 0);
		sleepq_wait(&fp->f_vnread_flags, PUSER - 1);
		PICKUP_GIANT();
		sleepq_lock(&fp->f_vnread_flags);
		state = atomic_load_16(flagsp);
	}
	res = atomic_load_long(&fp->f_offset);
	sleepq_release(&fp->f_vnread_flags);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	volatile short *flagsp;
	short state;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

	if ((flags & FOF_NOUPDATE) == 0)
		atomic_store_long(&fp->f_offset, val);
	if ((flags & FOF_NEXTOFF_R) != 0)
		fp->f_nextoff[UIO_READ] = val;
	if ((flags & FOF_NEXTOFF_W) != 0)
		fp->f_nextoff[UIO_WRITE] = val;

	if ((flags & FOF_NOLOCK) != 0)
		return;

	flagsp = &fp->f_vnread_flags;
	state = atomic_load_16(flagsp);
	if ((state & FOFFSET_LOCK_WAITING) == 0 &&
	    atomic_cmpset_rel_16(flagsp, state, 0))
		return;

	sleepq_lock(&fp->f_vnread_flags);
	MPASS((fp->f_vnread_flags & FOFFSET_LOCKED) != 0);
	MPASS((fp->f_vnread_flags & FOFFSET_LOCK_WAITING) != 0);
	fp->f_vnread_flags = 0;
	sleepq_broadcast(&fp->f_vnread_flags, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(&fp->f_vnread_flags);
}
#else
off_t
foffset_lock(struct file *fp, int flags)
{
	struct mtx *mtxp;
	off_t res;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOLOCK) == 0) {
		while (fp->f_vnread_flags & FOFFSET_LOCKED) {
			fp->f_vnread_flags |= FOFFSET_LOCK_WAITING;
			msleep(&fp->f_vnread_flags, mtxp, PUSER - 1,
			    "vofflock", 0);
		}
		fp->f_vnread_flags |= FOFFSET_LOCKED;
	}
	res = fp->f_offset;
	mtx_unlock(mtxp);
	return (res);
}

void
foffset_unlock(struct file *fp, off_t val, int flags)
{
	struct mtx *mtxp;

	KASSERT((flags & FOF_OFFSET) == 0, ("FOF_OFFSET passed"));

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if ((flags & FOF_NOUPDATE) == 0)
		fp->f_offset = val;
	if ((flags & FOF_NEXTOFF_R) != 0)
		fp->f_nextoff[UIO_READ] = val;
	if ((flags & FOF_NEXTOFF_W) != 0)
		fp->f_nextoff[UIO_WRITE] = val;
	if ((flags & FOF_NOLOCK) == 0) {
		KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0,
		    ("Lost FOFFSET_LOCKED"));
		if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING)
			wakeup(&fp->f_vnread_flags);
		fp->f_vnread_flags = 0;
	}
	mtx_unlock(mtxp);
}
#endif

void
foffset_lock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = foffset_lock(fp, flags);
}

void
foffset_unlock_uio(struct file *fp, struct uio *uio, int flags)
{

	if ((flags & FOF_OFFSET) == 0)
		foffset_unlock(fp, uio->uio_offset, flags);
}

static int
get_advice(struct file *fp, struct uio *uio)
{
	struct mtx *mtxp;
	int ret;

	ret = POSIX_FADV_NORMAL;
	if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG)
		return (ret);

	mtxp = mtx_pool_find(mtxpool_sleep, fp);
	mtx_lock(mtxp);
	if (fp->f_advice != NULL &&
	    uio->uio_offset >= fp->f_advice->fa_start &&
	    uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end)
		ret = fp->f_advice->fa_advice;
	mtx_unlock(mtxp);
	return (ret);
}

int
vn_read_from_obj(struct vnode *vp, struct uio *uio)
{
	vm_object_t obj;
	vm_page_t ma[io_hold_cnt + 2];
	off_t off, vsz;
	ssize_t resid;
	int error, i, j;

	obj = vp->v_object;
	MPASS(uio->uio_resid <= ptoa(io_hold_cnt + 2));
	MPASS(obj != NULL);
	MPASS(obj->type == OBJT_VNODE);

	/*
	 * Depends on type stability of vm_objects.
	 */
	vm_object_pip_add(obj, 1);
	if ((obj->flags & OBJ_DEAD) != 0) {
		/*
		 * Note that object might be already reused from the
		 * vnode, and the OBJ_DEAD flag cleared.  This is fine,
		 * we recheck for DOOMED vnode state after all pages
		 * are busied, and retract then.
		 *
		 * But we check for OBJ_DEAD to ensure that we do not
		 * busy pages while vm_object_terminate_pages()
		 * processes the queue.
		 */
		error = EJUSTRETURN;
		goto out_pip;
	}

	resid = uio->uio_resid;
	off = uio->uio_offset;
	for (i = 0; resid > 0; i++) {
		MPASS(i < io_hold_cnt + 2);
		ma[i] = vm_page_grab_unlocked(obj, atop(off),
		    VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY |
		    VM_ALLOC_NOWAIT);
		if (ma[i] == NULL)
			break;

		/*
		 * Skip invalid pages.  Valid mask can be partial only
		 * at EOF, and we clip later.
		 */
		if (vm_page_none_valid(ma[i])) {
			vm_page_sunbusy(ma[i]);
			break;
		}

		resid -= PAGE_SIZE;
		off += PAGE_SIZE;
	}
	if (i == 0) {
		error = EJUSTRETURN;
		goto out_pip;
	}

	/*
	 * Check VIRF_DOOMED after we busied our pages.  Since
	 * vgonel() terminates the vnode's vm_object, it cannot
	 * process past pages busied by us.
	 */
	if (VN_IS_DOOMED(vp)) {
		error = EJUSTRETURN;
		goto out;
	}

	resid = PAGE_SIZE - (uio->uio_offset & PAGE_MASK) + ptoa(i - 1);
	if (resid > uio->uio_resid)
		resid = uio->uio_resid;

	/*
	 * Unlocked read of vnp_size is safe because truncation cannot
	 * pass busied page.  But we load vnp_size into a local
	 * variable so that possible concurrent extension does not
	 * break calculation.
	 */
#if defined(__powerpc__) && !defined(__powerpc64__)
	vsz = obj->un_pager.vnp.vnp_size;
#else
	vsz = atomic_load_64(&obj->un_pager.vnp.vnp_size);
#endif
	if (uio->uio_offset + resid > vsz)
		resid = vsz - uio->uio_offset;

	error = vn_io_fault_pgmove(ma, uio->uio_offset & PAGE_MASK, resid, uio);

out:
	for (j = 0; j < i; j++) {
		if (error == 0)
			vm_page_reference(ma[j]);
		vm_page_sunbusy(ma[j]);
	}
out_pip:
	vm_object_pip_wakeup(obj);
	if (error != 0)
		return (error);
	return (uio->uio_resid == 0 ? 0 : EJUSTRETURN);
}

/*
 * File table vnode read routine.
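 * Reads are first attempted directly from the page cache via
 * VOP_READ_PGCACHE() when the filesystem sets VIRF_PGREAD; an
 * EJUSTRETURN result falls back to a regular, vnode-locked VOP_READ().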
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
    struct thread *td)
{
	struct vnode *vp;
	off_t orig_offset;
	int error, ioflag;
	int advice;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;

	/*
	 * Try to read from page cache.  VIRF_DOOMED check is racy but
	 * allows us to avoid unneeded work outright.
	 */
	if (vn_io_pgcache_read_enable && !mac_vnode_check_read_enabled() &&
	    (vp->v_irflag & (VIRF_DOOMED | VIRF_PGREAD)) == VIRF_PGREAD) {
		error = VOP_READ_PGCACHE(vp, uio, ioflag, fp->f_cred);
		if (error == 0) {
			fp->f_nextoff[UIO_READ] = uio->uio_offset;
			return (0);
		}
		if (error != EJUSTRETURN)
			return (error);
	}

	advice = get_advice(fp, uio);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* Disable read-ahead for random I/O. */
		break;
	}
	orig_offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_read(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff[UIO_READ] = uio->uio_offset;
	VOP_UNLOCK(vp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    orig_offset != uio->uio_offset)
		/*
		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
		 * for the backing file after a POSIX_FADV_NOREUSE
		 * read(2).
		 */
		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
		    POSIX_FADV_DONTNEED);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags,
    struct thread *td)
{
	struct vnode *vp;
	struct mount *mp;
	off_t orig_offset;
	int error, ioflag, lock_flags;
	int advice;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	KASSERT(flags & FOF_OFFSET, ("No FOF_OFFSET"));
	vp = fp->f_vnode;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	mp = NULL;
	if (vp->v_type != VCHR &&
	    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
		goto unlock;

	advice = get_advice(fp, uio);

	if (MNT_SHARED_WRITES(mp) ||
	    (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) {
		lock_flags = LK_SHARED;
	} else {
		lock_flags = LK_EXCLUSIVE;
	}

	vn_lock(vp, lock_flags | LK_RETRY);
	switch (advice) {
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_NOREUSE:
		ioflag |= sequential_heuristic(uio, fp);
		break;
	case POSIX_FADV_RANDOM:
		/* XXX: Is this correct? */
		break;
	}
	orig_offset = uio->uio_offset;

#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, uio, ioflag, fp->f_cred);
	fp->f_nextoff[UIO_WRITE] = uio->uio_offset;
	VOP_UNLOCK(vp);
	if (vp->v_type != VCHR)
		vn_finished_write(mp);
	if (error == 0 && advice == POSIX_FADV_NOREUSE &&
	    orig_offset != uio->uio_offset)
		/*
		 * Use POSIX_FADV_DONTNEED to flush pages and buffers
		 * for the backing file after a POSIX_FADV_NOREUSE
		 * write(2).
		 */
		error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1,
		    POSIX_FADV_DONTNEED);
unlock:
	return (error);
}

/*
 * The vn_io_fault() is a wrapper around vn_read() and vn_write() to
 * prevent the following deadlock:
 *
 * Assume that the thread A reads from the vnode vp1 into userspace
 * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
 * currently not resident, then the system ends up with the call chain
 *   vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
 *     vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
 * which establishes the lock order vp1->vn_lock, then vp2->vn_lock.
 * If, at the same time, thread B reads from vnode vp2 into buffer buf2
 * backed by the pages of vnode vp1, and some page in buf2 is not
 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
 *
 * To prevent the lock order reversal and deadlock, vn_io_fault() does
 * not allow page faults to happen during VOP_READ() or VOP_WRITE().
 * Instead, it first tries to do the whole range i/o with pagefaults
 * disabled.  If all pages in the i/o buffer are resident and mapped,
 * the VOP will succeed (ignoring genuine filesystem errors).
 * Otherwise, we get back EFAULT, and vn_io_fault() falls back to doing
 * the i/o in chunks, with all pages in the chunk prefaulted and held
 * using vm_fault_quick_hold_pages().
 *
 * Filesystems using this deadlock avoidance scheme should use the
 * array of the held pages from uio, saved in curthread->td_ma,
 * instead of doing uiomove().  The helper function
 * vn_io_fault_uiomove() converts a uiomove request into
 * uiomove_fromphys() over the td_ma array.
 *
 * Since vnode locks do not cover the whole i/o anymore, rangelocks
 * make the current i/o request atomic with respect to other i/os and
 * truncations.
 */

/*
 * Decode vn_io_fault_args and perform the corresponding i/o.
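 * Page faults are disabled around the call, so a non-resident user
 * buffer surfaces as EFAULT instead of faulting while the vnode
 * lock is held.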
 */
static int
vn_io_fault_doio(struct vn_io_fault_args *args, struct uio *uio,
    struct thread *td)
{
	int error, save;

	error = 0;
	save = vm_fault_disable_pagefaults();
	switch (args->kind) {
	case VN_IO_FAULT_FOP:
		error = (args->args.fop_args.doio)(args->args.fop_args.fp,
		    uio, args->cred, args->flags, td);
		break;
	case VN_IO_FAULT_VOP:
		if (uio->uio_rw == UIO_READ) {
			error = VOP_READ(args->args.vop_args.vp, uio,
			    args->flags, args->cred);
		} else if (uio->uio_rw == UIO_WRITE) {
			error = VOP_WRITE(args->args.vop_args.vp, uio,
			    args->flags, args->cred);
		}
		break;
	default:
		panic("vn_io_fault_doio: unknown kind of io %d %d",
		    args->kind, uio->uio_rw);
	}
	vm_fault_enable_pagefaults(save);
	return (error);
}

static int
vn_io_fault_touch(char *base, const struct uio *uio)
{
	int r;

	r = fubyte(base);
	if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1))
		return (EFAULT);
	return (0);
}

static int
vn_io_fault_prefault_user(const struct uio *uio)
{
	char *base;
	const struct iovec *iov;
	size_t len;
	ssize_t resid;
	int error, i;

	KASSERT(uio->uio_segflg == UIO_USERSPACE,
	    ("vn_io_fault_prefault userspace"));

	error = i = 0;
	iov = uio->uio_iov;
	resid = uio->uio_resid;
	base = iov->iov_base;
	len = iov->iov_len;
	while (resid > 0) {
		error = vn_io_fault_touch(base, uio);
		if (error != 0)
			break;
		if (len < PAGE_SIZE) {
			if (len != 0) {
				error = vn_io_fault_touch(base + len - 1, uio);
				if (error != 0)
					break;
				resid -= len;
			}
			if (++i >= uio->uio_iovcnt)
				break;
			iov = uio->uio_iov + i;
			base = iov->iov_base;
			len = iov->iov_len;
		} else {
			len -= PAGE_SIZE;
			base += PAGE_SIZE;
			resid -= PAGE_SIZE;
		}
	}
	return (error);
}

/*
 * Common code for vn_io_fault(), agnostic to the kind of i/o request.
 * Uses vn_io_fault_doio() to make the call to an actual i/o function.
 * Used from vn_rdwr() and vn_io_fault(), which encode the i/o request
 * into args and call vn_io_fault1() to handle faults during the user
 * mode buffer accesses.
 */
static int
vn_io_fault1(struct vnode *vp, struct uio *uio, struct vn_io_fault_args *args,
    struct thread *td)
{
	vm_page_t ma[io_hold_cnt + 2];
	struct uio *uio_clone, short_uio;
	struct iovec short_iovec[1];
	vm_page_t *prev_td_ma;
	vm_prot_t prot;
	vm_offset_t addr, end;
	size_t len, resid;
	ssize_t adv;
	int error, cnt, saveheld, prev_td_ma_cnt;

	if (vn_io_fault_prefault) {
		error = vn_io_fault_prefault_user(uio);
		if (error != 0)
			return (error); /* Or ignore ? */
	}

	prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ;

	/*
	 * The UFS follows IO_UNIT directive and replays back both
	 * uio_offset and uio_resid if an error is encountered during the
	 * operation.  But, since the iovec may be already advanced,
	 * uio is still in an inconsistent state.
	 *
	 * Cache a copy of the original uio, which is advanced to the redo
	 * point using UIO_NOCOPY below.
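	 * The clone is advanced with uiomove() in UIO_NOCOPY mode, which
	 * updates the offsets and iovecs without copying any data.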
	 */
	uio_clone = cloneuio(uio);
	resid = uio->uio_resid;

	short_uio.uio_segflg = UIO_USERSPACE;
	short_uio.uio_rw = uio->uio_rw;
	short_uio.uio_td = uio->uio_td;

	error = vn_io_fault_doio(args, uio, td);
	if (error != EFAULT)
		goto out;

	atomic_add_long(&vn_io_faults_cnt, 1);
	uio_clone->uio_segflg = UIO_NOCOPY;
	uiomove(NULL, resid - uio->uio_resid, uio_clone);
	uio_clone->uio_segflg = uio->uio_segflg;

	saveheld = curthread_pflags_set(TDP_UIOHELD);
	prev_td_ma = td->td_ma;
	prev_td_ma_cnt = td->td_ma_cnt;

	while (uio_clone->uio_resid != 0) {
		len = uio_clone->uio_iov->iov_len;
		if (len == 0) {
			KASSERT(uio_clone->uio_iovcnt >= 1,
			    ("iovcnt underflow"));
			uio_clone->uio_iov++;
			uio_clone->uio_iovcnt--;
			continue;
		}
		if (len > ptoa(io_hold_cnt))
			len = ptoa(io_hold_cnt);
		addr = (uintptr_t)uio_clone->uio_iov->iov_base;
		end = round_page(addr + len);
		if (end < addr) {
			error = EFAULT;
			break;
		}
		cnt = atop(end - trunc_page(addr));
		/*
		 * A perfectly misaligned address and length could cause
		 * both the start and the end of the chunk to use partial
		 * pages.  +2 accounts for such a situation.
		 */
		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
		    addr, len, prot, ma, io_hold_cnt + 2);
		if (cnt == -1) {
			error = EFAULT;
			break;
		}
		short_uio.uio_iov = &short_iovec[0];
		short_iovec[0].iov_base = (void *)addr;
		short_uio.uio_iovcnt = 1;
		short_uio.uio_resid = short_iovec[0].iov_len = len;
		short_uio.uio_offset = uio_clone->uio_offset;
		td->td_ma = ma;
		td->td_ma_cnt = cnt;

		error = vn_io_fault_doio(args, &short_uio, td);
		vm_page_unhold_pages(ma, cnt);
		adv = len - short_uio.uio_resid;

		uio_clone->uio_iov->iov_base =
		    (char *)uio_clone->uio_iov->iov_base + adv;
		uio_clone->uio_iov->iov_len -= adv;
		uio_clone->uio_resid -= adv;
		uio_clone->uio_offset += adv;

		uio->uio_resid -= adv;
		uio->uio_offset += adv;

		if (error != 0 || adv == 0)
			break;
	}
	td->td_ma = prev_td_ma;
	td->td_ma_cnt = prev_td_ma_cnt;
	curthread_pflags_restore(saveheld);
out:
	free(uio_clone, M_IOV);
	return (error);
}

static int
vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	fo_rdwr_t *doio;
	struct vnode *vp;
	void *rl_cookie;
	struct vn_io_fault_args args;
	int error;

	doio = uio->uio_rw == UIO_READ ? vn_read : vn_write;
	vp = fp->f_vnode;

	/*
	 * The ability to read(2) on a directory has historically been
	 * allowed for all users, but this can and has been the source of
	 * at least one security issue in the past.  As such, it is now hidden
	 * away behind a sysctl for those that actually need it, and
	 * restricted to root when it is turned on, to make it relatively safe
	 * to leave enabled for longer sessions of need.
	 */
	if (vp->v_type == VDIR) {
		KASSERT(uio->uio_rw == UIO_READ,
		    ("illegal write attempted on a directory"));
		if (!vfs_allow_read_dir)
			return (EISDIR);
		if ((error = priv_check(td, PRIV_VFS_READ_DIR)) != 0)
			return (EISDIR);
	}

	foffset_lock_uio(fp, uio, flags);
	if (do_vn_io_fault(vp, uio)) {
		args.kind = VN_IO_FAULT_FOP;
		args.args.fop_args.fp = fp;
		args.args.fop_args.doio = doio;
		args.cred = active_cred;
		args.flags = flags | FOF_OFFSET;
		if (uio->uio_rw == UIO_READ) {
			rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		} else if ((fp->f_flag & O_APPEND) != 0 ||
		    (flags & FOF_OFFSET) == 0) {
			/* For appenders, punt and lock the whole range. */
			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
		} else {
			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
			    uio->uio_offset + uio->uio_resid);
		}
		error = vn_io_fault1(vp, uio, &args, td);
		vn_rangelock_unlock(vp, rl_cookie);
	} else {
		error = doio(fp, uio, active_cred, flags | FOF_OFFSET, td);
	}
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

/*
 * Helper function to perform the requested uiomove operation using
 * the held pages for the uio->uio_iov[0].iov_base buffer instead of
 * copyin/copyout.  Access to the pages with uiomove_fromphys()
 * instead of iov_base prevents page faults that could occur due to
 * pmap_collect() invalidating the mapping created by
 * vm_fault_quick_hold_pages(), or the pageout daemon, page laundry or
 * object cleanup revoking the write access from page mappings.
 *
 * Filesystems that set MNTK_NO_IOPF shall use vn_io_fault_uiomove()
 * instead of plain uiomove().
 */
int
vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio)
{
	struct uio transp_uio;
	struct iovec transp_iov[1];
	struct thread *td;
	size_t adv;
	int error, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove(data, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	transp_iov[0].iov_base = data;
	transp_uio.uio_iov = &transp_iov[0];
	transp_uio.uio_iovcnt = 1;
	if (xfersize > uio->uio_resid)
		xfersize = uio->uio_resid;
	transp_uio.uio_resid = transp_iov[0].iov_len = xfersize;
	transp_uio.uio_offset = 0;
	transp_uio.uio_segflg = UIO_SYSSPACE;
	/*
	 * Since transp_iov points to data, and td_ma page array
	 * corresponds to original uio->uio_iov, we need to invert the
	 * direction of the i/o operation as passed to
	 * uiomove_fromphys().
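	 * For example, a UIO_READ request (file data flowing into the user
	 * buffer) becomes a UIO_WRITE transport uio, so uiomove_fromphys()
	 * copies from 'data' into the held user pages.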
	 */
	switch (uio->uio_rw) {
	case UIO_WRITE:
		transp_uio.uio_rw = UIO_READ;
		break;
	case UIO_READ:
		transp_uio.uio_rw = UIO_WRITE;
		break;
	}
	transp_uio.uio_td = uio->uio_td;
	error = uiomove_fromphys(td->td_ma,
	    ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK,
	    xfersize, &transp_uio);
	adv = xfersize - transp_uio.uio_resid;
	pgadv =
	    (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) -
	    (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv;
	uio->uio_iov->iov_len -= adv;
	uio->uio_resid -= adv;
	uio->uio_offset += adv;
	return (error);
}

int
vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
    struct uio *uio)
{
	struct thread *td;
	vm_offset_t iov_base;
	int cnt, pgadv;

	td = curthread;
	if ((td->td_pflags & TDP_UIOHELD) == 0 ||
	    uio->uio_segflg != UIO_USERSPACE)
		return (uiomove_fromphys(ma, offset, xfersize, uio));

	KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt));
	cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize;
	iov_base = (vm_offset_t)uio->uio_iov->iov_base;
	switch (uio->uio_rw) {
	case UIO_WRITE:
		pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma,
		    offset, cnt);
		break;
	case UIO_READ:
		pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK,
		    cnt);
		break;
	}
	pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT);
	td->td_ma += pgadv;
	KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt,
	    pgadv));
	td->td_ma_cnt -= pgadv;
	uio->uio_iov->iov_base = (char *)(iov_base + cnt);
	uio->uio_iov->iov_len -= cnt;
	uio->uio_resid -= cnt;
	uio->uio_offset += cnt;
	return (0);
}

/*
 * File table truncate routine.
 */
static int
vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct mount *mp;
	struct vnode *vp;
	void *rl_cookie;
	int error;

	vp = fp->f_vnode;

	/*
	 * Lock the whole range for truncation.  Otherwise split i/o
	 * might happen partly before and partly after the truncation.
	 */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
	if (error)
		goto out1;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	AUDIT_ARG_VNODE1(vp);
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}
#ifdef MAC
	error = mac_vnode_check_write(active_cred, fp->f_cred, vp);
	if (error)
		goto out;
#endif
	error = vn_truncate_locked(vp, length, (fp->f_flag & O_FSYNC) != 0,
	    fp->f_cred);
out:
	VOP_UNLOCK(vp);
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

/*
 * Truncate a file that is already locked.
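 * The vnode write count is bumped around the VOP_SETATTR() call so the
 * vnode is accounted as being written to while its size changes.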
 */
int
vn_truncate_locked(struct vnode *vp, off_t length, bool sync,
    struct ucred *cred)
{
	struct vattr vattr;
	int error;

	error = VOP_ADD_WRITECOUNT(vp, 1);
	if (error == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		if (sync)
			vattr.va_vaflags |= VA_SYNC;
		error = VOP_SETATTR(vp, &vattr, cred);
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
	}
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp = fp->f_vnode;
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_STAT(vp, sb, active_cred, fp->f_cred, td);
	VOP_UNLOCK(vp);

	return (error);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct vattr vattr;
	struct vnode *vp;
	struct fiobmap2_arg *bmarg;
	int error;

	vp = fp->f_vnode;
	switch (vp->v_type) {
	case VDIR:
	case VREG:
		switch (com) {
		case FIONREAD:
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &vattr, active_cred);
			VOP_UNLOCK(vp);
			if (error == 0)
				*(int *)data = vattr.va_size - fp->f_offset;
			return (error);
		case FIOBMAP2:
			bmarg = (struct fiobmap2_arg *)data;
			vn_lock(vp, LK_SHARED | LK_RETRY);
#ifdef MAC
			error = mac_vnode_check_read(active_cred, fp->f_cred,
			    vp);
			if (error == 0)
#endif
				error = VOP_BMAP(vp, bmarg->bn, NULL,
				    &bmarg->bn, &bmarg->runp, &bmarg->runb);
			VOP_UNLOCK(vp);
			return (error);
		case FIONBIO:
		case FIOASYNC:
			return (0);
		default:
			return (VOP_IOCTL(vp, com, data, fp->f_flag,
			    active_cred, td));
		}
		break;
	case VCHR:
		return (VOP_IOCTL(vp, com, data, fp->f_flag,
		    active_cred, td));
	default:
		return (ENOTTY);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct vnode *vp;
	int error;

	vp = fp->f_vnode;
#if defined(MAC) || defined(AUDIT)
	if (AUDITING_TD(td) || mac_vnode_check_poll_enabled()) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		AUDIT_ARG_VNODE1(vp);
		error = mac_vnode_check_poll(active_cred, fp->f_cred, vp);
		VOP_UNLOCK(vp);
		if (error != 0)
			return (error);
	}
#endif
	error = VOP_POLL(vp, events, fp->f_cred, td);
	return (error);
}

/*
 * Acquire the requested lock and then check for validity.  LK_RETRY
 * permits vn_lock to return doomed vnodes.
 */
static int __noinline
_vn_lock_fallback(struct vnode *vp, int flags, const char *file, int line,
    int error)
{

	KASSERT((flags & LK_RETRY) == 0 || error == 0,
	    ("vn_lock: error %d incompatible with flags %#x", error, flags));

	if (error == 0)
		VNASSERT(VN_IS_DOOMED(vp), vp, ("vnode not doomed"));

	if ((flags & LK_RETRY) == 0) {
		if (error == 0) {
			VOP_UNLOCK(vp);
			error = ENOENT;
		}
		return (error);
	}

	/*
	 * LK_RETRY case.
	 *
	 * Nothing to do if we got the lock.
	 */
	if (error == 0)
		return (0);

	/*
	 * Interlock was dropped by the call in _vn_lock.
	 */
	flags &= ~LK_INTERLOCK;
	do {
		error = VOP_LOCK1(vp, flags, file, line);
	} while (error != 0);
	return (0);
}

int
_vn_lock(struct vnode *vp, int flags, const char *file, int line)
{
	int error;

	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
	    ("vn_lock: no locktype (%d passed)", flags));
	VNPASS(vp->v_holdcnt > 0, vp);
	error = VOP_LOCK1(vp, flags, file, line);
	if (__predict_false(error != 0 || VN_IS_DOOMED(vp)))
		return (_vn_lock_fallback(vp, flags, file, line, error));
	return (0);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct thread *td)
{
	struct vnode *vp;
	struct flock lf;
	int error;
	bool ref;

	vp = fp->f_vnode;
	fp->f_ops = &badfileops;
	ref = (fp->f_flag & FHASLOCK) != 0 && fp->f_type == DTYPE_VNODE;

	error = vn_close1(vp, fp->f_flag, fp->f_cred, td, ref);

	if (__predict_false(ref)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		(void) VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		vrele(vp);
	}
	return (error);
}

/*
 * Preparing to start a filesystem write operation. If the operation is
 * permitted, then we bump the count of operations in progress and
 * proceed. If a suspend request is in progress, we wait until the
 * suspension is over, and then proceed.
 */
static int
vn_start_write_refed(struct mount *mp, int flags, bool mplocked)
{
	struct mount_pcpu *mpcpu;
	int error, mflags;

	if (__predict_true(!mplocked) && (flags & V_XSLEEP) == 0 &&
	    vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0);
		vfs_mp_count_add_pcpu(mpcpu, writeopcount, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return (0);
	}

	if (mplocked)
		mtx_assert(MNT_MTX(mp), MA_OWNED);
	else
		MNT_ILOCK(mp);

	error = 0;

	/*
	 * Check on status of suspension.
	 */
	if ((curthread->td_pflags & TDP_IGNSUSP) == 0 ||
	    mp->mnt_susp_owner != curthread) {
		mflags = ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ?
		    (flags & PCATCH) : 0) | (PUSER - 1);
		while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) {
			if (flags & V_NOWAIT) {
				error = EWOULDBLOCK;
				goto unlock;
			}
			error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags,
			    "suspfs", 0);
			if (error)
				goto unlock;
		}
	}
	if (flags & V_XSLEEP)
		goto unlock;
	mp->mnt_writeopcount++;
unlock:
	if (error != 0 || (flags & V_XSLEEP) != 0)
		MNT_REL(mp);
	MNT_IUNLOCK(mp);
	return (error);
}

int
vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

	error = 0;
	/*
	 * If a vnode is provided, get and return the mount point to
	 * which it will write.
	 */
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	if (vp == NULL && (flags & V_MNTREF) == 0)
		vfs_ref(mp);

	return (vn_start_write_refed(mp, flags, false));
}

/*
 * Secondary suspension. Used by operations such as vop_inactive
 * routines that are needed by the higher level functions. These
 * are allowed to proceed until all the higher level functions have
 * completed (indicated by mnt_writeopcount dropping to zero). At that
 * time, these operations are halted until the suspension is over.
 */
int
vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
{
	struct mount *mp;
	int error;

	KASSERT((flags & V_MNTREF) == 0 || (*mpp != NULL && vp == NULL),
	    ("V_MNTREF requires mp"));

retry:
	if (vp != NULL) {
		if ((error = VOP_GETWRITEMOUNT(vp, mpp)) != 0) {
			*mpp = NULL;
			if (error != EOPNOTSUPP)
				return (error);
			return (0);
		}
	}
	/*
	 * If we are not suspended or have not yet reached suspended
	 * mode, then let the operation proceed.
	 */
	if ((mp = *mpp) == NULL)
		return (0);

	/*
	 * VOP_GETWRITEMOUNT() returns with the mp refcount held through
	 * a vfs_ref().
	 * As long as a vnode is not provided we need to acquire a
	 * refcount for the provided mountpoint too, in order to
	 * emulate a vfs_ref().
	 */
	MNT_ILOCK(mp);
	if (vp == NULL && (flags & V_MNTREF) == 0)
		MNT_REF(mp);
	if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) {
		mp->mnt_secondary_writes++;
		mp->mnt_secondary_accwrites++;
		MNT_IUNLOCK(mp);
		return (0);
	}
	if (flags & V_NOWAIT) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		return (EWOULDBLOCK);
	}
	/*
	 * Wait for the suspension to finish.
	 */
	error = msleep(&mp->mnt_flag, MNT_MTX(mp), (PUSER - 1) | PDROP |
	    ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0 ? (flags & PCATCH) : 0),
	    "suspfs", 0);
	vfs_rel(mp);
	if (error == 0)
		goto retry;
	return (error);
}

/*
 * Filesystem write operation has completed. If we are suspending and this
 * operation is the last one, notify the suspender that the suspension is
 * now in effect.
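 * The common case is handled with per-CPU counters via
 * vfs_op_thread_enter(); the mount interlock is only taken when a
 * suspend or unmount has bumped mnt_vfs_ops.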
 */
void
vn_finished_write(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	if (mp == NULL)
		return;

	if (vfs_op_thread_enter(mp, mpcpu)) {
		vfs_mp_count_sub_pcpu(mpcpu, writeopcount, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_writeopcount;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && c == 0)
		wakeup(&mp->mnt_writeopcount);
	MNT_IUNLOCK(mp);
}

/*
 * Filesystem secondary write operation has completed. If we are
 * suspending and this operation is the last one, notify the suspender
 * that the suspension is now in effect.
 */
void
vn_finished_secondary_write(struct mount *mp)
{
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	MNT_REL(mp);
	mp->mnt_secondary_writes--;
	if (mp->mnt_secondary_writes < 0)
		panic("vn_finished_secondary_write: neg cnt");
	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
	    mp->mnt_secondary_writes <= 0)
		wakeup(&mp->mnt_secondary_writes);
	MNT_IUNLOCK(mp);
}

/*
 * Request a filesystem to suspend write operations.
 */
int
vfs_write_suspend(struct mount *mp, int flags)
{
	int error;

	vfs_op_enter(mp);

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	if (mp->mnt_susp_owner == curthread) {
		vfs_op_exit_locked(mp);
		MNT_IUNLOCK(mp);
		return (EALREADY);
	}
	while (mp->mnt_kern_flag & MNTK_SUSPEND)
		msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0);

	/*
	 * Unmount holds a write reference on the mount point.  If we
	 * own a busy reference and drain for writers, we deadlock with
	 * the reference draining in the unmount path.  Callers of
	 * vfs_write_suspend() must specify VS_SKIP_UNMOUNT if a
	 * vfs_busy() reference is owned and the caller is not in the
	 * unmount context.
	 */
	if ((flags & VS_SKIP_UNMOUNT) != 0 &&
	    (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
		vfs_op_exit_locked(mp);
		MNT_IUNLOCK(mp);
		return (EBUSY);
	}

	mp->mnt_kern_flag |= MNTK_SUSPEND;
	mp->mnt_susp_owner = curthread;
	if (mp->mnt_writeopcount > 0)
		(void) msleep(&mp->mnt_writeopcount,
		    MNT_MTX(mp), (PUSER - 1) | PDROP, "suspwt", 0);
	else
		MNT_IUNLOCK(mp);
	if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0) {
		vfs_write_resume(mp, 0);
		/* vfs_write_resume does vfs_op_exit() for us */
	}
	return (error);
}

/*
 * Request a filesystem to resume write operations.
2022 */ 2023 void 2024 vfs_write_resume(struct mount *mp, int flags) 2025 { 2026 2027 MNT_ILOCK(mp); 2028 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { 2029 KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner")); 2030 mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 | 2031 MNTK_SUSPENDED); 2032 mp->mnt_susp_owner = NULL; 2033 wakeup(&mp->mnt_writeopcount); 2034 wakeup(&mp->mnt_flag); 2035 curthread->td_pflags &= ~TDP_IGNSUSP; 2036 if ((flags & VR_START_WRITE) != 0) { 2037 MNT_REF(mp); 2038 mp->mnt_writeopcount++; 2039 } 2040 MNT_IUNLOCK(mp); 2041 if ((flags & VR_NO_SUSPCLR) == 0) 2042 VFS_SUSP_CLEAN(mp); 2043 vfs_op_exit(mp); 2044 } else if ((flags & VR_START_WRITE) != 0) { 2045 MNT_REF(mp); 2046 vn_start_write_refed(mp, 0, true); 2047 } else { 2048 MNT_IUNLOCK(mp); 2049 } 2050 } 2051 2052 /* 2053 * Helper loop around vfs_write_suspend() for filesystem unmount VFS 2054 * methods. 2055 */ 2056 int 2057 vfs_write_suspend_umnt(struct mount *mp) 2058 { 2059 int error; 2060 2061 KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0, 2062 ("vfs_write_suspend_umnt: recursed")); 2063 2064 /* dounmount() already called vn_start_write(). */ 2065 for (;;) { 2066 vn_finished_write(mp); 2067 error = vfs_write_suspend(mp, 0); 2068 if (error != 0) { 2069 vn_start_write(NULL, &mp, V_WAIT); 2070 return (error); 2071 } 2072 MNT_ILOCK(mp); 2073 if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0) 2074 break; 2075 MNT_IUNLOCK(mp); 2076 vn_start_write(NULL, &mp, V_WAIT); 2077 } 2078 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); 2079 wakeup(&mp->mnt_flag); 2080 MNT_IUNLOCK(mp); 2081 curthread->td_pflags |= TDP_IGNSUSP; 2082 return (0); 2083 } 2084 2085 /* 2086 * Implement kqueues for files by translating it to vnode operation. 2087 */ 2088 static int 2089 vn_kqfilter(struct file *fp, struct knote *kn) 2090 { 2091 2092 return (VOP_KQFILTER(fp->f_vnode, kn)); 2093 } 2094 2095 /* 2096 * Simplified in-kernel wrapper calls for extended attribute access. 2097 * Both calls pass in a NULL credential, authorizing as "kernel" access. 2098 * Set IO_NODELOCKED in ioflg if the vnode is already locked. 2099 */ 2100 int 2101 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace, 2102 const char *attrname, int *buflen, char *buf, struct thread *td) 2103 { 2104 struct uio auio; 2105 struct iovec iov; 2106 int error; 2107 2108 iov.iov_len = *buflen; 2109 iov.iov_base = buf; 2110 2111 auio.uio_iov = &iov; 2112 auio.uio_iovcnt = 1; 2113 auio.uio_rw = UIO_READ; 2114 auio.uio_segflg = UIO_SYSSPACE; 2115 auio.uio_td = td; 2116 auio.uio_offset = 0; 2117 auio.uio_resid = *buflen; 2118 2119 if ((ioflg & IO_NODELOCKED) == 0) 2120 vn_lock(vp, LK_SHARED | LK_RETRY); 2121 2122 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); 2123 2124 /* authorize attribute retrieval as kernel */ 2125 error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL, 2126 td); 2127 2128 if ((ioflg & IO_NODELOCKED) == 0) 2129 VOP_UNLOCK(vp); 2130 2131 if (error == 0) { 2132 *buflen = *buflen - auio.uio_resid; 2133 } 2134 2135 return (error); 2136 } 2137 2138 /* 2139 * XXX failure mode if partially written? 
2140 */ 2141 int 2142 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace, 2143 const char *attrname, int buflen, char *buf, struct thread *td) 2144 { 2145 struct uio auio; 2146 struct iovec iov; 2147 struct mount *mp; 2148 int error; 2149 2150 iov.iov_len = buflen; 2151 iov.iov_base = buf; 2152 2153 auio.uio_iov = &iov; 2154 auio.uio_iovcnt = 1; 2155 auio.uio_rw = UIO_WRITE; 2156 auio.uio_segflg = UIO_SYSSPACE; 2157 auio.uio_td = td; 2158 auio.uio_offset = 0; 2159 auio.uio_resid = buflen; 2160 2161 if ((ioflg & IO_NODELOCKED) == 0) { 2162 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0) 2163 return (error); 2164 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2165 } 2166 2167 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); 2168 2169 /* authorize attribute setting as kernel */ 2170 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, td); 2171 2172 if ((ioflg & IO_NODELOCKED) == 0) { 2173 vn_finished_write(mp); 2174 VOP_UNLOCK(vp); 2175 } 2176 2177 return (error); 2178 } 2179 2180 int 2181 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace, 2182 const char *attrname, struct thread *td) 2183 { 2184 struct mount *mp; 2185 int error; 2186 2187 if ((ioflg & IO_NODELOCKED) == 0) { 2188 if ((error = vn_start_write(vp, &mp, V_WAIT)) != 0) 2189 return (error); 2190 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2191 } 2192 2193 ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held"); 2194 2195 /* authorize attribute removal as kernel */ 2196 error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NULL, td); 2197 if (error == EOPNOTSUPP) 2198 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL, 2199 NULL, td); 2200 2201 if ((ioflg & IO_NODELOCKED) == 0) { 2202 vn_finished_write(mp); 2203 VOP_UNLOCK(vp); 2204 } 2205 2206 return (error); 2207 } 2208 2209 static int 2210 vn_get_ino_alloc_vget(struct mount *mp, void *arg, int lkflags, 2211 struct vnode **rvp) 2212 { 2213 2214 return (VFS_VGET(mp, *(ino_t *)arg, lkflags, rvp)); 2215 } 2216 2217 int 2218 vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags, struct vnode **rvp) 2219 { 2220 2221 return (vn_vget_ino_gen(vp, vn_get_ino_alloc_vget, &ino, 2222 lkflags, rvp)); 2223 } 2224 2225 int 2226 vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg, 2227 int lkflags, struct vnode **rvp) 2228 { 2229 struct mount *mp; 2230 int ltype, error; 2231 2232 ASSERT_VOP_LOCKED(vp, "vn_vget_ino_get"); 2233 mp = vp->v_mount; 2234 ltype = VOP_ISLOCKED(vp); 2235 KASSERT(ltype == LK_EXCLUSIVE || ltype == LK_SHARED, 2236 ("vn_vget_ino: vp not locked")); 2237 error = vfs_busy(mp, MBF_NOWAIT); 2238 if (error != 0) { 2239 vfs_ref(mp); 2240 VOP_UNLOCK(vp); 2241 error = vfs_busy(mp, 0); 2242 vn_lock(vp, ltype | LK_RETRY); 2243 vfs_rel(mp); 2244 if (error != 0) 2245 return (ENOENT); 2246 if (VN_IS_DOOMED(vp)) { 2247 vfs_unbusy(mp); 2248 return (ENOENT); 2249 } 2250 } 2251 VOP_UNLOCK(vp); 2252 error = alloc(mp, alloc_arg, lkflags, rvp); 2253 vfs_unbusy(mp); 2254 if (error != 0 || *rvp != vp) 2255 vn_lock(vp, ltype | LK_RETRY); 2256 if (VN_IS_DOOMED(vp)) { 2257 if (error == 0) { 2258 if (*rvp == vp) 2259 vunref(vp); 2260 else 2261 vput(*rvp); 2262 } 2263 error = ENOENT; 2264 } 2265 return (error); 2266 } 2267 2268 int 2269 vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio, 2270 struct thread *td) 2271 { 2272 2273 if (vp->v_type != VREG || td == NULL) 2274 return (0); 2275 if ((uoff_t)uio->uio_offset + uio->uio_resid > 2276 lim_cur(td, RLIMIT_FSIZE)) { 2277 PROC_LOCK(td->td_proc); 2278 kern_psignal(td->td_proc, 
SIGXFSZ); 2279 PROC_UNLOCK(td->td_proc); 2280 return (EFBIG); 2281 } 2282 return (0); 2283 } 2284 2285 int 2286 vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, 2287 struct thread *td) 2288 { 2289 struct vnode *vp; 2290 2291 vp = fp->f_vnode; 2292 #ifdef AUDIT 2293 vn_lock(vp, LK_SHARED | LK_RETRY); 2294 AUDIT_ARG_VNODE1(vp); 2295 VOP_UNLOCK(vp); 2296 #endif 2297 return (setfmode(td, active_cred, vp, mode)); 2298 } 2299 2300 int 2301 vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, 2302 struct thread *td) 2303 { 2304 struct vnode *vp; 2305 2306 vp = fp->f_vnode; 2307 #ifdef AUDIT 2308 vn_lock(vp, LK_SHARED | LK_RETRY); 2309 AUDIT_ARG_VNODE1(vp); 2310 VOP_UNLOCK(vp); 2311 #endif 2312 return (setfown(td, active_cred, vp, uid, gid)); 2313 } 2314 2315 void 2316 vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end) 2317 { 2318 vm_object_t object; 2319 2320 if ((object = vp->v_object) == NULL) 2321 return; 2322 VM_OBJECT_WLOCK(object); 2323 vm_object_page_remove(object, start, end, 0); 2324 VM_OBJECT_WUNLOCK(object); 2325 } 2326 2327 int 2328 vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred) 2329 { 2330 struct vattr va; 2331 daddr_t bn, bnp; 2332 uint64_t bsize; 2333 off_t noff; 2334 int error; 2335 2336 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA, 2337 ("Wrong command %lu", cmd)); 2338 2339 if (vn_lock(vp, LK_SHARED) != 0) 2340 return (EBADF); 2341 if (vp->v_type != VREG) { 2342 error = ENOTTY; 2343 goto unlock; 2344 } 2345 error = VOP_GETATTR(vp, &va, cred); 2346 if (error != 0) 2347 goto unlock; 2348 noff = *off; 2349 if (noff >= va.va_size) { 2350 error = ENXIO; 2351 goto unlock; 2352 } 2353 bsize = vp->v_mount->mnt_stat.f_iosize; 2354 for (bn = noff / bsize; noff < va.va_size; bn++, noff += bsize - 2355 noff % bsize) { 2356 error = VOP_BMAP(vp, bn, NULL, &bnp, NULL, NULL); 2357 if (error == EOPNOTSUPP) { 2358 error = ENOTTY; 2359 goto unlock; 2360 } 2361 if ((bnp == -1 && cmd == FIOSEEKHOLE) || 2362 (bnp != -1 && cmd == FIOSEEKDATA)) { 2363 noff = bn * bsize; 2364 if (noff < *off) 2365 noff = *off; 2366 goto unlock; 2367 } 2368 } 2369 if (noff > va.va_size) 2370 noff = va.va_size; 2371 /* noff == va.va_size. There is an implicit hole at the end of file. */ 2372 if (cmd == FIOSEEKDATA) 2373 error = ENXIO; 2374 unlock: 2375 VOP_UNLOCK(vp); 2376 if (error == 0) 2377 *off = noff; 2378 return (error); 2379 } 2380 2381 int 2382 vn_seek(struct file *fp, off_t offset, int whence, struct thread *td) 2383 { 2384 struct ucred *cred; 2385 struct vnode *vp; 2386 struct vattr vattr; 2387 off_t foffset, size; 2388 int error, noneg; 2389 2390 cred = td->td_ucred; 2391 vp = fp->f_vnode; 2392 foffset = foffset_lock(fp, 0); 2393 noneg = (vp->v_type != VCHR); 2394 error = 0; 2395 switch (whence) { 2396 case L_INCR: 2397 if (noneg && 2398 (foffset < 0 || 2399 (offset > 0 && foffset > OFF_MAX - offset))) { 2400 error = EOVERFLOW; 2401 break; 2402 } 2403 offset += foffset; 2404 break; 2405 case L_XTND: 2406 vn_lock(vp, LK_SHARED | LK_RETRY); 2407 error = VOP_GETATTR(vp, &vattr, cred); 2408 VOP_UNLOCK(vp); 2409 if (error) 2410 break; 2411 2412 /* 2413 * If the file references a disk device, then fetch 2414 * the media size and use that to determine the ending 2415 * offset. 
2416 */ 2417 if (vattr.va_size == 0 && vp->v_type == VCHR && 2418 fo_ioctl(fp, DIOCGMEDIASIZE, &size, cred, td) == 0) 2419 vattr.va_size = size; 2420 if (noneg && 2421 (vattr.va_size > OFF_MAX || 2422 (offset > 0 && vattr.va_size > OFF_MAX - offset))) { 2423 error = EOVERFLOW; 2424 break; 2425 } 2426 offset += vattr.va_size; 2427 break; 2428 case L_SET: 2429 break; 2430 case SEEK_DATA: 2431 error = fo_ioctl(fp, FIOSEEKDATA, &offset, cred, td); 2432 if (error == ENOTTY) 2433 error = EINVAL; 2434 break; 2435 case SEEK_HOLE: 2436 error = fo_ioctl(fp, FIOSEEKHOLE, &offset, cred, td); 2437 if (error == ENOTTY) 2438 error = EINVAL; 2439 break; 2440 default: 2441 error = EINVAL; 2442 } 2443 if (error == 0 && noneg && offset < 0) 2444 error = EINVAL; 2445 if (error != 0) 2446 goto drop; 2447 VFS_KNOTE_UNLOCKED(vp, 0); 2448 td->td_uretoff.tdu_off = offset; 2449 drop: 2450 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0); 2451 return (error); 2452 } 2453 2454 int 2455 vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred, 2456 struct thread *td) 2457 { 2458 int error; 2459 2460 /* 2461 * Grant permission if the caller is the owner of the file, or 2462 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on 2463 * on the file. If the time pointer is null, then write 2464 * permission on the file is also sufficient. 2465 * 2466 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes: 2467 * A user having ACL_WRITE_DATA or ACL_WRITE_ATTRIBUTES 2468 * will be allowed to set the times [..] to the current 2469 * server time. 2470 */ 2471 error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td); 2472 if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0) 2473 error = VOP_ACCESS(vp, VWRITE, cred, td); 2474 return (error); 2475 } 2476 2477 int 2478 vn_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) 2479 { 2480 struct vnode *vp; 2481 int error; 2482 2483 if (fp->f_type == DTYPE_FIFO) 2484 kif->kf_type = KF_TYPE_FIFO; 2485 else 2486 kif->kf_type = KF_TYPE_VNODE; 2487 vp = fp->f_vnode; 2488 vref(vp); 2489 FILEDESC_SUNLOCK(fdp); 2490 error = vn_fill_kinfo_vnode(vp, kif); 2491 vrele(vp); 2492 FILEDESC_SLOCK(fdp); 2493 return (error); 2494 } 2495 2496 static inline void 2497 vn_fill_junk(struct kinfo_file *kif) 2498 { 2499 size_t len, olen; 2500 2501 /* 2502 * Simulate vn_fullpath returning changing values for a given 2503 * vp during e.g. coredump. 2504 */ 2505 len = (arc4random() % (sizeof(kif->kf_path) - 2)) + 1; 2506 olen = strlen(kif->kf_path); 2507 if (len < olen) 2508 strcpy(&kif->kf_path[len - 1], "$"); 2509 else 2510 for (; olen < len; olen++) 2511 strcpy(&kif->kf_path[olen], "A"); 2512 } 2513 2514 int 2515 vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif) 2516 { 2517 struct vattr va; 2518 char *fullpath, *freepath; 2519 int error; 2520 2521 kif->kf_un.kf_file.kf_file_type = vntype_to_kinfo(vp->v_type); 2522 freepath = NULL; 2523 fullpath = "-"; 2524 error = vn_fullpath(vp, &fullpath, &freepath); 2525 if (error == 0) { 2526 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); 2527 } 2528 if (freepath != NULL) 2529 free(freepath, M_TEMP); 2530 2531 KFAIL_POINT_CODE(DEBUG_FP, fill_kinfo_vnode__random_path, 2532 vn_fill_junk(kif); 2533 ); 2534 2535 /* 2536 * Retrieve vnode attributes. 
2537 */ 2538 va.va_fsid = VNOVAL; 2539 va.va_rdev = NODEV; 2540 vn_lock(vp, LK_SHARED | LK_RETRY); 2541 error = VOP_GETATTR(vp, &va, curthread->td_ucred); 2542 VOP_UNLOCK(vp); 2543 if (error != 0) 2544 return (error); 2545 if (va.va_fsid != VNOVAL) 2546 kif->kf_un.kf_file.kf_file_fsid = va.va_fsid; 2547 else 2548 kif->kf_un.kf_file.kf_file_fsid = 2549 vp->v_mount->mnt_stat.f_fsid.val[0]; 2550 kif->kf_un.kf_file.kf_file_fsid_freebsd11 = 2551 kif->kf_un.kf_file.kf_file_fsid; /* truncate */ 2552 kif->kf_un.kf_file.kf_file_fileid = va.va_fileid; 2553 kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode); 2554 kif->kf_un.kf_file.kf_file_size = va.va_size; 2555 kif->kf_un.kf_file.kf_file_rdev = va.va_rdev; 2556 kif->kf_un.kf_file.kf_file_rdev_freebsd11 = 2557 kif->kf_un.kf_file.kf_file_rdev; /* truncate */ 2558 return (0); 2559 } 2560 2561 int 2562 vn_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size, 2563 vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff, 2564 struct thread *td) 2565 { 2566 #ifdef HWPMC_HOOKS 2567 struct pmckern_map_in pkm; 2568 #endif 2569 struct mount *mp; 2570 struct vnode *vp; 2571 vm_object_t object; 2572 vm_prot_t maxprot; 2573 boolean_t writecounted; 2574 int error; 2575 2576 #if defined(COMPAT_FREEBSD7) || defined(COMPAT_FREEBSD6) || \ 2577 defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) 2578 /* 2579 * POSIX shared-memory objects are defined to have 2580 * kernel persistence, and are not defined to support 2581 * read(2)/write(2) -- or even open(2). Thus, we can 2582 * use MAP_ASYNC to trade on-disk coherence for speed. 2583 * The shm_open(3) library routine turns on the FPOSIXSHM 2584 * flag to request this behavior. 2585 */ 2586 if ((fp->f_flag & FPOSIXSHM) != 0) 2587 flags |= MAP_NOSYNC; 2588 #endif 2589 vp = fp->f_vnode; 2590 2591 /* 2592 * Ensure that file and memory protections are 2593 * compatible. Note that we only worry about 2594 * writability if mapping is shared; in this case, 2595 * current and max prot are dictated by the open file. 2596 * XXX use the vnode instead? Problem is: what 2597 * credentials do we use for determination? What if 2598 * proc does a setuid? 2599 */ 2600 mp = vp->v_mount; 2601 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { 2602 maxprot = VM_PROT_NONE; 2603 if ((prot & VM_PROT_EXECUTE) != 0) 2604 return (EACCES); 2605 } else 2606 maxprot = VM_PROT_EXECUTE; 2607 if ((fp->f_flag & FREAD) != 0) 2608 maxprot |= VM_PROT_READ; 2609 else if ((prot & VM_PROT_READ) != 0) 2610 return (EACCES); 2611 2612 /* 2613 * If we are sharing potential changes via MAP_SHARED and we 2614 * are trying to get write permission although we opened it 2615 * without asking for it, bail out. 2616 */ 2617 if ((flags & MAP_SHARED) != 0) { 2618 if ((fp->f_flag & FWRITE) != 0) 2619 maxprot |= VM_PROT_WRITE; 2620 else if ((prot & VM_PROT_WRITE) != 0) 2621 return (EACCES); 2622 } else { 2623 maxprot |= VM_PROT_WRITE; 2624 cap_maxprot |= VM_PROT_WRITE; 2625 } 2626 maxprot &= cap_maxprot; 2627 2628 /* 2629 * For regular files and shared memory, POSIX requires that 2630 * the value of foff be a legitimate offset within the data 2631 * object. In particular, negative offsets are invalid. 2632 * Blocking negative offsets and overflows here avoids 2633 * possible wraparound or user-level access into reserved 2634 * ranges of the data object later. In contrast, POSIX does 2635 * not dictate how offsets are used by device drivers, so in 2636 * the case of a device mapping a negative offset is passed 2637 * on. 
2638 */ 2639 if ( 2640 #ifdef _LP64 2641 size > OFF_MAX || 2642 #endif 2643 foff > OFF_MAX - size) 2644 return (EINVAL); 2645 2646 writecounted = FALSE; 2647 error = vm_mmap_vnode(td, size, prot, &maxprot, &flags, vp, 2648 &foff, &object, &writecounted); 2649 if (error != 0) 2650 return (error); 2651 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 2652 foff, writecounted, td); 2653 if (error != 0) { 2654 /* 2655 * If this mapping was accounted for in the vnode's 2656 * writecount, then undo that now. 2657 */ 2658 if (writecounted) 2659 vm_pager_release_writecount(object, 0, size); 2660 vm_object_deallocate(object); 2661 } 2662 #ifdef HWPMC_HOOKS 2663 /* Inform hwpmc(4) if an executable is being mapped. */ 2664 if (PMC_HOOK_INSTALLED(PMC_FN_MMAP)) { 2665 if ((prot & VM_PROT_EXECUTE) != 0 && error == 0) { 2666 pkm.pm_file = vp; 2667 pkm.pm_address = (uintptr_t) *addr; 2668 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_MMAP, (void *) &pkm); 2669 } 2670 } 2671 #endif 2672 return (error); 2673 } 2674 2675 void 2676 vn_fsid(struct vnode *vp, struct vattr *va) 2677 { 2678 fsid_t *f; 2679 2680 f = &vp->v_mount->mnt_stat.f_fsid; 2681 va->va_fsid = (uint32_t)f->val[1]; 2682 va->va_fsid <<= sizeof(f->val[1]) * NBBY; 2683 va->va_fsid += (uint32_t)f->val[0]; 2684 } 2685 2686 int 2687 vn_fsync_buf(struct vnode *vp, int waitfor) 2688 { 2689 struct buf *bp, *nbp; 2690 struct bufobj *bo; 2691 struct mount *mp; 2692 int error, maxretry; 2693 2694 error = 0; 2695 maxretry = 10000; /* large, arbitrarily chosen */ 2696 mp = NULL; 2697 if (vp->v_type == VCHR) { 2698 VI_LOCK(vp); 2699 mp = vp->v_rdev->si_mountpt; 2700 VI_UNLOCK(vp); 2701 } 2702 bo = &vp->v_bufobj; 2703 BO_LOCK(bo); 2704 loop1: 2705 /* 2706 * MARK/SCAN initialization to avoid infinite loops. 2707 */ 2708 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { 2709 bp->b_vflags &= ~BV_SCANNED; 2710 bp->b_error = 0; 2711 } 2712 2713 /* 2714 * Flush all dirty buffers associated with a vnode. 2715 */ 2716 loop2: 2717 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2718 if ((bp->b_vflags & BV_SCANNED) != 0) 2719 continue; 2720 bp->b_vflags |= BV_SCANNED; 2721 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) { 2722 if (waitfor != MNT_WAIT) 2723 continue; 2724 if (BUF_LOCK(bp, 2725 LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL, 2726 BO_LOCKPTR(bo)) != 0) { 2727 BO_LOCK(bo); 2728 goto loop1; 2729 } 2730 BO_LOCK(bo); 2731 } 2732 BO_UNLOCK(bo); 2733 KASSERT(bp->b_bufobj == bo, 2734 ("bp %p wrong b_bufobj %p should be %p", 2735 bp, bp->b_bufobj, bo)); 2736 if ((bp->b_flags & B_DELWRI) == 0) 2737 panic("fsync: not dirty"); 2738 if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) { 2739 vfs_bio_awrite(bp); 2740 } else { 2741 bremfree(bp); 2742 bawrite(bp); 2743 } 2744 if (maxretry < 1000) 2745 pause("dirty", hz < 1000 ? 1 : hz / 1000); 2746 BO_LOCK(bo); 2747 goto loop2; 2748 } 2749 2750 /* 2751 * If synchronous the caller expects us to completely resolve all 2752 * dirty buffers in the system. Wait for in-progress I/O to 2753 * complete (which could include background bitmap writes), then 2754 * retry if dirty blocks still exist. 2755 */ 2756 if (waitfor == MNT_WAIT) { 2757 bufobj_wwait(bo, 0, 0); 2758 if (bo->bo_dirty.bv_cnt > 0) { 2759 /* 2760 * If we are unable to write any of these buffers 2761 * then we fail now rather than trying endlessly 2762 * to write them out. 
2763 */ 2764 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) 2765 if ((error = bp->b_error) != 0) 2766 break; 2767 if ((mp != NULL && mp->mnt_secondary_writes > 0) || 2768 (error == 0 && --maxretry >= 0)) 2769 goto loop1; 2770 if (error == 0) 2771 error = EAGAIN; 2772 } 2773 } 2774 BO_UNLOCK(bo); 2775 if (error != 0) 2776 vn_printf(vp, "fsync: giving up on dirty (error = %d) ", error); 2777 2778 return (error); 2779 } 2780 2781 /* 2782 * Copies a byte range from invp to outvp. Calls VOP_COPY_FILE_RANGE() 2783 * or vn_generic_copy_file_range() after rangelocking the byte ranges, 2784 * to do the actual copy. 2785 * vn_generic_copy_file_range() is factored out, so it can be called 2786 * from a VOP_COPY_FILE_RANGE() call as well, but handles vnodes from 2787 * different file systems. 2788 */ 2789 int 2790 vn_copy_file_range(struct vnode *invp, off_t *inoffp, struct vnode *outvp, 2791 off_t *outoffp, size_t *lenp, unsigned int flags, struct ucred *incred, 2792 struct ucred *outcred, struct thread *fsize_td) 2793 { 2794 int error; 2795 size_t len; 2796 uint64_t uval; 2797 2798 len = *lenp; 2799 *lenp = 0; /* For error returns. */ 2800 error = 0; 2801 2802 /* Do some sanity checks on the arguments. */ 2803 if (invp->v_type == VDIR || outvp->v_type == VDIR) 2804 error = EISDIR; 2805 else if (*inoffp < 0 || *outoffp < 0 || 2806 invp->v_type != VREG || outvp->v_type != VREG) 2807 error = EINVAL; 2808 if (error != 0) 2809 goto out; 2810 2811 /* Ensure offset + len does not wrap around. */ 2812 uval = *inoffp; 2813 uval += len; 2814 if (uval > INT64_MAX) 2815 len = INT64_MAX - *inoffp; 2816 uval = *outoffp; 2817 uval += len; 2818 if (uval > INT64_MAX) 2819 len = INT64_MAX - *outoffp; 2820 if (len == 0) 2821 goto out; 2822 2823 /* 2824 * If the two vnode are for the same file system, call 2825 * VOP_COPY_FILE_RANGE(), otherwise call vn_generic_copy_file_range() 2826 * which can handle copies across multiple file systems. 2827 */ 2828 *lenp = len; 2829 if (invp->v_mount == outvp->v_mount) 2830 error = VOP_COPY_FILE_RANGE(invp, inoffp, outvp, outoffp, 2831 lenp, flags, incred, outcred, fsize_td); 2832 else 2833 error = vn_generic_copy_file_range(invp, inoffp, outvp, 2834 outoffp, lenp, flags, incred, outcred, fsize_td); 2835 out: 2836 return (error); 2837 } 2838 2839 /* 2840 * Test len bytes of data starting at dat for all bytes == 0. 2841 * Return true if all bytes are zero, false otherwise. 2842 * Expects dat to be well aligned. 2843 */ 2844 static bool 2845 mem_iszero(void *dat, int len) 2846 { 2847 int i; 2848 const u_int *p; 2849 const char *cp; 2850 2851 for (p = dat; len > 0; len -= sizeof(*p), p++) { 2852 if (len >= sizeof(*p)) { 2853 if (*p != 0) 2854 return (false); 2855 } else { 2856 cp = (const char *)p; 2857 for (i = 0; i < len; i++, cp++) 2858 if (*cp != '\0') 2859 return (false); 2860 } 2861 } 2862 return (true); 2863 } 2864 2865 /* 2866 * Look for a hole in the output file and, if found, adjust *outoffp 2867 * and *xferp to skip past the hole. 2868 * *xferp is the entire hole length to be written and xfer2 is how many bytes 2869 * to be written as 0's upon return. 
2870 */ 2871 static off_t 2872 vn_skip_hole(struct vnode *outvp, off_t xfer2, off_t *outoffp, off_t *xferp, 2873 off_t *dataoffp, off_t *holeoffp, struct ucred *cred) 2874 { 2875 int error; 2876 off_t delta; 2877 2878 if (*holeoffp == 0 || *holeoffp <= *outoffp) { 2879 *dataoffp = *outoffp; 2880 error = VOP_IOCTL(outvp, FIOSEEKDATA, dataoffp, 0, cred, 2881 curthread); 2882 if (error == 0) { 2883 *holeoffp = *dataoffp; 2884 error = VOP_IOCTL(outvp, FIOSEEKHOLE, holeoffp, 0, cred, 2885 curthread); 2886 } 2887 if (error != 0 || *holeoffp == *dataoffp) { 2888 /* 2889 * Since outvp is unlocked, it may be possible for 2890 * another thread to do a truncate(), lseek(), write() 2891 * creating a hole at startoff between the above 2892 * VOP_IOCTL() calls, if the other thread does not do 2893 * rangelocking. 2894 * If that happens, *holeoffp == *dataoffp and finding 2895 * the hole has failed, so disable vn_skip_hole(). 2896 */ 2897 *holeoffp = -1; /* Disable use of vn_skip_hole(). */ 2898 return (xfer2); 2899 } 2900 KASSERT(*dataoffp >= *outoffp, 2901 ("vn_skip_hole: dataoff=%jd < outoff=%jd", 2902 (intmax_t)*dataoffp, (intmax_t)*outoffp)); 2903 KASSERT(*holeoffp > *dataoffp, 2904 ("vn_skip_hole: holeoff=%jd <= dataoff=%jd", 2905 (intmax_t)*holeoffp, (intmax_t)*dataoffp)); 2906 } 2907 2908 /* 2909 * If there is a hole before the data starts, advance *outoffp and 2910 * *xferp past the hole. 2911 */ 2912 if (*dataoffp > *outoffp) { 2913 delta = *dataoffp - *outoffp; 2914 if (delta >= *xferp) { 2915 /* Entire *xferp is a hole. */ 2916 *outoffp += *xferp; 2917 *xferp = 0; 2918 return (0); 2919 } 2920 *xferp -= delta; 2921 *outoffp += delta; 2922 xfer2 = MIN(xfer2, *xferp); 2923 } 2924 2925 /* 2926 * If a hole starts before the end of this xfer2, reduce this xfer2 so 2927 * that the write ends at the start of the hole. 2928 * *holeoffp should always be greater than *outoffp, but for the 2929 * non-INVARIANTS case, check this to make sure xfer2 remains a sane 2930 * value. 2931 */ 2932 if (*holeoffp > *outoffp && *holeoffp < *outoffp + xfer2) 2933 xfer2 = *holeoffp - *outoffp; 2934 return (xfer2); 2935 } 2936 2937 /* 2938 * Write an xfer sized chunk to outvp in blksize blocks from dat. 2939 * dat is a maximum of blksize in length and can be written repeatedly in 2940 * the chunk. 2941 * If growfile == true, just grow the file via vn_truncate_locked() instead 2942 * of doing actual writes. 2943 * If checkhole == true, a hole is being punched, so skip over any hole 2944 * already in the output file. 2945 */ 2946 static int 2947 vn_write_outvp(struct vnode *outvp, char *dat, off_t outoff, off_t xfer, 2948 u_long blksize, bool growfile, bool checkhole, struct ucred *cred) 2949 { 2950 struct mount *mp; 2951 off_t dataoff, holeoff, xfer2; 2952 int error, lckf; 2953 2954 /* 2955 * Loop around doing writes of blksize until write has been completed. 2956 * Lock/unlock on each loop iteration so that a bwillwrite() can be 2957 * done for each iteration, since the xfer argument can be very 2958 * large if there is a large hole to punch in the output file. 2959 */ 2960 error = 0; 2961 holeoff = 0; 2962 do { 2963 xfer2 = MIN(xfer, blksize); 2964 if (checkhole) { 2965 /* 2966 * Punching a hole. Skip writing if there is 2967 * already a hole in the output file. 
2968 */ 2969 xfer2 = vn_skip_hole(outvp, xfer2, &outoff, &xfer, 2970 &dataoff, &holeoff, cred); 2971 if (xfer == 0) 2972 break; 2973 if (holeoff < 0) 2974 checkhole = false; 2975 KASSERT(xfer2 > 0, ("vn_write_outvp: xfer2=%jd", 2976 (intmax_t)xfer2)); 2977 } 2978 bwillwrite(); 2979 mp = NULL; 2980 error = vn_start_write(outvp, &mp, V_WAIT); 2981 if (error != 0) 2982 break; 2983 if (growfile) { 2984 error = vn_lock(outvp, LK_EXCLUSIVE); 2985 if (error == 0) { 2986 error = vn_truncate_locked(outvp, outoff + xfer, 2987 false, cred); 2988 VOP_UNLOCK(outvp); 2989 } 2990 } else { 2991 if (MNT_SHARED_WRITES(mp)) 2992 lckf = LK_SHARED; 2993 else 2994 lckf = LK_EXCLUSIVE; 2995 error = vn_lock(outvp, lckf); 2996 if (error == 0) { 2997 error = vn_rdwr(UIO_WRITE, outvp, dat, xfer2, 2998 outoff, UIO_SYSSPACE, IO_NODELOCKED, 2999 curthread->td_ucred, cred, NULL, curthread); 3000 outoff += xfer2; 3001 xfer -= xfer2; 3002 } 3003 VOP_UNLOCK(outvp); 3004 } 3005 if (mp != NULL) 3006 vn_finished_write(mp); 3007 } while (!growfile && xfer > 0 && error == 0); 3008 return (error); 3009 } 3010 3011 /* 3012 * Copy a byte range of one file to another. This function can handle the 3013 * case where invp and outvp are on different file systems. 3014 * It can also be called by a VOP_COPY_FILE_RANGE() to do the work, if there 3015 * is no better file system specific way to do it. 3016 */ 3017 int 3018 vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp, 3019 struct vnode *outvp, off_t *outoffp, size_t *lenp, unsigned int flags, 3020 struct ucred *incred, struct ucred *outcred, struct thread *fsize_td) 3021 { 3022 struct vattr va; 3023 struct mount *mp; 3024 struct uio io; 3025 off_t startoff, endoff, xfer, xfer2; 3026 u_long blksize; 3027 int error, interrupted; 3028 bool cantseek, readzeros, eof, lastblock; 3029 ssize_t aresid; 3030 size_t copylen, len, rem, savlen; 3031 char *dat; 3032 long holein, holeout; 3033 3034 holein = holeout = 0; 3035 savlen = len = *lenp; 3036 error = 0; 3037 interrupted = 0; 3038 dat = NULL; 3039 3040 error = vn_lock(invp, LK_SHARED); 3041 if (error != 0) 3042 goto out; 3043 if (VOP_PATHCONF(invp, _PC_MIN_HOLE_SIZE, &holein) != 0) 3044 holein = 0; 3045 VOP_UNLOCK(invp); 3046 3047 mp = NULL; 3048 error = vn_start_write(outvp, &mp, V_WAIT); 3049 if (error == 0) 3050 error = vn_lock(outvp, LK_EXCLUSIVE); 3051 if (error == 0) { 3052 /* 3053 * If fsize_td != NULL, do a vn_rlimit_fsize() call, 3054 * now that outvp is locked. 3055 */ 3056 if (fsize_td != NULL) { 3057 io.uio_offset = *outoffp; 3058 io.uio_resid = len; 3059 error = vn_rlimit_fsize(outvp, &io, fsize_td); 3060 if (error != 0) 3061 error = EFBIG; 3062 } 3063 if (VOP_PATHCONF(outvp, _PC_MIN_HOLE_SIZE, &holeout) != 0) 3064 holeout = 0; 3065 /* 3066 * Holes that are past EOF do not need to be written as a block 3067 * of zero bytes. So, truncate the output file as far as 3068 * possible and then use va.va_size to decide if writing 0 3069 * bytes is necessary in the loop below. 
3070 */ 3071 if (error == 0) 3072 error = VOP_GETATTR(outvp, &va, outcred); 3073 if (error == 0 && va.va_size > *outoffp && va.va_size <= 3074 *outoffp + len) { 3075 #ifdef MAC 3076 error = mac_vnode_check_write(curthread->td_ucred, 3077 outcred, outvp); 3078 if (error == 0) 3079 #endif 3080 error = vn_truncate_locked(outvp, *outoffp, 3081 false, outcred); 3082 if (error == 0) 3083 va.va_size = *outoffp; 3084 } 3085 VOP_UNLOCK(outvp); 3086 } 3087 if (mp != NULL) 3088 vn_finished_write(mp); 3089 if (error != 0) 3090 goto out; 3091 3092 /* 3093 * Set the blksize to the larger of the hole sizes for invp and outvp. 3094 * If hole sizes aren't available, set the blksize to the larger 3095 * f_iosize of invp and outvp. 3096 * This code expects the hole sizes and f_iosizes to be powers of 2. 3097 * This value is clipped at 4Kbytes and 1Mbyte. 3098 */ 3099 blksize = MAX(holein, holeout); 3100 3101 /* Clip len to end at an exact multiple of hole size. */ 3102 if (blksize > 1) { 3103 rem = *inoffp % blksize; 3104 if (rem > 0) 3105 rem = blksize - rem; 3106 if (len - rem > blksize) 3107 len = savlen = rounddown(len - rem, blksize) + rem; 3108 } 3109 3110 if (blksize <= 1) 3111 blksize = MAX(invp->v_mount->mnt_stat.f_iosize, 3112 outvp->v_mount->mnt_stat.f_iosize); 3113 if (blksize < 4096) 3114 blksize = 4096; 3115 else if (blksize > 1024 * 1024) 3116 blksize = 1024 * 1024; 3117 dat = malloc(blksize, M_TEMP, M_WAITOK); 3118 3119 /* 3120 * If VOP_IOCTL(FIOSEEKHOLE) works for invp, use it and FIOSEEKDATA 3121 * to find holes. Otherwise, just scan the read block for all 0s 3122 * in the inner loop where the data copying is done. 3123 * Note that some file systems such as NFSv3, NFSv4.0 and NFSv4.1 may 3124 * support holes on the server, but do not support FIOSEEKHOLE. 3125 */ 3126 eof = false; 3127 while (len > 0 && error == 0 && !eof && interrupted == 0) { 3128 endoff = 0; /* To shut up compilers. */ 3129 cantseek = true; 3130 startoff = *inoffp; 3131 copylen = len; 3132 3133 /* 3134 * Find the next data area. If there is just a hole to EOF, 3135 * FIOSEEKDATA should fail and then we drop down into the 3136 * inner loop and create the hole on the outvp file. 3137 * (I do not know if any file system will report a hole to 3138 * EOF via FIOSEEKHOLE, but I am pretty sure FIOSEEKDATA 3139 * will fail for those file systems.) 3140 * 3141 * For input files that don't support FIOSEEKDATA/FIOSEEKHOLE, 3142 * the code just falls through to the inner copy loop. 3143 */ 3144 error = EINVAL; 3145 if (holein > 0) 3146 error = VOP_IOCTL(invp, FIOSEEKDATA, &startoff, 0, 3147 incred, curthread); 3148 if (error == 0) { 3149 endoff = startoff; 3150 error = VOP_IOCTL(invp, FIOSEEKHOLE, &endoff, 0, 3151 incred, curthread); 3152 /* 3153 * Since invp is unlocked, it may be possible for 3154 * another thread to do a truncate(), lseek(), write() 3155 * creating a hole at startoff between the above 3156 * VOP_IOCTL() calls, if the other thread does not do 3157 * rangelocking. 3158 * If that happens, startoff == endoff and finding 3159 * the hole has failed, so set an error. 3160 */ 3161 if (error == 0 && startoff == endoff) 3162 error = EINVAL; /* Any error. Reset to 0. */ 3163 } 3164 if (error == 0) { 3165 if (startoff > *inoffp) { 3166 /* Found hole before data block. */ 3167 xfer = MIN(startoff - *inoffp, len); 3168 if (*outoffp < va.va_size) { 3169 /* Must write 0s to punch hole. 
*/ 3170 xfer2 = MIN(va.va_size - *outoffp, 3171 xfer); 3172 memset(dat, 0, MIN(xfer2, blksize)); 3173 error = vn_write_outvp(outvp, dat, 3174 *outoffp, xfer2, blksize, false, 3175 holeout > 0, outcred); 3176 } 3177 3178 if (error == 0 && *outoffp + xfer > 3179 va.va_size && xfer == len) 3180 /* Grow last block. */ 3181 error = vn_write_outvp(outvp, dat, 3182 *outoffp, xfer, blksize, true, 3183 false, outcred); 3184 if (error == 0) { 3185 *inoffp += xfer; 3186 *outoffp += xfer; 3187 len -= xfer; 3188 if (len < savlen) 3189 interrupted = sig_intr(); 3190 } 3191 } 3192 copylen = MIN(len, endoff - startoff); 3193 cantseek = false; 3194 } else { 3195 cantseek = true; 3196 startoff = *inoffp; 3197 copylen = len; 3198 error = 0; 3199 } 3200 3201 xfer = blksize; 3202 if (cantseek) { 3203 /* 3204 * Set first xfer to end at a block boundary, so that 3205 * holes are more likely detected in the loop below via 3206 * the for all bytes 0 method. 3207 */ 3208 xfer -= (*inoffp % blksize); 3209 } 3210 /* Loop copying the data block. */ 3211 while (copylen > 0 && error == 0 && !eof && interrupted == 0) { 3212 if (copylen < xfer) 3213 xfer = copylen; 3214 error = vn_lock(invp, LK_SHARED); 3215 if (error != 0) 3216 goto out; 3217 error = vn_rdwr(UIO_READ, invp, dat, xfer, 3218 startoff, UIO_SYSSPACE, IO_NODELOCKED, 3219 curthread->td_ucred, incred, &aresid, 3220 curthread); 3221 VOP_UNLOCK(invp); 3222 lastblock = false; 3223 if (error == 0 && aresid > 0) { 3224 /* Stop the copy at EOF on the input file. */ 3225 xfer -= aresid; 3226 eof = true; 3227 lastblock = true; 3228 } 3229 if (error == 0) { 3230 /* 3231 * Skip the write for holes past the initial EOF 3232 * of the output file, unless this is the last 3233 * write of the output file at EOF. 3234 */ 3235 readzeros = cantseek ? mem_iszero(dat, xfer) : 3236 false; 3237 if (xfer == len) 3238 lastblock = true; 3239 if (!cantseek || *outoffp < va.va_size || 3240 lastblock || !readzeros) 3241 error = vn_write_outvp(outvp, dat, 3242 *outoffp, xfer, blksize, 3243 readzeros && lastblock && 3244 *outoffp >= va.va_size, false, 3245 outcred); 3246 if (error == 0) { 3247 *inoffp += xfer; 3248 startoff += xfer; 3249 *outoffp += xfer; 3250 copylen -= xfer; 3251 len -= xfer; 3252 if (len < savlen) 3253 interrupted = sig_intr(); 3254 } 3255 } 3256 xfer = blksize; 3257 } 3258 } 3259 out: 3260 *lenp = savlen - len; 3261 free(dat, M_TEMP); 3262 return (error); 3263 } 3264 3265 static int 3266 vn_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td) 3267 { 3268 struct mount *mp; 3269 struct vnode *vp; 3270 off_t olen, ooffset; 3271 int error; 3272 #ifdef AUDIT 3273 int audited_vnode1 = 0; 3274 #endif 3275 3276 vp = fp->f_vnode; 3277 if (vp->v_type != VREG) 3278 return (ENODEV); 3279 3280 /* Allocating blocks may take a long time, so iterate. 
*/ 3281 for (;;) { 3282 olen = len; 3283 ooffset = offset; 3284 3285 bwillwrite(); 3286 mp = NULL; 3287 error = vn_start_write(vp, &mp, V_WAIT | PCATCH); 3288 if (error != 0) 3289 break; 3290 error = vn_lock(vp, LK_EXCLUSIVE); 3291 if (error != 0) { 3292 vn_finished_write(mp); 3293 break; 3294 } 3295 #ifdef AUDIT 3296 if (!audited_vnode1) { 3297 AUDIT_ARG_VNODE1(vp); 3298 audited_vnode1 = 1; 3299 } 3300 #endif 3301 #ifdef MAC 3302 error = mac_vnode_check_write(td->td_ucred, fp->f_cred, vp); 3303 if (error == 0) 3304 #endif 3305 error = VOP_ALLOCATE(vp, &offset, &len); 3306 VOP_UNLOCK(vp); 3307 vn_finished_write(mp); 3308 3309 if (olen + ooffset != offset + len) { 3310 panic("offset + len changed from %jx/%jx to %jx/%jx", 3311 ooffset, olen, offset, len); 3312 } 3313 if (error != 0 || len == 0) 3314 break; 3315 KASSERT(olen > len, ("Iteration did not make progress?")); 3316 maybe_yield(); 3317 } 3318 3319 return (error); 3320 } 3321