/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int      vop_nolookup(struct vop_lookup_args *);
static int      vop_norename(struct vop_rename_args *);
static int      vop_nostrategy(struct vop_strategy_args *);
static int      get_next_dirent(struct vnode *vp, struct dirent **dpp,
                    char *dirbuf, int dirbuflen, off_t *off,
                    char **cpos, int *len, int *eofflag,
                    struct thread *td);
static int      dirent_exists(struct vnode *vp, const char *dirname,
                    struct thread *td);

#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)
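
/*
 * A note on the arithmetic above: d_name is declared as
 * char d_name[MAXNAMLEN + 1], so subtracting (MAXNAMLEN+1) from
 * sizeof(struct dirent) leaves the size of the fixed header, and the
 * trailing + 4 accounts for the smallest possible name slot (entries
 * are padded to 4-byte boundaries).  Any d_reclen below this value
 * cannot describe a valid entry, which is how get_next_dirent()
 * detects malformed directories.
 */
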
static int      vop_stdis_text(struct vop_is_text_args *ap);
static int      vop_stdset_text(struct vop_set_text_args *ap);
static int      vop_stdunset_text(struct vop_unset_text_args *ap);
static int      vop_stdget_writecount(struct vop_get_writecount_args *ap);
static int      vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int      vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int      vop_stdgetpages_async(struct vop_getpages_async_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess(), etc.
 */

struct vop_vector default_vnodeops = {
        .vop_default =          NULL,
        .vop_bypass =           VOP_EOPNOTSUPP,

        .vop_access =           vop_stdaccess,
        .vop_accessx =          vop_stdaccessx,
        .vop_advise =           vop_stdadvise,
        .vop_advlock =          vop_stdadvlock,
        .vop_advlockasync =     vop_stdadvlockasync,
        .vop_advlockpurge =     vop_stdadvlockpurge,
        .vop_allocate =         vop_stdallocate,
        .vop_bmap =             vop_stdbmap,
        .vop_close =            VOP_NULL,
        .vop_fsync =            VOP_NULL,
        .vop_fdatasync =        vop_stdfdatasync,
        .vop_getpages =         vop_stdgetpages,
        .vop_getpages_async =   vop_stdgetpages_async,
        .vop_getwritemount =    vop_stdgetwritemount,
        .vop_inactive =         VOP_NULL,
        .vop_ioctl =            VOP_ENOTTY,
        .vop_kqfilter =         vop_stdkqfilter,
        .vop_islocked =         vop_stdislocked,
        .vop_lock1 =            vop_stdlock,
        .vop_lookup =           vop_nolookup,
        .vop_open =             VOP_NULL,
        .vop_pathconf =         VOP_EINVAL,
        .vop_poll =             vop_nopoll,
        .vop_putpages =         vop_stdputpages,
        .vop_readlink =         VOP_EINVAL,
        .vop_rename =           vop_norename,
        .vop_revoke =           VOP_PANIC,
        .vop_strategy =         vop_nostrategy,
        .vop_unlock =           vop_stdunlock,
        .vop_vptocnp =          vop_stdvptocnp,
        .vop_vptofh =           vop_stdvptofh,
        .vop_unp_bind =         vop_stdunp_bind,
        .vop_unp_connect =      vop_stdunp_connect,
        .vop_unp_detach =       vop_stdunp_detach,
        .vop_is_text =          vop_stdis_text,
        .vop_set_text =         vop_stdset_text,
        .vop_unset_text =       vop_stdunset_text,
        .vop_get_writecount =   vop_stdget_writecount,
        .vop_add_writecount =   vop_stdadd_writecount,
};
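
/*
 * Usage sketch ("examplefs" is hypothetical; the pattern is what
 * matters): a filesystem points .vop_default at this table so that any
 * VOP it leaves unset falls through to the entries above, and VOPs with
 * no default entry either end up in the VOP_EOPNOTSUPP bypass:
 *
 *	struct vop_vector examplefs_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_access =	examplefs_access,
 *		.vop_lookup =	examplefs_lookup,
 *		.vop_readdir =	examplefs_readdir,
 *	};
 */
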
/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
        /*
        printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
        */

        return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

        return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

        return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

        return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

        return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

        return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

        panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, is
 * found in the corresponding VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup.
 */
static int
vop_nolookup(struct vop_lookup_args /* {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
} */ *ap)
{

        *ap->a_vpp = NULL;
        return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement a rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

        vop_rename_fail(ap);
        return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy(struct vop_strategy_args *ap)
{
        printf("No strategy for buffer at %p\n", ap->a_bp);
        vn_printf(ap->a_vp, "vnode ");
        ap->a_bp->b_ioflags |= BIO_ERROR;
        ap->a_bp->b_error = EOPNOTSUPP;
        bufdone(ap->a_bp);
        return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
    int dirbuflen, off_t *off, char **cpos, int *len,
    int *eofflag, struct thread *td)
{
        int error, reclen;
        struct uio uio;
        struct iovec iov;
        struct dirent *dp;

        KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
        KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

        if (*len == 0) {
                iov.iov_base = dirbuf;
                iov.iov_len = dirbuflen;

                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_offset = *off;
                uio.uio_resid = dirbuflen;
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_rw = UIO_READ;
                uio.uio_td = td;

                *eofflag = 0;

#ifdef MAC
                error = mac_vnode_check_readdir(td->td_ucred, vp);
                if (error == 0)
#endif
                        error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
                            NULL, NULL);
                if (error)
                        return (error);

                *off = uio.uio_offset;

                *cpos = dirbuf;
                *len = (dirbuflen - uio.uio_resid);

                if (*len == 0)
                        return (ENOENT);
        }

        dp = (struct dirent *)(*cpos);
        reclen = dp->d_reclen;
        *dpp = dp;

        /* Check for a malformed directory entry. */
        if (reclen < DIRENT_MINSIZE)
                return (EINVAL);

        *cpos += reclen;
        *len -= reclen;

        return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
        char *dirbuf, *cpos;
        int error, eofflag, dirbuflen, len, found;
        off_t off;
        struct dirent *dp;
        struct vattr va;

        KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
        KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

        found = 0;

        error = VOP_GETATTR(vp, &va, td->td_ucred);
        if (error)
                return (found);

        dirbuflen = DEV_BSIZE;
        if (dirbuflen < va.va_blocksize)
                dirbuflen = va.va_blocksize;
        dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

        off = 0;
        len = 0;
        do {
                error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
                    &cpos, &len, &eofflag, td);
                if (error)
                        goto out;

                if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
                    strcmp(dp->d_name, dirname) == 0) {
                        found = 1;
                        goto out;
                }
        } while (len > 0 || !eofflag);

out:
        free(dirbuf, M_TEMP);
        return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

        KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
            VAPPEND)) == 0, ("invalid bit in accmode"));

        return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
        int error;
        accmode_t accmode = ap->a_accmode;

        error = vfs_unixify_accmode(&accmode);
        if (error != 0)
                return (error);

        if (accmode == 0)
                return (0);

        return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
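
/*
 * Illustration (a sketch; "examplefs" is hypothetical): a filesystem
 * that only understands the classic Unix rwx bits implements vop_access
 * and leaves vop_accessx unset.  VOP_ACCESSX() then lands in
 * vop_stdaccessx() above, which reduces the extended accmode to the
 * classic bits via vfs_unixify_accmode() and re-dispatches to the
 * filesystem's own vop_access, terminating the mutual recursion:
 *
 *	.vop_access =	examplefs_access,
 *	(.vop_accessx unset -> vop_stdaccessx -> examplefs_access)
 */
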
/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
        struct vnode *vp;
        struct vattr vattr;
        int error;

        vp = ap->a_vp;
        if (ap->a_fl->l_whence == SEEK_END) {
                /*
                 * The NFSv4 server must avoid doing a vn_lock() here, since it
                 * can deadlock the nfsd threads, due to a LOR.  Fortunately
                 * the NFSv4 server always uses SEEK_SET and this code is
                 * only required for the SEEK_END case.
                 */
                vn_lock(vp, LK_SHARED | LK_RETRY);
                error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
                VOP_UNLOCK(vp, 0);
                if (error)
                        return (error);
        } else
                vattr.va_size = 0;

        return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
        struct vnode *vp;
        struct vattr vattr;
        int error;

        vp = ap->a_vp;
        if (ap->a_fl->l_whence == SEEK_END) {
                /* The size argument is only needed for SEEK_END. */
                vn_lock(vp, LK_SHARED | LK_RETRY);
                error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
                VOP_UNLOCK(vp, 0);
                if (error)
                        return (error);
        } else
                vattr.va_size = 0;

        return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
        struct vnode *vp;

        vp = ap->a_vp;
        lf_purgelocks(vp, &vp->v_lockf);
        return (0);
}
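
/*
 * Worked example (plain POSIX lock semantics, not anything defined in
 * this file): the file size fetched above is what lets lf_advlock()
 * resolve SEEK_END-relative ranges.  With va_size == 1000, a request of
 *
 *	fl.l_whence = SEEK_END; fl.l_start = -10; fl.l_len = 0;
 *
 * locks bytes 990 through the (growing) end of file.  For SEEK_SET and
 * SEEK_CUR requests the size is unused, which is why va_size can simply
 * be zeroed in that path.
 */
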
/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(struct vop_pathconf_args /* {
        struct vnode *a_vp;
        int a_name;
        int *a_retval;
} */ *ap)
{

        switch (ap->a_name) {
        case _PC_ASYNC_IO:
                *ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
                return (0);
        case _PC_NAME_MAX:
                *ap->a_retval = NAME_MAX;
                return (0);
        case _PC_PATH_MAX:
                *ap->a_retval = PATH_MAX;
                return (0);
        case _PC_LINK_MAX:
                *ap->a_retval = LINK_MAX;
                return (0);
        case _PC_MAX_CANON:
                *ap->a_retval = MAX_CANON;
                return (0);
        case _PC_MAX_INPUT:
                *ap->a_retval = MAX_INPUT;
                return (0);
        case _PC_PIPE_BUF:
                *ap->a_retval = PIPE_BUF;
                return (0);
        case _PC_CHOWN_RESTRICTED:
                *ap->a_retval = 1;
                return (0);
        case _PC_VDISABLE:
                *ap->a_retval = _POSIX_VDISABLE;
                return (0);
        default:
                return (EINVAL);
        }
        /* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(struct vop_lock1_args /* {
        struct vnode *a_vp;
        int a_flags;
        char *file;
        int line;
} */ *ap)
{
        struct vnode *vp = ap->a_vp;

        return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
            LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
            ap->a_line));
}

/* See above. */
int
vop_stdunlock(struct vop_unlock_args /* {
        struct vnode *a_vp;
        int a_flags;
} */ *ap)
{
        struct vnode *vp = ap->a_vp;

        return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
}

/* See above. */
int
vop_stdislocked(struct vop_islocked_args /* {
        struct vnode *a_vp;
} */ *ap)
{

        return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(struct vop_poll_args /* {
        struct vnode *a_vp;
        int a_events;
        struct ucred *a_cred;
        struct thread *a_td;
} */ *ap)
{

        return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(struct vop_poll_args /* {
        struct vnode *a_vp;
        int a_events;
        struct ucred *a_cred;
        struct thread *a_td;
} */ *ap)
{
        if (ap->a_events & ~POLLSTANDARD)
                return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
        return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(struct vop_getwritemount_args /* {
        struct vnode *a_vp;
        struct mount **a_mpp;
} */ *ap)
{
        struct mount *mp;

        /*
         * XXX Since this is called unlocked we may be recycled while
         * attempting to ref the mount.  If this is the case our mountpoint
         * will be set to NULL.  We only have to prevent this call from
         * returning with a ref to an incorrect mountpoint.  It is not
         * harmful to return with a ref to our previous mountpoint.
         */
        mp = ap->a_vp->v_mount;
        if (mp != NULL) {
                vfs_ref(mp);
                if (mp != ap->a_vp->v_mount) {
                        vfs_rel(mp);
                        mp = NULL;
                }
        }
        *(ap->a_mpp) = mp;
        return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(struct vop_bmap_args /* {
        struct vnode *a_vp;
        daddr_t a_bn;
        struct bufobj **a_bop;
        daddr_t *a_bnp;
        int *a_runp;
        int *a_runb;
} */ *ap)
{

        if (ap->a_bop != NULL)
                *ap->a_bop = &ap->a_vp->v_bufobj;
        if (ap->a_bnp != NULL)
                *ap->a_bnp = ap->a_bn *
                    btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
        if (ap->a_runp != NULL)
                *ap->a_runp = 0;
        if (ap->a_runb != NULL)
                *ap->a_runb = 0;
        return (0);
}
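
/*
 * Arithmetic note (an illustration, not tied to any particular
 * filesystem): btodb() converts bytes to DEV_BSIZE (512-byte) units, so
 * with an f_iosize of 16384 the factor is 32 and logical block 5 maps
 * to disk address 5 * 32 = 160.  The zeroed a_runp/a_runb report that
 * no read-ahead/read-behind clustering is available.
 */
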
int
vop_stdfsync(struct vop_fsync_args /* {
        struct vnode *a_vp;
        int a_waitfor;
        struct thread *a_td;
} */ *ap)
{
        struct vnode *vp = ap->a_vp;
        struct buf *bp;
        struct bufobj *bo;
        struct buf *nbp;
        int error = 0;
        int maxretry = 1000;     /* large, arbitrarily chosen */

        bo = &vp->v_bufobj;
        BO_LOCK(bo);
loop1:
        /*
         * MARK/SCAN initialization to avoid infinite loops.
         */
        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
                bp->b_vflags &= ~BV_SCANNED;
                bp->b_error = 0;
        }

        /*
         * Flush all dirty buffers associated with a vnode.
         */
loop2:
        TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
                if ((bp->b_vflags & BV_SCANNED) != 0)
                        continue;
                bp->b_vflags |= BV_SCANNED;
                if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
                        if (ap->a_waitfor != MNT_WAIT)
                                continue;
                        if (BUF_LOCK(bp,
                            LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
                            BO_LOCKPTR(bo)) != 0) {
                                BO_LOCK(bo);
                                goto loop1;
                        }
                        BO_LOCK(bo);
                }
                BO_UNLOCK(bo);
                KASSERT(bp->b_bufobj == bo,
                    ("bp %p wrong b_bufobj %p should be %p",
                    bp, bp->b_bufobj, bo));
                if ((bp->b_flags & B_DELWRI) == 0)
                        panic("fsync: not dirty");
                if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
                        vfs_bio_awrite(bp);
                } else {
                        bremfree(bp);
                        bawrite(bp);
                }
                BO_LOCK(bo);
                goto loop2;
        }

        /*
         * If synchronous the caller expects us to completely resolve all
         * dirty buffers in the system.  Wait for in-progress I/O to
         * complete (which could include background bitmap writes), then
         * retry if dirty blocks still exist.
         */
        if (ap->a_waitfor == MNT_WAIT) {
                bufobj_wwait(bo, 0, 0);
                if (bo->bo_dirty.bv_cnt > 0) {
                        /*
                         * If we are unable to write any of these buffers
                         * then we fail now rather than trying endlessly
                         * to write them out.
                         */
                        TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
                                if ((error = bp->b_error) == 0)
                                        continue;
                        if (error == 0 && --maxretry >= 0)
                                goto loop1;
                        error = EAGAIN;
                }
        }
        BO_UNLOCK(bo);
        if (error == EAGAIN)
                vn_printf(vp, "fsync: giving up on dirty ");

        return (error);
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

        return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{
        struct vop_fsync_args apf;

        apf.a_vp = ap->a_vp;
        apf.a_waitfor = MNT_WAIT;
        apf.a_td = ap->a_td;
        return (vop_stdfsync(&apf));
}
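
/*
 * A note on the two variants above: vop_stdfdatasync() implements
 * VOP_FDATASYNC() as a full VOP_FSYNC() through the filesystem's own
 * fsync VOP, which is always correct since fdatasync() is merely
 * allowed, not required, to skip metadata-only work.
 * vop_stdfdatasync_buf() is the buffer-cache flavor: as a usage sketch
 * (not a requirement), a filesystem whose data lives in the buffer
 * cache can point .vop_fdatasync directly at it to run the generic
 * vop_stdfsync() without re-entering VOP_FSYNC().
 */
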
/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(struct vop_getpages_args /* {
        struct vnode *a_vp;
        vm_page_t *a_m;
        int a_count;
        int *a_rbehind;
        int *a_rahead;
} */ *ap)
{

        return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
            ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
        int error;

        /*
         * Emulate the asynchronous operation: perform the read
         * synchronously, then invoke the caller's completion callback.
         */
        error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
            ap->a_rahead);
        ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
        return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
        return (vfs_kqfilter(ap));
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(struct vop_putpages_args /* {
        struct vnode *a_vp;
        vm_page_t *a_m;
        int a_count;
        int a_sync;
        int *a_rtvals;
} */ *ap)
{

        return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
            ap->a_sync, ap->a_rtvals));
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
        return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct vnode **dvp = ap->a_vpp;
        struct ucred *cred = ap->a_cred;
        char *buf = ap->a_buf;
        int *buflen = ap->a_buflen;
        char *dirbuf, *cpos;
        int i, error, eofflag, dirbuflen, flags, locked, len, covered;
        off_t off;
        ino_t fileno;
        struct vattr va;
        struct nameidata nd;
        struct thread *td;
        struct dirent *dp;
        struct vnode *mvp;

        i = *buflen;
        error = 0;
        covered = 0;
        td = curthread;

        if (vp->v_type != VDIR)
                return (ENOENT);

        error = VOP_GETATTR(vp, &va, cred);
        if (error)
                return (error);

        VREF(vp);
        locked = VOP_ISLOCKED(vp);
        VOP_UNLOCK(vp, 0);
        NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
            "..", vp, td);
        flags = FREAD;
        error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
        if (error) {
                vn_lock(vp, locked | LK_RETRY);
                return (error);
        }
        NDFREE(&nd, NDF_ONLY_PNBUF);

        mvp = *dvp = nd.ni_vp;

        if (vp->v_mount != (*dvp)->v_mount &&
            ((*dvp)->v_vflag & VV_ROOT) &&
            ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
                *dvp = (*dvp)->v_mount->mnt_vnodecovered;
                VREF(mvp);
                VOP_UNLOCK(mvp, 0);
                vn_close(mvp, FREAD, cred, td);
                VREF(*dvp);
                vn_lock(*dvp, LK_SHARED | LK_RETRY);
                covered = 1;
        }

        fileno = va.va_fileid;

        dirbuflen = DEV_BSIZE;
        if (dirbuflen < va.va_blocksize)
                dirbuflen = va.va_blocksize;
        dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

        if ((*dvp)->v_type != VDIR) {
                error = ENOENT;
                goto out;
        }

        off = 0;
        len = 0;
        do {
                /* call VOP_READDIR of parent */
                error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
                    &cpos, &len, &eofflag, td);
                if (error)
                        goto out;

                if ((dp->d_type != DT_WHT) &&
                    (dp->d_fileno == fileno)) {
                        if (covered) {
                                VOP_UNLOCK(*dvp, 0);
                                vn_lock(mvp, LK_SHARED | LK_RETRY);
                                if (dirent_exists(mvp, dp->d_name, td)) {
                                        error = ENOENT;
                                        VOP_UNLOCK(mvp, 0);
                                        vn_lock(*dvp, LK_SHARED | LK_RETRY);
                                        goto out;
                                }
                                VOP_UNLOCK(mvp, 0);
                                vn_lock(*dvp, LK_SHARED | LK_RETRY);
                        }
                        i -= dp->d_namlen;

                        if (i < 0) {
                                error = ENOMEM;
                                goto out;
                        }
                        if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
                                error = ENOENT;
                        } else {
                                bcopy(dp->d_name, buf + i, dp->d_namlen);
                                error = 0;
                        }
                        goto out;
                }
        } while (len > 0 || !eofflag);
        error = ENOENT;

out:
        free(dirbuf, M_TEMP);
        if (!error) {
                *buflen = i;
                vref(*dvp);
        }
        if (covered) {
                vput(*dvp);
                vrele(mvp);
        } else {
                VOP_UNLOCK(mvp, 0);
                vn_close(mvp, FREAD, cred, td);
        }
        vn_lock(vp, locked | LK_RETRY);
        return (error);
}
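
/*
 * A descriptive note on the routine above: vop_stdvptocnp() answers
 * "what is this directory's name in its parent?" by opening ".." and
 * scanning it with VOP_READDIR() until it finds an entry whose
 * d_fileno matches the child's va_fileid.  The name is copied into the
 * tail of the caller's buffer, which is filled backwards as reverse
 * lookups walk toward the root; this is the slow fallback used when
 * the name cache cannot resolve a vnode back to a path component.
 */
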
int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
        struct statfs sfs;
#endif
        struct iovec aiov;
        struct vattr vattr, *vap;
        struct uio auio;
        off_t fsize, len, cur, offset;
        uint8_t *buf;
        struct thread *td;
        struct vnode *vp;
        size_t iosize;
        int error;

        buf = NULL;
        error = 0;
        td = curthread;
        vap = &vattr;
        vp = ap->a_vp;
        len = *ap->a_len;
        offset = *ap->a_offset;

        error = VOP_GETATTR(vp, vap, td->td_ucred);
        if (error != 0)
                goto out;
        fsize = vap->va_size;
        iosize = vap->va_blocksize;
        if (iosize == 0)
                iosize = BLKDEV_IOSIZE;
        if (iosize > MAXPHYS)
                iosize = MAXPHYS;
        buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
        /*
         * Check if the filesystem sets f_maxfilesize; if not use
         * VOP_SETATTR to perform the check.
         */
        error = VFS_STATFS(vp->v_mount, &sfs, td);
        if (error != 0)
                goto out;
        if (sfs.f_maxfilesize) {
                if (offset > sfs.f_maxfilesize || len > sfs.f_maxfilesize ||
                    offset + len > sfs.f_maxfilesize) {
                        error = EFBIG;
                        goto out;
                }
        } else
#endif
        if (offset + len > vap->va_size) {
                /*
                 * Test offset + len against the filesystem's maxfilesize.
                 */
                VATTR_NULL(vap);
                vap->va_size = offset + len;
                error = VOP_SETATTR(vp, vap, td->td_ucred);
                if (error != 0)
                        goto out;
                VATTR_NULL(vap);
                vap->va_size = fsize;
                error = VOP_SETATTR(vp, vap, td->td_ucred);
                if (error != 0)
                        goto out;
        }

        for (;;) {
                /*
                 * Read and write back anything below the nominal file
                 * size.  There's currently no way outside the filesystem
                 * to know whether this area is sparse or not.
                 */
                cur = iosize;
                if ((offset % iosize) != 0)
                        cur -= (offset % iosize);
                if (cur > len)
                        cur = len;
                if (offset < fsize) {
                        aiov.iov_base = buf;
                        aiov.iov_len = cur;
                        auio.uio_iov = &aiov;
                        auio.uio_iovcnt = 1;
                        auio.uio_offset = offset;
                        auio.uio_resid = cur;
                        auio.uio_segflg = UIO_SYSSPACE;
                        auio.uio_rw = UIO_READ;
                        auio.uio_td = td;
                        error = VOP_READ(vp, &auio, 0, td->td_ucred);
                        if (error != 0)
                                break;
                        if (auio.uio_resid > 0) {
                                bzero(buf + cur - auio.uio_resid,
                                    auio.uio_resid);
                        }
                } else {
                        bzero(buf, cur);
                }

                aiov.iov_base = buf;
                aiov.iov_len = cur;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = offset;
                auio.uio_resid = cur;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_WRITE;
                auio.uio_td = td;

                error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
                if (error != 0)
                        break;

                len -= cur;
                offset += cur;
                if (len == 0)
                        break;
                if (should_yield())
                        break;
        }

out:
        *ap->a_len = len;
        *ap->a_offset = offset;
        free(buf, M_TEMP);
        return (error);
}
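
/*
 * Usage note on the loop above (descriptive, not a contract defined
 * here): vop_stdallocate() emulates posix_fallocate() by reading each
 * block and writing it back, zero-filling anything past EOF, which
 * forces the filesystem to allocate backing store.  Because it writes
 * the residual values back through *ap->a_len and *ap->a_offset before
 * returning, it may stop early at should_yield() with partial
 * progress; the caller is expected to keep reissuing VOP_ALLOCATE()
 * until the remaining length reaches zero.
 */
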
int
vop_stdadvise(struct vop_advise_args *ap)
{
        struct vnode *vp;
        struct bufobj *bo;
        daddr_t startn, endn;
        off_t start, end;
        int bsize, error;

        vp = ap->a_vp;
        switch (ap->a_advice) {
        case POSIX_FADV_WILLNEED:
                /*
                 * Do nothing for now.  Filesystems should provide a
                 * custom method which starts an asynchronous read of
                 * the requested region.
                 */
                error = 0;
                break;
        case POSIX_FADV_DONTNEED:
                error = 0;
                vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                if (vp->v_iflag & VI_DOOMED) {
                        VOP_UNLOCK(vp, 0);
                        break;
                }

                /*
                 * Deactivate pages in the specified range from the backing VM
                 * object.  Pages that are resident in the buffer cache will
                 * remain wired until their corresponding buffers are released
                 * below.
                 */
                if (vp->v_object != NULL) {
                        start = trunc_page(ap->a_start);
                        end = round_page(ap->a_end);
                        VM_OBJECT_WLOCK(vp->v_object);
                        vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
                            OFF_TO_IDX(end));
                        VM_OBJECT_WUNLOCK(vp->v_object);
                }

                bo = &vp->v_bufobj;
                BO_RLOCK(bo);
                bsize = vp->v_bufobj.bo_bsize;
                startn = ap->a_start / bsize;
                endn = ap->a_end / bsize;
                error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
                if (error == 0)
                        error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
                BO_RUNLOCK(bo);
                VOP_UNLOCK(vp, 0);
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

        ap->a_vp->v_socket = ap->a_socket;
        return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

        *ap->a_socket = ap->a_vp->v_socket;
        return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

        ap->a_vp->v_socket = NULL;
        return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

        return ((ap->a_vp->v_vflag & VV_TEXT) != 0);
}

static int
vop_stdset_text(struct vop_set_text_args *ap)
{

        ap->a_vp->v_vflag |= VV_TEXT;
        return (0);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{

        ap->a_vp->v_vflag &= ~VV_TEXT;
        return (0);
}

static int
vop_stdget_writecount(struct vop_get_writecount_args *ap)
{

        *ap->a_writecount = ap->a_vp->v_writecount;
        return (0);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{

        ap->a_vp->v_writecount += ap->a_inc;
        return (0);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot(struct mount *mp, int flags, struct vnode **vpp)
{

        return (EOPNOTSUPP);
}

int
vfs_stdstatfs(struct mount *mp, struct statfs *sbp)
{

        return (EOPNOTSUPP);
}

int
vfs_stdquotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{

        return (EOPNOTSUPP);
}

int
vfs_stdsync(struct mount *mp, int waitfor)
{
        struct vnode *vp, *mvp;
        struct thread *td;
        int error, lockreq, allerror = 0;

        td = curthread;
        lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
        if (waitfor != MNT_WAIT)
                lockreq |= LK_NOWAIT;
        /*
         * Force stale buffer cache information to be flushed.
         */
loop:
        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
                if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
                        VI_UNLOCK(vp);
                        continue;
                }
                if ((error = vget(vp, lockreq, td)) != 0) {
                        if (error == ENOENT) {
                                MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                                goto loop;
                        }
                        continue;
                }
                error = VOP_FSYNC(vp, waitfor, td);
                if (error)
                        allerror = error;
                vput(vp);
        }
        return (allerror);
}

int
vfs_stdnosync(struct mount *mp, int waitfor)
{

        return (0);
}

int
vfs_stdvget(struct mount *mp, ino_t ino, int flags, struct vnode **vpp)
{

        return (EOPNOTSUPP);
}

int
vfs_stdfhtovp(struct mount *mp, struct fid *fhp, int flags,
    struct vnode **vpp)
{

        return (EOPNOTSUPP);
}

int
vfs_stdinit(struct vfsconf *vfsp)
{

        return (0);
}

int
vfs_stduninit(struct vfsconf *vfsp)
{

        return (0);
}

int
vfs_stdextattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

        if (filename_vp != NULL)
                VOP_UNLOCK(filename_vp, 0);
        return (EOPNOTSUPP);
}

int
vfs_stdsysctl(struct mount *mp, fsctlop_t op, struct sysctl_req *req)
{

        return (EOPNOTSUPP);
}

/* end of vfs default ops */
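
/*
 * A wiring sketch ("examplefs" is hypothetical, and the fill-in logic
 * in vfs_register() in vfs_init.c is assumed, not shown here): a
 * filesystem's struct vfsops only needs the operations it actually
 * supports, e.g.
 *
 *	static struct vfsops examplefs_vfsops = {
 *		.vfs_mount =	examplefs_mount,
 *		.vfs_unmount =	examplefs_unmount,
 *		.vfs_root =	examplefs_root,
 *		.vfs_statfs =	examplefs_statfs,
 *	};
 *	VFS_SET(examplefs_vfsops, examplefs, 0);
 *
 * Registration is expected to fill the remaining slots with the
 * vfs_std* defaults above, so that, for example, quotactl on such a
 * filesystem simply fails with EOPNOTSUPP.
 */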