/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/rwlock.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/poll.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_norename(struct vop_rename_args *);
static int	vop_nostrategy(struct vop_strategy_args *);
static int	get_next_dirent(struct vnode *vp, struct dirent **dpp,
				char *dirbuf, int dirbuflen, off_t *off,
				char **cpos, int *len, int *eofflag,
				struct thread *td);
static int	dirent_exists(struct vnode *vp, const char *dirname,
			      struct thread *td);

/* Smallest valid d_reclen: the fixed dirent header plus four name bytes. */
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

static int vop_stdis_text(struct vop_is_text_args *ap);
static int vop_stdset_text(struct vop_set_text_args *ap);
static int vop_stdunset_text(struct vop_unset_text_args *ap);
static int vop_stdget_writecount(struct vop_get_writecount_args *ap);
static int vop_stdadd_writecount(struct vop_add_writecount_args *ap);
static int vop_stdfdatasync(struct vop_fdatasync_args *ap);
static int vop_stdgetpages_async(struct vop_getpages_async_args *ap);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 * Note that every filesystem has to implement either vop_access
 * or vop_accessx; failing to do so will result in an immediate crash
 * due to stack overflow, as vop_stdaccess() calls vop_stdaccessx(),
 * which calls vop_stdaccess(), and so on.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_access =		vop_stdaccess,
	.vop_accessx =		vop_stdaccessx,
	.vop_advise =		vop_stdadvise,
	.vop_advlock =		vop_stdadvlock,
	.vop_advlockasync =	vop_stdadvlockasync,
	.vop_advlockpurge =	vop_stdadvlockpurge,
	.vop_allocate =		vop_stdallocate,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_fdatasync =	vop_stdfdatasync,
	.vop_getpages =		vop_stdgetpages,
	.vop_getpages_async =	vop_stdgetpages_async,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lock1 =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_rename =		vop_norename,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
	.vop_vptocnp =		vop_stdvptocnp,
	.vop_vptofh =		vop_stdvptofh,
	.vop_unp_bind =		vop_stdunp_bind,
	.vop_unp_connect =	vop_stdunp_connect,
	.vop_unp_detach =	vop_stdunp_detach,
	.vop_is_text =		vop_stdis_text,
	.vop_set_text =		vop_stdset_text,
	.vop_unset_text =	vop_stdunset_text,
	.vop_get_writecount =	vop_stdget_writecount,
	.vop_add_writecount =	vop_stdadd_writecount,
};
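
/*
 * Illustrative sketch, not part of this file: a filesystem normally points
 * .vop_default at default_vnodeops so that any VOP it leaves unset falls
 * back to the defaults above.  The "examplefs" names below are hypothetical;
 * examplefs_lookup and examplefs_readdir stand in for a real filesystem's
 * own implementations.
 *
 * static struct vop_vector examplefs_vnodeops = {
 *	.vop_default =	&default_vnodeops,
 *	.vop_lookup =	examplefs_lookup,
 *	.vop_readdir =	examplefs_readdir,
 * };
 */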

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_enoent(struct vop_generic_args *ap)
{

	return (ENOENT);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, is in
 * the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_norename:
 *
 * Handle unlock and reference counting for arguments of vop_rename
 * for filesystems that do not implement the rename operation.
 */
static int
vop_norename(struct vop_rename_args *ap)
{

	vop_rename_fail(ap);
	return (EOPNOTSUPP);
}

/*
 * vop_nostrategy:
 *
 * Strategy routine for VFS devices that have none.
 *
 * BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 * routine.  Typically this is done for a BIO_READ strategy call.
 * Typically B_INVAL is assumed to already be clear prior to a write
 * and should not be cleared manually unless you just made the buffer
 * invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vn_printf(ap->a_vp, "vnode ");
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

static int
get_next_dirent(struct vnode *vp, struct dirent **dpp, char *dirbuf,
		int dirbuflen, off_t *off, char **cpos, int *len,
		int *eofflag, struct thread *td)
{
	int error, reclen;
	struct uio uio;
	struct iovec iov;
	struct dirent *dp;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	if (*len == 0) {
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = *off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_td = td;

		*eofflag = 0;

#ifdef MAC
		error = mac_vnode_check_readdir(td->td_ucred, vp);
		if (error == 0)
#endif
			error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag,
			    NULL, NULL);
		if (error)
			return (error);

		*off = uio.uio_offset;

		*cpos = dirbuf;
		*len = (dirbuflen - uio.uio_resid);

		if (*len == 0)
			return (ENOENT);
	}

	dp = (struct dirent *)(*cpos);
	reclen = dp->d_reclen;
	*dpp = dp;

	/* check for malformed directory */
	if (reclen < DIRENT_MINSIZE)
		return (EINVAL);

	*cpos += reclen;
	*len -= reclen;

	return (0);
}

/*
 * Check if a named file exists in a given directory vnode.
 */
static int
dirent_exists(struct vnode *vp, const char *dirname, struct thread *td)
{
	char *dirbuf, *cpos;
	int error, eofflag, dirbuflen, len, found;
	off_t off;
	struct dirent *dp;
	struct vattr va;

	KASSERT(VOP_ISLOCKED(vp), ("vp %p is not locked", vp));
	KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

	found = 0;

	error = VOP_GETATTR(vp, &va, td->td_ucred);
	if (error)
		return (found);

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	off = 0;
	len = 0;
	do {
		error = get_next_dirent(vp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if (dp->d_type != DT_WHT && dp->d_fileno != 0 &&
		    strcmp(dp->d_name, dirname) == 0) {
			found = 1;
			goto out;
		}
	} while (len > 0 || !eofflag);

out:
	free(dirbuf, M_TEMP);
	return (found);
}

int
vop_stdaccess(struct vop_access_args *ap)
{

	KASSERT((ap->a_accmode & ~(VEXEC | VWRITE | VREAD | VADMIN |
	    VAPPEND)) == 0, ("invalid bit in accmode"));

	return (VOP_ACCESSX(ap->a_vp, ap->a_accmode, ap->a_cred, ap->a_td));
}

int
vop_stdaccessx(struct vop_accessx_args *ap)
{
	int error;
	accmode_t accmode = ap->a_accmode;

	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);

	if (accmode == 0)
		return (0);

	return (VOP_ACCESS(ap->a_vp, accmode, ap->a_cred, ap->a_td));
}
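
/*
 * Illustrative sketch, hypothetical "examplefs" names: a filesystem that
 * only implements the classic VOP_ACCESS can leave vop_accessx unset.
 * A VOP_ACCESSX call then reaches vop_stdaccessx() above, which folds the
 * extended accmode down to VEXEC/VWRITE/VREAD via vfs_unixify_accmode()
 * and forwards the result to the filesystem's VOP_ACCESS:
 *
 * static struct vop_vector examplefs_vnodeops = {
 *	.vop_default =	&default_vnodeops,
 *	.vop_access =	examplefs_access,
 * };
 */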

/*
 * Advisory record locking support
 */
int
vop_stdadvlock(struct vop_advlock_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/*
		 * The NFSv4 server must avoid doing a vn_lock() here, since it
		 * can deadlock the nfsd threads, due to a LOR.  Fortunately
		 * the NFSv4 server always uses SEEK_SET and this code is
		 * only required for the SEEK_END case.
		 */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockasync(struct vop_advlockasync_args *ap)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	vp = ap->a_vp;
	if (ap->a_fl->l_whence == SEEK_END) {
		/* The size argument is only needed for SEEK_END. */
		vn_lock(vp, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
		VOP_UNLOCK(vp, 0);
		if (error)
			return (error);
	} else
		vattr.va_size = 0;

	return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size));
}

int
vop_stdadvlockpurge(struct vop_advlockpurge_args *ap)
{
	struct vnode *vp;

	vp = ap->a_vp;
	lf_purgelocks(vp, &vp->v_lockf);
	return (0);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_ASYNC_IO:
		*ap->a_retval = _POSIX_ASYNCHRONOUS_IO;
		return (0);
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
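
/*
 * Illustrative sketch, hypothetical "examplefs" names and limit: a
 * filesystem with a tighter limit handles just that case itself and
 * punts everything else back to vop_stdpathconf():
 *
 * static int
 * examplefs_pathconf(struct vop_pathconf_args *ap)
 * {
 *
 *	switch (ap->a_name) {
 *	case _PC_NAME_MAX:
 *		*ap->a_retval = 12;	(assumed fs-specific name limit)
 *		return (0);
 *	default:
 *		return (vop_stdpathconf(ap));
 *	}
 * }
 */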

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
	    (ilk != NULL) ? &ilk->lock_object : NULL, ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct mtx *ilk;

	ilk = VI_MTX(vp);
	return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
	    (ilk != NULL) ? &ilk->lock_object : NULL));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{

	return (poll_no_poll(ap->a_events));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}
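
/*
 * Worked example for vop_stdbmap() above, with values assumed purely for
 * illustration: with f_iosize = 16384 and DEV_BSIZE = 512, btodb(16384)
 * is 32, so logical block a_bn = 3 maps to *a_bnp = 3 * 32 = 96
 * DEV_BSIZE-sized blocks into the file.  The zero a_runp/a_runb report
 * that no contiguous read-ahead/read-behind run is known.
 */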

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	bo = &vp->v_bufobj;
	BO_LOCK(bo);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
			if (ap->a_waitfor != MNT_WAIT)
				continue;
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_INTERLOCK | LK_SLEEPFAIL,
			    BO_LOCKPTR(bo)) != 0) {
				BO_LOCK(bo);
				goto loop1;
			}
			BO_LOCK(bo);
		}
		BO_UNLOCK(bo);
		KASSERT(bp->b_bufobj == bo,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, bo));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		BO_LOCK(bo);
		goto loop2;
	}

	/*
	 * If synchronous, the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) != 0)
					break;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	BO_UNLOCK(bo);
	if (error == EAGAIN)
		vn_printf(vp, "fsync: giving up on dirty ");

	return (error);
}

static int
vop_stdfdatasync(struct vop_fdatasync_args *ap)
{

	return (VOP_FSYNC(ap->a_vp, MNT_WAIT, ap->a_td));
}

int
vop_stdfdatasync_buf(struct vop_fdatasync_args *ap)
{
	struct vop_fsync_args apf;

	apf.a_vp = ap->a_vp;
	apf.a_waitfor = MNT_WAIT;
	apf.a_td = ap->a_td;
	return (vop_stdfsync(&apf));
}

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int *a_rbehind;
		int *a_rahead;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL);
}

static int
vop_stdgetpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = VOP_GETPAGES(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
	    ap->a_rahead);
	ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

int
vop_stdvptofh(struct vop_vptofh_args *ap)
{
	return (EOPNOTSUPP);
}

int
vop_stdvptocnp(struct vop_vptocnp_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vnode **dvp = ap->a_vpp;
	struct ucred *cred = ap->a_cred;
	char *buf = ap->a_buf;
	int *buflen = ap->a_buflen;
	char *dirbuf, *cpos;
	int i, error, eofflag, dirbuflen, flags, locked, len, covered;
	off_t off;
	ino_t fileno;
	struct vattr va;
	struct nameidata nd;
	struct thread *td;
	struct dirent *dp;
	struct vnode *mvp;

	i = *buflen;
	error = 0;
	covered = 0;
	td = curthread;

	if (vp->v_type != VDIR)
		return (ENOENT);

	error = VOP_GETATTR(vp, &va, cred);
	if (error)
		return (error);

	VREF(vp);
	locked = VOP_ISLOCKED(vp);
	VOP_UNLOCK(vp, 0);
	NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
	    "..", vp, td);
	flags = FREAD;
	error = vn_open_cred(&nd, &flags, 0, VN_OPEN_NOAUDIT, cred, NULL);
	if (error) {
		vn_lock(vp, locked | LK_RETRY);
		return (error);
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);

	mvp = *dvp = nd.ni_vp;

	if (vp->v_mount != (*dvp)->v_mount &&
	    ((*dvp)->v_vflag & VV_ROOT) &&
	    ((*dvp)->v_mount->mnt_flag & MNT_UNION)) {
		*dvp = (*dvp)->v_mount->mnt_vnodecovered;
		VREF(mvp);
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
		VREF(*dvp);
		vn_lock(*dvp, LK_SHARED | LK_RETRY);
		covered = 1;
	}

	fileno = va.va_fileid;

	dirbuflen = DEV_BSIZE;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

	if ((*dvp)->v_type != VDIR) {
		error = ENOENT;
		goto out;
	}

	off = 0;
	len = 0;
	do {
		/* call VOP_READDIR of parent */
		error = get_next_dirent(*dvp, &dp, dirbuf, dirbuflen, &off,
					&cpos, &len, &eofflag, td);
		if (error)
			goto out;

		if ((dp->d_type != DT_WHT) &&
		    (dp->d_fileno == fileno)) {
			if (covered) {
				VOP_UNLOCK(*dvp, 0);
				vn_lock(mvp, LK_SHARED | LK_RETRY);
				if (dirent_exists(mvp, dp->d_name, td)) {
					error = ENOENT;
					VOP_UNLOCK(mvp, 0);
					vn_lock(*dvp, LK_SHARED | LK_RETRY);
					goto out;
				}
				VOP_UNLOCK(mvp, 0);
				vn_lock(*dvp, LK_SHARED | LK_RETRY);
			}
			i -= dp->d_namlen;

			if (i < 0) {
				error = ENOMEM;
				goto out;
			}
			if (dp->d_namlen == 1 && dp->d_name[0] == '.') {
				error = ENOENT;
			} else {
				bcopy(dp->d_name, buf + i, dp->d_namlen);
				error = 0;
			}
			goto out;
		}
	} while (len > 0 || !eofflag);
	error = ENOENT;

out:
	free(dirbuf, M_TEMP);
	if (!error) {
		*buflen = i;
		vref(*dvp);
	}
	if (covered) {
		vput(*dvp);
		vrele(mvp);
	} else {
		VOP_UNLOCK(mvp, 0);
		vn_close(mvp, FREAD, cred, td);
	}
	vn_lock(vp, locked | LK_RETRY);
	return (error);
}
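
/*
 * Note on vop_stdvptocnp() above: the component name is written at the
 * end of the caller's buffer (buf + i) and *buflen is moved down rather
 * than up, because callers such as vn_fullpath(9) assemble a full path
 * right to left, one component per VOP_VPTOCNP call.
 */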

int
vop_stdallocate(struct vop_allocate_args *ap)
{
#ifdef __notyet__
	struct statfs *sfs;
	off_t maxfilesize = 0;
#endif
	struct iovec aiov;
	struct vattr vattr, *vap;
	struct uio auio;
	off_t fsize, len, cur, offset;
	uint8_t *buf;
	struct thread *td;
	struct vnode *vp;
	size_t iosize;
	int error;

	buf = NULL;
	error = 0;
	td = curthread;
	vap = &vattr;
	vp = ap->a_vp;
	len = *ap->a_len;
	offset = *ap->a_offset;

	error = VOP_GETATTR(vp, vap, td->td_ucred);
	if (error != 0)
		goto out;
	fsize = vap->va_size;
	iosize = vap->va_blocksize;
	if (iosize == 0)
		iosize = BLKDEV_IOSIZE;
	if (iosize > MAXPHYS)
		iosize = MAXPHYS;
	buf = malloc(iosize, M_TEMP, M_WAITOK);

#ifdef __notyet__
	/*
	 * Check if the filesystem sets f_maxfilesize; if not use
	 * VOP_SETATTR to perform the check.
	 */
	sfs = malloc(sizeof(struct statfs), M_STATFS, M_WAITOK);
	error = VFS_STATFS(vp->v_mount, sfs, td);
	if (error == 0)
		maxfilesize = sfs->f_maxfilesize;
	free(sfs, M_STATFS);
	if (error != 0)
		goto out;
	if (maxfilesize) {
		if (offset > maxfilesize || len > maxfilesize ||
		    offset + len > maxfilesize) {
			error = EFBIG;
			goto out;
		}
	} else
#endif
	if (offset + len > vap->va_size) {
		/*
		 * Test offset + len against the filesystem's maxfilesize.
		 */
		VATTR_NULL(vap);
		vap->va_size = offset + len;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
		VATTR_NULL(vap);
		vap->va_size = fsize;
		error = VOP_SETATTR(vp, vap, td->td_ucred);
		if (error != 0)
			goto out;
	}

	for (;;) {
		/*
		 * Read and write back anything below the nominal file
		 * size.  There's currently no way outside the filesystem
		 * to know whether this area is sparse or not.
		 */
		cur = iosize;
		if ((offset % iosize) != 0)
			cur -= (offset % iosize);
		if (cur > len)
			cur = len;
		if (offset < fsize) {
			aiov.iov_base = buf;
			aiov.iov_len = cur;
			auio.uio_iov = &aiov;
			auio.uio_iovcnt = 1;
			auio.uio_offset = offset;
			auio.uio_resid = cur;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_td = td;
			error = VOP_READ(vp, &auio, 0, td->td_ucred);
			if (error != 0)
				break;
			if (auio.uio_resid > 0) {
				bzero(buf + cur - auio.uio_resid,
				    auio.uio_resid);
			}
		} else {
			bzero(buf, cur);
		}

		aiov.iov_base = buf;
		aiov.iov_len = cur;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = offset;
		auio.uio_resid = cur;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_WRITE;
		auio.uio_td = td;

		error = VOP_WRITE(vp, &auio, 0, td->td_ucred);
		if (error != 0)
			break;

		len -= cur;
		offset += cur;
		if (len == 0)
			break;
		if (should_yield())
			break;
	}

out:
	*ap->a_len = len;
	*ap->a_offset = offset;
	free(buf, M_TEMP);
	return (error);
}
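
/*
 * Chunking arithmetic in vop_stdallocate() above, worked through with
 * values assumed for illustration: with iosize = 65536 and offset = 1000,
 * the first pass uses cur = 65536 - (1000 % 65536) = 64536, which brings
 * offset up to 65536; every later pass is then a full, block-aligned
 * iosize chunk (or the remaining tail of len, whichever is smaller).
 */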

int
vop_stdadvise(struct vop_advise_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	daddr_t startn, endn;
	off_t start, end;
	int bsize, error;

	vp = ap->a_vp;
	switch (ap->a_advice) {
	case POSIX_FADV_WILLNEED:
		/*
		 * Do nothing for now.  Filesystems should provide a
		 * custom method which starts an asynchronous read of
		 * the requested region.
		 */
		error = 0;
		break;
	case POSIX_FADV_DONTNEED:
		error = 0;
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (vp->v_iflag & VI_DOOMED) {
			VOP_UNLOCK(vp, 0);
			break;
		}

		/*
		 * Deactivate pages in the specified range from the backing VM
		 * object.  Pages that are resident in the buffer cache will
		 * remain wired until their corresponding buffers are released
		 * below.
		 */
		if (vp->v_object != NULL) {
			start = trunc_page(ap->a_start);
			end = round_page(ap->a_end);
			VM_OBJECT_RLOCK(vp->v_object);
			vm_object_page_noreuse(vp->v_object, OFF_TO_IDX(start),
			    OFF_TO_IDX(end));
			VM_OBJECT_RUNLOCK(vp->v_object);
		}

		bo = &vp->v_bufobj;
		BO_RLOCK(bo);
		bsize = vp->v_bufobj.bo_bsize;
		startn = ap->a_start / bsize;
		endn = ap->a_end / bsize;
		error = bnoreuselist(&bo->bo_clean, bo, startn, endn);
		if (error == 0)
			error = bnoreuselist(&bo->bo_dirty, bo, startn, endn);
		BO_RUNLOCK(bo);
		VOP_UNLOCK(vp, 0);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
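
/*
 * Illustrative userland trigger, not part of this file: the
 * POSIX_FADV_DONTNEED path in vop_stdadvise() above is what ultimately
 * services a call such as posix_fadvise(fd, off, len, POSIX_FADV_DONTNEED)
 * on a file whose filesystem does not override vop_advise.
 */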

int
vop_stdunp_bind(struct vop_unp_bind_args *ap)
{

	ap->a_vp->v_unpcb = ap->a_unpcb;
	return (0);
}

int
vop_stdunp_connect(struct vop_unp_connect_args *ap)
{

	*ap->a_unpcb = ap->a_vp->v_unpcb;
	return (0);
}

int
vop_stdunp_detach(struct vop_unp_detach_args *ap)
{

	ap->a_vp->v_unpcb = NULL;
	return (0);
}

static int
vop_stdis_text(struct vop_is_text_args *ap)
{

	return ((ap->a_vp->v_vflag & VV_TEXT) != 0);
}

static int
vop_stdset_text(struct vop_set_text_args *ap)
{

	ap->a_vp->v_vflag |= VV_TEXT;
	return (0);
}

static int
vop_stdunset_text(struct vop_unset_text_args *ap)
{

	ap->a_vp->v_vflag &= ~VV_TEXT;
	return (0);
}

static int
vop_stdget_writecount(struct vop_get_writecount_args *ap)
{

	*ap->a_writecount = ap->a_vp->v_writecount;
	return (0);
}

static int
vop_stdadd_writecount(struct vop_add_writecount_args *ap)
{

	ap->a_vp->v_writecount += ap->a_inc;
	return (0);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp)
	struct mount *mp;
	struct statfs *sbp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, lockreq, allerror = 0;

	td = curthread;
	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp, lockreq, td)) != 0) {
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		vput(vp);
	}
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor)
	struct mount *mp;
	int waitfor;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */