/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

static int	vop_nolookup(struct vop_lookup_args *);
static int	vop_nostrategy(struct vop_strategy_args *);

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 */

struct vop_vector default_vnodeops = {
	.vop_default =		NULL,
	.vop_bypass =		VOP_EOPNOTSUPP,

	.vop_advlock =		VOP_EINVAL,
	.vop_bmap =		vop_stdbmap,
	.vop_close =		VOP_NULL,
	.vop_fsync =		VOP_NULL,
	.vop_getpages =		vop_stdgetpages,
	.vop_getwritemount =	vop_stdgetwritemount,
	.vop_inactive =		VOP_NULL,
	.vop_ioctl =		VOP_ENOTTY,
	.vop_kqfilter =		vop_stdkqfilter,
	.vop_islocked =		vop_stdislocked,
	.vop_lease =		VOP_NULL,
	._vop_lock =		vop_stdlock,
	.vop_lookup =		vop_nolookup,
	.vop_open =		VOP_NULL,
	.vop_pathconf =		VOP_EINVAL,
	.vop_poll =		vop_nopoll,
	.vop_putpages =		vop_stdputpages,
	.vop_readlink =		VOP_EINVAL,
	.vop_revoke =		VOP_PANIC,
	.vop_strategy =		vop_nostrategy,
	.vop_unlock =		vop_stdunlock,
};

/*
 * Series of placeholder functions for various error returns for
 * VOPs.
 */

int
vop_eopnotsupp(struct vop_generic_args *ap)
{
	/*
	printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
	*/

	return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

	return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

	return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

	return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

	return (0);
}

/*
 * Helper function to panic on some bad VOPs in some filesystems.
 */
int
vop_panic(struct vop_generic_args *ap)
{

	panic("filesystem goof: vop_panic[%s]", ap->a_desc->vdesc_name);
}

/*
 * vop_std<something> and vop_no<something> are default functions for use by
 * filesystems that need the "default reasonable" implementation for a
 * particular operation.
 *
 * Documentation for the operations they implement, where it exists, is in
 * the VOP_<SOMETHING>(9) manpage (all uppercase).
 */

/*
 * Default vop for filesystems that do not support name lookup
 */
static int
vop_nolookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{

	*ap->a_vpp = NULL;
	return (ENOTDIR);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
	printf("No strategy for buffer at %p\n", ap->a_bp);
	vprint("vnode", ap->a_vp);
	ap->a_bp->b_ioflags |= BIO_ERROR;
	ap->a_bp->b_error = EOPNOTSUPP;
	bufdone(ap->a_bp);
	return (EOPNOTSUPP);
}

/*
 * vop_stdpathconf:
 *
 * Standard implementation of POSIX pathconf, to get information about limits
 * for a filesystem.
 * Override per filesystem for the case where the filesystem has smaller
 * limits.
 */
int
vop_stdpathconf(ap)
	struct vop_pathconf_args /* {
		struct vnode *a_vp;
		int a_name;
		int *a_retval;
	} */ *ap;
{

	switch (ap->a_name) {
	case _PC_NAME_MAX:
		*ap->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*ap->a_retval = PATH_MAX;
		return (0);
	case _PC_LINK_MAX:
		*ap->a_retval = LINK_MAX;
		return (0);
	case _PC_MAX_CANON:
		*ap->a_retval = MAX_CANON;
		return (0);
	case _PC_MAX_INPUT:
		*ap->a_retval = MAX_INPUT;
		return (0);
	case _PC_PIPE_BUF:
		*ap->a_retval = PIPE_BUF;
		return (0);
	case _PC_CHOWN_RESTRICTED:
		*ap->a_retval = 1;
		return (0);
	case _PC_VDISABLE:
		*ap->a_retval = _POSIX_VDISABLE;
		return (0);
	default:
		return (EINVAL);
	}
	/* NOTREACHED */
}
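
/*
 * Illustrative sketch (myfs_* and MYFS_NAME_MAX are hypothetical): a
 * filesystem with a smaller name limit would answer that one query itself
 * and fall back to vop_stdpathconf() for the rest:
 *
 *	static int
 *	myfs_pathconf(struct vop_pathconf_args *ap)
 *	{
 *
 *		if (ap->a_name == _PC_NAME_MAX) {
 *			*ap->a_retval = MYFS_NAME_MAX;
 *			return (0);
 *		}
 *		return (vop_stdpathconf(ap));
 *	}
 */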

/*
 * Standard lock, unlock and islocked functions.
 */
int
vop_stdlock(ap)
	struct _vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (_lockmgr(vp->v_vnlock, ap->a_flags, VI_MTX(vp), ap->a_td,
	    ap->a_file, ap->a_line));
}

/* See above. */
int
vop_stdunlock(ap)
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp),
	    ap->a_td));
}

/* See above. */
int
vop_stdislocked(ap)
	struct vop_islocked_args /* {
		struct vnode *a_vp;
		struct thread *a_td;
	} */ *ap;
{

	return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
}

/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with kern_conf.c::no_poll().
	 */
	if (ap->a_events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct ucred *a_cred;
		struct thread *a_td;
	} */ *ap;
{
	if (ap->a_events & ~POLLSTANDARD)
		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Return our mount point, as we will take charge of the writes.
 */
int
vop_stdgetwritemount(ap)
	struct vop_getwritemount_args /* {
		struct vnode *a_vp;
		struct mount **a_mpp;
	} */ *ap;
{
	struct mount *mp;

	/*
	 * XXX Since this is called unlocked we may be recycled while
	 * attempting to ref the mount.  If this is the case our mountpoint
	 * will be set to NULL.  We only have to prevent this call from
	 * returning with a ref to an incorrect mountpoint.  It is not
	 * harmful to return with a ref to our previous mountpoint.
	 */
	mp = ap->a_vp->v_mount;
	if (mp != NULL) {
		vfs_ref(mp);
		if (mp != ap->a_vp->v_mount) {
			vfs_rel(mp);
			mp = NULL;
		}
	}
	*(ap->a_mpp) = mp;
	return (0);
}

/* XXX Needs good comment and VOP_BMAP(9) manpage */
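/*
 * vop_stdbmap() below assumes a trivial 1:1 mapping between a vnode's
 * logical blocks and its own buffer object: it converts the logical block
 * number into DEV_BSIZE units and reports no read-ahead or read-behind
 * runs.  For example, with an f_iosize of 8192 and DEV_BSIZE of 512,
 * btodb(8192) == 16, so logical block 3 maps to block 48.
 */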
int
vop_stdbmap(ap)
	struct vop_bmap_args /* {
		struct vnode *a_vp;
		daddr_t a_bn;
		struct bufobj **a_bop;
		daddr_t *a_bnp;
		int *a_runp;
		int *a_runb;
	} */ *ap;
{

	if (ap->a_bop != NULL)
		*ap->a_bop = &ap->a_vp->v_bufobj;
	if (ap->a_bnp != NULL)
		*ap->a_bnp = ap->a_bn * btodb(ap->a_vp->v_mount->mnt_stat.f_iosize);
	if (ap->a_runp != NULL)
		*ap->a_runp = 0;
	if (ap->a_runb != NULL)
		*ap->a_runb = 0;
	return (0);
}

int
vop_stdfsync(ap)
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct thread *a_td;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct buf *bp;
	struct bufobj *bo;
	struct buf *nbp;
	int error = 0;
	int maxretry = 1000;	/* large, arbitrarily chosen */

	VI_LOCK(vp);
loop1:
	/*
	 * MARK/SCAN initialization to avoid infinite loops.
	 */
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
		bp->b_vflags &= ~BV_SCANNED;
		bp->b_error = 0;
	}

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
loop2:
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		KASSERT(bp->b_bufobj == &vp->v_bufobj,
		    ("bp %p wrong b_bufobj %p should be %p",
		    bp, bp->b_bufobj, &vp->v_bufobj));
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("fsync: not dirty");
		if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) {
			vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			bawrite(bp);
		}
		VI_LOCK(vp);
		goto loop2;
	}

	/*
	 * If synchronous the caller expects us to completely resolve all
	 * dirty buffers in the system.  Wait for in-progress I/O to
	 * complete (which could include background bitmap writes), then
	 * retry if dirty blocks still exist.
	 */
	if (ap->a_waitfor == MNT_WAIT) {
		bo = &vp->v_bufobj;
		bufobj_wwait(bo, 0, 0);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * If we are unable to write any of these buffers
			 * then we fail now rather than trying endlessly
			 * to write them out.
			 */
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
				if ((error = bp->b_error) == 0)
					continue;
			if (error == 0 && --maxretry >= 0)
				goto loop1;
			error = EAGAIN;
		}
	}
	VI_UNLOCK(vp);
	if (error == EAGAIN)
		vprint("fsync: giving up on dirty", vp);

	return (error);
}
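
/*
 * Illustrative sketch, not taken from any real filesystem: a filesystem
 * whose fsync only needs to flush dirty buffers can point .vop_fsync at
 * vop_stdfsync directly, or call it after syncing its own state first
 * (myfs_fsync and myfs_flush_metadata are hypothetical):
 *
 *	static int
 *	myfs_fsync(struct vop_fsync_args *ap)
 *	{
 *
 *		myfs_flush_metadata(ap->a_vp);
 *		return (vop_stdfsync(ap));
 *	}
 */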

/* XXX Needs good comment and more info in the manpage (VOP_GETPAGES(9)). */
int
vop_stdgetpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage);
}

int
vop_stdkqfilter(struct vop_kqfilter_args *ap)
{
	return vfs_kqfilter(ap);
}

/* XXX Needs good comment and more info in the manpage (VOP_PUTPAGES(9)). */
int
vop_stdputpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{

	return vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
}

/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdroot (mp, flags, vpp, td)
	struct mount *mp;
	int flags;
	struct vnode **vpp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, td)
	struct mount *mp;
	struct statfs *sbp;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, td)
	struct mount *mp;
	int cmds;
	uid_t uid;
	void *arg;
	struct thread *td;
{

	return (EOPNOTSUPP);
}

int
vfs_stdsync(mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{
	struct vnode *vp, *mvp;
	int error, lockreq, allerror = 0;

	lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
	if (waitfor != MNT_WAIT)
		lockreq |= LK_NOWAIT;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);

		if ((error = vget(vp, lockreq, td)) != 0) {
			MNT_ILOCK(mp);
			if (error == ENOENT) {
				MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;

		/* Do not turn this into vput.  td is not always curthread. */
		VOP_UNLOCK(vp, 0, td);
		vrele(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
	return (allerror);
}

int
vfs_stdnosync (mp, waitfor, td)
	struct mount *mp;
	int waitfor;
	struct thread *td;
{

	return (0);
}

int
vfs_stdvget (mp, ino, flags, vpp)
	struct mount *mp;
	ino_t ino;
	int flags;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{

	return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stduninit (vfsp)
	struct vfsconf *vfsp;
{

	return (0);
}

int
vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace, attrname, td)
	struct mount *mp;
	int cmd;
	struct vnode *filename_vp;
	int attrnamespace;
	const char *attrname;
	struct thread *td;
{

	if (filename_vp != NULL)
		VOP_UNLOCK(filename_vp, 0, td);
	return (EOPNOTSUPP);
}

int
vfs_stdsysctl(mp, op, req)
	struct mount *mp;
	fsctlop_t op;
	struct sysctl_req *req;
{

	return (EOPNOTSUPP);
}

/* end of vfs default ops */
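
/*
 * Illustrative sketch (the myfs_* names are hypothetical): a filesystem's
 * vfsops table can plug the vfs_std* functions in for operations it does
 * not implement itself:
 *
 *	static struct vfsops myfs_vfsops = {
 *		.vfs_mount =	myfs_mount,
 *		.vfs_unmount =	myfs_unmount,
 *		.vfs_root =	myfs_root,
 *		.vfs_statfs =	myfs_statfs,
 *		.vfs_sync =	vfs_stdsync,
 *		.vfs_vget =	vfs_stdvget,
 *	};
 *	VFS_SET(myfs_vfsops, myfs, 0);
 */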