/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/poll.h>

static int	vop_nostrategy __P((struct vop_strategy_args *));

/*
 * This vnode table stores what we want to do if the filesystem doesn't
 * implement a particular VOP.
 *
 * If there is no specific entry here, we will return EOPNOTSUPP.
 *
 */

vop_t **default_vnodeop_p;
static struct vnodeopv_entry_desc default_vnodeop_entries[] = {
        { &vop_default_desc,            (vop_t *) vop_eopnotsupp },
        { &vop_advlock_desc,            (vop_t *) vop_einval },
        { &vop_bwrite_desc,             (vop_t *) vop_stdbwrite },
        { &vop_close_desc,              (vop_t *) vop_null },
        { &vop_fsync_desc,              (vop_t *) vop_null },
        { &vop_ioctl_desc,              (vop_t *) vop_enotty },
        { &vop_islocked_desc,           (vop_t *) vop_noislocked },
        { &vop_lease_desc,              (vop_t *) vop_null },
        { &vop_lock_desc,               (vop_t *) vop_nolock },
        { &vop_mmap_desc,               (vop_t *) vop_einval },
        { &vop_open_desc,               (vop_t *) vop_null },
        { &vop_pathconf_desc,           (vop_t *) vop_einval },
        { &vop_poll_desc,               (vop_t *) vop_nopoll },
        { &vop_readlink_desc,           (vop_t *) vop_einval },
        { &vop_reallocblks_desc,        (vop_t *) vop_eopnotsupp },
        { &vop_revoke_desc,             (vop_t *) vop_revoke },
        { &vop_strategy_desc,           (vop_t *) vop_nostrategy },
        { &vop_unlock_desc,             (vop_t *) vop_nounlock },
        { &vop_getacl_desc,             (vop_t *) vop_eopnotsupp },
        { &vop_setacl_desc,             (vop_t *) vop_eopnotsupp },
        { &vop_aclcheck_desc,           (vop_t *) vop_eopnotsupp },
        { &vop_getextattr_desc,         (vop_t *) vop_eopnotsupp },
        { &vop_setextattr_desc,         (vop_t *) vop_eopnotsupp },
        { NULL, NULL }
};

static struct vnodeopv_desc default_vnodeop_opv_desc =
        { &default_vnodeop_p, default_vnodeop_entries };

VNODEOP_SET(default_vnodeop_opv_desc);
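/*
 * A minimal sketch (hypothetical "myfs"; not part of this file) of how a
 * filesystem's op vector cooperates with the table above: routing its
 * default entry to vop_defaultop makes every VOP the filesystem does not
 * implement itself fall through to default_vnodeop_p.
 */
#if 0
static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
        { &vop_default_desc,    (vop_t *) vop_defaultop },
        { &vop_open_desc,       (vop_t *) myfs_open },  /* hypothetical */
        { NULL, NULL }
};
#endif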
int
vop_eopnotsupp(struct vop_generic_args *ap)
{
        /*
        printf("vop_notsupp[%s]\n", ap->a_desc->vdesc_name);
        */

        return (EOPNOTSUPP);
}

int
vop_ebadf(struct vop_generic_args *ap)
{

        return (EBADF);
}

int
vop_enotty(struct vop_generic_args *ap)
{

        return (ENOTTY);
}

int
vop_einval(struct vop_generic_args *ap)
{

        return (EINVAL);
}

int
vop_null(struct vop_generic_args *ap)
{

        return (0);
}

int
vop_defaultop(struct vop_generic_args *ap)
{

        return (VOCALL(default_vnodeop_p, ap->a_desc->vdesc_offset, ap));
}

int
vop_panic(struct vop_generic_args *ap)
{

        printf("vop_panic[%s]\n", ap->a_desc->vdesc_name);
        panic("Filesystem goof");
        return (0);
}

/*
 * vop_nostrategy:
 *
 *	Strategy routine for VFS devices that have none.
 *
 *	BIO_ERROR and B_INVAL must be cleared prior to calling any strategy
 *	routine.  Typically this is done for a BIO_READ strategy call.
 *	Typically B_INVAL is assumed to already be clear prior to a write
 *	and should not be cleared manually unless you just made the buffer
 *	invalid.  BIO_ERROR should be cleared either way.
 */

static int
vop_nostrategy (struct vop_strategy_args *ap)
{
        printf("No strategy for buffer at %p\n", ap->a_bp);
        vprint("", ap->a_vp);
        vprint("", ap->a_bp->b_vp);
        ap->a_bp->b_ioflags |= BIO_ERROR;
        ap->a_bp->b_error = EOPNOTSUPP;
        bufdone(ap->a_bp);
        return (EOPNOTSUPP);
}
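/*
 * A caller-side sketch (hypothetical; assembled from the conventions
 * described in the comment above) of issuing a read through VOP_STRATEGY:
 * BIO_ERROR is always cleared first, while B_INVAL is cleared only because
 * this caller just invalidated the buffer itself.
 */
#if 0
        bp->b_iocmd = BIO_READ;
        bp->b_ioflags &= ~BIO_ERROR;    /* always clear before strategy */
        bp->b_flags &= ~B_INVAL;        /* only if we just made it invalid */
        VOP_STRATEGY(vp, bp);
        error = bufwait(bp);            /* wait for bufdone() */
#endif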
int
vop_stdpathconf(ap)
        struct vop_pathconf_args /* {
        struct vnode *a_vp;
        int a_name;
        int *a_retval;
        } */ *ap;
{

        switch (ap->a_name) {
        case _PC_LINK_MAX:
                *ap->a_retval = LINK_MAX;
                return (0);
        case _PC_MAX_CANON:
                *ap->a_retval = MAX_CANON;
                return (0);
        case _PC_MAX_INPUT:
                *ap->a_retval = MAX_INPUT;
                return (0);
        case _PC_PIPE_BUF:
                *ap->a_retval = PIPE_BUF;
                return (0);
        case _PC_CHOWN_RESTRICTED:
                *ap->a_retval = 1;
                return (0);
        case _PC_VDISABLE:
                *ap->a_retval = _POSIX_VDISABLE;
                return (0);
        default:
                return (EINVAL);
        }
        /* NOTREACHED */
}

/*
 * Standard lock, unlock and islocked functions.
 *
 * These depend on the lock structure being the first element in the
 * inode, ie: vp->v_data points to the lock!
 */
int
vop_stdlock(ap)
        struct vop_lock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap;
{
        struct lock *l;

        if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
                if (ap->a_flags & LK_INTERLOCK)
                        simple_unlock(&ap->a_vp->v_interlock);
                return (0);
        }

#ifndef DEBUG_LOCKS
        return (lockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p));
#else
        return (debuglockmgr(l, ap->a_flags, &ap->a_vp->v_interlock, ap->a_p,
            "vop_stdlock", ap->a_vp->filename, ap->a_vp->line));
#endif
}

int
vop_stdunlock(ap)
        struct vop_unlock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap;
{
        struct lock *l;

        if ((l = (struct lock *)ap->a_vp->v_data) == NULL) {
                if (ap->a_flags & LK_INTERLOCK)
                        simple_unlock(&ap->a_vp->v_interlock);
                return (0);
        }

        return (lockmgr(l, ap->a_flags | LK_RELEASE, &ap->a_vp->v_interlock,
            ap->a_p));
}

int
vop_stdislocked(ap)
        struct vop_islocked_args /* {
                struct vnode *a_vp;
                struct proc *a_p;
        } */ *ap;
{
        struct lock *l;

        if ((l = (struct lock *)ap->a_vp->v_data) == NULL)
                return (0);

        return (lockstatus(l, ap->a_p));
}
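/*
 * A minimal sketch (hypothetical "myfs" inode; not part of this file) of
 * the layout the three functions above assume: the struct lock must be the
 * first member, so the cast from vp->v_data to a lock pointer is valid.
 */
#if 0
struct myfs_inode {
        struct lock     i_lock;         /* must be first: v_data points here */
        ino_t           i_number;       /* filesystem-specific fields follow */
};
#endif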
/*
 * Return true for select/poll.
 */
int
vop_nopoll(ap)
        struct vop_poll_args /* {
                struct vnode *a_vp;
                int  a_events;
                struct ucred *a_cred;
                struct proc *a_p;
        } */ *ap;
{
        /*
         * Return true for read/write.  If the user asked for something
         * special, return POLLNVAL, so that clients have a way of
         * determining reliably whether or not the extended
         * functionality is present without hard-coding knowledge
         * of specific filesystem implementations.
         */
        if (ap->a_events & ~POLLSTANDARD)
                return (POLLNVAL);

        return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

/*
 * Implement poll for local filesystems that support it.
 */
int
vop_stdpoll(ap)
        struct vop_poll_args /* {
                struct vnode *a_vp;
                int  a_events;
                struct ucred *a_cred;
                struct proc *a_p;
        } */ *ap;
{
        if ((ap->a_events & ~POLLSTANDARD) == 0)
                return (ap->a_events & (POLLRDNORM|POLLWRNORM));
        return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
}

int
vop_stdbwrite(ap)
        struct vop_bwrite_args *ap;
{
        return (bwrite(ap->a_bp));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_sharedlock(ap)
        struct vop_lock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap;
{
        /*
         * This code cannot be used until all the non-locking filesystems
         * (notably NFS) are converted to properly lock and release nodes.
         * Also, certain vnode operations change the locking state within
         * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
         * and symlink).  Ideally these operations should not change the
         * lock state, but should be changed to let the caller of the
         * function unlock them.  Otherwise all intermediate vnode layers
         * (such as union, umapfs, etc) must catch these functions to do
         * the necessary locking at their layer.  Note that the inactive
         * and lookup operations also change their lock state, but this
         * cannot be avoided, so these two operations will always need
         * to be handled in intermediate layers.
         */
        struct vnode *vp = ap->a_vp;
        int vnflags, flags = ap->a_flags;

        if (vp->v_vnlock == NULL) {
                if ((flags & LK_TYPE_MASK) == LK_DRAIN)
                        return (0);
                MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
                    M_VNODE, M_WAITOK);
                lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
        }
        switch (flags & LK_TYPE_MASK) {
        case LK_DRAIN:
                vnflags = LK_DRAIN;
                break;
        case LK_EXCLUSIVE:
#ifdef DEBUG_VFS_LOCKS
                /*
                 * Normally, we use shared locks here, but that confuses
                 * the locking assertions.
                 */
                vnflags = LK_EXCLUSIVE;
                break;
#endif
        case LK_SHARED:
                vnflags = LK_SHARED;
                break;
        case LK_UPGRADE:
        case LK_EXCLUPGRADE:
        case LK_DOWNGRADE:
                return (0);
        case LK_RELEASE:
        default:
                panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
        }
        if (flags & LK_INTERLOCK)
                vnflags |= LK_INTERLOCK;
#ifndef DEBUG_LOCKS
        return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else
        return (debuglockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p,
            "vop_sharedlock", vp->filename, vp->line));
#endif
}
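/*
 * A caller-side sketch (hypothetical) of the LK_INTERLOCK convention the
 * lock routines above honor: the caller may hold the vnode interlock
 * across the VOP_LOCK call, and the lock routine releases it.
 */
#if 0
        simple_lock(&vp->v_interlock);
        error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK, p);
#endif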
/*
 * Stubs to use when there is no locking to be done on the underlying object.
 * A minimal shared lock is necessary to ensure that the underlying object
 * is not revoked while an operation is in progress.  So, an active shared
 * count is maintained in an auxiliary vnode lock structure.
 */
int
vop_nolock(ap)
        struct vop_lock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap;
{
#ifdef notyet
        /*
         * This code cannot be used until all the non-locking filesystems
         * (notably NFS) are converted to properly lock and release nodes.
         * Also, certain vnode operations change the locking state within
         * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
         * and symlink).  Ideally these operations should not change the
         * lock state, but should be changed to let the caller of the
         * function unlock them.  Otherwise all intermediate vnode layers
         * (such as union, umapfs, etc) must catch these functions to do
         * the necessary locking at their layer.  Note that the inactive
         * and lookup operations also change their lock state, but this
         * cannot be avoided, so these two operations will always need
         * to be handled in intermediate layers.
         */
        struct vnode *vp = ap->a_vp;
        int vnflags, flags = ap->a_flags;

        if (vp->v_vnlock == NULL) {
                if ((flags & LK_TYPE_MASK) == LK_DRAIN)
                        return (0);
                MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
                    M_VNODE, M_WAITOK);
                lockinit(vp->v_vnlock, PVFS, "vnlock", 0, LK_NOPAUSE);
        }
        switch (flags & LK_TYPE_MASK) {
        case LK_DRAIN:
                vnflags = LK_DRAIN;
                break;
        case LK_EXCLUSIVE:
        case LK_SHARED:
                vnflags = LK_SHARED;
                break;
        case LK_UPGRADE:
        case LK_EXCLUPGRADE:
        case LK_DOWNGRADE:
                return (0);
        case LK_RELEASE:
        default:
                panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
        }
        if (flags & LK_INTERLOCK)
                vnflags |= LK_INTERLOCK;
        return (lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
#else /* for now */
        /*
         * Since we are not using the lock manager, we must clear
         * the interlock here.
         */
        if (ap->a_flags & LK_INTERLOCK)
                simple_unlock(&ap->a_vp->v_interlock);
        return (0);
#endif
}

/*
 * Do the inverse of vop_nolock, handling the interlock in a compatible way.
 */
int
vop_nounlock(ap)
        struct vop_unlock_args /* {
                struct vnode *a_vp;
                int a_flags;
                struct proc *a_p;
        } */ *ap;
{
        struct vnode *vp = ap->a_vp;

        if (vp->v_vnlock == NULL) {
                if (ap->a_flags & LK_INTERLOCK)
                        simple_unlock(&ap->a_vp->v_interlock);
                return (0);
        }
        return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
            &ap->a_vp->v_interlock, ap->a_p));
}

/*
 * Return whether or not the node is in use.
 */
int
vop_noislocked(ap)
        struct vop_islocked_args /* {
                struct vnode *a_vp;
                struct proc *a_p;
        } */ *ap;
{
        struct vnode *vp = ap->a_vp;

        if (vp->v_vnlock == NULL)
                return (0);
        return (lockstatus(vp->v_vnlock, ap->a_p));
}
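/*
 * A minimal sketch (hypothetical op-vector fragment; not part of this
 * file) of how a filesystem with no locking of its own wires in the three
 * stubs above, mirroring the entries in the default table at the top of
 * this file.
 */
#if 0
        { &vop_islocked_desc,   (vop_t *) vop_noislocked },
        { &vop_lock_desc,       (vop_t *) vop_nolock },
        { &vop_unlock_desc,     (vop_t *) vop_nounlock },
#endif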
/*
 * vfs default ops
 * used to fill the vfs function table to get reasonable default return values.
 */
int
vfs_stdmount (mp, path, data, ndp, p)
        struct mount *mp;
        char *path;
        caddr_t data;
        struct nameidata *ndp;
        struct proc *p;
{
        return (0);
}

int
vfs_stdunmount (mp, mntflags, p)
        struct mount *mp;
        int mntflags;
        struct proc *p;
{
        return (0);
}

int
vfs_stdroot (mp, vpp)
        struct mount *mp;
        struct vnode **vpp;
{
        return (EOPNOTSUPP);
}

int
vfs_stdstatfs (mp, sbp, p)
        struct mount *mp;
        struct statfs *sbp;
        struct proc *p;
{
        return (EOPNOTSUPP);
}

int
vfs_stdvptofh (vp, fhp)
        struct vnode *vp;
        struct fid *fhp;
{
        return (EOPNOTSUPP);
}

int
vfs_stdstart (mp, flags, p)
        struct mount *mp;
        int flags;
        struct proc *p;
{
        return (0);
}

int
vfs_stdquotactl (mp, cmds, uid, arg, p)
        struct mount *mp;
        int cmds;
        uid_t uid;
        caddr_t arg;
        struct proc *p;
{
        return (EOPNOTSUPP);
}

int
vfs_stdsync (mp, waitfor, cred, p)
        struct mount *mp;
        int waitfor;
        struct ucred *cred;
        struct proc *p;
{
        return (0);
}

int
vfs_stdvget (mp, ino, vpp)
        struct mount *mp;
        ino_t ino;
        struct vnode **vpp;
{
        return (EOPNOTSUPP);
}

int
vfs_stdfhtovp (mp, fhp, vpp)
        struct mount *mp;
        struct fid *fhp;
        struct vnode **vpp;
{
        return (EOPNOTSUPP);
}

int
vfs_stdcheckexp (mp, nam, extflagsp, credanonp)
        struct mount *mp;
        struct sockaddr *nam;
        int *extflagsp;
        struct ucred **credanonp;
{
        return (EOPNOTSUPP);
}

int
vfs_stdinit (vfsp)
        struct vfsconf *vfsp;
{
        return (0);
}

int
vfs_stduninit (vfsp)
        struct vfsconf *vfsp;
{
        return (0);
}

int
vfs_stdextattrctl(mp, cmd, attrname, arg, p)
        struct mount *mp;
        int cmd;
        const char *attrname;
        caddr_t arg;
        struct proc *p;
{
        return (EOPNOTSUPP);
}

/* end of vfs default ops */
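/*
 * A minimal sketch (hypothetical "myfs"; names and field order are
 * illustrative assumptions, not taken from this file) of a vfsops table
 * that supplies its own mount/unmount but leans on the defaults above for
 * every operation it does not care about.
 */
#if 0
static struct vfsops myfs_vfsops = {
        myfs_mount,             /* hypothetical */
        vfs_stdstart,
        myfs_unmount,           /* hypothetical */
        vfs_stdroot,
        vfs_stdquotactl,
        vfs_stdstatfs,
        vfs_stdsync,
        vfs_stdvget,
        vfs_stdfhtovp,
        vfs_stdcheckexp,
        vfs_stdvptofh,
        vfs_stdinit,
        vfs_stduninit,
        vfs_stdextattrctl,
};
#endif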