/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 * @(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t ffs_fsync;
static _vop_lock_t ffs_lock;
static vop_getpages_t ffs_getpages;
static vop_read_t ffs_read;
static vop_write_t ffs_write;
static int ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred);
static vop_strategy_t ffsext_strategy;
static vop_closeextattr_t ffs_closeextattr;
static vop_deleteextattr_t ffs_deleteextattr;
static vop_getextattr_t ffs_getextattr;
static vop_listextattr_t ffs_listextattr;
static vop_openextattr_t ffs_openextattr;
static vop_setextattr_t ffs_setextattr;
static vop_vptofh_t ffs_vptofh;

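/*
 * Two sets of vnode operation vectors are defined below: ffs_vnodeops1 and
 * ffs_fifoops1 are used for UFS1 file systems and omit the native extended
 * attribute operations, while ffs_vnodeops2 and ffs_fifoops2 are used for
 * UFS2 and include them.
 */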
/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default = &ufs_vnodeops,
	.vop_fsync = ffs_fsync,
	.vop_getpages = ffs_getpages,
	._vop_lock = ffs_lock,
	.vop_read = ffs_read,
	.vop_reallocblks = ffs_reallocblks,
	.vop_write = ffs_write,
	.vop_vptofh = ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default = &ufs_fifoops,
	.vop_fsync = ffs_fsync,
	.vop_reallocblks = ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh = ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default = &ufs_vnodeops,
	.vop_fsync = ffs_fsync,
	.vop_getpages = ffs_getpages,
	._vop_lock = ffs_lock,
	.vop_read = ffs_read,
	.vop_reallocblks = ffs_reallocblks,
	.vop_write = ffs_write,
	.vop_closeextattr = ffs_closeextattr,
	.vop_deleteextattr = ffs_deleteextattr,
	.vop_getextattr = ffs_getextattr,
	.vop_listextattr = ffs_listextattr,
	.vop_openextattr = ffs_openextattr,
	.vop_setextattr = ffs_setextattr,
	.vop_vptofh = ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default = &ufs_fifoops,
	.vop_fsync = ffs_fsync,
	._vop_lock = ffs_lock,
	.vop_reallocblks = ffs_reallocblks,
	.vop_strategy = ffsext_strategy,
	.vop_closeextattr = ffs_closeextattr,
	.vop_deleteextattr = ffs_deleteextattr,
	.vop_getextattr = ffs_getextattr,
	.vop_listextattr = ffs_listextattr,
	.vop_openextattr = ffs_openextattr,
	.vop_setextattr = ffs_setextattr,
	.vop_vptofh = ffs_vptofh,
};

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	int error;

	error = ffs_syncvnode(ap->a_vp, ap->a_waitfor);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_SOFTDEP))
		error = softdep_fsync(ap->a_vp);
	return (error);
}

int
ffs_syncvnode(struct vnode *vp, int waitfor)
{
	struct inode *ip = VTOI(vp);
	struct buf *bp;
	struct buf *nbp;
	int s, error, wait, passes, skipmeta;
	ufs_lbn_t lbn;

	wait = (waitfor == MNT_WAIT);
	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	passes = NIADDR + 1;
	skipmeta = 0;
	if (wait)
		skipmeta = 1;
	s = splbio();
	VI_LOCK(vp);
loop:
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, this pass is the first time through on a
		 * synchronous flush request and the buffer being considered
		 * is metadata, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if ((skipmeta == 1 && bp->b_lblkno < 0))
			continue;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if (!wait && !LIST_EMPTY(&bp->b_dep) &&
		    (bp->b_flags & B_DEFERRED) == 0 &&
		    buf_countdeps(bp, 0)) {
			bp->b_flags |= B_DEFERRED;
			BUF_UNLOCK(bp);
			VI_LOCK(vp);
			continue;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * If this is a synchronous flush request, or it is not a
		 * file or device, start the write on this buffer immediately.
		 */
		if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {

			/*
			 * On our final pass through, do all I/O synchronously
			 * so that we can find out if our flush is failing
			 * because of write errors.
			 */
			if (passes > 0 || !wait) {
				if ((bp->b_flags & B_CLUSTEROK) && !wait) {
					(void) vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					splx(s);
					(void) bawrite(bp);
					s = splbio();
				}
			} else {
				bremfree(bp);
				splx(s);
				if ((error = bwrite(bp)) != 0)
					return (error);
				s = splbio();
			}
		} else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
			/*
			 * If the buffer is for data that has been truncated
			 * off the file, then throw it away.
			 */
			bremfree(bp);
			bp->b_flags |= B_INVAL | B_NOCACHE;
			splx(s);
			brelse(bp);
			s = splbio();
		} else
			vfs_bio_awrite(bp);

		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		VI_LOCK(vp);
		nbp = TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd);
	}
	/*
	 * If we were asked to do this synchronously, then go back for
	 * another pass, this time doing the metadata.
	 */
	if (skipmeta) {
		skipmeta = 0;
		goto loop;
	}

	if (wait) {
		bufobj_wwait(&vp->v_bufobj, 3, 0);
		VI_UNLOCK(vp);

		/*
		 * Ensure that any filesystem metadata associated
		 * with the vnode has been written.
		 */
		splx(s);
		if ((error = softdep_sync_metadata(vp)) != 0)
			return (error);
		s = splbio();

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt > 0) {
			/*
			 * Block devices associated with filesystems may
			 * have new I/O requests posted for them even if
			 * the vnode is locked, so no amount of trying will
			 * get them clean. Thus we give block devices a
			 * good effort, then just give up. For all other file
			 * types, go around and try again until it is clean.
			 */
			if (passes > 0) {
				passes -= 1;
				goto loop;
			}
#ifdef DIAGNOSTIC
			if (!vn_isdisk(vp, NULL))
				vprint("ffs_fsync: dirty", vp);
#endif
		}
	}
	VI_UNLOCK(vp);
	splx(s);
	return (ffs_update(vp, wait));
}

static int
ffs_lock(ap)
	struct _vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
			/*
			 * vnode interlock must be held to ensure that
			 * the possibly external lock isn't freed,
			 * e.g. when mutating from snapshot file vnode
			 * to regular file vnode.
			 */
			if ((flags & LK_INTERLOCK) == 0) {
				VI_LOCK(vp);
				flags |= LK_INTERLOCK;
			}
			lkp = vp->v_vnlock;
			result = _lockmgr(lkp, flags, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept. The lock currently held is not the
			 * right lock. Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr(lkp, LK_RELEASE, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = _VOP_LOCK_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (_VOP_LOCK_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
/* ARGSUSED */
static int
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return error;
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer. The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending ).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid, seqcount, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer. This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error
	 * because the loop above resets bp to NULL on each iteration
	 * and on normal completion has not set a new value into it.
	 * so it must have come from a 'break' statement
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	struct thread *td;
	ufs_lbn_t lbn;
	off_t osize;
	int seqcount;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset,
		    (int)uio->uio_resid
		);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td != NULL) {
		PROC_LOCK(td->td_proc);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
			psignal(td->td_proc, SIGXFSZ);
			PROC_UNLOCK(td->td_proc);
			return (EFBIG);
		}
		PROC_UNLOCK(td->td_proc);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap(). XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously. Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously. Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID,
		    SUSER_ALLOWJAIL)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC),
			    ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * get page routine
 */
static int
ffs_getpages(ap)
	struct vop_getpages_args *ap;
{
	int i;
	vm_page_t mreq;
	int pcount;

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];

	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid. Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page. We have to zero that data.
	 */
	VM_OBJECT_LOCK(mreq->object);
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		vm_page_lock_queues();
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);
			}
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(mreq->object);
		return VM_PAGER_OK;
	}
	VM_OBJECT_UNLOCK(mreq->object);

	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count,
	    ap->a_reqpage);
}

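/*
 * The UFS2 extended attribute area is stored separately from the regular
 * file data; ffs_extread() and ffs_extwrite() below address it through
 * negative logical block numbers (-1 - lbn), and ffsext_strategy() routes
 * such blocks accordingly.
 */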
/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");

#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer. The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending ).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer. This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error
	 * because the loop above resets bp to NULL on each iteration
	 * and on normal completion has not set a new value into it.
	 * so it must have come from a 'break' statement
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int blkoffset, error, flags, resid, size, xfersize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

	KASSERT(!(ip->i_flag & IN_SPACECOUNTED), ("inode %u: inode is dead",
	    ip->i_number));

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap(). XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously. Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously. Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    xfersize + blkoffset == fs->fs_bsize ||
		    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID,
		    SUSER_ALLOWJAIL)) {
			ip->i_mode &= ~(ISUID | ISGID);
			dp->di_mode = ip->i_mode;
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag&IO_SYNC), ucred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}


/*
 * Vnode operation to retrieve a named extended attribute.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct uio luio;
	struct iovec liovec;
	int easize, error;
	u_char *eae;

	ip = VTOI(vp);
	dp = ip->i_din2;
	easize = dp->di_extsize;

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}

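/*
 * Extended attribute transaction begin.
 *
 * Read the extended attribute area into a malloc'ed buffer attached to the
 * in-core inode (i_ea_area); the extattr vnode operations below modify that
 * copy, and ffs_close_ea() either writes it back or discards it.
 */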
static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	if (ip->i_ea_area != NULL)
		return (EBUSY);
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error)
		return (error);
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);
	if (ip->i_ea_area == NULL)
		return (EINVAL);
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred, td);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	free(ip->i_ea_area, M_TEMP);
	ip->i_ea_area = NULL;
	ip->i_ea_len = 0;
	ip->i_ea_error = 0;
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
	    lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction begin
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}


/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;
	int stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IWRITE);
	if (error) {
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof ul);
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	if (stand_alone)
		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen, stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IREAD);
	if (error)
		return (error);

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;
	if (stand_alone)
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen, stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IREAD);
	if (error)
		return (error);

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	if (stand_alone)
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

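/*
 * Each extended attribute record in the area begins with a 32-bit total
 * record length, followed by a namespace byte, a content pad length byte,
 * a name length byte, the name (padded out to an 8 byte boundary), and
 * finally the content (also padded to 8 bytes).  ffs_setextattr() below
 * builds records in this format and ffs_findextattr() above parses them.
 */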
/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;
	int stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IWRITE);
	if (error) {
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}

	ealen = ap->a_uio->uio_resid;
	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = 8 - (ealength % 8);
	if (eapad1 == 8)
		eapad1 = 0;
	eapad2 = 8 - (ealen % 8);
	if (eapad2 == 8)
		eapad2 = 0;
	ealength += eapad1 + ealen + eapad2;

	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize,
	    ap->a_attrnamespace, ap->a_name, &p, NULL);
	if (olen == -1) {
		/* new, append at end */
		p = eae + easize;
		easize += ealength;
	} else {
		bcopy(p, &ul, sizeof ul);
		i = p - eae + ul;
		if (ul != ealength) {
			bcopy(p + ul, p + ealength, easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	bcopy(&ealength, p, sizeof(ealength));
	p += sizeof(ealength);
	*p++ = ap->a_attrnamespace;
	*p++ = eapad2;
	*p++ = strlen(ap->a_name);
	strcpy(p, ap->a_name);
	p += strlen(ap->a_name);
	bzero(p, eapad1);
	p += eapad1;
	error = uiomove(p, ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	p += ealen;
	bzero(p, eapad2);

	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	if (stand_alone)
		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}