/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t	ffs_fsync;
static vop_lock1_t	ffs_lock;
static vop_getpages_t	ffs_getpages;
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh =		ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_lock1 =		ffs_lock,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};
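
/*
 * Editor's note on the operation vectors above: the "1" and "2"
 * variants are selected at mount time, with ffs_vnodeops1/ffs_fifoops1
 * serving UFS1 file systems and ffs_vnodeops2/ffs_fifoops2 serving
 * UFS2, whose dinode carries the extended attribute area and therefore
 * additionally supports the extattr operations.
 */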

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	int error;

	vp = ap->a_vp;
	bo = &vp->v_bufobj;
retry:
	error = ffs_syncvnode(vp, ap->a_waitfor);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT &&
	    (vp->v_mount->mnt_flag & MNT_SOFTDEP)) {
		error = softdep_fsync(vp);
		if (error)
			return (error);

		/*
		 * The softdep_fsync() function may drop vp lock,
		 * allowing for dirty buffers to reappear on the
		 * bo_dirty list. Recheck and resync as needed.
		 */
		BO_LOCK(bo);
		if (vp->v_type == VREG && (bo->bo_numoutput > 0 ||
		    bo->bo_dirty.bv_cnt > 0)) {
			BO_UNLOCK(bo);
			goto retry;
		}
		BO_UNLOCK(bo);
	}
	return (0);
}
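
/*
 * Flush all dirty buffers of a vnode, then update its inode.  For a
 * synchronous (MNT_WAIT) request, a first pass writes the data blocks
 * and a second pass picks up the metadata buffers (those with negative
 * logical block numbers).  Up to NIADDR + 1 additional passes are made
 * while dependencies keep redirtying buffers; block devices get only
 * this bounded effort because new I/O can be posted to them even while
 * the vnode is locked.
 */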
int
ffs_syncvnode(struct vnode *vp, int waitfor)
{
	struct inode *ip = VTOI(vp);
	struct bufobj *bo;
	struct buf *bp;
	struct buf *nbp;
	int s, error, wait, passes, skipmeta;
	ufs_lbn_t lbn;

	wait = (waitfor == MNT_WAIT);
	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
	bo = &vp->v_bufobj;
	ip->i_flag &= ~IN_NEEDSYNC;

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	passes = NIADDR + 1;
	skipmeta = 0;
	if (wait)
		skipmeta = 1;
	s = splbio();
	BO_LOCK(bo);
loop:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, this pass is the first time through on a
		 * synchronous flush request and the buffer being considered
		 * is metadata, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if ((skipmeta == 1 && bp->b_lblkno < 0))
			continue;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		if (!wait && !LIST_EMPTY(&bp->b_dep) &&
		    (bp->b_flags & B_DEFERRED) == 0 &&
		    buf_countdeps(bp, 0)) {
			bp->b_flags |= B_DEFERRED;
			BUF_UNLOCK(bp);
			BO_LOCK(bo);
			continue;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * If this is a synchronous flush request, or it is not a
		 * file or device, start the write on this buffer immediately.
		 */
		if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {

			/*
			 * On our final pass through, do all I/O synchronously
			 * so that we can find out if our flush is failing
			 * because of write errors.
			 */
			if (passes > 0 || !wait) {
				if ((bp->b_flags & B_CLUSTEROK) && !wait) {
					(void) vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					splx(s);
					(void) bawrite(bp);
					s = splbio();
				}
			} else {
				bremfree(bp);
				splx(s);
				if ((error = bwrite(bp)) != 0)
					return (error);
				s = splbio();
			}
		} else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
			/*
			 * If the buffer is for data that has been truncated
			 * off the file, then throw it away.
			 */
			bremfree(bp);
			bp->b_flags |= B_INVAL | B_NOCACHE;
			splx(s);
			brelse(bp);
			s = splbio();
		} else
			vfs_bio_awrite(bp);

		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		BO_LOCK(bo);
		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
	}
	/*
	 * If we were asked to do this synchronously, then go back for
	 * another pass, this time doing the metadata.
	 */
	if (skipmeta) {
		skipmeta = 0;
		goto loop;
	}

	if (wait) {
		bufobj_wwait(bo, 3, 0);
		BO_UNLOCK(bo);

		/*
		 * Ensure that any filesystem metadata associated
		 * with the vnode has been written.
		 */
		splx(s);
		if ((error = softdep_sync_metadata(vp)) != 0)
			return (error);
		s = splbio();

		BO_LOCK(bo);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * Block devices associated with filesystems may
			 * have new I/O requests posted for them even if
			 * the vnode is locked, so no amount of trying will
			 * get them clean. Thus we give block devices a
			 * good effort, then just give up. For all other file
			 * types, go around and try again until it is clean.
			 */
			if (passes > 0) {
				passes -= 1;
				goto loop;
			}
#ifdef INVARIANTS
			if (!vn_isdisk(vp, NULL))
				vprint("ffs_fsync: dirty", vp);
#endif
		}
	}
	BO_UNLOCK(bo);
	splx(s);
	return (ffs_update(vp, wait));
}
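
/*
 * Lock a vnode, watching for identity changes.  A vnode being turned
 * into or back from a snapshot has its v_vnlock switched while waiters
 * sleep, so the lock just obtained may no longer be the vnode's
 * current lock; in that case it is released and the acquisition is
 * retried against the new lock.
 */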
static int
ffs_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
#ifdef DEBUG_VFS_LOCKS
			KASSERT(vp->v_holdcnt != 0,
			    ("ffs_lock %p: zero hold count", vp));
#endif
			lkp = vp->v_vnlock;
			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept. The lock currently held is not the
			 * right lock. Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
			    (LK_INTERLOCK | LK_NOWAIT))
				return (EBUSY);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
/* ARGSUSED */
static int
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of buffer. The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer. This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it.  So it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
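
	/*
	 * A successful read only marks the inode as accessed below; the
	 * access-time update itself is deferred until the inode times
	 * are next synchronized (see ufs_itimes()), so the read path
	 * does not have to modify the inode synchronously.
	 */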
	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
	    (ip->i_flag & IN_ACCESS) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Vnode op for writing.
 */
static int
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int seqcount;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset, (int)uio->uio_resid);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0) {
			vnode_pager_setsize(vp, ip->i_size);
			break;
		}
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC),
			    ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Get page routine.
 */
static int
ffs_getpages(ap)
	struct vop_getpages_args *ap;
{
	int i;
	vm_page_t mreq;
	int pcount;

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];

	/*
	 * If ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
	VM_OBJECT_LOCK(mreq->object);
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_lock(ap->a_m[i]);
				vm_page_free(ap->a_m[i]);
				vm_page_unlock(ap->a_m[i]);
			}
		}
		VM_OBJECT_UNLOCK(mreq->object);
		return (VM_PAGER_OK);
	}
	VM_OBJECT_UNLOCK(mreq->object);

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}
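
/*
 * On UFS2 the extended attribute area is carried in the buffer cache
 * like regular file data, but it is addressed with negative logical
 * block numbers: extent block N is read and written as logical block
 * -1 - N.  ffsext_strategy() below relies on this convention to
 * distinguish extattr I/O from ordinary fifo I/O.
 */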

/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * Size of buffer. The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer. This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it.  So it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int blkoffset, error, flags, resid, size, xfersize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    xfersize + blkoffset == fs->fs_bsize ||
			    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			dp->di_mode = ip->i_mode;
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag & IO_SYNC), ucred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}
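
/*
 * Each record in the extended attribute area has the layout below,
 * inferred from the parsing and building code that follows:
 *
 *	uint32_t length		total record length, including all padding
 *	uint8_t  namespace	attribute namespace
 *	uint8_t  pad2		number of zero bytes following the data
 *	uint8_t  namelength	length of the attribute name
 *	name			namelength bytes, zero-padded so that the
 *				header plus name fill a multiple of 8 bytes
 *	data			followed by pad2 zero bytes
 */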
/*
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	int easize, error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}
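
/*
 * The in-memory copy of the extended attribute area (i_ea_area) is
 * serialized by the IN_EA_LOCKED flag, with IN_EA_LOCKWAIT marking
 * waiters; both flags are protected by the vnode interlock.  The area
 * is reference counted via i_ea_refs, so nested ffs_open_ea() calls
 * share one copy; it is freed when the last reference is dropped in
 * ffs_close_ea().
 */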
static void
ffs_lock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	while (ip->i_flag & IN_EA_LOCKED) {
		ip->i_flag |= IN_EA_LOCKWAIT;
		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
		    0);
	}
	ip->i_flag |= IN_EA_LOCKED;
	VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	if (ip->i_flag & IN_EA_LOCKWAIT)
		wakeup(&ip->i_ea_refs);
	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
	VI_UNLOCK(vp);
}

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred, td);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
	    lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction start.
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}
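
/*
 * The delete and set operations below modify the extended attribute
 * area with a copy-and-swap scheme: the current area is duplicated,
 * the record is spliced out of (or into) the copy, the copy is
 * installed as the new i_ea_area, and the result is committed through
 * ffs_close_ea().
 */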

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {

		/*
		 * ffs_lock_ea is not needed here, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof ul);
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}
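
/*
 * The listing produced below uses the userland extattr list format:
 * each attribute in the requested namespace contributes a one-byte
 * name length followed by the name itself, with no NUL terminator.
 */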

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {

		/*
		 * ffs_lock_ea is not needed here, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealen = ap->a_uio->uio_resid;
	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = 8 - (ealength % 8);
	if (eapad1 == 8)
		eapad1 = 0;
	eapad2 = 8 - (ealen % 8);
	if (eapad2 == 8)
		eapad2 = 0;
	ealength += eapad1 + ealen + eapad2;

	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize,
	    ap->a_attrnamespace, ap->a_name, &p, NULL);
	if (olen == -1) {
		/* new, append at end */
		p = eae + easize;
		easize += ealength;
	} else {
		bcopy(p, &ul, sizeof ul);
		i = p - eae + ul;
		if (ul != ealength) {
			bcopy(p + ul, p + ealength, easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	bcopy(&ealength, p, sizeof(ealength));
	p += sizeof(ealength);
	*p++ = ap->a_attrnamespace;
	*p++ = eapad2;
	*p++ = strlen(ap->a_name);
	strcpy(p, ap->a_name);
	p += strlen(ap->a_name);
	bzero(p, eapad1);
	p += eapad1;
	error = uiomove(p, ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	p += ealen;
	bzero(p, eapad2);

	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode pointer to File handle
 */
static int
ffs_vptofh(struct vop_vptofh_args *ap)
/*
vop_vptofh {
	IN struct vnode *a_vp;
	IN struct fid *a_fhp;
};
*/
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(ap->a_vp);
	ufhp = (struct ufid *)ap->a_fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_gen;
	return (0);
}