/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t	ffs_fsync;
static vop_lock1_t	ffs_lock;
static vop_getpages_t	ffs_getpages;
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;


/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh =		ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_lock1 =		ffs_lock,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};
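
/*
 * Two sets of vectors are provided because only UFS2 file systems carry
 * an extended attribute area: the "1" vectors above omit the extattr
 * operations, while the "2" vectors include them.  The choice between
 * them is made when a vnode is instantiated, based on the superblock
 * magic; roughly (an illustrative sketch, the real code lives in
 * ffs_vget() in ffs_vfsops.c):
 *
 *	vp->v_op = fs->fs_magic == FS_UFS2_MAGIC ?
 *	    &ffs_vnodeops2 : &ffs_vnodeops1;
 *
 * Any operation without an entry here falls through to vop_default,
 * i.e. the generic ufs_vnodeops/ufs_fifoops implementation.
 */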

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	int error;

	vp = ap->a_vp;
	bo = &vp->v_bufobj;
retry:
	error = ffs_syncvnode(vp, ap->a_waitfor);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT &&
	    (vp->v_mount->mnt_flag & MNT_SOFTDEP)) {
		error = softdep_fsync(vp);
		if (error)
			return (error);

		/*
		 * The softdep_fsync() function may drop vp lock,
		 * allowing for dirty buffers to reappear on the
		 * bo_dirty list.  Recheck and resync as needed.
		 */
		BO_LOCK(bo);
		if (vp->v_type == VREG && (bo->bo_numoutput > 0 ||
		    bo->bo_dirty.bv_cnt > 0)) {
			BO_UNLOCK(bo);
			goto retry;
		}
		BO_UNLOCK(bo);
	}
	return (0);
}

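/*
 * Flush a vnode's dirty buffers.  When waiting (MNT_WAIT), this runs in
 * two phases: the first pass writes data blocks and skips metadata
 * (buffers with negative logical block numbers); later passes pick up
 * the metadata.  The retry count is bounded by NIADDR + 1, presumably
 * because flushing a data block can dirty at most one indirect block
 * per level of indirection, plus the inode itself.
 */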
int
ffs_syncvnode(struct vnode *vp, int waitfor)
{
	struct inode *ip = VTOI(vp);
	struct bufobj *bo;
	struct buf *bp;
	struct buf *nbp;
	int s, error, wait, passes, skipmeta;
	ufs_lbn_t lbn;

	wait = (waitfor == MNT_WAIT);
	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));
	bo = &vp->v_bufobj;

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	passes = NIADDR + 1;
	skipmeta = 0;
	if (wait)
		skipmeta = 1;
	s = splbio();
	BO_LOCK(bo);
loop:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, this pass is the first time through on a
		 * synchronous flush request and the buffer being considered
		 * is metadata, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if ((skipmeta == 1 && bp->b_lblkno < 0))
			continue;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		BO_UNLOCK(bo);
		if (!wait && !LIST_EMPTY(&bp->b_dep) &&
		    (bp->b_flags & B_DEFERRED) == 0 &&
		    buf_countdeps(bp, 0)) {
			bp->b_flags |= B_DEFERRED;
			BUF_UNLOCK(bp);
			BO_LOCK(bo);
			continue;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * If this is a synchronous flush request, or it is not a
		 * file or device, start the write on this buffer immediately.
		 */
		if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {

			/*
			 * On our final pass through, do all I/O synchronously
			 * so that we can find out if our flush is failing
			 * because of write errors.
			 */
			if (passes > 0 || !wait) {
				if ((bp->b_flags & B_CLUSTEROK) && !wait) {
					(void) vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					splx(s);
					(void) bawrite(bp);
					s = splbio();
				}
			} else {
				bremfree(bp);
				splx(s);
				if ((error = bwrite(bp)) != 0)
					return (error);
				s = splbio();
			}
		} else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
			/*
			 * If the buffer is for data that has been truncated
			 * off the file, then throw it away.
			 */
			bremfree(bp);
			bp->b_flags |= B_INVAL | B_NOCACHE;
			splx(s);
			brelse(bp);
			s = splbio();
		} else
			vfs_bio_awrite(bp);

		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		BO_LOCK(bo);
		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
	}
	/*
	 * If we were asked to do this synchronously, then go back for
	 * another pass, this time doing the metadata.
	 */
	if (skipmeta) {
		skipmeta = 0;
		goto loop;
	}

	if (wait) {
		bufobj_wwait(bo, 3, 0);
		BO_UNLOCK(bo);

		/*
		 * Ensure that any filesystem metadata associated
		 * with the vnode has been written.
		 */
		splx(s);
		if ((error = softdep_sync_metadata(vp)) != 0)
			return (error);
		s = splbio();

		BO_LOCK(bo);
		if (bo->bo_dirty.bv_cnt > 0) {
			/*
			 * Block devices associated with filesystems may
			 * have new I/O requests posted for them even if
			 * the vnode is locked, so no amount of trying will
			 * get them clean.  Thus we give block devices a
			 * good effort, then just give up.  For all other file
			 * types, go around and try again until it is clean.
			 */
			if (passes > 0) {
				passes -= 1;
				goto loop;
			}
#ifdef INVARIANTS
			if (!vn_isdisk(vp, NULL))
				vprint("ffs_fsync: dirty", vp);
#endif
		}
	}
	BO_UNLOCK(bo);
	splx(s);
	return (ffs_update(vp, wait));
}

static int
ffs_lock(struct vop_lock1_args *ap)
/*
struct vop_lock1_args {
	struct vnode *a_vp;
	int a_flags;
	struct thread *a_td;
	char *file;
	int line;
};
*/
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
#ifdef DEBUG_VFS_LOCKS
			KASSERT(vp->v_holdcnt != 0,
			    ("ffs_lock %p: zero hold count", vp));
#endif
			lkp = vp->v_vnlock;
			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept.  The lock currently held is not the
			 * right lock.  Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
			    (LK_INTERLOCK | LK_NOWAIT))
				return (EBUSY);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

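/*
 * The read path below chooses among three I/O strategies per block:
 * plain bread() at end of file, cluster_read() when the mount allows
 * read clustering, and breadn() readahead when clustering is disabled
 * but the access pattern looks sequential.  The heuristic input is
 * seqcount, which the upper layers encode in the high bits of ioflag
 * (extracted below as ap->a_ioflag >> IO_SEQSHIFT).
 */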
/*
 * Vnode op for reading.
 */
/* ARGSUSED */
static int
ffs_read(struct vop_read_args *ap)
/*
struct vop_read_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	int a_ioflag;
	struct ucred *a_cred;
};
*/
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending ).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read.  Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for.  Interestingly, the same as
			 * the first option above.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0 &&
	    (ip->i_flag & IN_ACCESS) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

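/*
 * The write path below mirrors the read path, with two additions worth
 * noting.  A write that would exceed the caller's RLIMIT_FSIZE limit
 * posts SIGXFSZ and fails with EFBIG before any block is allocated.
 * And if a write fails partway with IO_UNIT set, the file is truncated
 * back to its original size and the uio is rewound, so the caller sees
 * all-or-nothing semantics.
 */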
/*
 * Vnode op for writing.
 */
static int
ffs_write(struct vop_write_args *ap)
/*
struct vop_write_args {
	struct vnode *a_vp;
	struct uio *a_uio;
	int a_ioflag;
	struct ucred *a_cred;
};
*/
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	struct thread *td;
	ufs_lbn_t lbn;
	off_t osize;
	int seqcount;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset,
		    (int)uio->uio_resid
		);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td != NULL) {
		PROC_LOCK(td->td_proc);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
			psignal(td->td_proc, SIGXFSZ);
			PROC_UNLOCK(td->td_proc);
			return (EFBIG);
		}
		PROC_UNLOCK(td->td_proc);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0) {
			vnode_pager_setsize(vp, ip->i_size);
			break;
		}
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC),
			    ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * get page routine
 */
static int
ffs_getpages(struct vop_getpages_args *ap)
{
	int i;
	vm_page_t mreq;
	int pcount;

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];

	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
	VM_OBJECT_LOCK(mreq->object);
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		vm_page_lock_queues();
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);
			}
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(mreq->object);
		return (VM_PAGER_OK);
	}
	VM_OBJECT_UNLOCK(mreq->object);

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}

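/*
 * UFS2 extended attribute area.
 *
 * Each attribute is stored as a variable-length record in the inode's
 * external attribute area.  As parsed by ffs_findextattr() and packed
 * by ffs_setextattr() below, a record looks roughly like this (all
 * records are padded out to a multiple of 8 bytes):
 *
 *	offset	length	contents
 *	0	4	ea_length: total record size, including padding
 *	4	1	attribute namespace
 *	5	1	eapad2: bytes of padding after the content
 *	6	1	name length (nlen)
 *	7	nlen	name, followed by eapad1 zero bytes so that the
 *			header plus name is 8-byte aligned
 *	...	ealen	content, followed by eapad2 zero bytes
 */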
/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending ).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read.  Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
	return (error);
}

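/*
 * Note that both ffs_extread() above and ffs_extwrite() below address
 * the attribute area with negative logical block numbers (-1 - lbn),
 * which the UFS block-mapping code translates to the di_extb[] blocks
 * of the UFS2 dinode; see also the lbn range check in ffsext_strategy().
 */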
/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int blkoffset, error, flags, resid, size, xfersize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

	KASSERT(!(ip->i_flag & IN_SPACECOUNTED), ("inode %u: inode is dead",
	    ip->i_number));

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    xfersize + blkoffset == fs->fs_bsize ||
		    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) {
		if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			dp->di_mode = ip->i_mode;
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag&IO_SYNC), ucred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}


/*
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct uio luio;
	struct iovec liovec;
	int easize, error;
	u_char *eae;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;
	easize = dp->di_extsize;
	if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize)
		return (EFBIG);

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}

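/*
 * The four helpers below implement a small reference-counted cache of
 * the attribute area: ffs_open_ea() reads the whole area into memory
 * (ip->i_ea_area) on first use, later opens just bump ip->i_ea_refs,
 * and ffs_close_ea() writes the area back (on commit) and frees it when
 * the last reference goes away.  IN_EA_LOCKED, together with the vnode
 * interlock, serializes access to these fields; deferred errors are
 * parked in ip->i_ea_error until the transaction is closed.
 */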
static void
ffs_lock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	while (ip->i_flag & IN_EA_LOCKED) {
		ip->i_flag |= IN_EA_LOCKWAIT;
		msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea",
		    0);
	}
	ip->i_flag |= IN_EA_LOCKED;
	VI_UNLOCK(vp);
}

static void
ffs_unlock_ea(struct vnode *vp)
{
	struct inode *ip;

	ip = VTOI(vp);
	VI_LOCK(vp);
	if (ip->i_flag & IN_EA_LOCKWAIT)
		wakeup(&ip->i_ea_refs);
	ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT);
	VI_UNLOCK(vp);
}

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred, td);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
	    lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction start (open).
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}


/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed there, because the vnode
		 * must be exclusively locked.
		 */
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof ul);
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);

	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;

	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

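/*
 * The list returned below is a sequence of records, each a one-byte
 * name length followed by the name itself without a terminating NUL;
 * this matches what the extattr_list_file(2) family expects, and is
 * why the loop copies out ealen + 1 bytes starting at the length byte.
 */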
/*
 * Vnode operation to retrieve the list of extended attribute names
 * on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error)
		return (error);

	error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
	if (error)
		return (error);
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VWRITE);
	if (error) {
		/*
		 * ffs_lock_ea is not needed there, because the vnode
		 * must be exclusively locked.
1691 */ 1692 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1693 ip->i_ea_error = error; 1694 return (error); 1695 } 1696 1697 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1698 if (error) 1699 return (error); 1700 1701 ealen = ap->a_uio->uio_resid; 1702 ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name); 1703 eapad1 = 8 - (ealength % 8); 1704 if (eapad1 == 8) 1705 eapad1 = 0; 1706 eapad2 = 8 - (ealen % 8); 1707 if (eapad2 == 8) 1708 eapad2 = 0; 1709 ealength += eapad1 + ealen + eapad2; 1710 1711 eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK); 1712 bcopy(ip->i_ea_area, eae, ip->i_ea_len); 1713 easize = ip->i_ea_len; 1714 1715 olen = ffs_findextattr(eae, easize, 1716 ap->a_attrnamespace, ap->a_name, &p, NULL); 1717 if (olen == -1) { 1718 /* new, append at end */ 1719 p = eae + easize; 1720 easize += ealength; 1721 } else { 1722 bcopy(p, &ul, sizeof ul); 1723 i = p - eae + ul; 1724 if (ul != ealength) { 1725 bcopy(p + ul, p + ealength, easize - i); 1726 easize += (ealength - ul); 1727 } 1728 } 1729 if (easize > NXADDR * fs->fs_bsize) { 1730 free(eae, M_TEMP); 1731 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1732 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1733 ip->i_ea_error = ENOSPC; 1734 return(ENOSPC); 1735 } 1736 bcopy(&ealength, p, sizeof(ealength)); 1737 p += sizeof(ealength); 1738 *p++ = ap->a_attrnamespace; 1739 *p++ = eapad2; 1740 *p++ = strlen(ap->a_name); 1741 strcpy(p, ap->a_name); 1742 p += strlen(ap->a_name); 1743 bzero(p, eapad1); 1744 p += eapad1; 1745 error = uiomove(p, ealen, ap->a_uio); 1746 if (error) { 1747 free(eae, M_TEMP); 1748 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1749 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1750 ip->i_ea_error = error; 1751 return(error); 1752 } 1753 p += ealen; 1754 bzero(p, eapad2); 1755 1756 p = ip->i_ea_area; 1757 ip->i_ea_area = eae; 1758 ip->i_ea_len = easize; 1759 free(p, M_TEMP); 1760 error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td); 1761 return(error); 1762 } 1763 1764 /* 1765 * Vnode pointer to File handle 1766 */ 1767 static int 1768 ffs_vptofh(struct vop_vptofh_args *ap) 1769 /* 1770 vop_vptofh { 1771 IN struct vnode *a_vp; 1772 IN struct fid *a_fhp; 1773 }; 1774 */ 1775 { 1776 struct inode *ip; 1777 struct ufid *ufhp; 1778 1779 ip = VTOI(ap->a_vp); 1780 ufhp = (struct ufid *)ap->a_fhp; 1781 ufhp->ufid_len = sizeof(struct ufid); 1782 ufhp->ufid_ino = ip->i_number; 1783 ufhp->ufid_gen = ip->i_gen; 1784 return (0); 1785 } 1786