/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t	ffs_fsync;
static vop_fdatasync_t	ffs_fdatasync;
static vop_lock1_t	ffs_lock;
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;
static vop_vptofh_t	ffs_vptofh;


/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_getpages =		vnode_pager_local_getpages,
	.vop_getpages_async =	vnode_pager_local_getpages_async,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
	.vop_vptofh =		ffs_vptofh,
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_getpages =		vnode_pager_local_getpages,
	.vop_getpages_async =	vnode_pager_local_getpages_async,
	.vop_lock1 =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_fdatasync =	ffs_fdatasync,
	.vop_lock1 =		ffs_lock,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
	.vop_vptofh =		ffs_vptofh,
};

/*
 * Synch an open file.
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	struct vnode *vp;
	struct bufobj *bo;
	int error;

	vp = ap->a_vp;
	bo = &vp->v_bufobj;
retry:
	error = ffs_syncvnode(vp, ap->a_waitfor, 0);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT && DOINGSOFTDEP(vp)) {
		error = softdep_fsync(vp);
		if (error)
			return (error);

		/*
		 * The softdep_fsync() function may drop vp lock,
		 * allowing for dirty buffers to reappear on the
		 * bo_dirty list.  Recheck and resync as needed.
		 */
		BO_LOCK(bo);
		if ((vp->v_type == VREG || vp->v_type == VDIR) &&
		    (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)) {
			BO_UNLOCK(bo);
			goto retry;
		}
		BO_UNLOCK(bo);
	}
	return (0);
}

int
ffs_syncvnode(struct vnode *vp, int waitfor, int flags)
{
	struct inode *ip;
	struct bufobj *bo;
	struct buf *bp, *nbp;
	ufs_lbn_t lbn;
	int error, passes;
	bool still_dirty, wait;

	ip = VTOI(vp);
	ip->i_flag &= ~IN_NEEDSYNC;
	bo = &vp->v_bufobj;

	/*
	 * When doing MNT_WAIT we must first flush all dependencies
	 * on the inode.
	 */
	if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT &&
	    (error = softdep_sync_metadata(vp)) != 0)
		return (error);

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	error = 0;
	passes = 0;
	wait = false;	/* Always do an async pass first. */
	lbn = lblkno(ITOFS(ip), (ip->i_size + ITOFS(ip)->fs_bsize - 1));
	BO_LOCK(bo);
loop:
	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		/*
		 * Flush indirects in order, if requested.
		 *
		 * Note that if only datasync is requested, we can
		 * skip indirect blocks when softupdates are not
		 * active.  Otherwise we must flush them with data,
		 * since dependencies prevent data block writes.
		 */
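		/*
		 * Indirect blocks are kept at negative logical block
		 * numbers at or below -NDADDR; lbn_level() is taken here
		 * to give the level of indirection of such a block, so
		 * the check below defers deeper indirects to later passes.
		 */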
		if (waitfor == MNT_WAIT && bp->b_lblkno <= -NDADDR &&
		    (lbn_level(bp->b_lblkno) >= passes ||
		    ((flags & DATA_ONLY) != 0 && !DOINGSOFTDEP(vp))))
			continue;
		if (bp->b_lblkno > lbn)
			panic("ffs_syncvnode: syncing truncated data.");
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
			BO_UNLOCK(bo);
		} else if (wait) {
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    BO_LOCKPTR(bo)) != 0) {
				bp->b_vflags &= ~BV_SCANNED;
				goto next;
			}
		} else
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * Check for dependencies and potentially complete them.
		 */
		if (!LIST_EMPTY(&bp->b_dep) &&
		    (error = softdep_sync_buf(vp, bp,
		    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
			/* I/O error. */
			if (error != EBUSY) {
				BUF_UNLOCK(bp);
				return (error);
			}
			/* If we deferred once, don't defer again. */
			if ((bp->b_flags & B_DEFERRED) == 0) {
				bp->b_flags |= B_DEFERRED;
				BUF_UNLOCK(bp);
				goto next;
			}
		}
		if (wait) {
			bremfree(bp);
			if ((error = bwrite(bp)) != 0)
				return (error);
		} else if ((bp->b_flags & B_CLUSTEROK)) {
			(void) vfs_bio_awrite(bp);
		} else {
			bremfree(bp);
			(void) bawrite(bp);
		}
next:
		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		BO_LOCK(bo);
		nbp = TAILQ_FIRST(&bo->bo_dirty.bv_hd);
	}
	if (waitfor != MNT_WAIT) {
		BO_UNLOCK(bo);
		if ((flags & NO_INO_UPDT) != 0)
			return (0);
		else
			return (ffs_update(vp, 0));
	}
	/* Drain IO to see if we're done. */
	bufobj_wwait(bo, 0, 0);
	/*
	 * Block devices associated with filesystems may have new I/O
	 * requests posted for them even if the vnode is locked, so no
	 * amount of trying will get them clean.  We make several passes
	 * as a best effort.
	 *
	 * Regular files may need multiple passes to flush all dependency
	 * work as it is possible that we must write once per indirect
	 * level, once for the leaf, and once for the inode and each of
	 * these will be done with one sync and one async pass.
	 */
	if (bo->bo_dirty.bv_cnt > 0) {
		if ((flags & DATA_ONLY) == 0) {
			still_dirty = true;
		} else {
			/*
			 * For data-only sync, dirty indirect buffers
			 * are ignored.
			 */
			still_dirty = false;
			TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
				if (bp->b_lblkno > -NDADDR) {
					still_dirty = true;
					break;
				}
			}
		}

		if (still_dirty) {
			/* Write the inode after sync passes to flush deps. */
			if (wait && DOINGSOFTDEP(vp) &&
			    (flags & NO_INO_UPDT) == 0) {
				BO_UNLOCK(bo);
				ffs_update(vp, 1);
				BO_LOCK(bo);
			}
			/* switch between sync/async. */
			wait = !wait;
			if (wait || ++passes < NIADDR + 2)
				goto loop;
#ifdef INVARIANTS
			if (!vn_isdisk(vp, NULL))
				vn_printf(vp, "ffs_fsync: dirty ");
#endif
		}
	}
	BO_UNLOCK(bo);
	error = 0;
	if ((flags & DATA_ONLY) == 0) {
		if ((flags & NO_INO_UPDT) == 0)
			error = ffs_update(vp, 1);
		if (DOINGSUJ(vp))
			softdep_journal_fsync(VTOI(vp));
	}
	return (error);
}

static int
ffs_fdatasync(struct vop_fdatasync_args *ap)
{

	return (ffs_syncvnode(ap->a_vp, MNT_WAIT, DATA_ONLY));
}

static int
ffs_lock(ap)
	struct vop_lock1_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
#ifdef DEBUG_VFS_LOCKS
			KASSERT(vp->v_holdcnt != 0,
			    ("ffs_lock %p: zero hold count", vp));
#endif
			lkp = vp->v_vnlock;
			result = _lockmgr_args(lkp, flags, VI_MTX(vp),
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept.  The lock currently held is not the
			 * right lock.  Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr_args(lkp, LK_RELEASE, NULL,
			    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
			    ap->a_file, ap->a_line);
			if ((flags & (LK_INTERLOCK | LK_NOWAIT)) ==
			    (LK_INTERLOCK | LK_NOWAIT))
				return (EBUSY);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = VOP_LOCK1_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (VOP_LOCK1_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
 */
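/*
 * The loop in ffs_read() below works one filesystem block at a time:
 * it brings the block in with bread_gb(), cluster_read(), or
 * breadn_flags() depending on whether readahead and clustering are
 * allowed, copies it out with vn_io_fault_uiomove() (mapped buffers)
 * or vn_io_fault_pgmove() (unmapped buffers), and marks buffers
 * B_RELBUF for IO_DIRECT or IO_VMIO reads so the buffer cache can
 * release the underlying pages.
 */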
461 */ 462 static int 463 ffs_read(ap) 464 struct vop_read_args /* { 465 struct vnode *a_vp; 466 struct uio *a_uio; 467 int a_ioflag; 468 struct ucred *a_cred; 469 } */ *ap; 470 { 471 struct vnode *vp; 472 struct inode *ip; 473 struct uio *uio; 474 struct fs *fs; 475 struct buf *bp; 476 ufs_lbn_t lbn, nextlbn; 477 off_t bytesinfile; 478 long size, xfersize, blkoffset; 479 ssize_t orig_resid; 480 int error; 481 int seqcount; 482 int ioflag; 483 484 vp = ap->a_vp; 485 uio = ap->a_uio; 486 ioflag = ap->a_ioflag; 487 if (ap->a_ioflag & IO_EXT) 488 #ifdef notyet 489 return (ffs_extread(vp, uio, ioflag)); 490 #else 491 panic("ffs_read+IO_EXT"); 492 #endif 493 #ifdef DIRECTIO 494 if ((ioflag & IO_DIRECT) != 0) { 495 int workdone; 496 497 error = ffs_rawread(vp, uio, &workdone); 498 if (error != 0 || workdone != 0) 499 return error; 500 } 501 #endif 502 503 seqcount = ap->a_ioflag >> IO_SEQSHIFT; 504 ip = VTOI(vp); 505 506 #ifdef INVARIANTS 507 if (uio->uio_rw != UIO_READ) 508 panic("ffs_read: mode"); 509 510 if (vp->v_type == VLNK) { 511 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen) 512 panic("ffs_read: short symlink"); 513 } else if (vp->v_type != VREG && vp->v_type != VDIR) 514 panic("ffs_read: type %d", vp->v_type); 515 #endif 516 orig_resid = uio->uio_resid; 517 KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0")); 518 if (orig_resid == 0) 519 return (0); 520 KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0")); 521 fs = ITOFS(ip); 522 if (uio->uio_offset < ip->i_size && 523 uio->uio_offset >= fs->fs_maxfilesize) 524 return (EOVERFLOW); 525 526 for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { 527 if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0) 528 break; 529 lbn = lblkno(fs, uio->uio_offset); 530 nextlbn = lbn + 1; 531 532 /* 533 * size of buffer. The buffer representing the 534 * end of the file is rounded up to the size of 535 * the block type ( fragment or full block, 536 * depending ). 537 */ 538 size = blksize(fs, ip, lbn); 539 blkoffset = blkoff(fs, uio->uio_offset); 540 541 /* 542 * The amount we want to transfer in this iteration is 543 * one FS block less the amount of the data before 544 * our startpoint (duh!) 545 */ 546 xfersize = fs->fs_bsize - blkoffset; 547 548 /* 549 * But if we actually want less than the block, 550 * or the file doesn't have a whole block more of data, 551 * then use the lesser number. 552 */ 553 if (uio->uio_resid < xfersize) 554 xfersize = uio->uio_resid; 555 if (bytesinfile < xfersize) 556 xfersize = bytesinfile; 557 558 if (lblktosize(fs, nextlbn) >= ip->i_size) { 559 /* 560 * Don't do readahead if this is the end of the file. 561 */ 562 error = bread_gb(vp, lbn, size, NOCRED, 563 GB_UNMAPPED, &bp); 564 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { 565 /* 566 * Otherwise if we are allowed to cluster, 567 * grab as much as we can. 568 * 569 * XXX This may not be a win if we are not 570 * doing sequential access. 571 */ 572 error = cluster_read(vp, ip->i_size, lbn, 573 size, NOCRED, blkoffset + uio->uio_resid, 574 seqcount, GB_UNMAPPED, &bp); 575 } else if (seqcount > 1) { 576 /* 577 * If we are NOT allowed to cluster, then 578 * if we appear to be acting sequentially, 579 * fire off a request for a readahead 580 * as well as a read. Note that the 4th and 5th 581 * arguments point to arrays of the size specified in 582 * the 6th argument. 
583 */ 584 u_int nextsize = blksize(fs, ip, nextlbn); 585 error = breadn_flags(vp, lbn, size, &nextlbn, 586 &nextsize, 1, NOCRED, GB_UNMAPPED, &bp); 587 } else { 588 /* 589 * Failing all of the above, just read what the 590 * user asked for. Interestingly, the same as 591 * the first option above. 592 */ 593 error = bread_gb(vp, lbn, size, NOCRED, 594 GB_UNMAPPED, &bp); 595 } 596 if (error) { 597 brelse(bp); 598 bp = NULL; 599 break; 600 } 601 602 /* 603 * If IO_DIRECT then set B_DIRECT for the buffer. This 604 * will cause us to attempt to release the buffer later on 605 * and will cause the buffer cache to attempt to free the 606 * underlying pages. 607 */ 608 if (ioflag & IO_DIRECT) 609 bp->b_flags |= B_DIRECT; 610 611 /* 612 * We should only get non-zero b_resid when an I/O error 613 * has occurred, which should cause us to break above. 614 * However, if the short read did not cause an error, 615 * then we want to ensure that we do not uiomove bad 616 * or uninitialized data. 617 */ 618 size -= bp->b_resid; 619 if (size < xfersize) { 620 if (size == 0) 621 break; 622 xfersize = size; 623 } 624 625 if (buf_mapped(bp)) { 626 error = vn_io_fault_uiomove((char *)bp->b_data + 627 blkoffset, (int)xfersize, uio); 628 } else { 629 error = vn_io_fault_pgmove(bp->b_pages, blkoffset, 630 (int)xfersize, uio); 631 } 632 if (error) 633 break; 634 635 if ((ioflag & (IO_VMIO|IO_DIRECT)) && 636 (LIST_EMPTY(&bp->b_dep))) { 637 /* 638 * If there are no dependencies, and it's VMIO, 639 * then we don't need the buf, mark it available 640 * for freeing. For non-direct VMIO reads, the VM 641 * has the data. 642 */ 643 bp->b_flags |= B_RELBUF; 644 brelse(bp); 645 } else { 646 /* 647 * Otherwise let whoever 648 * made the request take care of 649 * freeing it. We just queue 650 * it onto another list. 651 */ 652 bqrelse(bp); 653 } 654 } 655 656 /* 657 * This can only happen in the case of an error 658 * because the loop above resets bp to NULL on each iteration 659 * and on normal completion has not set a new value into it. 660 * so it must have come from a 'break' statement 661 */ 662 if (bp != NULL) { 663 if ((ioflag & (IO_VMIO|IO_DIRECT)) && 664 (LIST_EMPTY(&bp->b_dep))) { 665 bp->b_flags |= B_RELBUF; 666 brelse(bp); 667 } else { 668 bqrelse(bp); 669 } 670 } 671 672 if ((error == 0 || uio->uio_resid != orig_resid) && 673 (vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0 && 674 (ip->i_flag & IN_ACCESS) == 0) { 675 VI_LOCK(vp); 676 ip->i_flag |= IN_ACCESS; 677 VI_UNLOCK(vp); 678 } 679 return (error); 680 } 681 682 /* 683 * Vnode op for writing. 
684 */ 685 static int 686 ffs_write(ap) 687 struct vop_write_args /* { 688 struct vnode *a_vp; 689 struct uio *a_uio; 690 int a_ioflag; 691 struct ucred *a_cred; 692 } */ *ap; 693 { 694 struct vnode *vp; 695 struct uio *uio; 696 struct inode *ip; 697 struct fs *fs; 698 struct buf *bp; 699 ufs_lbn_t lbn; 700 off_t osize; 701 ssize_t resid; 702 int seqcount; 703 int blkoffset, error, flags, ioflag, size, xfersize; 704 705 vp = ap->a_vp; 706 uio = ap->a_uio; 707 ioflag = ap->a_ioflag; 708 if (ap->a_ioflag & IO_EXT) 709 #ifdef notyet 710 return (ffs_extwrite(vp, uio, ioflag, ap->a_cred)); 711 #else 712 panic("ffs_write+IO_EXT"); 713 #endif 714 715 seqcount = ap->a_ioflag >> IO_SEQSHIFT; 716 ip = VTOI(vp); 717 718 #ifdef INVARIANTS 719 if (uio->uio_rw != UIO_WRITE) 720 panic("ffs_write: mode"); 721 #endif 722 723 switch (vp->v_type) { 724 case VREG: 725 if (ioflag & IO_APPEND) 726 uio->uio_offset = ip->i_size; 727 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) 728 return (EPERM); 729 /* FALLTHROUGH */ 730 case VLNK: 731 break; 732 case VDIR: 733 panic("ffs_write: dir write"); 734 break; 735 default: 736 panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type, 737 (int)uio->uio_offset, 738 (int)uio->uio_resid 739 ); 740 } 741 742 KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0")); 743 KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0")); 744 fs = ITOFS(ip); 745 if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) 746 return (EFBIG); 747 /* 748 * Maybe this should be above the vnode op call, but so long as 749 * file servers have no limits, I don't think it matters. 750 */ 751 if (vn_rlimit_fsize(vp, uio, uio->uio_td)) 752 return (EFBIG); 753 754 resid = uio->uio_resid; 755 osize = ip->i_size; 756 if (seqcount > BA_SEQMAX) 757 flags = BA_SEQMAX << BA_SEQSHIFT; 758 else 759 flags = seqcount << BA_SEQSHIFT; 760 if (ioflag & IO_SYNC) 761 flags |= IO_SYNC; 762 flags |= BA_UNMAPPED; 763 764 for (error = 0; uio->uio_resid > 0;) { 765 lbn = lblkno(fs, uio->uio_offset); 766 blkoffset = blkoff(fs, uio->uio_offset); 767 xfersize = fs->fs_bsize - blkoffset; 768 if (uio->uio_resid < xfersize) 769 xfersize = uio->uio_resid; 770 if (uio->uio_offset + xfersize > ip->i_size) 771 vnode_pager_setsize(vp, uio->uio_offset + xfersize); 772 773 /* 774 * We must perform a read-before-write if the transfer size 775 * does not cover the entire buffer. 776 */ 777 if (fs->fs_bsize > xfersize) 778 flags |= BA_CLRBUF; 779 else 780 flags &= ~BA_CLRBUF; 781 /* XXX is uio->uio_offset the right thing here? */ 782 error = UFS_BALLOC(vp, uio->uio_offset, xfersize, 783 ap->a_cred, flags, &bp); 784 if (error != 0) { 785 vnode_pager_setsize(vp, ip->i_size); 786 break; 787 } 788 if (ioflag & IO_DIRECT) 789 bp->b_flags |= B_DIRECT; 790 if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL)) 791 bp->b_flags |= B_NOCACHE; 792 793 if (uio->uio_offset + xfersize > ip->i_size) { 794 ip->i_size = uio->uio_offset + xfersize; 795 DIP_SET(ip, i_size, ip->i_size); 796 } 797 798 size = blksize(fs, ip, lbn) - bp->b_resid; 799 if (size < xfersize) 800 xfersize = size; 801 802 if (buf_mapped(bp)) { 803 error = vn_io_fault_uiomove((char *)bp->b_data + 804 blkoffset, (int)xfersize, uio); 805 } else { 806 error = vn_io_fault_pgmove(bp->b_pages, blkoffset, 807 (int)xfersize, uio); 808 } 809 /* 810 * If the buffer is not already filled and we encounter an 811 * error while trying to fill it, we have to clear out any 812 * garbage data from the pages instantiated for the buffer. 
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland mmap.
		 *
		 * Note that we need only clear buffers with a transfer size
		 * equal to the block size because buffers with a shorter
		 * transfer size were cleared above by the call to UFS_BALLOC()
		 * with the BA_CLRBUF flag set.
		 *
		 * If the source region for uiomove identically mmaps the
		 * buffer, uiomove() performed the NOP copy, and the buffer
		 * content remains valid because the page fault handler
		 * validated the pages.
		 */
		if (error != 0 && (bp->b_flags & B_CACHE) == 0 &&
		    fs->fs_bsize == xfersize)
			vfs_bio_clrbuf(bp);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount,
				    GB_UNMAPPED);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid &&
	    ap->a_cred) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0)) {
			ip->i_mode &= ~(ISUID | ISGID);
			DIP_SET(ip, i_mode, ip->i_mode);
		}
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC), ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Extended attribute area reading.
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	ssize_t orig_resid;
	int error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	dp = ip->i_din2;

#ifdef INVARIANTS
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");

#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending ).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			u_int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  For non-direct VMIO reads, the VM
			 * has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error
	 * because the loop above resets bp to NULL on each iteration
	 * and on normal completion has not set a new value into it.
	 * so it must have come from a 'break' statement
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_EMPTY(&bp->b_dep))) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}
	return (error);
}

/*
 * Extended attribute area writing.
 */
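/*
 * The extended attribute area is limited to NXADDR * fs_bsize bytes
 * (the blocks reachable through the UFS2 inode's external block
 * pointers); writes below go through UFS_BALLOC() with the IO_EXT
 * flag so allocation is directed at that area rather than at the
 * file data.
 */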
1048 */ 1049 static int 1050 ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred) 1051 { 1052 struct inode *ip; 1053 struct ufs2_dinode *dp; 1054 struct fs *fs; 1055 struct buf *bp; 1056 ufs_lbn_t lbn; 1057 off_t osize; 1058 ssize_t resid; 1059 int blkoffset, error, flags, size, xfersize; 1060 1061 ip = VTOI(vp); 1062 fs = ITOFS(ip); 1063 dp = ip->i_din2; 1064 1065 #ifdef INVARIANTS 1066 if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC) 1067 panic("ffs_extwrite: mode"); 1068 #endif 1069 1070 if (ioflag & IO_APPEND) 1071 uio->uio_offset = dp->di_extsize; 1072 KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0")); 1073 KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0")); 1074 if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize) 1075 return (EFBIG); 1076 1077 resid = uio->uio_resid; 1078 osize = dp->di_extsize; 1079 flags = IO_EXT; 1080 if (ioflag & IO_SYNC) 1081 flags |= IO_SYNC; 1082 1083 for (error = 0; uio->uio_resid > 0;) { 1084 lbn = lblkno(fs, uio->uio_offset); 1085 blkoffset = blkoff(fs, uio->uio_offset); 1086 xfersize = fs->fs_bsize - blkoffset; 1087 if (uio->uio_resid < xfersize) 1088 xfersize = uio->uio_resid; 1089 1090 /* 1091 * We must perform a read-before-write if the transfer size 1092 * does not cover the entire buffer. 1093 */ 1094 if (fs->fs_bsize > xfersize) 1095 flags |= BA_CLRBUF; 1096 else 1097 flags &= ~BA_CLRBUF; 1098 error = UFS_BALLOC(vp, uio->uio_offset, xfersize, 1099 ucred, flags, &bp); 1100 if (error != 0) 1101 break; 1102 /* 1103 * If the buffer is not valid we have to clear out any 1104 * garbage data from the pages instantiated for the buffer. 1105 * If we do not, a failed uiomove() during a write can leave 1106 * the prior contents of the pages exposed to a userland 1107 * mmap(). XXX deal with uiomove() errors a better way. 1108 */ 1109 if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize) 1110 vfs_bio_clrbuf(bp); 1111 if (ioflag & IO_DIRECT) 1112 bp->b_flags |= B_DIRECT; 1113 1114 if (uio->uio_offset + xfersize > dp->di_extsize) 1115 dp->di_extsize = uio->uio_offset + xfersize; 1116 1117 size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid; 1118 if (size < xfersize) 1119 xfersize = size; 1120 1121 error = 1122 uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); 1123 if ((ioflag & (IO_VMIO|IO_DIRECT)) && 1124 (LIST_EMPTY(&bp->b_dep))) { 1125 bp->b_flags |= B_RELBUF; 1126 } 1127 1128 /* 1129 * If IO_SYNC each buffer is written synchronously. Otherwise 1130 * if we have a severe page deficiency write the buffer 1131 * asynchronously. Otherwise try to cluster, and if that 1132 * doesn't do it then either do an async write (if O_DIRECT), 1133 * or a delayed write (if not). 1134 */ 1135 if (ioflag & IO_SYNC) { 1136 (void)bwrite(bp); 1137 } else if (vm_page_count_severe() || 1138 buf_dirty_count_severe() || 1139 xfersize + blkoffset == fs->fs_bsize || 1140 (ioflag & (IO_ASYNC | IO_DIRECT))) 1141 bawrite(bp); 1142 else 1143 bdwrite(bp); 1144 if (error || xfersize == 0) 1145 break; 1146 ip->i_flag |= IN_CHANGE; 1147 } 1148 /* 1149 * If we successfully wrote any data, and we are not the superuser 1150 * we clear the setuid and setgid bits as a precaution against 1151 * tampering. 
1152 */ 1153 if ((ip->i_mode & (ISUID | ISGID)) && resid > uio->uio_resid && ucred) { 1154 if (priv_check_cred(ucred, PRIV_VFS_RETAINSUGID, 0)) { 1155 ip->i_mode &= ~(ISUID | ISGID); 1156 dp->di_mode = ip->i_mode; 1157 } 1158 } 1159 if (error) { 1160 if (ioflag & IO_UNIT) { 1161 (void)ffs_truncate(vp, osize, 1162 IO_EXT | (ioflag&IO_SYNC), ucred); 1163 uio->uio_offset -= resid - uio->uio_resid; 1164 uio->uio_resid = resid; 1165 } 1166 } else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) 1167 error = ffs_update(vp, 1); 1168 return (error); 1169 } 1170 1171 1172 /* 1173 * Vnode operating to retrieve a named extended attribute. 1174 * 1175 * Locate a particular EA (nspace:name) in the area (ptr:length), and return 1176 * the length of the EA, and possibly the pointer to the entry and to the data. 1177 */ 1178 static int 1179 ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name, u_char **eap, u_char **eac) 1180 { 1181 u_char *p, *pe, *pn, *p0; 1182 int eapad1, eapad2, ealength, ealen, nlen; 1183 uint32_t ul; 1184 1185 pe = ptr + length; 1186 nlen = strlen(name); 1187 1188 for (p = ptr; p < pe; p = pn) { 1189 p0 = p; 1190 bcopy(p, &ul, sizeof(ul)); 1191 pn = p + ul; 1192 /* make sure this entry is complete */ 1193 if (pn > pe) 1194 break; 1195 p += sizeof(uint32_t); 1196 if (*p != nspace) 1197 continue; 1198 p++; 1199 eapad2 = *p++; 1200 if (*p != nlen) 1201 continue; 1202 p++; 1203 if (bcmp(p, name, nlen)) 1204 continue; 1205 ealength = sizeof(uint32_t) + 3 + nlen; 1206 eapad1 = 8 - (ealength % 8); 1207 if (eapad1 == 8) 1208 eapad1 = 0; 1209 ealength += eapad1; 1210 ealen = ul - ealength - eapad2; 1211 p += nlen + eapad1; 1212 if (eap != NULL) 1213 *eap = p0; 1214 if (eac != NULL) 1215 *eac = p; 1216 return (ealen); 1217 } 1218 return(-1); 1219 } 1220 1221 static int 1222 ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra) 1223 { 1224 struct inode *ip; 1225 struct ufs2_dinode *dp; 1226 struct fs *fs; 1227 struct uio luio; 1228 struct iovec liovec; 1229 u_int easize; 1230 int error; 1231 u_char *eae; 1232 1233 ip = VTOI(vp); 1234 fs = ITOFS(ip); 1235 dp = ip->i_din2; 1236 easize = dp->di_extsize; 1237 if ((uoff_t)easize + extra > NXADDR * fs->fs_bsize) 1238 return (EFBIG); 1239 1240 eae = malloc(easize + extra, M_TEMP, M_WAITOK); 1241 1242 liovec.iov_base = eae; 1243 liovec.iov_len = easize; 1244 luio.uio_iov = &liovec; 1245 luio.uio_iovcnt = 1; 1246 luio.uio_offset = 0; 1247 luio.uio_resid = easize; 1248 luio.uio_segflg = UIO_SYSSPACE; 1249 luio.uio_rw = UIO_READ; 1250 luio.uio_td = td; 1251 1252 error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC); 1253 if (error) { 1254 free(eae, M_TEMP); 1255 return(error); 1256 } 1257 *p = eae; 1258 return (0); 1259 } 1260 1261 static void 1262 ffs_lock_ea(struct vnode *vp) 1263 { 1264 struct inode *ip; 1265 1266 ip = VTOI(vp); 1267 VI_LOCK(vp); 1268 while (ip->i_flag & IN_EA_LOCKED) { 1269 ip->i_flag |= IN_EA_LOCKWAIT; 1270 msleep(&ip->i_ea_refs, &vp->v_interlock, PINOD + 2, "ufs_ea", 1271 0); 1272 } 1273 ip->i_flag |= IN_EA_LOCKED; 1274 VI_UNLOCK(vp); 1275 } 1276 1277 static void 1278 ffs_unlock_ea(struct vnode *vp) 1279 { 1280 struct inode *ip; 1281 1282 ip = VTOI(vp); 1283 VI_LOCK(vp); 1284 if (ip->i_flag & IN_EA_LOCKWAIT) 1285 wakeup(&ip->i_ea_refs); 1286 ip->i_flag &= ~(IN_EA_LOCKED | IN_EA_LOCKWAIT); 1287 VI_UNLOCK(vp); 1288 } 1289 1290 static int 1291 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td) 1292 { 1293 struct inode *ip; 1294 struct ufs2_dinode *dp; 1295 int error; 1296 1297 ip = 
	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area != NULL) {
		ip->i_ea_refs++;
		ffs_unlock_ea(vp);
		return (0);
	}
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error) {
		ffs_unlock_ea(vp);
		return (error);
	}
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
	ip->i_ea_refs++;
	ffs_unlock_ea(vp);
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);

	ffs_lock_ea(vp);
	if (ip->i_ea_area == NULL) {
		ffs_unlock_ea(vp);
		return (EINVAL);
	}
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		ASSERT_VOP_ELOCKED(vp, "ffs_close_ea commit");
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	if (--ip->i_ea_refs == 0) {
		free(ip->i_ea_area, M_TEMP);
		ip->i_ea_area = NULL;
		ip->i_ea_len = 0;
		ip->i_ea_error = 0;
	}
	ffs_unlock_ea(vp);
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
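/*
 * On UFS2 the buffers backing the extended attribute area carry
 * logical block numbers in the range [-NXADDR, 0), which is how the
 * check below tells them apart from ordinary fifo I/O.
 */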
1372 */ 1373 static int 1374 ffsext_strategy(struct vop_strategy_args *ap) 1375 /* 1376 struct vop_strategy_args { 1377 struct vnodeop_desc *a_desc; 1378 struct vnode *a_vp; 1379 struct buf *a_bp; 1380 }; 1381 */ 1382 { 1383 struct vnode *vp; 1384 daddr_t lbn; 1385 1386 vp = ap->a_vp; 1387 lbn = ap->a_bp->b_lblkno; 1388 if (I_IS_UFS2(VTOI(vp)) && lbn < 0 && lbn >= -NXADDR) 1389 return (VOP_STRATEGY_APV(&ufs_vnodeops, ap)); 1390 if (vp->v_type == VFIFO) 1391 return (VOP_STRATEGY_APV(&ufs_fifoops, ap)); 1392 panic("spec nodes went here"); 1393 } 1394 1395 /* 1396 * Vnode extattr transaction commit/abort 1397 */ 1398 static int 1399 ffs_openextattr(struct vop_openextattr_args *ap) 1400 /* 1401 struct vop_openextattr_args { 1402 struct vnodeop_desc *a_desc; 1403 struct vnode *a_vp; 1404 IN struct ucred *a_cred; 1405 IN struct thread *a_td; 1406 }; 1407 */ 1408 { 1409 1410 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) 1411 return (EOPNOTSUPP); 1412 1413 return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td)); 1414 } 1415 1416 1417 /* 1418 * Vnode extattr transaction commit/abort 1419 */ 1420 static int 1421 ffs_closeextattr(struct vop_closeextattr_args *ap) 1422 /* 1423 struct vop_closeextattr_args { 1424 struct vnodeop_desc *a_desc; 1425 struct vnode *a_vp; 1426 int a_commit; 1427 IN struct ucred *a_cred; 1428 IN struct thread *a_td; 1429 }; 1430 */ 1431 { 1432 1433 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) 1434 return (EOPNOTSUPP); 1435 1436 if (ap->a_commit && (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY)) 1437 return (EROFS); 1438 1439 return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td)); 1440 } 1441 1442 /* 1443 * Vnode operation to remove a named attribute. 1444 */ 1445 static int 1446 ffs_deleteextattr(struct vop_deleteextattr_args *ap) 1447 /* 1448 vop_deleteextattr { 1449 IN struct vnode *a_vp; 1450 IN int a_attrnamespace; 1451 IN const char *a_name; 1452 IN struct ucred *a_cred; 1453 IN struct thread *a_td; 1454 }; 1455 */ 1456 { 1457 struct inode *ip; 1458 struct fs *fs; 1459 uint32_t ealength, ul; 1460 int ealen, olen, eapad1, eapad2, error, i, easize; 1461 u_char *eae, *p; 1462 1463 ip = VTOI(ap->a_vp); 1464 fs = ITOFS(ip); 1465 1466 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) 1467 return (EOPNOTSUPP); 1468 1469 if (strlen(ap->a_name) == 0) 1470 return (EINVAL); 1471 1472 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) 1473 return (EROFS); 1474 1475 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1476 ap->a_cred, ap->a_td, VWRITE); 1477 if (error) { 1478 1479 /* 1480 * ffs_lock_ea is not needed there, because the vnode 1481 * must be exclusively locked. 
1482 */ 1483 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1484 ip->i_ea_error = error; 1485 return (error); 1486 } 1487 1488 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1489 if (error) 1490 return (error); 1491 1492 ealength = eapad1 = ealen = eapad2 = 0; 1493 1494 eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK); 1495 bcopy(ip->i_ea_area, eae, ip->i_ea_len); 1496 easize = ip->i_ea_len; 1497 1498 olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name, 1499 &p, NULL); 1500 if (olen == -1) { 1501 /* delete but nonexistent */ 1502 free(eae, M_TEMP); 1503 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1504 return(ENOATTR); 1505 } 1506 bcopy(p, &ul, sizeof ul); 1507 i = p - eae + ul; 1508 if (ul != ealength) { 1509 bcopy(p + ul, p + ealength, easize - i); 1510 easize += (ealength - ul); 1511 } 1512 if (easize > NXADDR * fs->fs_bsize) { 1513 free(eae, M_TEMP); 1514 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1515 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1516 ip->i_ea_error = ENOSPC; 1517 return(ENOSPC); 1518 } 1519 p = ip->i_ea_area; 1520 ip->i_ea_area = eae; 1521 ip->i_ea_len = easize; 1522 free(p, M_TEMP); 1523 error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td); 1524 return(error); 1525 } 1526 1527 /* 1528 * Vnode operation to retrieve a named extended attribute. 1529 */ 1530 static int 1531 ffs_getextattr(struct vop_getextattr_args *ap) 1532 /* 1533 vop_getextattr { 1534 IN struct vnode *a_vp; 1535 IN int a_attrnamespace; 1536 IN const char *a_name; 1537 INOUT struct uio *a_uio; 1538 OUT size_t *a_size; 1539 IN struct ucred *a_cred; 1540 IN struct thread *a_td; 1541 }; 1542 */ 1543 { 1544 struct inode *ip; 1545 u_char *eae, *p; 1546 unsigned easize; 1547 int error, ealen; 1548 1549 ip = VTOI(ap->a_vp); 1550 1551 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) 1552 return (EOPNOTSUPP); 1553 1554 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1555 ap->a_cred, ap->a_td, VREAD); 1556 if (error) 1557 return (error); 1558 1559 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1560 if (error) 1561 return (error); 1562 1563 eae = ip->i_ea_area; 1564 easize = ip->i_ea_len; 1565 1566 ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name, 1567 NULL, &p); 1568 if (ealen >= 0) { 1569 error = 0; 1570 if (ap->a_size != NULL) 1571 *ap->a_size = ealen; 1572 else if (ap->a_uio != NULL) 1573 error = uiomove(p, ealen, ap->a_uio); 1574 } else 1575 error = ENOATTR; 1576 1577 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1578 return(error); 1579 } 1580 1581 /* 1582 * Vnode operation to retrieve extended attributes on a vnode. 
1583 */ 1584 static int 1585 ffs_listextattr(struct vop_listextattr_args *ap) 1586 /* 1587 vop_listextattr { 1588 IN struct vnode *a_vp; 1589 IN int a_attrnamespace; 1590 INOUT struct uio *a_uio; 1591 OUT size_t *a_size; 1592 IN struct ucred *a_cred; 1593 IN struct thread *a_td; 1594 }; 1595 */ 1596 { 1597 struct inode *ip; 1598 u_char *eae, *p, *pe, *pn; 1599 unsigned easize; 1600 uint32_t ul; 1601 int error, ealen; 1602 1603 ip = VTOI(ap->a_vp); 1604 1605 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) 1606 return (EOPNOTSUPP); 1607 1608 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1609 ap->a_cred, ap->a_td, VREAD); 1610 if (error) 1611 return (error); 1612 1613 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1614 if (error) 1615 return (error); 1616 eae = ip->i_ea_area; 1617 easize = ip->i_ea_len; 1618 1619 error = 0; 1620 if (ap->a_size != NULL) 1621 *ap->a_size = 0; 1622 pe = eae + easize; 1623 for(p = eae; error == 0 && p < pe; p = pn) { 1624 bcopy(p, &ul, sizeof(ul)); 1625 pn = p + ul; 1626 if (pn > pe) 1627 break; 1628 p += sizeof(ul); 1629 if (*p++ != ap->a_attrnamespace) 1630 continue; 1631 p++; /* pad2 */ 1632 ealen = *p; 1633 if (ap->a_size != NULL) { 1634 *ap->a_size += ealen + 1; 1635 } else if (ap->a_uio != NULL) { 1636 error = uiomove(p, ealen + 1, ap->a_uio); 1637 } 1638 } 1639 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1640 return(error); 1641 } 1642 1643 /* 1644 * Vnode operation to set a named attribute. 1645 */ 1646 static int 1647 ffs_setextattr(struct vop_setextattr_args *ap) 1648 /* 1649 vop_setextattr { 1650 IN struct vnode *a_vp; 1651 IN int a_attrnamespace; 1652 IN const char *a_name; 1653 INOUT struct uio *a_uio; 1654 IN struct ucred *a_cred; 1655 IN struct thread *a_td; 1656 }; 1657 */ 1658 { 1659 struct inode *ip; 1660 struct fs *fs; 1661 uint32_t ealength, ul; 1662 ssize_t ealen; 1663 int olen, eapad1, eapad2, error, i, easize; 1664 u_char *eae, *p; 1665 1666 ip = VTOI(ap->a_vp); 1667 fs = ITOFS(ip); 1668 1669 if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK) 1670 return (EOPNOTSUPP); 1671 1672 if (strlen(ap->a_name) == 0) 1673 return (EINVAL); 1674 1675 /* XXX Now unsupported API to delete EAs using NULL uio. */ 1676 if (ap->a_uio == NULL) 1677 return (EOPNOTSUPP); 1678 1679 if (ap->a_vp->v_mount->mnt_flag & MNT_RDONLY) 1680 return (EROFS); 1681 1682 ealen = ap->a_uio->uio_resid; 1683 if (ealen < 0 || ealen > lblktosize(fs, NXADDR)) 1684 return (EINVAL); 1685 1686 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1687 ap->a_cred, ap->a_td, VWRITE); 1688 if (error) { 1689 1690 /* 1691 * ffs_lock_ea is not needed there, because the vnode 1692 * must be exclusively locked. 
1693 */ 1694 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1695 ip->i_ea_error = error; 1696 return (error); 1697 } 1698 1699 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1700 if (error) 1701 return (error); 1702 1703 ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name); 1704 eapad1 = 8 - (ealength % 8); 1705 if (eapad1 == 8) 1706 eapad1 = 0; 1707 eapad2 = 8 - (ealen % 8); 1708 if (eapad2 == 8) 1709 eapad2 = 0; 1710 ealength += eapad1 + ealen + eapad2; 1711 1712 eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK); 1713 bcopy(ip->i_ea_area, eae, ip->i_ea_len); 1714 easize = ip->i_ea_len; 1715 1716 olen = ffs_findextattr(eae, easize, 1717 ap->a_attrnamespace, ap->a_name, &p, NULL); 1718 if (olen == -1) { 1719 /* new, append at end */ 1720 p = eae + easize; 1721 easize += ealength; 1722 } else { 1723 bcopy(p, &ul, sizeof ul); 1724 i = p - eae + ul; 1725 if (ul != ealength) { 1726 bcopy(p + ul, p + ealength, easize - i); 1727 easize += (ealength - ul); 1728 } 1729 } 1730 if (easize > lblktosize(fs, NXADDR)) { 1731 free(eae, M_TEMP); 1732 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1733 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1734 ip->i_ea_error = ENOSPC; 1735 return(ENOSPC); 1736 } 1737 bcopy(&ealength, p, sizeof(ealength)); 1738 p += sizeof(ealength); 1739 *p++ = ap->a_attrnamespace; 1740 *p++ = eapad2; 1741 *p++ = strlen(ap->a_name); 1742 strcpy(p, ap->a_name); 1743 p += strlen(ap->a_name); 1744 bzero(p, eapad1); 1745 p += eapad1; 1746 error = uiomove(p, ealen, ap->a_uio); 1747 if (error) { 1748 free(eae, M_TEMP); 1749 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1750 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1751 ip->i_ea_error = error; 1752 return(error); 1753 } 1754 p += ealen; 1755 bzero(p, eapad2); 1756 1757 p = ip->i_ea_area; 1758 ip->i_ea_area = eae; 1759 ip->i_ea_len = easize; 1760 free(p, M_TEMP); 1761 error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td); 1762 return(error); 1763 } 1764 1765 /* 1766 * Vnode pointer to File handle 1767 */ 1768 static int 1769 ffs_vptofh(struct vop_vptofh_args *ap) 1770 /* 1771 vop_vptofh { 1772 IN struct vnode *a_vp; 1773 IN struct fid *a_fhp; 1774 }; 1775 */ 1776 { 1777 struct inode *ip; 1778 struct ufid *ufhp; 1779 1780 ip = VTOI(ap->a_vp); 1781 ufhp = (struct ufid *)ap->a_fhp; 1782 ufhp->ufid_len = sizeof(struct ufid); 1783 ufhp->ufid_ino = ip->i_number; 1784 ufhp->ufid_gen = ip->i_gen; 1785 return (0); 1786 } 1787