/*-
 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 * from: $FreeBSD: .../ufs/ufs_readwrite.c,v 1.96 2002/08/12 09:22:11 phk ...
 *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/extattr.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include "opt_directio.h"
#include "opt_ffs.h"

#ifdef DIRECTIO
extern int	ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone);
#endif
static vop_fsync_t	ffs_fsync;
static _vop_lock_t	ffs_lock;
static vop_getpages_t	ffs_getpages;
static vop_read_t	ffs_read;
static vop_write_t	ffs_write;
static int	ffs_extread(struct vnode *vp, struct uio *uio, int ioflag);
static int	ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
		    struct ucred *cred);
static vop_strategy_t	ffsext_strategy;
static vop_closeextattr_t	ffs_closeextattr;
static vop_deleteextattr_t	ffs_deleteextattr;
static vop_getextattr_t	ffs_getextattr;
static vop_listextattr_t	ffs_listextattr;
static vop_openextattr_t	ffs_openextattr;
static vop_setextattr_t	ffs_setextattr;

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops1 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	._vop_lock =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
};

struct vop_vector ffs_fifoops1 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	.vop_reallocblks =	ffs_reallocblks, /* XXX: really ??? */
};

/* Global vfs data structures for ufs. */
struct vop_vector ffs_vnodeops2 = {
	.vop_default =		&ufs_vnodeops,
	.vop_fsync =		ffs_fsync,
	.vop_getpages =		ffs_getpages,
	._vop_lock =		ffs_lock,
	.vop_read =		ffs_read,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_write =		ffs_write,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
};

struct vop_vector ffs_fifoops2 = {
	.vop_default =		&ufs_fifoops,
	.vop_fsync =		ffs_fsync,
	._vop_lock =		ffs_lock,
	.vop_reallocblks =	ffs_reallocblks,
	.vop_strategy =		ffsext_strategy,
	.vop_closeextattr =	ffs_closeextattr,
	.vop_deleteextattr =	ffs_deleteextattr,
	.vop_getextattr =	ffs_getextattr,
	.vop_listextattr =	ffs_listextattr,
	.vop_openextattr =	ffs_openextattr,
	.vop_setextattr =	ffs_setextattr,
};

/*
 * Synch an open file.
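 *
 * Flush the vnode's dirty buffers via ffs_syncvnode(); for a
 * synchronous request (MNT_WAIT) on a soft updates mount, also
 * push any remaining dependency work with softdep_fsync().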
 */
/* ARGSUSED */
static int
ffs_fsync(struct vop_fsync_args *ap)
{
	int error;

	error = ffs_syncvnode(ap->a_vp, ap->a_waitfor);
	if (error)
		return (error);
	if (ap->a_waitfor == MNT_WAIT &&
	    (ap->a_vp->v_mount->mnt_flag & MNT_SOFTDEP))
		error = softdep_fsync(ap->a_vp);
	return (error);
}

int
ffs_syncvnode(struct vnode *vp, int waitfor)
{
	struct inode *ip = VTOI(vp);
	struct buf *bp;
	struct buf *nbp;
	int s, error, wait, passes, skipmeta;
	ufs_lbn_t lbn;

	wait = (waitfor == MNT_WAIT);
	lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1));

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	passes = NIADDR + 1;
	skipmeta = 0;
	if (wait)
		skipmeta = 1;
	s = splbio();
	VI_LOCK(vp);
loop:
	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
		/*
		 * Reasons to skip this buffer: it has already been considered
		 * on this pass, this pass is the first time through on a
		 * synchronous flush request and the buffer being considered
		 * is metadata, the buffer has dependencies that will cause
		 * it to be redirtied and it has not already been deferred,
		 * or it is already being written.
		 */
		if ((bp->b_vflags & BV_SCANNED) != 0)
			continue;
		bp->b_vflags |= BV_SCANNED;
		if (skipmeta == 1 && bp->b_lblkno < 0)
			continue;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
			continue;
		VI_UNLOCK(vp);
		if (!wait && LIST_FIRST(&bp->b_dep) != NULL &&
		    (bp->b_flags & B_DEFERRED) == 0 &&
		    buf_countdeps(bp, 0)) {
			bp->b_flags |= B_DEFERRED;
			BUF_UNLOCK(bp);
			VI_LOCK(vp);
			continue;
		}
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("ffs_fsync: not dirty");
		/*
		 * If this is a synchronous flush request, or it is not a
		 * file or device, start the write on this buffer immediately.
		 */
		if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) {

			/*
			 * On our final pass through, do all I/O synchronously
			 * so that we can find out if our flush is failing
			 * because of write errors.
			 */
			if (passes > 0 || !wait) {
				if ((bp->b_flags & B_CLUSTEROK) && !wait) {
					(void) vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					splx(s);
					(void) bawrite(bp);
					s = splbio();
				}
			} else {
				bremfree(bp);
				splx(s);
				if ((error = bwrite(bp)) != 0)
					return (error);
				s = splbio();
			}
		} else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) {
			/*
			 * If the buffer is for data that has been truncated
			 * off the file, then throw it away.
			 */
			bremfree(bp);
			bp->b_flags |= B_INVAL | B_NOCACHE;
			splx(s);
			brelse(bp);
			s = splbio();
		} else
			vfs_bio_awrite(bp);

		/*
		 * Since we may have slept during the I/O, we need
		 * to start from a known point.
		 */
		VI_LOCK(vp);
		nbp = TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd);
	}
	/*
	 * If we were asked to do this synchronously, then go back for
	 * another pass, this time doing the metadata.
	 */
	if (skipmeta) {
		skipmeta = 0;
		goto loop;
	}

	if (wait) {
		bufobj_wwait(&vp->v_bufobj, 3, 0);
		VI_UNLOCK(vp);

		/*
		 * Ensure that any filesystem metadata associated
		 * with the vnode has been written.
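		 * With soft updates, that means draining the dependency
		 * worklist via softdep_sync_metadata() before the final
		 * ffs_update() below.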
		 */
		splx(s);
		if ((error = softdep_sync_metadata(vp)) != 0)
			return (error);
		s = splbio();

		VI_LOCK(vp);
		if (vp->v_bufobj.bo_dirty.bv_cnt > 0) {
			/*
			 * Block devices associated with filesystems may
			 * have new I/O requests posted for them even if
			 * the vnode is locked, so no amount of trying will
			 * get them clean. Thus we give block devices a
			 * good effort, then just give up. For all other file
			 * types, go around and try again until it is clean.
			 */
			if (passes > 0) {
				passes -= 1;
				goto loop;
			}
#ifdef DIAGNOSTIC
			if (!vn_isdisk(vp, NULL))
				vprint("ffs_fsync: dirty", vp);
#endif
		}
	}
	VI_UNLOCK(vp);
	splx(s);
	return (ffs_update(vp, wait));
}

static int
ffs_lock(ap)
	struct _vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct thread *a_td;
		char *file;
		int line;
	} */ *ap;
{
#ifndef NO_FFS_SNAPSHOT
	struct vnode *vp;
	int flags;
	struct lock *lkp;
	int result;

	switch (ap->a_flags & LK_TYPE_MASK) {
	case LK_SHARED:
	case LK_UPGRADE:
	case LK_EXCLUSIVE:
		vp = ap->a_vp;
		flags = ap->a_flags;
		for (;;) {
			/*
			 * vnode interlock must be held to ensure that
			 * the possibly external lock isn't freed,
			 * e.g. when mutating from snapshot file vnode
			 * to regular file vnode.
			 */
			if ((flags & LK_INTERLOCK) == 0) {
				VI_LOCK(vp);
				flags |= LK_INTERLOCK;
			}
			lkp = vp->v_vnlock;
			result = _lockmgr(lkp, flags, VI_MTX(vp), ap->a_td,
			    ap->a_file, ap->a_line);
			if (lkp == vp->v_vnlock || result != 0)
				break;
			/*
			 * Apparent success, except that the vnode
			 * mutated between snapshot file vnode and
			 * regular file vnode while this process
			 * slept. The lock currently held is not the
			 * right lock. Release it, and try to get the
			 * new lock.
			 */
			(void) _lockmgr(lkp, LK_RELEASE, VI_MTX(vp), ap->a_td,
			    ap->a_file, ap->a_line);
			if ((flags & LK_TYPE_MASK) == LK_UPGRADE)
				flags = (flags & ~LK_TYPE_MASK) | LK_EXCLUSIVE;
			flags &= ~LK_INTERLOCK;
		}
		break;
	default:
		result = _VOP_LOCK_APV(&ufs_vnodeops, ap);
	}
	return (result);
#else
	return (_VOP_LOCK_APV(&ufs_vnodeops, ap));
#endif
}

/*
 * Vnode op for reading.
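 *
 * The read is done one filesystem block at a time: map the offset to
 * a logical block, bring it in with bread(), cluster_read(), or a
 * breadn() readahead depending on mount flags and the access pattern,
 * then copy the data out with uiomove().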
 */
/* ARGSUSED */
static int
ffs_read(ap)
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;
	int seqcount;
	int ioflag;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extread(vp, uio, ioflag));
#else
		panic("ffs_read+IO_EXT");
#endif
#ifdef DIRECTIO
	if ((ioflag & IO_DIRECT) != 0) {
		int workdone;

		error = ffs_rawread(vp, uio, &workdone);
		if (error != 0 || workdone != 0)
			return (error);
	}
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen)
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_read: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_read: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if (uio->uio_offset < ip->i_size &&
	    uio->uio_offset >= fs->fs_maxfilesize)
		return (EOVERFLOW);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = blksize(fs, ip, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= ip->i_size) {
			/*
			 * Don't do readahead if this is the end of the file.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			/*
			 * Otherwise if we are allowed to cluster,
			 * grab as much as we can.
			 *
			 * XXX  This may not be a win if we are not
			 * doing sequential access.
			 */
			error = cluster_read(vp, ip->i_size, lbn,
			    size, NOCRED, blkoffset + uio->uio_resid,
			    seqcount, &bp);
		} else if (seqcount > 1) {
			/*
			 * If we are NOT allowed to cluster, then
			 * if we appear to be acting sequentially,
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		} else {
			/*
			 * Failing all of the above, just read what the
			 * user asked for. Interestingly, the same as
			 * the first option above.
			 */
			error = bread(vp, lbn, size, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Vnode op for writing.
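 *
 * Each pass allocates or fetches the underlying block with
 * UFS_BALLOC(), copies the user data in with uiomove(), and then
 * picks bwrite(), bawrite(), cluster_write(), or bdwrite() based on
 * IO_SYNC, memory pressure, and whether a full block was filled.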
 */
static int
ffs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	struct thread *td;
	ufs_lbn_t lbn;
	off_t osize;
	int seqcount;
	int blkoffset, error, flags, ioflag, resid, size, xfersize;

	vp = ap->a_vp;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	if (ap->a_ioflag & IO_EXT)
#ifdef notyet
		return (ffs_extwrite(vp, uio, ioflag, ap->a_cred));
#else
		panic("ffs_write+IO_EXT");
#endif

	seqcount = ap->a_ioflag >> IO_SEQSHIFT;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = ip->i_size;
		if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		panic("ffs_write: dir write");
		break;
	default:
		panic("ffs_write: type %p %d (%d,%d)", vp, (int)vp->v_type,
		    (int)uio->uio_offset, (int)uio->uio_resid);
	}

	KASSERT(uio->uio_resid >= 0, ("ffs_write: uio->uio_resid < 0"));
	KASSERT(uio->uio_offset >= 0, ("ffs_write: uio->uio_offset < 0"));
	fs = ip->i_fs;
	if ((uoff_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	td = uio->uio_td;
	if (vp->v_type == VREG && td != NULL) {
		PROC_LOCK(td->td_proc);
		if (uio->uio_offset + uio->uio_resid >
		    lim_cur(td->td_proc, RLIMIT_FSIZE)) {
			psignal(td->td_proc, SIGXFSZ);
			PROC_UNLOCK(td->td_proc);
			return (EFBIG);
		}
		PROC_UNLOCK(td->td_proc);
	}

	resid = uio->uio_resid;
	osize = ip->i_size;
	if (seqcount > BA_SEQMAX)
		flags = BA_SEQMAX << BA_SEQSHIFT;
	else
		flags = seqcount << BA_SEQSHIFT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (uio->uio_offset + xfersize > ip->i_size)
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		/* XXX is uio->uio_offset the right thing here? */
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;
		if ((ioflag & (IO_SYNC|IO_INVAL)) == (IO_SYNC|IO_INVAL))
			bp->b_flags |= B_NOCACHE;

		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_SET(ip, i_size, ip->i_size);
		}

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    (ioflag & IO_ASYNC)) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else if (xfersize + blkoffset == fs->fs_bsize) {
			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
				bp->b_flags |= B_CLUSTEROK;
				cluster_write(vp, bp, ip->i_size, seqcount);
			} else {
				bawrite(bp);
			}
		} else if (ioflag & IO_DIRECT) {
			bp->b_flags |= B_CLUSTEROK;
			bawrite(bp);
		} else {
			bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred &&
	    priv_check_cred(ap->a_cred, PRIV_VFS_CLEARSUGID,
	    SUSER_ALLOWJAIL)) {
		ip->i_mode &= ~(ISUID | ISGID);
		DIP_SET(ip, i_mode, ip->i_mode);
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_NORMAL | (ioflag & IO_SYNC),
			    ap->a_cred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * get page routine
 */
static int
ffs_getpages(ap)
	struct vop_getpages_args *ap;
{
	int i;
	vm_page_t mreq;
	int pcount;

	pcount = round_page(ap->a_count) / PAGE_SIZE;
	mreq = ap->a_m[ap->a_reqpage];

	/*
	 * if ANY DEV_BSIZE blocks are valid on a large filesystem block,
	 * then the entire page is valid.  Since the page may be mapped,
	 * user programs might reference data beyond the actual end of file
	 * occurring within the page.  We have to zero that data.
	 */
	VM_OBJECT_LOCK(mreq->object);
	if (mreq->valid) {
		if (mreq->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(mreq, TRUE);
		vm_page_lock_queues();
		for (i = 0; i < pcount; i++) {
			if (i != ap->a_reqpage) {
				vm_page_free(ap->a_m[i]);
			}
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(mreq->object);
		return (VM_PAGER_OK);
	}
	VM_OBJECT_UNLOCK(mreq->object);

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_reqpage));
}

/*
 * Extended attribute area reading.
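 *
 * On UFS2 the external attribute area is addressed with negative
 * logical block numbers (-1 - lbn), which is why the bread() and
 * breadn() calls below negate the block numbers they pass down.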
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");
#endif
	orig_resid = uio->uio_resid;
	KASSERT(orig_resid >= 0, ("ffs_extread: uio->uio_resid < 0"));
	if (orig_resid == 0)
		return (0);
	KASSERT(uio->uio_offset >= 0, ("ffs_extread: uio->uio_offset < 0"));

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type (fragment or full block,
		 * depending).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read.  Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing.  The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it.  We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error, because the loop
	 * above resets bp to NULL on each iteration and on normal
	 * completion has not set a new value into it, so it must have
	 * come from a 'break' statement.
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
		VI_LOCK(vp);
		ip->i_flag |= IN_ACCESS;
		VI_UNLOCK(vp);
	}
	return (error);
}

/*
 * Extended attribute area writing.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int blkoffset, error, flags, resid, size, xfersize;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extwrite: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;
	KASSERT(uio->uio_offset >= 0, ("ffs_extwrite: uio->uio_offset < 0"));
	KASSERT(uio->uio_resid >= 0, ("ffs_extwrite: uio->uio_resid < 0"));
	if ((uoff_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
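		 * Unlike ffs_write(), a full block is simply pushed with
		 * bawrite(); the attribute area is never clustered.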
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
		    buf_dirty_count_severe() ||
		    xfersize + blkoffset == fs->fs_bsize ||
		    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ucred &&
	    priv_check_cred(ucred, PRIV_VFS_CLEARSUGID, SUSER_ALLOWJAIL)) {
		ip->i_mode &= ~(ISUID | ISGID);
		dp->di_mode = ip->i_mode;
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)ffs_truncate(vp, osize,
			    IO_EXT | (ioflag & IO_SYNC), ucred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = ffs_update(vp, 1);
	return (error);
}

/*
 * Find a particular extended attribute in the attribute area.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name,
    u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return (-1);
}

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct uio luio;
	struct iovec liovec;
	int easize, error;
	u_char *eae;

	ip = VTOI(vp);
	dp = ip->i_din2;
	easize = dp->di_extsize;

	eae = malloc(easize + extra, M_TEMP, M_WAITOK);

	liovec.iov_base = eae;
	liovec.iov_len = easize;
	luio.uio_iov = &liovec;
	luio.uio_iovcnt = 1;
	luio.uio_offset = 0;
	luio.uio_resid = easize;
	luio.uio_segflg = UIO_SYSSPACE;
	luio.uio_rw = UIO_READ;
	luio.uio_td = td;

	error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC);
	if (error) {
		free(eae, M_TEMP);
		return (error);
	}
	*p = eae;
	return (0);
}

static int
ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	int error;

	ip = VTOI(vp);

	if (ip->i_ea_area != NULL)
		return (EBUSY);
	dp = ip->i_din2;
	error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0);
	if (error)
		return (error);
	ip->i_ea_len = dp->di_extsize;
	ip->i_ea_error = 0;
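	/*
	 * The attribute area is now cached in ip->i_ea_area until
	 * ffs_close_ea() either commits it back to disk or discards it.
	 */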
	return (0);
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred,
    struct thread *td)
{
	struct inode *ip;
	struct uio luio;
	struct iovec liovec;
	int error;
	struct ufs2_dinode *dp;

	ip = VTOI(vp);
	if (ip->i_ea_area == NULL)
		return (EINVAL);
	dp = ip->i_din2;
	error = ip->i_ea_error;
	if (commit && error == 0) {
		if (cred == NOCRED)
			cred = vp->v_mount->mnt_cred;
		liovec.iov_base = ip->i_ea_area;
		liovec.iov_len = ip->i_ea_len;
		luio.uio_iov = &liovec;
		luio.uio_iovcnt = 1;
		luio.uio_offset = 0;
		luio.uio_resid = ip->i_ea_len;
		luio.uio_segflg = UIO_SYSSPACE;
		luio.uio_rw = UIO_WRITE;
		luio.uio_td = td;
		/* XXX: I'm not happy about truncating to zero size */
		if (ip->i_ea_len < dp->di_extsize)
			error = ffs_truncate(vp, 0, IO_EXT, cred, td);
		error = ffs_extwrite(vp, &luio, IO_EXT | IO_SYNC, cred);
	}
	free(ip->i_ea_area, M_TEMP);
	ip->i_ea_area = NULL;
	ip->i_ea_len = 0;
	ip->i_ea_error = 0;
	return (error);
}

/*
 * Vnode extattr strategy routine for fifos.
 *
 * We need to check for a read or write of the external attributes.
 * Otherwise we just fall through and do the usual thing.
 */
static int
ffsext_strategy(struct vop_strategy_args *ap)
/*
struct vop_strategy_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	struct buf *a_bp;
};
*/
{
	struct vnode *vp;
	daddr_t lbn;

	vp = ap->a_vp;
	lbn = ap->a_bp->b_lblkno;
	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC &&
	    lbn < 0 && lbn >= -NXADDR)
		return (VOP_STRATEGY_APV(&ufs_vnodeops, ap));
	if (vp->v_type == VFIFO)
		return (VOP_STRATEGY_APV(&ufs_fifoops, ap));
	panic("spec nodes went here");
}

/*
 * Vnode extattr transaction open.
 */
static int
ffs_openextattr(struct vop_openextattr_args *ap)
/*
struct vop_openextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
}

/*
 * Vnode extattr transaction commit/abort
 */
static int
ffs_closeextattr(struct vop_closeextattr_args *ap)
/*
struct vop_closeextattr_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_vp;
	int a_commit;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td));
}

/*
 * Vnode operation to remove a named attribute.
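 *
 * The record is deleted by copying the tail of the attribute area
 * down over it in a scratch copy and shrinking the in-core area; the
 * change reaches the disk when the EA transaction is committed.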
 */
static int
ffs_deleteextattr(struct vop_deleteextattr_args *ap)
/*
vop_deleteextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;
	int stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IWRITE);
	if (error) {
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}

	ealength = eapad1 = ealen = eapad2 = 0;

	eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    &p, NULL);
	if (olen == -1) {
		/* delete but nonexistent */
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		return (ENOATTR);
	}
	bcopy(p, &ul, sizeof(ul));
	i = p - eae + ul;
	if (ul != ealength) {
		bcopy(p + ul, p + ealength, easize - i);
		easize += (ealength - ul);
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	if (stand_alone)
		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve a named extended attribute.
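 *
 * If a_size is non-NULL only the attribute's length is reported;
 * otherwise the value is copied out through a_uio.  A missing
 * attribute yields ENOATTR.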
 */
static int
ffs_getextattr(struct vop_getextattr_args *ap)
/*
vop_getextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p;
	unsigned easize;
	int error, ealen, stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IREAD);
	if (error)
		return (error);

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	ealen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name,
	    NULL, &p);
	if (ealen >= 0) {
		error = 0;
		if (ap->a_size != NULL)
			*ap->a_size = ealen;
		else if (ap->a_uio != NULL)
			error = uiomove(p, ealen, ap->a_uio);
	} else
		error = ENOATTR;
	if (stand_alone)
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to retrieve extended attributes on a vnode.
 */
static int
ffs_listextattr(struct vop_listextattr_args *ap)
/*
vop_listextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	INOUT struct uio *a_uio;
	OUT size_t *a_size;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	u_char *eae, *p, *pe, *pn;
	unsigned easize;
	uint32_t ul;
	int error, ealen, stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IREAD);
	if (error)
		return (error);

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}
	eae = ip->i_ea_area;
	easize = ip->i_ea_len;

	error = 0;
	if (ap->a_size != NULL)
		*ap->a_size = 0;
	pe = eae + easize;
	for (p = eae; error == 0 && p < pe; p = pn) {
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		if (pn > pe)
			break;
		p += sizeof(ul);
		if (*p++ != ap->a_attrnamespace)
			continue;
		p++;	/* pad2 */
		ealen = *p;
		if (ap->a_size != NULL) {
			*ap->a_size += ealen + 1;
		} else if (ap->a_uio != NULL) {
			error = uiomove(p, ealen + 1, ap->a_uio);
		}
	}
	if (stand_alone)
		ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
	return (error);
}

/*
 * Vnode operation to set a named attribute.
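 *
 * A record in the attribute area is assembled here in the layout that
 * ffs_findextattr() parses back, padded to 8-byte alignment (an
 * illustrative sketch, not a declaration used by this file):
 *
 *	uint32_t length;	// total record size, padding included
 *	u_char   namespace;	// a_attrnamespace
 *	u_char   pad2;		// bytes of padding after the value (eapad2)
 *	u_char   namelen;	// strlen(a_name)
 *	char     name[namelen];
 *	u_char   pad1[eapad1];	// aligns the value to 8 bytes
 *	u_char   value[ealen];
 *	u_char   pad[eapad2];	// trails the value out to 8 bytes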
 */
static int
ffs_setextattr(struct vop_setextattr_args *ap)
/*
vop_setextattr {
	IN struct vnode *a_vp;
	IN int a_attrnamespace;
	IN const char *a_name;
	INOUT struct uio *a_uio;
	IN struct ucred *a_cred;
	IN struct thread *a_td;
};
*/
{
	struct inode *ip;
	struct fs *fs;
	uint32_t ealength, ul;
	int ealen, olen, eapad1, eapad2, error, i, easize;
	u_char *eae, *p;
	int stand_alone;

	ip = VTOI(ap->a_vp);
	fs = ip->i_fs;

	if (ap->a_vp->v_type == VCHR)
		return (EOPNOTSUPP);

	if (strlen(ap->a_name) == 0)
		return (EINVAL);

	/* XXX Now unsupported API to delete EAs using NULL uio. */
	if (ap->a_uio == NULL)
		return (EOPNOTSUPP);

	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, IWRITE);
	if (error) {
		if (ip->i_ea_area != NULL && ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}

	if (ip->i_ea_area == NULL) {
		error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td);
		if (error)
			return (error);
		stand_alone = 1;
	} else {
		stand_alone = 0;
	}

	ealen = ap->a_uio->uio_resid;
	ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name);
	eapad1 = 8 - (ealength % 8);
	if (eapad1 == 8)
		eapad1 = 0;
	eapad2 = 8 - (ealen % 8);
	if (eapad2 == 8)
		eapad2 = 0;
	ealength += eapad1 + ealen + eapad2;

	eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK);
	bcopy(ip->i_ea_area, eae, ip->i_ea_len);
	easize = ip->i_ea_len;

	olen = ffs_findextattr(eae, easize,
	    ap->a_attrnamespace, ap->a_name, &p, NULL);
	if (olen == -1) {
		/* new, append at end */
		p = eae + easize;
		easize += ealength;
	} else {
		bcopy(p, &ul, sizeof(ul));
		i = p - eae + ul;
		if (ul != ealength) {
			bcopy(p + ul, p + ealength, easize - i);
			easize += (ealength - ul);
		}
	}
	if (easize > NXADDR * fs->fs_bsize) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = ENOSPC;
		return (ENOSPC);
	}
	bcopy(&ealength, p, sizeof(ealength));
	p += sizeof(ealength);
	*p++ = ap->a_attrnamespace;
	*p++ = eapad2;
	*p++ = strlen(ap->a_name);
	strcpy(p, ap->a_name);
	p += strlen(ap->a_name);
	bzero(p, eapad1);
	p += eapad1;
	error = uiomove(p, ealen, ap->a_uio);
	if (error) {
		free(eae, M_TEMP);
		if (stand_alone)
			ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td);
		else if (ip->i_ea_error == 0)
			ip->i_ea_error = error;
		return (error);
	}
	p += ealen;
	bzero(p, eapad2);

	p = ip->i_ea_area;
	ip->i_ea_area = eae;
	ip->i_ea_len = easize;
	free(p, M_TEMP);
	if (stand_alone)
		error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td);
	return (error);
}