1 /* 2 * Copyright (c) 2002, 2003 Networks Associates Technology, Inc. 3 * All rights reserved. 4 * 5 * This software was developed for the FreeBSD Project by Marshall 6 * Kirk McKusick and Network Associates Laboratories, the Security 7 * Research Division of Network Associates, Inc. under DARPA/SPAWAR 8 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS 9 * research program 10 * 11 * Copyright (c) 1982, 1986, 1989, 1993 12 * The Regents of the University of California. All rights reserved. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. All advertising materials mentioning features or use of this software 23 * must display the following acknowledgement: 24 * This product includes software developed by the University of 25 * California, Berkeley and its contributors. 26 * 4. Neither the name of the University nor the names of its contributors 27 * may be used to endorse or promote products derived from this software 28 * without specific prior written permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 33 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 40 * SUCH DAMAGE. 41 * 42 * @(#)ffs_vnops.c 8.15 (Berkeley) 5/14/95 43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/bio.h> 50 #include <sys/systm.h> 51 #include <sys/buf.h> 52 #include <sys/conf.h> 53 #include <sys/extattr.h> 54 #include <sys/kernel.h> 55 #include <sys/limits.h> 56 #include <sys/malloc.h> 57 #include <sys/mount.h> 58 #include <sys/proc.h> 59 #include <sys/resourcevar.h> 60 #include <sys/signalvar.h> 61 #include <sys/stat.h> 62 #include <sys/vmmeter.h> 63 #include <sys/vnode.h> 64 65 #include <vm/vm.h> 66 #include <vm/vm_extern.h> 67 #include <vm/vm_object.h> 68 #include <vm/vm_page.h> 69 #include <vm/vm_pager.h> 70 #include <vm/vnode_pager.h> 71 72 #include <ufs/ufs/extattr.h> 73 #include <ufs/ufs/quota.h> 74 #include <ufs/ufs/inode.h> 75 #include <ufs/ufs/ufs_extern.h> 76 #include <ufs/ufs/ufsmount.h> 77 78 #include <ufs/ffs/fs.h> 79 #include <ufs/ffs/ffs_extern.h> 80 #include "opt_directio.h" 81 82 #ifdef DIRECTIO 83 extern int ffs_rawread(struct vnode *vp, struct uio *uio, int *workdone); 84 #endif 85 static int ffs_fsync(struct vop_fsync_args *); 86 static int ffs_getpages(struct vop_getpages_args *); 87 static int ffs_read(struct vop_read_args *); 88 static int ffs_write(struct vop_write_args *); 89 static int ffs_extread(struct vnode *vp, struct uio *uio, int ioflag); 90 static int ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, 91 struct ucred *cred); 92 
static int ffsext_strategy(struct vop_strategy_args *); 93 static int ffs_closeextattr(struct vop_closeextattr_args *); 94 static int ffs_deleteextattr(struct vop_deleteextattr_args *); 95 static int ffs_getextattr(struct vop_getextattr_args *); 96 static int ffs_listextattr(struct vop_listextattr_args *); 97 static int ffs_openextattr(struct vop_openextattr_args *); 98 static int ffs_setextattr(struct vop_setextattr_args *); 99 100 101 /* Global vfs data structures for ufs. */ 102 vop_t **ffs_vnodeop_p; 103 static struct vnodeopv_entry_desc ffs_vnodeop_entries[] = { 104 { &vop_default_desc, (vop_t *) ufs_vnoperate }, 105 { &vop_fsync_desc, (vop_t *) ffs_fsync }, 106 { &vop_getpages_desc, (vop_t *) ffs_getpages }, 107 { &vop_read_desc, (vop_t *) ffs_read }, 108 { &vop_reallocblks_desc, (vop_t *) ffs_reallocblks }, 109 { &vop_write_desc, (vop_t *) ffs_write }, 110 { &vop_closeextattr_desc, (vop_t *) ffs_closeextattr }, 111 { &vop_deleteextattr_desc, (vop_t *) ffs_deleteextattr }, 112 { &vop_getextattr_desc, (vop_t *) ffs_getextattr }, 113 { &vop_listextattr_desc, (vop_t *) ffs_listextattr }, 114 { &vop_openextattr_desc, (vop_t *) ffs_openextattr }, 115 { &vop_setextattr_desc, (vop_t *) ffs_setextattr }, 116 { NULL, NULL } 117 }; 118 static struct vnodeopv_desc ffs_vnodeop_opv_desc = 119 { &ffs_vnodeop_p, ffs_vnodeop_entries }; 120 121 vop_t **ffs_specop_p; 122 static struct vnodeopv_entry_desc ffs_specop_entries[] = { 123 { &vop_default_desc, (vop_t *) ufs_vnoperatespec }, 124 { &vop_fsync_desc, (vop_t *) ffs_fsync }, 125 { &vop_reallocblks_desc, (vop_t *) ffs_reallocblks }, 126 { &vop_strategy_desc, (vop_t *) ffsext_strategy }, 127 { &vop_closeextattr_desc, (vop_t *) ffs_closeextattr }, 128 { &vop_deleteextattr_desc, (vop_t *) ffs_deleteextattr }, 129 { &vop_getextattr_desc, (vop_t *) ffs_getextattr }, 130 { &vop_listextattr_desc, (vop_t *) ffs_listextattr }, 131 { &vop_openextattr_desc, (vop_t *) ffs_openextattr }, 132 { &vop_setextattr_desc, (vop_t *) 
ffs_setextattr }, 133 { NULL, NULL } 134 }; 135 static struct vnodeopv_desc ffs_specop_opv_desc = 136 { &ffs_specop_p, ffs_specop_entries }; 137 138 vop_t **ffs_fifoop_p; 139 static struct vnodeopv_entry_desc ffs_fifoop_entries[] = { 140 { &vop_default_desc, (vop_t *) ufs_vnoperatefifo }, 141 { &vop_fsync_desc, (vop_t *) ffs_fsync }, 142 { &vop_reallocblks_desc, (vop_t *) ffs_reallocblks }, 143 { &vop_strategy_desc, (vop_t *) ffsext_strategy }, 144 { &vop_closeextattr_desc, (vop_t *) ffs_closeextattr }, 145 { &vop_deleteextattr_desc, (vop_t *) ffs_deleteextattr }, 146 { &vop_getextattr_desc, (vop_t *) ffs_getextattr }, 147 { &vop_listextattr_desc, (vop_t *) ffs_listextattr }, 148 { &vop_openextattr_desc, (vop_t *) ffs_openextattr }, 149 { &vop_setextattr_desc, (vop_t *) ffs_setextattr }, 150 { NULL, NULL } 151 }; 152 static struct vnodeopv_desc ffs_fifoop_opv_desc = 153 { &ffs_fifoop_p, ffs_fifoop_entries }; 154 155 VNODEOP_SET(ffs_vnodeop_opv_desc); 156 VNODEOP_SET(ffs_specop_opv_desc); 157 VNODEOP_SET(ffs_fifoop_opv_desc); 158 159 /* 160 * Synch an open file. 161 */ 162 /* ARGSUSED */ 163 static int 164 ffs_fsync(ap) 165 struct vop_fsync_args /* { 166 struct vnode *a_vp; 167 struct ucred *a_cred; 168 int a_waitfor; 169 struct thread *a_td; 170 } */ *ap; 171 { 172 struct vnode *vp = ap->a_vp; 173 struct inode *ip = VTOI(vp); 174 struct buf *bp; 175 struct buf *nbp; 176 int s, error, wait, passes, skipmeta; 177 ufs_lbn_t lbn; 178 179 wait = (ap->a_waitfor == MNT_WAIT); 180 if (vn_isdisk(vp, NULL)) { 181 lbn = INT_MAX; 182 if (vp->v_rdev->si_mountpoint != NULL && 183 (vp->v_rdev->si_mountpoint->mnt_flag & MNT_SOFTDEP)) 184 softdep_fsync_mountdev(vp); 185 } else { 186 lbn = lblkno(ip->i_fs, (ip->i_size + ip->i_fs->fs_bsize - 1)); 187 } 188 189 /* 190 * Flush all dirty buffers associated with a vnode. 
191 */ 192 passes = NIADDR + 1; 193 skipmeta = 0; 194 if (wait) 195 skipmeta = 1; 196 s = splbio(); 197 VI_LOCK(vp); 198 loop: 199 TAILQ_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) 200 bp->b_vflags &= ~BV_SCANNED; 201 for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { 202 nbp = TAILQ_NEXT(bp, b_vnbufs); 203 /* 204 * Reasons to skip this buffer: it has already been considered 205 * on this pass, this pass is the first time through on a 206 * synchronous flush request and the buffer being considered 207 * is metadata, the buffer has dependencies that will cause 208 * it to be redirtied and it has not already been deferred, 209 * or it is already being written. 210 */ 211 if ((bp->b_vflags & BV_SCANNED) != 0) 212 continue; 213 bp->b_vflags |= BV_SCANNED; 214 if ((skipmeta == 1 && bp->b_lblkno < 0)) 215 continue; 216 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) 217 continue; 218 if (!wait && LIST_FIRST(&bp->b_dep) != NULL && 219 (bp->b_flags & B_DEFERRED) == 0 && 220 buf_countdeps(bp, 0)) { 221 bp->b_flags |= B_DEFERRED; 222 BUF_UNLOCK(bp); 223 continue; 224 } 225 VI_UNLOCK(vp); 226 if ((bp->b_flags & B_DELWRI) == 0) 227 panic("ffs_fsync: not dirty"); 228 if (vp != bp->b_vp) 229 panic("ffs_fsync: vp != vp->b_vp"); 230 /* 231 * If this is a synchronous flush request, or it is not a 232 * file or device, start the write on this buffer immediatly. 233 */ 234 if (wait || (vp->v_type != VREG && vp->v_type != VBLK)) { 235 236 /* 237 * On our final pass through, do all I/O synchronously 238 * so that we can find out if our flush is failing 239 * because of write errors. 
240 */ 241 if (passes > 0 || !wait) { 242 if ((bp->b_flags & B_CLUSTEROK) && !wait) { 243 (void) vfs_bio_awrite(bp); 244 } else { 245 bremfree(bp); 246 splx(s); 247 (void) bawrite(bp); 248 s = splbio(); 249 } 250 } else { 251 bremfree(bp); 252 splx(s); 253 if ((error = bwrite(bp)) != 0) 254 return (error); 255 s = splbio(); 256 } 257 } else if ((vp->v_type == VREG) && (bp->b_lblkno >= lbn)) { 258 /* 259 * If the buffer is for data that has been truncated 260 * off the file, then throw it away. 261 */ 262 bremfree(bp); 263 bp->b_flags |= B_INVAL | B_NOCACHE; 264 splx(s); 265 brelse(bp); 266 s = splbio(); 267 } else 268 vfs_bio_awrite(bp); 269 270 /* 271 * Since we may have slept during the I/O, we need 272 * to start from a known point. 273 */ 274 VI_LOCK(vp); 275 nbp = TAILQ_FIRST(&vp->v_dirtyblkhd); 276 } 277 /* 278 * If we were asked to do this synchronously, then go back for 279 * another pass, this time doing the metadata. 280 */ 281 if (skipmeta) { 282 skipmeta = 0; 283 goto loop; 284 } 285 286 if (wait) { 287 while (vp->v_numoutput) { 288 vp->v_iflag |= VI_BWAIT; 289 msleep((caddr_t)&vp->v_numoutput, VI_MTX(vp), 290 PRIBIO + 4, "ffsfsn", 0); 291 } 292 VI_UNLOCK(vp); 293 294 /* 295 * Ensure that any filesystem metatdata associated 296 * with the vnode has been written. 297 */ 298 splx(s); 299 if ((error = softdep_sync_metadata(ap)) != 0) 300 return (error); 301 s = splbio(); 302 303 VI_LOCK(vp); 304 if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) { 305 /* 306 * Block devices associated with filesystems may 307 * have new I/O requests posted for them even if 308 * the vnode is locked, so no amount of trying will 309 * get them clean. Thus we give block devices a 310 * good effort, then just give up. For all other file 311 * types, go around and try again until it is clean. 
312 */ 313 if (passes > 0) { 314 passes -= 1; 315 goto loop; 316 } 317 #ifdef DIAGNOSTIC 318 if (!vn_isdisk(vp, NULL)) 319 vprint("ffs_fsync: dirty", vp); 320 #endif 321 } 322 } 323 VI_UNLOCK(vp); 324 splx(s); 325 return (UFS_UPDATE(vp, wait)); 326 } 327 328 329 /* 330 * Vnode op for reading. 331 */ 332 /* ARGSUSED */ 333 static int 334 ffs_read(ap) 335 struct vop_read_args /* { 336 struct vnode *a_vp; 337 struct uio *a_uio; 338 int a_ioflag; 339 struct ucred *a_cred; 340 } */ *ap; 341 { 342 struct vnode *vp; 343 struct inode *ip; 344 struct uio *uio; 345 struct fs *fs; 346 struct buf *bp; 347 ufs_lbn_t lbn, nextlbn; 348 off_t bytesinfile; 349 long size, xfersize, blkoffset; 350 int error, orig_resid; 351 int seqcount; 352 int ioflag; 353 vm_object_t object; 354 355 vp = ap->a_vp; 356 uio = ap->a_uio; 357 ioflag = ap->a_ioflag; 358 if (ap->a_ioflag & IO_EXT) 359 #ifdef notyet 360 return (ffs_extread(vp, uio, ioflag)); 361 #else 362 panic("ffs_read+IO_EXT"); 363 #endif 364 #ifdef DIRECTIO 365 if ((ioflag & IO_DIRECT) != 0) { 366 int workdone; 367 368 error = ffs_rawread(vp, uio, &workdone); 369 if (error != 0 || workdone != 0) 370 return error; 371 } 372 #endif 373 374 GIANT_REQUIRED; 375 376 seqcount = ap->a_ioflag >> 16; 377 ip = VTOI(vp); 378 379 #ifdef DIAGNOSTIC 380 if (uio->uio_rw != UIO_READ) 381 panic("ffs_read: mode"); 382 383 if (vp->v_type == VLNK) { 384 if ((int)ip->i_size < vp->v_mount->mnt_maxsymlinklen) 385 panic("ffs_read: short symlink"); 386 } else if (vp->v_type != VREG && vp->v_type != VDIR) 387 panic("ffs_read: type %d", vp->v_type); 388 #endif 389 fs = ip->i_fs; 390 if ((u_int64_t)uio->uio_offset > fs->fs_maxfilesize) 391 return (EFBIG); 392 393 orig_resid = uio->uio_resid; 394 if (orig_resid <= 0) 395 return (0); 396 397 object = vp->v_object; 398 399 bytesinfile = ip->i_size - uio->uio_offset; 400 if (bytesinfile <= 0) { 401 if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) 402 ip->i_flag |= IN_ACCESS; 403 return 0; 404 } 405 406 if (object) { 
407 vm_object_reference(object); 408 } 409 410 /* 411 * Ok so we couldn't do it all in one vm trick... 412 * so cycle around trying smaller bites.. 413 */ 414 for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) { 415 if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0) 416 break; 417 418 lbn = lblkno(fs, uio->uio_offset); 419 nextlbn = lbn + 1; 420 421 /* 422 * size of buffer. The buffer representing the 423 * end of the file is rounded up to the size of 424 * the block type ( fragment or full block, 425 * depending ). 426 */ 427 size = blksize(fs, ip, lbn); 428 blkoffset = blkoff(fs, uio->uio_offset); 429 430 /* 431 * The amount we want to transfer in this iteration is 432 * one FS block less the amount of the data before 433 * our startpoint (duh!) 434 */ 435 xfersize = fs->fs_bsize - blkoffset; 436 437 /* 438 * But if we actually want less than the block, 439 * or the file doesn't have a whole block more of data, 440 * then use the lesser number. 441 */ 442 if (uio->uio_resid < xfersize) 443 xfersize = uio->uio_resid; 444 if (bytesinfile < xfersize) 445 xfersize = bytesinfile; 446 447 if (lblktosize(fs, nextlbn) >= ip->i_size) { 448 /* 449 * Don't do readahead if this is the end of the file. 450 */ 451 error = bread(vp, lbn, size, NOCRED, &bp); 452 } else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) { 453 /* 454 * Otherwise if we are allowed to cluster, 455 * grab as much as we can. 456 * 457 * XXX This may not be a win if we are not 458 * doing sequential access. 459 */ 460 error = cluster_read(vp, ip->i_size, lbn, 461 size, NOCRED, uio->uio_resid, seqcount, &bp); 462 } else if (seqcount > 1) { 463 /* 464 * If we are NOT allowed to cluster, then 465 * if we appear to be acting sequentially, 466 * fire off a request for a readahead 467 * as well as a read. Note that the 4th and 5th 468 * arguments point to arrays of the size specified in 469 * the 6th argument. 
470 */ 471 int nextsize = blksize(fs, ip, nextlbn); 472 error = breadn(vp, lbn, 473 size, &nextlbn, &nextsize, 1, NOCRED, &bp); 474 } else { 475 /* 476 * Failing all of the above, just read what the 477 * user asked for. Interestingly, the same as 478 * the first option above. 479 */ 480 error = bread(vp, lbn, size, NOCRED, &bp); 481 } 482 if (error) { 483 brelse(bp); 484 bp = NULL; 485 break; 486 } 487 488 /* 489 * If IO_DIRECT then set B_DIRECT for the buffer. This 490 * will cause us to attempt to release the buffer later on 491 * and will cause the buffer cache to attempt to free the 492 * underlying pages. 493 */ 494 if (ioflag & IO_DIRECT) 495 bp->b_flags |= B_DIRECT; 496 497 /* 498 * We should only get non-zero b_resid when an I/O error 499 * has occurred, which should cause us to break above. 500 * However, if the short read did not cause an error, 501 * then we want to ensure that we do not uiomove bad 502 * or uninitialized data. 503 */ 504 size -= bp->b_resid; 505 if (size < xfersize) { 506 if (size == 0) 507 break; 508 xfersize = size; 509 } 510 511 { 512 /* 513 * otherwise use the general form 514 */ 515 error = 516 uiomove((char *)bp->b_data + blkoffset, 517 (int)xfersize, uio); 518 } 519 520 if (error) 521 break; 522 523 if ((ioflag & (IO_VMIO|IO_DIRECT)) && 524 (LIST_FIRST(&bp->b_dep) == NULL)) { 525 /* 526 * If there are no dependencies, and it's VMIO, 527 * then we don't need the buf, mark it available 528 * for freeing. The VM has the data. 529 */ 530 bp->b_flags |= B_RELBUF; 531 brelse(bp); 532 } else { 533 /* 534 * Otherwise let whoever 535 * made the request take care of 536 * freeing it. We just queue 537 * it onto another list. 538 */ 539 bqrelse(bp); 540 } 541 } 542 543 /* 544 * This can only happen in the case of an error 545 * because the loop above resets bp to NULL on each iteration 546 * and on normal completion has not set a new value into it. 
547 * so it must have come from a 'break' statement 548 */ 549 if (bp != NULL) { 550 if ((ioflag & (IO_VMIO|IO_DIRECT)) && 551 (LIST_FIRST(&bp->b_dep) == NULL)) { 552 bp->b_flags |= B_RELBUF; 553 brelse(bp); 554 } else { 555 bqrelse(bp); 556 } 557 } 558 559 if (object) { 560 VM_OBJECT_LOCK(object); 561 vm_object_vndeallocate(object); 562 } 563 if ((error == 0 || uio->uio_resid != orig_resid) && 564 (vp->v_mount->mnt_flag & MNT_NOATIME) == 0) 565 ip->i_flag |= IN_ACCESS; 566 return (error); 567 } 568 569 /* 570 * Vnode op for writing. 571 */ 572 static int 573 ffs_write(ap) 574 struct vop_write_args /* { 575 struct vnode *a_vp; 576 struct uio *a_uio; 577 int a_ioflag; 578 struct ucred *a_cred; 579 } */ *ap; 580 { 581 struct vnode *vp; 582 struct uio *uio; 583 struct inode *ip; 584 struct fs *fs; 585 struct buf *bp; 586 struct thread *td; 587 ufs_lbn_t lbn; 588 off_t osize; 589 int seqcount; 590 int blkoffset, error, extended, flags, ioflag, resid, size, xfersize; 591 vm_object_t object; 592 593 vp = ap->a_vp; 594 uio = ap->a_uio; 595 ioflag = ap->a_ioflag; 596 if (ap->a_ioflag & IO_EXT) 597 #ifdef notyet 598 return (ffs_extwrite(vp, uio, ioflag, ap->a_cred)); 599 #else 600 panic("ffs_read+IO_EXT"); 601 #endif 602 603 GIANT_REQUIRED; 604 605 extended = 0; 606 seqcount = ap->a_ioflag >> 16; 607 ip = VTOI(vp); 608 609 object = vp->v_object; 610 if (object) { 611 vm_object_reference(object); 612 } 613 614 #ifdef DIAGNOSTIC 615 if (uio->uio_rw != UIO_WRITE) 616 panic("ffswrite: mode"); 617 #endif 618 619 switch (vp->v_type) { 620 case VREG: 621 if (ioflag & IO_APPEND) 622 uio->uio_offset = ip->i_size; 623 if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size) { 624 if (object) { 625 VM_OBJECT_LOCK(object); 626 vm_object_vndeallocate(object); 627 } 628 return (EPERM); 629 } 630 /* FALLTHROUGH */ 631 case VLNK: 632 break; 633 case VDIR: 634 panic("ffswrite: dir write"); 635 break; 636 default: 637 panic("ffswrite: type %p %d (%d,%d)", vp, (int)vp->v_type, 638 
(int)uio->uio_offset, 639 (int)uio->uio_resid 640 ); 641 } 642 643 fs = ip->i_fs; 644 if (uio->uio_offset < 0 || 645 (u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize) { 646 if (object) { 647 VM_OBJECT_LOCK(object); 648 vm_object_vndeallocate(object); 649 } 650 return (EFBIG); 651 } 652 /* 653 * Maybe this should be above the vnode op call, but so long as 654 * file servers have no limits, I don't think it matters. 655 */ 656 td = uio->uio_td; 657 if (vp->v_type == VREG && td && 658 uio->uio_offset + uio->uio_resid > 659 td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) { 660 PROC_LOCK(td->td_proc); 661 psignal(td->td_proc, SIGXFSZ); 662 PROC_UNLOCK(td->td_proc); 663 if (object) { 664 VM_OBJECT_LOCK(object); 665 vm_object_vndeallocate(object); 666 } 667 return (EFBIG); 668 } 669 670 resid = uio->uio_resid; 671 osize = ip->i_size; 672 if (seqcount > BA_SEQMAX) 673 flags = BA_SEQMAX << BA_SEQSHIFT; 674 else 675 flags = seqcount << BA_SEQSHIFT; 676 if ((ioflag & IO_SYNC) && !DOINGASYNC(vp)) 677 flags |= IO_SYNC; 678 679 for (error = 0; uio->uio_resid > 0;) { 680 lbn = lblkno(fs, uio->uio_offset); 681 blkoffset = blkoff(fs, uio->uio_offset); 682 xfersize = fs->fs_bsize - blkoffset; 683 if (uio->uio_resid < xfersize) 684 xfersize = uio->uio_resid; 685 686 if (uio->uio_offset + xfersize > ip->i_size) 687 vnode_pager_setsize(vp, uio->uio_offset + xfersize); 688 689 /* 690 * We must perform a read-before-write if the transfer size 691 * does not cover the entire buffer. 692 */ 693 if (fs->fs_bsize > xfersize) 694 flags |= BA_CLRBUF; 695 else 696 flags &= ~BA_CLRBUF; 697 /* XXX is uio->uio_offset the right thing here? */ 698 error = UFS_BALLOC(vp, uio->uio_offset, xfersize, 699 ap->a_cred, flags, &bp); 700 if (error != 0) 701 break; 702 /* 703 * If the buffer is not valid we have to clear out any 704 * garbage data from the pages instantiated for the buffer. 
705 * If we do not, a failed uiomove() during a write can leave 706 * the prior contents of the pages exposed to a userland 707 * mmap(). XXX deal with uiomove() errors a better way. 708 */ 709 if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize) 710 vfs_bio_clrbuf(bp); 711 if (ioflag & IO_DIRECT) 712 bp->b_flags |= B_DIRECT; 713 714 if (uio->uio_offset + xfersize > ip->i_size) { 715 ip->i_size = uio->uio_offset + xfersize; 716 DIP(ip, i_size) = ip->i_size; 717 extended = 1; 718 } 719 720 size = blksize(fs, ip, lbn) - bp->b_resid; 721 if (size < xfersize) 722 xfersize = size; 723 724 error = 725 uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio); 726 if ((ioflag & (IO_VMIO|IO_DIRECT)) && 727 (LIST_FIRST(&bp->b_dep) == NULL)) { 728 bp->b_flags |= B_RELBUF; 729 } 730 731 /* 732 * If IO_SYNC each buffer is written synchronously. Otherwise 733 * if we have a severe page deficiency write the buffer 734 * asynchronously. Otherwise try to cluster, and if that 735 * doesn't do it then either do an async write (if O_DIRECT), 736 * or a delayed write (if not). 737 */ 738 if (ioflag & IO_SYNC) { 739 (void)bwrite(bp); 740 } else if (vm_page_count_severe() || 741 buf_dirty_count_severe() || 742 (ioflag & IO_ASYNC)) { 743 bp->b_flags |= B_CLUSTEROK; 744 bawrite(bp); 745 } else if (xfersize + blkoffset == fs->fs_bsize) { 746 if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) { 747 bp->b_flags |= B_CLUSTEROK; 748 cluster_write(bp, ip->i_size, seqcount); 749 } else { 750 bawrite(bp); 751 } 752 } else if (ioflag & IO_DIRECT) { 753 bp->b_flags |= B_CLUSTEROK; 754 bawrite(bp); 755 } else { 756 bp->b_flags |= B_CLUSTEROK; 757 bdwrite(bp); 758 } 759 if (error || xfersize == 0) 760 break; 761 ip->i_flag |= IN_CHANGE | IN_UPDATE; 762 } 763 /* 764 * If we successfully wrote any data, and we are not the superuser 765 * we clear the setuid and setgid bits as a precaution against 766 * tampering. 
767 */ 768 if (resid > uio->uio_resid && ap->a_cred && 769 suser_cred(ap->a_cred, PRISON_ROOT)) { 770 ip->i_mode &= ~(ISUID | ISGID); 771 DIP(ip, i_mode) = ip->i_mode; 772 } 773 if (resid > uio->uio_resid) 774 VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0)); 775 if (error) { 776 if (ioflag & IO_UNIT) { 777 (void)UFS_TRUNCATE(vp, osize, 778 IO_NORMAL | (ioflag & IO_SYNC), 779 ap->a_cred, uio->uio_td); 780 uio->uio_offset -= resid - uio->uio_resid; 781 uio->uio_resid = resid; 782 } 783 } else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) 784 error = UFS_UPDATE(vp, 1); 785 786 if (object) { 787 VM_OBJECT_LOCK(object); 788 vm_object_vndeallocate(object); 789 } 790 791 return (error); 792 } 793 794 /* 795 * get page routine 796 */ 797 static int 798 ffs_getpages(ap) 799 struct vop_getpages_args *ap; 800 { 801 off_t foff, physoffset; 802 int i, size, bsize; 803 struct vnode *dp, *vp; 804 vm_object_t obj; 805 vm_pindex_t pindex; 806 vm_page_t mreq; 807 int bbackwards, bforwards; 808 int pbackwards, pforwards; 809 int firstpage; 810 ufs2_daddr_t reqblkno, reqlblkno; 811 int poff; 812 int pcount; 813 int rtval; 814 int pagesperblock; 815 816 GIANT_REQUIRED; 817 818 pcount = round_page(ap->a_count) / PAGE_SIZE; 819 mreq = ap->a_m[ap->a_reqpage]; 820 821 /* 822 * if ANY DEV_BSIZE blocks are valid on a large filesystem block, 823 * then the entire page is valid. Since the page may be mapped, 824 * user programs might reference data beyond the actual end of file 825 * occuring within the page. We have to zero that data. 
826 */ 827 VM_OBJECT_LOCK(mreq->object); 828 if (mreq->valid) { 829 if (mreq->valid != VM_PAGE_BITS_ALL) 830 vm_page_zero_invalid(mreq, TRUE); 831 vm_page_lock_queues(); 832 for (i = 0; i < pcount; i++) { 833 if (i != ap->a_reqpage) { 834 vm_page_free(ap->a_m[i]); 835 } 836 } 837 vm_page_unlock_queues(); 838 VM_OBJECT_UNLOCK(mreq->object); 839 return VM_PAGER_OK; 840 } 841 VM_OBJECT_UNLOCK(mreq->object); 842 vp = ap->a_vp; 843 obj = vp->v_object; 844 bsize = vp->v_mount->mnt_stat.f_iosize; 845 pindex = mreq->pindex; 846 foff = IDX_TO_OFF(pindex) /* + ap->a_offset should be zero */; 847 848 if (bsize < PAGE_SIZE) 849 return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, 850 ap->a_count, 851 ap->a_reqpage); 852 853 /* 854 * foff is the file offset of the required page 855 * reqlblkno is the logical block that contains the page 856 * poff is the index of the page into the logical block 857 */ 858 reqlblkno = foff / bsize; 859 poff = (foff % bsize) / PAGE_SIZE; 860 861 dp = VTOI(vp)->i_devvp; 862 if (ufs_bmaparray(vp, reqlblkno, &reqblkno, 0, &bforwards, &bbackwards) 863 || (reqblkno == -1)) { 864 VM_OBJECT_LOCK(obj); 865 vm_page_lock_queues(); 866 for(i = 0; i < pcount; i++) { 867 if (i != ap->a_reqpage) 868 vm_page_free(ap->a_m[i]); 869 } 870 vm_page_unlock_queues(); 871 if (reqblkno == -1) { 872 if ((mreq->flags & PG_ZERO) == 0) 873 pmap_zero_page(mreq); 874 vm_page_undirty(mreq); 875 mreq->valid = VM_PAGE_BITS_ALL; 876 VM_OBJECT_UNLOCK(obj); 877 return VM_PAGER_OK; 878 } else { 879 VM_OBJECT_UNLOCK(obj); 880 return VM_PAGER_ERROR; 881 } 882 } 883 884 physoffset = (off_t)reqblkno * DEV_BSIZE + poff * PAGE_SIZE; 885 pagesperblock = bsize / PAGE_SIZE; 886 /* 887 * find the first page that is contiguous... 888 * note that pbackwards is the number of pages that are contiguous 889 * backwards. 
890 */ 891 firstpage = 0; 892 if (ap->a_count) { 893 pbackwards = poff + bbackwards * pagesperblock; 894 if (ap->a_reqpage > pbackwards) { 895 firstpage = ap->a_reqpage - pbackwards; 896 VM_OBJECT_LOCK(obj); 897 vm_page_lock_queues(); 898 for(i=0;i<firstpage;i++) 899 vm_page_free(ap->a_m[i]); 900 vm_page_unlock_queues(); 901 VM_OBJECT_UNLOCK(obj); 902 } 903 904 /* 905 * pforwards is the number of pages that are contiguous 906 * after the current page. 907 */ 908 pforwards = (pagesperblock - (poff + 1)) + 909 bforwards * pagesperblock; 910 if (pforwards < (pcount - (ap->a_reqpage + 1))) { 911 VM_OBJECT_LOCK(obj); 912 vm_page_lock_queues(); 913 for( i = ap->a_reqpage + pforwards + 1; i < pcount; i++) 914 vm_page_free(ap->a_m[i]); 915 vm_page_unlock_queues(); 916 VM_OBJECT_UNLOCK(obj); 917 pcount = ap->a_reqpage + pforwards + 1; 918 } 919 920 /* 921 * number of pages for I/O corrected for the non-contig pages at 922 * the beginning of the array. 923 */ 924 pcount -= firstpage; 925 } 926 927 /* 928 * calculate the size of the transfer 929 */ 930 931 size = pcount * PAGE_SIZE; 932 933 if ((IDX_TO_OFF(ap->a_m[firstpage]->pindex) + size) > 934 obj->un_pager.vnp.vnp_size) 935 size = obj->un_pager.vnp.vnp_size - 936 IDX_TO_OFF(ap->a_m[firstpage]->pindex); 937 938 physoffset -= foff; 939 rtval = VOP_GETPAGES(dp, &ap->a_m[firstpage], size, 940 (ap->a_reqpage - firstpage), physoffset); 941 942 return (rtval); 943 } 944 945 /* 946 * Extended attribute area reading. 
 */
static int
ffs_extread(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error, orig_resid;

	GIANT_REQUIRED;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_extread: mode");

#endif
	orig_resid = uio->uio_resid;
	if (orig_resid <= 0)
		return (0);

	bytesinfile = dp->di_extsize - uio->uio_offset;
	if (bytesinfile <= 0) {
		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
			ip->i_flag |= IN_ACCESS;
		return 0;
	}

	/*
	 * Copy out the extended attribute area one filesystem block at a
	 * time.  Note that ext area blocks are addressed with negated
	 * logical block numbers (-1 - lbn) below.
	 */
	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = dp->di_extsize - uio->uio_offset) <= 0)
			break;

		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;

		/*
		 * size of buffer.  The buffer representing the
		 * end of the file is rounded up to the size of
		 * the block type ( fragment or full block,
		 * depending ).
		 */
		size = sblksize(fs, dp->di_extsize, lbn);
		blkoffset = blkoff(fs, uio->uio_offset);

		/*
		 * The amount we want to transfer in this iteration is
		 * one FS block less the amount of the data before
		 * our startpoint (duh!)
		 */
		xfersize = fs->fs_bsize - blkoffset;

		/*
		 * But if we actually want less than the block,
		 * or the file doesn't have a whole block more of data,
		 * then use the lesser number.
		 */
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= dp->di_extsize) {
			/*
			 * Don't do readahead if this is the end of the info.
			 */
			error = bread(vp, -1 - lbn, size, NOCRED, &bp);
		} else {
			/*
			 * If we have a second block, then
			 * fire off a request for a readahead
			 * as well as a read. Note that the 4th and 5th
			 * arguments point to arrays of the size specified in
			 * the 6th argument.
			 */
			int nextsize = sblksize(fs, dp->di_extsize, nextlbn);

			nextlbn = -1 - nextlbn;
			error = breadn(vp, -1 - lbn,
			    size, &nextlbn, &nextsize, 1, NOCRED, &bp);
		}
		if (error) {
			brelse(bp);
			bp = NULL;
			break;
		}

		/*
		 * If IO_DIRECT then set B_DIRECT for the buffer.  This
		 * will cause us to attempt to release the buffer later on
		 * and will cause the buffer cache to attempt to free the
		 * underlying pages.
		 */
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}

		error = uiomove((char *)bp->b_data + blkoffset,
		    (int)xfersize, uio);
		if (error)
			break;

		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			/*
			 * If there are no dependencies, and it's VMIO,
			 * then we don't need the buf, mark it available
			 * for freeing. The VM has the data.
			 */
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			/*
			 * Otherwise let whoever
			 * made the request take care of
			 * freeing it. We just queue
			 * it onto another list.
			 */
			bqrelse(bp);
		}
	}

	/*
	 * This can only happen in the case of an error
	 * because the loop above resets bp to NULL on each iteration
	 * and on normal completion has not set a new value into it.
	 * so it must have come from a 'break' statement
	 */
	if (bp != NULL) {
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
			brelse(bp);
		} else {
			bqrelse(bp);
		}
	}

	if ((error == 0 || uio->uio_resid != orig_resid) &&
	    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
		ip->i_flag |= IN_ACCESS;
	return (error);
}

/*
 * Extended attribute area writing.
 *
 * Writes the uio into the inode's extended attribute area, growing
 * di_extsize as needed (bounded by NXADDR blocks).  On error with
 * IO_UNIT set, the ext area is truncated back to its original size.
 */
static int
ffs_extwrite(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *ucred)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	off_t osize;
	int blkoffset, error, flags, resid, size, xfersize;

	GIANT_REQUIRED;

	ip = VTOI(vp);
	fs = ip->i_fs;
	dp = ip->i_din2;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE || fs->fs_magic != FS_UFS2_MAGIC)
		panic("ext_write: mode");
#endif

	if (ioflag & IO_APPEND)
		uio->uio_offset = dp->di_extsize;

	/* The ext area can hold at most NXADDR full blocks. */
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > NXADDR * fs->fs_bsize)
		return (EFBIG);

	resid = uio->uio_resid;
	osize = dp->di_extsize;
	flags = IO_EXT;
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;

		/*
		 * We must perform a read-before-write if the transfer size
		 * does not cover the entire buffer.
		 */
		if (fs->fs_bsize > xfersize)
			flags |= BA_CLRBUF;
		else
			flags &= ~BA_CLRBUF;
		error = UFS_BALLOC(vp, uio->uio_offset, xfersize,
		    ucred, flags, &bp);
		if (error != 0)
			break;
		/*
		 * If the buffer is not valid we have to clear out any
		 * garbage data from the pages instantiated for the buffer.
		 * If we do not, a failed uiomove() during a write can leave
		 * the prior contents of the pages exposed to a userland
		 * mmap().  XXX deal with uiomove() errors a better way.
		 */
		if ((bp->b_flags & B_CACHE) == 0 && fs->fs_bsize <= xfersize)
			vfs_bio_clrbuf(bp);
		if (ioflag & IO_DIRECT)
			bp->b_flags |= B_DIRECT;

		if (uio->uio_offset + xfersize > dp->di_extsize)
			dp->di_extsize = uio->uio_offset + xfersize;

		size = sblksize(fs, dp->di_extsize, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error =
		    uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
		if ((ioflag & (IO_VMIO|IO_DIRECT)) &&
		    (LIST_FIRST(&bp->b_dep) == NULL)) {
			bp->b_flags |= B_RELBUF;
		}

		/*
		 * If IO_SYNC each buffer is written synchronously.  Otherwise
		 * if we have a severe page deficiency write the buffer
		 * asynchronously.  Otherwise try to cluster, and if that
		 * doesn't do it then either do an async write (if O_DIRECT),
		 * or a delayed write (if not).
		 */
		if (ioflag & IO_SYNC) {
			(void)bwrite(bp);
		} else if (vm_page_count_severe() ||
			    buf_dirty_count_severe() ||
			    xfersize + blkoffset == fs->fs_bsize ||
			    (ioflag & (IO_ASYNC | IO_DIRECT)))
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ucred &&
	    suser_cred(ucred, PRISON_ROOT)) {
		ip->i_mode &= ~(ISUID | ISGID);
		dp->di_mode = ip->i_mode;
	}
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)UFS_TRUNCATE(vp, osize,
			    IO_EXT | (ioflag&IO_SYNC), ucred, uio->uio_td);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC))
		error = UFS_UPDATE(vp, 1);
	return (error);
}


/*
 * Vnode operating to retrieve a named extended attribute.
 *
 * Locate a particular EA (nspace:name) in the area (ptr:length), and return
 * the length of the EA, and possibly the pointer to the entry and to the data.
 *
 * On-disk record layout walked here: a 32-bit total record length,
 * a namespace byte, a content-pad byte (eapad2), a name-length byte,
 * the name itself, then padding to an 8-byte boundary before the data.
 * Returns -1 when no matching entry is found.
 */
static int
ffs_findextattr(u_char *ptr, u_int length, int nspace, const char *name, u_char **eap, u_char **eac)
{
	u_char *p, *pe, *pn, *p0;
	int eapad1, eapad2, ealength, ealen, nlen;
	uint32_t ul;

	pe = ptr + length;
	nlen = strlen(name);

	for (p = ptr; p < pe; p = pn) {
		p0 = p;
		bcopy(p, &ul, sizeof(ul));
		pn = p + ul;
		/* make sure this entry is complete */
		if (pn > pe)
			break;
		p += sizeof(uint32_t);
		if (*p != nspace)
			continue;
		p++;
		eapad2 = *p++;
		if (*p != nlen)
			continue;
		p++;
		if (bcmp(p, name, nlen))
			continue;
		/* Header length: length word + 3 bytes + name, padded to 8. */
		ealength = sizeof(uint32_t) + 3 + nlen;
		eapad1 = 8 - (ealength % 8);
		if (eapad1 == 8)
			eapad1 = 0;
		ealength += eapad1;
		/* Content length excludes the header and trailing pad. */
		ealen = ul - ealength - eapad2;
		p += nlen + eapad1;
		if (eap != NULL)
			*eap = p0;
		if (eac != NULL)
			*eac = p;
		return (ealen);
	}
	return(-1);
}

static int
ffs_rdextattr(u_char **p, struct vnode *vp, struct thread *td, int extra)
{
	struct inode *ip;
	struct ufs2_dinode *dp;
	struct uio luio;
	struct iovec liovec;
	int easize, error;
	u_char *eae;
1294 1295 ip = VTOI(vp); 1296 dp = ip->i_din2; 1297 easize = dp->di_extsize; 1298 1299 eae = malloc(easize + extra, M_TEMP, M_WAITOK); 1300 1301 liovec.iov_base = eae; 1302 liovec.iov_len = easize; 1303 luio.uio_iov = &liovec; 1304 luio.uio_iovcnt = 1; 1305 luio.uio_offset = 0; 1306 luio.uio_resid = easize; 1307 luio.uio_segflg = UIO_SYSSPACE; 1308 luio.uio_rw = UIO_READ; 1309 luio.uio_td = td; 1310 1311 error = ffs_extread(vp, &luio, IO_EXT | IO_SYNC); 1312 if (error) { 1313 free(eae, M_TEMP); 1314 return(error); 1315 } 1316 *p = eae; 1317 return (0); 1318 } 1319 1320 static int 1321 ffs_open_ea(struct vnode *vp, struct ucred *cred, struct thread *td) 1322 { 1323 struct inode *ip; 1324 struct ufs2_dinode *dp; 1325 int error; 1326 1327 ip = VTOI(vp); 1328 1329 if (ip->i_ea_area != NULL) 1330 return (EBUSY); 1331 dp = ip->i_din2; 1332 error = ffs_rdextattr(&ip->i_ea_area, vp, td, 0); 1333 if (error) 1334 return (error); 1335 ip->i_ea_len = dp->di_extsize; 1336 ip->i_ea_error = 0; 1337 return (0); 1338 } 1339 1340 /* 1341 * Vnode extattr transaction commit/abort 1342 */ 1343 static int 1344 ffs_close_ea(struct vnode *vp, int commit, struct ucred *cred, struct thread *td) 1345 { 1346 struct inode *ip; 1347 struct uio luio; 1348 struct iovec liovec; 1349 int error; 1350 struct ufs2_dinode *dp; 1351 1352 ip = VTOI(vp); 1353 if (ip->i_ea_area == NULL) 1354 return (EINVAL); 1355 dp = ip->i_din2; 1356 error = ip->i_ea_error; 1357 if (commit && error == 0) { 1358 if (cred == NOCRED) 1359 cred = vp->v_mount->mnt_cred; 1360 liovec.iov_base = ip->i_ea_area; 1361 liovec.iov_len = ip->i_ea_len; 1362 luio.uio_iov = &liovec; 1363 luio.uio_iovcnt = 1; 1364 luio.uio_offset = 0; 1365 luio.uio_resid = ip->i_ea_len; 1366 luio.uio_segflg = UIO_SYSSPACE; 1367 luio.uio_rw = UIO_WRITE; 1368 luio.uio_td = td; 1369 /* XXX: I'm not happy about truncating to zero size */ 1370 if (ip->i_ea_len < dp->di_extsize) 1371 error = ffs_truncate(vp, 0, IO_EXT, cred, td); 1372 error = ffs_extwrite(vp, 
&luio, IO_EXT | IO_SYNC, cred); 1373 } 1374 free(ip->i_ea_area, M_TEMP); 1375 ip->i_ea_area = NULL; 1376 ip->i_ea_len = 0; 1377 ip->i_ea_error = 0; 1378 return (error); 1379 } 1380 1381 /* 1382 * Vnode extattr strategy routine for special devices and fifos. 1383 * 1384 * We need to check for a read or write of the external attributes. 1385 * Otherwise we just fall through and do the usual thing. 1386 */ 1387 static int 1388 ffsext_strategy(struct vop_strategy_args *ap) 1389 /* 1390 struct vop_strategy_args { 1391 struct vnodeop_desc *a_desc; 1392 struct vnode *a_vp; 1393 struct buf *a_bp; 1394 }; 1395 */ 1396 { 1397 struct vnode *vp; 1398 daddr_t lbn; 1399 1400 KASSERT(ap->a_vp == ap->a_bp->b_vp, ("%s(%p != %p)", 1401 __func__, ap->a_vp, ap->a_bp->b_vp)); 1402 vp = ap->a_vp; 1403 lbn = ap->a_bp->b_lblkno; 1404 if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC && 1405 lbn < 0 && lbn >= -NXADDR) 1406 return (ufs_vnoperate((struct vop_generic_args *)ap)); 1407 if (vp->v_type == VFIFO) 1408 return (ufs_vnoperatefifo((struct vop_generic_args *)ap)); 1409 return (ufs_vnoperatespec((struct vop_generic_args *)ap)); 1410 } 1411 1412 /* 1413 * Vnode extattr transaction commit/abort 1414 */ 1415 static int 1416 ffs_openextattr(struct vop_openextattr_args *ap) 1417 /* 1418 struct vop_openextattr_args { 1419 struct vnodeop_desc *a_desc; 1420 struct vnode *a_vp; 1421 IN struct ucred *a_cred; 1422 IN struct thread *a_td; 1423 }; 1424 */ 1425 { 1426 struct inode *ip; 1427 struct fs *fs; 1428 1429 ip = VTOI(ap->a_vp); 1430 fs = ip->i_fs; 1431 if (fs->fs_magic == FS_UFS1_MAGIC) 1432 return (ufs_vnoperate((struct vop_generic_args *)ap)); 1433 1434 if (ap->a_vp->v_type == VCHR) 1435 return (EOPNOTSUPP); 1436 1437 return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td)); 1438 } 1439 1440 1441 /* 1442 * Vnode extattr transaction commit/abort 1443 */ 1444 static int 1445 ffs_closeextattr(struct vop_closeextattr_args *ap) 1446 /* 1447 struct vop_closeextattr_args { 1448 struct vnodeop_desc 
*a_desc; 1449 struct vnode *a_vp; 1450 int a_commit; 1451 IN struct ucred *a_cred; 1452 IN struct thread *a_td; 1453 }; 1454 */ 1455 { 1456 struct inode *ip; 1457 struct fs *fs; 1458 1459 ip = VTOI(ap->a_vp); 1460 fs = ip->i_fs; 1461 if (fs->fs_magic == FS_UFS1_MAGIC) 1462 return (ufs_vnoperate((struct vop_generic_args *)ap)); 1463 1464 if (ap->a_vp->v_type == VCHR) 1465 return (EOPNOTSUPP); 1466 1467 return (ffs_close_ea(ap->a_vp, ap->a_commit, ap->a_cred, ap->a_td)); 1468 } 1469 1470 /* 1471 * Vnode operation to remove a named attribute. 1472 */ 1473 static int 1474 ffs_deleteextattr(struct vop_deleteextattr_args *ap) 1475 /* 1476 vop_deleteextattr { 1477 IN struct vnode *a_vp; 1478 IN int a_attrnamespace; 1479 IN const char *a_name; 1480 IN struct ucred *a_cred; 1481 IN struct thread *a_td; 1482 }; 1483 */ 1484 { 1485 struct inode *ip; 1486 struct fs *fs; 1487 uint32_t ealength, ul; 1488 int ealen, olen, eapad1, eapad2, error, i, easize; 1489 u_char *eae, *p; 1490 int stand_alone; 1491 1492 ip = VTOI(ap->a_vp); 1493 fs = ip->i_fs; 1494 1495 if (fs->fs_magic == FS_UFS1_MAGIC) 1496 return (ufs_vnoperate((struct vop_generic_args *)ap)); 1497 1498 if (ap->a_vp->v_type == VCHR) 1499 return (EOPNOTSUPP); 1500 1501 if (strlen(ap->a_name) == 0) 1502 return (EINVAL); 1503 1504 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1505 ap->a_cred, ap->a_td, IWRITE); 1506 if (error) { 1507 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1508 ip->i_ea_error = error; 1509 return (error); 1510 } 1511 1512 if (ip->i_ea_area == NULL) { 1513 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1514 if (error) 1515 return (error); 1516 stand_alone = 1; 1517 } else { 1518 stand_alone = 0; 1519 } 1520 1521 ealength = eapad1 = ealen = eapad2 = 0; 1522 1523 eae = malloc(ip->i_ea_len, M_TEMP, M_WAITOK); 1524 bcopy(ip->i_ea_area, eae, ip->i_ea_len); 1525 easize = ip->i_ea_len; 1526 1527 olen = ffs_findextattr(eae, easize, ap->a_attrnamespace, ap->a_name, 1528 &p, NULL); 1529 if 
(olen == -1) { 1530 /* delete but nonexistent */ 1531 free(eae, M_TEMP); 1532 if (stand_alone) 1533 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1534 return(ENOATTR); 1535 } 1536 bcopy(p, &ul, sizeof ul); 1537 i = p - eae + ul; 1538 if (ul != ealength) { 1539 bcopy(p + ul, p + ealength, easize - i); 1540 easize += (ealength - ul); 1541 } 1542 if (easize > NXADDR * fs->fs_bsize) { 1543 free(eae, M_TEMP); 1544 if (stand_alone) 1545 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1546 else if (ip->i_ea_error == 0) 1547 ip->i_ea_error = ENOSPC; 1548 return(ENOSPC); 1549 } 1550 p = ip->i_ea_area; 1551 ip->i_ea_area = eae; 1552 ip->i_ea_len = easize; 1553 free(p, M_TEMP); 1554 if (stand_alone) 1555 error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td); 1556 return(error); 1557 } 1558 1559 /* 1560 * Vnode operation to retrieve a named extended attribute. 1561 */ 1562 static int 1563 ffs_getextattr(struct vop_getextattr_args *ap) 1564 /* 1565 vop_getextattr { 1566 IN struct vnode *a_vp; 1567 IN int a_attrnamespace; 1568 IN const char *a_name; 1569 INOUT struct uio *a_uio; 1570 OUT size_t *a_size; 1571 IN struct ucred *a_cred; 1572 IN struct thread *a_td; 1573 }; 1574 */ 1575 { 1576 struct inode *ip; 1577 struct fs *fs; 1578 u_char *eae, *p; 1579 unsigned easize; 1580 int error, ealen, stand_alone; 1581 1582 ip = VTOI(ap->a_vp); 1583 fs = ip->i_fs; 1584 1585 if (fs->fs_magic == FS_UFS1_MAGIC) 1586 return (ufs_vnoperate((struct vop_generic_args *)ap)); 1587 1588 if (ap->a_vp->v_type == VCHR) 1589 return (EOPNOTSUPP); 1590 1591 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1592 ap->a_cred, ap->a_td, IREAD); 1593 if (error) 1594 return (error); 1595 1596 if (ip->i_ea_area == NULL) { 1597 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1598 if (error) 1599 return (error); 1600 stand_alone = 1; 1601 } else { 1602 stand_alone = 0; 1603 } 1604 eae = ip->i_ea_area; 1605 easize = ip->i_ea_len; 1606 1607 ealen = ffs_findextattr(eae, easize, 
ap->a_attrnamespace, ap->a_name, 1608 NULL, &p); 1609 if (ealen >= 0) { 1610 error = 0; 1611 if (ap->a_size != NULL) 1612 *ap->a_size = ealen; 1613 else if (ap->a_uio != NULL) 1614 error = uiomove(p, ealen, ap->a_uio); 1615 } else 1616 error = ENOATTR; 1617 if (stand_alone) 1618 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1619 return(error); 1620 } 1621 1622 /* 1623 * Vnode operation to retrieve extended attributes on a vnode. 1624 */ 1625 static int 1626 ffs_listextattr(struct vop_listextattr_args *ap) 1627 /* 1628 vop_listextattr { 1629 IN struct vnode *a_vp; 1630 IN int a_attrnamespace; 1631 INOUT struct uio *a_uio; 1632 OUT size_t *a_size; 1633 IN struct ucred *a_cred; 1634 IN struct thread *a_td; 1635 }; 1636 */ 1637 { 1638 struct inode *ip; 1639 struct fs *fs; 1640 u_char *eae, *p, *pe, *pn; 1641 unsigned easize; 1642 uint32_t ul; 1643 int error, ealen, stand_alone; 1644 1645 ip = VTOI(ap->a_vp); 1646 fs = ip->i_fs; 1647 1648 if (fs->fs_magic == FS_UFS1_MAGIC) 1649 return (ufs_vnoperate((struct vop_generic_args *)ap)); 1650 1651 if (ap->a_vp->v_type == VCHR) 1652 return (EOPNOTSUPP); 1653 1654 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1655 ap->a_cred, ap->a_td, IREAD); 1656 if (error) 1657 return (error); 1658 1659 if (ip->i_ea_area == NULL) { 1660 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1661 if (error) 1662 return (error); 1663 stand_alone = 1; 1664 } else { 1665 stand_alone = 0; 1666 } 1667 eae = ip->i_ea_area; 1668 easize = ip->i_ea_len; 1669 1670 error = 0; 1671 if (ap->a_size != NULL) 1672 *ap->a_size = 0; 1673 pe = eae + easize; 1674 for(p = eae; error == 0 && p < pe; p = pn) { 1675 bcopy(p, &ul, sizeof(ul)); 1676 pn = p + ul; 1677 if (pn > pe) 1678 break; 1679 p += sizeof(ul); 1680 if (*p++ != ap->a_attrnamespace) 1681 continue; 1682 p++; /* pad2 */ 1683 ealen = *p; 1684 if (ap->a_size != NULL) { 1685 *ap->a_size += ealen + 1; 1686 } else if (ap->a_uio != NULL) { 1687 error = uiomove(p, ealen + 1, ap->a_uio); 1688 } 
1689 } 1690 if (stand_alone) 1691 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1692 return(error); 1693 } 1694 1695 /* 1696 * Vnode operation to set a named attribute. 1697 */ 1698 static int 1699 ffs_setextattr(struct vop_setextattr_args *ap) 1700 /* 1701 vop_setextattr { 1702 IN struct vnode *a_vp; 1703 IN int a_attrnamespace; 1704 IN const char *a_name; 1705 INOUT struct uio *a_uio; 1706 IN struct ucred *a_cred; 1707 IN struct thread *a_td; 1708 }; 1709 */ 1710 { 1711 struct inode *ip; 1712 struct fs *fs; 1713 uint32_t ealength, ul; 1714 int ealen, olen, eapad1, eapad2, error, i, easize; 1715 u_char *eae, *p; 1716 int stand_alone; 1717 1718 ip = VTOI(ap->a_vp); 1719 fs = ip->i_fs; 1720 1721 if (fs->fs_magic == FS_UFS1_MAGIC) 1722 return (ufs_vnoperate((struct vop_generic_args *)ap)); 1723 1724 if (ap->a_vp->v_type == VCHR) 1725 return (EOPNOTSUPP); 1726 1727 if (strlen(ap->a_name) == 0) 1728 return (EINVAL); 1729 1730 /* XXX Now unsupported API to delete EAs using NULL uio. */ 1731 if (ap->a_uio == NULL) 1732 return (EOPNOTSUPP); 1733 1734 error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace, 1735 ap->a_cred, ap->a_td, IWRITE); 1736 if (error) { 1737 if (ip->i_ea_area != NULL && ip->i_ea_error == 0) 1738 ip->i_ea_error = error; 1739 return (error); 1740 } 1741 1742 if (ip->i_ea_area == NULL) { 1743 error = ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td); 1744 if (error) 1745 return (error); 1746 stand_alone = 1; 1747 } else { 1748 stand_alone = 0; 1749 } 1750 1751 ealen = ap->a_uio->uio_resid; 1752 ealength = sizeof(uint32_t) + 3 + strlen(ap->a_name); 1753 eapad1 = 8 - (ealength % 8); 1754 if (eapad1 == 8) 1755 eapad1 = 0; 1756 eapad2 = 8 - (ealen % 8); 1757 if (eapad2 == 8) 1758 eapad2 = 0; 1759 ealength += eapad1 + ealen + eapad2; 1760 1761 eae = malloc(ip->i_ea_len + ealength, M_TEMP, M_WAITOK); 1762 bcopy(ip->i_ea_area, eae, ip->i_ea_len); 1763 easize = ip->i_ea_len; 1764 1765 olen = ffs_findextattr(eae, easize, 1766 ap->a_attrnamespace, ap->a_name, &p, 
NULL); 1767 if (olen == -1) { 1768 /* new, append at end */ 1769 p = eae + easize; 1770 easize += ealength; 1771 } else { 1772 bcopy(p, &ul, sizeof ul); 1773 i = p - eae + ul; 1774 if (ul != ealength) { 1775 bcopy(p + ul, p + ealength, easize - i); 1776 easize += (ealength - ul); 1777 } 1778 } 1779 if (easize > NXADDR * fs->fs_bsize) { 1780 free(eae, M_TEMP); 1781 if (stand_alone) 1782 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1783 else if (ip->i_ea_error == 0) 1784 ip->i_ea_error = ENOSPC; 1785 return(ENOSPC); 1786 } 1787 bcopy(&ealength, p, sizeof(ealength)); 1788 p += sizeof(ealength); 1789 *p++ = ap->a_attrnamespace; 1790 *p++ = eapad2; 1791 *p++ = strlen(ap->a_name); 1792 strcpy(p, ap->a_name); 1793 p += strlen(ap->a_name); 1794 bzero(p, eapad1); 1795 p += eapad1; 1796 error = uiomove(p, ealen, ap->a_uio); 1797 if (error) { 1798 free(eae, M_TEMP); 1799 if (stand_alone) 1800 ffs_close_ea(ap->a_vp, 0, ap->a_cred, ap->a_td); 1801 else if (ip->i_ea_error == 0) 1802 ip->i_ea_error = error; 1803 return(error); 1804 } 1805 p += ealen; 1806 bzero(p, eapad2); 1807 1808 p = ip->i_ea_area; 1809 ip->i_ea_area = eae; 1810 ip->i_ea_len = easize; 1811 free(p, M_TEMP); 1812 if (stand_alone) 1813 error = ffs_close_ea(ap->a_vp, 1, ap->a_cred, ap->a_td); 1814 return(error); 1815 } 1816