1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* Portions Copyright 2007 Jeremy Teo */ 27 28 #include <sys/types.h> 29 #include <sys/param.h> 30 #include <sys/time.h> 31 #include <sys/systm.h> 32 #include <sys/sysmacros.h> 33 #include <sys/resource.h> 34 #include <sys/vfs.h> 35 #include <sys/vfs_opreg.h> 36 #include <sys/vnode.h> 37 #include <sys/file.h> 38 #include <sys/stat.h> 39 #include <sys/kmem.h> 40 #include <sys/taskq.h> 41 #include <sys/uio.h> 42 #include <sys/vmsystm.h> 43 #include <sys/atomic.h> 44 #include <sys/vm.h> 45 #include <vm/seg_vn.h> 46 #include <vm/pvn.h> 47 #include <vm/as.h> 48 #include <vm/kpm.h> 49 #include <vm/seg_kpm.h> 50 #include <sys/mman.h> 51 #include <sys/pathname.h> 52 #include <sys/cmn_err.h> 53 #include <sys/errno.h> 54 #include <sys/unistd.h> 55 #include <sys/zfs_dir.h> 56 #include <sys/zfs_acl.h> 57 #include <sys/zfs_ioctl.h> 58 #include <sys/fs/zfs.h> 59 #include <sys/dmu.h> 60 #include <sys/spa.h> 61 #include <sys/txg.h> 62 #include <sys/dbuf.h> 63 #include <sys/zap.h> 64 #include <sys/dirent.h> 65 #include <sys/policy.h> 66 #include <sys/sunddi.h> 67 #include <sys/filio.h> 68 #include <sys/sid.h> 69 #include "fs/fs_subr.h" 70 #include <sys/zfs_ctldir.h> 71 #include <sys/zfs_fuid.h> 72 #include <sys/dnlc.h> 73 #include <sys/zfs_rlock.h> 74 #include <sys/extdirent.h> 75 #include <sys/kidmap.h> 76 #include <sys/cred_impl.h> 77 #include <sys/attr.h> 78 79 /* 80 * Programming rules. 81 * 82 * Each vnode op performs some logical unit of work. To do this, the ZPL must 83 * properly lock its in-core state, create a DMU transaction, do the work, 84 * record this work in the intent log (ZIL), commit the DMU transaction, 85 * and wait for the intent log to commit if it is a synchronous operation. 86 * Moreover, the vnode ops must work in both normal and log replay context. 87 * The ordering of events is important to avoid deadlocks and references 88 * to freed memory. The example below illustrates the following Big Rules: 89 * 90 * (1) A check must be made in each zfs thread for a mounted file system. 91 * This is done avoiding races using ZFS_ENTER(zfsvfs). 92 * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes 93 * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros 94 * can return EIO from the calling function. 95 * 96 * (2) VN_RELE() should always be the last thing except for zil_commit() 97 * (if necessary) and ZFS_EXIT(). This is for 3 reasons: 98 * First, if it's the last reference, the vnode/znode 99 * can be freed, so the zp may point to freed memory. 
Second, the last 100 * reference will call zfs_zinactive(), which may induce a lot of work -- 101 * pushing cached pages (which acquires range locks) and syncing out 102 * cached atime changes. Third, zfs_zinactive() may require a new tx, 103 * which could deadlock the system if you were already holding one. 104 * 105 * (3) All range locks must be grabbed before calling dmu_tx_assign(), 106 * as they can span dmu_tx_assign() calls. 107 * 108 * (4) Always pass zfsvfs->z_assign as the second argument to dmu_tx_assign(). 109 * In normal operation, this will be TXG_NOWAIT. During ZIL replay, 110 * it will be a specific txg. Either way, dmu_tx_assign() never blocks. 111 * This is critical because we don't want to block while holding locks. 112 * Note, in particular, that if a lock is sometimes acquired before 113 * the tx assigns, and sometimes after (e.g. z_lock), then failing to 114 * use a non-blocking assign can deadlock the system. The scenario: 115 * 116 * Thread A has grabbed a lock before calling dmu_tx_assign(). 117 * Thread B is in an already-assigned tx, and blocks for this lock. 118 * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open() 119 * forever, because the previous txg can't quiesce until B's tx commits. 120 * 121 * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT, 122 * then drop all locks, call dmu_tx_wait(), and try again. 123 * 124 * (5) If the operation succeeded, generate the intent log entry for it 125 * before dropping locks. This ensures that the ordering of events 126 * in the intent log matches the order in which they actually occurred. 127 * 128 * (6) At the end of each vnode op, the DMU tx must always commit, 129 * regardless of whether there were any errors. 130 * 131 * (7) After dropping all locks, invoke zil_commit(zilog, seq, foid) 132 * to ensure that synchronous semantics are provided when necessary. 133 * 134 * In general, this is how things should be ordered in each vnode op: 135 * 136 * ZFS_ENTER(zfsvfs); // exit if unmounted 137 * top: 138 * zfs_dirent_lock(&dl, ...) 
// lock directory entry (may VN_HOLD()) 139 * rw_enter(...); // grab any other locks you need 140 * tx = dmu_tx_create(...); // get DMU tx 141 * dmu_tx_hold_*(); // hold each object you might modify 142 * error = dmu_tx_assign(tx, zfsvfs->z_assign); // try to assign 143 * if (error) { 144 * rw_exit(...); // drop locks 145 * zfs_dirent_unlock(dl); // unlock directory entry 146 * VN_RELE(...); // release held vnodes 147 * if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 148 * dmu_tx_wait(tx); 149 * dmu_tx_abort(tx); 150 * goto top; 151 * } 152 * dmu_tx_abort(tx); // abort DMU tx 153 * ZFS_EXIT(zfsvfs); // finished in zfs 154 * return (error); // really out of space 155 * } 156 * error = do_real_work(); // do whatever this VOP does 157 * if (error == 0) 158 * zfs_log_*(...); // on success, make ZIL entry 159 * dmu_tx_commit(tx); // commit DMU tx -- error or not 160 * rw_exit(...); // drop locks 161 * zfs_dirent_unlock(dl); // unlock directory entry 162 * VN_RELE(...); // release held vnodes 163 * zil_commit(zilog, seq, foid); // synchronous when necessary 164 * ZFS_EXIT(zfsvfs); // finished in zfs 165 * return (error); // done, report error 166 */ 167 168 /* ARGSUSED */ 169 static int 170 zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct) 171 { 172 znode_t *zp = VTOZ(*vpp); 173 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 174 175 ZFS_ENTER(zfsvfs); 176 ZFS_VERIFY_ZP(zp); 177 178 if ((flag & FWRITE) && (zp->z_phys->zp_flags & ZFS_APPENDONLY) && 179 ((flag & FAPPEND) == 0)) { 180 ZFS_EXIT(zfsvfs); 181 return (EPERM); 182 } 183 184 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && 185 ZTOV(zp)->v_type == VREG && 186 !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) && 187 zp->z_phys->zp_size > 0) { 188 if (fs_vscan(*vpp, cr, 0) != 0) { 189 ZFS_EXIT(zfsvfs); 190 return (EACCES); 191 } 192 } 193 194 /* Keep a count of the synchronous opens in the znode */ 195 if (flag & (FSYNC | FDSYNC)) 196 atomic_inc_32(&zp->z_sync_cnt); 197 198 ZFS_EXIT(zfsvfs); 199 return (0); 200 } 201 202 /* ARGSUSED */ 203 static int 204 zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr, 205 caller_context_t *ct) 206 { 207 znode_t *zp = VTOZ(vp); 208 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 209 210 ZFS_ENTER(zfsvfs); 211 ZFS_VERIFY_ZP(zp); 212 213 /* Decrement the synchronous opens in the znode */ 214 if ((flag & (FSYNC | FDSYNC)) && (count == 1)) 215 atomic_dec_32(&zp->z_sync_cnt); 216 217 /* 218 * Clean up any locks held by this process on the vp. 219 */ 220 cleanlocks(vp, ddi_get_pid(), 0); 221 cleanshares(vp, ddi_get_pid()); 222 223 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan && 224 ZTOV(zp)->v_type == VREG && 225 !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) && 226 zp->z_phys->zp_size > 0) 227 VERIFY(fs_vscan(vp, cr, 1) == 0); 228 229 ZFS_EXIT(zfsvfs); 230 return (0); 231 } 232 233 /* 234 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and 235 * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter. 236 */ 237 static int 238 zfs_holey(vnode_t *vp, int cmd, offset_t *off) 239 { 240 znode_t *zp = VTOZ(vp); 241 uint64_t noff = (uint64_t)*off; /* new offset */ 242 uint64_t file_sz; 243 int error; 244 boolean_t hole; 245 246 file_sz = zp->z_phys->zp_size; 247 if (noff >= file_sz) { 248 return (ENXIO); 249 } 250 251 if (cmd == _FIO_SEEK_HOLE) 252 hole = B_TRUE; 253 else 254 hole = B_FALSE; 255 256 error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff); 257 258 /* end of file? 
*/ 259 if ((error == ESRCH) || (noff > file_sz)) { 260 /* 261 * Handle the virtual hole at the end of file. 262 */ 263 if (hole) { 264 *off = file_sz; 265 return (0); 266 } 267 return (ENXIO); 268 } 269 270 if (noff < *off) 271 return (error); 272 *off = noff; 273 return (error); 274 } 275 276 /* ARGSUSED */ 277 static int 278 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred, 279 int *rvalp, caller_context_t *ct) 280 { 281 offset_t off; 282 int error; 283 zfsvfs_t *zfsvfs; 284 znode_t *zp; 285 286 switch (com) { 287 case _FIOFFS: 288 return (zfs_sync(vp->v_vfsp, 0, cred)); 289 290 /* 291 * The following two ioctls are used by bfu. Faking out, 292 * necessary to avoid bfu errors. 293 */ 294 case _FIOGDIO: 295 case _FIOSDIO: 296 return (0); 297 298 case _FIO_SEEK_DATA: 299 case _FIO_SEEK_HOLE: 300 if (ddi_copyin((void *)data, &off, sizeof (off), flag)) 301 return (EFAULT); 302 303 zp = VTOZ(vp); 304 zfsvfs = zp->z_zfsvfs; 305 ZFS_ENTER(zfsvfs); 306 ZFS_VERIFY_ZP(zp); 307 308 /* offset parameter is in/out */ 309 error = zfs_holey(vp, com, &off); 310 ZFS_EXIT(zfsvfs); 311 if (error) 312 return (error); 313 if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) 314 return (EFAULT); 315 return (0); 316 } 317 return (ENOTTY); 318 } 319 320 /* 321 * Utility functions to map and unmap a single physical page. These 322 * are used to manage the mappable copies of ZFS file data, and therefore 323 * do not update ref/mod bits. 324 */ 325 caddr_t 326 zfs_map_page(page_t *pp, enum seg_rw rw) 327 { 328 if (kpm_enable) 329 return (hat_kpm_mapin(pp, 0)); 330 ASSERT(rw == S_READ || rw == S_WRITE); 331 return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0), 332 (caddr_t)-1)); 333 } 334 335 void 336 zfs_unmap_page(page_t *pp, caddr_t addr) 337 { 338 if (kpm_enable) { 339 hat_kpm_mapout(pp, 0, addr); 340 } else { 341 ppmapout(addr); 342 } 343 } 344 345 /* 346 * When a file is memory mapped, we must keep the IO data synchronized 347 * between the DMU cache and the memory mapped pages. What this means: 348 * 349 * On Write: If we find a memory mapped page, we write to *both* 350 * the page and the dmu buffer. 351 * 352 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when 353 * the file is memory mapped. 354 */ 355 static int 356 mappedwrite(vnode_t *vp, int nbytes, uio_t *uio, dmu_tx_t *tx) 357 { 358 znode_t *zp = VTOZ(vp); 359 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 360 int64_t start, off; 361 int len = nbytes; 362 int error = 0; 363 364 start = uio->uio_loffset; 365 off = start & PAGEOFFSET; 366 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 367 page_t *pp; 368 uint64_t bytes = MIN(PAGESIZE - off, len); 369 uint64_t woff = uio->uio_loffset; 370 371 /* 372 * We don't want a new page to "appear" in the middle of 373 * the file update (because it may not get the write 374 * update data), so we grab a lock to block 375 * zfs_getpage(). 
376 */ 377 rw_enter(&zp->z_map_lock, RW_WRITER); 378 if (pp = page_lookup(vp, start, SE_SHARED)) { 379 caddr_t va; 380 381 rw_exit(&zp->z_map_lock); 382 va = zfs_map_page(pp, S_WRITE); 383 error = uiomove(va+off, bytes, UIO_WRITE, uio); 384 if (error == 0) { 385 dmu_write(zfsvfs->z_os, zp->z_id, 386 woff, bytes, va+off, tx); 387 } 388 zfs_unmap_page(pp, va); 389 page_unlock(pp); 390 } else { 391 error = dmu_write_uio(zfsvfs->z_os, zp->z_id, 392 uio, bytes, tx); 393 rw_exit(&zp->z_map_lock); 394 } 395 len -= bytes; 396 off = 0; 397 if (error) 398 break; 399 } 400 return (error); 401 } 402 403 /* 404 * When a file is memory mapped, we must keep the IO data synchronized 405 * between the DMU cache and the memory mapped pages. What this means: 406 * 407 * On Read: We "read" preferentially from memory mapped pages, 408 * else we default from the dmu buffer. 409 * 410 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when 411 * the file is memory mapped. 412 */ 413 static int 414 mappedread(vnode_t *vp, int nbytes, uio_t *uio) 415 { 416 znode_t *zp = VTOZ(vp); 417 objset_t *os = zp->z_zfsvfs->z_os; 418 int64_t start, off; 419 int len = nbytes; 420 int error = 0; 421 422 start = uio->uio_loffset; 423 off = start & PAGEOFFSET; 424 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 425 page_t *pp; 426 uint64_t bytes = MIN(PAGESIZE - off, len); 427 428 if (pp = page_lookup(vp, start, SE_SHARED)) { 429 caddr_t va; 430 431 va = zfs_map_page(pp, S_READ); 432 error = uiomove(va + off, bytes, UIO_READ, uio); 433 zfs_unmap_page(pp, va); 434 page_unlock(pp); 435 } else { 436 error = dmu_read_uio(os, zp->z_id, uio, bytes); 437 } 438 len -= bytes; 439 off = 0; 440 if (error) 441 break; 442 } 443 return (error); 444 } 445 446 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ 447 448 /* 449 * Read bytes from specified file into supplied buffer. 450 * 451 * IN: vp - vnode of file to be read from. 452 * uio - structure supplying read location, range info, 453 * and return buffer. 454 * ioflag - SYNC flags; used to provide FRSYNC semantics. 455 * cr - credentials of caller. 456 * ct - caller context 457 * 458 * OUT: uio - updated offset and range, buffer filled. 459 * 460 * RETURN: 0 if success 461 * error code if failure 462 * 463 * Side Effects: 464 * vp - atime updated if byte count > 0 465 */ 466 /* ARGSUSED */ 467 static int 468 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 469 { 470 znode_t *zp = VTOZ(vp); 471 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 472 objset_t *os; 473 ssize_t n, nbytes; 474 int error; 475 rl_t *rl; 476 477 ZFS_ENTER(zfsvfs); 478 ZFS_VERIFY_ZP(zp); 479 os = zfsvfs->z_os; 480 481 if (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) { 482 ZFS_EXIT(zfsvfs); 483 return (EACCES); 484 } 485 486 /* 487 * Validate file offset 488 */ 489 if (uio->uio_loffset < (offset_t)0) { 490 ZFS_EXIT(zfsvfs); 491 return (EINVAL); 492 } 493 494 /* 495 * Fasttrack empty reads 496 */ 497 if (uio->uio_resid == 0) { 498 ZFS_EXIT(zfsvfs); 499 return (0); 500 } 501 502 /* 503 * Check for mandatory locks 504 */ 505 if (MANDMODE((mode_t)zp->z_phys->zp_mode)) { 506 if (error = chklock(vp, FREAD, 507 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { 508 ZFS_EXIT(zfsvfs); 509 return (error); 510 } 511 } 512 513 /* 514 * If we're in FRSYNC mode, sync out this znode before reading it. 515 */ 516 if (ioflag & FRSYNC) 517 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); 518 519 /* 520 * Lock the range against changes. 
521 */ 522 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER); 523 524 /* 525 * If we are reading past end-of-file we can skip 526 * to the end; but we might still need to set atime. 527 */ 528 if (uio->uio_loffset >= zp->z_phys->zp_size) { 529 error = 0; 530 goto out; 531 } 532 533 ASSERT(uio->uio_loffset < zp->z_phys->zp_size); 534 n = MIN(uio->uio_resid, zp->z_phys->zp_size - uio->uio_loffset); 535 536 while (n > 0) { 537 nbytes = MIN(n, zfs_read_chunk_size - 538 P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); 539 540 if (vn_has_cached_data(vp)) 541 error = mappedread(vp, nbytes, uio); 542 else 543 error = dmu_read_uio(os, zp->z_id, uio, nbytes); 544 if (error) { 545 /* convert checksum errors into IO errors */ 546 if (error == ECKSUM) 547 error = EIO; 548 break; 549 } 550 551 n -= nbytes; 552 } 553 554 out: 555 zfs_range_unlock(rl); 556 557 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 558 ZFS_EXIT(zfsvfs); 559 return (error); 560 } 561 562 /* 563 * Write the bytes to a file. 564 * 565 * IN: vp - vnode of file to be written to. 566 * uio - structure supplying write location, range info, 567 * and data buffer. 568 * ioflag - FAPPEND flag set if in append mode. 569 * cr - credentials of caller. 570 * ct - caller context (NFS/CIFS fem monitor only) 571 * 572 * OUT: uio - updated offset and range. 573 * 574 * RETURN: 0 if success 575 * error code if failure 576 * 577 * Timestamps: 578 * vp - ctime|mtime updated if byte count > 0 579 */ 580 /* ARGSUSED */ 581 static int 582 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 583 { 584 znode_t *zp = VTOZ(vp); 585 rlim64_t limit = uio->uio_llimit; 586 ssize_t start_resid = uio->uio_resid; 587 ssize_t tx_bytes; 588 uint64_t end_size; 589 dmu_tx_t *tx; 590 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 591 zilog_t *zilog; 592 offset_t woff; 593 ssize_t n, nbytes; 594 rl_t *rl; 595 int max_blksz = zfsvfs->z_max_blksz; 596 uint64_t pflags; 597 int error; 598 599 /* 600 * Fasttrack empty write 601 */ 602 n = start_resid; 603 if (n == 0) 604 return (0); 605 606 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) 607 limit = MAXOFFSET_T; 608 609 ZFS_ENTER(zfsvfs); 610 ZFS_VERIFY_ZP(zp); 611 612 /* 613 * If immutable or not appending then return EPERM 614 */ 615 pflags = zp->z_phys->zp_flags; 616 if ((pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || 617 ((pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && 618 (uio->uio_loffset < zp->z_phys->zp_size))) { 619 ZFS_EXIT(zfsvfs); 620 return (EPERM); 621 } 622 623 zilog = zfsvfs->z_log; 624 625 /* 626 * Pre-fault the pages to ensure slow (eg NFS) pages 627 * don't hold up txg. 628 */ 629 uio_prefaultpages(n, uio); 630 631 /* 632 * If in append mode, set the io offset pointer to eof. 633 */ 634 if (ioflag & FAPPEND) { 635 /* 636 * Range lock for a file append: 637 * The value for the start of range will be determined by 638 * zfs_range_lock() (to guarantee append semantics). 639 * If this write will cause the block size to increase, 640 * zfs_range_lock() will lock the entire file, so we must 641 * later reduce the range after we grow the block size. 
642 */ 643 rl = zfs_range_lock(zp, 0, n, RL_APPEND); 644 if (rl->r_len == UINT64_MAX) { 645 /* overlocked, zp_size can't change */ 646 woff = uio->uio_loffset = zp->z_phys->zp_size; 647 } else { 648 woff = uio->uio_loffset = rl->r_off; 649 } 650 } else { 651 woff = uio->uio_loffset; 652 /* 653 * Validate file offset 654 */ 655 if (woff < 0) { 656 ZFS_EXIT(zfsvfs); 657 return (EINVAL); 658 } 659 660 /* 661 * If we need to grow the block size then zfs_range_lock() 662 * will lock a wider range than we request here. 663 * Later after growing the block size we reduce the range. 664 */ 665 rl = zfs_range_lock(zp, woff, n, RL_WRITER); 666 } 667 668 if (woff >= limit) { 669 zfs_range_unlock(rl); 670 ZFS_EXIT(zfsvfs); 671 return (EFBIG); 672 } 673 674 if ((woff + n) > limit || woff > (limit - n)) 675 n = limit - woff; 676 677 /* 678 * Check for mandatory locks 679 */ 680 if (MANDMODE((mode_t)zp->z_phys->zp_mode) && 681 (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { 682 zfs_range_unlock(rl); 683 ZFS_EXIT(zfsvfs); 684 return (error); 685 } 686 end_size = MAX(zp->z_phys->zp_size, woff + n); 687 688 /* 689 * Write the file in reasonable size chunks. Each chunk is written 690 * in a separate transaction; this keeps the intent log records small 691 * and allows us to do more fine-grained space accounting. 692 */ 693 while (n > 0) { 694 /* 695 * Start a transaction. 696 */ 697 woff = uio->uio_loffset; 698 tx = dmu_tx_create(zfsvfs->z_os); 699 dmu_tx_hold_bonus(tx, zp->z_id); 700 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz)); 701 error = dmu_tx_assign(tx, zfsvfs->z_assign); 702 if (error) { 703 if (error == ERESTART && 704 zfsvfs->z_assign == TXG_NOWAIT) { 705 dmu_tx_wait(tx); 706 dmu_tx_abort(tx); 707 continue; 708 } 709 dmu_tx_abort(tx); 710 break; 711 } 712 713 /* 714 * If zfs_range_lock() over-locked we grow the blocksize 715 * and then reduce the lock range. This will only happen 716 * on the first iteration since zfs_range_reduce() will 717 * shrink down r_len to the appropriate size. 718 */ 719 if (rl->r_len == UINT64_MAX) { 720 uint64_t new_blksz; 721 722 if (zp->z_blksz > max_blksz) { 723 ASSERT(!ISP2(zp->z_blksz)); 724 new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE); 725 } else { 726 new_blksz = MIN(end_size, max_blksz); 727 } 728 zfs_grow_blocksize(zp, new_blksz, tx); 729 zfs_range_reduce(rl, woff, n); 730 } 731 732 /* 733 * XXX - should we really limit each write to z_max_blksz? 734 * Perhaps we should use SPA_MAXBLOCKSIZE chunks? 735 */ 736 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz)); 737 rw_enter(&zp->z_map_lock, RW_READER); 738 739 tx_bytes = uio->uio_resid; 740 if (vn_has_cached_data(vp)) { 741 rw_exit(&zp->z_map_lock); 742 error = mappedwrite(vp, nbytes, uio, tx); 743 } else { 744 error = dmu_write_uio(zfsvfs->z_os, zp->z_id, 745 uio, nbytes, tx); 746 rw_exit(&zp->z_map_lock); 747 } 748 tx_bytes -= uio->uio_resid; 749 750 /* 751 * If we made no progress, we're done. If we made even 752 * partial progress, update the znode and ZIL accordingly. 753 */ 754 if (tx_bytes == 0) { 755 dmu_tx_commit(tx); 756 ASSERT(error != 0); 757 break; 758 } 759 760 /* 761 * Clear Set-UID/Set-GID bits on successful write if not 762 * privileged and at least one of the excute bits is set. 763 * 764 * It would be nice to to this after all writes have 765 * been done, but that would still expose the ISUID/ISGID 766 * to another app after the partial write is committed. 767 * 768 * Note: we don't call zfs_fuid_map_id() here because 769 * user 0 is not an ephemeral uid. 
770 */ 771 mutex_enter(&zp->z_acl_lock); 772 if ((zp->z_phys->zp_mode & (S_IXUSR | (S_IXUSR >> 3) | 773 (S_IXUSR >> 6))) != 0 && 774 (zp->z_phys->zp_mode & (S_ISUID | S_ISGID)) != 0 && 775 secpolicy_vnode_setid_retain(cr, 776 (zp->z_phys->zp_mode & S_ISUID) != 0 && 777 zp->z_phys->zp_uid == 0) != 0) { 778 zp->z_phys->zp_mode &= ~(S_ISUID | S_ISGID); 779 } 780 mutex_exit(&zp->z_acl_lock); 781 782 /* 783 * Update time stamp. NOTE: This marks the bonus buffer as 784 * dirty, so we don't have to do it again for zp_size. 785 */ 786 zfs_time_stamper(zp, CONTENT_MODIFIED, tx); 787 788 /* 789 * Update the file size (zp_size) if it has changed; 790 * account for possible concurrent updates. 791 */ 792 while ((end_size = zp->z_phys->zp_size) < uio->uio_loffset) 793 (void) atomic_cas_64(&zp->z_phys->zp_size, end_size, 794 uio->uio_loffset); 795 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag); 796 dmu_tx_commit(tx); 797 798 if (error != 0) 799 break; 800 ASSERT(tx_bytes == nbytes); 801 n -= nbytes; 802 } 803 804 zfs_range_unlock(rl); 805 806 /* 807 * If we're in replay mode, or we made no progress, return error. 808 * Otherwise, it's at least a partial write, so it's successful. 809 */ 810 if (zfsvfs->z_assign >= TXG_INITIAL || uio->uio_resid == start_resid) { 811 ZFS_EXIT(zfsvfs); 812 return (error); 813 } 814 815 if (ioflag & (FSYNC | FDSYNC)) 816 zil_commit(zilog, zp->z_last_itx, zp->z_id); 817 818 ZFS_EXIT(zfsvfs); 819 return (0); 820 } 821 822 void 823 zfs_get_done(dmu_buf_t *db, void *vzgd) 824 { 825 zgd_t *zgd = (zgd_t *)vzgd; 826 rl_t *rl = zgd->zgd_rl; 827 vnode_t *vp = ZTOV(rl->r_zp); 828 829 dmu_buf_rele(db, vzgd); 830 zfs_range_unlock(rl); 831 VN_RELE(vp); 832 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 833 kmem_free(zgd, sizeof (zgd_t)); 834 } 835 836 /* 837 * Get data to generate a TX_WRITE intent log record. 838 */ 839 int 840 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 841 { 842 zfsvfs_t *zfsvfs = arg; 843 objset_t *os = zfsvfs->z_os; 844 znode_t *zp; 845 uint64_t off = lr->lr_offset; 846 dmu_buf_t *db; 847 rl_t *rl; 848 zgd_t *zgd; 849 int dlen = lr->lr_length; /* length of user data */ 850 int error = 0; 851 852 ASSERT(zio); 853 ASSERT(dlen != 0); 854 855 /* 856 * Nothing to do if the file has been removed 857 */ 858 if (zfs_zget(zfsvfs, lr->lr_foid, &zp) != 0) 859 return (ENOENT); 860 if (zp->z_unlinked) { 861 VN_RELE(ZTOV(zp)); 862 return (ENOENT); 863 } 864 865 /* 866 * Write records come in two flavors: immediate and indirect. 867 * For small writes it's cheaper to store the data with the 868 * log record (immediate); for large writes it's cheaper to 869 * sync the data and get a pointer to it (indirect) so that 870 * we don't have to write the data twice. 871 */ 872 if (buf != NULL) { /* immediate write */ 873 rl = zfs_range_lock(zp, off, dlen, RL_READER); 874 /* test for truncation needs to be done while range locked */ 875 if (off >= zp->z_phys->zp_size) { 876 error = ENOENT; 877 goto out; 878 } 879 VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf)); 880 } else { /* indirect write */ 881 uint64_t boff; /* block starting offset */ 882 883 /* 884 * Have to lock the whole block to ensure when it's 885 * written out and it's checksum is being calculated 886 * that no one can change the data. We need to re-check 887 * blocksize after we get the lock in case it's changed! 
888 */ 889 for (;;) { 890 if (ISP2(zp->z_blksz)) { 891 boff = P2ALIGN_TYPED(off, zp->z_blksz, 892 uint64_t); 893 } else { 894 boff = 0; 895 } 896 dlen = zp->z_blksz; 897 rl = zfs_range_lock(zp, boff, dlen, RL_READER); 898 if (zp->z_blksz == dlen) 899 break; 900 zfs_range_unlock(rl); 901 } 902 /* test for truncation needs to be done while range locked */ 903 if (off >= zp->z_phys->zp_size) { 904 error = ENOENT; 905 goto out; 906 } 907 zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP); 908 zgd->zgd_rl = rl; 909 zgd->zgd_zilog = zfsvfs->z_log; 910 zgd->zgd_bp = &lr->lr_blkptr; 911 VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db)); 912 ASSERT(boff == db->db_offset); 913 lr->lr_blkoff = off - boff; 914 error = dmu_sync(zio, db, &lr->lr_blkptr, 915 lr->lr_common.lrc_txg, zfs_get_done, zgd); 916 ASSERT((error && error != EINPROGRESS) || 917 lr->lr_length <= zp->z_blksz); 918 if (error == 0) 919 zil_add_block(zfsvfs->z_log, &lr->lr_blkptr); 920 /* 921 * If we get EINPROGRESS, then we need to wait for a 922 * write IO initiated by dmu_sync() to complete before 923 * we can release this dbuf. We will finish everything 924 * up in the zfs_get_done() callback. 925 */ 926 if (error == EINPROGRESS) 927 return (0); 928 dmu_buf_rele(db, zgd); 929 kmem_free(zgd, sizeof (zgd_t)); 930 } 931 out: 932 zfs_range_unlock(rl); 933 VN_RELE(ZTOV(zp)); 934 return (error); 935 } 936 937 /*ARGSUSED*/ 938 static int 939 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, 940 caller_context_t *ct) 941 { 942 znode_t *zp = VTOZ(vp); 943 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 944 int error; 945 946 ZFS_ENTER(zfsvfs); 947 ZFS_VERIFY_ZP(zp); 948 949 if (flag & V_ACE_MASK) 950 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr); 951 else 952 error = zfs_zaccess_rwx(zp, mode, flag, cr); 953 954 ZFS_EXIT(zfsvfs); 955 return (error); 956 } 957 958 /* 959 * Lookup an entry in a directory, or an extended attribute directory. 960 * If it exists, return a held vnode reference for it. 961 * 962 * IN: dvp - vnode of directory to search. 963 * nm - name of entry to lookup. 964 * pnp - full pathname to lookup [UNUSED]. 965 * flags - LOOKUP_XATTR set if looking for an attribute. 966 * rdir - root directory vnode [UNUSED]. 967 * cr - credentials of caller. 968 * ct - caller context 969 * direntflags - directory lookup flags 970 * realpnp - returned pathname. 971 * 972 * OUT: vpp - vnode of located entry, NULL if not found. 973 * 974 * RETURN: 0 if success 975 * error code if failure 976 * 977 * Timestamps: 978 * NA 979 */ 980 /* ARGSUSED */ 981 static int 982 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, 983 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, 984 int *direntflags, pathname_t *realpnp) 985 { 986 znode_t *zdp = VTOZ(dvp); 987 zfsvfs_t *zfsvfs = zdp->z_zfsvfs; 988 int error; 989 990 ZFS_ENTER(zfsvfs); 991 ZFS_VERIFY_ZP(zdp); 992 993 *vpp = NULL; 994 995 if (flags & LOOKUP_XATTR) { 996 /* 997 * If the xattr property is off, refuse the lookup request. 998 */ 999 if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) { 1000 ZFS_EXIT(zfsvfs); 1001 return (EINVAL); 1002 } 1003 1004 /* 1005 * We don't allow recursive attributes.. 1006 * Maybe someday we will. 1007 */ 1008 if (zdp->z_phys->zp_flags & ZFS_XATTR) { 1009 ZFS_EXIT(zfsvfs); 1010 return (EINVAL); 1011 } 1012 1013 if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) { 1014 ZFS_EXIT(zfsvfs); 1015 return (error); 1016 } 1017 1018 /* 1019 * Do we have permission to get into attribute directory? 
1020 */ 1021 1022 if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0, 1023 B_FALSE, cr)) { 1024 VN_RELE(*vpp); 1025 *vpp = NULL; 1026 } 1027 1028 ZFS_EXIT(zfsvfs); 1029 return (error); 1030 } 1031 1032 if (dvp->v_type != VDIR) { 1033 ZFS_EXIT(zfsvfs); 1034 return (ENOTDIR); 1035 } 1036 1037 /* 1038 * Check accessibility of directory. 1039 */ 1040 1041 if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) { 1042 ZFS_EXIT(zfsvfs); 1043 return (error); 1044 } 1045 1046 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), 1047 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1048 ZFS_EXIT(zfsvfs); 1049 return (EILSEQ); 1050 } 1051 1052 error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp); 1053 if (error == 0) { 1054 /* 1055 * Convert device special files 1056 */ 1057 if (IS_DEVVP(*vpp)) { 1058 vnode_t *svp; 1059 1060 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); 1061 VN_RELE(*vpp); 1062 if (svp == NULL) 1063 error = ENOSYS; 1064 else 1065 *vpp = svp; 1066 } 1067 } 1068 1069 ZFS_EXIT(zfsvfs); 1070 return (error); 1071 } 1072 1073 /* 1074 * Attempt to create a new entry in a directory. If the entry 1075 * already exists, truncate the file if permissible, else return 1076 * an error. Return the vp of the created or trunc'd file. 1077 * 1078 * IN: dvp - vnode of directory to put new file entry in. 1079 * name - name of new file entry. 1080 * vap - attributes of new file. 1081 * excl - flag indicating exclusive or non-exclusive mode. 1082 * mode - mode to open file with. 1083 * cr - credentials of caller. 1084 * flag - large file flag [UNUSED]. 1085 * ct - caller context 1086 * vsecp - ACL to be set 1087 * 1088 * OUT: vpp - vnode of created or trunc'd entry. 1089 * 1090 * RETURN: 0 if success 1091 * error code if failure 1092 * 1093 * Timestamps: 1094 * dvp - ctime|mtime updated if new entry created 1095 * vp - ctime|mtime always, atime if new 1096 */ 1097 1098 /* ARGSUSED */ 1099 static int 1100 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl, 1101 int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct, 1102 vsecattr_t *vsecp) 1103 { 1104 znode_t *zp, *dzp = VTOZ(dvp); 1105 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1106 zilog_t *zilog; 1107 objset_t *os; 1108 zfs_dirlock_t *dl; 1109 dmu_tx_t *tx; 1110 int error; 1111 zfs_acl_t *aclp = NULL; 1112 zfs_fuid_info_t *fuidp = NULL; 1113 ksid_t *ksid; 1114 uid_t uid; 1115 gid_t gid = crgetgid(cr); 1116 1117 /* 1118 * If we have an ephemeral id, ACL, or XVATTR then 1119 * make sure file system is at proper version 1120 */ 1121 1122 ksid = crgetsid(cr, KSID_OWNER); 1123 if (ksid) 1124 uid = ksid_getid(ksid); 1125 else 1126 uid = crgetuid(cr); 1127 1128 if (zfsvfs->z_use_fuids == B_FALSE && 1129 (vsecp || (vap->va_mask & AT_XVATTR) || 1130 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1131 return (EINVAL); 1132 1133 ZFS_ENTER(zfsvfs); 1134 ZFS_VERIFY_ZP(dzp); 1135 os = zfsvfs->z_os; 1136 zilog = zfsvfs->z_log; 1137 1138 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 1139 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1140 ZFS_EXIT(zfsvfs); 1141 return (EILSEQ); 1142 } 1143 1144 if (vap->va_mask & AT_XVATTR) { 1145 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1146 crgetuid(cr), cr, vap->va_type)) != 0) { 1147 ZFS_EXIT(zfsvfs); 1148 return (error); 1149 } 1150 } 1151 top: 1152 *vpp = NULL; 1153 1154 if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr)) 1155 vap->va_mode &= ~VSVTX; 1156 1157 if (*name == '\0') { 1158 /* 1159 * Null component name refers to the directory itself. 
1160 */ 1161 VN_HOLD(dvp); 1162 zp = dzp; 1163 dl = NULL; 1164 error = 0; 1165 } else { 1166 /* possible VN_HOLD(zp) */ 1167 int zflg = 0; 1168 1169 if (flag & FIGNORECASE) 1170 zflg |= ZCILOOK; 1171 1172 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1173 NULL, NULL); 1174 if (error) { 1175 if (strcmp(name, "..") == 0) 1176 error = EISDIR; 1177 ZFS_EXIT(zfsvfs); 1178 if (aclp) 1179 zfs_acl_free(aclp); 1180 return (error); 1181 } 1182 } 1183 if (vsecp && aclp == NULL) { 1184 error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp); 1185 if (error) { 1186 ZFS_EXIT(zfsvfs); 1187 if (dl) 1188 zfs_dirent_unlock(dl); 1189 return (error); 1190 } 1191 } 1192 1193 if (zp == NULL) { 1194 uint64_t txtype; 1195 1196 /* 1197 * Create a new file object and update the directory 1198 * to reference it. 1199 */ 1200 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 1201 goto out; 1202 } 1203 1204 /* 1205 * We only support the creation of regular files in 1206 * extended attribute directories. 1207 */ 1208 if ((dzp->z_phys->zp_flags & ZFS_XATTR) && 1209 (vap->va_type != VREG)) { 1210 error = EINVAL; 1211 goto out; 1212 } 1213 1214 tx = dmu_tx_create(os); 1215 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1216 if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(uid) || 1217 IS_EPHEMERAL(gid)) { 1218 if (zfsvfs->z_fuid_obj == 0) { 1219 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1220 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1221 FUID_SIZE_ESTIMATE(zfsvfs)); 1222 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 1223 FALSE, NULL); 1224 } else { 1225 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); 1226 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, 1227 FUID_SIZE_ESTIMATE(zfsvfs)); 1228 } 1229 } 1230 dmu_tx_hold_bonus(tx, dzp->z_id); 1231 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 1232 if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp) { 1233 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 1234 0, SPA_MAXBLOCKSIZE); 1235 } 1236 error = dmu_tx_assign(tx, zfsvfs->z_assign); 1237 if (error) { 1238 zfs_dirent_unlock(dl); 1239 if (error == ERESTART && 1240 zfsvfs->z_assign == TXG_NOWAIT) { 1241 dmu_tx_wait(tx); 1242 dmu_tx_abort(tx); 1243 goto top; 1244 } 1245 dmu_tx_abort(tx); 1246 ZFS_EXIT(zfsvfs); 1247 if (aclp) 1248 zfs_acl_free(aclp); 1249 return (error); 1250 } 1251 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp); 1252 (void) zfs_link_create(dl, zp, tx, ZNEW); 1253 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); 1254 if (flag & FIGNORECASE) 1255 txtype |= TX_CI; 1256 zfs_log_create(zilog, tx, txtype, dzp, zp, name, 1257 vsecp, fuidp, vap); 1258 if (fuidp) 1259 zfs_fuid_info_free(fuidp); 1260 dmu_tx_commit(tx); 1261 } else { 1262 int aflags = (flag & FAPPEND) ? V_APPEND : 0; 1263 1264 /* 1265 * A directory entry already exists for this name. 1266 */ 1267 /* 1268 * Can't truncate an existing file if in exclusive mode. 1269 */ 1270 if (excl == EXCL) { 1271 error = EEXIST; 1272 goto out; 1273 } 1274 /* 1275 * Can't open a directory for writing. 1276 */ 1277 if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) { 1278 error = EISDIR; 1279 goto out; 1280 } 1281 /* 1282 * Verify requested access to file. 1283 */ 1284 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) { 1285 goto out; 1286 } 1287 1288 mutex_enter(&dzp->z_lock); 1289 dzp->z_seq++; 1290 mutex_exit(&dzp->z_lock); 1291 1292 /* 1293 * Truncate regular files if requested. 
1294 */ 1295 if ((ZTOV(zp)->v_type == VREG) && 1296 (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) { 1297 /* we can't hold any locks when calling zfs_freesp() */ 1298 zfs_dirent_unlock(dl); 1299 dl = NULL; 1300 error = zfs_freesp(zp, 0, 0, mode, TRUE); 1301 if (error == 0) { 1302 vnevent_create(ZTOV(zp), ct); 1303 } 1304 } 1305 } 1306 out: 1307 1308 if (dl) 1309 zfs_dirent_unlock(dl); 1310 1311 if (error) { 1312 if (zp) 1313 VN_RELE(ZTOV(zp)); 1314 } else { 1315 *vpp = ZTOV(zp); 1316 /* 1317 * If vnode is for a device return a specfs vnode instead. 1318 */ 1319 if (IS_DEVVP(*vpp)) { 1320 struct vnode *svp; 1321 1322 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); 1323 VN_RELE(*vpp); 1324 if (svp == NULL) { 1325 error = ENOSYS; 1326 } 1327 *vpp = svp; 1328 } 1329 } 1330 if (aclp) 1331 zfs_acl_free(aclp); 1332 1333 ZFS_EXIT(zfsvfs); 1334 return (error); 1335 } 1336 1337 /* 1338 * Remove an entry from a directory. 1339 * 1340 * IN: dvp - vnode of directory to remove entry from. 1341 * name - name of entry to remove. 1342 * cr - credentials of caller. 1343 * ct - caller context 1344 * flags - case flags 1345 * 1346 * RETURN: 0 if success 1347 * error code if failure 1348 * 1349 * Timestamps: 1350 * dvp - ctime|mtime 1351 * vp - ctime (if nlink > 0) 1352 */ 1353 /*ARGSUSED*/ 1354 static int 1355 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, 1356 int flags) 1357 { 1358 znode_t *zp, *dzp = VTOZ(dvp); 1359 znode_t *xzp = NULL; 1360 vnode_t *vp; 1361 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1362 zilog_t *zilog; 1363 uint64_t acl_obj, xattr_obj; 1364 zfs_dirlock_t *dl; 1365 dmu_tx_t *tx; 1366 boolean_t may_delete_now, delete_now = FALSE; 1367 boolean_t unlinked, toobig = FALSE; 1368 uint64_t txtype; 1369 pathname_t *realnmp = NULL; 1370 pathname_t realnm; 1371 int error; 1372 int zflg = ZEXISTS; 1373 1374 ZFS_ENTER(zfsvfs); 1375 ZFS_VERIFY_ZP(dzp); 1376 zilog = zfsvfs->z_log; 1377 1378 if (flags & FIGNORECASE) { 1379 zflg |= ZCILOOK; 1380 pn_alloc(&realnm); 1381 realnmp = &realnm; 1382 } 1383 1384 top: 1385 /* 1386 * Attempt to lock directory; fail if entry doesn't exist. 1387 */ 1388 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1389 NULL, realnmp)) { 1390 if (realnmp) 1391 pn_free(realnmp); 1392 ZFS_EXIT(zfsvfs); 1393 return (error); 1394 } 1395 1396 vp = ZTOV(zp); 1397 1398 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 1399 goto out; 1400 } 1401 1402 /* 1403 * Need to use rmdir for removing directories. 1404 */ 1405 if (vp->v_type == VDIR) { 1406 error = EPERM; 1407 goto out; 1408 } 1409 1410 vnevent_remove(vp, dvp, name, ct); 1411 1412 if (realnmp) 1413 dnlc_remove(dvp, realnmp->pn_buf); 1414 else 1415 dnlc_remove(dvp, name); 1416 1417 mutex_enter(&vp->v_lock); 1418 may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp); 1419 mutex_exit(&vp->v_lock); 1420 1421 /* 1422 * We may delete the znode now, or we may put it in the unlinked set; 1423 * it depends on whether we're the last link, and on whether there are 1424 * other holds on the vnode. So we dmu_tx_hold() the right things to 1425 * allow for either case. 1426 */ 1427 tx = dmu_tx_create(zfsvfs->z_os); 1428 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 1429 dmu_tx_hold_bonus(tx, zp->z_id); 1430 if (may_delete_now) { 1431 toobig = 1432 zp->z_phys->zp_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT; 1433 /* if the file is too big, only hold_free a token amount */ 1434 dmu_tx_hold_free(tx, zp->z_id, 0, 1435 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END)); 1436 } 1437 1438 /* are there any extended attributes? 
*/ 1439 if ((xattr_obj = zp->z_phys->zp_xattr) != 0) { 1440 /* XXX - do we need this if we are deleting? */ 1441 dmu_tx_hold_bonus(tx, xattr_obj); 1442 } 1443 1444 /* are there any additional acls */ 1445 if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 && 1446 may_delete_now) 1447 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); 1448 1449 /* charge as an update -- would be nice not to charge at all */ 1450 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 1451 1452 error = dmu_tx_assign(tx, zfsvfs->z_assign); 1453 if (error) { 1454 zfs_dirent_unlock(dl); 1455 VN_RELE(vp); 1456 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 1457 dmu_tx_wait(tx); 1458 dmu_tx_abort(tx); 1459 goto top; 1460 } 1461 if (realnmp) 1462 pn_free(realnmp); 1463 dmu_tx_abort(tx); 1464 ZFS_EXIT(zfsvfs); 1465 return (error); 1466 } 1467 1468 /* 1469 * Remove the directory entry. 1470 */ 1471 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked); 1472 1473 if (error) { 1474 dmu_tx_commit(tx); 1475 goto out; 1476 } 1477 1478 if (unlinked) { 1479 mutex_enter(&vp->v_lock); 1480 delete_now = may_delete_now && !toobig && 1481 vp->v_count == 1 && !vn_has_cached_data(vp) && 1482 zp->z_phys->zp_xattr == xattr_obj && 1483 zp->z_phys->zp_acl.z_acl_extern_obj == acl_obj; 1484 mutex_exit(&vp->v_lock); 1485 } 1486 1487 if (delete_now) { 1488 if (zp->z_phys->zp_xattr) { 1489 error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp); 1490 ASSERT3U(error, ==, 0); 1491 ASSERT3U(xzp->z_phys->zp_links, ==, 2); 1492 dmu_buf_will_dirty(xzp->z_dbuf, tx); 1493 mutex_enter(&xzp->z_lock); 1494 xzp->z_unlinked = 1; 1495 xzp->z_phys->zp_links = 0; 1496 mutex_exit(&xzp->z_lock); 1497 zfs_unlinked_add(xzp, tx); 1498 zp->z_phys->zp_xattr = 0; /* probably unnecessary */ 1499 } 1500 mutex_enter(&zp->z_lock); 1501 mutex_enter(&vp->v_lock); 1502 vp->v_count--; 1503 ASSERT3U(vp->v_count, ==, 0); 1504 mutex_exit(&vp->v_lock); 1505 mutex_exit(&zp->z_lock); 1506 zfs_znode_delete(zp, tx); 1507 } else if (unlinked) { 1508 zfs_unlinked_add(zp, tx); 1509 } 1510 1511 txtype = TX_REMOVE; 1512 if (flags & FIGNORECASE) 1513 txtype |= TX_CI; 1514 zfs_log_remove(zilog, tx, txtype, dzp, name); 1515 1516 dmu_tx_commit(tx); 1517 out: 1518 if (realnmp) 1519 pn_free(realnmp); 1520 1521 zfs_dirent_unlock(dl); 1522 1523 if (!delete_now) { 1524 VN_RELE(vp); 1525 } else if (xzp) { 1526 /* this rele is delayed to prevent nesting transactions */ 1527 VN_RELE(ZTOV(xzp)); 1528 } 1529 1530 ZFS_EXIT(zfsvfs); 1531 return (error); 1532 } 1533 1534 /* 1535 * Create a new directory and insert it into dvp using the name 1536 * provided. Return a pointer to the inserted directory. 1537 * 1538 * IN: dvp - vnode of directory to add subdir to. 1539 * dirname - name of new directory. 1540 * vap - attributes of new directory. 1541 * cr - credentials of caller. 1542 * ct - caller context 1543 * vsecp - ACL to be set 1544 * 1545 * OUT: vpp - vnode of created directory. 
1546 * 1547 * RETURN: 0 if success 1548 * error code if failure 1549 * 1550 * Timestamps: 1551 * dvp - ctime|mtime updated 1552 * vp - ctime|mtime|atime updated 1553 */ 1554 /*ARGSUSED*/ 1555 static int 1556 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr, 1557 caller_context_t *ct, int flags, vsecattr_t *vsecp) 1558 { 1559 znode_t *zp, *dzp = VTOZ(dvp); 1560 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1561 zilog_t *zilog; 1562 zfs_dirlock_t *dl; 1563 uint64_t txtype; 1564 dmu_tx_t *tx; 1565 int error; 1566 zfs_acl_t *aclp = NULL; 1567 zfs_fuid_info_t *fuidp = NULL; 1568 int zf = ZNEW; 1569 ksid_t *ksid; 1570 uid_t uid; 1571 gid_t gid = crgetgid(cr); 1572 1573 ASSERT(vap->va_type == VDIR); 1574 1575 /* 1576 * If we have an ephemeral id, ACL, or XVATTR then 1577 * make sure file system is at proper version 1578 */ 1579 1580 ksid = crgetsid(cr, KSID_OWNER); 1581 if (ksid) 1582 uid = ksid_getid(ksid); 1583 else 1584 uid = crgetuid(cr); 1585 if (zfsvfs->z_use_fuids == B_FALSE && 1586 (vsecp || (vap->va_mask & AT_XVATTR) || 1587 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1588 return (EINVAL); 1589 1590 ZFS_ENTER(zfsvfs); 1591 ZFS_VERIFY_ZP(dzp); 1592 zilog = zfsvfs->z_log; 1593 1594 if (dzp->z_phys->zp_flags & ZFS_XATTR) { 1595 ZFS_EXIT(zfsvfs); 1596 return (EINVAL); 1597 } 1598 1599 if (zfsvfs->z_utf8 && u8_validate(dirname, 1600 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1601 ZFS_EXIT(zfsvfs); 1602 return (EILSEQ); 1603 } 1604 if (flags & FIGNORECASE) 1605 zf |= ZCILOOK; 1606 1607 if (vap->va_mask & AT_XVATTR) 1608 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1609 crgetuid(cr), cr, vap->va_type)) != 0) { 1610 ZFS_EXIT(zfsvfs); 1611 return (error); 1612 } 1613 1614 /* 1615 * First make sure the new directory doesn't exist. 1616 */ 1617 top: 1618 *vpp = NULL; 1619 1620 if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, 1621 NULL, NULL)) { 1622 ZFS_EXIT(zfsvfs); 1623 return (error); 1624 } 1625 1626 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) { 1627 zfs_dirent_unlock(dl); 1628 ZFS_EXIT(zfsvfs); 1629 return (error); 1630 } 1631 1632 if (vsecp && aclp == NULL) { 1633 error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, &aclp); 1634 if (error) { 1635 zfs_dirent_unlock(dl); 1636 ZFS_EXIT(zfsvfs); 1637 return (error); 1638 } 1639 } 1640 /* 1641 * Add a new entry to the directory. 1642 */ 1643 tx = dmu_tx_create(zfsvfs->z_os); 1644 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); 1645 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); 1646 if ((aclp && aclp->z_has_fuids) || IS_EPHEMERAL(uid) || 1647 IS_EPHEMERAL(gid)) { 1648 if (zfsvfs->z_fuid_obj == 0) { 1649 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1650 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1651 FUID_SIZE_ESTIMATE(zfsvfs)); 1652 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); 1653 } else { 1654 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); 1655 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, 1656 FUID_SIZE_ESTIMATE(zfsvfs)); 1657 } 1658 } 1659 if ((dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) || aclp) 1660 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 1661 0, SPA_MAXBLOCKSIZE); 1662 error = dmu_tx_assign(tx, zfsvfs->z_assign); 1663 if (error) { 1664 zfs_dirent_unlock(dl); 1665 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 1666 dmu_tx_wait(tx); 1667 dmu_tx_abort(tx); 1668 goto top; 1669 } 1670 dmu_tx_abort(tx); 1671 ZFS_EXIT(zfsvfs); 1672 if (aclp) 1673 zfs_acl_free(aclp); 1674 return (error); 1675 } 1676 1677 /* 1678 * Create new node. 
1679 */ 1680 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, aclp, &fuidp); 1681 1682 if (aclp) 1683 zfs_acl_free(aclp); 1684 1685 /* 1686 * Now put new name in parent dir. 1687 */ 1688 (void) zfs_link_create(dl, zp, tx, ZNEW); 1689 1690 *vpp = ZTOV(zp); 1691 1692 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap); 1693 if (flags & FIGNORECASE) 1694 txtype |= TX_CI; 1695 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, fuidp, vap); 1696 1697 if (fuidp) 1698 zfs_fuid_info_free(fuidp); 1699 dmu_tx_commit(tx); 1700 1701 zfs_dirent_unlock(dl); 1702 1703 ZFS_EXIT(zfsvfs); 1704 return (0); 1705 } 1706 1707 /* 1708 * Remove a directory subdir entry. If the current working 1709 * directory is the same as the subdir to be removed, the 1710 * remove will fail. 1711 * 1712 * IN: dvp - vnode of directory to remove from. 1713 * name - name of directory to be removed. 1714 * cwd - vnode of current working directory. 1715 * cr - credentials of caller. 1716 * ct - caller context 1717 * flags - case flags 1718 * 1719 * RETURN: 0 if success 1720 * error code if failure 1721 * 1722 * Timestamps: 1723 * dvp - ctime|mtime updated 1724 */ 1725 /*ARGSUSED*/ 1726 static int 1727 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, 1728 caller_context_t *ct, int flags) 1729 { 1730 znode_t *dzp = VTOZ(dvp); 1731 znode_t *zp; 1732 vnode_t *vp; 1733 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1734 zilog_t *zilog; 1735 zfs_dirlock_t *dl; 1736 dmu_tx_t *tx; 1737 int error; 1738 int zflg = ZEXISTS; 1739 1740 ZFS_ENTER(zfsvfs); 1741 ZFS_VERIFY_ZP(dzp); 1742 zilog = zfsvfs->z_log; 1743 1744 if (flags & FIGNORECASE) 1745 zflg |= ZCILOOK; 1746 top: 1747 zp = NULL; 1748 1749 /* 1750 * Attempt to lock directory; fail if entry doesn't exist. 1751 */ 1752 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1753 NULL, NULL)) { 1754 ZFS_EXIT(zfsvfs); 1755 return (error); 1756 } 1757 1758 vp = ZTOV(zp); 1759 1760 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 1761 goto out; 1762 } 1763 1764 if (vp->v_type != VDIR) { 1765 error = ENOTDIR; 1766 goto out; 1767 } 1768 1769 if (vp == cwd) { 1770 error = EINVAL; 1771 goto out; 1772 } 1773 1774 vnevent_rmdir(vp, dvp, name, ct); 1775 1776 /* 1777 * Grab a lock on the directory to make sure that noone is 1778 * trying to add (or lookup) entries while we are removing it. 1779 */ 1780 rw_enter(&zp->z_name_lock, RW_WRITER); 1781 1782 /* 1783 * Grab a lock on the parent pointer to make sure we play well 1784 * with the treewalk and directory rename code. 
1785 */ 1786 rw_enter(&zp->z_parent_lock, RW_WRITER); 1787 1788 tx = dmu_tx_create(zfsvfs->z_os); 1789 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 1790 dmu_tx_hold_bonus(tx, zp->z_id); 1791 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 1792 error = dmu_tx_assign(tx, zfsvfs->z_assign); 1793 if (error) { 1794 rw_exit(&zp->z_parent_lock); 1795 rw_exit(&zp->z_name_lock); 1796 zfs_dirent_unlock(dl); 1797 VN_RELE(vp); 1798 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 1799 dmu_tx_wait(tx); 1800 dmu_tx_abort(tx); 1801 goto top; 1802 } 1803 dmu_tx_abort(tx); 1804 ZFS_EXIT(zfsvfs); 1805 return (error); 1806 } 1807 1808 error = zfs_link_destroy(dl, zp, tx, zflg, NULL); 1809 1810 if (error == 0) { 1811 uint64_t txtype = TX_RMDIR; 1812 if (flags & FIGNORECASE) 1813 txtype |= TX_CI; 1814 zfs_log_remove(zilog, tx, txtype, dzp, name); 1815 } 1816 1817 dmu_tx_commit(tx); 1818 1819 rw_exit(&zp->z_parent_lock); 1820 rw_exit(&zp->z_name_lock); 1821 out: 1822 zfs_dirent_unlock(dl); 1823 1824 VN_RELE(vp); 1825 1826 ZFS_EXIT(zfsvfs); 1827 return (error); 1828 } 1829 1830 /* 1831 * Read as many directory entries as will fit into the provided 1832 * buffer from the given directory cursor position (specified in 1833 * the uio structure. 1834 * 1835 * IN: vp - vnode of directory to read. 1836 * uio - structure supplying read location, range info, 1837 * and return buffer. 1838 * cr - credentials of caller. 1839 * ct - caller context 1840 * flags - case flags 1841 * 1842 * OUT: uio - updated offset and range, buffer filled. 1843 * eofp - set to true if end-of-file detected. 1844 * 1845 * RETURN: 0 if success 1846 * error code if failure 1847 * 1848 * Timestamps: 1849 * vp - atime updated 1850 * 1851 * Note that the low 4 bits of the cookie returned by zap is always zero. 1852 * This allows us to use the low range for "special" directory entries: 1853 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem, 1854 * we use the offset 2 for the '.zfs' directory. 1855 */ 1856 /* ARGSUSED */ 1857 static int 1858 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, 1859 caller_context_t *ct, int flags) 1860 { 1861 znode_t *zp = VTOZ(vp); 1862 iovec_t *iovp; 1863 edirent_t *eodp; 1864 dirent64_t *odp; 1865 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1866 objset_t *os; 1867 caddr_t outbuf; 1868 size_t bufsize; 1869 zap_cursor_t zc; 1870 zap_attribute_t zap; 1871 uint_t bytes_wanted; 1872 uint64_t offset; /* must be unsigned; checks for < 1 */ 1873 int local_eof; 1874 int outcount; 1875 int error; 1876 uint8_t prefetch; 1877 boolean_t check_sysattrs; 1878 1879 ZFS_ENTER(zfsvfs); 1880 ZFS_VERIFY_ZP(zp); 1881 1882 /* 1883 * If we are not given an eof variable, 1884 * use a local one. 1885 */ 1886 if (eofp == NULL) 1887 eofp = &local_eof; 1888 1889 /* 1890 * Check for valid iov_len. 1891 */ 1892 if (uio->uio_iov->iov_len <= 0) { 1893 ZFS_EXIT(zfsvfs); 1894 return (EINVAL); 1895 } 1896 1897 /* 1898 * Quit if directory has been removed (posix) 1899 */ 1900 if ((*eofp = zp->z_unlinked) != 0) { 1901 ZFS_EXIT(zfsvfs); 1902 return (0); 1903 } 1904 1905 error = 0; 1906 os = zfsvfs->z_os; 1907 offset = uio->uio_loffset; 1908 prefetch = zp->z_zn_prefetch; 1909 1910 /* 1911 * Initialize the iterator cursor. 1912 */ 1913 if (offset <= 3) { 1914 /* 1915 * Start iteration from the beginning of the directory. 1916 */ 1917 zap_cursor_init(&zc, os, zp->z_id); 1918 } else { 1919 /* 1920 * The offset is a serialized cursor. 
1921 */ 1922 zap_cursor_init_serialized(&zc, os, zp->z_id, offset); 1923 } 1924 1925 /* 1926 * Get space to change directory entries into fs independent format. 1927 */ 1928 iovp = uio->uio_iov; 1929 bytes_wanted = iovp->iov_len; 1930 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { 1931 bufsize = bytes_wanted; 1932 outbuf = kmem_alloc(bufsize, KM_SLEEP); 1933 odp = (struct dirent64 *)outbuf; 1934 } else { 1935 bufsize = bytes_wanted; 1936 odp = (struct dirent64 *)iovp->iov_base; 1937 } 1938 eodp = (struct edirent *)odp; 1939 1940 /* 1941 * If this VFS supports the system attribute view interface; and 1942 * we're looking at an extended attribute directory; and we care 1943 * about normalization conflicts on this vfs; then we must check 1944 * for normalization conflicts with the sysattr name space. 1945 */ 1946 check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && 1947 (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm && 1948 (flags & V_RDDIR_ENTFLAGS); 1949 1950 /* 1951 * Transform to file-system independent format 1952 */ 1953 outcount = 0; 1954 while (outcount < bytes_wanted) { 1955 ino64_t objnum; 1956 ushort_t reclen; 1957 off64_t *next; 1958 1959 /* 1960 * Special case `.', `..', and `.zfs'. 1961 */ 1962 if (offset == 0) { 1963 (void) strcpy(zap.za_name, "."); 1964 zap.za_normalization_conflict = 0; 1965 objnum = zp->z_id; 1966 } else if (offset == 1) { 1967 (void) strcpy(zap.za_name, ".."); 1968 zap.za_normalization_conflict = 0; 1969 objnum = zp->z_phys->zp_parent; 1970 } else if (offset == 2 && zfs_show_ctldir(zp)) { 1971 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME); 1972 zap.za_normalization_conflict = 0; 1973 objnum = ZFSCTL_INO_ROOT; 1974 } else { 1975 /* 1976 * Grab next entry. 1977 */ 1978 if (error = zap_cursor_retrieve(&zc, &zap)) { 1979 if ((*eofp = (error == ENOENT)) != 0) 1980 break; 1981 else 1982 goto update; 1983 } 1984 1985 if (zap.za_integer_length != 8 || 1986 zap.za_num_integers != 1) { 1987 cmn_err(CE_WARN, "zap_readdir: bad directory " 1988 "entry, obj = %lld, offset = %lld\n", 1989 (u_longlong_t)zp->z_id, 1990 (u_longlong_t)offset); 1991 error = ENXIO; 1992 goto update; 1993 } 1994 1995 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); 1996 /* 1997 * MacOS X can extract the object type here such as: 1998 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer); 1999 */ 2000 2001 if (check_sysattrs && !zap.za_normalization_conflict) { 2002 zap.za_normalization_conflict = 2003 xattr_sysattr_casechk(zap.za_name); 2004 } 2005 } 2006 2007 if (flags & V_RDDIR_ENTFLAGS) 2008 reclen = EDIRENT_RECLEN(strlen(zap.za_name)); 2009 else 2010 reclen = DIRENT64_RECLEN(strlen(zap.za_name)); 2011 2012 /* 2013 * Will this entry fit in the buffer? 2014 */ 2015 if (outcount + reclen > bufsize) { 2016 /* 2017 * Did we manage to fit anything in the buffer? 2018 */ 2019 if (!outcount) { 2020 error = EINVAL; 2021 goto update; 2022 } 2023 break; 2024 } 2025 if (flags & V_RDDIR_ENTFLAGS) { 2026 /* 2027 * Add extended flag entry: 2028 */ 2029 eodp->ed_ino = objnum; 2030 eodp->ed_reclen = reclen; 2031 /* NOTE: ed_off is the offset for the *next* entry */ 2032 next = &(eodp->ed_off); 2033 eodp->ed_eflags = zap.za_normalization_conflict ? 
2034 ED_CASE_CONFLICT : 0; 2035 (void) strncpy(eodp->ed_name, zap.za_name, 2036 EDIRENT_NAMELEN(reclen)); 2037 eodp = (edirent_t *)((intptr_t)eodp + reclen); 2038 } else { 2039 /* 2040 * Add normal entry: 2041 */ 2042 odp->d_ino = objnum; 2043 odp->d_reclen = reclen; 2044 /* NOTE: d_off is the offset for the *next* entry */ 2045 next = &(odp->d_off); 2046 (void) strncpy(odp->d_name, zap.za_name, 2047 DIRENT64_NAMELEN(reclen)); 2048 odp = (dirent64_t *)((intptr_t)odp + reclen); 2049 } 2050 outcount += reclen; 2051 2052 ASSERT(outcount <= bufsize); 2053 2054 /* Prefetch znode */ 2055 if (prefetch) 2056 dmu_prefetch(os, objnum, 0, 0); 2057 2058 /* 2059 * Move to the next entry, fill in the previous offset. 2060 */ 2061 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) { 2062 zap_cursor_advance(&zc); 2063 offset = zap_cursor_serialize(&zc); 2064 } else { 2065 offset += 1; 2066 } 2067 *next = offset; 2068 } 2069 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */ 2070 2071 if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { 2072 iovp->iov_base += outcount; 2073 iovp->iov_len -= outcount; 2074 uio->uio_resid -= outcount; 2075 } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) { 2076 /* 2077 * Reset the pointer. 2078 */ 2079 offset = uio->uio_loffset; 2080 } 2081 2082 update: 2083 zap_cursor_fini(&zc); 2084 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) 2085 kmem_free(outbuf, bufsize); 2086 2087 if (error == ENOENT) 2088 error = 0; 2089 2090 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 2091 2092 uio->uio_loffset = offset; 2093 ZFS_EXIT(zfsvfs); 2094 return (error); 2095 } 2096 2097 ulong_t zfs_fsync_sync_cnt = 4; 2098 2099 static int 2100 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) 2101 { 2102 znode_t *zp = VTOZ(vp); 2103 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2104 2105 /* 2106 * Regardless of whether this is required for standards conformance, 2107 * this is the logical behavior when fsync() is called on a file with 2108 * dirty pages. We use B_ASYNC since the ZIL transactions are already 2109 * going to be pushed out as part of the zil_commit(). 2110 */ 2111 if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) && 2112 (vp->v_type == VREG) && !(IS_SWAPVP(vp))) 2113 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct); 2114 2115 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); 2116 2117 ZFS_ENTER(zfsvfs); 2118 ZFS_VERIFY_ZP(zp); 2119 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); 2120 ZFS_EXIT(zfsvfs); 2121 return (0); 2122 } 2123 2124 2125 /* 2126 * Get the requested file attributes and place them in the provided 2127 * vattr structure. 2128 * 2129 * IN: vp - vnode of file. 2130 * vap - va_mask identifies requested attributes. 2131 * If AT_XVATTR set, then optional attrs are requested 2132 * flags - ATTR_NOACLCHECK (CIFS server context) 2133 * cr - credentials of caller. 2134 * ct - caller context 2135 * 2136 * OUT: vap - attribute values. 2137 * 2138 * RETURN: 0 (always succeeds) 2139 */ 2140 /* ARGSUSED */ 2141 static int 2142 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2143 caller_context_t *ct) 2144 { 2145 znode_t *zp = VTOZ(vp); 2146 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2147 znode_phys_t *pzp; 2148 int error = 0; 2149 uint64_t links; 2150 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2151 xoptattr_t *xoap = NULL; 2152 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? 
B_TRUE : B_FALSE; 2153 2154 ZFS_ENTER(zfsvfs); 2155 ZFS_VERIFY_ZP(zp); 2156 pzp = zp->z_phys; 2157 2158 mutex_enter(&zp->z_lock); 2159 2160 /* 2161 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES. 2162 * Also, if we are the owner don't bother, since owner should 2163 * always be allowed to read basic attributes of file. 2164 */ 2165 if (!(pzp->zp_flags & ZFS_ACL_TRIVIAL) && 2166 (pzp->zp_uid != crgetuid(cr))) { 2167 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, 2168 skipaclchk, cr)) { 2169 mutex_exit(&zp->z_lock); 2170 ZFS_EXIT(zfsvfs); 2171 return (error); 2172 } 2173 } 2174 2175 /* 2176 * Return all attributes. It's cheaper to provide the answer 2177 * than to determine whether we were asked the question. 2178 */ 2179 2180 vap->va_type = vp->v_type; 2181 vap->va_mode = pzp->zp_mode & MODEMASK; 2182 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); 2183 vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev; 2184 vap->va_nodeid = zp->z_id; 2185 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp)) 2186 links = pzp->zp_links + 1; 2187 else 2188 links = pzp->zp_links; 2189 vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! */ 2190 vap->va_size = pzp->zp_size; 2191 vap->va_rdev = vp->v_rdev; 2192 vap->va_seq = zp->z_seq; 2193 2194 /* 2195 * Add in any requested optional attributes and the create time. 2196 * Also set the corresponding bits in the returned attribute bitmap. 2197 */ 2198 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { 2199 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { 2200 xoap->xoa_archive = 2201 ((pzp->zp_flags & ZFS_ARCHIVE) != 0); 2202 XVA_SET_RTN(xvap, XAT_ARCHIVE); 2203 } 2204 2205 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { 2206 xoap->xoa_readonly = 2207 ((pzp->zp_flags & ZFS_READONLY) != 0); 2208 XVA_SET_RTN(xvap, XAT_READONLY); 2209 } 2210 2211 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { 2212 xoap->xoa_system = 2213 ((pzp->zp_flags & ZFS_SYSTEM) != 0); 2214 XVA_SET_RTN(xvap, XAT_SYSTEM); 2215 } 2216 2217 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { 2218 xoap->xoa_hidden = 2219 ((pzp->zp_flags & ZFS_HIDDEN) != 0); 2220 XVA_SET_RTN(xvap, XAT_HIDDEN); 2221 } 2222 2223 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2224 xoap->xoa_nounlink = 2225 ((pzp->zp_flags & ZFS_NOUNLINK) != 0); 2226 XVA_SET_RTN(xvap, XAT_NOUNLINK); 2227 } 2228 2229 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2230 xoap->xoa_immutable = 2231 ((pzp->zp_flags & ZFS_IMMUTABLE) != 0); 2232 XVA_SET_RTN(xvap, XAT_IMMUTABLE); 2233 } 2234 2235 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2236 xoap->xoa_appendonly = 2237 ((pzp->zp_flags & ZFS_APPENDONLY) != 0); 2238 XVA_SET_RTN(xvap, XAT_APPENDONLY); 2239 } 2240 2241 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2242 xoap->xoa_nodump = 2243 ((pzp->zp_flags & ZFS_NODUMP) != 0); 2244 XVA_SET_RTN(xvap, XAT_NODUMP); 2245 } 2246 2247 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { 2248 xoap->xoa_opaque = 2249 ((pzp->zp_flags & ZFS_OPAQUE) != 0); 2250 XVA_SET_RTN(xvap, XAT_OPAQUE); 2251 } 2252 2253 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2254 xoap->xoa_av_quarantined = 2255 ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0); 2256 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); 2257 } 2258 2259 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2260 xoap->xoa_av_modified = 2261 ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0); 2262 XVA_SET_RTN(xvap, XAT_AV_MODIFIED); 2263 } 2264 2265 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) && 2266 vp->v_type == VREG && 2267 (pzp->zp_flags & ZFS_BONUS_SCANSTAMP)) { 2268 size_t len; 2269 dmu_object_info_t doi; 2270 2271 /* 2272 * Only VREG files have anti-virus 
scanstamps, so we 2273 * won't conflict with symlinks in the bonus buffer. 2274 */ 2275 dmu_object_info_from_db(zp->z_dbuf, &doi); 2276 len = sizeof (xoap->xoa_av_scanstamp) + 2277 sizeof (znode_phys_t); 2278 if (len <= doi.doi_bonus_size) { 2279 /* 2280 * pzp points to the start of the 2281 * znode_phys_t. pzp + 1 points to the 2282 * first byte after the znode_phys_t. 2283 */ 2284 (void) memcpy(xoap->xoa_av_scanstamp, 2285 pzp + 1, 2286 sizeof (xoap->xoa_av_scanstamp)); 2287 XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP); 2288 } 2289 } 2290 2291 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { 2292 ZFS_TIME_DECODE(&xoap->xoa_createtime, pzp->zp_crtime); 2293 XVA_SET_RTN(xvap, XAT_CREATETIME); 2294 } 2295 } 2296 2297 ZFS_TIME_DECODE(&vap->va_atime, pzp->zp_atime); 2298 ZFS_TIME_DECODE(&vap->va_mtime, pzp->zp_mtime); 2299 ZFS_TIME_DECODE(&vap->va_ctime, pzp->zp_ctime); 2300 2301 mutex_exit(&zp->z_lock); 2302 2303 dmu_object_size_from_db(zp->z_dbuf, &vap->va_blksize, &vap->va_nblocks); 2304 2305 if (zp->z_blksz == 0) { 2306 /* 2307 * Block size hasn't been set; suggest maximal I/O transfers. 2308 */ 2309 vap->va_blksize = zfsvfs->z_max_blksz; 2310 } 2311 2312 ZFS_EXIT(zfsvfs); 2313 return (0); 2314 } 2315 2316 /* 2317 * Set the file attributes to the values contained in the 2318 * vattr structure. 2319 * 2320 * IN: vp - vnode of file to be modified. 2321 * vap - new attribute values. 2322 * If AT_XVATTR set, then optional attrs are being set 2323 * flags - ATTR_UTIME set if non-default time values provided. 2324 * - ATTR_NOACLCHECK (CIFS context only). 2325 * cr - credentials of caller. 2326 * ct - caller context 2327 * 2328 * RETURN: 0 if success 2329 * error code if failure 2330 * 2331 * Timestamps: 2332 * vp - ctime updated, mtime updated if size changed. 2333 */ 2334 /* ARGSUSED */ 2335 static int 2336 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2337 caller_context_t *ct) 2338 { 2339 znode_t *zp = VTOZ(vp); 2340 znode_phys_t *pzp; 2341 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2342 zilog_t *zilog; 2343 dmu_tx_t *tx; 2344 vattr_t oldva; 2345 xvattr_t tmpxvattr; 2346 uint_t mask = vap->va_mask; 2347 uint_t saved_mask; 2348 int trim_mask = 0; 2349 uint64_t new_mode; 2350 znode_t *attrzp; 2351 int need_policy = FALSE; 2352 int err; 2353 zfs_fuid_info_t *fuidp = NULL; 2354 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2355 xoptattr_t *xoap; 2356 zfs_acl_t *aclp = NULL; 2357 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 2358 2359 if (mask == 0) 2360 return (0); 2361 2362 if (mask & AT_NOSET) 2363 return (EINVAL); 2364 2365 ZFS_ENTER(zfsvfs); 2366 ZFS_VERIFY_ZP(zp); 2367 2368 pzp = zp->z_phys; 2369 zilog = zfsvfs->z_log; 2370 2371 /* 2372 * Make sure that if we have ephemeral uid/gid or xvattr specified 2373 * that file system is at proper version level 2374 */ 2375 2376 if (zfsvfs->z_use_fuids == B_FALSE && 2377 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || 2378 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) || 2379 (mask & AT_XVATTR))) { 2380 ZFS_EXIT(zfsvfs); 2381 return (EINVAL); 2382 } 2383 2384 if (mask & AT_SIZE && vp->v_type == VDIR) { 2385 ZFS_EXIT(zfsvfs); 2386 return (EISDIR); 2387 } 2388 2389 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) { 2390 ZFS_EXIT(zfsvfs); 2391 return (EINVAL); 2392 } 2393 2394 /* 2395 * If this is an xvattr_t, then get a pointer to the structure of 2396 * optional attributes. If this is NULL, then we have a vattr_t. 
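 *
 * For reference, a caller asking for optional attributes typically
 * builds the request along these lines (an illustrative sketch of the
 * caller side only, not code from this file):
 *
 *	xvattr_t xva;
 *
 *	xva_init(&xva);				// request is an xvattr_t
 *	XVA_SET_REQ(&xva, XAT_READONLY);	// attribute being set
 *	xva.xva_xoptattrs.xoa_readonly = B_TRUE;
 *	error = VOP_SETATTR(vp, &xva.xva_vattr, 0, cr, ct);
 *
 * For such a request xva_getxoptattr() below returns
 * &xva.xva_xoptattrs; for a plain vattr_t it returns NULL.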
2397 */ 2398 xoap = xva_getxoptattr(xvap); 2399 2400 xva_init(&tmpxvattr); 2401 2402 /* 2403 * Immutable files can only alter immutable bit and atime 2404 */ 2405 if ((pzp->zp_flags & ZFS_IMMUTABLE) && 2406 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) || 2407 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { 2408 ZFS_EXIT(zfsvfs); 2409 return (EPERM); 2410 } 2411 2412 if ((mask & AT_SIZE) && (pzp->zp_flags & ZFS_READONLY)) { 2413 ZFS_EXIT(zfsvfs); 2414 return (EPERM); 2415 } 2416 2417 /* 2418 * Verify timestamps doesn't overflow 32 bits. 2419 * ZFS can handle large timestamps, but 32bit syscalls can't 2420 * handle times greater than 2039. This check should be removed 2421 * once large timestamps are fully supported. 2422 */ 2423 if (mask & (AT_ATIME | AT_MTIME)) { 2424 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || 2425 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { 2426 ZFS_EXIT(zfsvfs); 2427 return (EOVERFLOW); 2428 } 2429 } 2430 2431 top: 2432 attrzp = NULL; 2433 2434 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { 2435 ZFS_EXIT(zfsvfs); 2436 return (EROFS); 2437 } 2438 2439 /* 2440 * First validate permissions 2441 */ 2442 2443 if (mask & AT_SIZE) { 2444 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr); 2445 if (err) { 2446 ZFS_EXIT(zfsvfs); 2447 return (err); 2448 } 2449 /* 2450 * XXX - Note, we are not providing any open 2451 * mode flags here (like FNDELAY), so we may 2452 * block if there are locks present... this 2453 * should be addressed in openat(). 2454 */ 2455 /* XXX - would it be OK to generate a log record here? */ 2456 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE); 2457 if (err) { 2458 ZFS_EXIT(zfsvfs); 2459 return (err); 2460 } 2461 } 2462 2463 if (mask & (AT_ATIME|AT_MTIME) || 2464 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) || 2465 XVA_ISSET_REQ(xvap, XAT_READONLY) || 2466 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) || 2467 XVA_ISSET_REQ(xvap, XAT_CREATETIME) || 2468 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) 2469 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0, 2470 skipaclchk, cr); 2471 2472 if (mask & (AT_UID|AT_GID)) { 2473 int idmask = (mask & (AT_UID|AT_GID)); 2474 int take_owner; 2475 int take_group; 2476 2477 /* 2478 * NOTE: even if a new mode is being set, 2479 * we may clear S_ISUID/S_ISGID bits. 2480 */ 2481 2482 if (!(mask & AT_MODE)) 2483 vap->va_mode = pzp->zp_mode; 2484 2485 /* 2486 * Take ownership or chgrp to group we are a member of 2487 */ 2488 2489 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr)); 2490 take_group = (mask & AT_GID) && 2491 zfs_groupmember(zfsvfs, vap->va_gid, cr); 2492 2493 /* 2494 * If both AT_UID and AT_GID are set then take_owner and 2495 * take_group must both be set in order to allow taking 2496 * ownership. 
2497 * 2498 * Otherwise, send the check through secpolicy_vnode_setattr() 2499 * 2500 */ 2501 2502 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) || 2503 ((idmask == AT_UID) && take_owner) || 2504 ((idmask == AT_GID) && take_group)) { 2505 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0, 2506 skipaclchk, cr) == 0) { 2507 /* 2508 * Remove setuid/setgid for non-privileged users 2509 */ 2510 secpolicy_setid_clear(vap, cr); 2511 trim_mask = (mask & (AT_UID|AT_GID)); 2512 } else { 2513 need_policy = TRUE; 2514 } 2515 } else { 2516 need_policy = TRUE; 2517 } 2518 } 2519 2520 mutex_enter(&zp->z_lock); 2521 oldva.va_mode = pzp->zp_mode; 2522 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid); 2523 if (mask & AT_XVATTR) { 2524 /* 2525 * Update xvattr mask to include only those attributes 2526 * that are actually changing. 2527 * 2528 * the bits will be restored prior to actually setting 2529 * the attributes so the caller thinks they were set. 2530 */ 2531 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2532 if (xoap->xoa_appendonly != 2533 ((pzp->zp_flags & ZFS_APPENDONLY) != 0)) { 2534 need_policy = TRUE; 2535 } else { 2536 XVA_CLR_REQ(xvap, XAT_APPENDONLY); 2537 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY); 2538 } 2539 } 2540 2541 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2542 if (xoap->xoa_nounlink != 2543 ((pzp->zp_flags & ZFS_NOUNLINK) != 0)) { 2544 need_policy = TRUE; 2545 } else { 2546 XVA_CLR_REQ(xvap, XAT_NOUNLINK); 2547 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK); 2548 } 2549 } 2550 2551 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2552 if (xoap->xoa_immutable != 2553 ((pzp->zp_flags & ZFS_IMMUTABLE) != 0)) { 2554 need_policy = TRUE; 2555 } else { 2556 XVA_CLR_REQ(xvap, XAT_IMMUTABLE); 2557 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE); 2558 } 2559 } 2560 2561 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2562 if (xoap->xoa_nodump != 2563 ((pzp->zp_flags & ZFS_NODUMP) != 0)) { 2564 need_policy = TRUE; 2565 } else { 2566 XVA_CLR_REQ(xvap, XAT_NODUMP); 2567 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP); 2568 } 2569 } 2570 2571 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2572 if (xoap->xoa_av_modified != 2573 ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0)) { 2574 need_policy = TRUE; 2575 } else { 2576 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED); 2577 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED); 2578 } 2579 } 2580 2581 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2582 if ((vp->v_type != VREG && 2583 xoap->xoa_av_quarantined) || 2584 xoap->xoa_av_quarantined != 2585 ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0)) { 2586 need_policy = TRUE; 2587 } else { 2588 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED); 2589 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED); 2590 } 2591 } 2592 2593 if (need_policy == FALSE && 2594 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) || 2595 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) { 2596 need_policy = TRUE; 2597 } 2598 } 2599 2600 mutex_exit(&zp->z_lock); 2601 2602 if (mask & AT_MODE) { 2603 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) { 2604 err = secpolicy_setid_setsticky_clear(vp, vap, 2605 &oldva, cr); 2606 if (err) { 2607 ZFS_EXIT(zfsvfs); 2608 return (err); 2609 } 2610 trim_mask |= AT_MODE; 2611 } else { 2612 need_policy = TRUE; 2613 } 2614 } 2615 2616 if (need_policy) { 2617 /* 2618 * If trim_mask is set then take ownership 2619 * has been granted or write_acl is present and user 2620 * has the ability to modify mode. In that case remove 2621 * UID|GID and or MODE from mask so that 2622 * secpolicy_vnode_setattr() doesn't revoke it. 
2623 */ 2624 2625 if (trim_mask) { 2626 saved_mask = vap->va_mask; 2627 vap->va_mask &= ~trim_mask; 2628 } 2629 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags, 2630 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp); 2631 if (err) { 2632 ZFS_EXIT(zfsvfs); 2633 return (err); 2634 } 2635 2636 if (trim_mask) 2637 vap->va_mask |= saved_mask; 2638 } 2639 2640 /* 2641 * secpolicy_vnode_setattr, or take ownership may have 2642 * changed va_mask 2643 */ 2644 mask = vap->va_mask; 2645 2646 tx = dmu_tx_create(zfsvfs->z_os); 2647 dmu_tx_hold_bonus(tx, zp->z_id); 2648 if (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || 2649 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid))) { 2650 if (zfsvfs->z_fuid_obj == 0) { 2651 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 2652 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 2653 FUID_SIZE_ESTIMATE(zfsvfs)); 2654 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); 2655 } else { 2656 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); 2657 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, 2658 FUID_SIZE_ESTIMATE(zfsvfs)); 2659 } 2660 } 2661 2662 if (mask & AT_MODE) { 2663 uint64_t pmode = pzp->zp_mode; 2664 2665 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT); 2666 2667 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)) { 2668 dmu_tx_abort(tx); 2669 ZFS_EXIT(zfsvfs); 2670 return (err); 2671 } 2672 if (pzp->zp_acl.z_acl_extern_obj) { 2673 /* Are we upgrading ACL from old V0 format to new V1 */ 2674 if (zfsvfs->z_version <= ZPL_VERSION_FUID && 2675 pzp->zp_acl.z_acl_version == 2676 ZFS_ACL_VERSION_INITIAL) { 2677 dmu_tx_hold_free(tx, 2678 pzp->zp_acl.z_acl_extern_obj, 0, 2679 DMU_OBJECT_END); 2680 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 2681 0, aclp->z_acl_bytes); 2682 } else { 2683 dmu_tx_hold_write(tx, 2684 pzp->zp_acl.z_acl_extern_obj, 0, 2685 aclp->z_acl_bytes); 2686 } 2687 } else if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { 2688 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 2689 0, aclp->z_acl_bytes); 2690 } 2691 } 2692 2693 if ((mask & (AT_UID | AT_GID)) && pzp->zp_xattr != 0) { 2694 err = zfs_zget(zp->z_zfsvfs, pzp->zp_xattr, &attrzp); 2695 if (err) { 2696 dmu_tx_abort(tx); 2697 ZFS_EXIT(zfsvfs); 2698 if (aclp) 2699 zfs_acl_free(aclp); 2700 return (err); 2701 } 2702 dmu_tx_hold_bonus(tx, attrzp->z_id); 2703 } 2704 2705 err = dmu_tx_assign(tx, zfsvfs->z_assign); 2706 if (err) { 2707 if (attrzp) 2708 VN_RELE(ZTOV(attrzp)); 2709 2710 if (aclp) { 2711 zfs_acl_free(aclp); 2712 aclp = NULL; 2713 } 2714 2715 if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 2716 dmu_tx_wait(tx); 2717 dmu_tx_abort(tx); 2718 goto top; 2719 } 2720 dmu_tx_abort(tx); 2721 ZFS_EXIT(zfsvfs); 2722 return (err); 2723 } 2724 2725 dmu_buf_will_dirty(zp->z_dbuf, tx); 2726 2727 /* 2728 * Set each attribute requested. 2729 * We group settings according to the locks they need to acquire. 2730 * 2731 * Note: you cannot set ctime directly, although it will be 2732 * updated as a side-effect of calling this function. 
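 *
 * For example, a mode-only change (mask == AT_MODE) still reaches the
 * "else if (mask != 0)" arm further down, which calls
 * zfs_time_stamper_locked(zp, STATE_CHANGED, tx) and therefore
 * advances zp_ctime even though the caller never asked for AT_CTIME.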
2733 */ 2734 2735 mutex_enter(&zp->z_lock); 2736 2737 if (mask & AT_MODE) { 2738 mutex_enter(&zp->z_acl_lock); 2739 zp->z_phys->zp_mode = new_mode; 2740 err = zfs_aclset_common(zp, aclp, cr, &fuidp, tx); 2741 ASSERT3U(err, ==, 0); 2742 mutex_exit(&zp->z_acl_lock); 2743 } 2744 2745 if (attrzp) 2746 mutex_enter(&attrzp->z_lock); 2747 2748 if (mask & AT_UID) { 2749 pzp->zp_uid = zfs_fuid_create(zfsvfs, 2750 vap->va_uid, cr, ZFS_OWNER, tx, &fuidp); 2751 if (attrzp) { 2752 attrzp->z_phys->zp_uid = zfs_fuid_create(zfsvfs, 2753 vap->va_uid, cr, ZFS_OWNER, tx, &fuidp); 2754 } 2755 } 2756 2757 if (mask & AT_GID) { 2758 pzp->zp_gid = zfs_fuid_create(zfsvfs, vap->va_gid, 2759 cr, ZFS_GROUP, tx, &fuidp); 2760 if (attrzp) 2761 attrzp->z_phys->zp_gid = zfs_fuid_create(zfsvfs, 2762 vap->va_gid, cr, ZFS_GROUP, tx, &fuidp); 2763 } 2764 2765 if (aclp) 2766 zfs_acl_free(aclp); 2767 2768 if (attrzp) 2769 mutex_exit(&attrzp->z_lock); 2770 2771 if (mask & AT_ATIME) 2772 ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime); 2773 2774 if (mask & AT_MTIME) 2775 ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime); 2776 2777 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */ 2778 if (mask & AT_SIZE) 2779 zfs_time_stamper_locked(zp, CONTENT_MODIFIED, tx); 2780 else if (mask != 0) 2781 zfs_time_stamper_locked(zp, STATE_CHANGED, tx); 2782 /* 2783 * Do this after setting timestamps to prevent timestamp 2784 * update from toggling bit 2785 */ 2786 2787 if (xoap && (mask & AT_XVATTR)) { 2788 2789 /* 2790 * restore trimmed off masks 2791 * so that return masks can be set for caller. 2792 */ 2793 2794 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) { 2795 XVA_SET_REQ(xvap, XAT_APPENDONLY); 2796 } 2797 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) { 2798 XVA_SET_REQ(xvap, XAT_NOUNLINK); 2799 } 2800 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) { 2801 XVA_SET_REQ(xvap, XAT_IMMUTABLE); 2802 } 2803 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) { 2804 XVA_SET_REQ(xvap, XAT_NODUMP); 2805 } 2806 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) { 2807 XVA_SET_REQ(xvap, XAT_AV_MODIFIED); 2808 } 2809 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) { 2810 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED); 2811 } 2812 2813 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) { 2814 size_t len; 2815 dmu_object_info_t doi; 2816 2817 ASSERT(vp->v_type == VREG); 2818 2819 /* Grow the bonus buffer if necessary. */ 2820 dmu_object_info_from_db(zp->z_dbuf, &doi); 2821 len = sizeof (xoap->xoa_av_scanstamp) + 2822 sizeof (znode_phys_t); 2823 if (len > doi.doi_bonus_size) 2824 VERIFY(dmu_set_bonus(zp->z_dbuf, len, tx) == 0); 2825 } 2826 zfs_xvattr_set(zp, xvap); 2827 } 2828 2829 if (mask != 0) 2830 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp); 2831 2832 if (fuidp) 2833 zfs_fuid_info_free(fuidp); 2834 mutex_exit(&zp->z_lock); 2835 2836 if (attrzp) 2837 VN_RELE(ZTOV(attrzp)); 2838 2839 dmu_tx_commit(tx); 2840 2841 ZFS_EXIT(zfsvfs); 2842 return (err); 2843 } 2844 2845 typedef struct zfs_zlock { 2846 krwlock_t *zl_rwlock; /* lock we acquired */ 2847 znode_t *zl_znode; /* znode we held */ 2848 struct zfs_zlock *zl_next; /* next in list */ 2849 } zfs_zlock_t; 2850 2851 /* 2852 * Drop locks and release vnodes that were held by zfs_rename_lock(). 
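 *
 * Typical usage, mirroring zfs_rename() below:
 *
 *	zfs_zlock_t *zl = NULL;
 *
 *	if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl))
 *		goto out;
 *	...
 * out:
 *	if (zl != NULL)
 *		zfs_rename_unlock(&zl);
 *
 * New entries are pushed on the head of the list, so the walk below
 * drops the locks in the reverse of their acquisition order and
 * leaves *zlpp NULL on return.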
2853 */ 2854 static void 2855 zfs_rename_unlock(zfs_zlock_t **zlpp) 2856 { 2857 zfs_zlock_t *zl; 2858 2859 while ((zl = *zlpp) != NULL) { 2860 if (zl->zl_znode != NULL) 2861 VN_RELE(ZTOV(zl->zl_znode)); 2862 rw_exit(zl->zl_rwlock); 2863 *zlpp = zl->zl_next; 2864 kmem_free(zl, sizeof (*zl)); 2865 } 2866 } 2867 2868 /* 2869 * Search back through the directory tree, using the ".." entries. 2870 * Lock each directory in the chain to prevent concurrent renames. 2871 * Fail any attempt to move a directory into one of its own descendants. 2872 * XXX - z_parent_lock can overlap with map or grow locks 2873 */ 2874 static int 2875 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) 2876 { 2877 zfs_zlock_t *zl; 2878 znode_t *zp = tdzp; 2879 uint64_t rootid = zp->z_zfsvfs->z_root; 2880 uint64_t *oidp = &zp->z_id; 2881 krwlock_t *rwlp = &szp->z_parent_lock; 2882 krw_t rw = RW_WRITER; 2883 2884 /* 2885 * First pass write-locks szp and compares to zp->z_id. 2886 * Later passes read-lock zp and compare to zp->z_parent. 2887 */ 2888 do { 2889 if (!rw_tryenter(rwlp, rw)) { 2890 /* 2891 * Another thread is renaming in this path. 2892 * Note that if we are a WRITER, we don't have any 2893 * parent_locks held yet. 2894 */ 2895 if (rw == RW_READER && zp->z_id > szp->z_id) { 2896 /* 2897 * Drop our locks and restart 2898 */ 2899 zfs_rename_unlock(&zl); 2900 *zlpp = NULL; 2901 zp = tdzp; 2902 oidp = &zp->z_id; 2903 rwlp = &szp->z_parent_lock; 2904 rw = RW_WRITER; 2905 continue; 2906 } else { 2907 /* 2908 * Wait for other thread to drop its locks 2909 */ 2910 rw_enter(rwlp, rw); 2911 } 2912 } 2913 2914 zl = kmem_alloc(sizeof (*zl), KM_SLEEP); 2915 zl->zl_rwlock = rwlp; 2916 zl->zl_znode = NULL; 2917 zl->zl_next = *zlpp; 2918 *zlpp = zl; 2919 2920 if (*oidp == szp->z_id) /* We're a descendant of szp */ 2921 return (EINVAL); 2922 2923 if (*oidp == rootid) /* We've hit the top */ 2924 return (0); 2925 2926 if (rw == RW_READER) { /* i.e. not the first pass */ 2927 int error = zfs_zget(zp->z_zfsvfs, *oidp, &zp); 2928 if (error) 2929 return (error); 2930 zl->zl_znode = zp; 2931 } 2932 oidp = &zp->z_phys->zp_parent; 2933 rwlp = &zp->z_parent_lock; 2934 rw = RW_READER; 2935 2936 } while (zp->z_id != sdzp->z_id); 2937 2938 return (0); 2939 } 2940 2941 /* 2942 * Move an entry from the provided source directory to the target 2943 * directory. Change the entry name as indicated. 2944 * 2945 * IN: sdvp - Source directory containing the "old entry". 2946 * snm - Old entry name. 2947 * tdvp - Target directory to contain the "new entry". 2948 * tnm - New entry name. 2949 * cr - credentials of caller. 2950 * ct - caller context 2951 * flags - case flags 2952 * 2953 * RETURN: 0 if success 2954 * error code if failure 2955 * 2956 * Timestamps: 2957 * sdvp,tdvp - ctime|mtime updated 2958 */ 2959 /*ARGSUSED*/ 2960 static int 2961 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr, 2962 caller_context_t *ct, int flags) 2963 { 2964 znode_t *tdzp, *szp, *tzp; 2965 znode_t *sdzp = VTOZ(sdvp); 2966 zfsvfs_t *zfsvfs = sdzp->z_zfsvfs; 2967 zilog_t *zilog; 2968 vnode_t *realvp; 2969 zfs_dirlock_t *sdl, *tdl; 2970 dmu_tx_t *tx; 2971 zfs_zlock_t *zl; 2972 int cmp, serr, terr; 2973 int error = 0; 2974 int zflg = 0; 2975 2976 ZFS_ENTER(zfsvfs); 2977 ZFS_VERIFY_ZP(sdzp); 2978 zilog = zfsvfs->z_log; 2979 2980 /* 2981 * Make sure we have the real vp for the target directory. 
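 * (If tdvp arrived through a stacking file system such as lofs,
 * VOP_REALVP() should hand back the underlying ZFS vnode, so the
 * v_vfsp comparison and VTOZ() below are applied to the real target
 * directory rather than the wrapper vnode.)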
2982 */ 2983 if (VOP_REALVP(tdvp, &realvp, ct) == 0) 2984 tdvp = realvp; 2985 2986 if (tdvp->v_vfsp != sdvp->v_vfsp) { 2987 ZFS_EXIT(zfsvfs); 2988 return (EXDEV); 2989 } 2990 2991 tdzp = VTOZ(tdvp); 2992 ZFS_VERIFY_ZP(tdzp); 2993 if (zfsvfs->z_utf8 && u8_validate(tnm, 2994 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 2995 ZFS_EXIT(zfsvfs); 2996 return (EILSEQ); 2997 } 2998 2999 if (flags & FIGNORECASE) 3000 zflg |= ZCILOOK; 3001 3002 top: 3003 szp = NULL; 3004 tzp = NULL; 3005 zl = NULL; 3006 3007 /* 3008 * This is to prevent the creation of links into attribute space 3009 * by renaming a linked file into/outof an attribute directory. 3010 * See the comment in zfs_link() for why this is considered bad. 3011 */ 3012 if ((tdzp->z_phys->zp_flags & ZFS_XATTR) != 3013 (sdzp->z_phys->zp_flags & ZFS_XATTR)) { 3014 ZFS_EXIT(zfsvfs); 3015 return (EINVAL); 3016 } 3017 3018 /* 3019 * Lock source and target directory entries. To prevent deadlock, 3020 * a lock ordering must be defined. We lock the directory with 3021 * the smallest object id first, or if it's a tie, the one with 3022 * the lexically first name. 3023 */ 3024 if (sdzp->z_id < tdzp->z_id) { 3025 cmp = -1; 3026 } else if (sdzp->z_id > tdzp->z_id) { 3027 cmp = 1; 3028 } else { 3029 /* 3030 * First compare the two name arguments without 3031 * considering any case folding. 3032 */ 3033 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER); 3034 3035 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error); 3036 ASSERT(error == 0 || !zfsvfs->z_utf8); 3037 if (cmp == 0) { 3038 /* 3039 * POSIX: "If the old argument and the new argument 3040 * both refer to links to the same existing file, 3041 * the rename() function shall return successfully 3042 * and perform no other action." 3043 */ 3044 ZFS_EXIT(zfsvfs); 3045 return (0); 3046 } 3047 /* 3048 * If the file system is case-folding, then we may 3049 * have some more checking to do. A case-folding file 3050 * system is either supporting mixed case sensitivity 3051 * access or is completely case-insensitive. Note 3052 * that the file system is always case preserving. 3053 * 3054 * In mixed sensitivity mode case sensitive behavior 3055 * is the default. FIGNORECASE must be used to 3056 * explicitly request case insensitive behavior. 3057 * 3058 * If the source and target names provided differ only 3059 * by case (e.g., a request to rename 'tim' to 'Tim'), 3060 * we will treat this as a special case in the 3061 * case-insensitive mode: as long as the source name 3062 * is an exact match, we will allow this to proceed as 3063 * a name-change request. 3064 */ 3065 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE || 3066 (zfsvfs->z_case == ZFS_CASE_MIXED && 3067 flags & FIGNORECASE)) && 3068 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST, 3069 &error) == 0) { 3070 /* 3071 * case preserving rename request, require exact 3072 * name matches 3073 */ 3074 zflg |= ZCIEXACT; 3075 zflg &= ~ZCILOOK; 3076 } 3077 } 3078 3079 if (cmp < 0) { 3080 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, 3081 ZEXISTS | zflg, NULL, NULL); 3082 terr = zfs_dirent_lock(&tdl, 3083 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL); 3084 } else { 3085 terr = zfs_dirent_lock(&tdl, 3086 tdzp, tnm, &tzp, zflg, NULL, NULL); 3087 serr = zfs_dirent_lock(&sdl, 3088 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg, 3089 NULL, NULL); 3090 } 3091 3092 if (serr) { 3093 /* 3094 * Source entry invalid or not there. 
3095 */ 3096 if (!terr) { 3097 zfs_dirent_unlock(tdl); 3098 if (tzp) 3099 VN_RELE(ZTOV(tzp)); 3100 } 3101 if (strcmp(snm, "..") == 0) 3102 serr = EINVAL; 3103 ZFS_EXIT(zfsvfs); 3104 return (serr); 3105 } 3106 if (terr) { 3107 zfs_dirent_unlock(sdl); 3108 VN_RELE(ZTOV(szp)); 3109 if (strcmp(tnm, "..") == 0) 3110 terr = EINVAL; 3111 ZFS_EXIT(zfsvfs); 3112 return (terr); 3113 } 3114 3115 /* 3116 * Must have write access at the source to remove the old entry 3117 * and write access at the target to create the new entry. 3118 * Note that if target and source are the same, this can be 3119 * done in a single check. 3120 */ 3121 3122 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)) 3123 goto out; 3124 3125 if (ZTOV(szp)->v_type == VDIR) { 3126 /* 3127 * Check to make sure rename is valid. 3128 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d 3129 */ 3130 if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl)) 3131 goto out; 3132 } 3133 3134 /* 3135 * Does target exist? 3136 */ 3137 if (tzp) { 3138 /* 3139 * Source and target must be the same type. 3140 */ 3141 if (ZTOV(szp)->v_type == VDIR) { 3142 if (ZTOV(tzp)->v_type != VDIR) { 3143 error = ENOTDIR; 3144 goto out; 3145 } 3146 } else { 3147 if (ZTOV(tzp)->v_type == VDIR) { 3148 error = EISDIR; 3149 goto out; 3150 } 3151 } 3152 /* 3153 * POSIX dictates that when the source and target 3154 * entries refer to the same file object, rename 3155 * must do nothing and exit without error. 3156 */ 3157 if (szp->z_id == tzp->z_id) { 3158 error = 0; 3159 goto out; 3160 } 3161 } 3162 3163 vnevent_rename_src(ZTOV(szp), sdvp, snm, ct); 3164 if (tzp) 3165 vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct); 3166 3167 /* 3168 * notify the target directory if it is not the same 3169 * as source directory. 3170 */ 3171 if (tdvp != sdvp) { 3172 vnevent_rename_dest_dir(tdvp, ct); 3173 } 3174 3175 tx = dmu_tx_create(zfsvfs->z_os); 3176 dmu_tx_hold_bonus(tx, szp->z_id); /* nlink changes */ 3177 dmu_tx_hold_bonus(tx, sdzp->z_id); /* nlink changes */ 3178 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm); 3179 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm); 3180 if (sdzp != tdzp) 3181 dmu_tx_hold_bonus(tx, tdzp->z_id); /* nlink changes */ 3182 if (tzp) 3183 dmu_tx_hold_bonus(tx, tzp->z_id); /* parent changes */ 3184 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 3185 error = dmu_tx_assign(tx, zfsvfs->z_assign); 3186 if (error) { 3187 if (zl != NULL) 3188 zfs_rename_unlock(&zl); 3189 zfs_dirent_unlock(sdl); 3190 zfs_dirent_unlock(tdl); 3191 VN_RELE(ZTOV(szp)); 3192 if (tzp) 3193 VN_RELE(ZTOV(tzp)); 3194 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 3195 dmu_tx_wait(tx); 3196 dmu_tx_abort(tx); 3197 goto top; 3198 } 3199 dmu_tx_abort(tx); 3200 ZFS_EXIT(zfsvfs); 3201 return (error); 3202 } 3203 3204 if (tzp) /* Attempt to remove the existing target */ 3205 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL); 3206 3207 if (error == 0) { 3208 error = zfs_link_create(tdl, szp, tx, ZRENAMING); 3209 if (error == 0) { 3210 szp->z_phys->zp_flags |= ZFS_AV_MODIFIED; 3211 3212 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL); 3213 ASSERT(error == 0); 3214 3215 zfs_log_rename(zilog, tx, 3216 TX_RENAME | (flags & FIGNORECASE ? 
TX_CI : 0), 3217 sdzp, sdl->dl_name, tdzp, tdl->dl_name, szp); 3218 3219 /* Update path information for the target vnode */ 3220 vn_renamepath(tdvp, ZTOV(szp), tnm, strlen(tnm)); 3221 } 3222 } 3223 3224 dmu_tx_commit(tx); 3225 out: 3226 if (zl != NULL) 3227 zfs_rename_unlock(&zl); 3228 3229 zfs_dirent_unlock(sdl); 3230 zfs_dirent_unlock(tdl); 3231 3232 VN_RELE(ZTOV(szp)); 3233 if (tzp) 3234 VN_RELE(ZTOV(tzp)); 3235 3236 ZFS_EXIT(zfsvfs); 3237 return (error); 3238 } 3239 3240 /* 3241 * Insert the indicated symbolic reference entry into the directory. 3242 * 3243 * IN: dvp - Directory to contain new symbolic link. 3244 * link - Name for new symlink entry. 3245 * vap - Attributes of new entry. 3246 * target - Target path of new symlink. 3247 * cr - credentials of caller. 3248 * ct - caller context 3249 * flags - case flags 3250 * 3251 * RETURN: 0 if success 3252 * error code if failure 3253 * 3254 * Timestamps: 3255 * dvp - ctime|mtime updated 3256 */ 3257 /*ARGSUSED*/ 3258 static int 3259 zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr, 3260 caller_context_t *ct, int flags) 3261 { 3262 znode_t *zp, *dzp = VTOZ(dvp); 3263 zfs_dirlock_t *dl; 3264 dmu_tx_t *tx; 3265 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3266 zilog_t *zilog; 3267 int len = strlen(link); 3268 int error; 3269 int zflg = ZNEW; 3270 zfs_fuid_info_t *fuidp = NULL; 3271 3272 ASSERT(vap->va_type == VLNK); 3273 3274 ZFS_ENTER(zfsvfs); 3275 ZFS_VERIFY_ZP(dzp); 3276 zilog = zfsvfs->z_log; 3277 3278 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 3279 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3280 ZFS_EXIT(zfsvfs); 3281 return (EILSEQ); 3282 } 3283 if (flags & FIGNORECASE) 3284 zflg |= ZCILOOK; 3285 top: 3286 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3287 ZFS_EXIT(zfsvfs); 3288 return (error); 3289 } 3290 3291 if (len > MAXPATHLEN) { 3292 ZFS_EXIT(zfsvfs); 3293 return (ENAMETOOLONG); 3294 } 3295 3296 /* 3297 * Attempt to lock directory; fail if entry already exists. 3298 */ 3299 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); 3300 if (error) { 3301 ZFS_EXIT(zfsvfs); 3302 return (error); 3303 } 3304 3305 tx = dmu_tx_create(zfsvfs->z_os); 3306 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); 3307 dmu_tx_hold_bonus(tx, dzp->z_id); 3308 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3309 if (dzp->z_phys->zp_flags & ZFS_INHERIT_ACE) 3310 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE); 3311 if (IS_EPHEMERAL(crgetuid(cr)) || IS_EPHEMERAL(crgetgid(cr))) { 3312 if (zfsvfs->z_fuid_obj == 0) { 3313 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 3314 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 3315 FUID_SIZE_ESTIMATE(zfsvfs)); 3316 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, FALSE, NULL); 3317 } else { 3318 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); 3319 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, 3320 FUID_SIZE_ESTIMATE(zfsvfs)); 3321 } 3322 } 3323 error = dmu_tx_assign(tx, zfsvfs->z_assign); 3324 if (error) { 3325 zfs_dirent_unlock(dl); 3326 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 3327 dmu_tx_wait(tx); 3328 dmu_tx_abort(tx); 3329 goto top; 3330 } 3331 dmu_tx_abort(tx); 3332 ZFS_EXIT(zfsvfs); 3333 return (error); 3334 } 3335 3336 dmu_buf_will_dirty(dzp->z_dbuf, tx); 3337 3338 /* 3339 * Create a new object for the symlink. 3340 * Put the link content into bonus buffer if it will fit; 3341 * otherwise, store it just like any other file data. 
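 *
 * In the bonus-buffer case the target bytes are copied immediately
 * after the znode_phys_t, i.e. starting at (char *)(zp->z_phys + 1),
 * and zfs_readlink() later reads them back from the same place:
 *
 *	+--------------+--------------------+
 *	| znode_phys_t | link target bytes  |   <- dnode bonus buffer
 *	+--------------+--------------------+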
3342 */ 3343 if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) { 3344 zfs_mknode(dzp, vap, tx, cr, 0, &zp, len, NULL, &fuidp); 3345 if (len != 0) 3346 bcopy(link, zp->z_phys + 1, len); 3347 } else { 3348 dmu_buf_t *dbp; 3349 3350 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, NULL, &fuidp); 3351 /* 3352 * Nothing can access the znode yet so no locking needed 3353 * for growing the znode's blocksize. 3354 */ 3355 zfs_grow_blocksize(zp, len, tx); 3356 3357 VERIFY(0 == dmu_buf_hold(zfsvfs->z_os, 3358 zp->z_id, 0, FTAG, &dbp)); 3359 dmu_buf_will_dirty(dbp, tx); 3360 3361 ASSERT3U(len, <=, dbp->db_size); 3362 bcopy(link, dbp->db_data, len); 3363 dmu_buf_rele(dbp, FTAG); 3364 } 3365 zp->z_phys->zp_size = len; 3366 3367 /* 3368 * Insert the new object into the directory. 3369 */ 3370 (void) zfs_link_create(dl, zp, tx, ZNEW); 3371 out: 3372 if (error == 0) { 3373 uint64_t txtype = TX_SYMLINK; 3374 if (flags & FIGNORECASE) 3375 txtype |= TX_CI; 3376 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link); 3377 } 3378 if (fuidp) 3379 zfs_fuid_info_free(fuidp); 3380 3381 dmu_tx_commit(tx); 3382 3383 zfs_dirent_unlock(dl); 3384 3385 VN_RELE(ZTOV(zp)); 3386 3387 ZFS_EXIT(zfsvfs); 3388 return (error); 3389 } 3390 3391 /* 3392 * Return, in the buffer contained in the provided uio structure, 3393 * the symbolic path referred to by vp. 3394 * 3395 * IN: vp - vnode of symbolic link. 3396 * uoip - structure to contain the link path. 3397 * cr - credentials of caller. 3398 * ct - caller context 3399 * 3400 * OUT: uio - structure to contain the link path. 3401 * 3402 * RETURN: 0 if success 3403 * error code if failure 3404 * 3405 * Timestamps: 3406 * vp - atime updated 3407 */ 3408 /* ARGSUSED */ 3409 static int 3410 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) 3411 { 3412 znode_t *zp = VTOZ(vp); 3413 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3414 size_t bufsz; 3415 int error; 3416 3417 ZFS_ENTER(zfsvfs); 3418 ZFS_VERIFY_ZP(zp); 3419 3420 bufsz = (size_t)zp->z_phys->zp_size; 3421 if (bufsz + sizeof (znode_phys_t) <= zp->z_dbuf->db_size) { 3422 error = uiomove(zp->z_phys + 1, 3423 MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); 3424 } else { 3425 dmu_buf_t *dbp; 3426 error = dmu_buf_hold(zfsvfs->z_os, zp->z_id, 0, FTAG, &dbp); 3427 if (error) { 3428 ZFS_EXIT(zfsvfs); 3429 return (error); 3430 } 3431 error = uiomove(dbp->db_data, 3432 MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); 3433 dmu_buf_rele(dbp, FTAG); 3434 } 3435 3436 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 3437 ZFS_EXIT(zfsvfs); 3438 return (error); 3439 } 3440 3441 /* 3442 * Insert a new entry into directory tdvp referencing svp. 3443 * 3444 * IN: tdvp - Directory to contain new entry. 3445 * svp - vnode of new entry. 3446 * name - name of new entry. 3447 * cr - credentials of caller. 
3448 * ct - caller context 3449 * 3450 * RETURN: 0 if success 3451 * error code if failure 3452 * 3453 * Timestamps: 3454 * tdvp - ctime|mtime updated 3455 * svp - ctime updated 3456 */ 3457 /* ARGSUSED */ 3458 static int 3459 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr, 3460 caller_context_t *ct, int flags) 3461 { 3462 znode_t *dzp = VTOZ(tdvp); 3463 znode_t *tzp, *szp; 3464 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3465 zilog_t *zilog; 3466 zfs_dirlock_t *dl; 3467 dmu_tx_t *tx; 3468 vnode_t *realvp; 3469 int error; 3470 int zf = ZNEW; 3471 uid_t owner; 3472 3473 ASSERT(tdvp->v_type == VDIR); 3474 3475 ZFS_ENTER(zfsvfs); 3476 ZFS_VERIFY_ZP(dzp); 3477 zilog = zfsvfs->z_log; 3478 3479 if (VOP_REALVP(svp, &realvp, ct) == 0) 3480 svp = realvp; 3481 3482 if (svp->v_vfsp != tdvp->v_vfsp) { 3483 ZFS_EXIT(zfsvfs); 3484 return (EXDEV); 3485 } 3486 szp = VTOZ(svp); 3487 ZFS_VERIFY_ZP(szp); 3488 3489 if (zfsvfs->z_utf8 && u8_validate(name, 3490 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3491 ZFS_EXIT(zfsvfs); 3492 return (EILSEQ); 3493 } 3494 if (flags & FIGNORECASE) 3495 zf |= ZCILOOK; 3496 3497 top: 3498 /* 3499 * We do not support links between attributes and non-attributes 3500 * because of the potential security risk of creating links 3501 * into "normal" file space in order to circumvent restrictions 3502 * imposed in attribute space. 3503 */ 3504 if ((szp->z_phys->zp_flags & ZFS_XATTR) != 3505 (dzp->z_phys->zp_flags & ZFS_XATTR)) { 3506 ZFS_EXIT(zfsvfs); 3507 return (EINVAL); 3508 } 3509 3510 /* 3511 * POSIX dictates that we return EPERM here. 3512 * Better choices include ENOTSUP or EISDIR. 3513 */ 3514 if (svp->v_type == VDIR) { 3515 ZFS_EXIT(zfsvfs); 3516 return (EPERM); 3517 } 3518 3519 owner = zfs_fuid_map_id(zfsvfs, szp->z_phys->zp_uid, cr, ZFS_OWNER); 3520 if (owner != crgetuid(cr) && 3521 secpolicy_basic_link(cr) != 0) { 3522 ZFS_EXIT(zfsvfs); 3523 return (EPERM); 3524 } 3525 3526 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3527 ZFS_EXIT(zfsvfs); 3528 return (error); 3529 } 3530 3531 /* 3532 * Attempt to lock directory; fail if entry already exists. 3533 */ 3534 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL); 3535 if (error) { 3536 ZFS_EXIT(zfsvfs); 3537 return (error); 3538 } 3539 3540 tx = dmu_tx_create(zfsvfs->z_os); 3541 dmu_tx_hold_bonus(tx, szp->z_id); 3542 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3543 error = dmu_tx_assign(tx, zfsvfs->z_assign); 3544 if (error) { 3545 zfs_dirent_unlock(dl); 3546 if (error == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 3547 dmu_tx_wait(tx); 3548 dmu_tx_abort(tx); 3549 goto top; 3550 } 3551 dmu_tx_abort(tx); 3552 ZFS_EXIT(zfsvfs); 3553 return (error); 3554 } 3555 3556 error = zfs_link_create(dl, szp, tx, 0); 3557 3558 if (error == 0) { 3559 uint64_t txtype = TX_LINK; 3560 if (flags & FIGNORECASE) 3561 txtype |= TX_CI; 3562 zfs_log_link(zilog, tx, txtype, dzp, szp, name); 3563 } 3564 3565 dmu_tx_commit(tx); 3566 3567 zfs_dirent_unlock(dl); 3568 3569 if (error == 0) { 3570 vnevent_link(svp, ct); 3571 } 3572 3573 ZFS_EXIT(zfsvfs); 3574 return (error); 3575 } 3576 3577 /* 3578 * zfs_null_putapage() is used when the file system has been force 3579 * unmounted. It just drops the pages. 3580 */ 3581 /* ARGSUSED */ 3582 static int 3583 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 3584 size_t *lenp, int flags, cred_t *cr) 3585 { 3586 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR); 3587 return (0); 3588 } 3589 3590 /* 3591 * Push a page out to disk, klustering if possible. 
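 *
 * For instance, with a 128K block size and 4K pages the code below
 * asks pvn_write_kluster() for up to 32 contiguous dirty pages so
 * that dmu_write_pages() can issue one block-aligned write rather
 * than forcing a read-modify-write of the surrounding block.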
3592 * 3593 * IN: vp - file to push page to. 3594 * pp - page to push. 3595 * flags - additional flags. 3596 * cr - credentials of caller. 3597 * 3598 * OUT: offp - start of range pushed. 3599 * lenp - len of range pushed. 3600 * 3601 * RETURN: 0 if success 3602 * error code if failure 3603 * 3604 * NOTE: callers must have locked the page to be pushed. On 3605 * exit, the page (and all other pages in the kluster) must be 3606 * unlocked. 3607 */ 3608 /* ARGSUSED */ 3609 static int 3610 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 3611 size_t *lenp, int flags, cred_t *cr) 3612 { 3613 znode_t *zp = VTOZ(vp); 3614 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3615 zilog_t *zilog = zfsvfs->z_log; 3616 dmu_tx_t *tx; 3617 rl_t *rl; 3618 u_offset_t off, koff; 3619 size_t len, klen; 3620 uint64_t filesz; 3621 int err; 3622 3623 filesz = zp->z_phys->zp_size; 3624 off = pp->p_offset; 3625 len = PAGESIZE; 3626 /* 3627 * If our blocksize is bigger than the page size, try to kluster 3628 * muiltiple pages so that we write a full block (thus avoiding 3629 * a read-modify-write). 3630 */ 3631 if (off < filesz && zp->z_blksz > PAGESIZE) { 3632 if (!ISP2(zp->z_blksz)) { 3633 /* Only one block in the file. */ 3634 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); 3635 koff = 0; 3636 } else { 3637 klen = zp->z_blksz; 3638 koff = P2ALIGN(off, (u_offset_t)klen); 3639 } 3640 ASSERT(koff <= filesz); 3641 if (koff + klen > filesz) 3642 klen = P2ROUNDUP(filesz - koff, (uint64_t)PAGESIZE); 3643 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags); 3644 } 3645 ASSERT3U(btop(len), ==, btopr(len)); 3646 top: 3647 rl = zfs_range_lock(zp, off, len, RL_WRITER); 3648 /* 3649 * Can't push pages past end-of-file. 3650 */ 3651 filesz = zp->z_phys->zp_size; 3652 if (off >= filesz) { 3653 /* ignore all pages */ 3654 err = 0; 3655 goto out; 3656 } else if (off + len > filesz) { 3657 int npages = btopr(filesz - off); 3658 page_t *trunc; 3659 3660 page_list_break(&pp, &trunc, npages); 3661 /* ignore pages past end of file */ 3662 if (trunc) 3663 pvn_write_done(trunc, flags); 3664 len = filesz - off; 3665 } 3666 3667 tx = dmu_tx_create(zfsvfs->z_os); 3668 dmu_tx_hold_write(tx, zp->z_id, off, len); 3669 dmu_tx_hold_bonus(tx, zp->z_id); 3670 err = dmu_tx_assign(tx, zfsvfs->z_assign); 3671 if (err != 0) { 3672 if (err == ERESTART && zfsvfs->z_assign == TXG_NOWAIT) { 3673 zfs_range_unlock(rl); 3674 dmu_tx_wait(tx); 3675 dmu_tx_abort(tx); 3676 err = 0; 3677 goto top; 3678 } 3679 dmu_tx_abort(tx); 3680 goto out; 3681 } 3682 3683 if (zp->z_blksz <= PAGESIZE) { 3684 caddr_t va = zfs_map_page(pp, S_READ); 3685 ASSERT3U(len, <=, PAGESIZE); 3686 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx); 3687 zfs_unmap_page(pp, va); 3688 } else { 3689 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx); 3690 } 3691 3692 if (err == 0) { 3693 zfs_time_stamper(zp, CONTENT_MODIFIED, tx); 3694 zfs_log_write(zilog, tx, TX_WRITE, zp, off, len, 0); 3695 dmu_tx_commit(tx); 3696 } 3697 3698 out: 3699 zfs_range_unlock(rl); 3700 pvn_write_done(pp, (err ? B_ERROR : 0) | flags); 3701 if (offp) 3702 *offp = off; 3703 if (lenp) 3704 *lenp = len; 3705 3706 return (err); 3707 } 3708 3709 /* 3710 * Copy the portion of the file indicated from pages into the file. 3711 * The pages are stored in a page list attached to the files vnode. 3712 * 3713 * IN: vp - vnode of file to push page data to. 3714 * off - position in file to put data. 3715 * len - amount of data to write. 3716 * flags - flags to control the operation. 
3717 * cr - credentials of caller. 3718 * ct - caller context. 3719 * 3720 * RETURN: 0 if success 3721 * error code if failure 3722 * 3723 * Timestamps: 3724 * vp - ctime|mtime updated 3725 */ 3726 /*ARGSUSED*/ 3727 static int 3728 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, 3729 caller_context_t *ct) 3730 { 3731 znode_t *zp = VTOZ(vp); 3732 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3733 page_t *pp; 3734 size_t io_len; 3735 u_offset_t io_off; 3736 uint64_t filesz; 3737 int error = 0; 3738 3739 ZFS_ENTER(zfsvfs); 3740 ZFS_VERIFY_ZP(zp); 3741 3742 if (len == 0) { 3743 /* 3744 * Search the entire vp list for pages >= off. 3745 */ 3746 error = pvn_vplist_dirty(vp, (u_offset_t)off, zfs_putapage, 3747 flags, cr); 3748 goto out; 3749 } 3750 3751 filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */ 3752 if (off > filesz) { 3753 /* past end of file */ 3754 ZFS_EXIT(zfsvfs); 3755 return (0); 3756 } 3757 3758 len = MIN(len, filesz - off); 3759 3760 for (io_off = off; io_off < off + len; io_off += io_len) { 3761 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) { 3762 pp = page_lookup(vp, io_off, 3763 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED); 3764 } else { 3765 pp = page_lookup_nowait(vp, io_off, 3766 (flags & B_FREE) ? SE_EXCL : SE_SHARED); 3767 } 3768 3769 if (pp != NULL && pvn_getdirty(pp, flags)) { 3770 int err; 3771 3772 /* 3773 * Found a dirty page to push 3774 */ 3775 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr); 3776 if (err) 3777 error = err; 3778 } else { 3779 io_len = PAGESIZE; 3780 } 3781 } 3782 out: 3783 if ((flags & B_ASYNC) == 0) 3784 zil_commit(zfsvfs->z_log, UINT64_MAX, zp->z_id); 3785 ZFS_EXIT(zfsvfs); 3786 return (error); 3787 } 3788 3789 /*ARGSUSED*/ 3790 void 3791 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) 3792 { 3793 znode_t *zp = VTOZ(vp); 3794 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3795 int error; 3796 3797 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER); 3798 if (zp->z_dbuf == NULL) { 3799 /* 3800 * The fs has been unmounted, or we did a 3801 * suspend/resume and this file no longer exists. 3802 */ 3803 if (vn_has_cached_data(vp)) { 3804 (void) pvn_vplist_dirty(vp, 0, zfs_null_putapage, 3805 B_INVAL, cr); 3806 } 3807 3808 mutex_enter(&zp->z_lock); 3809 vp->v_count = 0; /* count arrives as 1 */ 3810 mutex_exit(&zp->z_lock); 3811 rw_exit(&zfsvfs->z_teardown_inactive_lock); 3812 zfs_znode_free(zp); 3813 return; 3814 } 3815 3816 /* 3817 * Attempt to push any data in the page cache. If this fails 3818 * we will get kicked out later in zfs_zinactive(). 3819 */ 3820 if (vn_has_cached_data(vp)) { 3821 (void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC, 3822 cr); 3823 } 3824 3825 if (zp->z_atime_dirty && zp->z_unlinked == 0) { 3826 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); 3827 3828 dmu_tx_hold_bonus(tx, zp->z_id); 3829 error = dmu_tx_assign(tx, TXG_WAIT); 3830 if (error) { 3831 dmu_tx_abort(tx); 3832 } else { 3833 dmu_buf_will_dirty(zp->z_dbuf, tx); 3834 mutex_enter(&zp->z_lock); 3835 zp->z_atime_dirty = 0; 3836 mutex_exit(&zp->z_lock); 3837 dmu_tx_commit(tx); 3838 } 3839 } 3840 3841 zfs_zinactive(zp); 3842 rw_exit(&zfsvfs->z_teardown_inactive_lock); 3843 } 3844 3845 /* 3846 * Bounds-check the seek operation. 
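 *
 * For example, an lseek(2) that would move a regular file's offset
 * below zero or past MAXOFFSET_T is rejected with EINVAL, while a
 * directory accepts any offset because its readdir cookies are opaque
 * zap_cursor_serialize() values that need no validation here.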
3847 * 3848 * IN: vp - vnode seeking within 3849 * ooff - old file offset 3850 * noffp - pointer to new file offset 3851 * ct - caller context 3852 * 3853 * RETURN: 0 if success 3854 * EINVAL if new offset invalid 3855 */ 3856 /* ARGSUSED */ 3857 static int 3858 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, 3859 caller_context_t *ct) 3860 { 3861 if (vp->v_type == VDIR) 3862 return (0); 3863 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); 3864 } 3865 3866 /* 3867 * Pre-filter the generic locking function to trap attempts to place 3868 * a mandatory lock on a memory mapped file. 3869 */ 3870 static int 3871 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset, 3872 flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct) 3873 { 3874 znode_t *zp = VTOZ(vp); 3875 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3876 int error; 3877 3878 ZFS_ENTER(zfsvfs); 3879 ZFS_VERIFY_ZP(zp); 3880 3881 /* 3882 * We are following the UFS semantics with respect to mapcnt 3883 * here: If we see that the file is mapped already, then we will 3884 * return an error, but we don't worry about races between this 3885 * function and zfs_map(). 3886 */ 3887 if (zp->z_mapcnt > 0 && MANDMODE((mode_t)zp->z_phys->zp_mode)) { 3888 ZFS_EXIT(zfsvfs); 3889 return (EAGAIN); 3890 } 3891 error = fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct); 3892 ZFS_EXIT(zfsvfs); 3893 return (error); 3894 } 3895 3896 /* 3897 * If we can't find a page in the cache, we will create a new page 3898 * and fill it with file data. For efficiency, we may try to fill 3899 * multiple pages at once (klustering). 3900 */ 3901 static int 3902 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg, 3903 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw) 3904 { 3905 znode_t *zp = VTOZ(vp); 3906 page_t *pp, *cur_pp; 3907 objset_t *os = zp->z_zfsvfs->z_os; 3908 caddr_t va; 3909 u_offset_t io_off, total; 3910 uint64_t oid = zp->z_id; 3911 size_t io_len; 3912 uint64_t filesz; 3913 int err; 3914 3915 /* 3916 * If we are only asking for a single page don't bother klustering. 3917 */ 3918 filesz = zp->z_phys->zp_size; /* get consistent copy of zp_size */ 3919 if (off >= filesz) 3920 return (EFAULT); 3921 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) { 3922 io_off = off; 3923 io_len = PAGESIZE; 3924 pp = page_create_va(vp, io_off, io_len, PG_WAIT, seg, addr); 3925 } else { 3926 /* 3927 * Try to fill a kluster of pages (a blocks worth). 3928 */ 3929 size_t klen; 3930 u_offset_t koff; 3931 3932 if (!ISP2(zp->z_blksz)) { 3933 /* Only one block in the file. */ 3934 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); 3935 koff = 0; 3936 } else { 3937 /* 3938 * It would be ideal to align our offset to the 3939 * blocksize but doing so has resulted in some 3940 * strange application crashes. For now, we 3941 * leave the offset as is and only adjust the 3942 * length if we are off the end of the file. 3943 */ 3944 koff = off; 3945 klen = plsz; 3946 } 3947 ASSERT(koff <= filesz); 3948 if (koff + klen > filesz) 3949 klen = P2ROUNDUP(filesz, (uint64_t)PAGESIZE) - koff; 3950 ASSERT3U(off, >=, koff); 3951 ASSERT3U(off, <, koff + klen); 3952 pp = pvn_read_kluster(vp, off, seg, addr, &io_off, 3953 &io_len, koff, klen, 0); 3954 } 3955 if (pp == NULL) { 3956 /* 3957 * Some other thread entered the page before us. 3958 * Return to zfs_getpage to retry the lookup. 3959 */ 3960 *pl = NULL; 3961 return (0); 3962 } 3963 3964 /* 3965 * Fill the pages in the kluster. 
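 *
 * Each page is mapped with zfs_map_page(), filled by a PAGESIZE-sized
 * dmu_read(), and unmapped again; any failure tosses the whole
 * kluster with pvn_read_done(B_ERROR) and converts ECKSUM to EIO
 * before returning.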
3966 */ 3967 cur_pp = pp; 3968 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) { 3969 ASSERT3U(io_off, ==, cur_pp->p_offset); 3970 va = zfs_map_page(cur_pp, S_WRITE); 3971 err = dmu_read(os, oid, io_off, PAGESIZE, va); 3972 zfs_unmap_page(cur_pp, va); 3973 if (err) { 3974 /* On error, toss the entire kluster */ 3975 pvn_read_done(pp, B_ERROR); 3976 /* convert checksum errors into IO errors */ 3977 if (err == ECKSUM) 3978 err = EIO; 3979 return (err); 3980 } 3981 cur_pp = cur_pp->p_next; 3982 } 3983 out: 3984 /* 3985 * Fill in the page list array from the kluster. If 3986 * there are too many pages in the kluster, return 3987 * as many pages as possible starting from the desired 3988 * offset `off'. 3989 * NOTE: the page list will always be null terminated. 3990 */ 3991 pvn_plist_init(pp, pl, plsz, off, io_len, rw); 3992 3993 return (0); 3994 } 3995 3996 /* 3997 * Return pointers to the pages for the file region [off, off + len] 3998 * in the pl array. If plsz is greater than len, this function may 3999 * also return page pointers from before or after the specified 4000 * region (i.e. some region [off', off' + plsz]). These additional 4001 * pages are only returned if they are already in the cache, or were 4002 * created as part of a klustered read. 4003 * 4004 * IN: vp - vnode of file to get data from. 4005 * off - position in file to get data from. 4006 * len - amount of data to retrieve. 4007 * plsz - length of provided page list. 4008 * seg - segment to obtain pages for. 4009 * addr - virtual address of fault. 4010 * rw - mode of created pages. 4011 * cr - credentials of caller. 4012 * ct - caller context. 4013 * 4014 * OUT: protp - protection mode of created pages. 4015 * pl - list of pages created. 4016 * 4017 * RETURN: 0 if success 4018 * error code if failure 4019 * 4020 * Timestamps: 4021 * vp - atime updated 4022 */ 4023 /* ARGSUSED */ 4024 static int 4025 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp, 4026 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, 4027 enum seg_rw rw, cred_t *cr, caller_context_t *ct) 4028 { 4029 znode_t *zp = VTOZ(vp); 4030 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4031 page_t *pp, **pl0 = pl; 4032 int need_unlock = 0, err = 0; 4033 offset_t orig_off; 4034 4035 ZFS_ENTER(zfsvfs); 4036 ZFS_VERIFY_ZP(zp); 4037 4038 if (protp) 4039 *protp = PROT_ALL; 4040 4041 /* no faultahead (for now) */ 4042 if (pl == NULL) { 4043 ZFS_EXIT(zfsvfs); 4044 return (0); 4045 } 4046 4047 /* can't fault past EOF */ 4048 if (off >= zp->z_phys->zp_size) { 4049 ZFS_EXIT(zfsvfs); 4050 return (EFAULT); 4051 } 4052 orig_off = off; 4053 4054 /* 4055 * If we already own the lock, then we must be page faulting 4056 * in the middle of a write to this file (i.e., we are writing 4057 * to this file using data from a mapped region of the file). 4058 */ 4059 if (rw_owner(&zp->z_map_lock) != curthread) { 4060 rw_enter(&zp->z_map_lock, RW_WRITER); 4061 need_unlock = TRUE; 4062 } 4063 4064 /* 4065 * Loop through the requested range [off, off + len] looking 4066 * for pages. If we don't find a page, we will need to create 4067 * a new page and fill it with data from the file. 
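 *
 * In sketch form, the loop is roughly:
 *
 *	while (len > 0 && plsz >= PAGESIZE) {
 *		if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL)
 *			*pl++ = pp;		// cached page, take it
 *		else
 *			zfs_fillpage(...);	// create and fill a kluster
 *		// a kluster may start before 'off'; off/addr/len are
 *		// then walked back so they line up with *pl
 *	}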
4068 */ 4069 while (len > 0) { 4070 if (plsz < PAGESIZE) 4071 break; 4072 if (pp = page_lookup(vp, off, SE_SHARED)) { 4073 *pl++ = pp; 4074 off += PAGESIZE; 4075 addr += PAGESIZE; 4076 len -= PAGESIZE; 4077 plsz -= PAGESIZE; 4078 } else { 4079 err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw); 4080 if (err) 4081 goto out; 4082 /* 4083 * klustering may have changed our region 4084 * to be block aligned. 4085 */ 4086 if (((pp = *pl) != 0) && (off != pp->p_offset)) { 4087 int delta = off - pp->p_offset; 4088 len += delta; 4089 off -= delta; 4090 addr -= delta; 4091 } 4092 while (*pl) { 4093 pl++; 4094 off += PAGESIZE; 4095 addr += PAGESIZE; 4096 plsz -= PAGESIZE; 4097 if (len > PAGESIZE) 4098 len -= PAGESIZE; 4099 else 4100 len = 0; 4101 } 4102 } 4103 } 4104 4105 /* 4106 * Fill out the page array with any pages already in the cache. 4107 */ 4108 while (plsz > 0) { 4109 pp = page_lookup_nowait(vp, off, SE_SHARED); 4110 if (pp == NULL) 4111 break; 4112 *pl++ = pp; 4113 off += PAGESIZE; 4114 plsz -= PAGESIZE; 4115 } 4116 4117 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 4118 out: 4119 /* 4120 * We can't grab the range lock for the page as reader which would 4121 * stop truncation as this leads to deadlock. So we need to recheck 4122 * the file size. 4123 */ 4124 if (orig_off >= zp->z_phys->zp_size) 4125 err = EFAULT; 4126 if (err) { 4127 /* 4128 * Release any pages we have previously locked. 4129 */ 4130 while (pl > pl0) 4131 page_unlock(*--pl); 4132 } 4133 4134 *pl = NULL; 4135 4136 if (need_unlock) 4137 rw_exit(&zp->z_map_lock); 4138 4139 ZFS_EXIT(zfsvfs); 4140 return (err); 4141 } 4142 4143 /* 4144 * Request a memory map for a section of a file. This code interacts 4145 * with common code and the VM system as follows: 4146 * 4147 * common code calls mmap(), which ends up in smmap_common() 4148 * 4149 * this calls VOP_MAP(), which takes you into (say) zfs 4150 * 4151 * zfs_map() calls as_map(), passing segvn_create() as the callback 4152 * 4153 * segvn_create() creates the new segment and calls VOP_ADDMAP() 4154 * 4155 * zfs_addmap() updates z_mapcnt 4156 */ 4157 /*ARGSUSED*/ 4158 static int 4159 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp, 4160 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, 4161 caller_context_t *ct) 4162 { 4163 znode_t *zp = VTOZ(vp); 4164 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4165 segvn_crargs_t vn_a; 4166 int error; 4167 4168 ZFS_ENTER(zfsvfs); 4169 ZFS_VERIFY_ZP(zp); 4170 4171 if ((prot & PROT_WRITE) && 4172 (zp->z_phys->zp_flags & (ZFS_IMMUTABLE | ZFS_READONLY | 4173 ZFS_APPENDONLY))) { 4174 ZFS_EXIT(zfsvfs); 4175 return (EPERM); 4176 } 4177 4178 if ((prot & (PROT_READ | PROT_EXEC)) && 4179 (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED)) { 4180 ZFS_EXIT(zfsvfs); 4181 return (EACCES); 4182 } 4183 4184 if (vp->v_flag & VNOMAP) { 4185 ZFS_EXIT(zfsvfs); 4186 return (ENOSYS); 4187 } 4188 4189 if (off < 0 || len > MAXOFFSET_T - off) { 4190 ZFS_EXIT(zfsvfs); 4191 return (ENXIO); 4192 } 4193 4194 if (vp->v_type != VREG) { 4195 ZFS_EXIT(zfsvfs); 4196 return (ENODEV); 4197 } 4198 4199 /* 4200 * If file is locked, disallow mapping. 
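 *
 * For example, if mandatory locking is in effect for this file (setgid
 * bit set with group execute clear, per MANDMODE()) and a record lock
 * is held on it, an mmap(2) fails here with EAGAIN; zfs_frlock()
 * above enforces the mirror-image rule for files that are already
 * mapped.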
4201 */ 4202 if (MANDMODE((mode_t)zp->z_phys->zp_mode) && vn_has_flocks(vp)) { 4203 ZFS_EXIT(zfsvfs); 4204 return (EAGAIN); 4205 } 4206 4207 as_rangelock(as); 4208 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags); 4209 if (error != 0) { 4210 as_rangeunlock(as); 4211 ZFS_EXIT(zfsvfs); 4212 return (error); 4213 } 4214 4215 vn_a.vp = vp; 4216 vn_a.offset = (u_offset_t)off; 4217 vn_a.type = flags & MAP_TYPE; 4218 vn_a.prot = prot; 4219 vn_a.maxprot = maxprot; 4220 vn_a.cred = cr; 4221 vn_a.amp = NULL; 4222 vn_a.flags = flags & ~MAP_TYPE; 4223 vn_a.szc = 0; 4224 vn_a.lgrp_mem_policy_flags = 0; 4225 4226 error = as_map(as, *addrp, len, segvn_create, &vn_a); 4227 4228 as_rangeunlock(as); 4229 ZFS_EXIT(zfsvfs); 4230 return (error); 4231 } 4232 4233 /* ARGSUSED */ 4234 static int 4235 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, 4236 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, 4237 caller_context_t *ct) 4238 { 4239 uint64_t pages = btopr(len); 4240 4241 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages); 4242 return (0); 4243 } 4244 4245 /* 4246 * The reason we push dirty pages as part of zfs_delmap() is so that we get a 4247 * more accurate mtime for the associated file. Since we don't have a way of 4248 * detecting when the data was actually modified, we have to resort to 4249 * heuristics. If an explicit msync() is done, then we mark the mtime when the 4250 * last page is pushed. The problem occurs when the msync() call is omitted, 4251 * which by far the most common case: 4252 * 4253 * open() 4254 * mmap() 4255 * <modify memory> 4256 * munmap() 4257 * close() 4258 * <time lapse> 4259 * putpage() via fsflush 4260 * 4261 * If we wait until fsflush to come along, we can have a modification time that 4262 * is some arbitrary point in the future. In order to prevent this in the 4263 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is 4264 * torn down. 4265 */ 4266 /* ARGSUSED */ 4267 static int 4268 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, 4269 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr, 4270 caller_context_t *ct) 4271 { 4272 uint64_t pages = btopr(len); 4273 4274 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages); 4275 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages); 4276 4277 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && 4278 vn_has_cached_data(vp)) 4279 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct); 4280 4281 return (0); 4282 } 4283 4284 /* 4285 * Free or allocate space in a file. Currently, this function only 4286 * supports the `F_FREESP' command. However, this command is somewhat 4287 * misnamed, as its functionality includes the ability to allocate as 4288 * well as free space. 4289 * 4290 * IN: vp - vnode of file to free data in. 4291 * cmd - action to take (only F_FREESP supported). 4292 * bfp - section of file to free/alloc. 4293 * flag - current file open mode flags. 4294 * offset - current file offset. 4295 * cr - credentials of caller [UNUSED]. 4296 * ct - caller context. 
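 *
 * Illustrative user-level usage (fd is a hypothetical descriptor for
 * this file):
 *
 *	struct flock64 bf;
 *
 *	bf.l_whence = SEEK_SET;
 *	bf.l_start = new_size;
 *	bf.l_len = 0;			// 0 == from l_start to end of file
 *	(void) fcntl(fd, F_FREESP, &bf);
 *
 * which is essentially how ftruncate(2) reaches this entry point.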
/*
 * Free or allocate space in a file.  Currently, this function only
 * supports the `F_FREESP' command.  However, this command is somewhat
 * misnamed, as its functionality includes the ability to allocate as
 * well as free space.
 *
 *	IN:	vp	- vnode of file to free data in.
 *		cmd	- action to take (only F_FREESP supported).
 *		bfp	- section of file to free/alloc.
 *		flag	- current file open mode flags.
 *		offset	- current file offset.
 *		cr	- credentials of caller [UNUSED].
 *		ct	- caller context.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	vp - ctime|mtime updated
 */
/* ARGSUSED */
static int
zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
    offset_t offset, cred_t *cr, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	uint64_t	off, len;
	int		error;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if (cmd != F_FREESP) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	if (error = convoff(vp, bfp, 0, offset)) {
		ZFS_EXIT(zfsvfs);
		return (error);
	}

	if (bfp->l_len < 0) {
		ZFS_EXIT(zfsvfs);
		return (EINVAL);
	}

	off = bfp->l_start;
	len = bfp->l_len; /* 0 means from off to end of file */

	error = zfs_freesp(zp, off, len, flag, TRUE);

	ZFS_EXIT(zfsvfs);
	return (error);
}
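/*
 * Illustration only: from user level this operation is typically reached
 * via fcntl(2) with the F_FREESP command; the described byte range is
 * freed, and an l_len of 0 means "from l_start to end of file", matching
 * the handling above.  A minimal sketch against an already-open fd
 * (error handling omitted):
 *
 *	#include <fcntl.h>
 *
 *	struct flock fl;
 *	fl.l_whence = SEEK_SET;		normalized by convoff() above
 *	fl.l_start = 0;
 *	fl.l_len = 0;			0 => free from l_start to EOF
 *	(void) fcntl(fd, F_FREESP, &fl);
 */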
/*ARGSUSED*/
static int
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	znode_t		*zp = VTOZ(vp);
	zfsvfs_t	*zfsvfs = zp->z_zfsvfs;
	uint32_t	gen;
	uint64_t	object = zp->z_id;
	zfid_short_t	*zfid;
	int		size, i;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	gen = (uint32_t)zp->z_gen;

	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
	if (fidp->fid_len < size) {
		fidp->fid_len = size;
		ZFS_EXIT(zfsvfs);
		return (ENOSPC);
	}

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = size;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* Must have a non-zero generation number to distinguish from .zfs */
	if (gen == 0)
		gen = 1;
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

	if (size == LONG_FID_LEN) {
		uint64_t	objsetid = dmu_objset_id(zfsvfs->z_os);
		zfid_long_t	*zlfid;

		zlfid = (zfid_long_t *)fidp;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));

		/* XXX - this should be the generation number for the objset */
		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			zlfid->zf_setgen[i] = 0;
	}

	ZFS_EXIT(zfsvfs);
	return (0);
}

static int
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	znode_t		*zp, *xzp;
	zfsvfs_t	*zfsvfs;
	zfs_dirlock_t	*dl;
	int		error;

	switch (cmd) {
	case _PC_LINK_MAX:
		*valp = ULONG_MAX;
		return (0);

	case _PC_FILESIZEBITS:
		*valp = 64;
		return (0);

	case _PC_XATTR_EXISTS:
		zp = VTOZ(vp);
		zfsvfs = zp->z_zfsvfs;
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		*valp = 0;
		error = zfs_dirent_lock(&dl, zp, "", &xzp,
		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
		if (error == 0) {
			zfs_dirent_unlock(dl);
			if (!zfs_dirempty(xzp))
				*valp = 1;
			VN_RELE(ZTOV(xzp));
		} else if (error == ENOENT) {
			/*
			 * If there aren't extended attributes, it's the
			 * same as having zero of them.
			 */
			error = 0;
		}
		ZFS_EXIT(zfsvfs);
		return (error);

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
		    (vp->v_type == VREG || vp->v_type == VDIR);
		return (0);

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACE_ENABLED;
		return (0);

	case _PC_MIN_HOLE_SIZE:
		*valp = (ulong_t)SPA_MINBLOCKSIZE;
		return (0);

	default:
		return (fs_pathconf(vp, cmd, valp, cr, ct));
	}
}
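/*
 * Illustration only: the _PC_XATTR_EXISTS query above is visible from
 * user level through pathconf(2)/fpathconf(2); a return of 1 means the
 * file has a non-empty extended attribute directory.  A minimal sketch,
 * again using a hypothetical path:
 *
 *	#include <unistd.h>
 *
 *	long has_xattr = pathconf("/tank/fs/file", _PC_XATTR_EXISTS);
 *	if (has_xattr == 1)
 *		... the file has extended attributes ...
 */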
/*ARGSUSED*/
static int
zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}

/*ARGSUSED*/
static int
zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_setacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);
	return (error);
}
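/*
 * Illustration only: zfs_getsecattr()/zfs_setsecattr() are typically
 * reached from user level via acl(2)/facl(2).  ZFS exposes NFSv4-style
 * ACEs (see _PC_ACL_ENABLED above), so the ACE_* commands and ace_t
 * buffers apply.  A minimal sketch for reading an ACL, assuming the same
 * hypothetical path (error handling omitted):
 *
 *	#include <sys/acl.h>
 *	#include <stdlib.h>
 *
 *	int cnt = acl("/tank/fs/file", ACE_GETACLCNT, 0, NULL);
 *	ace_t *aces = malloc(cnt * sizeof (ace_t));
 *	(void) acl("/tank/fs/file", ACE_GETACL, cnt, aces);
 */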
/*
 * Predeclare these here so that the compiler assumes that
 * this is an "old style" function declaration that does
 * not include arguments => we won't get type mismatch errors
 * in the initializations that follow.
 */
static int zfs_inval();
static int zfs_isdir();

static int
zfs_inval()
{
	return (EINVAL);
}

static int
zfs_isdir()
{
	return (EISDIR);
}

/*
 * Directory vnode operations template
 */
vnodeops_t *zfs_dvnodeops;
const fs_operation_def_t zfs_dvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_READ,		{ .error = zfs_isdir },
	VOPNAME_WRITE,		{ .error = zfs_isdir },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = zfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
	VOPNAME_LINK,		{ .vop_link = zfs_link },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Regular file vnode operations template
 */
vnodeops_t *zfs_fvnodeops;
const fs_operation_def_t zfs_fvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_READ,		{ .vop_read = zfs_read },
	VOPNAME_WRITE,		{ .vop_write = zfs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
	VOPNAME_SPACE,		{ .vop_space = zfs_space },
	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
	VOPNAME_MAP,		{ .vop_map = zfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Symbolic link vnode operations template
 */
vnodeops_t *zfs_symvnodeops;
const fs_operation_def_t zfs_symvnodeops_template[] = {
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Extended attribute directory vnode operations template
 *	This template is identical to the directory vnodes
 *	operation template except for restricted operations:
 *		VOP_MKDIR()
 *		VOP_SYMLINK()
 * Note that there are other restrictions embedded in:
 *	zfs_create()	- restrict type to VREG
 *	zfs_link()	- no links into/out of attribute space
 *	zfs_rename()	- no moves into/out of attribute space
 */
vnodeops_t *zfs_xdvnodeops;
const fs_operation_def_t zfs_xdvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = zfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
	VOPNAME_LINK,		{ .vop_link = zfs_link },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_MKDIR,		{ .error = zfs_inval },
	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
	VOPNAME_SYMLINK,	{ .error = zfs_inval },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Error vnode operations template
 */
vnodeops_t *zfs_evnodeops;
const fs_operation_def_t zfs_evnodeops_template[] = {
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	NULL,			NULL
};
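/*
 * Illustration only: these templates are not used directly.  Elsewhere in
 * ZFS (outside this file) each template is turned into a populated
 * vnodeops_t with vn_make_ops() during filesystem initialization, roughly:
 *
 *	error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
 *	    &zfs_dvnodeops);
 *
 * Entries that name { .error = ... } handlers (zfs_inval, zfs_isdir above)
 * simply return the corresponding errno for operations that are not
 * supported on that vnode type.
 */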