/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Portions Copyright 2007 Jeremy Teo */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/vm.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/kpm.h>
#include <vm/seg_kpm.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/sid.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/dnlc.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/cred_impl.h>
#include <sys/attr.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done, while avoiding races, using ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	VN_RELE() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call VN_RELE() within a tx, then use VN_RELE_ASYNC().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, seq, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		VN_RELE(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	VN_RELE(...);			// release held vnodes
 *	zil_commit(zilog, seq, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */

/* ARGSUSED */
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(*vpp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((flag & FWRITE) && (zp->z_phys->zp_flags & ZFS_APPENDONLY) &&
	    ((flag & FAPPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) &&
	    zp->z_phys->zp_size > 0) {
		if (fs_vscan(*vpp, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (EACCES);
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & (FSYNC | FDSYNC))
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/* ARGSUSED */
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
		atomic_dec_32(&zp->z_sync_cnt);

	/*
	 * Clean up any locks held by this process on the vp.
	 */
	cleanlocks(vp, ddi_get_pid(), 0);
	cleanshares(vp, ddi_get_pid());

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) &&
	    zp->z_phys->zp_size > 0)
		VERIFY(fs_vscan(vp, cr, 1) == 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/*
 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
 * data (cmd == _FIO_SEEK_DATA).  "off" is an in/out parameter.
 */
static int
zfs_holey(vnode_t *vp, int cmd, offset_t *off)
{
	znode_t	*zp = VTOZ(vp);
	uint64_t noff = (uint64_t)*off;	/* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_phys->zp_size;
	if (noff >= file_sz) {
		return (ENXIO);
	}

	if (cmd == _FIO_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);

	/* end of file? */
	if ((error == ESRCH) || (noff > file_sz)) {
		/*
		 * Handle the virtual hole at the end of file.
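		 * For example, on a file whose blocks are all data up to
		 * EOF, _FIO_SEEK_HOLE reports the implicit hole at zp_size,
		 * while _FIO_SEEK_DATA past the last data falls through to
		 * the ENXIO case below.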
263 */ 264 if (hole) { 265 *off = file_sz; 266 return (0); 267 } 268 return (ENXIO); 269 } 270 271 if (noff < *off) 272 return (error); 273 *off = noff; 274 return (error); 275 } 276 277 /* ARGSUSED */ 278 static int 279 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred, 280 int *rvalp, caller_context_t *ct) 281 { 282 offset_t off; 283 int error; 284 zfsvfs_t *zfsvfs; 285 znode_t *zp; 286 287 switch (com) { 288 case _FIOFFS: 289 return (zfs_sync(vp->v_vfsp, 0, cred)); 290 291 /* 292 * The following two ioctls are used by bfu. Faking out, 293 * necessary to avoid bfu errors. 294 */ 295 case _FIOGDIO: 296 case _FIOSDIO: 297 return (0); 298 299 case _FIO_SEEK_DATA: 300 case _FIO_SEEK_HOLE: 301 if (ddi_copyin((void *)data, &off, sizeof (off), flag)) 302 return (EFAULT); 303 304 zp = VTOZ(vp); 305 zfsvfs = zp->z_zfsvfs; 306 ZFS_ENTER(zfsvfs); 307 ZFS_VERIFY_ZP(zp); 308 309 /* offset parameter is in/out */ 310 error = zfs_holey(vp, com, &off); 311 ZFS_EXIT(zfsvfs); 312 if (error) 313 return (error); 314 if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) 315 return (EFAULT); 316 return (0); 317 } 318 return (ENOTTY); 319 } 320 321 /* 322 * Utility functions to map and unmap a single physical page. These 323 * are used to manage the mappable copies of ZFS file data, and therefore 324 * do not update ref/mod bits. 325 */ 326 caddr_t 327 zfs_map_page(page_t *pp, enum seg_rw rw) 328 { 329 if (kpm_enable) 330 return (hat_kpm_mapin(pp, 0)); 331 ASSERT(rw == S_READ || rw == S_WRITE); 332 return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0), 333 (caddr_t)-1)); 334 } 335 336 void 337 zfs_unmap_page(page_t *pp, caddr_t addr) 338 { 339 if (kpm_enable) { 340 hat_kpm_mapout(pp, 0, addr); 341 } else { 342 ppmapout(addr); 343 } 344 } 345 346 /* 347 * When a file is memory mapped, we must keep the IO data synchronized 348 * between the DMU cache and the memory mapped pages. What this means: 349 * 350 * On Write: If we find a memory mapped page, we write to *both* 351 * the page and the dmu buffer. 352 */ 353 static void 354 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid) 355 { 356 int64_t off; 357 358 off = start & PAGEOFFSET; 359 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 360 page_t *pp; 361 uint64_t nbytes = MIN(PAGESIZE - off, len); 362 363 if (pp = page_lookup(vp, start, SE_SHARED)) { 364 caddr_t va; 365 366 va = zfs_map_page(pp, S_WRITE); 367 (void) dmu_read(os, oid, start+off, nbytes, va+off, 368 DMU_READ_PREFETCH); 369 zfs_unmap_page(pp, va); 370 page_unlock(pp); 371 } 372 len -= nbytes; 373 off = 0; 374 } 375 } 376 377 /* 378 * When a file is memory mapped, we must keep the IO data synchronized 379 * between the DMU cache and the memory mapped pages. What this means: 380 * 381 * On Read: We "read" preferentially from memory mapped pages, 382 * else we default from the dmu buffer. 383 * 384 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when 385 * the file is memory mapped. 
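 *
 * For example, assuming 4K pages, a 10000-byte read starting at file
 * offset 300 is broken into segments of 3796, 4096 and 2108 bytes;
 * each segment is copied from the page cache if a page is present,
 * otherwise it is read through dmu_read_uio().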
386 */ 387 static int 388 mappedread(vnode_t *vp, int nbytes, uio_t *uio) 389 { 390 znode_t *zp = VTOZ(vp); 391 objset_t *os = zp->z_zfsvfs->z_os; 392 int64_t start, off; 393 int len = nbytes; 394 int error = 0; 395 396 start = uio->uio_loffset; 397 off = start & PAGEOFFSET; 398 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 399 page_t *pp; 400 uint64_t bytes = MIN(PAGESIZE - off, len); 401 402 if (pp = page_lookup(vp, start, SE_SHARED)) { 403 caddr_t va; 404 405 va = zfs_map_page(pp, S_READ); 406 error = uiomove(va + off, bytes, UIO_READ, uio); 407 zfs_unmap_page(pp, va); 408 page_unlock(pp); 409 } else { 410 error = dmu_read_uio(os, zp->z_id, uio, bytes); 411 } 412 len -= bytes; 413 off = 0; 414 if (error) 415 break; 416 } 417 return (error); 418 } 419 420 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ 421 422 /* 423 * Read bytes from specified file into supplied buffer. 424 * 425 * IN: vp - vnode of file to be read from. 426 * uio - structure supplying read location, range info, 427 * and return buffer. 428 * ioflag - SYNC flags; used to provide FRSYNC semantics. 429 * cr - credentials of caller. 430 * ct - caller context 431 * 432 * OUT: uio - updated offset and range, buffer filled. 433 * 434 * RETURN: 0 if success 435 * error code if failure 436 * 437 * Side Effects: 438 * vp - atime updated if byte count > 0 439 */ 440 /* ARGSUSED */ 441 static int 442 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 443 { 444 znode_t *zp = VTOZ(vp); 445 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 446 objset_t *os; 447 ssize_t n, nbytes; 448 int error; 449 rl_t *rl; 450 451 ZFS_ENTER(zfsvfs); 452 ZFS_VERIFY_ZP(zp); 453 os = zfsvfs->z_os; 454 455 if (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED) { 456 ZFS_EXIT(zfsvfs); 457 return (EACCES); 458 } 459 460 /* 461 * Validate file offset 462 */ 463 if (uio->uio_loffset < (offset_t)0) { 464 ZFS_EXIT(zfsvfs); 465 return (EINVAL); 466 } 467 468 /* 469 * Fasttrack empty reads 470 */ 471 if (uio->uio_resid == 0) { 472 ZFS_EXIT(zfsvfs); 473 return (0); 474 } 475 476 /* 477 * Check for mandatory locks 478 */ 479 if (MANDMODE((mode_t)zp->z_phys->zp_mode)) { 480 if (error = chklock(vp, FREAD, 481 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { 482 ZFS_EXIT(zfsvfs); 483 return (error); 484 } 485 } 486 487 /* 488 * If we're in FRSYNC mode, sync out this znode before reading it. 489 */ 490 if (ioflag & FRSYNC) 491 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); 492 493 /* 494 * Lock the range against changes. 495 */ 496 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER); 497 498 /* 499 * If we are reading past end-of-file we can skip 500 * to the end; but we might still need to set atime. 501 */ 502 if (uio->uio_loffset >= zp->z_phys->zp_size) { 503 error = 0; 504 goto out; 505 } 506 507 ASSERT(uio->uio_loffset < zp->z_phys->zp_size); 508 n = MIN(uio->uio_resid, zp->z_phys->zp_size - uio->uio_loffset); 509 510 while (n > 0) { 511 nbytes = MIN(n, zfs_read_chunk_size - 512 P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); 513 514 if (vn_has_cached_data(vp)) 515 error = mappedread(vp, nbytes, uio); 516 else 517 error = dmu_read_uio(os, zp->z_id, uio, nbytes); 518 if (error) { 519 /* convert checksum errors into IO errors */ 520 if (error == ECKSUM) 521 error = EIO; 522 break; 523 } 524 525 n -= nbytes; 526 } 527 528 out: 529 zfs_range_unlock(rl); 530 531 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 532 ZFS_EXIT(zfsvfs); 533 return (error); 534 } 535 536 /* 537 * Write the bytes to a file. 
538 * 539 * IN: vp - vnode of file to be written to. 540 * uio - structure supplying write location, range info, 541 * and data buffer. 542 * ioflag - FAPPEND flag set if in append mode. 543 * cr - credentials of caller. 544 * ct - caller context (NFS/CIFS fem monitor only) 545 * 546 * OUT: uio - updated offset and range. 547 * 548 * RETURN: 0 if success 549 * error code if failure 550 * 551 * Timestamps: 552 * vp - ctime|mtime updated if byte count > 0 553 */ 554 /* ARGSUSED */ 555 static int 556 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 557 { 558 znode_t *zp = VTOZ(vp); 559 rlim64_t limit = uio->uio_llimit; 560 ssize_t start_resid = uio->uio_resid; 561 ssize_t tx_bytes; 562 uint64_t end_size; 563 dmu_tx_t *tx; 564 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 565 zilog_t *zilog; 566 offset_t woff; 567 ssize_t n, nbytes; 568 rl_t *rl; 569 int max_blksz = zfsvfs->z_max_blksz; 570 uint64_t pflags; 571 int error; 572 arc_buf_t *abuf; 573 574 /* 575 * Fasttrack empty write 576 */ 577 n = start_resid; 578 if (n == 0) 579 return (0); 580 581 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) 582 limit = MAXOFFSET_T; 583 584 ZFS_ENTER(zfsvfs); 585 ZFS_VERIFY_ZP(zp); 586 587 /* 588 * If immutable or not appending then return EPERM 589 */ 590 pflags = zp->z_phys->zp_flags; 591 if ((pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || 592 ((pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && 593 (uio->uio_loffset < zp->z_phys->zp_size))) { 594 ZFS_EXIT(zfsvfs); 595 return (EPERM); 596 } 597 598 zilog = zfsvfs->z_log; 599 600 /* 601 * Pre-fault the pages to ensure slow (eg NFS) pages 602 * don't hold up txg. 603 */ 604 uio_prefaultpages(n, uio); 605 606 /* 607 * If in append mode, set the io offset pointer to eof. 608 */ 609 if (ioflag & FAPPEND) { 610 /* 611 * Range lock for a file append: 612 * The value for the start of range will be determined by 613 * zfs_range_lock() (to guarantee append semantics). 614 * If this write will cause the block size to increase, 615 * zfs_range_lock() will lock the entire file, so we must 616 * later reduce the range after we grow the block size. 617 */ 618 rl = zfs_range_lock(zp, 0, n, RL_APPEND); 619 if (rl->r_len == UINT64_MAX) { 620 /* overlocked, zp_size can't change */ 621 woff = uio->uio_loffset = zp->z_phys->zp_size; 622 } else { 623 woff = uio->uio_loffset = rl->r_off; 624 } 625 } else { 626 woff = uio->uio_loffset; 627 /* 628 * Validate file offset 629 */ 630 if (woff < 0) { 631 ZFS_EXIT(zfsvfs); 632 return (EINVAL); 633 } 634 635 /* 636 * If we need to grow the block size then zfs_range_lock() 637 * will lock a wider range than we request here. 638 * Later after growing the block size we reduce the range. 639 */ 640 rl = zfs_range_lock(zp, woff, n, RL_WRITER); 641 } 642 643 if (woff >= limit) { 644 zfs_range_unlock(rl); 645 ZFS_EXIT(zfsvfs); 646 return (EFBIG); 647 } 648 649 if ((woff + n) > limit || woff > (limit - n)) 650 n = limit - woff; 651 652 /* 653 * Check for mandatory locks 654 */ 655 if (MANDMODE((mode_t)zp->z_phys->zp_mode) && 656 (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { 657 zfs_range_unlock(rl); 658 ZFS_EXIT(zfsvfs); 659 return (error); 660 } 661 end_size = MAX(zp->z_phys->zp_size, woff + n); 662 663 /* 664 * Write the file in reasonable size chunks. Each chunk is written 665 * in a separate transaction; this keeps the intent log records small 666 * and allows us to do more fine-grained space accounting. 
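 *
 * For example, assuming z_max_blksz is 128K, a 1MB write starting at
 * file offset 100K is issued as a 28K chunk (up to the next block
 * boundary), seven full 128K chunks, and a final 100K chunk, each in
 * its own transaction.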
667 */ 668 while (n > 0) { 669 abuf = NULL; 670 woff = uio->uio_loffset; 671 672 again: 673 if (zfs_usergroup_overquota(zfsvfs, 674 B_FALSE, zp->z_phys->zp_uid) || 675 zfs_usergroup_overquota(zfsvfs, 676 B_TRUE, zp->z_phys->zp_gid)) { 677 if (abuf != NULL) 678 dmu_return_arcbuf(abuf); 679 error = EDQUOT; 680 break; 681 } 682 683 /* 684 * If dmu_assign_arcbuf() is expected to execute with minimum 685 * overhead loan an arc buffer and copy user data to it before 686 * we enter a txg. This avoids holding a txg forever while we 687 * pagefault on a hanging NFS server mapping. 688 */ 689 if (abuf == NULL && n >= max_blksz && 690 woff >= zp->z_phys->zp_size && 691 P2PHASE(woff, max_blksz) == 0 && 692 zp->z_blksz == max_blksz) { 693 size_t cbytes; 694 695 abuf = dmu_request_arcbuf(zp->z_dbuf, max_blksz); 696 ASSERT(abuf != NULL); 697 ASSERT(arc_buf_size(abuf) == max_blksz); 698 if (error = uiocopy(abuf->b_data, max_blksz, 699 UIO_WRITE, uio, &cbytes)) { 700 dmu_return_arcbuf(abuf); 701 break; 702 } 703 ASSERT(cbytes == max_blksz); 704 } 705 706 /* 707 * Start a transaction. 708 */ 709 tx = dmu_tx_create(zfsvfs->z_os); 710 dmu_tx_hold_bonus(tx, zp->z_id); 711 dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz)); 712 error = dmu_tx_assign(tx, TXG_NOWAIT); 713 if (error) { 714 if (error == ERESTART) { 715 dmu_tx_wait(tx); 716 dmu_tx_abort(tx); 717 goto again; 718 } 719 dmu_tx_abort(tx); 720 if (abuf != NULL) 721 dmu_return_arcbuf(abuf); 722 break; 723 } 724 725 /* 726 * If zfs_range_lock() over-locked we grow the blocksize 727 * and then reduce the lock range. This will only happen 728 * on the first iteration since zfs_range_reduce() will 729 * shrink down r_len to the appropriate size. 730 */ 731 if (rl->r_len == UINT64_MAX) { 732 uint64_t new_blksz; 733 734 if (zp->z_blksz > max_blksz) { 735 ASSERT(!ISP2(zp->z_blksz)); 736 new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE); 737 } else { 738 new_blksz = MIN(end_size, max_blksz); 739 } 740 zfs_grow_blocksize(zp, new_blksz, tx); 741 zfs_range_reduce(rl, woff, n); 742 } 743 744 /* 745 * XXX - should we really limit each write to z_max_blksz? 746 * Perhaps we should use SPA_MAXBLOCKSIZE chunks? 747 */ 748 nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz)); 749 750 if (abuf == NULL) { 751 tx_bytes = uio->uio_resid; 752 error = dmu_write_uio(zfsvfs->z_os, zp->z_id, uio, 753 nbytes, tx); 754 tx_bytes -= uio->uio_resid; 755 } else { 756 tx_bytes = nbytes; 757 ASSERT(tx_bytes == max_blksz); 758 dmu_assign_arcbuf(zp->z_dbuf, woff, abuf, tx); 759 ASSERT(tx_bytes <= uio->uio_resid); 760 uioskip(uio, tx_bytes); 761 } 762 if (tx_bytes && vn_has_cached_data(vp)) { 763 update_pages(vp, woff, 764 tx_bytes, zfsvfs->z_os, zp->z_id); 765 } 766 767 /* 768 * If we made no progress, we're done. If we made even 769 * partial progress, update the znode and ZIL accordingly. 770 */ 771 if (tx_bytes == 0) { 772 dmu_tx_commit(tx); 773 ASSERT(error != 0); 774 break; 775 } 776 777 /* 778 * Clear Set-UID/Set-GID bits on successful write if not 779 * privileged and at least one of the excute bits is set. 780 * 781 * It would be nice to to this after all writes have 782 * been done, but that would still expose the ISUID/ISGID 783 * to another app after the partial write is committed. 784 * 785 * Note: we don't call zfs_fuid_map_id() here because 786 * user 0 is not an ephemeral uid. 
787 */ 788 mutex_enter(&zp->z_acl_lock); 789 if ((zp->z_phys->zp_mode & (S_IXUSR | (S_IXUSR >> 3) | 790 (S_IXUSR >> 6))) != 0 && 791 (zp->z_phys->zp_mode & (S_ISUID | S_ISGID)) != 0 && 792 secpolicy_vnode_setid_retain(cr, 793 (zp->z_phys->zp_mode & S_ISUID) != 0 && 794 zp->z_phys->zp_uid == 0) != 0) { 795 zp->z_phys->zp_mode &= ~(S_ISUID | S_ISGID); 796 } 797 mutex_exit(&zp->z_acl_lock); 798 799 /* 800 * Update time stamp. NOTE: This marks the bonus buffer as 801 * dirty, so we don't have to do it again for zp_size. 802 */ 803 zfs_time_stamper(zp, CONTENT_MODIFIED, tx); 804 805 /* 806 * Update the file size (zp_size) if it has changed; 807 * account for possible concurrent updates. 808 */ 809 while ((end_size = zp->z_phys->zp_size) < uio->uio_loffset) 810 (void) atomic_cas_64(&zp->z_phys->zp_size, end_size, 811 uio->uio_loffset); 812 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag); 813 dmu_tx_commit(tx); 814 815 if (error != 0) 816 break; 817 ASSERT(tx_bytes == nbytes); 818 n -= nbytes; 819 } 820 821 zfs_range_unlock(rl); 822 823 /* 824 * If we're in replay mode, or we made no progress, return error. 825 * Otherwise, it's at least a partial write, so it's successful. 826 */ 827 if (zfsvfs->z_replay || uio->uio_resid == start_resid) { 828 ZFS_EXIT(zfsvfs); 829 return (error); 830 } 831 832 if (ioflag & (FSYNC | FDSYNC)) 833 zil_commit(zilog, zp->z_last_itx, zp->z_id); 834 835 ZFS_EXIT(zfsvfs); 836 return (0); 837 } 838 839 void 840 zfs_get_done(dmu_buf_t *db, void *vzgd) 841 { 842 zgd_t *zgd = (zgd_t *)vzgd; 843 rl_t *rl = zgd->zgd_rl; 844 vnode_t *vp = ZTOV(rl->r_zp); 845 objset_t *os = rl->r_zp->z_zfsvfs->z_os; 846 847 dmu_buf_rele(db, vzgd); 848 zfs_range_unlock(rl); 849 /* 850 * Release the vnode asynchronously as we currently have the 851 * txg stopped from syncing. 852 */ 853 VN_RELE_ASYNC(vp, dsl_pool_vnrele_taskq(dmu_objset_pool(os))); 854 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 855 kmem_free(zgd, sizeof (zgd_t)); 856 } 857 858 /* 859 * Get data to generate a TX_WRITE intent log record. 860 */ 861 int 862 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 863 { 864 zfsvfs_t *zfsvfs = arg; 865 objset_t *os = zfsvfs->z_os; 866 znode_t *zp; 867 uint64_t off = lr->lr_offset; 868 dmu_buf_t *db; 869 rl_t *rl; 870 zgd_t *zgd; 871 int dlen = lr->lr_length; /* length of user data */ 872 int error = 0; 873 874 ASSERT(zio); 875 ASSERT(dlen != 0); 876 877 /* 878 * Nothing to do if the file has been removed 879 */ 880 if (zfs_zget(zfsvfs, lr->lr_foid, &zp) != 0) 881 return (ENOENT); 882 if (zp->z_unlinked) { 883 /* 884 * Release the vnode asynchronously as we currently have the 885 * txg stopped from syncing. 886 */ 887 VN_RELE_ASYNC(ZTOV(zp), 888 dsl_pool_vnrele_taskq(dmu_objset_pool(os))); 889 return (ENOENT); 890 } 891 892 /* 893 * Write records come in two flavors: immediate and indirect. 894 * For small writes it's cheaper to store the data with the 895 * log record (immediate); for large writes it's cheaper to 896 * sync the data and get a pointer to it (indirect) so that 897 * we don't have to write the data twice. 
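 *
 * In the code below, a non-NULL buf indicates an immediate write, and
 * the file data is simply copied into the log record.  For an indirect
 * write (buf == NULL) the whole block is range locked, dmu_sync()
 * writes it in place, and only the resulting block pointer is recorded
 * in the ZIL.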
898 */ 899 if (buf != NULL) { /* immediate write */ 900 rl = zfs_range_lock(zp, off, dlen, RL_READER); 901 /* test for truncation needs to be done while range locked */ 902 if (off >= zp->z_phys->zp_size) { 903 error = ENOENT; 904 goto out; 905 } 906 VERIFY(0 == dmu_read(os, lr->lr_foid, off, dlen, buf, 907 DMU_READ_NO_PREFETCH)); 908 } else { /* indirect write */ 909 uint64_t boff; /* block starting offset */ 910 911 /* 912 * Have to lock the whole block to ensure when it's 913 * written out and it's checksum is being calculated 914 * that no one can change the data. We need to re-check 915 * blocksize after we get the lock in case it's changed! 916 */ 917 for (;;) { 918 if (ISP2(zp->z_blksz)) { 919 boff = P2ALIGN_TYPED(off, zp->z_blksz, 920 uint64_t); 921 } else { 922 boff = 0; 923 } 924 dlen = zp->z_blksz; 925 rl = zfs_range_lock(zp, boff, dlen, RL_READER); 926 if (zp->z_blksz == dlen) 927 break; 928 zfs_range_unlock(rl); 929 } 930 /* test for truncation needs to be done while range locked */ 931 if (off >= zp->z_phys->zp_size) { 932 error = ENOENT; 933 goto out; 934 } 935 zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP); 936 zgd->zgd_rl = rl; 937 zgd->zgd_zilog = zfsvfs->z_log; 938 zgd->zgd_bp = &lr->lr_blkptr; 939 VERIFY(0 == dmu_buf_hold(os, lr->lr_foid, boff, zgd, &db)); 940 ASSERT(boff == db->db_offset); 941 lr->lr_blkoff = off - boff; 942 error = dmu_sync(zio, db, &lr->lr_blkptr, 943 lr->lr_common.lrc_txg, zfs_get_done, zgd); 944 ASSERT((error && error != EINPROGRESS) || 945 lr->lr_length <= zp->z_blksz); 946 if (error == 0) 947 zil_add_block(zfsvfs->z_log, &lr->lr_blkptr); 948 /* 949 * If we get EINPROGRESS, then we need to wait for a 950 * write IO initiated by dmu_sync() to complete before 951 * we can release this dbuf. We will finish everything 952 * up in the zfs_get_done() callback. 953 */ 954 if (error == EINPROGRESS) 955 return (0); 956 dmu_buf_rele(db, zgd); 957 kmem_free(zgd, sizeof (zgd_t)); 958 } 959 out: 960 zfs_range_unlock(rl); 961 /* 962 * Release the vnode asynchronously as we currently have the 963 * txg stopped from syncing. 964 */ 965 VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os))); 966 return (error); 967 } 968 969 /*ARGSUSED*/ 970 static int 971 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, 972 caller_context_t *ct) 973 { 974 znode_t *zp = VTOZ(vp); 975 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 976 int error; 977 978 ZFS_ENTER(zfsvfs); 979 ZFS_VERIFY_ZP(zp); 980 981 if (flag & V_ACE_MASK) 982 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr); 983 else 984 error = zfs_zaccess_rwx(zp, mode, flag, cr); 985 986 ZFS_EXIT(zfsvfs); 987 return (error); 988 } 989 990 /* 991 * Lookup an entry in a directory, or an extended attribute directory. 992 * If it exists, return a held vnode reference for it. 993 * 994 * IN: dvp - vnode of directory to search. 995 * nm - name of entry to lookup. 996 * pnp - full pathname to lookup [UNUSED]. 997 * flags - LOOKUP_XATTR set if looking for an attribute. 998 * rdir - root directory vnode [UNUSED]. 999 * cr - credentials of caller. 1000 * ct - caller context 1001 * direntflags - directory lookup flags 1002 * realpnp - returned pathname. 1003 * 1004 * OUT: vpp - vnode of located entry, NULL if not found. 
1005 * 1006 * RETURN: 0 if success 1007 * error code if failure 1008 * 1009 * Timestamps: 1010 * NA 1011 */ 1012 /* ARGSUSED */ 1013 static int 1014 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, 1015 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, 1016 int *direntflags, pathname_t *realpnp) 1017 { 1018 znode_t *zdp = VTOZ(dvp); 1019 zfsvfs_t *zfsvfs = zdp->z_zfsvfs; 1020 int error; 1021 1022 ZFS_ENTER(zfsvfs); 1023 ZFS_VERIFY_ZP(zdp); 1024 1025 *vpp = NULL; 1026 1027 if (flags & LOOKUP_XATTR) { 1028 /* 1029 * If the xattr property is off, refuse the lookup request. 1030 */ 1031 if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) { 1032 ZFS_EXIT(zfsvfs); 1033 return (EINVAL); 1034 } 1035 1036 /* 1037 * We don't allow recursive attributes.. 1038 * Maybe someday we will. 1039 */ 1040 if (zdp->z_phys->zp_flags & ZFS_XATTR) { 1041 ZFS_EXIT(zfsvfs); 1042 return (EINVAL); 1043 } 1044 1045 if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) { 1046 ZFS_EXIT(zfsvfs); 1047 return (error); 1048 } 1049 1050 /* 1051 * Do we have permission to get into attribute directory? 1052 */ 1053 1054 if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0, 1055 B_FALSE, cr)) { 1056 VN_RELE(*vpp); 1057 *vpp = NULL; 1058 } 1059 1060 ZFS_EXIT(zfsvfs); 1061 return (error); 1062 } 1063 1064 if (dvp->v_type != VDIR) { 1065 ZFS_EXIT(zfsvfs); 1066 return (ENOTDIR); 1067 } 1068 1069 /* 1070 * Check accessibility of directory. 1071 */ 1072 1073 if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) { 1074 ZFS_EXIT(zfsvfs); 1075 return (error); 1076 } 1077 1078 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), 1079 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1080 ZFS_EXIT(zfsvfs); 1081 return (EILSEQ); 1082 } 1083 1084 error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp); 1085 if (error == 0) { 1086 /* 1087 * Convert device special files 1088 */ 1089 if (IS_DEVVP(*vpp)) { 1090 vnode_t *svp; 1091 1092 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); 1093 VN_RELE(*vpp); 1094 if (svp == NULL) 1095 error = ENOSYS; 1096 else 1097 *vpp = svp; 1098 } 1099 } 1100 1101 ZFS_EXIT(zfsvfs); 1102 return (error); 1103 } 1104 1105 /* 1106 * Attempt to create a new entry in a directory. If the entry 1107 * already exists, truncate the file if permissible, else return 1108 * an error. Return the vp of the created or trunc'd file. 1109 * 1110 * IN: dvp - vnode of directory to put new file entry in. 1111 * name - name of new file entry. 1112 * vap - attributes of new file. 1113 * excl - flag indicating exclusive or non-exclusive mode. 1114 * mode - mode to open file with. 1115 * cr - credentials of caller. 1116 * flag - large file flag [UNUSED]. 1117 * ct - caller context 1118 * vsecp - ACL to be set 1119 * 1120 * OUT: vpp - vnode of created or trunc'd entry. 
1121 * 1122 * RETURN: 0 if success 1123 * error code if failure 1124 * 1125 * Timestamps: 1126 * dvp - ctime|mtime updated if new entry created 1127 * vp - ctime|mtime always, atime if new 1128 */ 1129 1130 /* ARGSUSED */ 1131 static int 1132 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl, 1133 int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct, 1134 vsecattr_t *vsecp) 1135 { 1136 znode_t *zp, *dzp = VTOZ(dvp); 1137 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1138 zilog_t *zilog; 1139 objset_t *os; 1140 zfs_dirlock_t *dl; 1141 dmu_tx_t *tx; 1142 int error; 1143 ksid_t *ksid; 1144 uid_t uid; 1145 gid_t gid = crgetgid(cr); 1146 zfs_acl_ids_t acl_ids; 1147 boolean_t fuid_dirtied; 1148 1149 /* 1150 * If we have an ephemeral id, ACL, or XVATTR then 1151 * make sure file system is at proper version 1152 */ 1153 1154 ksid = crgetsid(cr, KSID_OWNER); 1155 if (ksid) 1156 uid = ksid_getid(ksid); 1157 else 1158 uid = crgetuid(cr); 1159 1160 if (zfsvfs->z_use_fuids == B_FALSE && 1161 (vsecp || (vap->va_mask & AT_XVATTR) || 1162 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1163 return (EINVAL); 1164 1165 ZFS_ENTER(zfsvfs); 1166 ZFS_VERIFY_ZP(dzp); 1167 os = zfsvfs->z_os; 1168 zilog = zfsvfs->z_log; 1169 1170 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 1171 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1172 ZFS_EXIT(zfsvfs); 1173 return (EILSEQ); 1174 } 1175 1176 if (vap->va_mask & AT_XVATTR) { 1177 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1178 crgetuid(cr), cr, vap->va_type)) != 0) { 1179 ZFS_EXIT(zfsvfs); 1180 return (error); 1181 } 1182 } 1183 top: 1184 *vpp = NULL; 1185 1186 if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr)) 1187 vap->va_mode &= ~VSVTX; 1188 1189 if (*name == '\0') { 1190 /* 1191 * Null component name refers to the directory itself. 1192 */ 1193 VN_HOLD(dvp); 1194 zp = dzp; 1195 dl = NULL; 1196 error = 0; 1197 } else { 1198 /* possible VN_HOLD(zp) */ 1199 int zflg = 0; 1200 1201 if (flag & FIGNORECASE) 1202 zflg |= ZCILOOK; 1203 1204 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1205 NULL, NULL); 1206 if (error) { 1207 if (strcmp(name, "..") == 0) 1208 error = EISDIR; 1209 ZFS_EXIT(zfsvfs); 1210 return (error); 1211 } 1212 } 1213 if (zp == NULL) { 1214 uint64_t txtype; 1215 1216 /* 1217 * Create a new file object and update the directory 1218 * to reference it. 1219 */ 1220 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 1221 goto out; 1222 } 1223 1224 /* 1225 * We only support the creation of regular files in 1226 * extended attribute directories. 
1227 */ 1228 if ((dzp->z_phys->zp_flags & ZFS_XATTR) && 1229 (vap->va_type != VREG)) { 1230 error = EINVAL; 1231 goto out; 1232 } 1233 1234 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, vsecp, 1235 &acl_ids)) != 0) 1236 goto out; 1237 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 1238 error = EDQUOT; 1239 goto out; 1240 } 1241 1242 tx = dmu_tx_create(os); 1243 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 1244 fuid_dirtied = zfsvfs->z_fuid_dirty; 1245 if (fuid_dirtied) 1246 zfs_fuid_txhold(zfsvfs, tx); 1247 dmu_tx_hold_bonus(tx, dzp->z_id); 1248 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 1249 if (acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 1250 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 1251 0, SPA_MAXBLOCKSIZE); 1252 } 1253 error = dmu_tx_assign(tx, TXG_NOWAIT); 1254 if (error) { 1255 zfs_acl_ids_free(&acl_ids); 1256 zfs_dirent_unlock(dl); 1257 if (error == ERESTART) { 1258 dmu_tx_wait(tx); 1259 dmu_tx_abort(tx); 1260 goto top; 1261 } 1262 dmu_tx_abort(tx); 1263 ZFS_EXIT(zfsvfs); 1264 return (error); 1265 } 1266 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, &acl_ids); 1267 1268 if (fuid_dirtied) 1269 zfs_fuid_sync(zfsvfs, tx); 1270 1271 (void) zfs_link_create(dl, zp, tx, ZNEW); 1272 1273 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); 1274 if (flag & FIGNORECASE) 1275 txtype |= TX_CI; 1276 zfs_log_create(zilog, tx, txtype, dzp, zp, name, 1277 vsecp, acl_ids.z_fuidp, vap); 1278 zfs_acl_ids_free(&acl_ids); 1279 dmu_tx_commit(tx); 1280 } else { 1281 int aflags = (flag & FAPPEND) ? V_APPEND : 0; 1282 1283 /* 1284 * A directory entry already exists for this name. 1285 */ 1286 /* 1287 * Can't truncate an existing file if in exclusive mode. 1288 */ 1289 if (excl == EXCL) { 1290 error = EEXIST; 1291 goto out; 1292 } 1293 /* 1294 * Can't open a directory for writing. 1295 */ 1296 if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) { 1297 error = EISDIR; 1298 goto out; 1299 } 1300 /* 1301 * Verify requested access to file. 1302 */ 1303 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) { 1304 goto out; 1305 } 1306 1307 mutex_enter(&dzp->z_lock); 1308 dzp->z_seq++; 1309 mutex_exit(&dzp->z_lock); 1310 1311 /* 1312 * Truncate regular files if requested. 1313 */ 1314 if ((ZTOV(zp)->v_type == VREG) && 1315 (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) { 1316 /* we can't hold any locks when calling zfs_freesp() */ 1317 zfs_dirent_unlock(dl); 1318 dl = NULL; 1319 error = zfs_freesp(zp, 0, 0, mode, TRUE); 1320 if (error == 0) { 1321 vnevent_create(ZTOV(zp), ct); 1322 } 1323 } 1324 } 1325 out: 1326 1327 if (dl) 1328 zfs_dirent_unlock(dl); 1329 1330 if (error) { 1331 if (zp) 1332 VN_RELE(ZTOV(zp)); 1333 } else { 1334 *vpp = ZTOV(zp); 1335 /* 1336 * If vnode is for a device return a specfs vnode instead. 1337 */ 1338 if (IS_DEVVP(*vpp)) { 1339 struct vnode *svp; 1340 1341 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); 1342 VN_RELE(*vpp); 1343 if (svp == NULL) { 1344 error = ENOSYS; 1345 } 1346 *vpp = svp; 1347 } 1348 } 1349 1350 ZFS_EXIT(zfsvfs); 1351 return (error); 1352 } 1353 1354 /* 1355 * Remove an entry from a directory. 1356 * 1357 * IN: dvp - vnode of directory to remove entry from. 1358 * name - name of entry to remove. 1359 * cr - credentials of caller. 
1360 * ct - caller context 1361 * flags - case flags 1362 * 1363 * RETURN: 0 if success 1364 * error code if failure 1365 * 1366 * Timestamps: 1367 * dvp - ctime|mtime 1368 * vp - ctime (if nlink > 0) 1369 */ 1370 /*ARGSUSED*/ 1371 static int 1372 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, 1373 int flags) 1374 { 1375 znode_t *zp, *dzp = VTOZ(dvp); 1376 znode_t *xzp = NULL; 1377 vnode_t *vp; 1378 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1379 zilog_t *zilog; 1380 uint64_t acl_obj, xattr_obj; 1381 zfs_dirlock_t *dl; 1382 dmu_tx_t *tx; 1383 boolean_t may_delete_now, delete_now = FALSE; 1384 boolean_t unlinked, toobig = FALSE; 1385 uint64_t txtype; 1386 pathname_t *realnmp = NULL; 1387 pathname_t realnm; 1388 int error; 1389 int zflg = ZEXISTS; 1390 1391 ZFS_ENTER(zfsvfs); 1392 ZFS_VERIFY_ZP(dzp); 1393 zilog = zfsvfs->z_log; 1394 1395 if (flags & FIGNORECASE) { 1396 zflg |= ZCILOOK; 1397 pn_alloc(&realnm); 1398 realnmp = &realnm; 1399 } 1400 1401 top: 1402 /* 1403 * Attempt to lock directory; fail if entry doesn't exist. 1404 */ 1405 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1406 NULL, realnmp)) { 1407 if (realnmp) 1408 pn_free(realnmp); 1409 ZFS_EXIT(zfsvfs); 1410 return (error); 1411 } 1412 1413 vp = ZTOV(zp); 1414 1415 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 1416 goto out; 1417 } 1418 1419 /* 1420 * Need to use rmdir for removing directories. 1421 */ 1422 if (vp->v_type == VDIR) { 1423 error = EPERM; 1424 goto out; 1425 } 1426 1427 vnevent_remove(vp, dvp, name, ct); 1428 1429 if (realnmp) 1430 dnlc_remove(dvp, realnmp->pn_buf); 1431 else 1432 dnlc_remove(dvp, name); 1433 1434 mutex_enter(&vp->v_lock); 1435 may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp); 1436 mutex_exit(&vp->v_lock); 1437 1438 /* 1439 * We may delete the znode now, or we may put it in the unlinked set; 1440 * it depends on whether we're the last link, and on whether there are 1441 * other holds on the vnode. So we dmu_tx_hold() the right things to 1442 * allow for either case. 1443 */ 1444 tx = dmu_tx_create(zfsvfs->z_os); 1445 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 1446 dmu_tx_hold_bonus(tx, zp->z_id); 1447 if (may_delete_now) { 1448 toobig = 1449 zp->z_phys->zp_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT; 1450 /* if the file is too big, only hold_free a token amount */ 1451 dmu_tx_hold_free(tx, zp->z_id, 0, 1452 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END)); 1453 } 1454 1455 /* are there any extended attributes? */ 1456 if ((xattr_obj = zp->z_phys->zp_xattr) != 0) { 1457 /* XXX - do we need this if we are deleting? */ 1458 dmu_tx_hold_bonus(tx, xattr_obj); 1459 } 1460 1461 /* are there any additional acls */ 1462 if ((acl_obj = zp->z_phys->zp_acl.z_acl_extern_obj) != 0 && 1463 may_delete_now) 1464 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); 1465 1466 /* charge as an update -- would be nice not to charge at all */ 1467 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 1468 1469 error = dmu_tx_assign(tx, TXG_NOWAIT); 1470 if (error) { 1471 zfs_dirent_unlock(dl); 1472 VN_RELE(vp); 1473 if (error == ERESTART) { 1474 dmu_tx_wait(tx); 1475 dmu_tx_abort(tx); 1476 goto top; 1477 } 1478 if (realnmp) 1479 pn_free(realnmp); 1480 dmu_tx_abort(tx); 1481 ZFS_EXIT(zfsvfs); 1482 return (error); 1483 } 1484 1485 /* 1486 * Remove the directory entry. 
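	 * If this turns out to be the last link and nothing else holds
	 * the vnode, the znode is destroyed in this same transaction
	 * (the delete_now path below); otherwise it is added to the
	 * unlinked set to be reaped later.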
1487 */ 1488 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked); 1489 1490 if (error) { 1491 dmu_tx_commit(tx); 1492 goto out; 1493 } 1494 1495 if (unlinked) { 1496 mutex_enter(&vp->v_lock); 1497 delete_now = may_delete_now && !toobig && 1498 vp->v_count == 1 && !vn_has_cached_data(vp) && 1499 zp->z_phys->zp_xattr == xattr_obj && 1500 zp->z_phys->zp_acl.z_acl_extern_obj == acl_obj; 1501 mutex_exit(&vp->v_lock); 1502 } 1503 1504 if (delete_now) { 1505 if (zp->z_phys->zp_xattr) { 1506 error = zfs_zget(zfsvfs, zp->z_phys->zp_xattr, &xzp); 1507 ASSERT3U(error, ==, 0); 1508 ASSERT3U(xzp->z_phys->zp_links, ==, 2); 1509 dmu_buf_will_dirty(xzp->z_dbuf, tx); 1510 mutex_enter(&xzp->z_lock); 1511 xzp->z_unlinked = 1; 1512 xzp->z_phys->zp_links = 0; 1513 mutex_exit(&xzp->z_lock); 1514 zfs_unlinked_add(xzp, tx); 1515 zp->z_phys->zp_xattr = 0; /* probably unnecessary */ 1516 } 1517 mutex_enter(&zp->z_lock); 1518 mutex_enter(&vp->v_lock); 1519 vp->v_count--; 1520 ASSERT3U(vp->v_count, ==, 0); 1521 mutex_exit(&vp->v_lock); 1522 mutex_exit(&zp->z_lock); 1523 zfs_znode_delete(zp, tx); 1524 } else if (unlinked) { 1525 zfs_unlinked_add(zp, tx); 1526 } 1527 1528 txtype = TX_REMOVE; 1529 if (flags & FIGNORECASE) 1530 txtype |= TX_CI; 1531 zfs_log_remove(zilog, tx, txtype, dzp, name); 1532 1533 dmu_tx_commit(tx); 1534 out: 1535 if (realnmp) 1536 pn_free(realnmp); 1537 1538 zfs_dirent_unlock(dl); 1539 1540 if (!delete_now) { 1541 VN_RELE(vp); 1542 } else if (xzp) { 1543 /* this rele is delayed to prevent nesting transactions */ 1544 VN_RELE(ZTOV(xzp)); 1545 } 1546 1547 ZFS_EXIT(zfsvfs); 1548 return (error); 1549 } 1550 1551 /* 1552 * Create a new directory and insert it into dvp using the name 1553 * provided. Return a pointer to the inserted directory. 1554 * 1555 * IN: dvp - vnode of directory to add subdir to. 1556 * dirname - name of new directory. 1557 * vap - attributes of new directory. 1558 * cr - credentials of caller. 1559 * ct - caller context 1560 * vsecp - ACL to be set 1561 * 1562 * OUT: vpp - vnode of created directory. 
1563 * 1564 * RETURN: 0 if success 1565 * error code if failure 1566 * 1567 * Timestamps: 1568 * dvp - ctime|mtime updated 1569 * vp - ctime|mtime|atime updated 1570 */ 1571 /*ARGSUSED*/ 1572 static int 1573 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr, 1574 caller_context_t *ct, int flags, vsecattr_t *vsecp) 1575 { 1576 znode_t *zp, *dzp = VTOZ(dvp); 1577 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1578 zilog_t *zilog; 1579 zfs_dirlock_t *dl; 1580 uint64_t txtype; 1581 dmu_tx_t *tx; 1582 int error; 1583 int zf = ZNEW; 1584 ksid_t *ksid; 1585 uid_t uid; 1586 gid_t gid = crgetgid(cr); 1587 zfs_acl_ids_t acl_ids; 1588 boolean_t fuid_dirtied; 1589 1590 ASSERT(vap->va_type == VDIR); 1591 1592 /* 1593 * If we have an ephemeral id, ACL, or XVATTR then 1594 * make sure file system is at proper version 1595 */ 1596 1597 ksid = crgetsid(cr, KSID_OWNER); 1598 if (ksid) 1599 uid = ksid_getid(ksid); 1600 else 1601 uid = crgetuid(cr); 1602 if (zfsvfs->z_use_fuids == B_FALSE && 1603 (vsecp || (vap->va_mask & AT_XVATTR) || 1604 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1605 return (EINVAL); 1606 1607 ZFS_ENTER(zfsvfs); 1608 ZFS_VERIFY_ZP(dzp); 1609 zilog = zfsvfs->z_log; 1610 1611 if (dzp->z_phys->zp_flags & ZFS_XATTR) { 1612 ZFS_EXIT(zfsvfs); 1613 return (EINVAL); 1614 } 1615 1616 if (zfsvfs->z_utf8 && u8_validate(dirname, 1617 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1618 ZFS_EXIT(zfsvfs); 1619 return (EILSEQ); 1620 } 1621 if (flags & FIGNORECASE) 1622 zf |= ZCILOOK; 1623 1624 if (vap->va_mask & AT_XVATTR) 1625 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1626 crgetuid(cr), cr, vap->va_type)) != 0) { 1627 ZFS_EXIT(zfsvfs); 1628 return (error); 1629 } 1630 1631 /* 1632 * First make sure the new directory doesn't exist. 1633 */ 1634 top: 1635 *vpp = NULL; 1636 1637 if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, 1638 NULL, NULL)) { 1639 ZFS_EXIT(zfsvfs); 1640 return (error); 1641 } 1642 1643 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) { 1644 zfs_dirent_unlock(dl); 1645 ZFS_EXIT(zfsvfs); 1646 return (error); 1647 } 1648 1649 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, vsecp, 1650 &acl_ids)) != 0) { 1651 zfs_dirent_unlock(dl); 1652 ZFS_EXIT(zfsvfs); 1653 return (error); 1654 } 1655 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 1656 zfs_dirent_unlock(dl); 1657 ZFS_EXIT(zfsvfs); 1658 return (EDQUOT); 1659 } 1660 1661 /* 1662 * Add a new entry to the directory. 1663 */ 1664 tx = dmu_tx_create(zfsvfs->z_os); 1665 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); 1666 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); 1667 fuid_dirtied = zfsvfs->z_fuid_dirty; 1668 if (fuid_dirtied) 1669 zfs_fuid_txhold(zfsvfs, tx); 1670 if (acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) 1671 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 1672 0, SPA_MAXBLOCKSIZE); 1673 error = dmu_tx_assign(tx, TXG_NOWAIT); 1674 if (error) { 1675 zfs_acl_ids_free(&acl_ids); 1676 zfs_dirent_unlock(dl); 1677 if (error == ERESTART) { 1678 dmu_tx_wait(tx); 1679 dmu_tx_abort(tx); 1680 goto top; 1681 } 1682 dmu_tx_abort(tx); 1683 ZFS_EXIT(zfsvfs); 1684 return (error); 1685 } 1686 1687 /* 1688 * Create new node. 1689 */ 1690 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, &acl_ids); 1691 1692 if (fuid_dirtied) 1693 zfs_fuid_sync(zfsvfs, tx); 1694 /* 1695 * Now put new name in parent dir. 
1696 */ 1697 (void) zfs_link_create(dl, zp, tx, ZNEW); 1698 1699 *vpp = ZTOV(zp); 1700 1701 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap); 1702 if (flags & FIGNORECASE) 1703 txtype |= TX_CI; 1704 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, 1705 acl_ids.z_fuidp, vap); 1706 1707 zfs_acl_ids_free(&acl_ids); 1708 dmu_tx_commit(tx); 1709 1710 zfs_dirent_unlock(dl); 1711 1712 ZFS_EXIT(zfsvfs); 1713 return (0); 1714 } 1715 1716 /* 1717 * Remove a directory subdir entry. If the current working 1718 * directory is the same as the subdir to be removed, the 1719 * remove will fail. 1720 * 1721 * IN: dvp - vnode of directory to remove from. 1722 * name - name of directory to be removed. 1723 * cwd - vnode of current working directory. 1724 * cr - credentials of caller. 1725 * ct - caller context 1726 * flags - case flags 1727 * 1728 * RETURN: 0 if success 1729 * error code if failure 1730 * 1731 * Timestamps: 1732 * dvp - ctime|mtime updated 1733 */ 1734 /*ARGSUSED*/ 1735 static int 1736 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, 1737 caller_context_t *ct, int flags) 1738 { 1739 znode_t *dzp = VTOZ(dvp); 1740 znode_t *zp; 1741 vnode_t *vp; 1742 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1743 zilog_t *zilog; 1744 zfs_dirlock_t *dl; 1745 dmu_tx_t *tx; 1746 int error; 1747 int zflg = ZEXISTS; 1748 1749 ZFS_ENTER(zfsvfs); 1750 ZFS_VERIFY_ZP(dzp); 1751 zilog = zfsvfs->z_log; 1752 1753 if (flags & FIGNORECASE) 1754 zflg |= ZCILOOK; 1755 top: 1756 zp = NULL; 1757 1758 /* 1759 * Attempt to lock directory; fail if entry doesn't exist. 1760 */ 1761 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1762 NULL, NULL)) { 1763 ZFS_EXIT(zfsvfs); 1764 return (error); 1765 } 1766 1767 vp = ZTOV(zp); 1768 1769 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 1770 goto out; 1771 } 1772 1773 if (vp->v_type != VDIR) { 1774 error = ENOTDIR; 1775 goto out; 1776 } 1777 1778 if (vp == cwd) { 1779 error = EINVAL; 1780 goto out; 1781 } 1782 1783 vnevent_rmdir(vp, dvp, name, ct); 1784 1785 /* 1786 * Grab a lock on the directory to make sure that noone is 1787 * trying to add (or lookup) entries while we are removing it. 1788 */ 1789 rw_enter(&zp->z_name_lock, RW_WRITER); 1790 1791 /* 1792 * Grab a lock on the parent pointer to make sure we play well 1793 * with the treewalk and directory rename code. 1794 */ 1795 rw_enter(&zp->z_parent_lock, RW_WRITER); 1796 1797 tx = dmu_tx_create(zfsvfs->z_os); 1798 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 1799 dmu_tx_hold_bonus(tx, zp->z_id); 1800 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 1801 error = dmu_tx_assign(tx, TXG_NOWAIT); 1802 if (error) { 1803 rw_exit(&zp->z_parent_lock); 1804 rw_exit(&zp->z_name_lock); 1805 zfs_dirent_unlock(dl); 1806 VN_RELE(vp); 1807 if (error == ERESTART) { 1808 dmu_tx_wait(tx); 1809 dmu_tx_abort(tx); 1810 goto top; 1811 } 1812 dmu_tx_abort(tx); 1813 ZFS_EXIT(zfsvfs); 1814 return (error); 1815 } 1816 1817 error = zfs_link_destroy(dl, zp, tx, zflg, NULL); 1818 1819 if (error == 0) { 1820 uint64_t txtype = TX_RMDIR; 1821 if (flags & FIGNORECASE) 1822 txtype |= TX_CI; 1823 zfs_log_remove(zilog, tx, txtype, dzp, name); 1824 } 1825 1826 dmu_tx_commit(tx); 1827 1828 rw_exit(&zp->z_parent_lock); 1829 rw_exit(&zp->z_name_lock); 1830 out: 1831 zfs_dirent_unlock(dl); 1832 1833 VN_RELE(vp); 1834 1835 ZFS_EXIT(zfsvfs); 1836 return (error); 1837 } 1838 1839 /* 1840 * Read as many directory entries as will fit into the provided 1841 * buffer from the given directory cursor position (specified in 1842 * the uio structure. 
1843 * 1844 * IN: vp - vnode of directory to read. 1845 * uio - structure supplying read location, range info, 1846 * and return buffer. 1847 * cr - credentials of caller. 1848 * ct - caller context 1849 * flags - case flags 1850 * 1851 * OUT: uio - updated offset and range, buffer filled. 1852 * eofp - set to true if end-of-file detected. 1853 * 1854 * RETURN: 0 if success 1855 * error code if failure 1856 * 1857 * Timestamps: 1858 * vp - atime updated 1859 * 1860 * Note that the low 4 bits of the cookie returned by zap is always zero. 1861 * This allows us to use the low range for "special" directory entries: 1862 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem, 1863 * we use the offset 2 for the '.zfs' directory. 1864 */ 1865 /* ARGSUSED */ 1866 static int 1867 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, 1868 caller_context_t *ct, int flags) 1869 { 1870 znode_t *zp = VTOZ(vp); 1871 iovec_t *iovp; 1872 edirent_t *eodp; 1873 dirent64_t *odp; 1874 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1875 objset_t *os; 1876 caddr_t outbuf; 1877 size_t bufsize; 1878 zap_cursor_t zc; 1879 zap_attribute_t zap; 1880 uint_t bytes_wanted; 1881 uint64_t offset; /* must be unsigned; checks for < 1 */ 1882 int local_eof; 1883 int outcount; 1884 int error; 1885 uint8_t prefetch; 1886 boolean_t check_sysattrs; 1887 1888 ZFS_ENTER(zfsvfs); 1889 ZFS_VERIFY_ZP(zp); 1890 1891 /* 1892 * If we are not given an eof variable, 1893 * use a local one. 1894 */ 1895 if (eofp == NULL) 1896 eofp = &local_eof; 1897 1898 /* 1899 * Check for valid iov_len. 1900 */ 1901 if (uio->uio_iov->iov_len <= 0) { 1902 ZFS_EXIT(zfsvfs); 1903 return (EINVAL); 1904 } 1905 1906 /* 1907 * Quit if directory has been removed (posix) 1908 */ 1909 if ((*eofp = zp->z_unlinked) != 0) { 1910 ZFS_EXIT(zfsvfs); 1911 return (0); 1912 } 1913 1914 error = 0; 1915 os = zfsvfs->z_os; 1916 offset = uio->uio_loffset; 1917 prefetch = zp->z_zn_prefetch; 1918 1919 /* 1920 * Initialize the iterator cursor. 1921 */ 1922 if (offset <= 3) { 1923 /* 1924 * Start iteration from the beginning of the directory. 1925 */ 1926 zap_cursor_init(&zc, os, zp->z_id); 1927 } else { 1928 /* 1929 * The offset is a serialized cursor. 1930 */ 1931 zap_cursor_init_serialized(&zc, os, zp->z_id, offset); 1932 } 1933 1934 /* 1935 * Get space to change directory entries into fs independent format. 1936 */ 1937 iovp = uio->uio_iov; 1938 bytes_wanted = iovp->iov_len; 1939 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { 1940 bufsize = bytes_wanted; 1941 outbuf = kmem_alloc(bufsize, KM_SLEEP); 1942 odp = (struct dirent64 *)outbuf; 1943 } else { 1944 bufsize = bytes_wanted; 1945 odp = (struct dirent64 *)iovp->iov_base; 1946 } 1947 eodp = (struct edirent *)odp; 1948 1949 /* 1950 * If this VFS supports the system attribute view interface; and 1951 * we're looking at an extended attribute directory; and we care 1952 * about normalization conflicts on this vfs; then we must check 1953 * for normalization conflicts with the sysattr name space. 1954 */ 1955 check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && 1956 (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm && 1957 (flags & V_RDDIR_ENTFLAGS); 1958 1959 /* 1960 * Transform to file-system independent format 1961 */ 1962 outcount = 0; 1963 while (outcount < bytes_wanted) { 1964 ino64_t objnum; 1965 ushort_t reclen; 1966 off64_t *next; 1967 1968 /* 1969 * Special case `.', `..', and `.zfs'. 
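		 * Offsets 0 and 1 are synthesized here as "." and "..";
		 * offset 2 is synthesized as ".zfs" only when the control
		 * directory is visible at this root.  Later entries come
		 * from the ZAP cursor, whose serialized offsets have the
		 * low 4 bits clear and so never collide with these
		 * reserved values.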
1970 */ 1971 if (offset == 0) { 1972 (void) strcpy(zap.za_name, "."); 1973 zap.za_normalization_conflict = 0; 1974 objnum = zp->z_id; 1975 } else if (offset == 1) { 1976 (void) strcpy(zap.za_name, ".."); 1977 zap.za_normalization_conflict = 0; 1978 objnum = zp->z_phys->zp_parent; 1979 } else if (offset == 2 && zfs_show_ctldir(zp)) { 1980 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME); 1981 zap.za_normalization_conflict = 0; 1982 objnum = ZFSCTL_INO_ROOT; 1983 } else { 1984 /* 1985 * Grab next entry. 1986 */ 1987 if (error = zap_cursor_retrieve(&zc, &zap)) { 1988 if ((*eofp = (error == ENOENT)) != 0) 1989 break; 1990 else 1991 goto update; 1992 } 1993 1994 if (zap.za_integer_length != 8 || 1995 zap.za_num_integers != 1) { 1996 cmn_err(CE_WARN, "zap_readdir: bad directory " 1997 "entry, obj = %lld, offset = %lld\n", 1998 (u_longlong_t)zp->z_id, 1999 (u_longlong_t)offset); 2000 error = ENXIO; 2001 goto update; 2002 } 2003 2004 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); 2005 /* 2006 * MacOS X can extract the object type here such as: 2007 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer); 2008 */ 2009 2010 if (check_sysattrs && !zap.za_normalization_conflict) { 2011 zap.za_normalization_conflict = 2012 xattr_sysattr_casechk(zap.za_name); 2013 } 2014 } 2015 2016 if (flags & V_RDDIR_ENTFLAGS) 2017 reclen = EDIRENT_RECLEN(strlen(zap.za_name)); 2018 else 2019 reclen = DIRENT64_RECLEN(strlen(zap.za_name)); 2020 2021 /* 2022 * Will this entry fit in the buffer? 2023 */ 2024 if (outcount + reclen > bufsize) { 2025 /* 2026 * Did we manage to fit anything in the buffer? 2027 */ 2028 if (!outcount) { 2029 error = EINVAL; 2030 goto update; 2031 } 2032 break; 2033 } 2034 if (flags & V_RDDIR_ENTFLAGS) { 2035 /* 2036 * Add extended flag entry: 2037 */ 2038 eodp->ed_ino = objnum; 2039 eodp->ed_reclen = reclen; 2040 /* NOTE: ed_off is the offset for the *next* entry */ 2041 next = &(eodp->ed_off); 2042 eodp->ed_eflags = zap.za_normalization_conflict ? 2043 ED_CASE_CONFLICT : 0; 2044 (void) strncpy(eodp->ed_name, zap.za_name, 2045 EDIRENT_NAMELEN(reclen)); 2046 eodp = (edirent_t *)((intptr_t)eodp + reclen); 2047 } else { 2048 /* 2049 * Add normal entry: 2050 */ 2051 odp->d_ino = objnum; 2052 odp->d_reclen = reclen; 2053 /* NOTE: d_off is the offset for the *next* entry */ 2054 next = &(odp->d_off); 2055 (void) strncpy(odp->d_name, zap.za_name, 2056 DIRENT64_NAMELEN(reclen)); 2057 odp = (dirent64_t *)((intptr_t)odp + reclen); 2058 } 2059 outcount += reclen; 2060 2061 ASSERT(outcount <= bufsize); 2062 2063 /* Prefetch znode */ 2064 if (prefetch) 2065 dmu_prefetch(os, objnum, 0, 0); 2066 2067 /* 2068 * Move to the next entry, fill in the previous offset. 2069 */ 2070 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) { 2071 zap_cursor_advance(&zc); 2072 offset = zap_cursor_serialize(&zc); 2073 } else { 2074 offset += 1; 2075 } 2076 *next = offset; 2077 } 2078 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */ 2079 2080 if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { 2081 iovp->iov_base += outcount; 2082 iovp->iov_len -= outcount; 2083 uio->uio_resid -= outcount; 2084 } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) { 2085 /* 2086 * Reset the pointer. 
2087 */ 2088 offset = uio->uio_loffset; 2089 } 2090 2091 update: 2092 zap_cursor_fini(&zc); 2093 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) 2094 kmem_free(outbuf, bufsize); 2095 2096 if (error == ENOENT) 2097 error = 0; 2098 2099 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 2100 2101 uio->uio_loffset = offset; 2102 ZFS_EXIT(zfsvfs); 2103 return (error); 2104 } 2105 2106 ulong_t zfs_fsync_sync_cnt = 4; 2107 2108 static int 2109 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) 2110 { 2111 znode_t *zp = VTOZ(vp); 2112 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2113 2114 /* 2115 * Regardless of whether this is required for standards conformance, 2116 * this is the logical behavior when fsync() is called on a file with 2117 * dirty pages. We use B_ASYNC since the ZIL transactions are already 2118 * going to be pushed out as part of the zil_commit(). 2119 */ 2120 if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) && 2121 (vp->v_type == VREG) && !(IS_SWAPVP(vp))) 2122 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct); 2123 2124 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); 2125 2126 ZFS_ENTER(zfsvfs); 2127 ZFS_VERIFY_ZP(zp); 2128 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); 2129 ZFS_EXIT(zfsvfs); 2130 return (0); 2131 } 2132 2133 2134 /* 2135 * Get the requested file attributes and place them in the provided 2136 * vattr structure. 2137 * 2138 * IN: vp - vnode of file. 2139 * vap - va_mask identifies requested attributes. 2140 * If AT_XVATTR set, then optional attrs are requested 2141 * flags - ATTR_NOACLCHECK (CIFS server context) 2142 * cr - credentials of caller. 2143 * ct - caller context 2144 * 2145 * OUT: vap - attribute values. 2146 * 2147 * RETURN: 0 (always succeeds) 2148 */ 2149 /* ARGSUSED */ 2150 static int 2151 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2152 caller_context_t *ct) 2153 { 2154 znode_t *zp = VTOZ(vp); 2155 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2156 znode_phys_t *pzp; 2157 int error = 0; 2158 uint64_t links; 2159 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2160 xoptattr_t *xoap = NULL; 2161 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 2162 2163 ZFS_ENTER(zfsvfs); 2164 ZFS_VERIFY_ZP(zp); 2165 pzp = zp->z_phys; 2166 2167 mutex_enter(&zp->z_lock); 2168 2169 /* 2170 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES. 2171 * Also, if we are the owner don't bother, since owner should 2172 * always be allowed to read basic attributes of file. 2173 */ 2174 if (!(pzp->zp_flags & ZFS_ACL_TRIVIAL) && 2175 (pzp->zp_uid != crgetuid(cr))) { 2176 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, 2177 skipaclchk, cr)) { 2178 mutex_exit(&zp->z_lock); 2179 ZFS_EXIT(zfsvfs); 2180 return (error); 2181 } 2182 } 2183 2184 /* 2185 * Return all attributes. It's cheaper to provide the answer 2186 * than to determine whether we were asked the question. 2187 */ 2188 2189 vap->va_type = vp->v_type; 2190 vap->va_mode = pzp->zp_mode & MODEMASK; 2191 zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid); 2192 vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev; 2193 vap->va_nodeid = zp->z_id; 2194 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp)) 2195 links = pzp->zp_links + 1; 2196 else 2197 links = pzp->zp_links; 2198 vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! */ 2199 vap->va_size = pzp->zp_size; 2200 vap->va_rdev = vp->v_rdev; 2201 vap->va_seq = zp->z_seq; 2202 2203 /* 2204 * Add in any requested optional attributes and the create time. 
2205 * Also set the corresponding bits in the returned attribute bitmap. 2206 */ 2207 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { 2208 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { 2209 xoap->xoa_archive = 2210 ((pzp->zp_flags & ZFS_ARCHIVE) != 0); 2211 XVA_SET_RTN(xvap, XAT_ARCHIVE); 2212 } 2213 2214 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { 2215 xoap->xoa_readonly = 2216 ((pzp->zp_flags & ZFS_READONLY) != 0); 2217 XVA_SET_RTN(xvap, XAT_READONLY); 2218 } 2219 2220 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { 2221 xoap->xoa_system = 2222 ((pzp->zp_flags & ZFS_SYSTEM) != 0); 2223 XVA_SET_RTN(xvap, XAT_SYSTEM); 2224 } 2225 2226 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { 2227 xoap->xoa_hidden = 2228 ((pzp->zp_flags & ZFS_HIDDEN) != 0); 2229 XVA_SET_RTN(xvap, XAT_HIDDEN); 2230 } 2231 2232 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2233 xoap->xoa_nounlink = 2234 ((pzp->zp_flags & ZFS_NOUNLINK) != 0); 2235 XVA_SET_RTN(xvap, XAT_NOUNLINK); 2236 } 2237 2238 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2239 xoap->xoa_immutable = 2240 ((pzp->zp_flags & ZFS_IMMUTABLE) != 0); 2241 XVA_SET_RTN(xvap, XAT_IMMUTABLE); 2242 } 2243 2244 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2245 xoap->xoa_appendonly = 2246 ((pzp->zp_flags & ZFS_APPENDONLY) != 0); 2247 XVA_SET_RTN(xvap, XAT_APPENDONLY); 2248 } 2249 2250 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2251 xoap->xoa_nodump = 2252 ((pzp->zp_flags & ZFS_NODUMP) != 0); 2253 XVA_SET_RTN(xvap, XAT_NODUMP); 2254 } 2255 2256 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { 2257 xoap->xoa_opaque = 2258 ((pzp->zp_flags & ZFS_OPAQUE) != 0); 2259 XVA_SET_RTN(xvap, XAT_OPAQUE); 2260 } 2261 2262 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2263 xoap->xoa_av_quarantined = 2264 ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0); 2265 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); 2266 } 2267 2268 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2269 xoap->xoa_av_modified = 2270 ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0); 2271 XVA_SET_RTN(xvap, XAT_AV_MODIFIED); 2272 } 2273 2274 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) && 2275 vp->v_type == VREG && 2276 (pzp->zp_flags & ZFS_BONUS_SCANSTAMP)) { 2277 size_t len; 2278 dmu_object_info_t doi; 2279 2280 /* 2281 * Only VREG files have anti-virus scanstamps, so we 2282 * won't conflict with symlinks in the bonus buffer. 2283 */ 2284 dmu_object_info_from_db(zp->z_dbuf, &doi); 2285 len = sizeof (xoap->xoa_av_scanstamp) + 2286 sizeof (znode_phys_t); 2287 if (len <= doi.doi_bonus_size) { 2288 /* 2289 * pzp points to the start of the 2290 * znode_phys_t. pzp + 1 points to the 2291 * first byte after the znode_phys_t. 2292 */ 2293 (void) memcpy(xoap->xoa_av_scanstamp, 2294 pzp + 1, 2295 sizeof (xoap->xoa_av_scanstamp)); 2296 XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP); 2297 } 2298 } 2299 2300 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { 2301 ZFS_TIME_DECODE(&xoap->xoa_createtime, pzp->zp_crtime); 2302 XVA_SET_RTN(xvap, XAT_CREATETIME); 2303 } 2304 } 2305 2306 ZFS_TIME_DECODE(&vap->va_atime, pzp->zp_atime); 2307 ZFS_TIME_DECODE(&vap->va_mtime, pzp->zp_mtime); 2308 ZFS_TIME_DECODE(&vap->va_ctime, pzp->zp_ctime); 2309 2310 mutex_exit(&zp->z_lock); 2311 2312 dmu_object_size_from_db(zp->z_dbuf, &vap->va_blksize, &vap->va_nblocks); 2313 2314 if (zp->z_blksz == 0) { 2315 /* 2316 * Block size hasn't been set; suggest maximal I/O transfers. 2317 */ 2318 vap->va_blksize = zfsvfs->z_max_blksz; 2319 } 2320 2321 ZFS_EXIT(zfsvfs); 2322 return (0); 2323 } 2324 2325 /* 2326 * Set the file attributes to the values contained in the 2327 * vattr structure. 
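 *
 * For example, a chmod(2) typically arrives here with va_mask == AT_MODE
 * and the new permission bits in va_mode, while utimes(2) arrives with
 * AT_ATIME|AT_MTIME set (illustrative of common callers only).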
2328 * 2329 * IN: vp - vnode of file to be modified. 2330 * vap - new attribute values. 2331 * If AT_XVATTR set, then optional attrs are being set 2332 * flags - ATTR_UTIME set if non-default time values provided. 2333 * - ATTR_NOACLCHECK (CIFS context only). 2334 * cr - credentials of caller. 2335 * ct - caller context 2336 * 2337 * RETURN: 0 if success 2338 * error code if failure 2339 * 2340 * Timestamps: 2341 * vp - ctime updated, mtime updated if size changed. 2342 */ 2343 /* ARGSUSED */ 2344 static int 2345 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2346 caller_context_t *ct) 2347 { 2348 znode_t *zp = VTOZ(vp); 2349 znode_phys_t *pzp; 2350 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2351 zilog_t *zilog; 2352 dmu_tx_t *tx; 2353 vattr_t oldva; 2354 xvattr_t tmpxvattr; 2355 uint_t mask = vap->va_mask; 2356 uint_t saved_mask; 2357 int trim_mask = 0; 2358 uint64_t new_mode; 2359 uint64_t new_uid, new_gid; 2360 znode_t *attrzp; 2361 int need_policy = FALSE; 2362 int err; 2363 zfs_fuid_info_t *fuidp = NULL; 2364 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2365 xoptattr_t *xoap; 2366 zfs_acl_t *aclp = NULL; 2367 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 2368 boolean_t fuid_dirtied = B_FALSE; 2369 2370 if (mask == 0) 2371 return (0); 2372 2373 if (mask & AT_NOSET) 2374 return (EINVAL); 2375 2376 ZFS_ENTER(zfsvfs); 2377 ZFS_VERIFY_ZP(zp); 2378 2379 pzp = zp->z_phys; 2380 zilog = zfsvfs->z_log; 2381 2382 /* 2383 * Make sure that if we have ephemeral uid/gid or xvattr specified 2384 * that file system is at proper version level 2385 */ 2386 2387 if (zfsvfs->z_use_fuids == B_FALSE && 2388 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || 2389 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) || 2390 (mask & AT_XVATTR))) { 2391 ZFS_EXIT(zfsvfs); 2392 return (EINVAL); 2393 } 2394 2395 if (mask & AT_SIZE && vp->v_type == VDIR) { 2396 ZFS_EXIT(zfsvfs); 2397 return (EISDIR); 2398 } 2399 2400 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) { 2401 ZFS_EXIT(zfsvfs); 2402 return (EINVAL); 2403 } 2404 2405 /* 2406 * If this is an xvattr_t, then get a pointer to the structure of 2407 * optional attributes. If this is NULL, then we have a vattr_t. 2408 */ 2409 xoap = xva_getxoptattr(xvap); 2410 2411 xva_init(&tmpxvattr); 2412 2413 /* 2414 * Immutable files can only alter immutable bit and atime 2415 */ 2416 if ((pzp->zp_flags & ZFS_IMMUTABLE) && 2417 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) || 2418 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { 2419 ZFS_EXIT(zfsvfs); 2420 return (EPERM); 2421 } 2422 2423 if ((mask & AT_SIZE) && (pzp->zp_flags & ZFS_READONLY)) { 2424 ZFS_EXIT(zfsvfs); 2425 return (EPERM); 2426 } 2427 2428 /* 2429 * Verify timestamps doesn't overflow 32 bits. 2430 * ZFS can handle large timestamps, but 32bit syscalls can't 2431 * handle times greater than 2039. This check should be removed 2432 * once large timestamps are fully supported. 
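 * (TIMESPEC_OVERFLOW() below rejects any tv_sec value that does not
 * fit in a 32-bit time_t, i.e. anything past 0x7fffffff seconds after
 * the epoch, and the operation fails with EOVERFLOW.)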
2433 */ 2434 if (mask & (AT_ATIME | AT_MTIME)) { 2435 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || 2436 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { 2437 ZFS_EXIT(zfsvfs); 2438 return (EOVERFLOW); 2439 } 2440 } 2441 2442 top: 2443 attrzp = NULL; 2444 2445 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { 2446 ZFS_EXIT(zfsvfs); 2447 return (EROFS); 2448 } 2449 2450 /* 2451 * First validate permissions 2452 */ 2453 2454 if (mask & AT_SIZE) { 2455 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr); 2456 if (err) { 2457 ZFS_EXIT(zfsvfs); 2458 return (err); 2459 } 2460 /* 2461 * XXX - Note, we are not providing any open 2462 * mode flags here (like FNDELAY), so we may 2463 * block if there are locks present... this 2464 * should be addressed in openat(). 2465 */ 2466 /* XXX - would it be OK to generate a log record here? */ 2467 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE); 2468 if (err) { 2469 ZFS_EXIT(zfsvfs); 2470 return (err); 2471 } 2472 } 2473 2474 if (mask & (AT_ATIME|AT_MTIME) || 2475 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) || 2476 XVA_ISSET_REQ(xvap, XAT_READONLY) || 2477 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) || 2478 XVA_ISSET_REQ(xvap, XAT_CREATETIME) || 2479 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) 2480 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0, 2481 skipaclchk, cr); 2482 2483 if (mask & (AT_UID|AT_GID)) { 2484 int idmask = (mask & (AT_UID|AT_GID)); 2485 int take_owner; 2486 int take_group; 2487 2488 /* 2489 * NOTE: even if a new mode is being set, 2490 * we may clear S_ISUID/S_ISGID bits. 2491 */ 2492 2493 if (!(mask & AT_MODE)) 2494 vap->va_mode = pzp->zp_mode; 2495 2496 /* 2497 * Take ownership or chgrp to group we are a member of 2498 */ 2499 2500 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr)); 2501 take_group = (mask & AT_GID) && 2502 zfs_groupmember(zfsvfs, vap->va_gid, cr); 2503 2504 /* 2505 * If both AT_UID and AT_GID are set then take_owner and 2506 * take_group must both be set in order to allow taking 2507 * ownership. 2508 * 2509 * Otherwise, send the check through secpolicy_vnode_setattr() 2510 * 2511 */ 2512 2513 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) || 2514 ((idmask == AT_UID) && take_owner) || 2515 ((idmask == AT_GID) && take_group)) { 2516 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0, 2517 skipaclchk, cr) == 0) { 2518 /* 2519 * Remove setuid/setgid for non-privileged users 2520 */ 2521 secpolicy_setid_clear(vap, cr); 2522 trim_mask = (mask & (AT_UID|AT_GID)); 2523 } else { 2524 need_policy = TRUE; 2525 } 2526 } else { 2527 need_policy = TRUE; 2528 } 2529 } 2530 2531 mutex_enter(&zp->z_lock); 2532 oldva.va_mode = pzp->zp_mode; 2533 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid); 2534 if (mask & AT_XVATTR) { 2535 /* 2536 * Update xvattr mask to include only those attributes 2537 * that are actually changing. 2538 * 2539 * the bits will be restored prior to actually setting 2540 * the attributes so the caller thinks they were set. 
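 *
 * For example, a request to set XAT_NODUMP to the value it already has
 * is cleared from the request mask here (so no policy check or write is
 * done for it) and remembered in tmpxvattr, so the corresponding return
 * bit can still be set for the caller further down.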
2541 */ 2542 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2543 if (xoap->xoa_appendonly != 2544 ((pzp->zp_flags & ZFS_APPENDONLY) != 0)) { 2545 need_policy = TRUE; 2546 } else { 2547 XVA_CLR_REQ(xvap, XAT_APPENDONLY); 2548 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY); 2549 } 2550 } 2551 2552 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2553 if (xoap->xoa_nounlink != 2554 ((pzp->zp_flags & ZFS_NOUNLINK) != 0)) { 2555 need_policy = TRUE; 2556 } else { 2557 XVA_CLR_REQ(xvap, XAT_NOUNLINK); 2558 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK); 2559 } 2560 } 2561 2562 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2563 if (xoap->xoa_immutable != 2564 ((pzp->zp_flags & ZFS_IMMUTABLE) != 0)) { 2565 need_policy = TRUE; 2566 } else { 2567 XVA_CLR_REQ(xvap, XAT_IMMUTABLE); 2568 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE); 2569 } 2570 } 2571 2572 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2573 if (xoap->xoa_nodump != 2574 ((pzp->zp_flags & ZFS_NODUMP) != 0)) { 2575 need_policy = TRUE; 2576 } else { 2577 XVA_CLR_REQ(xvap, XAT_NODUMP); 2578 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP); 2579 } 2580 } 2581 2582 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2583 if (xoap->xoa_av_modified != 2584 ((pzp->zp_flags & ZFS_AV_MODIFIED) != 0)) { 2585 need_policy = TRUE; 2586 } else { 2587 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED); 2588 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED); 2589 } 2590 } 2591 2592 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2593 if ((vp->v_type != VREG && 2594 xoap->xoa_av_quarantined) || 2595 xoap->xoa_av_quarantined != 2596 ((pzp->zp_flags & ZFS_AV_QUARANTINED) != 0)) { 2597 need_policy = TRUE; 2598 } else { 2599 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED); 2600 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED); 2601 } 2602 } 2603 2604 if (need_policy == FALSE && 2605 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) || 2606 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) { 2607 need_policy = TRUE; 2608 } 2609 } 2610 2611 mutex_exit(&zp->z_lock); 2612 2613 if (mask & AT_MODE) { 2614 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) { 2615 err = secpolicy_setid_setsticky_clear(vp, vap, 2616 &oldva, cr); 2617 if (err) { 2618 ZFS_EXIT(zfsvfs); 2619 return (err); 2620 } 2621 trim_mask |= AT_MODE; 2622 } else { 2623 need_policy = TRUE; 2624 } 2625 } 2626 2627 if (need_policy) { 2628 /* 2629 * If trim_mask is set then take ownership 2630 * has been granted or write_acl is present and user 2631 * has the ability to modify mode. In that case remove 2632 * UID|GID and or MODE from mask so that 2633 * secpolicy_vnode_setattr() doesn't revoke it. 
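 * (The original mask is saved in saved_mask and restored right after
 * the secpolicy_vnode_setattr() call, so the trimmed bits are still
 * applied when the attributes are set below.)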
2634 */ 2635 2636 if (trim_mask) { 2637 saved_mask = vap->va_mask; 2638 vap->va_mask &= ~trim_mask; 2639 } 2640 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags, 2641 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp); 2642 if (err) { 2643 ZFS_EXIT(zfsvfs); 2644 return (err); 2645 } 2646 2647 if (trim_mask) 2648 vap->va_mask |= saved_mask; 2649 } 2650 2651 /* 2652 * secpolicy_vnode_setattr, or take ownership may have 2653 * changed va_mask 2654 */ 2655 mask = vap->va_mask; 2656 2657 tx = dmu_tx_create(zfsvfs->z_os); 2658 dmu_tx_hold_bonus(tx, zp->z_id); 2659 2660 if (mask & AT_MODE) { 2661 uint64_t pmode = pzp->zp_mode; 2662 2663 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT); 2664 2665 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)) 2666 goto out; 2667 if (pzp->zp_acl.z_acl_extern_obj) { 2668 /* Are we upgrading ACL from old V0 format to new V1 */ 2669 if (zfsvfs->z_version <= ZPL_VERSION_FUID && 2670 pzp->zp_acl.z_acl_version == 2671 ZFS_ACL_VERSION_INITIAL) { 2672 dmu_tx_hold_free(tx, 2673 pzp->zp_acl.z_acl_extern_obj, 0, 2674 DMU_OBJECT_END); 2675 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 2676 0, aclp->z_acl_bytes); 2677 } else { 2678 dmu_tx_hold_write(tx, 2679 pzp->zp_acl.z_acl_extern_obj, 0, 2680 aclp->z_acl_bytes); 2681 } 2682 } else if (aclp->z_acl_bytes > ZFS_ACE_SPACE) { 2683 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 2684 0, aclp->z_acl_bytes); 2685 } 2686 } 2687 2688 if (mask & (AT_UID | AT_GID)) { 2689 if (pzp->zp_xattr) { 2690 err = zfs_zget(zp->z_zfsvfs, pzp->zp_xattr, &attrzp); 2691 if (err) 2692 goto out; 2693 dmu_tx_hold_bonus(tx, attrzp->z_id); 2694 } 2695 if (mask & AT_UID) { 2696 new_uid = zfs_fuid_create(zfsvfs, 2697 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp); 2698 if (new_uid != pzp->zp_uid && 2699 zfs_usergroup_overquota(zfsvfs, B_FALSE, new_uid)) { 2700 err = EDQUOT; 2701 goto out; 2702 } 2703 } 2704 2705 if (mask & AT_GID) { 2706 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, 2707 cr, ZFS_GROUP, &fuidp); 2708 if (new_gid != pzp->zp_gid && 2709 zfs_usergroup_overquota(zfsvfs, B_TRUE, new_gid)) { 2710 err = EDQUOT; 2711 goto out; 2712 } 2713 } 2714 fuid_dirtied = zfsvfs->z_fuid_dirty; 2715 if (fuid_dirtied) { 2716 if (zfsvfs->z_fuid_obj == 0) { 2717 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); 2718 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 2719 FUID_SIZE_ESTIMATE(zfsvfs)); 2720 dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, 2721 FALSE, NULL); 2722 } else { 2723 dmu_tx_hold_bonus(tx, zfsvfs->z_fuid_obj); 2724 dmu_tx_hold_write(tx, zfsvfs->z_fuid_obj, 0, 2725 FUID_SIZE_ESTIMATE(zfsvfs)); 2726 } 2727 } 2728 } 2729 2730 err = dmu_tx_assign(tx, TXG_NOWAIT); 2731 if (err) { 2732 if (err == ERESTART) 2733 dmu_tx_wait(tx); 2734 goto out; 2735 } 2736 2737 dmu_buf_will_dirty(zp->z_dbuf, tx); 2738 2739 /* 2740 * Set each attribute requested. 2741 * We group settings according to the locks they need to acquire. 2742 * 2743 * Note: you cannot set ctime directly, although it will be 2744 * updated as a side-effect of calling this function. 
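 *
 * For example, a utimes(2) call that sets only atime/mtime still has
 * its ctime bumped here, via the STATE_CHANGED time stamp taken after
 * the explicit timestamps are encoded.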
2745 */ 2746 2747 mutex_enter(&zp->z_lock); 2748 2749 if (mask & AT_MODE) { 2750 mutex_enter(&zp->z_acl_lock); 2751 zp->z_phys->zp_mode = new_mode; 2752 err = zfs_aclset_common(zp, aclp, cr, tx); 2753 ASSERT3U(err, ==, 0); 2754 mutex_exit(&zp->z_acl_lock); 2755 } 2756 2757 if (attrzp) 2758 mutex_enter(&attrzp->z_lock); 2759 2760 if (mask & AT_UID) { 2761 pzp->zp_uid = new_uid; 2762 if (attrzp) 2763 attrzp->z_phys->zp_uid = new_uid; 2764 } 2765 2766 if (mask & AT_GID) { 2767 pzp->zp_gid = new_gid; 2768 if (attrzp) 2769 attrzp->z_phys->zp_gid = new_gid; 2770 } 2771 2772 if (attrzp) 2773 mutex_exit(&attrzp->z_lock); 2774 2775 if (mask & AT_ATIME) 2776 ZFS_TIME_ENCODE(&vap->va_atime, pzp->zp_atime); 2777 2778 if (mask & AT_MTIME) 2779 ZFS_TIME_ENCODE(&vap->va_mtime, pzp->zp_mtime); 2780 2781 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */ 2782 if (mask & AT_SIZE) 2783 zfs_time_stamper_locked(zp, CONTENT_MODIFIED, tx); 2784 else if (mask != 0) 2785 zfs_time_stamper_locked(zp, STATE_CHANGED, tx); 2786 /* 2787 * Do this after setting timestamps to prevent timestamp 2788 * update from toggling bit 2789 */ 2790 2791 if (xoap && (mask & AT_XVATTR)) { 2792 2793 /* 2794 * restore trimmed off masks 2795 * so that return masks can be set for caller. 2796 */ 2797 2798 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) { 2799 XVA_SET_REQ(xvap, XAT_APPENDONLY); 2800 } 2801 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) { 2802 XVA_SET_REQ(xvap, XAT_NOUNLINK); 2803 } 2804 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) { 2805 XVA_SET_REQ(xvap, XAT_IMMUTABLE); 2806 } 2807 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) { 2808 XVA_SET_REQ(xvap, XAT_NODUMP); 2809 } 2810 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) { 2811 XVA_SET_REQ(xvap, XAT_AV_MODIFIED); 2812 } 2813 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) { 2814 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED); 2815 } 2816 2817 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) { 2818 size_t len; 2819 dmu_object_info_t doi; 2820 2821 ASSERT(vp->v_type == VREG); 2822 2823 /* Grow the bonus buffer if necessary. */ 2824 dmu_object_info_from_db(zp->z_dbuf, &doi); 2825 len = sizeof (xoap->xoa_av_scanstamp) + 2826 sizeof (znode_phys_t); 2827 if (len > doi.doi_bonus_size) 2828 VERIFY(dmu_set_bonus(zp->z_dbuf, len, tx) == 0); 2829 } 2830 zfs_xvattr_set(zp, xvap); 2831 } 2832 2833 if (fuid_dirtied) 2834 zfs_fuid_sync(zfsvfs, tx); 2835 2836 if (mask != 0) 2837 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp); 2838 2839 mutex_exit(&zp->z_lock); 2840 2841 out: 2842 if (attrzp) 2843 VN_RELE(ZTOV(attrzp)); 2844 2845 if (aclp) { 2846 zfs_acl_free(aclp); 2847 aclp = NULL; 2848 } 2849 2850 if (fuidp) { 2851 zfs_fuid_info_free(fuidp); 2852 fuidp = NULL; 2853 } 2854 2855 if (err) 2856 dmu_tx_abort(tx); 2857 else 2858 dmu_tx_commit(tx); 2859 2860 if (err == ERESTART) 2861 goto top; 2862 2863 ZFS_EXIT(zfsvfs); 2864 return (err); 2865 } 2866 2867 typedef struct zfs_zlock { 2868 krwlock_t *zl_rwlock; /* lock we acquired */ 2869 znode_t *zl_znode; /* znode we held */ 2870 struct zfs_zlock *zl_next; /* next in list */ 2871 } zfs_zlock_t; 2872 2873 /* 2874 * Drop locks and release vnodes that were held by zfs_rename_lock(). 
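 * The list is unwound from its head, which holds the most recently
 * acquired lock, so locks are dropped in the reverse of the order in
 * which zfs_rename_lock() took them.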
2875 */ 2876 static void 2877 zfs_rename_unlock(zfs_zlock_t **zlpp) 2878 { 2879 zfs_zlock_t *zl; 2880 2881 while ((zl = *zlpp) != NULL) { 2882 if (zl->zl_znode != NULL) 2883 VN_RELE(ZTOV(zl->zl_znode)); 2884 rw_exit(zl->zl_rwlock); 2885 *zlpp = zl->zl_next; 2886 kmem_free(zl, sizeof (*zl)); 2887 } 2888 } 2889 2890 /* 2891 * Search back through the directory tree, using the ".." entries. 2892 * Lock each directory in the chain to prevent concurrent renames. 2893 * Fail any attempt to move a directory into one of its own descendants. 2894 * XXX - z_parent_lock can overlap with map or grow locks 2895 */ 2896 static int 2897 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) 2898 { 2899 zfs_zlock_t *zl; 2900 znode_t *zp = tdzp; 2901 uint64_t rootid = zp->z_zfsvfs->z_root; 2902 uint64_t *oidp = &zp->z_id; 2903 krwlock_t *rwlp = &szp->z_parent_lock; 2904 krw_t rw = RW_WRITER; 2905 2906 /* 2907 * First pass write-locks szp and compares to zp->z_id. 2908 * Later passes read-lock zp and compare to zp->z_parent. 2909 */ 2910 do { 2911 if (!rw_tryenter(rwlp, rw)) { 2912 /* 2913 * Another thread is renaming in this path. 2914 * Note that if we are a WRITER, we don't have any 2915 * parent_locks held yet. 2916 */ 2917 if (rw == RW_READER && zp->z_id > szp->z_id) { 2918 /* 2919 * Drop our locks and restart 2920 */ 2921 zfs_rename_unlock(&zl); 2922 *zlpp = NULL; 2923 zp = tdzp; 2924 oidp = &zp->z_id; 2925 rwlp = &szp->z_parent_lock; 2926 rw = RW_WRITER; 2927 continue; 2928 } else { 2929 /* 2930 * Wait for other thread to drop its locks 2931 */ 2932 rw_enter(rwlp, rw); 2933 } 2934 } 2935 2936 zl = kmem_alloc(sizeof (*zl), KM_SLEEP); 2937 zl->zl_rwlock = rwlp; 2938 zl->zl_znode = NULL; 2939 zl->zl_next = *zlpp; 2940 *zlpp = zl; 2941 2942 if (*oidp == szp->z_id) /* We're a descendant of szp */ 2943 return (EINVAL); 2944 2945 if (*oidp == rootid) /* We've hit the top */ 2946 return (0); 2947 2948 if (rw == RW_READER) { /* i.e. not the first pass */ 2949 int error = zfs_zget(zp->z_zfsvfs, *oidp, &zp); 2950 if (error) 2951 return (error); 2952 zl->zl_znode = zp; 2953 } 2954 oidp = &zp->z_phys->zp_parent; 2955 rwlp = &zp->z_parent_lock; 2956 rw = RW_READER; 2957 2958 } while (zp->z_id != sdzp->z_id); 2959 2960 return (0); 2961 } 2962 2963 /* 2964 * Move an entry from the provided source directory to the target 2965 * directory. Change the entry name as indicated. 2966 * 2967 * IN: sdvp - Source directory containing the "old entry". 2968 * snm - Old entry name. 2969 * tdvp - Target directory to contain the "new entry". 2970 * tnm - New entry name. 2971 * cr - credentials of caller. 2972 * ct - caller context 2973 * flags - case flags 2974 * 2975 * RETURN: 0 if success 2976 * error code if failure 2977 * 2978 * Timestamps: 2979 * sdvp,tdvp - ctime|mtime updated 2980 */ 2981 /*ARGSUSED*/ 2982 static int 2983 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr, 2984 caller_context_t *ct, int flags) 2985 { 2986 znode_t *tdzp, *szp, *tzp; 2987 znode_t *sdzp = VTOZ(sdvp); 2988 zfsvfs_t *zfsvfs = sdzp->z_zfsvfs; 2989 zilog_t *zilog; 2990 vnode_t *realvp; 2991 zfs_dirlock_t *sdl, *tdl; 2992 dmu_tx_t *tx; 2993 zfs_zlock_t *zl; 2994 int cmp, serr, terr; 2995 int error = 0; 2996 int zflg = 0; 2997 2998 ZFS_ENTER(zfsvfs); 2999 ZFS_VERIFY_ZP(sdzp); 3000 zilog = zfsvfs->z_log; 3001 3002 /* 3003 * Make sure we have the real vp for the target directory. 
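 * (For instance, if tdvp came from a stacking file system such as lofs,
 * VOP_REALVP() returns the underlying ZFS vnode; for a plain ZFS vnode
 * the call fails and tdvp is used unchanged.)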
3004 */ 3005 if (VOP_REALVP(tdvp, &realvp, ct) == 0) 3006 tdvp = realvp; 3007 3008 if (tdvp->v_vfsp != sdvp->v_vfsp) { 3009 ZFS_EXIT(zfsvfs); 3010 return (EXDEV); 3011 } 3012 3013 tdzp = VTOZ(tdvp); 3014 ZFS_VERIFY_ZP(tdzp); 3015 if (zfsvfs->z_utf8 && u8_validate(tnm, 3016 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3017 ZFS_EXIT(zfsvfs); 3018 return (EILSEQ); 3019 } 3020 3021 if (flags & FIGNORECASE) 3022 zflg |= ZCILOOK; 3023 3024 top: 3025 szp = NULL; 3026 tzp = NULL; 3027 zl = NULL; 3028 3029 /* 3030 * This is to prevent the creation of links into attribute space 3031 * by renaming a linked file into/outof an attribute directory. 3032 * See the comment in zfs_link() for why this is considered bad. 3033 */ 3034 if ((tdzp->z_phys->zp_flags & ZFS_XATTR) != 3035 (sdzp->z_phys->zp_flags & ZFS_XATTR)) { 3036 ZFS_EXIT(zfsvfs); 3037 return (EINVAL); 3038 } 3039 3040 /* 3041 * Lock source and target directory entries. To prevent deadlock, 3042 * a lock ordering must be defined. We lock the directory with 3043 * the smallest object id first, or if it's a tie, the one with 3044 * the lexically first name. 3045 */ 3046 if (sdzp->z_id < tdzp->z_id) { 3047 cmp = -1; 3048 } else if (sdzp->z_id > tdzp->z_id) { 3049 cmp = 1; 3050 } else { 3051 /* 3052 * First compare the two name arguments without 3053 * considering any case folding. 3054 */ 3055 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER); 3056 3057 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error); 3058 ASSERT(error == 0 || !zfsvfs->z_utf8); 3059 if (cmp == 0) { 3060 /* 3061 * POSIX: "If the old argument and the new argument 3062 * both refer to links to the same existing file, 3063 * the rename() function shall return successfully 3064 * and perform no other action." 3065 */ 3066 ZFS_EXIT(zfsvfs); 3067 return (0); 3068 } 3069 /* 3070 * If the file system is case-folding, then we may 3071 * have some more checking to do. A case-folding file 3072 * system is either supporting mixed case sensitivity 3073 * access or is completely case-insensitive. Note 3074 * that the file system is always case preserving. 3075 * 3076 * In mixed sensitivity mode case sensitive behavior 3077 * is the default. FIGNORECASE must be used to 3078 * explicitly request case insensitive behavior. 3079 * 3080 * If the source and target names provided differ only 3081 * by case (e.g., a request to rename 'tim' to 'Tim'), 3082 * we will treat this as a special case in the 3083 * case-insensitive mode: as long as the source name 3084 * is an exact match, we will allow this to proceed as 3085 * a name-change request. 3086 */ 3087 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE || 3088 (zfsvfs->z_case == ZFS_CASE_MIXED && 3089 flags & FIGNORECASE)) && 3090 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST, 3091 &error) == 0) { 3092 /* 3093 * case preserving rename request, require exact 3094 * name matches 3095 */ 3096 zflg |= ZCIEXACT; 3097 zflg &= ~ZCILOOK; 3098 } 3099 } 3100 3101 if (cmp < 0) { 3102 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, 3103 ZEXISTS | zflg, NULL, NULL); 3104 terr = zfs_dirent_lock(&tdl, 3105 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL); 3106 } else { 3107 terr = zfs_dirent_lock(&tdl, 3108 tdzp, tnm, &tzp, zflg, NULL, NULL); 3109 serr = zfs_dirent_lock(&sdl, 3110 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg, 3111 NULL, NULL); 3112 } 3113 3114 if (serr) { 3115 /* 3116 * Source entry invalid or not there. 
3117 */ 3118 if (!terr) { 3119 zfs_dirent_unlock(tdl); 3120 if (tzp) 3121 VN_RELE(ZTOV(tzp)); 3122 } 3123 if (strcmp(snm, "..") == 0) 3124 serr = EINVAL; 3125 ZFS_EXIT(zfsvfs); 3126 return (serr); 3127 } 3128 if (terr) { 3129 zfs_dirent_unlock(sdl); 3130 VN_RELE(ZTOV(szp)); 3131 if (strcmp(tnm, "..") == 0) 3132 terr = EINVAL; 3133 ZFS_EXIT(zfsvfs); 3134 return (terr); 3135 } 3136 3137 /* 3138 * Must have write access at the source to remove the old entry 3139 * and write access at the target to create the new entry. 3140 * Note that if target and source are the same, this can be 3141 * done in a single check. 3142 */ 3143 3144 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)) 3145 goto out; 3146 3147 if (ZTOV(szp)->v_type == VDIR) { 3148 /* 3149 * Check to make sure rename is valid. 3150 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d 3151 */ 3152 if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl)) 3153 goto out; 3154 } 3155 3156 /* 3157 * Does target exist? 3158 */ 3159 if (tzp) { 3160 /* 3161 * Source and target must be the same type. 3162 */ 3163 if (ZTOV(szp)->v_type == VDIR) { 3164 if (ZTOV(tzp)->v_type != VDIR) { 3165 error = ENOTDIR; 3166 goto out; 3167 } 3168 } else { 3169 if (ZTOV(tzp)->v_type == VDIR) { 3170 error = EISDIR; 3171 goto out; 3172 } 3173 } 3174 /* 3175 * POSIX dictates that when the source and target 3176 * entries refer to the same file object, rename 3177 * must do nothing and exit without error. 3178 */ 3179 if (szp->z_id == tzp->z_id) { 3180 error = 0; 3181 goto out; 3182 } 3183 } 3184 3185 vnevent_rename_src(ZTOV(szp), sdvp, snm, ct); 3186 if (tzp) 3187 vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct); 3188 3189 /* 3190 * notify the target directory if it is not the same 3191 * as source directory. 3192 */ 3193 if (tdvp != sdvp) { 3194 vnevent_rename_dest_dir(tdvp, ct); 3195 } 3196 3197 tx = dmu_tx_create(zfsvfs->z_os); 3198 dmu_tx_hold_bonus(tx, szp->z_id); /* nlink changes */ 3199 dmu_tx_hold_bonus(tx, sdzp->z_id); /* nlink changes */ 3200 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm); 3201 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm); 3202 if (sdzp != tdzp) 3203 dmu_tx_hold_bonus(tx, tdzp->z_id); /* nlink changes */ 3204 if (tzp) 3205 dmu_tx_hold_bonus(tx, tzp->z_id); /* parent changes */ 3206 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 3207 error = dmu_tx_assign(tx, TXG_NOWAIT); 3208 if (error) { 3209 if (zl != NULL) 3210 zfs_rename_unlock(&zl); 3211 zfs_dirent_unlock(sdl); 3212 zfs_dirent_unlock(tdl); 3213 VN_RELE(ZTOV(szp)); 3214 if (tzp) 3215 VN_RELE(ZTOV(tzp)); 3216 if (error == ERESTART) { 3217 dmu_tx_wait(tx); 3218 dmu_tx_abort(tx); 3219 goto top; 3220 } 3221 dmu_tx_abort(tx); 3222 ZFS_EXIT(zfsvfs); 3223 return (error); 3224 } 3225 3226 if (tzp) /* Attempt to remove the existing target */ 3227 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL); 3228 3229 if (error == 0) { 3230 error = zfs_link_create(tdl, szp, tx, ZRENAMING); 3231 if (error == 0) { 3232 szp->z_phys->zp_flags |= ZFS_AV_MODIFIED; 3233 3234 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL); 3235 ASSERT(error == 0); 3236 3237 zfs_log_rename(zilog, tx, 3238 TX_RENAME | (flags & FIGNORECASE ? 
TX_CI : 0), 3239 sdzp, sdl->dl_name, tdzp, tdl->dl_name, szp); 3240 3241 /* Update path information for the target vnode */ 3242 vn_renamepath(tdvp, ZTOV(szp), tnm, strlen(tnm)); 3243 } 3244 } 3245 3246 dmu_tx_commit(tx); 3247 out: 3248 if (zl != NULL) 3249 zfs_rename_unlock(&zl); 3250 3251 zfs_dirent_unlock(sdl); 3252 zfs_dirent_unlock(tdl); 3253 3254 VN_RELE(ZTOV(szp)); 3255 if (tzp) 3256 VN_RELE(ZTOV(tzp)); 3257 3258 ZFS_EXIT(zfsvfs); 3259 return (error); 3260 } 3261 3262 /* 3263 * Insert the indicated symbolic reference entry into the directory. 3264 * 3265 * IN: dvp - Directory to contain new symbolic link. 3266 * link - Name for new symlink entry. 3267 * vap - Attributes of new entry. 3268 * target - Target path of new symlink. 3269 * cr - credentials of caller. 3270 * ct - caller context 3271 * flags - case flags 3272 * 3273 * RETURN: 0 if success 3274 * error code if failure 3275 * 3276 * Timestamps: 3277 * dvp - ctime|mtime updated 3278 */ 3279 /*ARGSUSED*/ 3280 static int 3281 zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr, 3282 caller_context_t *ct, int flags) 3283 { 3284 znode_t *zp, *dzp = VTOZ(dvp); 3285 zfs_dirlock_t *dl; 3286 dmu_tx_t *tx; 3287 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3288 zilog_t *zilog; 3289 int len = strlen(link); 3290 int error; 3291 int zflg = ZNEW; 3292 zfs_acl_ids_t acl_ids; 3293 boolean_t fuid_dirtied; 3294 3295 ASSERT(vap->va_type == VLNK); 3296 3297 ZFS_ENTER(zfsvfs); 3298 ZFS_VERIFY_ZP(dzp); 3299 zilog = zfsvfs->z_log; 3300 3301 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 3302 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3303 ZFS_EXIT(zfsvfs); 3304 return (EILSEQ); 3305 } 3306 if (flags & FIGNORECASE) 3307 zflg |= ZCILOOK; 3308 top: 3309 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3310 ZFS_EXIT(zfsvfs); 3311 return (error); 3312 } 3313 3314 if (len > MAXPATHLEN) { 3315 ZFS_EXIT(zfsvfs); 3316 return (ENAMETOOLONG); 3317 } 3318 3319 /* 3320 * Attempt to lock directory; fail if entry already exists. 3321 */ 3322 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); 3323 if (error) { 3324 ZFS_EXIT(zfsvfs); 3325 return (error); 3326 } 3327 3328 VERIFY(0 == zfs_acl_ids_create(dzp, 0, vap, cr, NULL, &acl_ids)); 3329 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 3330 zfs_acl_ids_free(&acl_ids); 3331 zfs_dirent_unlock(dl); 3332 ZFS_EXIT(zfsvfs); 3333 return (EDQUOT); 3334 } 3335 tx = dmu_tx_create(zfsvfs->z_os); 3336 fuid_dirtied = zfsvfs->z_fuid_dirty; 3337 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); 3338 dmu_tx_hold_bonus(tx, dzp->z_id); 3339 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3340 if (acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) 3341 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, SPA_MAXBLOCKSIZE); 3342 if (fuid_dirtied) 3343 zfs_fuid_txhold(zfsvfs, tx); 3344 error = dmu_tx_assign(tx, TXG_NOWAIT); 3345 if (error) { 3346 zfs_acl_ids_free(&acl_ids); 3347 zfs_dirent_unlock(dl); 3348 if (error == ERESTART) { 3349 dmu_tx_wait(tx); 3350 dmu_tx_abort(tx); 3351 goto top; 3352 } 3353 dmu_tx_abort(tx); 3354 ZFS_EXIT(zfsvfs); 3355 return (error); 3356 } 3357 3358 dmu_buf_will_dirty(dzp->z_dbuf, tx); 3359 3360 /* 3361 * Create a new object for the symlink. 3362 * Put the link content into bonus buffer if it will fit; 3363 * otherwise, store it just like any other file data. 
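 * For example, a short target such as "../lib" normally fits in the
 * dnode's bonus buffer directly behind the znode_phys_t, while a target
 * approaching MAXPATHLEN is spilled into an ordinary data block; the
 * dmu_bonus_max() test below decides which case applies.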
3364 */ 3365 if (sizeof (znode_phys_t) + len <= dmu_bonus_max()) { 3366 zfs_mknode(dzp, vap, tx, cr, 0, &zp, len, &acl_ids); 3367 if (len != 0) 3368 bcopy(link, zp->z_phys + 1, len); 3369 } else { 3370 dmu_buf_t *dbp; 3371 3372 zfs_mknode(dzp, vap, tx, cr, 0, &zp, 0, &acl_ids); 3373 3374 if (fuid_dirtied) 3375 zfs_fuid_sync(zfsvfs, tx); 3376 /* 3377 * Nothing can access the znode yet so no locking needed 3378 * for growing the znode's blocksize. 3379 */ 3380 zfs_grow_blocksize(zp, len, tx); 3381 3382 VERIFY(0 == dmu_buf_hold(zfsvfs->z_os, 3383 zp->z_id, 0, FTAG, &dbp)); 3384 dmu_buf_will_dirty(dbp, tx); 3385 3386 ASSERT3U(len, <=, dbp->db_size); 3387 bcopy(link, dbp->db_data, len); 3388 dmu_buf_rele(dbp, FTAG); 3389 } 3390 zp->z_phys->zp_size = len; 3391 3392 /* 3393 * Insert the new object into the directory. 3394 */ 3395 (void) zfs_link_create(dl, zp, tx, ZNEW); 3396 if (error == 0) { 3397 uint64_t txtype = TX_SYMLINK; 3398 if (flags & FIGNORECASE) 3399 txtype |= TX_CI; 3400 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link); 3401 } 3402 3403 zfs_acl_ids_free(&acl_ids); 3404 3405 dmu_tx_commit(tx); 3406 3407 zfs_dirent_unlock(dl); 3408 3409 VN_RELE(ZTOV(zp)); 3410 3411 ZFS_EXIT(zfsvfs); 3412 return (error); 3413 } 3414 3415 /* 3416 * Return, in the buffer contained in the provided uio structure, 3417 * the symbolic path referred to by vp. 3418 * 3419 * IN: vp - vnode of symbolic link. 3420 * uio - structure to contain the link path. 3421 * cr - credentials of caller. 3422 * ct - caller context 3423 * 3424 * OUT: uio - structure to contain the link path. 3425 * 3426 * RETURN: 0 if success 3427 * error code if failure 3428 * 3429 * Timestamps: 3430 * vp - atime updated 3431 */ 3432 /* ARGSUSED */ 3433 static int 3434 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) 3435 { 3436 znode_t *zp = VTOZ(vp); 3437 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3438 size_t bufsz; 3439 int error; 3440 3441 ZFS_ENTER(zfsvfs); 3442 ZFS_VERIFY_ZP(zp); 3443 3444 bufsz = (size_t)zp->z_phys->zp_size; 3445 if (bufsz + sizeof (znode_phys_t) <= zp->z_dbuf->db_size) { 3446 error = uiomove(zp->z_phys + 1, 3447 MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); 3448 } else { 3449 dmu_buf_t *dbp; 3450 error = dmu_buf_hold(zfsvfs->z_os, zp->z_id, 0, FTAG, &dbp); 3451 if (error) { 3452 ZFS_EXIT(zfsvfs); 3453 return (error); 3454 } 3455 error = uiomove(dbp->db_data, 3456 MIN((size_t)bufsz, uio->uio_resid), UIO_READ, uio); 3457 dmu_buf_rele(dbp, FTAG); 3458 } 3459 3460 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 3461 ZFS_EXIT(zfsvfs); 3462 return (error); 3463 } 3464 3465 /* 3466 * Insert a new entry into directory tdvp referencing svp. 3467 * 3468 * IN: tdvp - Directory to contain new entry. 3469 * svp - vnode of new entry. 3470 * name - name of new entry. 3471 * cr - credentials of caller.
3472 * ct - caller context 3473 * 3474 * RETURN: 0 if success 3475 * error code if failure 3476 * 3477 * Timestamps: 3478 * tdvp - ctime|mtime updated 3479 * svp - ctime updated 3480 */ 3481 /* ARGSUSED */ 3482 static int 3483 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr, 3484 caller_context_t *ct, int flags) 3485 { 3486 znode_t *dzp = VTOZ(tdvp); 3487 znode_t *tzp, *szp; 3488 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3489 zilog_t *zilog; 3490 zfs_dirlock_t *dl; 3491 dmu_tx_t *tx; 3492 vnode_t *realvp; 3493 int error; 3494 int zf = ZNEW; 3495 uid_t owner; 3496 3497 ASSERT(tdvp->v_type == VDIR); 3498 3499 ZFS_ENTER(zfsvfs); 3500 ZFS_VERIFY_ZP(dzp); 3501 zilog = zfsvfs->z_log; 3502 3503 if (VOP_REALVP(svp, &realvp, ct) == 0) 3504 svp = realvp; 3505 3506 if (svp->v_vfsp != tdvp->v_vfsp) { 3507 ZFS_EXIT(zfsvfs); 3508 return (EXDEV); 3509 } 3510 szp = VTOZ(svp); 3511 ZFS_VERIFY_ZP(szp); 3512 3513 if (zfsvfs->z_utf8 && u8_validate(name, 3514 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3515 ZFS_EXIT(zfsvfs); 3516 return (EILSEQ); 3517 } 3518 if (flags & FIGNORECASE) 3519 zf |= ZCILOOK; 3520 3521 top: 3522 /* 3523 * We do not support links between attributes and non-attributes 3524 * because of the potential security risk of creating links 3525 * into "normal" file space in order to circumvent restrictions 3526 * imposed in attribute space. 3527 */ 3528 if ((szp->z_phys->zp_flags & ZFS_XATTR) != 3529 (dzp->z_phys->zp_flags & ZFS_XATTR)) { 3530 ZFS_EXIT(zfsvfs); 3531 return (EINVAL); 3532 } 3533 3534 /* 3535 * POSIX dictates that we return EPERM here. 3536 * Better choices include ENOTSUP or EISDIR. 3537 */ 3538 if (svp->v_type == VDIR) { 3539 ZFS_EXIT(zfsvfs); 3540 return (EPERM); 3541 } 3542 3543 owner = zfs_fuid_map_id(zfsvfs, szp->z_phys->zp_uid, cr, ZFS_OWNER); 3544 if (owner != crgetuid(cr) && 3545 secpolicy_basic_link(cr) != 0) { 3546 ZFS_EXIT(zfsvfs); 3547 return (EPERM); 3548 } 3549 3550 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3551 ZFS_EXIT(zfsvfs); 3552 return (error); 3553 } 3554 3555 /* 3556 * Attempt to lock directory; fail if entry already exists. 3557 */ 3558 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL); 3559 if (error) { 3560 ZFS_EXIT(zfsvfs); 3561 return (error); 3562 } 3563 3564 tx = dmu_tx_create(zfsvfs->z_os); 3565 dmu_tx_hold_bonus(tx, szp->z_id); 3566 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3567 error = dmu_tx_assign(tx, TXG_NOWAIT); 3568 if (error) { 3569 zfs_dirent_unlock(dl); 3570 if (error == ERESTART) { 3571 dmu_tx_wait(tx); 3572 dmu_tx_abort(tx); 3573 goto top; 3574 } 3575 dmu_tx_abort(tx); 3576 ZFS_EXIT(zfsvfs); 3577 return (error); 3578 } 3579 3580 error = zfs_link_create(dl, szp, tx, 0); 3581 3582 if (error == 0) { 3583 uint64_t txtype = TX_LINK; 3584 if (flags & FIGNORECASE) 3585 txtype |= TX_CI; 3586 zfs_log_link(zilog, tx, txtype, dzp, szp, name); 3587 } 3588 3589 dmu_tx_commit(tx); 3590 3591 zfs_dirent_unlock(dl); 3592 3593 if (error == 0) { 3594 vnevent_link(svp, ct); 3595 } 3596 3597 ZFS_EXIT(zfsvfs); 3598 return (error); 3599 } 3600 3601 /* 3602 * zfs_null_putapage() is used when the file system has been force 3603 * unmounted. It just drops the pages. 3604 */ 3605 /* ARGSUSED */ 3606 static int 3607 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 3608 size_t *lenp, int flags, cred_t *cr) 3609 { 3610 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR); 3611 return (0); 3612 } 3613 3614 /* 3615 * Push a page out to disk, klustering if possible. 3616 * 3617 * IN: vp - file to push page to. 
3618 * pp - page to push. 3619 * flags - additional flags. 3620 * cr - credentials of caller. 3621 * 3622 * OUT: offp - start of range pushed. 3623 * lenp - len of range pushed. 3624 * 3625 * RETURN: 0 if success 3626 * error code if failure 3627 * 3628 * NOTE: callers must have locked the page to be pushed. On 3629 * exit, the page (and all other pages in the kluster) must be 3630 * unlocked. 3631 */ 3632 /* ARGSUSED */ 3633 static int 3634 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 3635 size_t *lenp, int flags, cred_t *cr) 3636 { 3637 znode_t *zp = VTOZ(vp); 3638 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3639 dmu_tx_t *tx; 3640 u_offset_t off, koff; 3641 size_t len, klen; 3642 uint64_t filesz; 3643 int err; 3644 3645 filesz = zp->z_phys->zp_size; 3646 off = pp->p_offset; 3647 len = PAGESIZE; 3648 /* 3649 * If our blocksize is bigger than the page size, try to kluster 3650 * multiple pages so that we write a full block (thus avoiding 3651 * a read-modify-write). 3652 */ 3653 if (off < filesz && zp->z_blksz > PAGESIZE) { 3654 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); 3655 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0; 3656 ASSERT(koff <= filesz); 3657 if (koff + klen > filesz) 3658 klen = P2ROUNDUP(filesz - koff, (uint64_t)PAGESIZE); 3659 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags); 3660 } 3661 ASSERT3U(btop(len), ==, btopr(len)); 3662 3663 /* 3664 * Can't push pages past end-of-file. 3665 */ 3666 if (off >= filesz) { 3667 /* ignore all pages */ 3668 err = 0; 3669 goto out; 3670 } else if (off + len > filesz) { 3671 int npages = btopr(filesz - off); 3672 page_t *trunc; 3673 3674 page_list_break(&pp, &trunc, npages); 3675 /* ignore pages past end of file */ 3676 if (trunc) 3677 pvn_write_done(trunc, flags); 3678 len = filesz - off; 3679 } 3680 3681 if (zfs_usergroup_overquota(zfsvfs, B_FALSE, zp->z_phys->zp_uid) || 3682 zfs_usergroup_overquota(zfsvfs, B_TRUE, zp->z_phys->zp_gid)) { 3683 err = EDQUOT; 3684 goto out; 3685 } 3686 top: 3687 tx = dmu_tx_create(zfsvfs->z_os); 3688 dmu_tx_hold_write(tx, zp->z_id, off, len); 3689 dmu_tx_hold_bonus(tx, zp->z_id); 3690 err = dmu_tx_assign(tx, TXG_NOWAIT); 3691 if (err != 0) { 3692 if (err == ERESTART) { 3693 dmu_tx_wait(tx); 3694 dmu_tx_abort(tx); 3695 goto top; 3696 } 3697 dmu_tx_abort(tx); 3698 goto out; 3699 } 3700 3701 if (zp->z_blksz <= PAGESIZE) { 3702 caddr_t va = zfs_map_page(pp, S_READ); 3703 ASSERT3U(len, <=, PAGESIZE); 3704 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx); 3705 zfs_unmap_page(pp, va); 3706 } else { 3707 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx); 3708 } 3709 3710 if (err == 0) { 3711 zfs_time_stamper(zp, CONTENT_MODIFIED, tx); 3712 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0); 3713 dmu_tx_commit(tx); 3714 } 3715 3716 out: 3717 pvn_write_done(pp, (err ? B_ERROR : 0) | flags); 3718 if (offp) 3719 *offp = off; 3720 if (lenp) 3721 *lenp = len; 3722 3723 return (err); 3724 } 3725 3726 /* 3727 * Copy the portion of the file indicated from pages into the file. 3728 * The pages are stored in a page list attached to the files vnode. 3729 * 3730 * IN: vp - vnode of file to push page data to. 3731 * off - position in file to put data. 3732 * len - amount of data to write. 3733 * flags - flags to control the operation. 3734 * cr - credentials of caller. 3735 * ct - caller context. 
3736 * 3737 * RETURN: 0 if success 3738 * error code if failure 3739 * 3740 * Timestamps: 3741 * vp - ctime|mtime updated 3742 */ 3743 /*ARGSUSED*/ 3744 static int 3745 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, 3746 caller_context_t *ct) 3747 { 3748 znode_t *zp = VTOZ(vp); 3749 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3750 page_t *pp; 3751 size_t io_len; 3752 u_offset_t io_off; 3753 uint_t blksz; 3754 rl_t *rl; 3755 int error = 0; 3756 3757 ZFS_ENTER(zfsvfs); 3758 ZFS_VERIFY_ZP(zp); 3759 3760 /* 3761 * Align this request to the file block size in case we kluster. 3762 * XXX - this can result in pretty aggressive locking, which can 3763 * impact simultaneous read/write access. One option might be 3764 * to break up long requests (len == 0) into block-by-block 3765 * operations to get narrower locking. 3766 */ 3767 blksz = zp->z_blksz; 3768 if (ISP2(blksz)) 3769 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t); 3770 else 3771 io_off = 0; 3772 if (len > 0 && ISP2(blksz)) 3773 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t); 3774 else 3775 io_len = 0; 3776 3777 if (io_len == 0) { 3778 /* 3779 * Search the entire vp list for pages >= io_off. 3780 */ 3781 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER); 3782 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr); 3783 goto out; 3784 } 3785 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER); 3786 3787 if (off > zp->z_phys->zp_size) { 3788 /* past end of file */ 3789 zfs_range_unlock(rl); 3790 ZFS_EXIT(zfsvfs); 3791 return (0); 3792 } 3793 3794 len = MIN(io_len, P2ROUNDUP(zp->z_phys->zp_size, PAGESIZE) - io_off); 3795 3796 for (off = io_off; io_off < off + len; io_off += io_len) { 3797 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) { 3798 pp = page_lookup(vp, io_off, 3799 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED); 3800 } else { 3801 pp = page_lookup_nowait(vp, io_off, 3802 (flags & B_FREE) ? SE_EXCL : SE_SHARED); 3803 } 3804 3805 if (pp != NULL && pvn_getdirty(pp, flags)) { 3806 int err; 3807 3808 /* 3809 * Found a dirty page to push 3810 */ 3811 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr); 3812 if (err) 3813 error = err; 3814 } else { 3815 io_len = PAGESIZE; 3816 } 3817 } 3818 out: 3819 zfs_range_unlock(rl); 3820 if ((flags & B_ASYNC) == 0) 3821 zil_commit(zfsvfs->z_log, UINT64_MAX, zp->z_id); 3822 ZFS_EXIT(zfsvfs); 3823 return (error); 3824 } 3825 3826 /*ARGSUSED*/ 3827 void 3828 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) 3829 { 3830 znode_t *zp = VTOZ(vp); 3831 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3832 int error; 3833 3834 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER); 3835 if (zp->z_dbuf == NULL) { 3836 /* 3837 * The fs has been unmounted, or we did a 3838 * suspend/resume and this file no longer exists. 3839 */ 3840 if (vn_has_cached_data(vp)) { 3841 (void) pvn_vplist_dirty(vp, 0, zfs_null_putapage, 3842 B_INVAL, cr); 3843 } 3844 3845 mutex_enter(&zp->z_lock); 3846 vp->v_count = 0; /* count arrives as 1 */ 3847 mutex_exit(&zp->z_lock); 3848 rw_exit(&zfsvfs->z_teardown_inactive_lock); 3849 zfs_znode_free(zp); 3850 return; 3851 } 3852 3853 /* 3854 * Attempt to push any data in the page cache. If this fails 3855 * we will get kicked out later in zfs_zinactive().
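 * The push is issued B_INVAL|B_ASYNC, so this thread does not wait for
 * the I/O to complete; anything that cannot be cleaned here is handled
 * when zfs_zinactive() tears the znode down.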
3856 */ 3857 if (vn_has_cached_data(vp)) { 3858 (void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC, 3859 cr); 3860 } 3861 3862 if (zp->z_atime_dirty && zp->z_unlinked == 0) { 3863 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); 3864 3865 dmu_tx_hold_bonus(tx, zp->z_id); 3866 error = dmu_tx_assign(tx, TXG_WAIT); 3867 if (error) { 3868 dmu_tx_abort(tx); 3869 } else { 3870 dmu_buf_will_dirty(zp->z_dbuf, tx); 3871 mutex_enter(&zp->z_lock); 3872 zp->z_atime_dirty = 0; 3873 mutex_exit(&zp->z_lock); 3874 dmu_tx_commit(tx); 3875 } 3876 } 3877 3878 zfs_zinactive(zp); 3879 rw_exit(&zfsvfs->z_teardown_inactive_lock); 3880 } 3881 3882 /* 3883 * Bounds-check the seek operation. 3884 * 3885 * IN: vp - vnode seeking within 3886 * ooff - old file offset 3887 * noffp - pointer to new file offset 3888 * ct - caller context 3889 * 3890 * RETURN: 0 if success 3891 * EINVAL if new offset invalid 3892 */ 3893 /* ARGSUSED */ 3894 static int 3895 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, 3896 caller_context_t *ct) 3897 { 3898 if (vp->v_type == VDIR) 3899 return (0); 3900 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); 3901 } 3902 3903 /* 3904 * Pre-filter the generic locking function to trap attempts to place 3905 * a mandatory lock on a memory mapped file. 3906 */ 3907 static int 3908 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset, 3909 flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct) 3910 { 3911 znode_t *zp = VTOZ(vp); 3912 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3913 int error; 3914 3915 ZFS_ENTER(zfsvfs); 3916 ZFS_VERIFY_ZP(zp); 3917 3918 /* 3919 * We are following the UFS semantics with respect to mapcnt 3920 * here: If we see that the file is mapped already, then we will 3921 * return an error, but we don't worry about races between this 3922 * function and zfs_map(). 3923 */ 3924 if (zp->z_mapcnt > 0 && MANDMODE((mode_t)zp->z_phys->zp_mode)) { 3925 ZFS_EXIT(zfsvfs); 3926 return (EAGAIN); 3927 } 3928 error = fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct); 3929 ZFS_EXIT(zfsvfs); 3930 return (error); 3931 } 3932 3933 /* 3934 * If we can't find a page in the cache, we will create a new page 3935 * and fill it with file data. For efficiency, we may try to fill 3936 * multiple pages at once (klustering) to fill up the supplied page 3937 * list. Note that the pages to be filled are held with an exclusive 3938 * lock to prevent access by other threads while they are being filled. 3939 */ 3940 static int 3941 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg, 3942 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw) 3943 { 3944 znode_t *zp = VTOZ(vp); 3945 page_t *pp, *cur_pp; 3946 objset_t *os = zp->z_zfsvfs->z_os; 3947 u_offset_t io_off, total; 3948 size_t io_len; 3949 int err; 3950 3951 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) { 3952 /* 3953 * We only have a single page, don't bother klustering 3954 */ 3955 io_off = off; 3956 io_len = PAGESIZE; 3957 pp = page_create_va(vp, io_off, io_len, 3958 PG_EXCL | PG_WAIT, seg, addr); 3959 } else { 3960 /* 3961 * Try to find enough pages to fill the page list 3962 */ 3963 pp = pvn_read_kluster(vp, off, seg, addr, &io_off, 3964 &io_len, off, plsz, 0); 3965 } 3966 if (pp == NULL) { 3967 /* 3968 * The page already exists, nothing to do here. 3969 */ 3970 *pl = NULL; 3971 return (0); 3972 } 3973 3974 /* 3975 * Fill the pages in the kluster. 
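 * Each page is mapped, filled with a single PAGESIZE dmu_read() from
 * the object, and unmapped again; if any read fails the whole kluster
 * is released with pvn_read_done(B_ERROR) and the error is returned.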
3976 */ 3977 cur_pp = pp; 3978 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) { 3979 caddr_t va; 3980 3981 ASSERT3U(io_off, ==, cur_pp->p_offset); 3982 va = zfs_map_page(cur_pp, S_WRITE); 3983 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va, 3984 DMU_READ_PREFETCH); 3985 zfs_unmap_page(cur_pp, va); 3986 if (err) { 3987 /* On error, toss the entire kluster */ 3988 pvn_read_done(pp, B_ERROR); 3989 /* convert checksum errors into IO errors */ 3990 if (err == ECKSUM) 3991 err = EIO; 3992 return (err); 3993 } 3994 cur_pp = cur_pp->p_next; 3995 } 3996 3997 /* 3998 * Fill in the page list array from the kluster starting 3999 * from the desired offset `off'. 4000 * NOTE: the page list will always be null terminated. 4001 */ 4002 pvn_plist_init(pp, pl, plsz, off, io_len, rw); 4003 ASSERT(pl == NULL || (*pl)->p_offset == off); 4004 4005 return (0); 4006 } 4007 4008 /* 4009 * Return pointers to the pages for the file region [off, off + len] 4010 * in the pl array. If plsz is greater than len, this function may 4011 * also return page pointers from after the specified region 4012 * (i.e. the region [off, off + plsz]). These additional pages are 4013 * only returned if they are already in the cache, or were created as 4014 * part of a klustered read. 4015 * 4016 * IN: vp - vnode of file to get data from. 4017 * off - position in file to get data from. 4018 * len - amount of data to retrieve. 4019 * plsz - length of provided page list. 4020 * seg - segment to obtain pages for. 4021 * addr - virtual address of fault. 4022 * rw - mode of created pages. 4023 * cr - credentials of caller. 4024 * ct - caller context. 4025 * 4026 * OUT: protp - protection mode of created pages. 4027 * pl - list of pages created. 4028 * 4029 * RETURN: 0 if success 4030 * error code if failure 4031 * 4032 * Timestamps: 4033 * vp - atime updated 4034 */ 4035 /* ARGSUSED */ 4036 static int 4037 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp, 4038 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, 4039 enum seg_rw rw, cred_t *cr, caller_context_t *ct) 4040 { 4041 znode_t *zp = VTOZ(vp); 4042 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4043 page_t **pl0 = pl; 4044 int err = 0; 4045 4046 /* we do our own caching, faultahead is unnecessary */ 4047 if (pl == NULL) 4048 return (0); 4049 else if (len > plsz) 4050 len = plsz; 4051 else 4052 len = P2ROUNDUP(len, PAGESIZE); 4053 ASSERT(plsz >= len); 4054 4055 ZFS_ENTER(zfsvfs); 4056 ZFS_VERIFY_ZP(zp); 4057 4058 if (protp) 4059 *protp = PROT_ALL; 4060 4061 /* 4062 * Loop through the requested range [off, off + len) looking 4063 * for pages. If we don't find a page, we will need to create 4064 * a new page and fill it with data from the file. 4065 */ 4066 while (len > 0) { 4067 if (*pl = page_lookup(vp, off, SE_SHARED)) 4068 *(pl+1) = NULL; 4069 else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw)) 4070 goto out; 4071 while (*pl) { 4072 ASSERT3U((*pl)->p_offset, ==, off); 4073 off += PAGESIZE; 4074 addr += PAGESIZE; 4075 if (len > 0) { 4076 ASSERT3U(len, >=, PAGESIZE); 4077 len -= PAGESIZE; 4078 } 4079 ASSERT3U(plsz, >=, PAGESIZE); 4080 plsz -= PAGESIZE; 4081 pl++; 4082 } 4083 } 4084 4085 /* 4086 * Fill out the page array with any pages already in the cache. 4087 */ 4088 while (plsz > 0 && 4089 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) { 4090 off += PAGESIZE; 4091 plsz -= PAGESIZE; 4092 } 4093 out: 4094 if (err) { 4095 /* 4096 * Release any pages we have previously locked. 
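 * pl has been advanced past every page that was successfully looked up
 * or created above, so walking it back down to pl0 unlocks exactly
 * those pages.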
4097 */ 4098 while (pl > pl0) 4099 page_unlock(*--pl); 4100 } else { 4101 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 4102 } 4103 4104 *pl = NULL; 4105 4106 ZFS_EXIT(zfsvfs); 4107 return (err); 4108 } 4109 4110 /* 4111 * Request a memory map for a section of a file. This code interacts 4112 * with common code and the VM system as follows: 4113 * 4114 * common code calls mmap(), which ends up in smmap_common() 4115 * 4116 * this calls VOP_MAP(), which takes you into (say) zfs 4117 * 4118 * zfs_map() calls as_map(), passing segvn_create() as the callback 4119 * 4120 * segvn_create() creates the new segment and calls VOP_ADDMAP() 4121 * 4122 * zfs_addmap() updates z_mapcnt 4123 */ 4124 /*ARGSUSED*/ 4125 static int 4126 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp, 4127 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, 4128 caller_context_t *ct) 4129 { 4130 znode_t *zp = VTOZ(vp); 4131 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4132 segvn_crargs_t vn_a; 4133 int error; 4134 4135 ZFS_ENTER(zfsvfs); 4136 ZFS_VERIFY_ZP(zp); 4137 4138 if ((prot & PROT_WRITE) && 4139 (zp->z_phys->zp_flags & (ZFS_IMMUTABLE | ZFS_READONLY | 4140 ZFS_APPENDONLY))) { 4141 ZFS_EXIT(zfsvfs); 4142 return (EPERM); 4143 } 4144 4145 if ((prot & (PROT_READ | PROT_EXEC)) && 4146 (zp->z_phys->zp_flags & ZFS_AV_QUARANTINED)) { 4147 ZFS_EXIT(zfsvfs); 4148 return (EACCES); 4149 } 4150 4151 if (vp->v_flag & VNOMAP) { 4152 ZFS_EXIT(zfsvfs); 4153 return (ENOSYS); 4154 } 4155 4156 if (off < 0 || len > MAXOFFSET_T - off) { 4157 ZFS_EXIT(zfsvfs); 4158 return (ENXIO); 4159 } 4160 4161 if (vp->v_type != VREG) { 4162 ZFS_EXIT(zfsvfs); 4163 return (ENODEV); 4164 } 4165 4166 /* 4167 * If file is locked, disallow mapping. 4168 */ 4169 if (MANDMODE((mode_t)zp->z_phys->zp_mode) && vn_has_flocks(vp)) { 4170 ZFS_EXIT(zfsvfs); 4171 return (EAGAIN); 4172 } 4173 4174 as_rangelock(as); 4175 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags); 4176 if (error != 0) { 4177 as_rangeunlock(as); 4178 ZFS_EXIT(zfsvfs); 4179 return (error); 4180 } 4181 4182 vn_a.vp = vp; 4183 vn_a.offset = (u_offset_t)off; 4184 vn_a.type = flags & MAP_TYPE; 4185 vn_a.prot = prot; 4186 vn_a.maxprot = maxprot; 4187 vn_a.cred = cr; 4188 vn_a.amp = NULL; 4189 vn_a.flags = flags & ~MAP_TYPE; 4190 vn_a.szc = 0; 4191 vn_a.lgrp_mem_policy_flags = 0; 4192 4193 error = as_map(as, *addrp, len, segvn_create, &vn_a); 4194 4195 as_rangeunlock(as); 4196 ZFS_EXIT(zfsvfs); 4197 return (error); 4198 } 4199 4200 /* ARGSUSED */ 4201 static int 4202 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, 4203 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr, 4204 caller_context_t *ct) 4205 { 4206 uint64_t pages = btopr(len); 4207 4208 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages); 4209 return (0); 4210 } 4211 4212 /* 4213 * The reason we push dirty pages as part of zfs_delmap() is so that we get a 4214 * more accurate mtime for the associated file. Since we don't have a way of 4215 * detecting when the data was actually modified, we have to resort to 4216 * heuristics. If an explicit msync() is done, then we mark the mtime when the 4217 * last page is pushed. The problem occurs when the msync() call is omitted, 4218 * which by far the most common case: 4219 * 4220 * open() 4221 * mmap() 4222 * <modify memory> 4223 * munmap() 4224 * close() 4225 * <time lapse> 4226 * putpage() via fsflush 4227 * 4228 * If we wait until fsflush to come along, we can have a modification time that 4229 * is some arbitrary point in the future. 
In order to prevent this in the 4230 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is 4231 * torn down. 4232 */ 4233 /* ARGSUSED */ 4234 static int 4235 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, 4236 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr, 4237 caller_context_t *ct) 4238 { 4239 uint64_t pages = btopr(len); 4240 4241 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages); 4242 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages); 4243 4244 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && 4245 vn_has_cached_data(vp)) 4246 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct); 4247 4248 return (0); 4249 } 4250 4251 /* 4252 * Free or allocate space in a file. Currently, this function only 4253 * supports the `F_FREESP' command. However, this command is somewhat 4254 * misnamed, as its functionality includes the ability to allocate as 4255 * well as free space. 4256 * 4257 * IN: vp - vnode of file to free data in. 4258 * cmd - action to take (only F_FREESP supported). 4259 * bfp - section of file to free/alloc. 4260 * flag - current file open mode flags. 4261 * offset - current file offset. 4262 * cr - credentials of caller [UNUSED]. 4263 * ct - caller context. 4264 * 4265 * RETURN: 0 if success 4266 * error code if failure 4267 * 4268 * Timestamps: 4269 * vp - ctime|mtime updated 4270 */ 4271 /* ARGSUSED */ 4272 static int 4273 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag, 4274 offset_t offset, cred_t *cr, caller_context_t *ct) 4275 { 4276 znode_t *zp = VTOZ(vp); 4277 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4278 uint64_t off, len; 4279 int error; 4280 4281 ZFS_ENTER(zfsvfs); 4282 ZFS_VERIFY_ZP(zp); 4283 4284 if (cmd != F_FREESP) { 4285 ZFS_EXIT(zfsvfs); 4286 return (EINVAL); 4287 } 4288 4289 if (error = convoff(vp, bfp, 0, offset)) { 4290 ZFS_EXIT(zfsvfs); 4291 return (error); 4292 } 4293 4294 if (bfp->l_len < 0) { 4295 ZFS_EXIT(zfsvfs); 4296 return (EINVAL); 4297 } 4298 4299 off = bfp->l_start; 4300 len = bfp->l_len; /* 0 means from off to end of file */ 4301 4302 error = zfs_freesp(zp, off, len, flag, TRUE); 4303 4304 ZFS_EXIT(zfsvfs); 4305 return (error); 4306 } 4307 4308 /*ARGSUSED*/ 4309 static int 4310 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct) 4311 { 4312 znode_t *zp = VTOZ(vp); 4313 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4314 uint32_t gen; 4315 uint64_t object = zp->z_id; 4316 zfid_short_t *zfid; 4317 int size, i; 4318 4319 ZFS_ENTER(zfsvfs); 4320 ZFS_VERIFY_ZP(zp); 4321 gen = (uint32_t)zp->z_gen; 4322 4323 size = (zfsvfs->z_parent != zfsvfs) ? 
/*ARGSUSED*/
static int
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	uint32_t gen;
	uint64_t object = zp->z_id;
	zfid_short_t *zfid;
	int size, i;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	gen = (uint32_t)zp->z_gen;

	size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
	if (fidp->fid_len < size) {
		fidp->fid_len = size;
		ZFS_EXIT(zfsvfs);
		return (ENOSPC);
	}

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = size;

	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* Must have a non-zero generation number to distinguish from .zfs */
	if (gen == 0)
		gen = 1;
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

	if (size == LONG_FID_LEN) {
		uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
		zfid_long_t *zlfid;

		zlfid = (zfid_long_t *)fidp;

		for (i = 0; i < sizeof (zlfid->zf_setid); i++)
			zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));

		/* XXX - this should be the generation number for the objset */
		for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
			zlfid->zf_setgen[i] = 0;
	}

	ZFS_EXIT(zfsvfs);
	return (0);
}

static int
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp, *xzp;
	zfsvfs_t *zfsvfs;
	zfs_dirlock_t *dl;
	int error;

	switch (cmd) {
	case _PC_LINK_MAX:
		*valp = ULONG_MAX;
		return (0);

	case _PC_FILESIZEBITS:
		*valp = 64;
		return (0);

	case _PC_XATTR_EXISTS:
		zp = VTOZ(vp);
		zfsvfs = zp->z_zfsvfs;
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		*valp = 0;
		error = zfs_dirent_lock(&dl, zp, "", &xzp,
		    ZXATTR | ZEXISTS | ZSHARED, NULL, NULL);
		if (error == 0) {
			zfs_dirent_unlock(dl);
			if (!zfs_dirempty(xzp))
				*valp = 1;
			VN_RELE(ZTOV(xzp));
		} else if (error == ENOENT) {
			/*
			 * If there aren't extended attributes, it's the
			 * same as having zero of them.
			 */
			error = 0;
		}
		ZFS_EXIT(zfsvfs);
		return (error);

	case _PC_SATTR_ENABLED:
	case _PC_SATTR_EXISTS:
		*valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
		    (vp->v_type == VREG || vp->v_type == VDIR);
		return (0);

	case _PC_ACL_ENABLED:
		*valp = _ACL_ACE_ENABLED;
		return (0);

	case _PC_MIN_HOLE_SIZE:
		*valp = (ulong_t)SPA_MINBLOCKSIZE;
		return (0);

	default:
		return (fs_pathconf(vp, cmd, valp, cr, ct));
	}
}
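
/*
 * Get or set the ACL on a file.  These are thin wrappers around zfs_getacl()
 * and zfs_setacl(); ATTR_NOACLCHECK asks those routines to skip their
 * ACL-based access check (skipaclchk).
 */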
/*ARGSUSED*/
static int
zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_getacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);

	return (error);
}

/*ARGSUSED*/
static int
zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr,
    caller_context_t *ct)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error;
	boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);
	error = zfs_setacl(zp, vsecp, skipaclchk, cr);
	ZFS_EXIT(zfsvfs);
	return (error);
}

/*
 * Predeclare these here so that the compiler assumes that
 * this is an "old style" function declaration that does
 * not include arguments => we won't get type mismatch errors
 * in the initializations that follow.
 */
static int zfs_inval();
static int zfs_isdir();

static int
zfs_inval()
{
	return (EINVAL);
}

static int
zfs_isdir()
{
	return (EISDIR);
}
/*
 * Directory vnode operations template
 */
vnodeops_t *zfs_dvnodeops;
const fs_operation_def_t zfs_dvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_READ,		{ .error = zfs_isdir },
	VOPNAME_WRITE,		{ .error = zfs_isdir },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = zfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
	VOPNAME_LINK,		{ .vop_link = zfs_link },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_MKDIR,		{ .vop_mkdir = zfs_mkdir },
	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
	VOPNAME_SYMLINK,	{ .vop_symlink = zfs_symlink },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Regular file vnode operations template
 */
vnodeops_t *zfs_fvnodeops;
const fs_operation_def_t zfs_fvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_READ,		{ .vop_read = zfs_read },
	VOPNAME_WRITE,		{ .vop_write = zfs_write },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_FRLOCK,		{ .vop_frlock = zfs_frlock },
	VOPNAME_SPACE,		{ .vop_space = zfs_space },
	VOPNAME_GETPAGE,	{ .vop_getpage = zfs_getpage },
	VOPNAME_PUTPAGE,	{ .vop_putpage = zfs_putpage },
	VOPNAME_MAP,		{ .vop_map = zfs_map },
	VOPNAME_ADDMAP,		{ .vop_addmap = zfs_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = zfs_delmap },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Symbolic link vnode operations template
 */
vnodeops_t *zfs_symvnodeops;
const fs_operation_def_t zfs_symvnodeops_template[] = {
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_READLINK,	{ .vop_readlink = zfs_readlink },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Special share hidden files vnode operations template
 */
vnodeops_t *zfs_sharevnodeops;
const fs_operation_def_t zfs_sharevnodeops_template[] = {
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Extended attribute directory vnode operations template
 *	This template is identical to the directory vnode
 *	operations template except for restricted operations:
 *		VOP_MKDIR()
 *		VOP_SYMLINK()
 * Note that there are other restrictions embedded in:
 *	zfs_create() - restrict type to VREG
 *	zfs_link() - no links into/out of attribute space
 *	zfs_rename() - no moves into/out of attribute space
 */
vnodeops_t *zfs_xdvnodeops;
const fs_operation_def_t zfs_xdvnodeops_template[] = {
	VOPNAME_OPEN,		{ .vop_open = zfs_open },
	VOPNAME_CLOSE,		{ .vop_close = zfs_close },
	VOPNAME_IOCTL,		{ .vop_ioctl = zfs_ioctl },
	VOPNAME_GETATTR,	{ .vop_getattr = zfs_getattr },
	VOPNAME_SETATTR,	{ .vop_setattr = zfs_setattr },
	VOPNAME_ACCESS,		{ .vop_access = zfs_access },
	VOPNAME_LOOKUP,		{ .vop_lookup = zfs_lookup },
	VOPNAME_CREATE,		{ .vop_create = zfs_create },
	VOPNAME_REMOVE,		{ .vop_remove = zfs_remove },
	VOPNAME_LINK,		{ .vop_link = zfs_link },
	VOPNAME_RENAME,		{ .vop_rename = zfs_rename },
	VOPNAME_MKDIR,		{ .error = zfs_inval },
	VOPNAME_RMDIR,		{ .vop_rmdir = zfs_rmdir },
	VOPNAME_READDIR,	{ .vop_readdir = zfs_readdir },
	VOPNAME_SYMLINK,	{ .error = zfs_inval },
	VOPNAME_FSYNC,		{ .vop_fsync = zfs_fsync },
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_FID,		{ .vop_fid = zfs_fid },
	VOPNAME_SEEK,		{ .vop_seek = zfs_seek },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	VOPNAME_GETSECATTR,	{ .vop_getsecattr = zfs_getsecattr },
	VOPNAME_SETSECATTR,	{ .vop_setsecattr = zfs_setsecattr },
	VOPNAME_VNEVENT,	{ .vop_vnevent = fs_vnevent_support },
	NULL,			NULL
};

/*
 * Error vnode operations template
 */
vnodeops_t *zfs_evnodeops;
const fs_operation_def_t zfs_evnodeops_template[] = {
	VOPNAME_INACTIVE,	{ .vop_inactive = zfs_inactive },
	VOPNAME_PATHCONF,	{ .vop_pathconf = zfs_pathconf },
	NULL,			NULL
};