/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/vm.h>
#include <vm/seg_vn.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/kpm.h>
#include <vm/seg_kpm.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/sid.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/dnlc.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/cred.h>
#include <sys/attr.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1)	A check must be made in each zfs thread for a mounted file system.
 *	This is done, while avoiding races, with ZFS_ENTER(zfsvfs).
 *	A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *	must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2)	VN_RELE() should always be the last thing except for zil_commit()
 *	(if necessary) and ZFS_EXIT().  This is for 3 reasons:
 *	First, if it's the last reference, the vnode/znode
 *	can be freed, so the zp may point to freed memory.  Second, the last
 *	reference will call zfs_zinactive(), which may induce a lot of work --
 *	pushing cached pages (which acquires range locks) and syncing out
 *	cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *	which could deadlock the system if you were already holding one.
 *	If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
 *
 *  (3)	All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4)	Always pass TXG_NOWAIT as the second argument to dmu_tx_assign().
 *	This is critical because we don't want to block while holding locks.
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing to
 *	use a non-blocking assign can deadlock the system.  The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *	forever, because the previous txg can't quiesce until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *	then drop all locks, call dmu_tx_wait(), and try again.
 *
 *  (5)	If the operation succeeded, generate the intent log entry for it
 *	before dropping locks.  This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6)	At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7)	After dropping all locks, invoke zil_commit(zilog, seq, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	ZFS_ENTER(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may VN_HOLD())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);	// try to assign
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		VN_RELE(...);		// release held vnodes
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		ZFS_EXIT(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	VN_RELE(...);			// release held vnodes
 *	zil_commit(zilog, seq, foid);	// synchronous when necessary
 *	ZFS_EXIT(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */

/* ARGSUSED */
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
{
	znode_t	*zp = VTOZ(*vpp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & FAPPEND) == 0)) {
		ZFS_EXIT(zfsvfs);
		return (EPERM);
	}

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
		if (fs_vscan(*vpp, cr, 0) != 0) {
			ZFS_EXIT(zfsvfs);
			return (EACCES);
		}
	}

	/* Keep a count of the synchronous opens in the znode */
	if (flag & (FSYNC | FDSYNC))
		atomic_inc_32(&zp->z_sync_cnt);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/* ARGSUSED */
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr,
    caller_context_t *ct)
{
	znode_t	*zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;

	/*
	 * Clean up any locks held by this process on the vp.
	 */
	cleanlocks(vp, ddi_get_pid(), 0);
	cleanshares(vp, ddi_get_pid());

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	/* Decrement the synchronous opens in the znode */
	if ((flag & (FSYNC | FDSYNC)) && (count == 1))
		atomic_dec_32(&zp->z_sync_cnt);

	if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
	    ZTOV(zp)->v_type == VREG &&
	    !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
		VERIFY(fs_vscan(vp, cr, 1) == 0);

	ZFS_EXIT(zfsvfs);
	return (0);
}

/*
 * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and
 * data (cmd == _FIO_SEEK_DATA).  "off" is an in/out parameter.
 */
static int
zfs_holey(vnode_t *vp, int cmd, offset_t *off)
{
	znode_t	*zp = VTOZ(vp);
	uint64_t noff = (uint64_t)*off; /* new offset */
	uint64_t file_sz;
	int error;
	boolean_t hole;

	file_sz = zp->z_size;
	if (noff >= file_sz) {
		return (ENXIO);
	}

	if (cmd == _FIO_SEEK_HOLE)
		hole = B_TRUE;
	else
		hole = B_FALSE;

	error = dmu_offset_next(zp->z_zfsvfs->z_os, zp->z_id, hole, &noff);

	/* end of file? */
	if ((error == ESRCH) || (noff > file_sz)) {
		/*
		 * Handle the virtual hole at the end of file.
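		 *
		 * For example (illustrative sizes): on a fully-written 8K
		 * file, a _FIO_SEEK_HOLE search finds no real hole (ESRCH)
		 * and reports *off = 8192, the implicit hole at EOF, while
		 * a _FIO_SEEK_DATA search that finds no further data
		 * returns ENXIO.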
264 */ 265 if (hole) { 266 *off = file_sz; 267 return (0); 268 } 269 return (ENXIO); 270 } 271 272 if (noff < *off) 273 return (error); 274 *off = noff; 275 return (error); 276 } 277 278 /* ARGSUSED */ 279 static int 280 zfs_ioctl(vnode_t *vp, int com, intptr_t data, int flag, cred_t *cred, 281 int *rvalp, caller_context_t *ct) 282 { 283 offset_t off; 284 int error; 285 zfsvfs_t *zfsvfs; 286 znode_t *zp; 287 288 switch (com) { 289 case _FIOFFS: 290 return (zfs_sync(vp->v_vfsp, 0, cred)); 291 292 /* 293 * The following two ioctls are used by bfu. Faking out, 294 * necessary to avoid bfu errors. 295 */ 296 case _FIOGDIO: 297 case _FIOSDIO: 298 return (0); 299 300 case _FIO_SEEK_DATA: 301 case _FIO_SEEK_HOLE: 302 if (ddi_copyin((void *)data, &off, sizeof (off), flag)) 303 return (EFAULT); 304 305 zp = VTOZ(vp); 306 zfsvfs = zp->z_zfsvfs; 307 ZFS_ENTER(zfsvfs); 308 ZFS_VERIFY_ZP(zp); 309 310 /* offset parameter is in/out */ 311 error = zfs_holey(vp, com, &off); 312 ZFS_EXIT(zfsvfs); 313 if (error) 314 return (error); 315 if (ddi_copyout(&off, (void *)data, sizeof (off), flag)) 316 return (EFAULT); 317 return (0); 318 } 319 return (ENOTTY); 320 } 321 322 /* 323 * Utility functions to map and unmap a single physical page. These 324 * are used to manage the mappable copies of ZFS file data, and therefore 325 * do not update ref/mod bits. 326 */ 327 caddr_t 328 zfs_map_page(page_t *pp, enum seg_rw rw) 329 { 330 if (kpm_enable) 331 return (hat_kpm_mapin(pp, 0)); 332 ASSERT(rw == S_READ || rw == S_WRITE); 333 return (ppmapin(pp, PROT_READ | ((rw == S_WRITE) ? PROT_WRITE : 0), 334 (caddr_t)-1)); 335 } 336 337 void 338 zfs_unmap_page(page_t *pp, caddr_t addr) 339 { 340 if (kpm_enable) { 341 hat_kpm_mapout(pp, 0, addr); 342 } else { 343 ppmapout(addr); 344 } 345 } 346 347 /* 348 * When a file is memory mapped, we must keep the IO data synchronized 349 * between the DMU cache and the memory mapped pages. What this means: 350 * 351 * On Write: If we find a memory mapped page, we write to *both* 352 * the page and the dmu buffer. 353 */ 354 static void 355 update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid) 356 { 357 int64_t off; 358 359 off = start & PAGEOFFSET; 360 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 361 page_t *pp; 362 uint64_t nbytes = MIN(PAGESIZE - off, len); 363 364 if (pp = page_lookup(vp, start, SE_SHARED)) { 365 caddr_t va; 366 367 va = zfs_map_page(pp, S_WRITE); 368 (void) dmu_read(os, oid, start+off, nbytes, va+off, 369 DMU_READ_PREFETCH); 370 zfs_unmap_page(pp, va); 371 page_unlock(pp); 372 } 373 len -= nbytes; 374 off = 0; 375 } 376 } 377 378 /* 379 * When a file is memory mapped, we must keep the IO data synchronized 380 * between the DMU cache and the memory mapped pages. What this means: 381 * 382 * On Read: We "read" preferentially from memory mapped pages, 383 * else we default from the dmu buffer. 384 * 385 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when 386 * the file is memory mapped. 
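 *
 * For instance, with 4K pages an 8K read of a mapped file is handled as
 * two PAGESIZE pieces: each piece does a page_lookup(), copies out of the
 * mapped page if it is resident, and otherwise falls back to
 * dmu_read_uio() for just that piece.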
387 */ 388 static int 389 mappedread(vnode_t *vp, int nbytes, uio_t *uio) 390 { 391 znode_t *zp = VTOZ(vp); 392 objset_t *os = zp->z_zfsvfs->z_os; 393 int64_t start, off; 394 int len = nbytes; 395 int error = 0; 396 397 start = uio->uio_loffset; 398 off = start & PAGEOFFSET; 399 for (start &= PAGEMASK; len > 0; start += PAGESIZE) { 400 page_t *pp; 401 uint64_t bytes = MIN(PAGESIZE - off, len); 402 403 if (pp = page_lookup(vp, start, SE_SHARED)) { 404 caddr_t va; 405 406 va = zfs_map_page(pp, S_READ); 407 error = uiomove(va + off, bytes, UIO_READ, uio); 408 zfs_unmap_page(pp, va); 409 page_unlock(pp); 410 } else { 411 error = dmu_read_uio(os, zp->z_id, uio, bytes); 412 } 413 len -= bytes; 414 off = 0; 415 if (error) 416 break; 417 } 418 return (error); 419 } 420 421 offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ 422 423 /* 424 * Read bytes from specified file into supplied buffer. 425 * 426 * IN: vp - vnode of file to be read from. 427 * uio - structure supplying read location, range info, 428 * and return buffer. 429 * ioflag - SYNC flags; used to provide FRSYNC semantics. 430 * cr - credentials of caller. 431 * ct - caller context 432 * 433 * OUT: uio - updated offset and range, buffer filled. 434 * 435 * RETURN: 0 if success 436 * error code if failure 437 * 438 * Side Effects: 439 * vp - atime updated if byte count > 0 440 */ 441 /* ARGSUSED */ 442 static int 443 zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 444 { 445 znode_t *zp = VTOZ(vp); 446 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 447 objset_t *os; 448 ssize_t n, nbytes; 449 int error; 450 rl_t *rl; 451 xuio_t *xuio = NULL; 452 453 ZFS_ENTER(zfsvfs); 454 ZFS_VERIFY_ZP(zp); 455 os = zfsvfs->z_os; 456 457 if (zp->z_pflags & ZFS_AV_QUARANTINED) { 458 ZFS_EXIT(zfsvfs); 459 return (EACCES); 460 } 461 462 /* 463 * Validate file offset 464 */ 465 if (uio->uio_loffset < (offset_t)0) { 466 ZFS_EXIT(zfsvfs); 467 return (EINVAL); 468 } 469 470 /* 471 * Fasttrack empty reads 472 */ 473 if (uio->uio_resid == 0) { 474 ZFS_EXIT(zfsvfs); 475 return (0); 476 } 477 478 /* 479 * Check for mandatory locks 480 */ 481 if (MANDMODE(zp->z_mode)) { 482 if (error = chklock(vp, FREAD, 483 uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct)) { 484 ZFS_EXIT(zfsvfs); 485 return (error); 486 } 487 } 488 489 /* 490 * If we're in FRSYNC mode, sync out this znode before reading it. 491 */ 492 if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 493 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); 494 495 /* 496 * Lock the range against changes. 497 */ 498 rl = zfs_range_lock(zp, uio->uio_loffset, uio->uio_resid, RL_READER); 499 500 /* 501 * If we are reading past end-of-file we can skip 502 * to the end; but we might still need to set atime. 503 */ 504 if (uio->uio_loffset >= zp->z_size) { 505 error = 0; 506 goto out; 507 } 508 509 ASSERT(uio->uio_loffset < zp->z_size); 510 n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset); 511 512 if ((uio->uio_extflg == UIO_XUIO) && 513 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) { 514 int nblk; 515 int blksz = zp->z_blksz; 516 uint64_t offset = uio->uio_loffset; 517 518 xuio = (xuio_t *)uio; 519 if ((ISP2(blksz))) { 520 nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset, 521 blksz)) / blksz; 522 } else { 523 ASSERT(offset + n <= blksz); 524 nblk = 1; 525 } 526 (void) dmu_xuio_init(xuio, nblk); 527 528 if (vn_has_cached_data(vp)) { 529 /* 530 * For simplicity, we always allocate a full buffer 531 * even if we only expect to read a portion of a block. 
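			 *
			 * E.g. a 512-byte request against a dataset using
			 * 128K blocks still borrows a full 128K ARC buffer
			 * from dmu_request_arcbuf() here.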
532 */ 533 while (--nblk >= 0) { 534 (void) dmu_xuio_add(xuio, 535 dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 536 blksz), 0, blksz); 537 } 538 } 539 } 540 541 while (n > 0) { 542 nbytes = MIN(n, zfs_read_chunk_size - 543 P2PHASE(uio->uio_loffset, zfs_read_chunk_size)); 544 545 if (vn_has_cached_data(vp)) 546 error = mappedread(vp, nbytes, uio); 547 else 548 error = dmu_read_uio(os, zp->z_id, uio, nbytes); 549 if (error) { 550 /* convert checksum errors into IO errors */ 551 if (error == ECKSUM) 552 error = EIO; 553 break; 554 } 555 556 n -= nbytes; 557 } 558 out: 559 zfs_range_unlock(rl); 560 561 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 562 ZFS_EXIT(zfsvfs); 563 return (error); 564 } 565 566 /* 567 * Write the bytes to a file. 568 * 569 * IN: vp - vnode of file to be written to. 570 * uio - structure supplying write location, range info, 571 * and data buffer. 572 * ioflag - FAPPEND flag set if in append mode. 573 * cr - credentials of caller. 574 * ct - caller context (NFS/CIFS fem monitor only) 575 * 576 * OUT: uio - updated offset and range. 577 * 578 * RETURN: 0 if success 579 * error code if failure 580 * 581 * Timestamps: 582 * vp - ctime|mtime updated if byte count > 0 583 */ 584 585 /* ARGSUSED */ 586 static int 587 zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) 588 { 589 znode_t *zp = VTOZ(vp); 590 rlim64_t limit = uio->uio_llimit; 591 ssize_t start_resid = uio->uio_resid; 592 ssize_t tx_bytes; 593 uint64_t end_size; 594 dmu_tx_t *tx; 595 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 596 zilog_t *zilog; 597 offset_t woff; 598 ssize_t n, nbytes; 599 rl_t *rl; 600 int max_blksz = zfsvfs->z_max_blksz; 601 int error; 602 arc_buf_t *abuf; 603 iovec_t *aiov; 604 xuio_t *xuio = NULL; 605 int i_iov = 0; 606 int iovcnt = uio->uio_iovcnt; 607 iovec_t *iovp = uio->uio_iov; 608 int write_eof; 609 int count = 0; 610 sa_bulk_attr_t bulk[4]; 611 uint64_t mtime[2], ctime[2]; 612 613 /* 614 * Fasttrack empty write 615 */ 616 n = start_resid; 617 if (n == 0) 618 return (0); 619 620 if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T) 621 limit = MAXOFFSET_T; 622 623 ZFS_ENTER(zfsvfs); 624 ZFS_VERIFY_ZP(zp); 625 626 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); 627 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); 628 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL, 629 &zp->z_size, 8); 630 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, 631 &zp->z_pflags, 8); 632 633 /* 634 * If immutable or not appending then return EPERM 635 */ 636 if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) || 637 ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) && 638 (uio->uio_loffset < zp->z_size))) { 639 ZFS_EXIT(zfsvfs); 640 return (EPERM); 641 } 642 643 zilog = zfsvfs->z_log; 644 645 /* 646 * Validate file offset 647 */ 648 woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset; 649 if (woff < 0) { 650 ZFS_EXIT(zfsvfs); 651 return (EINVAL); 652 } 653 654 /* 655 * Check for mandatory locks before calling zfs_range_lock() 656 * in order to prevent a deadlock with locks set via fcntl(). 657 */ 658 if (MANDMODE((mode_t)zp->z_mode) && 659 (error = chklock(vp, FWRITE, woff, n, uio->uio_fmode, ct)) != 0) { 660 ZFS_EXIT(zfsvfs); 661 return (error); 662 } 663 664 /* 665 * Pre-fault the pages to ensure slow (eg NFS) pages 666 * don't hold up txg. 667 * Skip this if uio contains loaned arc_buf. 
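	 *
	 * Touching the user pages now (uio_prefaultpages()) takes any page
	 * faults before a transaction is assigned, so a slow fault (e.g.
	 * against an NFS-backed mapping) cannot stall an open txg later
	 * while we hold the assigned tx.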
668 */ 669 if ((uio->uio_extflg == UIO_XUIO) && 670 (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) 671 xuio = (xuio_t *)uio; 672 else 673 uio_prefaultpages(n, uio); 674 675 /* 676 * If in append mode, set the io offset pointer to eof. 677 */ 678 if (ioflag & FAPPEND) { 679 /* 680 * Obtain an appending range lock to guarantee file append 681 * semantics. We reset the write offset once we have the lock. 682 */ 683 rl = zfs_range_lock(zp, 0, n, RL_APPEND); 684 woff = rl->r_off; 685 if (rl->r_len == UINT64_MAX) { 686 /* 687 * We overlocked the file because this write will cause 688 * the file block size to increase. 689 * Note that zp_size cannot change with this lock held. 690 */ 691 woff = zp->z_size; 692 } 693 uio->uio_loffset = woff; 694 } else { 695 /* 696 * Note that if the file block size will change as a result of 697 * this write, then this range lock will lock the entire file 698 * so that we can re-write the block safely. 699 */ 700 rl = zfs_range_lock(zp, woff, n, RL_WRITER); 701 } 702 703 if (woff >= limit) { 704 zfs_range_unlock(rl); 705 ZFS_EXIT(zfsvfs); 706 return (EFBIG); 707 } 708 709 if ((woff + n) > limit || woff > (limit - n)) 710 n = limit - woff; 711 712 /* Will this write extend the file length? */ 713 write_eof = (woff + n > zp->z_size); 714 715 end_size = MAX(zp->z_size, woff + n); 716 717 /* 718 * Write the file in reasonable size chunks. Each chunk is written 719 * in a separate transaction; this keeps the intent log records small 720 * and allows us to do more fine-grained space accounting. 721 */ 722 while (n > 0) { 723 abuf = NULL; 724 woff = uio->uio_loffset; 725 again: 726 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || 727 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { 728 if (abuf != NULL) 729 dmu_return_arcbuf(abuf); 730 error = EDQUOT; 731 break; 732 } 733 734 if (xuio && abuf == NULL) { 735 ASSERT(i_iov < iovcnt); 736 aiov = &iovp[i_iov]; 737 abuf = dmu_xuio_arcbuf(xuio, i_iov); 738 dmu_xuio_clear(xuio, i_iov); 739 DTRACE_PROBE3(zfs_cp_write, int, i_iov, 740 iovec_t *, aiov, arc_buf_t *, abuf); 741 ASSERT((aiov->iov_base == abuf->b_data) || 742 ((char *)aiov->iov_base - (char *)abuf->b_data + 743 aiov->iov_len == arc_buf_size(abuf))); 744 i_iov++; 745 } else if (abuf == NULL && n >= max_blksz && 746 woff >= zp->z_size && 747 P2PHASE(woff, max_blksz) == 0 && 748 zp->z_blksz == max_blksz) { 749 /* 750 * This write covers a full block. "Borrow" a buffer 751 * from the dmu so that we can fill it before we enter 752 * a transaction. This avoids the possibility of 753 * holding up the transaction if the data copy hangs 754 * up on a pagefault (e.g., from an NFS server mapping). 755 */ 756 size_t cbytes; 757 758 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 759 max_blksz); 760 ASSERT(abuf != NULL); 761 ASSERT(arc_buf_size(abuf) == max_blksz); 762 if (error = uiocopy(abuf->b_data, max_blksz, 763 UIO_WRITE, uio, &cbytes)) { 764 dmu_return_arcbuf(abuf); 765 break; 766 } 767 ASSERT(cbytes == max_blksz); 768 } 769 770 /* 771 * Start a transaction. 
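		 *
		 * The holds cover the znode's SA attributes and the byte
		 * range being written; per Big Rule (4) above the assign
		 * uses TXG_NOWAIT, and on ERESTART we dmu_tx_wait(),
		 * abort, and retry from "again:".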
		 */
		tx = dmu_tx_create(zfsvfs->z_os);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, TXG_NOWAIT);
		if (error) {
			if (error == ERESTART) {
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto again;
			}
			dmu_tx_abort(tx);
			if (abuf != NULL)
				dmu_return_arcbuf(abuf);
			break;
		}

		/*
		 * If zfs_range_lock() over-locked we grow the blocksize
		 * and then reduce the lock range.  This will only happen
		 * on the first iteration since zfs_range_reduce() will
		 * shrink down r_len to the appropriate size.
		 */
		if (rl->r_len == UINT64_MAX) {
			uint64_t new_blksz;

			if (zp->z_blksz > max_blksz) {
				ASSERT(!ISP2(zp->z_blksz));
				new_blksz = MIN(end_size, SPA_MAXBLOCKSIZE);
			} else {
				new_blksz = MIN(end_size, max_blksz);
			}
			zfs_grow_blocksize(zp, new_blksz, tx);
			zfs_range_reduce(rl, woff, n);
		}

		/*
		 * XXX - should we really limit each write to z_max_blksz?
		 * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
		 */
		nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

		if (abuf == NULL) {
			tx_bytes = uio->uio_resid;
			error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, nbytes, tx);
			tx_bytes -= uio->uio_resid;
		} else {
			tx_bytes = nbytes;
			ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
			/*
			 * If this is not a full block write, but we are
			 * extending the file past EOF and this data starts
			 * block-aligned, use assign_arcbuf().  Otherwise,
			 * write via dmu_write().
			 */
			if (tx_bytes < max_blksz && (!write_eof ||
			    aiov->iov_base != abuf->b_data)) {
				ASSERT(xuio);
				dmu_write(zfsvfs->z_os, zp->z_id, woff,
				    aiov->iov_len, aiov->iov_base, tx);
				dmu_return_arcbuf(abuf);
				xuio_stat_wbuf_copied();
			} else {
				ASSERT(xuio || tx_bytes == max_blksz);
				dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
				    woff, abuf, tx);
			}
			ASSERT(tx_bytes <= uio->uio_resid);
			uioskip(uio, tx_bytes);
		}
		if (tx_bytes && vn_has_cached_data(vp)) {
			update_pages(vp, woff,
			    tx_bytes, zfsvfs->z_os, zp->z_id);
		}

		/*
		 * If we made no progress, we're done.  If we made even
		 * partial progress, update the znode and ZIL accordingly.
		 */
		if (tx_bytes == 0) {
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
			    (void *)&zp->z_size, sizeof (uint64_t), tx);
			dmu_tx_commit(tx);
			ASSERT(error != 0);
			break;
		}

		/*
		 * Clear Set-UID/Set-GID bits on successful write if not
		 * privileged and at least one of the execute bits is set.
		 *
		 * It would be nice to do this after all writes have
		 * been done, but that would still expose the ISUID/ISGID
		 * to another app after the partial write is committed.
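		 * Clearing them here, in the same tx as each chunk of
		 * data, at least keeps the on-disk mode consistent with
		 * whatever data has been committed so far.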
868 * 869 */ 870 mutex_enter(&zp->z_acl_lock); 871 if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) | 872 (S_IXUSR >> 6))) != 0 && 873 (zp->z_mode & (S_ISUID | S_ISGID)) != 0 && 874 secpolicy_vnode_setid_retain(cr, 875 (zp->z_mode & S_ISUID) != 0 && zp->z_uid == 0) != 0) { 876 uint64_t newmode; 877 zp->z_mode &= ~(S_ISUID | S_ISGID); 878 newmode = zp->z_mode; 879 (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), 880 (void *)&newmode, sizeof (uint64_t), tx); 881 } 882 mutex_exit(&zp->z_acl_lock); 883 884 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, 885 B_TRUE); 886 887 /* 888 * Update the file size (zp_size) if it has changed; 889 * account for possible concurrent updates. 890 */ 891 while ((end_size = zp->z_size) < uio->uio_loffset) { 892 (void) atomic_cas_64(&zp->z_size, end_size, 893 uio->uio_loffset); 894 ASSERT(error == 0); 895 } 896 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); 897 898 zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag); 899 dmu_tx_commit(tx); 900 901 if (error != 0) 902 break; 903 ASSERT(tx_bytes == nbytes); 904 n -= nbytes; 905 } 906 907 zfs_range_unlock(rl); 908 909 /* 910 * If we're in replay mode, or we made no progress, return error. 911 * Otherwise, it's at least a partial write, so it's successful. 912 */ 913 if (zfsvfs->z_replay || uio->uio_resid == start_resid) { 914 ZFS_EXIT(zfsvfs); 915 return (error); 916 } 917 918 if (ioflag & (FSYNC | FDSYNC) || 919 zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 920 zil_commit(zilog, zp->z_last_itx, zp->z_id); 921 922 ZFS_EXIT(zfsvfs); 923 return (0); 924 } 925 926 void 927 zfs_get_done(zgd_t *zgd, int error) 928 { 929 znode_t *zp = zgd->zgd_private; 930 objset_t *os = zp->z_zfsvfs->z_os; 931 932 if (zgd->zgd_db) 933 dmu_buf_rele(zgd->zgd_db, zgd); 934 935 zfs_range_unlock(zgd->zgd_rl); 936 937 /* 938 * Release the vnode asynchronously as we currently have the 939 * txg stopped from syncing. 940 */ 941 VN_RELE_ASYNC(ZTOV(zp), dsl_pool_vnrele_taskq(dmu_objset_pool(os))); 942 943 if (error == 0 && zgd->zgd_bp) 944 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 945 946 kmem_free(zgd, sizeof (zgd_t)); 947 } 948 949 #ifdef DEBUG 950 static int zil_fault_io = 0; 951 #endif 952 953 /* 954 * Get data to generate a TX_WRITE intent log record. 955 */ 956 int 957 zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 958 { 959 zfsvfs_t *zfsvfs = arg; 960 objset_t *os = zfsvfs->z_os; 961 znode_t *zp; 962 uint64_t object = lr->lr_foid; 963 uint64_t offset = lr->lr_offset; 964 uint64_t size = lr->lr_length; 965 blkptr_t *bp = &lr->lr_blkptr; 966 dmu_buf_t *db; 967 zgd_t *zgd; 968 int error = 0; 969 970 ASSERT(zio != NULL); 971 ASSERT(size != 0); 972 973 /* 974 * Nothing to do if the file has been removed 975 */ 976 if (zfs_zget(zfsvfs, object, &zp) != 0) 977 return (ENOENT); 978 if (zp->z_unlinked) { 979 /* 980 * Release the vnode asynchronously as we currently have the 981 * txg stopped from syncing. 982 */ 983 VN_RELE_ASYNC(ZTOV(zp), 984 dsl_pool_vnrele_taskq(dmu_objset_pool(os))); 985 return (ENOENT); 986 } 987 988 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP); 989 zgd->zgd_zilog = zfsvfs->z_log; 990 zgd->zgd_private = zp; 991 992 /* 993 * Write records come in two flavors: immediate and indirect. 994 * For small writes it's cheaper to store the data with the 995 * log record (immediate); for large writes it's cheaper to 996 * sync the data and get a pointer to it (indirect) so that 997 * we don't have to write the data twice. 
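	 *
	 * Roughly, the caller selects the flavor through "buf":
	 *
	 *	buf != NULL	immediate: dmu_read() the data into buf
	 *			(under a range lock) so it rides in the
	 *			log record itself.
	 *	buf == NULL	indirect: dmu_sync() the block and let the
	 *			log record carry only the block pointer.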
998 */ 999 if (buf != NULL) { /* immediate write */ 1000 zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER); 1001 /* test for truncation needs to be done while range locked */ 1002 if (offset >= zp->z_size) { 1003 error = ENOENT; 1004 } else { 1005 error = dmu_read(os, object, offset, size, buf, 1006 DMU_READ_NO_PREFETCH); 1007 } 1008 ASSERT(error == 0 || error == ENOENT); 1009 } else { /* indirect write */ 1010 /* 1011 * Have to lock the whole block to ensure when it's 1012 * written out and it's checksum is being calculated 1013 * that no one can change the data. We need to re-check 1014 * blocksize after we get the lock in case it's changed! 1015 */ 1016 for (;;) { 1017 uint64_t blkoff; 1018 size = zp->z_blksz; 1019 blkoff = ISP2(size) ? P2PHASE(offset, size) : offset; 1020 offset -= blkoff; 1021 zgd->zgd_rl = zfs_range_lock(zp, offset, size, 1022 RL_READER); 1023 if (zp->z_blksz == size) 1024 break; 1025 offset += blkoff; 1026 zfs_range_unlock(zgd->zgd_rl); 1027 } 1028 /* test for truncation needs to be done while range locked */ 1029 if (lr->lr_offset >= zp->z_size) 1030 error = ENOENT; 1031 #ifdef DEBUG 1032 if (zil_fault_io) { 1033 error = EIO; 1034 zil_fault_io = 0; 1035 } 1036 #endif 1037 if (error == 0) 1038 error = dmu_buf_hold(os, object, offset, zgd, &db, 1039 DMU_READ_NO_PREFETCH); 1040 1041 if (error == 0) { 1042 zgd->zgd_db = db; 1043 zgd->zgd_bp = bp; 1044 1045 ASSERT(db->db_offset == offset); 1046 ASSERT(db->db_size == size); 1047 1048 error = dmu_sync(zio, lr->lr_common.lrc_txg, 1049 zfs_get_done, zgd); 1050 ASSERT(error || lr->lr_length <= zp->z_blksz); 1051 1052 /* 1053 * On success, we need to wait for the write I/O 1054 * initiated by dmu_sync() to complete before we can 1055 * release this dbuf. We will finish everything up 1056 * in the zfs_get_done() callback. 1057 */ 1058 if (error == 0) 1059 return (0); 1060 1061 if (error == EALREADY) { 1062 lr->lr_common.lrc_txtype = TX_WRITE2; 1063 error = 0; 1064 } 1065 } 1066 } 1067 1068 zfs_get_done(zgd, error); 1069 1070 return (error); 1071 } 1072 1073 /*ARGSUSED*/ 1074 static int 1075 zfs_access(vnode_t *vp, int mode, int flag, cred_t *cr, 1076 caller_context_t *ct) 1077 { 1078 znode_t *zp = VTOZ(vp); 1079 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 1080 int error; 1081 1082 ZFS_ENTER(zfsvfs); 1083 ZFS_VERIFY_ZP(zp); 1084 1085 if (flag & V_ACE_MASK) 1086 error = zfs_zaccess(zp, mode, flag, B_FALSE, cr); 1087 else 1088 error = zfs_zaccess_rwx(zp, mode, flag, cr); 1089 1090 ZFS_EXIT(zfsvfs); 1091 return (error); 1092 } 1093 1094 /* 1095 * If vnode is for a device return a specfs vnode instead. 1096 */ 1097 static int 1098 specvp_check(vnode_t **vpp, cred_t *cr) 1099 { 1100 int error = 0; 1101 1102 if (IS_DEVVP(*vpp)) { 1103 struct vnode *svp; 1104 1105 svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr); 1106 VN_RELE(*vpp); 1107 if (svp == NULL) 1108 error = ENOSYS; 1109 *vpp = svp; 1110 } 1111 return (error); 1112 } 1113 1114 1115 /* 1116 * Lookup an entry in a directory, or an extended attribute directory. 1117 * If it exists, return a held vnode reference for it. 1118 * 1119 * IN: dvp - vnode of directory to search. 1120 * nm - name of entry to lookup. 1121 * pnp - full pathname to lookup [UNUSED]. 1122 * flags - LOOKUP_XATTR set if looking for an attribute. 1123 * rdir - root directory vnode [UNUSED]. 1124 * cr - credentials of caller. 1125 * ct - caller context 1126 * direntflags - directory lookup flags 1127 * realpnp - returned pathname. 1128 * 1129 * OUT: vpp - vnode of located entry, NULL if not found. 
1130 * 1131 * RETURN: 0 if success 1132 * error code if failure 1133 * 1134 * Timestamps: 1135 * NA 1136 */ 1137 /* ARGSUSED */ 1138 static int 1139 zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct pathname *pnp, 1140 int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct, 1141 int *direntflags, pathname_t *realpnp) 1142 { 1143 znode_t *zdp = VTOZ(dvp); 1144 zfsvfs_t *zfsvfs = zdp->z_zfsvfs; 1145 int error = 0; 1146 1147 /* fast path */ 1148 if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) { 1149 1150 if (dvp->v_type != VDIR) { 1151 return (ENOTDIR); 1152 } else if (zdp->z_sa_hdl == NULL) { 1153 return (EIO); 1154 } 1155 1156 if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) { 1157 error = zfs_fastaccesschk_execute(zdp, cr); 1158 if (!error) { 1159 *vpp = dvp; 1160 VN_HOLD(*vpp); 1161 return (0); 1162 } 1163 return (error); 1164 } else { 1165 vnode_t *tvp = dnlc_lookup(dvp, nm); 1166 1167 if (tvp) { 1168 error = zfs_fastaccesschk_execute(zdp, cr); 1169 if (error) { 1170 VN_RELE(tvp); 1171 return (error); 1172 } 1173 if (tvp == DNLC_NO_VNODE) { 1174 VN_RELE(tvp); 1175 return (ENOENT); 1176 } else { 1177 *vpp = tvp; 1178 return (specvp_check(vpp, cr)); 1179 } 1180 } 1181 } 1182 } 1183 1184 DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp, char *, nm); 1185 1186 ZFS_ENTER(zfsvfs); 1187 ZFS_VERIFY_ZP(zdp); 1188 1189 *vpp = NULL; 1190 1191 if (flags & LOOKUP_XATTR) { 1192 /* 1193 * If the xattr property is off, refuse the lookup request. 1194 */ 1195 if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) { 1196 ZFS_EXIT(zfsvfs); 1197 return (EINVAL); 1198 } 1199 1200 /* 1201 * We don't allow recursive attributes.. 1202 * Maybe someday we will. 1203 */ 1204 if (zdp->z_pflags & ZFS_XATTR) { 1205 ZFS_EXIT(zfsvfs); 1206 return (EINVAL); 1207 } 1208 1209 if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) { 1210 ZFS_EXIT(zfsvfs); 1211 return (error); 1212 } 1213 1214 /* 1215 * Do we have permission to get into attribute directory? 1216 */ 1217 1218 if (error = zfs_zaccess(VTOZ(*vpp), ACE_EXECUTE, 0, 1219 B_FALSE, cr)) { 1220 VN_RELE(*vpp); 1221 *vpp = NULL; 1222 } 1223 1224 ZFS_EXIT(zfsvfs); 1225 return (error); 1226 } 1227 1228 if (dvp->v_type != VDIR) { 1229 ZFS_EXIT(zfsvfs); 1230 return (ENOTDIR); 1231 } 1232 1233 /* 1234 * Check accessibility of directory. 1235 */ 1236 1237 if (error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr)) { 1238 ZFS_EXIT(zfsvfs); 1239 return (error); 1240 } 1241 1242 if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm), 1243 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1244 ZFS_EXIT(zfsvfs); 1245 return (EILSEQ); 1246 } 1247 1248 error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp); 1249 if (error == 0) 1250 error = specvp_check(vpp, cr); 1251 1252 ZFS_EXIT(zfsvfs); 1253 return (error); 1254 } 1255 1256 /* 1257 * Attempt to create a new entry in a directory. If the entry 1258 * already exists, truncate the file if permissible, else return 1259 * an error. Return the vp of the created or trunc'd file. 1260 * 1261 * IN: dvp - vnode of directory to put new file entry in. 1262 * name - name of new file entry. 1263 * vap - attributes of new file. 1264 * excl - flag indicating exclusive or non-exclusive mode. 1265 * mode - mode to open file with. 1266 * cr - credentials of caller. 1267 * flag - large file flag [UNUSED]. 1268 * ct - caller context 1269 * vsecp - ACL to be set 1270 * 1271 * OUT: vpp - vnode of created or trunc'd entry. 
1272 * 1273 * RETURN: 0 if success 1274 * error code if failure 1275 * 1276 * Timestamps: 1277 * dvp - ctime|mtime updated if new entry created 1278 * vp - ctime|mtime always, atime if new 1279 */ 1280 1281 /* ARGSUSED */ 1282 static int 1283 zfs_create(vnode_t *dvp, char *name, vattr_t *vap, vcexcl_t excl, 1284 int mode, vnode_t **vpp, cred_t *cr, int flag, caller_context_t *ct, 1285 vsecattr_t *vsecp) 1286 { 1287 znode_t *zp, *dzp = VTOZ(dvp); 1288 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1289 zilog_t *zilog; 1290 objset_t *os; 1291 zfs_dirlock_t *dl; 1292 dmu_tx_t *tx; 1293 int error; 1294 ksid_t *ksid; 1295 uid_t uid; 1296 gid_t gid = crgetgid(cr); 1297 zfs_acl_ids_t acl_ids; 1298 boolean_t fuid_dirtied; 1299 1300 /* 1301 * If we have an ephemeral id, ACL, or XVATTR then 1302 * make sure file system is at proper version 1303 */ 1304 1305 ksid = crgetsid(cr, KSID_OWNER); 1306 if (ksid) 1307 uid = ksid_getid(ksid); 1308 else 1309 uid = crgetuid(cr); 1310 1311 if (zfsvfs->z_use_fuids == B_FALSE && 1312 (vsecp || (vap->va_mask & AT_XVATTR) || 1313 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1314 return (EINVAL); 1315 1316 ZFS_ENTER(zfsvfs); 1317 ZFS_VERIFY_ZP(dzp); 1318 os = zfsvfs->z_os; 1319 zilog = zfsvfs->z_log; 1320 1321 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 1322 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1323 ZFS_EXIT(zfsvfs); 1324 return (EILSEQ); 1325 } 1326 1327 if (vap->va_mask & AT_XVATTR) { 1328 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1329 crgetuid(cr), cr, vap->va_type)) != 0) { 1330 ZFS_EXIT(zfsvfs); 1331 return (error); 1332 } 1333 } 1334 top: 1335 *vpp = NULL; 1336 1337 if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr)) 1338 vap->va_mode &= ~VSVTX; 1339 1340 if (*name == '\0') { 1341 /* 1342 * Null component name refers to the directory itself. 1343 */ 1344 VN_HOLD(dvp); 1345 zp = dzp; 1346 dl = NULL; 1347 error = 0; 1348 } else { 1349 /* possible VN_HOLD(zp) */ 1350 int zflg = 0; 1351 1352 if (flag & FIGNORECASE) 1353 zflg |= ZCILOOK; 1354 1355 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1356 NULL, NULL); 1357 if (error) { 1358 if (strcmp(name, "..") == 0) 1359 error = EISDIR; 1360 ZFS_EXIT(zfsvfs); 1361 return (error); 1362 } 1363 } 1364 1365 if (zp == NULL) { 1366 uint64_t txtype; 1367 1368 /* 1369 * Create a new file object and update the directory 1370 * to reference it. 1371 */ 1372 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 1373 goto out; 1374 } 1375 1376 /* 1377 * We only support the creation of regular files in 1378 * extended attribute directories. 
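		 * Any other type (e.g. a directory or a device node)
		 * inside an extended attribute directory is rejected
		 * with EINVAL just below.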
1379 */ 1380 1381 if ((dzp->z_pflags & ZFS_XATTR) && 1382 (vap->va_type != VREG)) { 1383 error = EINVAL; 1384 goto out; 1385 } 1386 1387 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, vsecp, 1388 &acl_ids)) != 0) 1389 goto out; 1390 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 1391 zfs_acl_ids_free(&acl_ids); 1392 error = EDQUOT; 1393 goto out; 1394 } 1395 1396 tx = dmu_tx_create(os); 1397 1398 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + 1399 ZFS_SA_BASE_ATTR_SIZE); 1400 1401 fuid_dirtied = zfsvfs->z_fuid_dirty; 1402 if (fuid_dirtied) 1403 zfs_fuid_txhold(zfsvfs, tx); 1404 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 1405 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); 1406 if (!zfsvfs->z_use_sa && 1407 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 1408 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 1409 0, acl_ids.z_aclp->z_acl_bytes); 1410 } 1411 error = dmu_tx_assign(tx, TXG_NOWAIT); 1412 if (error) { 1413 zfs_acl_ids_free(&acl_ids); 1414 zfs_dirent_unlock(dl); 1415 if (error == ERESTART) { 1416 dmu_tx_wait(tx); 1417 dmu_tx_abort(tx); 1418 goto top; 1419 } 1420 dmu_tx_abort(tx); 1421 ZFS_EXIT(zfsvfs); 1422 return (error); 1423 } 1424 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); 1425 1426 if (fuid_dirtied) 1427 zfs_fuid_sync(zfsvfs, tx); 1428 1429 (void) zfs_link_create(dl, zp, tx, ZNEW); 1430 txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap); 1431 if (flag & FIGNORECASE) 1432 txtype |= TX_CI; 1433 zfs_log_create(zilog, tx, txtype, dzp, zp, name, 1434 vsecp, acl_ids.z_fuidp, vap); 1435 zfs_acl_ids_free(&acl_ids); 1436 dmu_tx_commit(tx); 1437 } else { 1438 int aflags = (flag & FAPPEND) ? V_APPEND : 0; 1439 1440 /* 1441 * A directory entry already exists for this name. 1442 */ 1443 /* 1444 * Can't truncate an existing file if in exclusive mode. 1445 */ 1446 if (excl == EXCL) { 1447 error = EEXIST; 1448 goto out; 1449 } 1450 /* 1451 * Can't open a directory for writing. 1452 */ 1453 if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) { 1454 error = EISDIR; 1455 goto out; 1456 } 1457 /* 1458 * Verify requested access to file. 1459 */ 1460 if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) { 1461 goto out; 1462 } 1463 1464 mutex_enter(&dzp->z_lock); 1465 dzp->z_seq++; 1466 mutex_exit(&dzp->z_lock); 1467 1468 /* 1469 * Truncate regular files if requested. 1470 */ 1471 if ((ZTOV(zp)->v_type == VREG) && 1472 (vap->va_mask & AT_SIZE) && (vap->va_size == 0)) { 1473 /* we can't hold any locks when calling zfs_freesp() */ 1474 zfs_dirent_unlock(dl); 1475 dl = NULL; 1476 error = zfs_freesp(zp, 0, 0, mode, TRUE); 1477 if (error == 0) { 1478 vnevent_create(ZTOV(zp), ct); 1479 } 1480 } 1481 } 1482 out: 1483 1484 if (dl) 1485 zfs_dirent_unlock(dl); 1486 1487 if (error) { 1488 if (zp) 1489 VN_RELE(ZTOV(zp)); 1490 } else { 1491 *vpp = ZTOV(zp); 1492 error = specvp_check(vpp, cr); 1493 } 1494 1495 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 1496 zil_commit(zilog, UINT64_MAX, 0); 1497 1498 ZFS_EXIT(zfsvfs); 1499 return (error); 1500 } 1501 1502 /* 1503 * Remove an entry from a directory. 1504 * 1505 * IN: dvp - vnode of directory to remove entry from. 1506 * name - name of entry to remove. 1507 * cr - credentials of caller. 
1508 * ct - caller context 1509 * flags - case flags 1510 * 1511 * RETURN: 0 if success 1512 * error code if failure 1513 * 1514 * Timestamps: 1515 * dvp - ctime|mtime 1516 * vp - ctime (if nlink > 0) 1517 */ 1518 1519 uint64_t null_xattr = 0; 1520 1521 /*ARGSUSED*/ 1522 static int 1523 zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, 1524 int flags) 1525 { 1526 znode_t *zp, *dzp = VTOZ(dvp); 1527 znode_t *xzp = NULL; 1528 vnode_t *vp; 1529 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1530 zilog_t *zilog; 1531 uint64_t acl_obj, xattr_obj = 0; 1532 uint64_t xattr_obj_unlinked = 0; 1533 zfs_dirlock_t *dl; 1534 dmu_tx_t *tx; 1535 boolean_t may_delete_now, delete_now = FALSE; 1536 boolean_t unlinked, toobig = FALSE; 1537 uint64_t txtype; 1538 pathname_t *realnmp = NULL; 1539 pathname_t realnm; 1540 int error; 1541 int zflg = ZEXISTS; 1542 1543 ZFS_ENTER(zfsvfs); 1544 ZFS_VERIFY_ZP(dzp); 1545 zilog = zfsvfs->z_log; 1546 1547 if (flags & FIGNORECASE) { 1548 zflg |= ZCILOOK; 1549 pn_alloc(&realnm); 1550 realnmp = &realnm; 1551 } 1552 1553 top: 1554 /* 1555 * Attempt to lock directory; fail if entry doesn't exist. 1556 */ 1557 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1558 NULL, realnmp)) { 1559 if (realnmp) 1560 pn_free(realnmp); 1561 ZFS_EXIT(zfsvfs); 1562 return (error); 1563 } 1564 1565 vp = ZTOV(zp); 1566 1567 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 1568 goto out; 1569 } 1570 1571 /* 1572 * Need to use rmdir for removing directories. 1573 */ 1574 if (vp->v_type == VDIR) { 1575 error = EPERM; 1576 goto out; 1577 } 1578 1579 vnevent_remove(vp, dvp, name, ct); 1580 1581 if (realnmp) 1582 dnlc_remove(dvp, realnmp->pn_buf); 1583 else 1584 dnlc_remove(dvp, name); 1585 1586 mutex_enter(&vp->v_lock); 1587 may_delete_now = vp->v_count == 1 && !vn_has_cached_data(vp); 1588 mutex_exit(&vp->v_lock); 1589 1590 /* 1591 * We may delete the znode now, or we may put it in the unlinked set; 1592 * it depends on whether we're the last link, and on whether there are 1593 * other holds on the vnode. So we dmu_tx_hold() the right things to 1594 * allow for either case. 1595 */ 1596 tx = dmu_tx_create(zfsvfs->z_os); 1597 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 1598 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 1599 zfs_sa_upgrade_txholds(tx, zp); 1600 zfs_sa_upgrade_txholds(tx, dzp); 1601 if (may_delete_now) { 1602 toobig = 1603 zp->z_size > zp->z_blksz * DMU_MAX_DELETEBLKCNT; 1604 /* if the file is too big, only hold_free a token amount */ 1605 dmu_tx_hold_free(tx, zp->z_id, 0, 1606 (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END)); 1607 } 1608 1609 /* are there any extended attributes? 
*/ 1610 error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), 1611 &xattr_obj, sizeof (xattr_obj)); 1612 if (xattr_obj) { 1613 error = zfs_zget(zfsvfs, xattr_obj, &xzp); 1614 ASSERT3U(error, ==, 0); 1615 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); 1616 dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE); 1617 } 1618 1619 /* are there any additional acls */ 1620 if ((acl_obj = ZFS_EXTERNAL_ACL(zp)) != 0 && may_delete_now) 1621 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); 1622 1623 /* charge as an update -- would be nice not to charge at all */ 1624 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 1625 1626 error = dmu_tx_assign(tx, TXG_NOWAIT); 1627 if (error) { 1628 zfs_dirent_unlock(dl); 1629 VN_RELE(vp); 1630 if (error == ERESTART) { 1631 dmu_tx_wait(tx); 1632 dmu_tx_abort(tx); 1633 goto top; 1634 } 1635 if (realnmp) 1636 pn_free(realnmp); 1637 dmu_tx_abort(tx); 1638 ZFS_EXIT(zfsvfs); 1639 return (error); 1640 } 1641 1642 /* 1643 * Remove the directory entry. 1644 */ 1645 error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked); 1646 1647 if (error) { 1648 dmu_tx_commit(tx); 1649 goto out; 1650 } 1651 1652 if (unlinked) { 1653 1654 mutex_enter(&vp->v_lock); 1655 1656 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), 1657 &xattr_obj_unlinked, sizeof (xattr_obj_unlinked)); 1658 delete_now = may_delete_now && !toobig && 1659 vp->v_count == 1 && !vn_has_cached_data(vp) && 1660 xattr_obj == xattr_obj_unlinked && ZFS_EXTERNAL_ACL(zp) == 1661 acl_obj; 1662 mutex_exit(&vp->v_lock); 1663 } 1664 1665 if (delete_now) { 1666 if (xattr_obj_unlinked) { 1667 ASSERT3U(xzp->z_links, ==, 2); 1668 mutex_enter(&xzp->z_lock); 1669 xzp->z_unlinked = 1; 1670 xzp->z_links = 0; 1671 error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs), 1672 &xzp->z_links, sizeof (xzp->z_links), tx); 1673 ASSERT3U(error, ==, 0); 1674 mutex_exit(&xzp->z_lock); 1675 zfs_unlinked_add(xzp, tx); 1676 if (zp->z_is_sa) 1677 error = sa_remove(zp->z_sa_hdl, 1678 SA_ZPL_XATTR(zfsvfs), tx); 1679 else 1680 error = sa_update(zp->z_sa_hdl, 1681 SA_ZPL_XATTR(zfsvfs), &null_xattr, 1682 sizeof (uint64_t), tx); 1683 ASSERT3U(error, ==, 0); 1684 } 1685 mutex_enter(&zp->z_lock); 1686 mutex_enter(&vp->v_lock); 1687 vp->v_count--; 1688 ASSERT3U(vp->v_count, ==, 0); 1689 mutex_exit(&vp->v_lock); 1690 mutex_exit(&zp->z_lock); 1691 zfs_znode_delete(zp, tx); 1692 } else if (unlinked) { 1693 zfs_unlinked_add(zp, tx); 1694 } 1695 1696 txtype = TX_REMOVE; 1697 if (flags & FIGNORECASE) 1698 txtype |= TX_CI; 1699 zfs_log_remove(zilog, tx, txtype, dzp, name); 1700 1701 dmu_tx_commit(tx); 1702 out: 1703 if (realnmp) 1704 pn_free(realnmp); 1705 1706 zfs_dirent_unlock(dl); 1707 1708 if (!delete_now) 1709 VN_RELE(vp); 1710 if (xzp) 1711 VN_RELE(ZTOV(xzp)); 1712 1713 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 1714 zil_commit(zilog, UINT64_MAX, 0); 1715 1716 ZFS_EXIT(zfsvfs); 1717 return (error); 1718 } 1719 1720 /* 1721 * Create a new directory and insert it into dvp using the name 1722 * provided. Return a pointer to the inserted directory. 1723 * 1724 * IN: dvp - vnode of directory to add subdir to. 1725 * dirname - name of new directory. 1726 * vap - attributes of new directory. 1727 * cr - credentials of caller. 1728 * ct - caller context 1729 * vsecp - ACL to be set 1730 * 1731 * OUT: vpp - vnode of created directory. 
1732 * 1733 * RETURN: 0 if success 1734 * error code if failure 1735 * 1736 * Timestamps: 1737 * dvp - ctime|mtime updated 1738 * vp - ctime|mtime|atime updated 1739 */ 1740 /*ARGSUSED*/ 1741 static int 1742 zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr, 1743 caller_context_t *ct, int flags, vsecattr_t *vsecp) 1744 { 1745 znode_t *zp, *dzp = VTOZ(dvp); 1746 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1747 zilog_t *zilog; 1748 zfs_dirlock_t *dl; 1749 uint64_t txtype; 1750 dmu_tx_t *tx; 1751 int error; 1752 int zf = ZNEW; 1753 ksid_t *ksid; 1754 uid_t uid; 1755 gid_t gid = crgetgid(cr); 1756 zfs_acl_ids_t acl_ids; 1757 boolean_t fuid_dirtied; 1758 1759 ASSERT(vap->va_type == VDIR); 1760 1761 /* 1762 * If we have an ephemeral id, ACL, or XVATTR then 1763 * make sure file system is at proper version 1764 */ 1765 1766 ksid = crgetsid(cr, KSID_OWNER); 1767 if (ksid) 1768 uid = ksid_getid(ksid); 1769 else 1770 uid = crgetuid(cr); 1771 if (zfsvfs->z_use_fuids == B_FALSE && 1772 (vsecp || (vap->va_mask & AT_XVATTR) || 1773 IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid))) 1774 return (EINVAL); 1775 1776 ZFS_ENTER(zfsvfs); 1777 ZFS_VERIFY_ZP(dzp); 1778 zilog = zfsvfs->z_log; 1779 1780 if (dzp->z_pflags & ZFS_XATTR) { 1781 ZFS_EXIT(zfsvfs); 1782 return (EINVAL); 1783 } 1784 1785 if (zfsvfs->z_utf8 && u8_validate(dirname, 1786 strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 1787 ZFS_EXIT(zfsvfs); 1788 return (EILSEQ); 1789 } 1790 if (flags & FIGNORECASE) 1791 zf |= ZCILOOK; 1792 1793 if (vap->va_mask & AT_XVATTR) 1794 if ((error = secpolicy_xvattr((xvattr_t *)vap, 1795 crgetuid(cr), cr, vap->va_type)) != 0) { 1796 ZFS_EXIT(zfsvfs); 1797 return (error); 1798 } 1799 1800 /* 1801 * First make sure the new directory doesn't exist. 1802 */ 1803 top: 1804 *vpp = NULL; 1805 1806 if (error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf, 1807 NULL, NULL)) { 1808 ZFS_EXIT(zfsvfs); 1809 return (error); 1810 } 1811 1812 if (error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr)) { 1813 zfs_dirent_unlock(dl); 1814 ZFS_EXIT(zfsvfs); 1815 return (error); 1816 } 1817 1818 if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, vsecp, 1819 &acl_ids)) != 0) { 1820 zfs_dirent_unlock(dl); 1821 ZFS_EXIT(zfsvfs); 1822 return (error); 1823 } 1824 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 1825 zfs_acl_ids_free(&acl_ids); 1826 zfs_dirent_unlock(dl); 1827 ZFS_EXIT(zfsvfs); 1828 return (EDQUOT); 1829 } 1830 1831 /* 1832 * Add a new entry to the directory. 1833 */ 1834 tx = dmu_tx_create(zfsvfs->z_os); 1835 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname); 1836 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); 1837 fuid_dirtied = zfsvfs->z_fuid_dirty; 1838 if (fuid_dirtied) 1839 zfs_fuid_txhold(zfsvfs, tx); 1840 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 1841 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 1842 acl_ids.z_aclp->z_acl_bytes); 1843 } 1844 1845 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + 1846 ZFS_SA_BASE_ATTR_SIZE); 1847 1848 error = dmu_tx_assign(tx, TXG_NOWAIT); 1849 if (error) { 1850 zfs_acl_ids_free(&acl_ids); 1851 zfs_dirent_unlock(dl); 1852 if (error == ERESTART) { 1853 dmu_tx_wait(tx); 1854 dmu_tx_abort(tx); 1855 goto top; 1856 } 1857 dmu_tx_abort(tx); 1858 ZFS_EXIT(zfsvfs); 1859 return (error); 1860 } 1861 1862 /* 1863 * Create new node. 1864 */ 1865 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); 1866 1867 if (fuid_dirtied) 1868 zfs_fuid_sync(zfsvfs, tx); 1869 1870 /* 1871 * Now put new name in parent dir. 
1872 */ 1873 (void) zfs_link_create(dl, zp, tx, ZNEW); 1874 1875 *vpp = ZTOV(zp); 1876 1877 txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap); 1878 if (flags & FIGNORECASE) 1879 txtype |= TX_CI; 1880 zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp, 1881 acl_ids.z_fuidp, vap); 1882 1883 zfs_acl_ids_free(&acl_ids); 1884 1885 dmu_tx_commit(tx); 1886 1887 zfs_dirent_unlock(dl); 1888 1889 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 1890 zil_commit(zilog, UINT64_MAX, 0); 1891 1892 ZFS_EXIT(zfsvfs); 1893 return (0); 1894 } 1895 1896 /* 1897 * Remove a directory subdir entry. If the current working 1898 * directory is the same as the subdir to be removed, the 1899 * remove will fail. 1900 * 1901 * IN: dvp - vnode of directory to remove from. 1902 * name - name of directory to be removed. 1903 * cwd - vnode of current working directory. 1904 * cr - credentials of caller. 1905 * ct - caller context 1906 * flags - case flags 1907 * 1908 * RETURN: 0 if success 1909 * error code if failure 1910 * 1911 * Timestamps: 1912 * dvp - ctime|mtime updated 1913 */ 1914 /*ARGSUSED*/ 1915 static int 1916 zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr, 1917 caller_context_t *ct, int flags) 1918 { 1919 znode_t *dzp = VTOZ(dvp); 1920 znode_t *zp; 1921 vnode_t *vp; 1922 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 1923 zilog_t *zilog; 1924 zfs_dirlock_t *dl; 1925 dmu_tx_t *tx; 1926 int error; 1927 int zflg = ZEXISTS; 1928 1929 ZFS_ENTER(zfsvfs); 1930 ZFS_VERIFY_ZP(dzp); 1931 zilog = zfsvfs->z_log; 1932 1933 if (flags & FIGNORECASE) 1934 zflg |= ZCILOOK; 1935 top: 1936 zp = NULL; 1937 1938 /* 1939 * Attempt to lock directory; fail if entry doesn't exist. 1940 */ 1941 if (error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, 1942 NULL, NULL)) { 1943 ZFS_EXIT(zfsvfs); 1944 return (error); 1945 } 1946 1947 vp = ZTOV(zp); 1948 1949 if (error = zfs_zaccess_delete(dzp, zp, cr)) { 1950 goto out; 1951 } 1952 1953 if (vp->v_type != VDIR) { 1954 error = ENOTDIR; 1955 goto out; 1956 } 1957 1958 if (vp == cwd) { 1959 error = EINVAL; 1960 goto out; 1961 } 1962 1963 vnevent_rmdir(vp, dvp, name, ct); 1964 1965 /* 1966 * Grab a lock on the directory to make sure that noone is 1967 * trying to add (or lookup) entries while we are removing it. 1968 */ 1969 rw_enter(&zp->z_name_lock, RW_WRITER); 1970 1971 /* 1972 * Grab a lock on the parent pointer to make sure we play well 1973 * with the treewalk and directory rename code. 
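	 * (The rename code, for example, walks up z_parent pointers while
	 * verifying that a directory is not being moved underneath one of
	 * its own descendants; holding z_parent_lock here keeps that walk
	 * from racing with this removal.)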
1974 */ 1975 rw_enter(&zp->z_parent_lock, RW_WRITER); 1976 1977 tx = dmu_tx_create(zfsvfs->z_os); 1978 dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name); 1979 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 1980 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 1981 zfs_sa_upgrade_txholds(tx, zp); 1982 zfs_sa_upgrade_txholds(tx, dzp); 1983 error = dmu_tx_assign(tx, TXG_NOWAIT); 1984 if (error) { 1985 rw_exit(&zp->z_parent_lock); 1986 rw_exit(&zp->z_name_lock); 1987 zfs_dirent_unlock(dl); 1988 VN_RELE(vp); 1989 if (error == ERESTART) { 1990 dmu_tx_wait(tx); 1991 dmu_tx_abort(tx); 1992 goto top; 1993 } 1994 dmu_tx_abort(tx); 1995 ZFS_EXIT(zfsvfs); 1996 return (error); 1997 } 1998 1999 error = zfs_link_destroy(dl, zp, tx, zflg, NULL); 2000 2001 if (error == 0) { 2002 uint64_t txtype = TX_RMDIR; 2003 if (flags & FIGNORECASE) 2004 txtype |= TX_CI; 2005 zfs_log_remove(zilog, tx, txtype, dzp, name); 2006 } 2007 2008 dmu_tx_commit(tx); 2009 2010 rw_exit(&zp->z_parent_lock); 2011 rw_exit(&zp->z_name_lock); 2012 out: 2013 zfs_dirent_unlock(dl); 2014 2015 VN_RELE(vp); 2016 2017 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 2018 zil_commit(zilog, UINT64_MAX, 0); 2019 2020 ZFS_EXIT(zfsvfs); 2021 return (error); 2022 } 2023 2024 /* 2025 * Read as many directory entries as will fit into the provided 2026 * buffer from the given directory cursor position (specified in 2027 * the uio structure. 2028 * 2029 * IN: vp - vnode of directory to read. 2030 * uio - structure supplying read location, range info, 2031 * and return buffer. 2032 * cr - credentials of caller. 2033 * ct - caller context 2034 * flags - case flags 2035 * 2036 * OUT: uio - updated offset and range, buffer filled. 2037 * eofp - set to true if end-of-file detected. 2038 * 2039 * RETURN: 0 if success 2040 * error code if failure 2041 * 2042 * Timestamps: 2043 * vp - atime updated 2044 * 2045 * Note that the low 4 bits of the cookie returned by zap is always zero. 2046 * This allows us to use the low range for "special" directory entries: 2047 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem, 2048 * we use the offset 2 for the '.zfs' directory. 2049 */ 2050 /* ARGSUSED */ 2051 static int 2052 zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, 2053 caller_context_t *ct, int flags) 2054 { 2055 znode_t *zp = VTOZ(vp); 2056 iovec_t *iovp; 2057 edirent_t *eodp; 2058 dirent64_t *odp; 2059 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2060 objset_t *os; 2061 caddr_t outbuf; 2062 size_t bufsize; 2063 zap_cursor_t zc; 2064 zap_attribute_t zap; 2065 uint_t bytes_wanted; 2066 uint64_t offset; /* must be unsigned; checks for < 1 */ 2067 uint64_t parent; 2068 int local_eof; 2069 int outcount; 2070 int error; 2071 uint8_t prefetch; 2072 boolean_t check_sysattrs; 2073 2074 ZFS_ENTER(zfsvfs); 2075 ZFS_VERIFY_ZP(zp); 2076 2077 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), 2078 &parent, sizeof (parent))) != 0) { 2079 ZFS_EXIT(zfsvfs); 2080 return (error); 2081 } 2082 2083 /* 2084 * If we are not given an eof variable, 2085 * use a local one. 2086 */ 2087 if (eofp == NULL) 2088 eofp = &local_eof; 2089 2090 /* 2091 * Check for valid iov_len. 
2092 */ 2093 if (uio->uio_iov->iov_len <= 0) { 2094 ZFS_EXIT(zfsvfs); 2095 return (EINVAL); 2096 } 2097 2098 /* 2099 * Quit if directory has been removed (posix) 2100 */ 2101 if ((*eofp = zp->z_unlinked) != 0) { 2102 ZFS_EXIT(zfsvfs); 2103 return (0); 2104 } 2105 2106 error = 0; 2107 os = zfsvfs->z_os; 2108 offset = uio->uio_loffset; 2109 prefetch = zp->z_zn_prefetch; 2110 2111 /* 2112 * Initialize the iterator cursor. 2113 */ 2114 if (offset <= 3) { 2115 /* 2116 * Start iteration from the beginning of the directory. 2117 */ 2118 zap_cursor_init(&zc, os, zp->z_id); 2119 } else { 2120 /* 2121 * The offset is a serialized cursor. 2122 */ 2123 zap_cursor_init_serialized(&zc, os, zp->z_id, offset); 2124 } 2125 2126 /* 2127 * Get space to change directory entries into fs independent format. 2128 */ 2129 iovp = uio->uio_iov; 2130 bytes_wanted = iovp->iov_len; 2131 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) { 2132 bufsize = bytes_wanted; 2133 outbuf = kmem_alloc(bufsize, KM_SLEEP); 2134 odp = (struct dirent64 *)outbuf; 2135 } else { 2136 bufsize = bytes_wanted; 2137 odp = (struct dirent64 *)iovp->iov_base; 2138 } 2139 eodp = (struct edirent *)odp; 2140 2141 /* 2142 * If this VFS supports the system attribute view interface; and 2143 * we're looking at an extended attribute directory; and we care 2144 * about normalization conflicts on this vfs; then we must check 2145 * for normalization conflicts with the sysattr name space. 2146 */ 2147 check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && 2148 (vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm && 2149 (flags & V_RDDIR_ENTFLAGS); 2150 2151 /* 2152 * Transform to file-system independent format 2153 */ 2154 outcount = 0; 2155 while (outcount < bytes_wanted) { 2156 ino64_t objnum; 2157 ushort_t reclen; 2158 off64_t *next; 2159 2160 /* 2161 * Special case `.', `..', and `.zfs'. 2162 */ 2163 if (offset == 0) { 2164 (void) strcpy(zap.za_name, "."); 2165 zap.za_normalization_conflict = 0; 2166 objnum = zp->z_id; 2167 } else if (offset == 1) { 2168 (void) strcpy(zap.za_name, ".."); 2169 zap.za_normalization_conflict = 0; 2170 objnum = parent; 2171 } else if (offset == 2 && zfs_show_ctldir(zp)) { 2172 (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME); 2173 zap.za_normalization_conflict = 0; 2174 objnum = ZFSCTL_INO_ROOT; 2175 } else { 2176 /* 2177 * Grab next entry. 
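			 * zap_cursor_retrieve() returns ENOENT once the
			 * cursor is past the last entry; that case is
			 * reported through *eofp below rather than as an
			 * error.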
2178 */ 2179 if (error = zap_cursor_retrieve(&zc, &zap)) { 2180 if ((*eofp = (error == ENOENT)) != 0) 2181 break; 2182 else 2183 goto update; 2184 } 2185 2186 if (zap.za_integer_length != 8 || 2187 zap.za_num_integers != 1) { 2188 cmn_err(CE_WARN, "zap_readdir: bad directory " 2189 "entry, obj = %lld, offset = %lld\n", 2190 (u_longlong_t)zp->z_id, 2191 (u_longlong_t)offset); 2192 error = ENXIO; 2193 goto update; 2194 } 2195 2196 objnum = ZFS_DIRENT_OBJ(zap.za_first_integer); 2197 /* 2198 * MacOS X can extract the object type here such as: 2199 * uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer); 2200 */ 2201 2202 if (check_sysattrs && !zap.za_normalization_conflict) { 2203 zap.za_normalization_conflict = 2204 xattr_sysattr_casechk(zap.za_name); 2205 } 2206 } 2207 2208 if (flags & V_RDDIR_ACCFILTER) { 2209 /* 2210 * If we have no access at all, don't include 2211 * this entry in the returned information 2212 */ 2213 znode_t *ezp; 2214 if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0) 2215 goto skip_entry; 2216 if (!zfs_has_access(ezp, cr)) { 2217 VN_RELE(ZTOV(ezp)); 2218 goto skip_entry; 2219 } 2220 VN_RELE(ZTOV(ezp)); 2221 } 2222 2223 if (flags & V_RDDIR_ENTFLAGS) 2224 reclen = EDIRENT_RECLEN(strlen(zap.za_name)); 2225 else 2226 reclen = DIRENT64_RECLEN(strlen(zap.za_name)); 2227 2228 /* 2229 * Will this entry fit in the buffer? 2230 */ 2231 if (outcount + reclen > bufsize) { 2232 /* 2233 * Did we manage to fit anything in the buffer? 2234 */ 2235 if (!outcount) { 2236 error = EINVAL; 2237 goto update; 2238 } 2239 break; 2240 } 2241 if (flags & V_RDDIR_ENTFLAGS) { 2242 /* 2243 * Add extended flag entry: 2244 */ 2245 eodp->ed_ino = objnum; 2246 eodp->ed_reclen = reclen; 2247 /* NOTE: ed_off is the offset for the *next* entry */ 2248 next = &(eodp->ed_off); 2249 eodp->ed_eflags = zap.za_normalization_conflict ? 2250 ED_CASE_CONFLICT : 0; 2251 (void) strncpy(eodp->ed_name, zap.za_name, 2252 EDIRENT_NAMELEN(reclen)); 2253 eodp = (edirent_t *)((intptr_t)eodp + reclen); 2254 } else { 2255 /* 2256 * Add normal entry: 2257 */ 2258 odp->d_ino = objnum; 2259 odp->d_reclen = reclen; 2260 /* NOTE: d_off is the offset for the *next* entry */ 2261 next = &(odp->d_off); 2262 (void) strncpy(odp->d_name, zap.za_name, 2263 DIRENT64_NAMELEN(reclen)); 2264 odp = (dirent64_t *)((intptr_t)odp + reclen); 2265 } 2266 outcount += reclen; 2267 2268 ASSERT(outcount <= bufsize); 2269 2270 /* Prefetch znode */ 2271 if (prefetch) 2272 dmu_prefetch(os, objnum, 0, 0); 2273 2274 skip_entry: 2275 /* 2276 * Move to the next entry, fill in the previous offset. 2277 */ 2278 if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) { 2279 zap_cursor_advance(&zc); 2280 offset = zap_cursor_serialize(&zc); 2281 } else { 2282 offset += 1; 2283 } 2284 *next = offset; 2285 } 2286 zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */ 2287 2288 if (uio->uio_segflg == UIO_SYSSPACE && uio->uio_iovcnt == 1) { 2289 iovp->iov_base += outcount; 2290 iovp->iov_len -= outcount; 2291 uio->uio_resid -= outcount; 2292 } else if (error = uiomove(outbuf, (long)outcount, UIO_READ, uio)) { 2293 /* 2294 * Reset the pointer. 
2295 */ 2296 offset = uio->uio_loffset; 2297 } 2298 2299 update: 2300 zap_cursor_fini(&zc); 2301 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1) 2302 kmem_free(outbuf, bufsize); 2303 2304 if (error == ENOENT) 2305 error = 0; 2306 2307 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 2308 2309 uio->uio_loffset = offset; 2310 ZFS_EXIT(zfsvfs); 2311 return (error); 2312 } 2313 2314 ulong_t zfs_fsync_sync_cnt = 4; 2315 2316 static int 2317 zfs_fsync(vnode_t *vp, int syncflag, cred_t *cr, caller_context_t *ct) 2318 { 2319 znode_t *zp = VTOZ(vp); 2320 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2321 2322 /* 2323 * Regardless of whether this is required for standards conformance, 2324 * this is the logical behavior when fsync() is called on a file with 2325 * dirty pages. We use B_ASYNC since the ZIL transactions are already 2326 * going to be pushed out as part of the zil_commit(). 2327 */ 2328 if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) && 2329 (vp->v_type == VREG) && !(IS_SWAPVP(vp))) 2330 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0, B_ASYNC, cr, ct); 2331 2332 (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt); 2333 2334 if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) { 2335 ZFS_ENTER(zfsvfs); 2336 ZFS_VERIFY_ZP(zp); 2337 zil_commit(zfsvfs->z_log, zp->z_last_itx, zp->z_id); 2338 ZFS_EXIT(zfsvfs); 2339 } 2340 return (0); 2341 } 2342 2343 2344 /* 2345 * Get the requested file attributes and place them in the provided 2346 * vattr structure. 2347 * 2348 * IN: vp - vnode of file. 2349 * vap - va_mask identifies requested attributes. 2350 * If AT_XVATTR set, then optional attrs are requested 2351 * flags - ATTR_NOACLCHECK (CIFS server context) 2352 * cr - credentials of caller. 2353 * ct - caller context 2354 * 2355 * OUT: vap - attribute values. 2356 * 2357 * RETURN: 0 (always succeeds) 2358 */ 2359 /* ARGSUSED */ 2360 static int 2361 zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2362 caller_context_t *ct) 2363 { 2364 znode_t *zp = VTOZ(vp); 2365 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2366 int error = 0; 2367 uint64_t links; 2368 uint64_t mtime[2], ctime[2]; 2369 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2370 xoptattr_t *xoap = NULL; 2371 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 2372 sa_bulk_attr_t bulk[2]; 2373 int count = 0; 2374 2375 ZFS_ENTER(zfsvfs); 2376 ZFS_VERIFY_ZP(zp); 2377 2378 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16); 2379 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16); 2380 2381 if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) { 2382 ZFS_EXIT(zfsvfs); 2383 return (error); 2384 } 2385 2386 /* 2387 * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES. 2388 * Also, if we are the owner don't bother, since owner should 2389 * always be allowed to read basic attributes of file. 2390 */ 2391 if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) && (zp->z_uid != crgetuid(cr))) { 2392 if (error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0, 2393 skipaclchk, cr)) { 2394 ZFS_EXIT(zfsvfs); 2395 return (error); 2396 } 2397 } 2398 2399 /* 2400 * Return all attributes. It's cheaper to provide the answer 2401 * than to determine whether we were asked the question. 
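 * For example, even a caller that only set AT_SIZE in va_mask gets
 * va_mode, va_uid, va_nlink, etc. filled in below; only the optional
 * xvattr attributes are gated on what was explicitly requested.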
2402 */ 2403 2404 mutex_enter(&zp->z_lock); 2405 vap->va_type = vp->v_type; 2406 vap->va_mode = zp->z_mode & MODEMASK; 2407 vap->va_uid = zp->z_uid; 2408 vap->va_gid = zp->z_gid; 2409 vap->va_fsid = zp->z_zfsvfs->z_vfs->vfs_dev; 2410 vap->va_nodeid = zp->z_id; 2411 if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp)) 2412 links = zp->z_links + 1; 2413 else 2414 links = zp->z_links; 2415 vap->va_nlink = MIN(links, UINT32_MAX); /* nlink_t limit! */ 2416 vap->va_size = zp->z_size; 2417 vap->va_rdev = vp->v_rdev; 2418 vap->va_seq = zp->z_seq; 2419 2420 /* 2421 * Add in any requested optional attributes and the create time. 2422 * Also set the corresponding bits in the returned attribute bitmap. 2423 */ 2424 if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) { 2425 if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) { 2426 xoap->xoa_archive = 2427 ((zp->z_pflags & ZFS_ARCHIVE) != 0); 2428 XVA_SET_RTN(xvap, XAT_ARCHIVE); 2429 } 2430 2431 if (XVA_ISSET_REQ(xvap, XAT_READONLY)) { 2432 xoap->xoa_readonly = 2433 ((zp->z_pflags & ZFS_READONLY) != 0); 2434 XVA_SET_RTN(xvap, XAT_READONLY); 2435 } 2436 2437 if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) { 2438 xoap->xoa_system = 2439 ((zp->z_pflags & ZFS_SYSTEM) != 0); 2440 XVA_SET_RTN(xvap, XAT_SYSTEM); 2441 } 2442 2443 if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) { 2444 xoap->xoa_hidden = 2445 ((zp->z_pflags & ZFS_HIDDEN) != 0); 2446 XVA_SET_RTN(xvap, XAT_HIDDEN); 2447 } 2448 2449 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2450 xoap->xoa_nounlink = 2451 ((zp->z_pflags & ZFS_NOUNLINK) != 0); 2452 XVA_SET_RTN(xvap, XAT_NOUNLINK); 2453 } 2454 2455 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2456 xoap->xoa_immutable = 2457 ((zp->z_pflags & ZFS_IMMUTABLE) != 0); 2458 XVA_SET_RTN(xvap, XAT_IMMUTABLE); 2459 } 2460 2461 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2462 xoap->xoa_appendonly = 2463 ((zp->z_pflags & ZFS_APPENDONLY) != 0); 2464 XVA_SET_RTN(xvap, XAT_APPENDONLY); 2465 } 2466 2467 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2468 xoap->xoa_nodump = 2469 ((zp->z_pflags & ZFS_NODUMP) != 0); 2470 XVA_SET_RTN(xvap, XAT_NODUMP); 2471 } 2472 2473 if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) { 2474 xoap->xoa_opaque = 2475 ((zp->z_pflags & ZFS_OPAQUE) != 0); 2476 XVA_SET_RTN(xvap, XAT_OPAQUE); 2477 } 2478 2479 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2480 xoap->xoa_av_quarantined = 2481 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0); 2482 XVA_SET_RTN(xvap, XAT_AV_QUARANTINED); 2483 } 2484 2485 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2486 xoap->xoa_av_modified = 2487 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0); 2488 XVA_SET_RTN(xvap, XAT_AV_MODIFIED); 2489 } 2490 2491 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) && 2492 vp->v_type == VREG) { 2493 zfs_sa_get_scanstamp(zp, xvap); 2494 } 2495 2496 if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) { 2497 uint64_t times[2]; 2498 2499 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs), 2500 times, sizeof (times)); 2501 ZFS_TIME_DECODE(&xoap->xoa_createtime, times); 2502 XVA_SET_RTN(xvap, XAT_CREATETIME); 2503 } 2504 2505 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { 2506 xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0); 2507 XVA_SET_RTN(xvap, XAT_REPARSE); 2508 } 2509 } 2510 2511 ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime); 2512 ZFS_TIME_DECODE(&vap->va_mtime, mtime); 2513 ZFS_TIME_DECODE(&vap->va_ctime, ctime); 2514 2515 mutex_exit(&zp->z_lock); 2516 2517 sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks); 2518 2519 if (zp->z_blksz == 0) { 2520 /* 2521 * Block size hasn't been set; suggest maximal I/O transfers. 
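 * (A z_blksz of zero means the file has no data blocks yet, e.g. a
 * newly created empty file, so report the dataset's maximum block
 * size rather than zero.)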
2522 */ 2523 vap->va_blksize = zfsvfs->z_max_blksz; 2524 } 2525 2526 ZFS_EXIT(zfsvfs); 2527 return (0); 2528 } 2529 2530 /* 2531 * Set the file attributes to the values contained in the 2532 * vattr structure. 2533 * 2534 * IN: vp - vnode of file to be modified. 2535 * vap - new attribute values. 2536 * If AT_XVATTR set, then optional attrs are being set 2537 * flags - ATTR_UTIME set if non-default time values provided. 2538 * - ATTR_NOACLCHECK (CIFS context only). 2539 * cr - credentials of caller. 2540 * ct - caller context 2541 * 2542 * RETURN: 0 if success 2543 * error code if failure 2544 * 2545 * Timestamps: 2546 * vp - ctime updated, mtime updated if size changed. 2547 */ 2548 /* ARGSUSED */ 2549 static int 2550 zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, 2551 caller_context_t *ct) 2552 { 2553 znode_t *zp = VTOZ(vp); 2554 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 2555 zilog_t *zilog; 2556 dmu_tx_t *tx; 2557 vattr_t oldva; 2558 xvattr_t tmpxvattr; 2559 uint_t mask = vap->va_mask; 2560 uint_t saved_mask; 2561 int trim_mask = 0; 2562 uint64_t new_mode; 2563 uint64_t new_uid, new_gid; 2564 uint64_t xattr_obj = 0; 2565 uint64_t mtime[2], ctime[2]; 2566 znode_t *attrzp; 2567 int need_policy = FALSE; 2568 int err, err2; 2569 zfs_fuid_info_t *fuidp = NULL; 2570 xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */ 2571 xoptattr_t *xoap; 2572 zfs_acl_t *aclp = NULL; 2573 boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 2574 boolean_t fuid_dirtied = B_FALSE; 2575 sa_bulk_attr_t bulk[7], xattr_bulk[7]; 2576 int count = 0, xattr_count = 0; 2577 2578 if (mask == 0) 2579 return (0); 2580 2581 if (mask & AT_NOSET) 2582 return (EINVAL); 2583 2584 ZFS_ENTER(zfsvfs); 2585 ZFS_VERIFY_ZP(zp); 2586 2587 zilog = zfsvfs->z_log; 2588 2589 /* 2590 * Make sure that if we have ephemeral uid/gid or xvattr specified 2591 * that file system is at proper version level 2592 */ 2593 2594 if (zfsvfs->z_use_fuids == B_FALSE && 2595 (((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) || 2596 ((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) || 2597 (mask & AT_XVATTR))) { 2598 ZFS_EXIT(zfsvfs); 2599 return (EINVAL); 2600 } 2601 2602 if (mask & AT_SIZE && vp->v_type == VDIR) { 2603 ZFS_EXIT(zfsvfs); 2604 return (EISDIR); 2605 } 2606 2607 if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) { 2608 ZFS_EXIT(zfsvfs); 2609 return (EINVAL); 2610 } 2611 2612 /* 2613 * If this is an xvattr_t, then get a pointer to the structure of 2614 * optional attributes. If this is NULL, then we have a vattr_t. 2615 */ 2616 xoap = xva_getxoptattr(xvap); 2617 2618 xva_init(&tmpxvattr); 2619 2620 /* 2621 * Immutable files can only alter immutable bit and atime 2622 */ 2623 if ((zp->z_pflags & ZFS_IMMUTABLE) && 2624 ((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) || 2625 ((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) { 2626 ZFS_EXIT(zfsvfs); 2627 return (EPERM); 2628 } 2629 2630 if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) { 2631 ZFS_EXIT(zfsvfs); 2632 return (EPERM); 2633 } 2634 2635 /* 2636 * Verify timestamps doesn't overflow 32 bits. 2637 * ZFS can handle large timestamps, but 32bit syscalls can't 2638 * handle times greater than 2039. This check should be removed 2639 * once large timestamps are fully supported. 
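 * (TIMESPEC_OVERFLOW() below rejects any tv_sec that does not fit in
 * a signed 32-bit time_t.)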
2640 */ 2641 if (mask & (AT_ATIME | AT_MTIME)) { 2642 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) || 2643 ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) { 2644 ZFS_EXIT(zfsvfs); 2645 return (EOVERFLOW); 2646 } 2647 } 2648 2649 top: 2650 attrzp = NULL; 2651 2652 /* Can this be moved to before the top label? */ 2653 if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) { 2654 ZFS_EXIT(zfsvfs); 2655 return (EROFS); 2656 } 2657 2658 /* 2659 * First validate permissions 2660 */ 2661 2662 if (mask & AT_SIZE) { 2663 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr); 2664 if (err) { 2665 ZFS_EXIT(zfsvfs); 2666 return (err); 2667 } 2668 /* 2669 * XXX - Note, we are not providing any open 2670 * mode flags here (like FNDELAY), so we may 2671 * block if there are locks present... this 2672 * should be addressed in openat(). 2673 */ 2674 /* XXX - would it be OK to generate a log record here? */ 2675 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE); 2676 if (err) { 2677 ZFS_EXIT(zfsvfs); 2678 return (err); 2679 } 2680 } 2681 2682 if (mask & (AT_ATIME|AT_MTIME) || 2683 ((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) || 2684 XVA_ISSET_REQ(xvap, XAT_READONLY) || 2685 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) || 2686 XVA_ISSET_REQ(xvap, XAT_CREATETIME) || 2687 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) { 2688 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0, 2689 skipaclchk, cr); 2690 } 2691 2692 if (mask & (AT_UID|AT_GID)) { 2693 int idmask = (mask & (AT_UID|AT_GID)); 2694 int take_owner; 2695 int take_group; 2696 2697 /* 2698 * NOTE: even if a new mode is being set, 2699 * we may clear S_ISUID/S_ISGID bits. 2700 */ 2701 2702 if (!(mask & AT_MODE)) 2703 vap->va_mode = zp->z_mode; 2704 2705 /* 2706 * Take ownership or chgrp to group we are a member of 2707 */ 2708 2709 take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr)); 2710 take_group = (mask & AT_GID) && 2711 zfs_groupmember(zfsvfs, vap->va_gid, cr); 2712 2713 /* 2714 * If both AT_UID and AT_GID are set then take_owner and 2715 * take_group must both be set in order to allow taking 2716 * ownership. 2717 * 2718 * Otherwise, send the check through secpolicy_vnode_setattr() 2719 * 2720 */ 2721 2722 if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) || 2723 ((idmask == AT_UID) && take_owner) || 2724 ((idmask == AT_GID) && take_group)) { 2725 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0, 2726 skipaclchk, cr) == 0) { 2727 /* 2728 * Remove setuid/setgid for non-privileged users 2729 */ 2730 secpolicy_setid_clear(vap, cr); 2731 trim_mask = (mask & (AT_UID|AT_GID)); 2732 } else { 2733 need_policy = TRUE; 2734 } 2735 } else { 2736 need_policy = TRUE; 2737 } 2738 } 2739 2740 mutex_enter(&zp->z_lock); 2741 oldva.va_mode = zp->z_mode; 2742 oldva.va_uid = zp->z_uid; 2743 oldva.va_gid = zp->z_gid; 2744 if (mask & AT_XVATTR) { 2745 /* 2746 * Update xvattr mask to include only those attributes 2747 * that are actually changing. 2748 * 2749 * the bits will be restored prior to actually setting 2750 * the attributes so the caller thinks they were set. 
2751 */ 2752 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) { 2753 if (xoap->xoa_appendonly != 2754 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) { 2755 need_policy = TRUE; 2756 } else { 2757 XVA_CLR_REQ(xvap, XAT_APPENDONLY); 2758 XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY); 2759 } 2760 } 2761 2762 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) { 2763 if (xoap->xoa_nounlink != 2764 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) { 2765 need_policy = TRUE; 2766 } else { 2767 XVA_CLR_REQ(xvap, XAT_NOUNLINK); 2768 XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK); 2769 } 2770 } 2771 2772 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) { 2773 if (xoap->xoa_immutable != 2774 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) { 2775 need_policy = TRUE; 2776 } else { 2777 XVA_CLR_REQ(xvap, XAT_IMMUTABLE); 2778 XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE); 2779 } 2780 } 2781 2782 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) { 2783 if (xoap->xoa_nodump != 2784 ((zp->z_pflags & ZFS_NODUMP) != 0)) { 2785 need_policy = TRUE; 2786 } else { 2787 XVA_CLR_REQ(xvap, XAT_NODUMP); 2788 XVA_SET_REQ(&tmpxvattr, XAT_NODUMP); 2789 } 2790 } 2791 2792 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) { 2793 if (xoap->xoa_av_modified != 2794 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) { 2795 need_policy = TRUE; 2796 } else { 2797 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED); 2798 XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED); 2799 } 2800 } 2801 2802 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) { 2803 if ((vp->v_type != VREG && 2804 xoap->xoa_av_quarantined) || 2805 xoap->xoa_av_quarantined != 2806 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) { 2807 need_policy = TRUE; 2808 } else { 2809 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED); 2810 XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED); 2811 } 2812 } 2813 2814 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) { 2815 mutex_exit(&zp->z_lock); 2816 ZFS_EXIT(zfsvfs); 2817 return (EPERM); 2818 } 2819 2820 if (need_policy == FALSE && 2821 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) || 2822 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) { 2823 need_policy = TRUE; 2824 } 2825 } 2826 2827 mutex_exit(&zp->z_lock); 2828 2829 if (mask & AT_MODE) { 2830 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) { 2831 err = secpolicy_setid_setsticky_clear(vp, vap, 2832 &oldva, cr); 2833 if (err) { 2834 ZFS_EXIT(zfsvfs); 2835 return (err); 2836 } 2837 trim_mask |= AT_MODE; 2838 } else { 2839 need_policy = TRUE; 2840 } 2841 } 2842 2843 if (need_policy) { 2844 /* 2845 * If trim_mask is set then take ownership 2846 * has been granted or write_acl is present and user 2847 * has the ability to modify mode. In that case remove 2848 * UID|GID and or MODE from mask so that 2849 * secpolicy_vnode_setattr() doesn't revoke it. 
2850 */ 2851 2852 if (trim_mask) { 2853 saved_mask = vap->va_mask; 2854 vap->va_mask &= ~trim_mask; 2855 } 2856 err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags, 2857 (int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp); 2858 if (err) { 2859 ZFS_EXIT(zfsvfs); 2860 return (err); 2861 } 2862 2863 if (trim_mask) 2864 vap->va_mask |= saved_mask; 2865 } 2866 2867 /* 2868 * secpolicy_vnode_setattr, or take ownership may have 2869 * changed va_mask 2870 */ 2871 mask = vap->va_mask; 2872 2873 if ((mask & (AT_UID | AT_GID))) { 2874 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xattr_obj, 2875 sizeof (xattr_obj)); 2876 2877 if (xattr_obj) { 2878 err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp); 2879 if (err) 2880 goto out2; 2881 } 2882 if (mask & AT_UID) { 2883 new_uid = zfs_fuid_create(zfsvfs, 2884 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp); 2885 if (vap->va_uid != zp->z_uid && 2886 zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) { 2887 err = EDQUOT; 2888 goto out2; 2889 } 2890 } 2891 2892 if (mask & AT_GID) { 2893 new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid, 2894 cr, ZFS_GROUP, &fuidp); 2895 if (new_gid != zp->z_gid && 2896 zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) { 2897 err = EDQUOT; 2898 goto out2; 2899 } 2900 } 2901 } 2902 tx = dmu_tx_create(zfsvfs->z_os); 2903 2904 if (mask & AT_MODE) { 2905 uint64_t pmode = zp->z_mode; 2906 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT); 2907 2908 if (err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)) 2909 goto out; 2910 2911 if (!zp->z_is_sa && ZFS_EXTERNAL_ACL(zp)) { 2912 /* 2913 * Are we upgrading ACL from old V0 format 2914 * to V1 format? 2915 */ 2916 if (zfsvfs->z_version <= ZPL_VERSION_FUID && 2917 ZNODE_ACL_VERSION(zp) == 2918 ZFS_ACL_VERSION_INITIAL) { 2919 dmu_tx_hold_free(tx, 2920 ZFS_EXTERNAL_ACL(zp), 0, 2921 DMU_OBJECT_END); 2922 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 2923 0, aclp->z_acl_bytes); 2924 } else { 2925 dmu_tx_hold_write(tx, ZFS_EXTERNAL_ACL(zp), 0, 2926 aclp->z_acl_bytes); 2927 } 2928 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) { 2929 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 2930 0, aclp->z_acl_bytes); 2931 } 2932 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); 2933 } else { 2934 if ((mask & AT_XVATTR) && 2935 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) 2936 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); 2937 else 2938 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 2939 } 2940 2941 if (attrzp) { 2942 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE); 2943 } 2944 2945 fuid_dirtied = zfsvfs->z_fuid_dirty; 2946 if (fuid_dirtied) 2947 zfs_fuid_txhold(zfsvfs, tx); 2948 2949 zfs_sa_upgrade_txholds(tx, zp); 2950 2951 err = dmu_tx_assign(tx, TXG_NOWAIT); 2952 if (err) { 2953 if (err == ERESTART) 2954 dmu_tx_wait(tx); 2955 goto out; 2956 } 2957 2958 count = 0; 2959 /* 2960 * Set each attribute requested. 2961 * We group settings according to the locks they need to acquire. 2962 * 2963 * Note: you cannot set ctime directly, although it will be 2964 * updated as a side-effect of calling this function. 
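 *
 * The pattern for each attribute is roughly (illustrative only, with
 * <attr> standing in for UID, GID, MODE, ATIME, ...):
 *
 *	mutex_enter(&zp->z_lock);
 *	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_<attr>(zfsvfs), ...);
 *	zp->z_<attr> = <new value>;	// keep the in-core znode in sync
 *	...
 *	mutex_exit(&zp->z_lock);
 *	sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);	// one SA update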
2965 */ 2966 2967 mutex_enter(&zp->z_lock); 2968 2969 if (attrzp) 2970 mutex_enter(&attrzp->z_lock); 2971 2972 if (mask & (AT_UID|AT_GID)) { 2973 2974 if (mask & AT_UID) { 2975 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, 2976 &new_uid, sizeof (new_uid)); 2977 zp->z_uid = zfs_fuid_map_id(zfsvfs, new_uid, 2978 cr, ZFS_OWNER); 2979 if (attrzp) { 2980 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 2981 SA_ZPL_UID(zfsvfs), NULL, &new_uid, 2982 sizeof (new_uid)); 2983 attrzp->z_uid = zp->z_uid; 2984 } 2985 } 2986 2987 if (mask & AT_GID) { 2988 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), 2989 NULL, &new_gid, sizeof (new_gid)); 2990 zp->z_gid = zfs_fuid_map_id(zfsvfs, new_gid, cr, 2991 ZFS_GROUP); 2992 if (attrzp) { 2993 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 2994 SA_ZPL_GID(zfsvfs), NULL, &new_gid, 2995 sizeof (new_gid)); 2996 attrzp->z_gid = zp->z_gid; 2997 } 2998 } 2999 if (!(mask & AT_MODE)) { 3000 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), 3001 NULL, &new_mode, sizeof (new_mode)); 3002 new_mode = zp->z_mode; 3003 } 3004 err = zfs_acl_chown_setattr(zp); 3005 ASSERT(err == 0); 3006 if (attrzp) { 3007 err = zfs_acl_chown_setattr(attrzp); 3008 ASSERT(err == 0); 3009 } 3010 } 3011 3012 if (mask & AT_MODE) { 3013 mutex_enter(&zp->z_acl_lock); 3014 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, 3015 &new_mode, sizeof (new_mode)); 3016 zp->z_mode = new_mode; 3017 ASSERT3U((uintptr_t)aclp, !=, NULL); 3018 err = zfs_aclset_common(zp, aclp, cr, tx); 3019 ASSERT3U(err, ==, 0); 3020 zp->z_acl_cached = aclp; 3021 aclp = NULL; 3022 mutex_exit(&zp->z_acl_lock); 3023 } 3024 3025 if (attrzp) 3026 mutex_exit(&attrzp->z_lock); 3027 3028 if (mask & AT_ATIME) { 3029 ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime); 3030 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, 3031 &zp->z_atime, sizeof (zp->z_atime)); 3032 } 3033 3034 if (mask & AT_MTIME) { 3035 ZFS_TIME_ENCODE(&vap->va_mtime, mtime); 3036 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, 3037 mtime, sizeof (mtime)); 3038 } 3039 3040 /* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */ 3041 if (mask & AT_SIZE && !(mask & AT_MTIME)) { 3042 if (!(mask & AT_MTIME)) 3043 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), 3044 NULL, mtime, sizeof (mtime)); 3045 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, 3046 &ctime, sizeof (ctime)); 3047 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, 3048 B_TRUE); 3049 } else if (mask != 0) { 3050 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, 3051 &ctime, sizeof (ctime)); 3052 zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime, 3053 B_TRUE); 3054 if (attrzp) { 3055 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count, 3056 SA_ZPL_CTIME(zfsvfs), NULL, 3057 &ctime, sizeof (ctime)); 3058 zfs_tstamp_update_setup(attrzp, STATE_CHANGED, 3059 mtime, ctime, B_TRUE); 3060 } 3061 } 3062 /* 3063 * Do this after setting timestamps to prevent timestamp 3064 * update from toggling the bit 3065 */ 3066 3067 if (xoap && (mask & AT_XVATTR)) { 3068 3069 /* 3070 * restore trimmed off masks 3071 * so that return masks can be set for caller.
3072 */ 3073 3074 if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) { 3075 XVA_SET_REQ(xvap, XAT_APPENDONLY); 3076 } 3077 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) { 3078 XVA_SET_REQ(xvap, XAT_NOUNLINK); 3079 } 3080 if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) { 3081 XVA_SET_REQ(xvap, XAT_IMMUTABLE); 3082 } 3083 if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) { 3084 XVA_SET_REQ(xvap, XAT_NODUMP); 3085 } 3086 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) { 3087 XVA_SET_REQ(xvap, XAT_AV_MODIFIED); 3088 } 3089 if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) { 3090 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED); 3091 } 3092 3093 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) 3094 ASSERT(vp->v_type == VREG); 3095 3096 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL, 3097 &zp->z_pflags, sizeof (zp->z_pflags)); 3098 zfs_xvattr_set(zp, xvap, tx); 3099 } 3100 3101 if (fuid_dirtied) 3102 zfs_fuid_sync(zfsvfs, tx); 3103 3104 if (mask != 0) 3105 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp); 3106 3107 mutex_exit(&zp->z_lock); 3108 3109 out: 3110 if (err == 0 && attrzp) { 3111 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk, 3112 xattr_count, tx); 3113 ASSERT(err2 == 0); 3114 } 3115 3116 if (attrzp) 3117 VN_RELE(ZTOV(attrzp)); 3118 if (aclp) 3119 zfs_acl_free(aclp); 3120 3121 if (fuidp) { 3122 zfs_fuid_info_free(fuidp); 3123 fuidp = NULL; 3124 } 3125 3126 if (err) { 3127 dmu_tx_abort(tx); 3128 if (err == ERESTART) 3129 goto top; 3130 } else { 3131 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); 3132 dmu_tx_commit(tx); 3133 } 3134 3135 3136 out2: 3137 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 3138 zil_commit(zilog, UINT64_MAX, 0); 3139 3140 ZFS_EXIT(zfsvfs); 3141 return (err); 3142 } 3143 3144 typedef struct zfs_zlock { 3145 krwlock_t *zl_rwlock; /* lock we acquired */ 3146 znode_t *zl_znode; /* znode we held */ 3147 struct zfs_zlock *zl_next; /* next in list */ 3148 } zfs_zlock_t; 3149 3150 /* 3151 * Drop locks and release vnodes that were held by zfs_rename_lock(). 3152 */ 3153 static void 3154 zfs_rename_unlock(zfs_zlock_t **zlpp) 3155 { 3156 zfs_zlock_t *zl; 3157 3158 while ((zl = *zlpp) != NULL) { 3159 if (zl->zl_znode != NULL) 3160 VN_RELE(ZTOV(zl->zl_znode)); 3161 rw_exit(zl->zl_rwlock); 3162 *zlpp = zl->zl_next; 3163 kmem_free(zl, sizeof (*zl)); 3164 } 3165 } 3166 3167 /* 3168 * Search back through the directory tree, using the ".." entries. 3169 * Lock each directory in the chain to prevent concurrent renames. 3170 * Fail any attempt to move a directory into one of its own descendants. 3171 * XXX - z_parent_lock can overlap with map or grow locks 3172 */ 3173 static int 3174 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp) 3175 { 3176 zfs_zlock_t *zl; 3177 znode_t *zp = tdzp; 3178 uint64_t rootid = zp->z_zfsvfs->z_root; 3179 uint64_t oidp = zp->z_id; 3180 krwlock_t *rwlp = &szp->z_parent_lock; 3181 krw_t rw = RW_WRITER; 3182 3183 /* 3184 * First pass write-locks szp and compares to zp->z_id. 3185 * Later passes read-lock zp and compare to zp->z_parent. 3186 */ 3187 do { 3188 if (!rw_tryenter(rwlp, rw)) { 3189 /* 3190 * Another thread is renaming in this path. 3191 * Note that if we are a WRITER, we don't have any 3192 * parent_locks held yet. 
3193 */ 3194 if (rw == RW_READER && zp->z_id > szp->z_id) { 3195 /* 3196 * Drop our locks and restart 3197 */ 3198 zfs_rename_unlock(&zl); 3199 *zlpp = NULL; 3200 zp = tdzp; 3201 oidp = zp->z_id; 3202 rwlp = &szp->z_parent_lock; 3203 rw = RW_WRITER; 3204 continue; 3205 } else { 3206 /* 3207 * Wait for other thread to drop its locks 3208 */ 3209 rw_enter(rwlp, rw); 3210 } 3211 } 3212 3213 zl = kmem_alloc(sizeof (*zl), KM_SLEEP); 3214 zl->zl_rwlock = rwlp; 3215 zl->zl_znode = NULL; 3216 zl->zl_next = *zlpp; 3217 *zlpp = zl; 3218 3219 if (oidp == szp->z_id) /* We're a descendant of szp */ 3220 return (EINVAL); 3221 3222 if (oidp == rootid) /* We've hit the top */ 3223 return (0); 3224 3225 if (rw == RW_READER) { /* i.e. not the first pass */ 3226 int error = zfs_zget(zp->z_zfsvfs, oidp, &zp); 3227 if (error) 3228 return (error); 3229 zl->zl_znode = zp; 3230 } 3231 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zp->z_zfsvfs), 3232 &oidp, sizeof (oidp)); 3233 rwlp = &zp->z_parent_lock; 3234 rw = RW_READER; 3235 3236 } while (zp->z_id != sdzp->z_id); 3237 3238 return (0); 3239 } 3240 3241 /* 3242 * Move an entry from the provided source directory to the target 3243 * directory. Change the entry name as indicated. 3244 * 3245 * IN: sdvp - Source directory containing the "old entry". 3246 * snm - Old entry name. 3247 * tdvp - Target directory to contain the "new entry". 3248 * tnm - New entry name. 3249 * cr - credentials of caller. 3250 * ct - caller context 3251 * flags - case flags 3252 * 3253 * RETURN: 0 if success 3254 * error code if failure 3255 * 3256 * Timestamps: 3257 * sdvp,tdvp - ctime|mtime updated 3258 */ 3259 /*ARGSUSED*/ 3260 static int 3261 zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr, 3262 caller_context_t *ct, int flags) 3263 { 3264 znode_t *tdzp, *szp, *tzp; 3265 znode_t *sdzp = VTOZ(sdvp); 3266 zfsvfs_t *zfsvfs = sdzp->z_zfsvfs; 3267 zilog_t *zilog; 3268 vnode_t *realvp; 3269 zfs_dirlock_t *sdl, *tdl; 3270 dmu_tx_t *tx; 3271 zfs_zlock_t *zl; 3272 int cmp, serr, terr; 3273 int error = 0; 3274 int zflg = 0; 3275 3276 ZFS_ENTER(zfsvfs); 3277 ZFS_VERIFY_ZP(sdzp); 3278 zilog = zfsvfs->z_log; 3279 3280 /* 3281 * Make sure we have the real vp for the target directory. 3282 */ 3283 if (VOP_REALVP(tdvp, &realvp, ct) == 0) 3284 tdvp = realvp; 3285 3286 if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) { 3287 ZFS_EXIT(zfsvfs); 3288 return (EXDEV); 3289 } 3290 3291 tdzp = VTOZ(tdvp); 3292 ZFS_VERIFY_ZP(tdzp); 3293 if (zfsvfs->z_utf8 && u8_validate(tnm, 3294 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3295 ZFS_EXIT(zfsvfs); 3296 return (EILSEQ); 3297 } 3298 3299 if (flags & FIGNORECASE) 3300 zflg |= ZCILOOK; 3301 3302 top: 3303 szp = NULL; 3304 tzp = NULL; 3305 zl = NULL; 3306 3307 /* 3308 * This is to prevent the creation of links into attribute space 3309 * by renaming a linked file into/outof an attribute directory. 3310 * See the comment in zfs_link() for why this is considered bad. 3311 */ 3312 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) { 3313 ZFS_EXIT(zfsvfs); 3314 return (EINVAL); 3315 } 3316 3317 /* 3318 * Lock source and target directory entries. To prevent deadlock, 3319 * a lock ordering must be defined. We lock the directory with 3320 * the smallest object id first, or if it's a tie, the one with 3321 * the lexically first name. 
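 *
 * For example, if the two directories have object ids 7 and 12, both
 * of two concurrent renames going in opposite directions between them
 * will lock directory 7 before directory 12, so they cannot deadlock
 * against each other.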
3322 */ 3323 if (sdzp->z_id < tdzp->z_id) { 3324 cmp = -1; 3325 } else if (sdzp->z_id > tdzp->z_id) { 3326 cmp = 1; 3327 } else { 3328 /* 3329 * First compare the two name arguments without 3330 * considering any case folding. 3331 */ 3332 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER); 3333 3334 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error); 3335 ASSERT(error == 0 || !zfsvfs->z_utf8); 3336 if (cmp == 0) { 3337 /* 3338 * POSIX: "If the old argument and the new argument 3339 * both refer to links to the same existing file, 3340 * the rename() function shall return successfully 3341 * and perform no other action." 3342 */ 3343 ZFS_EXIT(zfsvfs); 3344 return (0); 3345 } 3346 /* 3347 * If the file system is case-folding, then we may 3348 * have some more checking to do. A case-folding file 3349 * system is either supporting mixed case sensitivity 3350 * access or is completely case-insensitive. Note 3351 * that the file system is always case preserving. 3352 * 3353 * In mixed sensitivity mode case sensitive behavior 3354 * is the default. FIGNORECASE must be used to 3355 * explicitly request case insensitive behavior. 3356 * 3357 * If the source and target names provided differ only 3358 * by case (e.g., a request to rename 'tim' to 'Tim'), 3359 * we will treat this as a special case in the 3360 * case-insensitive mode: as long as the source name 3361 * is an exact match, we will allow this to proceed as 3362 * a name-change request. 3363 */ 3364 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE || 3365 (zfsvfs->z_case == ZFS_CASE_MIXED && 3366 flags & FIGNORECASE)) && 3367 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST, 3368 &error) == 0) { 3369 /* 3370 * case preserving rename request, require exact 3371 * name matches 3372 */ 3373 zflg |= ZCIEXACT; 3374 zflg &= ~ZCILOOK; 3375 } 3376 } 3377 3378 /* 3379 * If the source and destination directories are the same, we should 3380 * grab the z_name_lock of that directory only once. 3381 */ 3382 if (sdzp == tdzp) { 3383 zflg |= ZHAVELOCK; 3384 rw_enter(&sdzp->z_name_lock, RW_READER); 3385 } 3386 3387 if (cmp < 0) { 3388 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp, 3389 ZEXISTS | zflg, NULL, NULL); 3390 terr = zfs_dirent_lock(&tdl, 3391 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL); 3392 } else { 3393 terr = zfs_dirent_lock(&tdl, 3394 tdzp, tnm, &tzp, zflg, NULL, NULL); 3395 serr = zfs_dirent_lock(&sdl, 3396 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg, 3397 NULL, NULL); 3398 } 3399 3400 if (serr) { 3401 /* 3402 * Source entry invalid or not there. 3403 */ 3404 if (!terr) { 3405 zfs_dirent_unlock(tdl); 3406 if (tzp) 3407 VN_RELE(ZTOV(tzp)); 3408 } 3409 3410 if (sdzp == tdzp) 3411 rw_exit(&sdzp->z_name_lock); 3412 3413 if (strcmp(snm, "..") == 0) 3414 serr = EINVAL; 3415 ZFS_EXIT(zfsvfs); 3416 return (serr); 3417 } 3418 if (terr) { 3419 zfs_dirent_unlock(sdl); 3420 VN_RELE(ZTOV(szp)); 3421 3422 if (sdzp == tdzp) 3423 rw_exit(&sdzp->z_name_lock); 3424 3425 if (strcmp(tnm, "..") == 0) 3426 terr = EINVAL; 3427 ZFS_EXIT(zfsvfs); 3428 return (terr); 3429 } 3430 3431 /* 3432 * Must have write access at the source to remove the old entry 3433 * and write access at the target to create the new entry. 3434 * Note that if target and source are the same, this can be 3435 * done in a single check. 3436 */ 3437 3438 if (error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)) 3439 goto out; 3440 3441 if (ZTOV(szp)->v_type == VDIR) { 3442 /* 3443 * Check to make sure rename is valid. 
3444 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d 3445 */ 3446 if (error = zfs_rename_lock(szp, tdzp, sdzp, &zl)) 3447 goto out; 3448 } 3449 3450 /* 3451 * Does target exist? 3452 */ 3453 if (tzp) { 3454 /* 3455 * Source and target must be the same type. 3456 */ 3457 if (ZTOV(szp)->v_type == VDIR) { 3458 if (ZTOV(tzp)->v_type != VDIR) { 3459 error = ENOTDIR; 3460 goto out; 3461 } 3462 } else { 3463 if (ZTOV(tzp)->v_type == VDIR) { 3464 error = EISDIR; 3465 goto out; 3466 } 3467 } 3468 /* 3469 * POSIX dictates that when the source and target 3470 * entries refer to the same file object, rename 3471 * must do nothing and exit without error. 3472 */ 3473 if (szp->z_id == tzp->z_id) { 3474 error = 0; 3475 goto out; 3476 } 3477 } 3478 3479 vnevent_rename_src(ZTOV(szp), sdvp, snm, ct); 3480 if (tzp) 3481 vnevent_rename_dest(ZTOV(tzp), tdvp, tnm, ct); 3482 3483 /* 3484 * notify the target directory if it is not the same 3485 * as source directory. 3486 */ 3487 if (tdvp != sdvp) { 3488 vnevent_rename_dest_dir(tdvp, ct); 3489 } 3490 3491 tx = dmu_tx_create(zfsvfs->z_os); 3492 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); 3493 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE); 3494 dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm); 3495 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm); 3496 if (sdzp != tdzp) { 3497 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE); 3498 zfs_sa_upgrade_txholds(tx, tdzp); 3499 } 3500 if (tzp) { 3501 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE); 3502 zfs_sa_upgrade_txholds(tx, tzp); 3503 } 3504 3505 zfs_sa_upgrade_txholds(tx, szp); 3506 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); 3507 error = dmu_tx_assign(tx, TXG_NOWAIT); 3508 if (error) { 3509 if (zl != NULL) 3510 zfs_rename_unlock(&zl); 3511 zfs_dirent_unlock(sdl); 3512 zfs_dirent_unlock(tdl); 3513 3514 if (sdzp == tdzp) 3515 rw_exit(&sdzp->z_name_lock); 3516 3517 VN_RELE(ZTOV(szp)); 3518 if (tzp) 3519 VN_RELE(ZTOV(tzp)); 3520 if (error == ERESTART) { 3521 dmu_tx_wait(tx); 3522 dmu_tx_abort(tx); 3523 goto top; 3524 } 3525 dmu_tx_abort(tx); 3526 ZFS_EXIT(zfsvfs); 3527 return (error); 3528 } 3529 3530 if (tzp) /* Attempt to remove the existing target */ 3531 error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL); 3532 3533 if (error == 0) { 3534 error = zfs_link_create(tdl, szp, tx, ZRENAMING); 3535 if (error == 0) { 3536 szp->z_pflags |= ZFS_AV_MODIFIED; 3537 3538 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs), 3539 (void *)&szp->z_pflags, sizeof (uint64_t), tx); 3540 ASSERT3U(error, ==, 0); 3541 3542 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL); 3543 ASSERT3U(error, ==, 0); 3544 3545 zfs_log_rename(zilog, tx, 3546 TX_RENAME | (flags & FIGNORECASE ? TX_CI : 0), 3547 sdzp, sdl->dl_name, tdzp, tdl->dl_name, szp); 3548 3549 /* Update path information for the target vnode */ 3550 vn_renamepath(tdvp, ZTOV(szp), tnm, strlen(tnm)); 3551 } 3552 } 3553 3554 dmu_tx_commit(tx); 3555 out: 3556 if (zl != NULL) 3557 zfs_rename_unlock(&zl); 3558 3559 zfs_dirent_unlock(sdl); 3560 zfs_dirent_unlock(tdl); 3561 3562 if (sdzp == tdzp) 3563 rw_exit(&sdzp->z_name_lock); 3564 3565 3566 VN_RELE(ZTOV(szp)); 3567 if (tzp) 3568 VN_RELE(ZTOV(tzp)); 3569 3570 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 3571 zil_commit(zilog, UINT64_MAX, 0); 3572 3573 ZFS_EXIT(zfsvfs); 3574 return (error); 3575 } 3576 3577 /* 3578 * Insert the indicated symbolic reference entry into the directory. 3579 * 3580 * IN: dvp - Directory to contain new symbolic link. 3581 * link - Name for new symlink entry. 3582 * vap - Attributes of new entry. 
3583 * target - Target path of new symlink. 3584 * cr - credentials of caller. 3585 * ct - caller context 3586 * flags - case flags 3587 * 3588 * RETURN: 0 if success 3589 * error code if failure 3590 * 3591 * Timestamps: 3592 * dvp - ctime|mtime updated 3593 */ 3594 /*ARGSUSED*/ 3595 static int 3596 zfs_symlink(vnode_t *dvp, char *name, vattr_t *vap, char *link, cred_t *cr, 3597 caller_context_t *ct, int flags) 3598 { 3599 znode_t *zp, *dzp = VTOZ(dvp); 3600 zfs_dirlock_t *dl; 3601 dmu_tx_t *tx; 3602 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3603 zilog_t *zilog; 3604 uint64_t len = strlen(link); 3605 int error; 3606 int zflg = ZNEW; 3607 zfs_acl_ids_t acl_ids; 3608 boolean_t fuid_dirtied; 3609 uint64_t txtype = TX_SYMLINK; 3610 3611 ASSERT(vap->va_type == VLNK); 3612 3613 ZFS_ENTER(zfsvfs); 3614 ZFS_VERIFY_ZP(dzp); 3615 zilog = zfsvfs->z_log; 3616 3617 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name), 3618 NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3619 ZFS_EXIT(zfsvfs); 3620 return (EILSEQ); 3621 } 3622 if (flags & FIGNORECASE) 3623 zflg |= ZCILOOK; 3624 top: 3625 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3626 ZFS_EXIT(zfsvfs); 3627 return (error); 3628 } 3629 3630 if (len > MAXPATHLEN) { 3631 ZFS_EXIT(zfsvfs); 3632 return (ENAMETOOLONG); 3633 } 3634 3635 /* 3636 * Attempt to lock directory; fail if entry already exists. 3637 */ 3638 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL); 3639 if (error) { 3640 ZFS_EXIT(zfsvfs); 3641 return (error); 3642 } 3643 3644 VERIFY(0 == zfs_acl_ids_create(dzp, 0, vap, cr, NULL, &acl_ids)); 3645 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) { 3646 zfs_acl_ids_free(&acl_ids); 3647 zfs_dirent_unlock(dl); 3648 ZFS_EXIT(zfsvfs); 3649 return (EDQUOT); 3650 } 3651 tx = dmu_tx_create(zfsvfs->z_os); 3652 fuid_dirtied = zfsvfs->z_fuid_dirty; 3653 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len)); 3654 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3655 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + 3656 ZFS_SA_BASE_ATTR_SIZE + len); 3657 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE); 3658 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) { 3659 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, 3660 acl_ids.z_aclp->z_acl_bytes); 3661 } 3662 if (fuid_dirtied) 3663 zfs_fuid_txhold(zfsvfs, tx); 3664 error = dmu_tx_assign(tx, TXG_NOWAIT); 3665 if (error) { 3666 zfs_acl_ids_free(&acl_ids); 3667 zfs_dirent_unlock(dl); 3668 if (error == ERESTART) { 3669 dmu_tx_wait(tx); 3670 dmu_tx_abort(tx); 3671 goto top; 3672 } 3673 dmu_tx_abort(tx); 3674 ZFS_EXIT(zfsvfs); 3675 return (error); 3676 } 3677 3678 /* 3679 * Create a new object for the symlink. 3680 * for version 4 ZPL datasets the symlink will be an SA attribute 3681 */ 3682 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); 3683 3684 if (fuid_dirtied) 3685 zfs_fuid_sync(zfsvfs, tx); 3686 3687 if (zp->z_is_sa) 3688 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs), 3689 link, len, tx); 3690 else 3691 zfs_sa_symlink(zp, link, len, tx); 3692 3693 zp->z_size = len; 3694 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs), 3695 &zp->z_size, sizeof (zp->z_size), tx); 3696 /* 3697 * Insert the new object into the directory.
3698 */ 3699 (void) zfs_link_create(dl, zp, tx, ZNEW); 3700 3701 if (flags & FIGNORECASE) 3702 txtype |= TX_CI; 3703 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link); 3704 3705 zfs_acl_ids_free(&acl_ids); 3706 3707 dmu_tx_commit(tx); 3708 3709 zfs_dirent_unlock(dl); 3710 3711 VN_RELE(ZTOV(zp)); 3712 3713 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 3714 zil_commit(zilog, UINT64_MAX, 0); 3715 3716 ZFS_EXIT(zfsvfs); 3717 return (error); 3718 } 3719 3720 /* 3721 * Return, in the buffer contained in the provided uio structure, 3722 * the symbolic path referred to by vp. 3723 * 3724 * IN: vp - vnode of symbolic link. 3725 * uio - structure to contain the link path. 3726 * cr - credentials of caller. 3727 * ct - caller context 3728 * 3729 * OUT: uio - structure to contain the link path. 3730 * 3731 * RETURN: 0 if success 3732 * error code if failure 3733 * 3734 * Timestamps: 3735 * vp - atime updated 3736 */ 3737 /* ARGSUSED */ 3738 static int 3739 zfs_readlink(vnode_t *vp, uio_t *uio, cred_t *cr, caller_context_t *ct) 3740 { 3741 znode_t *zp = VTOZ(vp); 3742 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3743 int error; 3744 3745 ZFS_ENTER(zfsvfs); 3746 ZFS_VERIFY_ZP(zp); 3747 3748 if (zp->z_is_sa) 3749 error = sa_lookup_uio(zp->z_sa_hdl, 3750 SA_ZPL_SYMLINK(zfsvfs), uio); 3751 else 3752 error = zfs_sa_readlink(zp, uio); 3753 3754 ZFS_ACCESSTIME_STAMP(zfsvfs, zp); 3755 3756 ZFS_EXIT(zfsvfs); 3757 return (error); 3758 } 3759 3760 /* 3761 * Insert a new entry into directory tdvp referencing svp. 3762 * 3763 * IN: tdvp - Directory to contain new entry. 3764 * svp - vnode of new entry. 3765 * name - name of new entry. 3766 * cr - credentials of caller. 3767 * ct - caller context 3768 * 3769 * RETURN: 0 if success 3770 * error code if failure 3771 * 3772 * Timestamps: 3773 * tdvp - ctime|mtime updated 3774 * svp - ctime updated 3775 */ 3776 /* ARGSUSED */ 3777 static int 3778 zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr, 3779 caller_context_t *ct, int flags) 3780 { 3781 znode_t *dzp = VTOZ(tdvp); 3782 znode_t *tzp, *szp; 3783 zfsvfs_t *zfsvfs = dzp->z_zfsvfs; 3784 zilog_t *zilog; 3785 zfs_dirlock_t *dl; 3786 dmu_tx_t *tx; 3787 vnode_t *realvp; 3788 int error; 3789 int zf = ZNEW; 3790 uint64_t parent; 3791 3792 ASSERT(tdvp->v_type == VDIR); 3793 3794 ZFS_ENTER(zfsvfs); 3795 ZFS_VERIFY_ZP(dzp); 3796 zilog = zfsvfs->z_log; 3797 3798 if (VOP_REALVP(svp, &realvp, ct) == 0) 3799 svp = realvp; 3800 3801 /* 3802 * POSIX dictates that we return EPERM here. 3803 * Better choices include ENOTSUP or EISDIR.
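 * In practice this means an attempt to hard-link a directory, e.g.
 * link("/a/dir", "/b/newname"), fails with EPERM on ZFS.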
3804 */ 3805 if (svp->v_type == VDIR) { 3806 ZFS_EXIT(zfsvfs); 3807 return (EPERM); 3808 } 3809 3810 if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) { 3811 ZFS_EXIT(zfsvfs); 3812 return (EXDEV); 3813 } 3814 3815 szp = VTOZ(svp); 3816 ZFS_VERIFY_ZP(szp); 3817 3818 /* Prevent links to .zfs/shares files */ 3819 3820 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), 3821 &parent, sizeof (uint64_t))) != 0) { 3822 ZFS_EXIT(zfsvfs); 3823 return (error); 3824 } 3825 if (parent == zfsvfs->z_shares_dir) { 3826 ZFS_EXIT(zfsvfs); 3827 return (EPERM); 3828 } 3829 3830 if (zfsvfs->z_utf8 && u8_validate(name, 3831 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) { 3832 ZFS_EXIT(zfsvfs); 3833 return (EILSEQ); 3834 } 3835 if (flags & FIGNORECASE) 3836 zf |= ZCILOOK; 3837 3838 /* 3839 * We do not support links between attributes and non-attributes 3840 * because of the potential security risk of creating links 3841 * into "normal" file space in order to circumvent restrictions 3842 * imposed in attribute space. 3843 */ 3844 if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) { 3845 ZFS_EXIT(zfsvfs); 3846 return (EINVAL); 3847 } 3848 3849 3850 if (szp->z_uid != crgetuid(cr) && 3851 secpolicy_basic_link(cr) != 0) { 3852 ZFS_EXIT(zfsvfs); 3853 return (EPERM); 3854 } 3855 3856 if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) { 3857 ZFS_EXIT(zfsvfs); 3858 return (error); 3859 } 3860 3861 top: 3862 /* 3863 * Attempt to lock directory; fail if entry already exists. 3864 */ 3865 error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL); 3866 if (error) { 3867 ZFS_EXIT(zfsvfs); 3868 return (error); 3869 } 3870 3871 tx = dmu_tx_create(zfsvfs->z_os); 3872 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE); 3873 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name); 3874 zfs_sa_upgrade_txholds(tx, szp); 3875 zfs_sa_upgrade_txholds(tx, dzp); 3876 error = dmu_tx_assign(tx, TXG_NOWAIT); 3877 if (error) { 3878 zfs_dirent_unlock(dl); 3879 if (error == ERESTART) { 3880 dmu_tx_wait(tx); 3881 dmu_tx_abort(tx); 3882 goto top; 3883 } 3884 dmu_tx_abort(tx); 3885 ZFS_EXIT(zfsvfs); 3886 return (error); 3887 } 3888 3889 error = zfs_link_create(dl, szp, tx, 0); 3890 3891 if (error == 0) { 3892 uint64_t txtype = TX_LINK; 3893 if (flags & FIGNORECASE) 3894 txtype |= TX_CI; 3895 zfs_log_link(zilog, tx, txtype, dzp, szp, name); 3896 } 3897 3898 dmu_tx_commit(tx); 3899 3900 zfs_dirent_unlock(dl); 3901 3902 if (error == 0) { 3903 vnevent_link(svp, ct); 3904 } 3905 3906 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 3907 zil_commit(zilog, UINT64_MAX, 0); 3908 3909 ZFS_EXIT(zfsvfs); 3910 return (error); 3911 } 3912 3913 /* 3914 * zfs_null_putapage() is used when the file system has been force 3915 * unmounted. It just drops the pages. 3916 */ 3917 /* ARGSUSED */ 3918 static int 3919 zfs_null_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 3920 size_t *lenp, int flags, cred_t *cr) 3921 { 3922 pvn_write_done(pp, B_INVAL|B_FORCE|B_ERROR); 3923 return (0); 3924 } 3925 3926 /* 3927 * Push a page out to disk, klustering if possible. 3928 * 3929 * IN: vp - file to push page to. 3930 * pp - page to push. 3931 * flags - additional flags. 3932 * cr - credentials of caller. 3933 * 3934 * OUT: offp - start of range pushed. 3935 * lenp - len of range pushed. 3936 * 3937 * RETURN: 0 if success 3938 * error code if failure 3939 * 3940 * NOTE: callers must have locked the page to be pushed. On 3941 * exit, the page (and all other pages in the kluster) must be 3942 * unlocked. 
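 * In this implementation the unlocking is done by pvn_write_done(),
 * called from the out: path below on both the success and the error
 * paths.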
3943 */ 3944 /* ARGSUSED */ 3945 static int 3946 zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, 3947 size_t *lenp, int flags, cred_t *cr) 3948 { 3949 znode_t *zp = VTOZ(vp); 3950 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 3951 dmu_tx_t *tx; 3952 u_offset_t off, koff; 3953 size_t len, klen; 3954 int err; 3955 3956 off = pp->p_offset; 3957 len = PAGESIZE; 3958 /* 3959 * If our blocksize is bigger than the page size, try to kluster 3960 * multiple pages so that we write a full block (thus avoiding 3961 * a read-modify-write). 3962 */ 3963 if (off < zp->z_size && zp->z_blksz > PAGESIZE) { 3964 klen = P2ROUNDUP((ulong_t)zp->z_blksz, PAGESIZE); 3965 koff = ISP2(klen) ? P2ALIGN(off, (u_offset_t)klen) : 0; 3966 ASSERT(koff <= zp->z_size); 3967 if (koff + klen > zp->z_size) 3968 klen = P2ROUNDUP(zp->z_size - koff, (uint64_t)PAGESIZE); 3969 pp = pvn_write_kluster(vp, pp, &off, &len, koff, klen, flags); 3970 } 3971 ASSERT3U(btop(len), ==, btopr(len)); 3972 3973 /* 3974 * Can't push pages past end-of-file. 3975 */ 3976 if (off >= zp->z_size) { 3977 /* ignore all pages */ 3978 err = 0; 3979 goto out; 3980 } else if (off + len > zp->z_size) { 3981 int npages = btopr(zp->z_size - off); 3982 page_t *trunc; 3983 3984 page_list_break(&pp, &trunc, npages); 3985 /* ignore pages past end of file */ 3986 if (trunc) 3987 pvn_write_done(trunc, flags); 3988 len = zp->z_size - off; 3989 } 3990 3991 if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) || 3992 zfs_owner_overquota(zfsvfs, zp, B_TRUE)) { 3993 err = EDQUOT; 3994 goto out; 3995 } 3996 top: 3997 tx = dmu_tx_create(zfsvfs->z_os); 3998 dmu_tx_hold_write(tx, zp->z_id, off, len); 3999 4000 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 4001 zfs_sa_upgrade_txholds(tx, zp); 4002 err = dmu_tx_assign(tx, TXG_NOWAIT); 4003 if (err != 0) { 4004 if (err == ERESTART) { 4005 dmu_tx_wait(tx); 4006 dmu_tx_abort(tx); 4007 goto top; 4008 } 4009 dmu_tx_abort(tx); 4010 goto out; 4011 } 4012 4013 if (zp->z_blksz <= PAGESIZE) { 4014 caddr_t va = zfs_map_page(pp, S_READ); 4015 ASSERT3U(len, <=, PAGESIZE); 4016 dmu_write(zfsvfs->z_os, zp->z_id, off, len, va, tx); 4017 zfs_unmap_page(pp, va); 4018 } else { 4019 err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, pp, tx); 4020 } 4021 4022 if (err == 0) { 4023 uint64_t mtime[2], ctime[2]; 4024 sa_bulk_attr_t bulk[2]; 4025 int count = 0; 4026 4027 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, 4028 &mtime, 16); 4029 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, 4030 &ctime, 16); 4031 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime, 4032 B_TRUE); 4033 zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off, len, 0); 4034 } 4035 dmu_tx_commit(tx); 4036 4037 out: 4038 pvn_write_done(pp, (err ? B_ERROR : 0) | flags); 4039 if (offp) 4040 *offp = off; 4041 if (lenp) 4042 *lenp = len; 4043 4044 return (err); 4045 } 4046 4047 /* 4048 * Copy the portion of the file indicated from pages into the file. 4049 * The pages are stored in a page list attached to the files vnode. 4050 * 4051 * IN: vp - vnode of file to push page data to. 4052 * off - position in file to put data. 4053 * len - amount of data to write. 4054 * flags - flags to control the operation. 4055 * cr - credentials of caller. 4056 * ct - caller context. 
4057 * 4058 * RETURN: 0 if success 4059 * error code if failure 4060 * 4061 * Timestamps: 4062 * vp - ctime|mtime updated 4063 */ 4064 /*ARGSUSED*/ 4065 static int 4066 zfs_putpage(vnode_t *vp, offset_t off, size_t len, int flags, cred_t *cr, 4067 caller_context_t *ct) 4068 { 4069 znode_t *zp = VTOZ(vp); 4070 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4071 page_t *pp; 4072 size_t io_len; 4073 u_offset_t io_off; 4074 uint_t blksz; 4075 rl_t *rl; 4076 int error = 0; 4077 4078 ZFS_ENTER(zfsvfs); 4079 ZFS_VERIFY_ZP(zp); 4080 4081 /* 4082 * Align this request to the file block size in case we kluster. 4083 * XXX - this can result in pretty aggressive locking, which can 4084 * impact simultaneous read/write access. One option might be 4085 * to break up long requests (len == 0) into block-by-block 4086 * operations to get narrower locking. 4087 */ 4088 blksz = zp->z_blksz; 4089 if (ISP2(blksz)) 4090 io_off = P2ALIGN_TYPED(off, blksz, u_offset_t); 4091 else 4092 io_off = 0; 4093 if (len > 0 && ISP2(blksz)) 4094 io_len = P2ROUNDUP_TYPED(len + (off - io_off), blksz, size_t); 4095 else 4096 io_len = 0; 4097 4098 if (io_len == 0) { 4099 /* 4100 * Search the entire vp list for pages >= io_off. 4101 */ 4102 rl = zfs_range_lock(zp, io_off, UINT64_MAX, RL_WRITER); 4103 error = pvn_vplist_dirty(vp, io_off, zfs_putapage, flags, cr); 4104 goto out; 4105 } 4106 rl = zfs_range_lock(zp, io_off, io_len, RL_WRITER); 4107 4108 if (off > zp->z_size) { 4109 /* past end of file */ 4110 zfs_range_unlock(rl); 4111 ZFS_EXIT(zfsvfs); 4112 return (0); 4113 } 4114 4115 len = MIN(io_len, P2ROUNDUP(zp->z_size, PAGESIZE) - io_off); 4116 4117 for (off = io_off; io_off < off + len; io_off += io_len) { 4118 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) { 4119 pp = page_lookup(vp, io_off, 4120 (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED); 4121 } else { 4122 pp = page_lookup_nowait(vp, io_off, 4123 (flags & B_FREE) ? SE_EXCL : SE_SHARED); 4124 } 4125 4126 if (pp != NULL && pvn_getdirty(pp, flags)) { 4127 int err; 4128 4129 /* 4130 * Found a dirty page to push 4131 */ 4132 err = zfs_putapage(vp, pp, &io_off, &io_len, flags, cr); 4133 if (err) 4134 error = err; 4135 } else { 4136 io_len = PAGESIZE; 4137 } 4138 } 4139 out: 4140 zfs_range_unlock(rl); 4141 if ((flags & B_ASYNC) == 0 || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 4142 zil_commit(zfsvfs->z_log, UINT64_MAX, zp->z_id); 4143 ZFS_EXIT(zfsvfs); 4144 return (error); 4145 } 4146 4147 /*ARGSUSED*/ 4148 void 4149 zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct) 4150 { 4151 znode_t *zp = VTOZ(vp); 4152 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4153 int error; 4154 4155 rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER); 4156 if (zp->z_sa_hdl == NULL) { 4157 /* 4158 * The fs has been unmounted, or we did a 4159 * suspend/resume and this file no longer exists. 4160 */ 4161 if (vn_has_cached_data(vp)) { 4162 (void) pvn_vplist_dirty(vp, 0, zfs_null_putapage, 4163 B_INVAL, cr); 4164 } 4165 4166 mutex_enter(&zp->z_lock); 4167 mutex_enter(&vp->v_lock); 4168 ASSERT(vp->v_count == 1); 4169 vp->v_count = 0; 4170 mutex_exit(&vp->v_lock); 4171 mutex_exit(&zp->z_lock); 4172 rw_exit(&zfsvfs->z_teardown_inactive_lock); 4173 zfs_znode_free(zp); 4174 return; 4175 } 4176 4177 /* 4178 * Attempt to push any data in the page cache. If this fails 4179 * we will get kicked out later in zfs_zinactive().
4180 */ 4181 if (vn_has_cached_data(vp)) { 4182 (void) pvn_vplist_dirty(vp, 0, zfs_putapage, B_INVAL|B_ASYNC, 4183 cr); 4184 } 4185 4186 if (zp->z_atime_dirty && zp->z_unlinked == 0) { 4187 dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os); 4188 4189 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); 4190 zfs_sa_upgrade_txholds(tx, zp); 4191 error = dmu_tx_assign(tx, TXG_WAIT); 4192 if (error) { 4193 dmu_tx_abort(tx); 4194 } else { 4195 mutex_enter(&zp->z_lock); 4196 (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs), 4197 (void *)&zp->z_atime, sizeof (zp->z_atime), tx); 4198 zp->z_atime_dirty = 0; 4199 mutex_exit(&zp->z_lock); 4200 dmu_tx_commit(tx); 4201 } 4202 } 4203 4204 zfs_zinactive(zp); 4205 rw_exit(&zfsvfs->z_teardown_inactive_lock); 4206 } 4207 4208 /* 4209 * Bounds-check the seek operation. 4210 * 4211 * IN: vp - vnode seeking within 4212 * ooff - old file offset 4213 * noffp - pointer to new file offset 4214 * ct - caller context 4215 * 4216 * RETURN: 0 if success 4217 * EINVAL if new offset invalid 4218 */ 4219 /* ARGSUSED */ 4220 static int 4221 zfs_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, 4222 caller_context_t *ct) 4223 { 4224 if (vp->v_type == VDIR) 4225 return (0); 4226 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); 4227 } 4228 4229 /* 4230 * Pre-filter the generic locking function to trap attempts to place 4231 * a mandatory lock on a memory mapped file. 4232 */ 4233 static int 4234 zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset, 4235 flk_callback_t *flk_cbp, cred_t *cr, caller_context_t *ct) 4236 { 4237 znode_t *zp = VTOZ(vp); 4238 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4239 4240 ZFS_ENTER(zfsvfs); 4241 ZFS_VERIFY_ZP(zp); 4242 4243 /* 4244 * We are following the UFS semantics with respect to mapcnt 4245 * here: If we see that the file is mapped already, then we will 4246 * return an error, but we don't worry about races between this 4247 * function and zfs_map(). 4248 */ 4249 if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) { 4250 ZFS_EXIT(zfsvfs); 4251 return (EAGAIN); 4252 } 4253 ZFS_EXIT(zfsvfs); 4254 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct)); 4255 } 4256 4257 /* 4258 * If we can't find a page in the cache, we will create a new page 4259 * and fill it with file data. For efficiency, we may try to fill 4260 * multiple pages at once (klustering) to fill up the supplied page 4261 * list. Note that the pages to be filled are held with an exclusive 4262 * lock to prevent access by other threads while they are being filled. 4263 */ 4264 static int 4265 zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg, 4266 caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw) 4267 { 4268 znode_t *zp = VTOZ(vp); 4269 page_t *pp, *cur_pp; 4270 objset_t *os = zp->z_zfsvfs->z_os; 4271 u_offset_t io_off, total; 4272 size_t io_len; 4273 int err; 4274 4275 if (plsz == PAGESIZE || zp->z_blksz <= PAGESIZE) { 4276 /* 4277 * We only have a single page, don't bother klustering 4278 */ 4279 io_off = off; 4280 io_len = PAGESIZE; 4281 pp = page_create_va(vp, io_off, io_len, 4282 PG_EXCL | PG_WAIT, seg, addr); 4283 } else { 4284 /* 4285 * Try to find enough pages to fill the page list 4286 */ 4287 pp = pvn_read_kluster(vp, off, seg, addr, &io_off, 4288 &io_len, off, plsz, 0); 4289 } 4290 if (pp == NULL) { 4291 /* 4292 * The page already exists, nothing to do here. 4293 */ 4294 *pl = NULL; 4295 return (0); 4296 } 4297 4298 /* 4299 * Fill the pages in the kluster. 
4300 */ 4301 cur_pp = pp; 4302 for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) { 4303 caddr_t va; 4304 4305 ASSERT3U(io_off, ==, cur_pp->p_offset); 4306 va = zfs_map_page(cur_pp, S_WRITE); 4307 err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va, 4308 DMU_READ_PREFETCH); 4309 zfs_unmap_page(cur_pp, va); 4310 if (err) { 4311 /* On error, toss the entire kluster */ 4312 pvn_read_done(pp, B_ERROR); 4313 /* convert checksum errors into IO errors */ 4314 if (err == ECKSUM) 4315 err = EIO; 4316 return (err); 4317 } 4318 cur_pp = cur_pp->p_next; 4319 } 4320 4321 /* 4322 * Fill in the page list array from the kluster starting 4323 * from the desired offset `off'. 4324 * NOTE: the page list will always be null terminated. 4325 */ 4326 pvn_plist_init(pp, pl, plsz, off, io_len, rw); 4327 ASSERT(pl == NULL || (*pl)->p_offset == off); 4328 4329 return (0); 4330 } 4331 4332 /* 4333 * Return pointers to the pages for the file region [off, off + len] 4334 * in the pl array. If plsz is greater than len, this function may 4335 * also return page pointers from after the specified region 4336 * (i.e. the region [off, off + plsz]). These additional pages are 4337 * only returned if they are already in the cache, or were created as 4338 * part of a klustered read. 4339 * 4340 * IN: vp - vnode of file to get data from. 4341 * off - position in file to get data from. 4342 * len - amount of data to retrieve. 4343 * plsz - length of provided page list. 4344 * seg - segment to obtain pages for. 4345 * addr - virtual address of fault. 4346 * rw - mode of created pages. 4347 * cr - credentials of caller. 4348 * ct - caller context. 4349 * 4350 * OUT: protp - protection mode of created pages. 4351 * pl - list of pages created. 4352 * 4353 * RETURN: 0 if success 4354 * error code if failure 4355 * 4356 * Timestamps: 4357 * vp - atime updated 4358 */ 4359 /* ARGSUSED */ 4360 static int 4361 zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp, 4362 page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, 4363 enum seg_rw rw, cred_t *cr, caller_context_t *ct) 4364 { 4365 znode_t *zp = VTOZ(vp); 4366 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4367 page_t **pl0 = pl; 4368 int err = 0; 4369 4370 /* we do our own caching, faultahead is unnecessary */ 4371 if (pl == NULL) 4372 return (0); 4373 else if (len > plsz) 4374 len = plsz; 4375 else 4376 len = P2ROUNDUP(len, PAGESIZE); 4377 ASSERT(plsz >= len); 4378 4379 ZFS_ENTER(zfsvfs); 4380 ZFS_VERIFY_ZP(zp); 4381 4382 if (protp) 4383 *protp = PROT_ALL; 4384 4385 /* 4386 * Loop through the requested range [off, off + len) looking 4387 * for pages. If we don't find a page, we will need to create 4388 * a new page and fill it with data from the file. 4389 */ 4390 while (len > 0) { 4391 if (*pl = page_lookup(vp, off, SE_SHARED)) 4392 *(pl+1) = NULL; 4393 else if (err = zfs_fillpage(vp, off, seg, addr, pl, plsz, rw)) 4394 goto out; 4395 while (*pl) { 4396 ASSERT3U((*pl)->p_offset, ==, off); 4397 off += PAGESIZE; 4398 addr += PAGESIZE; 4399 if (len > 0) { 4400 ASSERT3U(len, >=, PAGESIZE); 4401 len -= PAGESIZE; 4402 } 4403 ASSERT3U(plsz, >=, PAGESIZE); 4404 plsz -= PAGESIZE; 4405 pl++; 4406 } 4407 } 4408 4409 /* 4410 * Fill out the page array with any pages already in the cache. 4411 */ 4412 while (plsz > 0 && 4413 (*pl++ = page_lookup_nowait(vp, off, SE_SHARED))) { 4414 off += PAGESIZE; 4415 plsz -= PAGESIZE; 4416 } 4417 out: 4418 if (err) { 4419 /* 4420 * Release any pages we have previously locked. 
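 * The pages are unlocked in reverse order, walking back from the
 * current position in the caller's page list to its start.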
4421 */
4422 while (pl > pl0)
4423 page_unlock(*--pl);
4424 } else {
4425 ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
4426 }
4427
4428 *pl = NULL;
4429
4430 ZFS_EXIT(zfsvfs);
4431 return (err);
4432 }
4433
4434 /*
4435 * Request a memory map for a section of a file. This code interacts
4436 * with common code and the VM system as follows:
4437 *
4438 * common code calls mmap(), which ends up in smmap_common()
4439 *
4440 * this calls VOP_MAP(), which takes you into (say) zfs
4441 *
4442 * zfs_map() calls as_map(), passing segvn_create() as the callback
4443 *
4444 * segvn_create() creates the new segment and calls VOP_ADDMAP()
4445 *
4446 * zfs_addmap() updates z_mapcnt
4447 */
4448 /*ARGSUSED*/
4449 static int
4450 zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
4451 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4452 caller_context_t *ct)
4453 {
4454 znode_t *zp = VTOZ(vp);
4455 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
4456 segvn_crargs_t vn_a;
4457 int error;
4458
4459 ZFS_ENTER(zfsvfs);
4460 ZFS_VERIFY_ZP(zp);
4461
4462 if ((prot & PROT_WRITE) && (zp->z_pflags &
4463 (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
4464 ZFS_EXIT(zfsvfs);
4465 return (EPERM);
4466 }
4467
4468 if ((prot & (PROT_READ | PROT_EXEC)) &&
4469 (zp->z_pflags & ZFS_AV_QUARANTINED)) {
4470 ZFS_EXIT(zfsvfs);
4471 return (EACCES);
4472 }
4473
4474 if (vp->v_flag & VNOMAP) {
4475 ZFS_EXIT(zfsvfs);
4476 return (ENOSYS);
4477 }
4478
4479 if (off < 0 || len > MAXOFFSET_T - off) {
4480 ZFS_EXIT(zfsvfs);
4481 return (ENXIO);
4482 }
4483
4484 if (vp->v_type != VREG) {
4485 ZFS_EXIT(zfsvfs);
4486 return (ENODEV);
4487 }
4488
4489 /*
4490 * If file is locked, disallow mapping.
4491 */
4492 if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
4493 ZFS_EXIT(zfsvfs);
4494 return (EAGAIN);
4495 }
4496
4497 as_rangelock(as);
4498 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
4499 if (error != 0) {
4500 as_rangeunlock(as);
4501 ZFS_EXIT(zfsvfs);
4502 return (error);
4503 }
4504
4505 vn_a.vp = vp;
4506 vn_a.offset = (u_offset_t)off;
4507 vn_a.type = flags & MAP_TYPE;
4508 vn_a.prot = prot;
4509 vn_a.maxprot = maxprot;
4510 vn_a.cred = cr;
4511 vn_a.amp = NULL;
4512 vn_a.flags = flags & ~MAP_TYPE;
4513 vn_a.szc = 0;
4514 vn_a.lgrp_mem_policy_flags = 0;
4515
4516 error = as_map(as, *addrp, len, segvn_create, &vn_a);
4517
4518 as_rangeunlock(as);
4519 ZFS_EXIT(zfsvfs);
4520 return (error);
4521 }
4522
4523 /* ARGSUSED */
4524 static int
4525 zfs_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
4526 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, cred_t *cr,
4527 caller_context_t *ct)
4528 {
4529 uint64_t pages = btopr(len);
4530
4531 atomic_add_64(&VTOZ(vp)->z_mapcnt, pages);
4532 return (0);
4533 }
4534
4535 /*
4536 * The reason we push dirty pages as part of zfs_delmap() is so that we get a
4537 * more accurate mtime for the associated file. Since we don't have a way of
4538 * detecting when the data was actually modified, we have to resort to
4539 * heuristics. If an explicit msync() is done, then we mark the mtime when the
4540 * last page is pushed. The problem occurs when the msync() call is omitted,
4541 * which is by far the most common case:
4542 *
4543 * open()
4544 * mmap()
4545 * <modify memory>
4546 * munmap()
4547 * close()
4548 * <time lapse>
4549 * putpage() via fsflush
4550 *
4551 * If we wait for fsflush to come along, we can have a modification time that
4552 * is some arbitrary point in the future.
In order to prevent this in the 4553 * common case, we flush pages whenever a (MAP_SHARED, PROT_WRITE) mapping is 4554 * torn down. 4555 */ 4556 /* ARGSUSED */ 4557 static int 4558 zfs_delmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr, 4559 size_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr, 4560 caller_context_t *ct) 4561 { 4562 uint64_t pages = btopr(len); 4563 4564 ASSERT3U(VTOZ(vp)->z_mapcnt, >=, pages); 4565 atomic_add_64(&VTOZ(vp)->z_mapcnt, -pages); 4566 4567 if ((flags & MAP_SHARED) && (prot & PROT_WRITE) && 4568 vn_has_cached_data(vp)) 4569 (void) VOP_PUTPAGE(vp, off, len, B_ASYNC, cr, ct); 4570 4571 return (0); 4572 } 4573 4574 /* 4575 * Free or allocate space in a file. Currently, this function only 4576 * supports the `F_FREESP' command. However, this command is somewhat 4577 * misnamed, as its functionality includes the ability to allocate as 4578 * well as free space. 4579 * 4580 * IN: vp - vnode of file to free data in. 4581 * cmd - action to take (only F_FREESP supported). 4582 * bfp - section of file to free/alloc. 4583 * flag - current file open mode flags. 4584 * offset - current file offset. 4585 * cr - credentials of caller [UNUSED]. 4586 * ct - caller context. 4587 * 4588 * RETURN: 0 if success 4589 * error code if failure 4590 * 4591 * Timestamps: 4592 * vp - ctime|mtime updated 4593 */ 4594 /* ARGSUSED */ 4595 static int 4596 zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag, 4597 offset_t offset, cred_t *cr, caller_context_t *ct) 4598 { 4599 znode_t *zp = VTOZ(vp); 4600 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4601 uint64_t off, len; 4602 int error; 4603 4604 ZFS_ENTER(zfsvfs); 4605 ZFS_VERIFY_ZP(zp); 4606 4607 if (cmd != F_FREESP) { 4608 ZFS_EXIT(zfsvfs); 4609 return (EINVAL); 4610 } 4611 4612 if (error = convoff(vp, bfp, 0, offset)) { 4613 ZFS_EXIT(zfsvfs); 4614 return (error); 4615 } 4616 4617 if (bfp->l_len < 0) { 4618 ZFS_EXIT(zfsvfs); 4619 return (EINVAL); 4620 } 4621 4622 off = bfp->l_start; 4623 len = bfp->l_len; /* 0 means from off to end of file */ 4624 4625 error = zfs_freesp(zp, off, len, flag, TRUE); 4626 4627 ZFS_EXIT(zfsvfs); 4628 return (error); 4629 } 4630 4631 /*ARGSUSED*/ 4632 static int 4633 zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct) 4634 { 4635 znode_t *zp = VTOZ(vp); 4636 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4637 uint32_t gen; 4638 uint64_t gen64; 4639 uint64_t object = zp->z_id; 4640 zfid_short_t *zfid; 4641 int size, i, error; 4642 4643 ZFS_ENTER(zfsvfs); 4644 ZFS_VERIFY_ZP(zp); 4645 4646 if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs), 4647 &gen64, sizeof (uint64_t))) != 0) { 4648 ZFS_EXIT(zfsvfs); 4649 return (error); 4650 } 4651 4652 gen = (uint32_t)gen64; 4653 4654 size = (zfsvfs->z_parent != zfsvfs) ? 
LONG_FID_LEN : SHORT_FID_LEN; 4655 if (fidp->fid_len < size) { 4656 fidp->fid_len = size; 4657 ZFS_EXIT(zfsvfs); 4658 return (ENOSPC); 4659 } 4660 4661 zfid = (zfid_short_t *)fidp; 4662 4663 zfid->zf_len = size; 4664 4665 for (i = 0; i < sizeof (zfid->zf_object); i++) 4666 zfid->zf_object[i] = (uint8_t)(object >> (8 * i)); 4667 4668 /* Must have a non-zero generation number to distinguish from .zfs */ 4669 if (gen == 0) 4670 gen = 1; 4671 for (i = 0; i < sizeof (zfid->zf_gen); i++) 4672 zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i)); 4673 4674 if (size == LONG_FID_LEN) { 4675 uint64_t objsetid = dmu_objset_id(zfsvfs->z_os); 4676 zfid_long_t *zlfid; 4677 4678 zlfid = (zfid_long_t *)fidp; 4679 4680 for (i = 0; i < sizeof (zlfid->zf_setid); i++) 4681 zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i)); 4682 4683 /* XXX - this should be the generation number for the objset */ 4684 for (i = 0; i < sizeof (zlfid->zf_setgen); i++) 4685 zlfid->zf_setgen[i] = 0; 4686 } 4687 4688 ZFS_EXIT(zfsvfs); 4689 return (0); 4690 } 4691 4692 static int 4693 zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr, 4694 caller_context_t *ct) 4695 { 4696 znode_t *zp, *xzp; 4697 zfsvfs_t *zfsvfs; 4698 zfs_dirlock_t *dl; 4699 int error; 4700 4701 switch (cmd) { 4702 case _PC_LINK_MAX: 4703 *valp = ULONG_MAX; 4704 return (0); 4705 4706 case _PC_FILESIZEBITS: 4707 *valp = 64; 4708 return (0); 4709 4710 case _PC_XATTR_EXISTS: 4711 zp = VTOZ(vp); 4712 zfsvfs = zp->z_zfsvfs; 4713 ZFS_ENTER(zfsvfs); 4714 ZFS_VERIFY_ZP(zp); 4715 *valp = 0; 4716 error = zfs_dirent_lock(&dl, zp, "", &xzp, 4717 ZXATTR | ZEXISTS | ZSHARED, NULL, NULL); 4718 if (error == 0) { 4719 zfs_dirent_unlock(dl); 4720 if (!zfs_dirempty(xzp)) 4721 *valp = 1; 4722 VN_RELE(ZTOV(xzp)); 4723 } else if (error == ENOENT) { 4724 /* 4725 * If there aren't extended attributes, it's the 4726 * same as having zero of them. 4727 */ 4728 error = 0; 4729 } 4730 ZFS_EXIT(zfsvfs); 4731 return (error); 4732 4733 case _PC_SATTR_ENABLED: 4734 case _PC_SATTR_EXISTS: 4735 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) && 4736 (vp->v_type == VREG || vp->v_type == VDIR); 4737 return (0); 4738 4739 case _PC_ACCESS_FILTERING: 4740 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_ACCESS_FILTER) && 4741 vp->v_type == VDIR; 4742 return (0); 4743 4744 case _PC_ACL_ENABLED: 4745 *valp = _ACL_ACE_ENABLED; 4746 return (0); 4747 4748 case _PC_MIN_HOLE_SIZE: 4749 *valp = (ulong_t)SPA_MINBLOCKSIZE; 4750 return (0); 4751 4752 case _PC_TIMESTAMP_RESOLUTION: 4753 /* nanosecond timestamp resolution */ 4754 *valp = 1L; 4755 return (0); 4756 4757 default: 4758 return (fs_pathconf(vp, cmd, valp, cr, ct)); 4759 } 4760 } 4761 4762 /*ARGSUSED*/ 4763 static int 4764 zfs_getsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr, 4765 caller_context_t *ct) 4766 { 4767 znode_t *zp = VTOZ(vp); 4768 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4769 int error; 4770 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE; 4771 4772 ZFS_ENTER(zfsvfs); 4773 ZFS_VERIFY_ZP(zp); 4774 error = zfs_getacl(zp, vsecp, skipaclchk, cr); 4775 ZFS_EXIT(zfsvfs); 4776 4777 return (error); 4778 } 4779 4780 /*ARGSUSED*/ 4781 static int 4782 zfs_setsecattr(vnode_t *vp, vsecattr_t *vsecp, int flag, cred_t *cr, 4783 caller_context_t *ct) 4784 { 4785 znode_t *zp = VTOZ(vp); 4786 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4787 int error; 4788 boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? 
B_TRUE : B_FALSE; 4789 zilog_t *zilog = zfsvfs->z_log; 4790 4791 ZFS_ENTER(zfsvfs); 4792 ZFS_VERIFY_ZP(zp); 4793 4794 error = zfs_setacl(zp, vsecp, skipaclchk, cr); 4795 4796 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS) 4797 zil_commit(zilog, UINT64_MAX, 0); 4798 4799 ZFS_EXIT(zfsvfs); 4800 return (error); 4801 } 4802 4803 /* 4804 * Tunable, both must be a power of 2. 4805 * 4806 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf 4807 * zcr_blksz_max: if set to less than the file block size, allow loaning out of 4808 * an arcbuf for a partial block read 4809 */ 4810 int zcr_blksz_min = (1 << 10); /* 1K */ 4811 int zcr_blksz_max = (1 << 17); /* 128K */ 4812 4813 /*ARGSUSED*/ 4814 static int 4815 zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr, 4816 caller_context_t *ct) 4817 { 4818 znode_t *zp = VTOZ(vp); 4819 zfsvfs_t *zfsvfs = zp->z_zfsvfs; 4820 int max_blksz = zfsvfs->z_max_blksz; 4821 uio_t *uio = &xuio->xu_uio; 4822 ssize_t size = uio->uio_resid; 4823 offset_t offset = uio->uio_loffset; 4824 int blksz; 4825 int fullblk, i; 4826 arc_buf_t *abuf; 4827 ssize_t maxsize; 4828 int preamble, postamble; 4829 4830 if (xuio->xu_type != UIOTYPE_ZEROCOPY) 4831 return (EINVAL); 4832 4833 ZFS_ENTER(zfsvfs); 4834 ZFS_VERIFY_ZP(zp); 4835 switch (ioflag) { 4836 case UIO_WRITE: 4837 /* 4838 * Loan out an arc_buf for write if write size is bigger than 4839 * max_blksz, and the file's block size is also max_blksz. 4840 */ 4841 blksz = max_blksz; 4842 if (size < blksz || zp->z_blksz != blksz) { 4843 ZFS_EXIT(zfsvfs); 4844 return (EINVAL); 4845 } 4846 /* 4847 * Caller requests buffers for write before knowing where the 4848 * write offset might be (e.g. NFS TCP write). 4849 */ 4850 if (offset == -1) { 4851 preamble = 0; 4852 } else { 4853 preamble = P2PHASE(offset, blksz); 4854 if (preamble) { 4855 preamble = blksz - preamble; 4856 size -= preamble; 4857 } 4858 } 4859 4860 postamble = P2PHASE(size, blksz); 4861 size -= postamble; 4862 4863 fullblk = size / blksz; 4864 (void) dmu_xuio_init(xuio, 4865 (preamble != 0) + fullblk + (postamble != 0)); 4866 DTRACE_PROBE3(zfs_reqzcbuf_align, int, preamble, 4867 int, postamble, int, 4868 (preamble != 0) + fullblk + (postamble != 0)); 4869 4870 /* 4871 * Have to fix iov base/len for partial buffers. They 4872 * currently represent full arc_buf's. 4873 */ 4874 if (preamble) { 4875 /* data begins in the middle of the arc_buf */ 4876 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 4877 blksz); 4878 ASSERT(abuf); 4879 (void) dmu_xuio_add(xuio, abuf, 4880 blksz - preamble, preamble); 4881 } 4882 4883 for (i = 0; i < fullblk; i++) { 4884 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 4885 blksz); 4886 ASSERT(abuf); 4887 (void) dmu_xuio_add(xuio, abuf, 0, blksz); 4888 } 4889 4890 if (postamble) { 4891 /* data ends in the middle of the arc_buf */ 4892 abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl), 4893 blksz); 4894 ASSERT(abuf); 4895 (void) dmu_xuio_add(xuio, abuf, 0, postamble); 4896 } 4897 break; 4898 case UIO_READ: 4899 /* 4900 * Loan out an arc_buf for read if the read size is larger than 4901 * the current file block size. Block alignment is not 4902 * considered. Partial arc_buf will be loaned out for read. 
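 * For example, with the default tunables a file block size below 1K is
 * treated as 1K (zcr_blksz_min) for this check and one above 128K is
 * capped at 128K (zcr_blksz_max). The request is rejected (EINVAL) if
 * the clamped block size still exceeds z_max_blksz, if the (possibly
 * truncated) read is smaller than the clamped block size, or if the
 * file has pages in the page cache.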
4903 */ 4904 blksz = zp->z_blksz; 4905 if (blksz < zcr_blksz_min) 4906 blksz = zcr_blksz_min; 4907 if (blksz > zcr_blksz_max) 4908 blksz = zcr_blksz_max; 4909 /* avoid potential complexity of dealing with it */ 4910 if (blksz > max_blksz) { 4911 ZFS_EXIT(zfsvfs); 4912 return (EINVAL); 4913 } 4914 4915 maxsize = zp->z_size - uio->uio_loffset; 4916 if (size > maxsize) 4917 size = maxsize; 4918 4919 if (size < blksz || vn_has_cached_data(vp)) { 4920 ZFS_EXIT(zfsvfs); 4921 return (EINVAL); 4922 } 4923 break; 4924 default: 4925 ZFS_EXIT(zfsvfs); 4926 return (EINVAL); 4927 } 4928 4929 uio->uio_extflg = UIO_XUIO; 4930 XUIO_XUZC_RW(xuio) = ioflag; 4931 ZFS_EXIT(zfsvfs); 4932 return (0); 4933 } 4934 4935 /*ARGSUSED*/ 4936 static int 4937 zfs_retzcbuf(vnode_t *vp, xuio_t *xuio, cred_t *cr, caller_context_t *ct) 4938 { 4939 int i; 4940 arc_buf_t *abuf; 4941 int ioflag = XUIO_XUZC_RW(xuio); 4942 4943 ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY); 4944 4945 i = dmu_xuio_cnt(xuio); 4946 while (i-- > 0) { 4947 abuf = dmu_xuio_arcbuf(xuio, i); 4948 /* 4949 * if abuf == NULL, it must be a write buffer 4950 * that has been returned in zfs_write(). 4951 */ 4952 if (abuf) 4953 dmu_return_arcbuf(abuf); 4954 ASSERT(abuf || ioflag == UIO_WRITE); 4955 } 4956 4957 dmu_xuio_fini(xuio); 4958 return (0); 4959 } 4960 4961 /* 4962 * Predeclare these here so that the compiler assumes that 4963 * this is an "old style" function declaration that does 4964 * not include arguments => we won't get type mismatch errors 4965 * in the initializations that follow. 4966 */ 4967 static int zfs_inval(); 4968 static int zfs_isdir(); 4969 4970 static int 4971 zfs_inval() 4972 { 4973 return (EINVAL); 4974 } 4975 4976 static int 4977 zfs_isdir() 4978 { 4979 return (EISDIR); 4980 } 4981 /* 4982 * Directory vnode operations template 4983 */ 4984 vnodeops_t *zfs_dvnodeops; 4985 const fs_operation_def_t zfs_dvnodeops_template[] = { 4986 VOPNAME_OPEN, { .vop_open = zfs_open }, 4987 VOPNAME_CLOSE, { .vop_close = zfs_close }, 4988 VOPNAME_READ, { .error = zfs_isdir }, 4989 VOPNAME_WRITE, { .error = zfs_isdir }, 4990 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl }, 4991 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr }, 4992 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr }, 4993 VOPNAME_ACCESS, { .vop_access = zfs_access }, 4994 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup }, 4995 VOPNAME_CREATE, { .vop_create = zfs_create }, 4996 VOPNAME_REMOVE, { .vop_remove = zfs_remove }, 4997 VOPNAME_LINK, { .vop_link = zfs_link }, 4998 VOPNAME_RENAME, { .vop_rename = zfs_rename }, 4999 VOPNAME_MKDIR, { .vop_mkdir = zfs_mkdir }, 5000 VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir }, 5001 VOPNAME_READDIR, { .vop_readdir = zfs_readdir }, 5002 VOPNAME_SYMLINK, { .vop_symlink = zfs_symlink }, 5003 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync }, 5004 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5005 VOPNAME_FID, { .vop_fid = zfs_fid }, 5006 VOPNAME_SEEK, { .vop_seek = zfs_seek }, 5007 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5008 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr }, 5009 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr }, 5010 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 5011 NULL, NULL 5012 }; 5013 5014 /* 5015 * Regular file vnode operations template 5016 */ 5017 vnodeops_t *zfs_fvnodeops; 5018 const fs_operation_def_t zfs_fvnodeops_template[] = { 5019 VOPNAME_OPEN, { .vop_open = zfs_open }, 5020 VOPNAME_CLOSE, { .vop_close = zfs_close }, 5021 VOPNAME_READ, { .vop_read = zfs_read }, 5022 VOPNAME_WRITE, { .vop_write = zfs_write }, 
5023 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
5024 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5025 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5026 VOPNAME_ACCESS, { .vop_access = zfs_access },
5027 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
5028 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5029 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync },
5030 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5031 VOPNAME_FID, { .vop_fid = zfs_fid },
5032 VOPNAME_SEEK, { .vop_seek = zfs_seek },
5033 VOPNAME_FRLOCK, { .vop_frlock = zfs_frlock },
5034 VOPNAME_SPACE, { .vop_space = zfs_space },
5035 VOPNAME_GETPAGE, { .vop_getpage = zfs_getpage },
5036 VOPNAME_PUTPAGE, { .vop_putpage = zfs_putpage },
5037 VOPNAME_MAP, { .vop_map = zfs_map },
5038 VOPNAME_ADDMAP, { .vop_addmap = zfs_addmap },
5039 VOPNAME_DELMAP, { .vop_delmap = zfs_delmap },
5040 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5041 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5042 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5043 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5044 VOPNAME_REQZCBUF, { .vop_reqzcbuf = zfs_reqzcbuf },
5045 VOPNAME_RETZCBUF, { .vop_retzcbuf = zfs_retzcbuf },
5046 NULL, NULL
5047 };
5048
5049 /*
5050 * Symbolic link vnode operations template
5051 */
5052 vnodeops_t *zfs_symvnodeops;
5053 const fs_operation_def_t zfs_symvnodeops_template[] = {
5054 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5055 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5056 VOPNAME_ACCESS, { .vop_access = zfs_access },
5057 VOPNAME_RENAME, { .vop_rename = zfs_rename },
5058 VOPNAME_READLINK, { .vop_readlink = zfs_readlink },
5059 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5060 VOPNAME_FID, { .vop_fid = zfs_fid },
5061 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5062 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5063 NULL, NULL
5064 };
5065
5066 /*
5067 * Special share hidden files vnode operations template
5068 */
5069 vnodeops_t *zfs_sharevnodeops;
5070 const fs_operation_def_t zfs_sharevnodeops_template[] = {
5071 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5072 VOPNAME_ACCESS, { .vop_access = zfs_access },
5073 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive },
5074 VOPNAME_FID, { .vop_fid = zfs_fid },
5075 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf },
5076 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr },
5077 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr },
5078 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support },
5079 NULL, NULL
5080 };
5081
5082 /*
5083 * Extended attribute directory vnode operations template
5084 * This template is identical to the directory vnode
5085 * operations template except for restricted operations:
5086 * VOP_MKDIR()
5087 * VOP_SYMLINK()
5088 * Note that there are other restrictions embedded in:
5089 * zfs_create() - restrict type to VREG
5090 * zfs_link() - no links into/out of attribute space
5091 * zfs_rename() - no moves into/out of attribute space
5092 */
5093 vnodeops_t *zfs_xdvnodeops;
5094 const fs_operation_def_t zfs_xdvnodeops_template[] = {
5095 VOPNAME_OPEN, { .vop_open = zfs_open },
5096 VOPNAME_CLOSE, { .vop_close = zfs_close },
5097 VOPNAME_IOCTL, { .vop_ioctl = zfs_ioctl },
5098 VOPNAME_GETATTR, { .vop_getattr = zfs_getattr },
5099 VOPNAME_SETATTR, { .vop_setattr = zfs_setattr },
5100 VOPNAME_ACCESS, { .vop_access = zfs_access },
5101 VOPNAME_LOOKUP, { .vop_lookup = zfs_lookup },
5102 VOPNAME_CREATE, { .vop_create = zfs_create },
5103 VOPNAME_REMOVE, { .vop_remove =
zfs_remove }, 5104 VOPNAME_LINK, { .vop_link = zfs_link }, 5105 VOPNAME_RENAME, { .vop_rename = zfs_rename }, 5106 VOPNAME_MKDIR, { .error = zfs_inval }, 5107 VOPNAME_RMDIR, { .vop_rmdir = zfs_rmdir }, 5108 VOPNAME_READDIR, { .vop_readdir = zfs_readdir }, 5109 VOPNAME_SYMLINK, { .error = zfs_inval }, 5110 VOPNAME_FSYNC, { .vop_fsync = zfs_fsync }, 5111 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5112 VOPNAME_FID, { .vop_fid = zfs_fid }, 5113 VOPNAME_SEEK, { .vop_seek = zfs_seek }, 5114 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5115 VOPNAME_GETSECATTR, { .vop_getsecattr = zfs_getsecattr }, 5116 VOPNAME_SETSECATTR, { .vop_setsecattr = zfs_setsecattr }, 5117 VOPNAME_VNEVENT, { .vop_vnevent = fs_vnevent_support }, 5118 NULL, NULL 5119 }; 5120 5121 /* 5122 * Error vnode operations template 5123 */ 5124 vnodeops_t *zfs_evnodeops; 5125 const fs_operation_def_t zfs_evnodeops_template[] = { 5126 VOPNAME_INACTIVE, { .vop_inactive = zfs_inactive }, 5127 VOPNAME_PATHCONF, { .vop_pathconf = zfs_pathconf }, 5128 NULL, NULL 5129 }; 5130
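
/*
 * Note: the templates above are not used directly as vnodeops_t. They are
 * converted elsewhere (zfs_create_op_tables() in zfs_vfsops.c) by passing
 * each template to vn_make_ops(), roughly as sketched below; this is only
 * an illustration of how the tables are consumed, not code in this file:
 *
 *	error = vn_make_ops(MNTTYPE_ZFS, zfs_dvnodeops_template,
 *	    &zfs_dvnodeops);
 *	if (error == 0)
 *		error = vn_make_ops(MNTTYPE_ZFS, zfs_fvnodeops_template,
 *		    &zfs_fvnodeops);
 *	...
 */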